├── .circleci └── config.yml ├── .gitignore ├── Jenkinsfile ├── LICENSE ├── README.md ├── ast.go ├── ast_test.go ├── doc.go ├── go.mod ├── go.sum ├── influxql.go ├── internal ├── influxdata_influxql_internal.pb.go └── influxdata_influxql_internal.proto ├── params.go ├── parse_tree.go ├── parse_tree_test.go ├── parser.go ├── parser_test.go ├── sanitize.go ├── sanitize_test.go ├── scanner.go ├── scanner_test.go ├── token.go └── utils.go /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | jobs: 4 | test: 5 | docker: 6 | - image: cimg/go:1.18 7 | steps: 8 | - checkout 9 | - run: 10 | name: Run vet 11 | command: go vet ./... 12 | - run: 13 | name: Run tests 14 | command: go test ./... 15 | when: always 16 | 17 | workflows: 18 | version: 2 19 | on_push: 20 | jobs: 21 | - test 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.dll 4 | *.so 5 | *.dylib 6 | 7 | # Test binary, build with `go test -c` 8 | *.test 9 | 10 | # Output of the go coverage tool, specifically when used with LiteIDE 11 | *.out 12 | 13 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 14 | .glide/ 15 | 16 | # Jetbrains IDE 17 | .idea/ -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | docker { 4 | image 'golang:1.13.15' 5 | } 6 | } 7 | 8 | environment { 9 | GOCACHE = "${WORKSPACE}/.go-cache" 10 | } 11 | 12 | stages { 13 | stage('Test') { 14 | steps { 15 | sh """ 16 | mkdir -p $GOCACHE 17 | rm -f $WORKSPACE/test-results.{log,xml} 18 | cd $WORKSPACE 19 | go test -v | tee $WORKSPACE/test-results.log 20 | """ 21 | } 22 | 23 | post { 24 | always { 25 | sh """ 26 | 
27 | if [ -e test-results.log ]; then 28 | mkdir -p /go/src/github.com/ 29 | go get github.com/jstemmer/go-junit-report 30 | go-junit-report < $WORKSPACE/test-results.log > test-results.xml 31 | fi 32 | """ 33 | junit "test-results.xml" 34 | } 35 | } 36 | } 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2013-2016 Errplane Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # The Influx Query Language Specification 2 | 3 | ## Introduction 4 | 5 | This is a reference for the Influx Query Language ("InfluxQL"). 
6 | 7 | InfluxQL is a SQL-like query language for interacting with InfluxDB. It has 8 | been lovingly crafted to feel familiar to those coming from other SQL or 9 | SQL-like environments while providing features specific to storing and analyzing 10 | time series data. 11 | 12 | 13 | ## Notation 14 | 15 | The syntax is specified using Extended Backus-Naur Form ("EBNF"). EBNF is the 16 | same notation used in the [Go](http://golang.org) programming language 17 | specification, which can be found [here](https://golang.org/ref/spec). Not so 18 | coincidentally, InfluxDB is written in Go. 19 | 20 | ``` 21 | Production = production_name "=" [ Expression ] "." . 22 | Expression = Alternative { "|" Alternative } . 23 | Alternative = Term { Term } . 24 | Term = production_name | token [ "…" token ] | Group | Option | Repetition . 25 | Group = "(" Expression ")" . 26 | Option = "[" Expression "]" . 27 | Repetition = "{" Expression "}" . 28 | ``` 29 | 30 | Notation operators in order of increasing precedence: 31 | 32 | ``` 33 | | alternation 34 | () grouping 35 | [] option (0 or 1 times) 36 | {} repetition (0 to n times) 37 | ``` 38 | 39 | ## Comments 40 | 41 | Both single and multiline comments are supported. A comment is treated 42 | the same as whitespace by the parser. 43 | 44 | ``` 45 | -- single line comment 46 | /* 47 | multiline comment 48 | */ 49 | ``` 50 | 51 | Single line comments will skip all text until the scanner hits a 52 | newline. Multiline comments will skip all text until the end comment 53 | marker is hit. Nested multiline comments are not supported so the 54 | following does not work: 55 | 56 | ``` 57 | /* /* this does not work */ */ 58 | ``` 59 | 60 | ## Query representation 61 | 62 | ### Characters 63 | 64 | InfluxQL is Unicode text encoded in [UTF-8](http://en.wikipedia.org/wiki/UTF-8). 65 | 66 | ``` 67 | newline = /* the Unicode code point U+000A */ . 68 | unicode_char = /* an arbitrary Unicode code point except newline */ . 
69 | ``` 70 | 71 | ## Letters and digits 72 | 73 | Letters are the set of ASCII letters. Additionally, the underscore character _ 74 | (U+005F) is considered a letter. 75 | 76 | Only decimal digits are supported. 77 | 78 | ``` 79 | letter = ascii_letter | "_" . 80 | ascii_letter = "A" … "Z" | "a" … "z" . 81 | digit = "0" … "9" . 82 | ``` 83 | 84 | ## Identifiers 85 | 86 | Identifiers are tokens which refer to database names, retention policy names, 87 | user names, measurement names, tag keys, and field keys. 88 | 89 | The rules: 90 | 91 | - double quoted identifiers can contain any unicode character other than a new line 92 | - double quoted identifiers can contain escaped `"` characters (i.e., `\"`) 93 | - double quoted identifiers can contain InfluxQL keywords 94 | - unquoted identifiers must start with an upper or lowercase ASCII character or "_" 95 | - unquoted identifiers may contain only ASCII letters, decimal digits, and "_" 96 | 97 | ``` 98 | identifier = unquoted_identifier | quoted_identifier . 99 | unquoted_identifier = ( letter ) { letter | digit } . 100 | quoted_identifier = `"` unicode_char { unicode_char } `"` . 
101 | ``` 102 | 103 | #### Examples: 104 | 105 | ``` 106 | cpu 107 | _cpu_stats 108 | "1h" 109 | "anything really" 110 | "1_Crazy-1337.identifier>NAME👍" 111 | ``` 112 | 113 | ## Keywords 114 | 115 | ``` 116 | ALL ALTER ANALYZE ANY AS ASC 117 | BEGIN BY CREATE CONTINUOUS DATABASE DATABASES 118 | DEFAULT DELETE DESC DESTINATIONS DIAGNOSTICS DISTINCT 119 | DROP DURATION END EVERY EXPLAIN FIELD 120 | FOR FROM GRANT GRANTS GROUP GROUPS 121 | IN INF INSERT INTO KEY KEYS 122 | KILL LIMIT SHOW MEASUREMENT MEASUREMENTS NAME 123 | OFFSET ON ORDER PASSWORD POLICY POLICIES 124 | PRIVILEGES QUERIES QUERY READ REPLICATION RESAMPLE 125 | RETENTION REVOKE SELECT SERIES SET SHARD 126 | SHARDS SLIMIT SOFFSET STATS SUBSCRIPTION SUBSCRIPTIONS 127 | TAG TO USER USERS VALUES WHERE 128 | WITH WRITE 129 | ``` 130 | 131 | ## Literals 132 | 133 | ### Integers 134 | 135 | InfluxQL supports decimal integer literals. Hexadecimal and octal literals are 136 | not currently supported. 137 | 138 | ``` 139 | int_lit = [ "+" | "-" ] ( "1" … "9" ) { digit } . 140 | ``` 141 | 142 | ### Floats 143 | 144 | InfluxQL supports floating-point literals. Exponents are not currently supported. 145 | 146 | ``` 147 | float_lit = [ "+" | "-" ] ( "." digit { digit } | digit { digit } "." { digit } ) . 148 | ``` 149 | 150 | ### Strings 151 | 152 | String literals must be surrounded by single quotes. Strings may contain `'` 153 | characters as long as they are escaped (i.e., `\'`). 154 | 155 | ``` 156 | string_lit = `'` { unicode_char } `'` . 157 | ``` 158 | 159 | ### Durations 160 | 161 | Duration literals specify a length of time. An integer literal followed 162 | immediately (with no spaces) by a duration unit listed below is interpreted as 163 | a duration literal. 
164 | 165 | ### Duration units 166 | | Units | Meaning | 167 | |--------|-----------------------------------------| 168 | | u or µ | microseconds (1 millionth of a second) | 169 | | ms | milliseconds (1 thousandth of a second) | 170 | | s | second | 171 | | m | minute | 172 | | h | hour | 173 | | d | day | 174 | | w | week | 175 | 176 | ``` 177 | duration_lit = int_lit duration_unit . 178 | duration_unit = "u" | "µ" | "ms" | "s" | "m" | "h" | "d" | "w" . 179 | ``` 180 | 181 | ### Dates & Times 182 | 183 | The date and time literal format is not specified in EBNF like the rest of this document. It is specified using Go's date / time parsing format, which is a reference date written in the format required by InfluxQL. The reference date time is: 184 | 185 | InfluxQL reference date time: January 2nd, 2006 at 3:04:05 PM 186 | 187 | ``` 188 | time_lit = "2006-01-02 15:04:05.999999" | "2006-01-02" . 189 | ``` 190 | 191 | ### Booleans 192 | 193 | ``` 194 | bool_lit = TRUE | FALSE . 195 | ``` 196 | 197 | ### Regular Expressions 198 | 199 | ``` 200 | regex_lit = "/" { unicode_char } "/" . 201 | ``` 202 | 203 | **Comparators:** 204 | `=~` matches against 205 | `!~` doesn't match against 206 | 207 | > **Note:** Use regular expressions to match measurements and tags. 208 | You cannot use regular expressions to match databases, retention policies, or fields. 209 | 210 | ## Queries 211 | 212 | A query is composed of one or more statements separated by a semicolon. 213 | 214 | ``` 215 | query = statement { ";" statement } . 
216 | 217 | statement = alter_retention_policy_stmt | 218 | create_continuous_query_stmt | 219 | create_database_stmt | 220 | create_retention_policy_stmt | 221 | create_subscription_stmt | 222 | create_user_stmt | 223 | delete_stmt | 224 | drop_continuous_query_stmt | 225 | drop_database_stmt | 226 | drop_measurement_stmt | 227 | drop_retention_policy_stmt | 228 | drop_series_stmt | 229 | drop_shard_stmt | 230 | drop_subscription_stmt | 231 | drop_user_stmt | 232 | explain_stmt | 233 | grant_stmt | 234 | kill_query_statement | 235 | show_continuous_queries_stmt | 236 | show_databases_stmt | 237 | show_field_keys_stmt | 238 | show_grants_stmt | 239 | show_measurements_stmt | 240 | show_queries_stmt | 241 | show_retention_policies | 242 | show_series_stmt | 243 | show_shard_groups_stmt | 244 | show_shards_stmt | 245 | show_subscriptions_stmt| 246 | show_tag_keys_stmt | 247 | show_tag_values_stmt | 248 | show_users_stmt | 249 | revoke_stmt | 250 | select_stmt . 251 | ``` 252 | 253 | ## Statements 254 | 255 | ### ALTER RETENTION POLICY 256 | 257 | ``` 258 | alter_retention_policy_stmt = "ALTER RETENTION POLICY" policy_name on_clause 259 | retention_policy_option 260 | [ retention_policy_option ] 261 | [ retention_policy_option ] 262 | [ retention_policy_option ] . 263 | ``` 264 | 265 | > Replication factors do not serve a purpose with single node instances. 266 | 267 | #### Examples: 268 | 269 | ```sql 270 | -- Set default retention policy for mydb to 1h.cpu. 271 | ALTER RETENTION POLICY "1h.cpu" ON "mydb" DEFAULT 272 | 273 | -- Change duration and replication factor. 274 | ALTER RETENTION POLICY "policy1" ON "somedb" DURATION 1h REPLICATION 4 275 | ``` 276 | 277 | ### CREATE CONTINUOUS QUERY 278 | 279 | ``` 280 | create_continuous_query_stmt = "CREATE CONTINUOUS QUERY" query_name on_clause 281 | [ "RESAMPLE" resample_opts ] 282 | "BEGIN" select_stmt "END" . 283 | 284 | query_name = identifier . 
285 | 286 | resample_opts = (every_stmt for_stmt | every_stmt | for_stmt) . 287 | every_stmt = "EVERY" duration_lit 288 | for_stmt = "FOR" duration_lit 289 | ``` 290 | 291 | #### Examples: 292 | 293 | ```sql 294 | -- selects from DEFAULT retention policy and writes into 6_months retention policy 295 | CREATE CONTINUOUS QUERY "10m_event_count" 296 | ON "db_name" 297 | BEGIN 298 | SELECT count("value") 299 | INTO "6_months"."events" 300 | FROM "events" 301 | GROUP BY time(10m) 302 | END; 303 | 304 | -- this selects from the output of one continuous query in one retention policy and outputs to another series in another retention policy 305 | CREATE CONTINUOUS QUERY "1h_event_count" 306 | ON "db_name" 307 | BEGIN 308 | SELECT sum("count") as "count" 309 | INTO "2_years"."events" 310 | FROM "6_months"."events" 311 | GROUP BY time(1h) 312 | END; 313 | 314 | -- this customizes the resample interval so the interval is queried every 10s and intervals are resampled until 2m after their start time 315 | -- when resample is used, at least one of "EVERY" or "FOR" must be used 316 | CREATE CONTINUOUS QUERY "cpu_mean" 317 | ON "db_name" 318 | RESAMPLE EVERY 10s FOR 2m 319 | BEGIN 320 | SELECT mean("value") 321 | INTO "cpu_mean" 322 | FROM "cpu" 323 | GROUP BY time(1m) 324 | END; 325 | ``` 326 | 327 | ### CREATE DATABASE 328 | 329 | ``` 330 | create_database_stmt = "CREATE DATABASE" db_name 331 | [ WITH 332 | [ retention_policy_duration ] 333 | [ retention_policy_replication ] 334 | [ retention_policy_shard_group_duration ] 335 | [ retention_policy_name ] 336 | ] . 337 | ``` 338 | 339 | > Replication factors do not serve a purpose with single node instances. 
340 | 341 | #### Examples: 342 | 343 | ```sql 344 | -- Create a database called foo 345 | CREATE DATABASE "foo" 346 | 347 | -- Create a database called bar with a new DEFAULT retention policy and specify the duration, replication, shard group duration, and name of that retention policy 348 | CREATE DATABASE "bar" WITH DURATION 1d REPLICATION 1 SHARD DURATION 30m NAME "myrp" 349 | 350 | -- Create a database called mydb with a new DEFAULT retention policy and specify the name of that retention policy 351 | CREATE DATABASE "mydb" WITH NAME "myrp" 352 | ``` 353 | 354 | ### CREATE RETENTION POLICY 355 | 356 | ``` 357 | create_retention_policy_stmt = "CREATE RETENTION POLICY" policy_name on_clause 358 | retention_policy_duration 359 | retention_policy_replication 360 | [ retention_policy_shard_group_duration ] 361 | [ "DEFAULT" ] . 362 | ``` 363 | 364 | > Replication factors do not serve a purpose with single node instances. 365 | 366 | #### Examples 367 | 368 | ```sql 369 | -- Create a retention policy. 370 | CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 371 | 372 | -- Create a retention policy and set it as the DEFAULT. 373 | CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 DEFAULT 374 | 375 | -- Create a retention policy and specify the shard group duration. 376 | CREATE RETENTION POLICY "10m.events" ON "somedb" DURATION 60m REPLICATION 2 SHARD DURATION 30m 377 | ``` 378 | 379 | ### CREATE SUBSCRIPTION 380 | 381 | Subscriptions tell InfluxDB to send all the data it receives to Kapacitor or other third parties. 382 | 383 | ``` 384 | create_subscription_stmt = "CREATE SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy "DESTINATIONS" ("ANY"|"ALL") host { "," host} . 385 | ``` 386 | 387 | #### Examples: 388 | 389 | ```sql 390 | -- Create a SUBSCRIPTION on database 'mydb' and retention policy 'autogen' that sends data to 'example.com:9090' via UDP. 
391 | CREATE SUBSCRIPTION "sub0" ON "mydb"."autogen" DESTINATIONS ALL 'udp://example.com:9090' 392 | 393 | -- Create a SUBSCRIPTION on database 'mydb' and retention policy 'autogen' that round robins the data to 'h1.example.com:9090' and 'h2.example.com:9090'. 394 | CREATE SUBSCRIPTION "sub0" ON "mydb"."autogen" DESTINATIONS ANY 'udp://h1.example.com:9090', 'udp://h2.example.com:9090' 395 | ``` 396 | 397 | ### CREATE USER 398 | 399 | ``` 400 | create_user_stmt = "CREATE USER" user_name "WITH PASSWORD" password 401 | [ "WITH ALL PRIVILEGES" ] . 402 | ``` 403 | 404 | #### Examples: 405 | 406 | ```sql 407 | -- Create a normal database user. 408 | CREATE USER "jdoe" WITH PASSWORD '1337password' 409 | 410 | -- Create an admin user. 411 | -- Note: Unlike the GRANT statement, the "PRIVILEGES" keyword is required here. 412 | CREATE USER "jdoe" WITH PASSWORD '1337password' WITH ALL PRIVILEGES 413 | ``` 414 | 415 | > **Note:** The password string must be wrapped in single quotes. 416 | 417 | ### DELETE 418 | 419 | ``` 420 | delete_stmt = "DELETE" ( from_clause | where_clause | from_clause where_clause ) . 421 | ``` 422 | 423 | #### Examples: 424 | 425 | ```sql 426 | DELETE FROM "cpu" 427 | DELETE FROM "cpu" WHERE time < '2000-01-01T00:00:00Z' 428 | DELETE WHERE time < '2000-01-01T00:00:00Z' 429 | ``` 430 | 431 | ### DROP CONTINUOUS QUERY 432 | 433 | ``` 434 | drop_continuous_query_stmt = "DROP CONTINUOUS QUERY" query_name on_clause . 435 | ``` 436 | 437 | #### Example: 438 | 439 | ```sql 440 | DROP CONTINUOUS QUERY "myquery" ON "mydb" 441 | ``` 442 | 443 | ### DROP DATABASE 444 | 445 | ``` 446 | drop_database_stmt = "DROP DATABASE" db_name . 447 | ``` 448 | 449 | #### Example: 450 | 451 | ```sql 452 | DROP DATABASE "mydb" 453 | ``` 454 | 455 | ### DROP MEASUREMENT 456 | 457 | ``` 458 | drop_measurement_stmt = "DROP MEASUREMENT" measurement . 
459 | ``` 460 | 461 | #### Examples: 462 | 463 | ```sql 464 | -- drop the cpu measurement 465 | DROP MEASUREMENT "cpu" 466 | ``` 467 | 468 | ### DROP RETENTION POLICY 469 | 470 | ``` 471 | drop_retention_policy_stmt = "DROP RETENTION POLICY" policy_name on_clause . 472 | ``` 473 | 474 | #### Example: 475 | 476 | ```sql 477 | -- drop the retention policy named 1h.cpu from mydb 478 | DROP RETENTION POLICY "1h.cpu" ON "mydb" 479 | ``` 480 | 481 | ### DROP SERIES 482 | 483 | ``` 484 | drop_series_stmt = "DROP SERIES" ( from_clause | where_clause | from_clause where_clause ) . 485 | ``` 486 | 487 | #### Example: 488 | 489 | ```sql 490 | DROP SERIES FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu8' 491 | 492 | ``` 493 | 494 | ### DROP SHARD 495 | 496 | ``` 497 | drop_shard_stmt = "DROP SHARD" ( shard_id ) . 498 | ``` 499 | 500 | #### Example: 501 | 502 | ``` 503 | DROP SHARD 1 504 | ``` 505 | 506 | ### DROP SUBSCRIPTION 507 | 508 | ``` 509 | drop_subscription_stmt = "DROP SUBSCRIPTION" subscription_name "ON" db_name "." retention_policy . 510 | ``` 511 | 512 | #### Example: 513 | 514 | ```sql 515 | DROP SUBSCRIPTION "sub0" ON "mydb"."autogen" 516 | ``` 517 | 518 | ### DROP USER 519 | 520 | ``` 521 | drop_user_stmt = "DROP USER" user_name . 522 | ``` 523 | 524 | #### Example: 525 | 526 | ```sql 527 | DROP USER "jdoe" 528 | ``` 529 | 530 | ### EXPLAIN 531 | 532 | > **NOTE:** This functionality is unimplemented. 533 | 534 | ``` 535 | explain_stmt = "EXPLAIN" [ "ANALYZE" ] select_stmt . 536 | ``` 537 | 538 | ### GRANT 539 | 540 | > **NOTE:** Users can be granted privileges on databases that do not exist. 541 | 542 | ``` 543 | grant_stmt = "GRANT" privilege [ on_clause ] to_clause . 
544 | ``` 545 | 546 | #### Examples: 547 | 548 | ```sql 549 | -- grant admin privileges 550 | GRANT ALL TO "jdoe" 551 | 552 | -- grant read access to a database 553 | GRANT READ ON "mydb" TO "jdoe" 554 | ``` 555 | 556 | ### KILL QUERY 557 | 558 | ``` 559 | kill_query_statement = "KILL QUERY" query_id . 560 | ``` 561 | 562 | #### Examples: 563 | 564 | ``` 565 | --- kill a query with the query_id 36 566 | KILL QUERY 36 567 | ``` 568 | 569 | > **NOTE:** Identify the `query_id` from the `SHOW QUERIES` output. 570 | 571 | ### SHOW CONTINUOUS QUERIES 572 | 573 | ``` 574 | show_continuous_queries_stmt = "SHOW CONTINUOUS QUERIES" . 575 | ``` 576 | 577 | #### Example: 578 | 579 | ```sql 580 | -- show all continuous queries 581 | SHOW CONTINUOUS QUERIES 582 | ``` 583 | 584 | ### SHOW DATABASES 585 | 586 | ``` 587 | show_databases_stmt = "SHOW DATABASES" . 588 | ``` 589 | 590 | #### Example: 591 | 592 | ```sql 593 | -- show all databases 594 | SHOW DATABASES 595 | ``` 596 | 597 | ### SHOW FIELD KEYS 598 | 599 | ``` 600 | show_field_keys_stmt = "SHOW FIELD KEYS" [ from_clause ] . 601 | ``` 602 | 603 | #### Examples: 604 | 605 | ```sql 606 | -- show field keys and field value data types from all measurements 607 | SHOW FIELD KEYS 608 | 609 | -- show field keys and field value data types from specified measurement 610 | SHOW FIELD KEYS FROM "cpu" 611 | ``` 612 | 613 | ### SHOW GRANTS 614 | 615 | ``` 616 | show_grants_stmt = "SHOW GRANTS FOR" user_name . 617 | ``` 618 | 619 | #### Example: 620 | 621 | ```sql 622 | -- show grants for jdoe 623 | SHOW GRANTS FOR "jdoe" 624 | ``` 625 | 626 | ### SHOW MEASUREMENTS 627 | 628 | ``` 629 | show_measurements_stmt = "SHOW MEASUREMENTS" [on_clause] [ with_measurement_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] . 
630 | ``` 631 | 632 | #### Examples: 633 | 634 | ```sql 635 | -- show all measurements 636 | SHOW MEASUREMENTS 637 | 638 | -- show all measurements on all databases 639 | SHOW MEASUREMENTS ON *.* 640 | 641 | -- show all measurements on specific database and retention policy 642 | SHOW MEASUREMENTS ON mydb.myrp 643 | 644 | -- show measurements where region tag = 'uswest' AND host tag = 'serverA' 645 | SHOW MEASUREMENTS WHERE "region" = 'uswest' AND "host" = 'serverA' 646 | 647 | -- show measurements that start with 'h2o' 648 | SHOW MEASUREMENTS WITH MEASUREMENT =~ /h2o.*/ 649 | ``` 650 | 651 | ### SHOW QUERIES 652 | 653 | ``` 654 | show_queries_stmt = "SHOW QUERIES" . 655 | ``` 656 | 657 | #### Example: 658 | 659 | ```sql 660 | -- show all currently-running queries 661 | SHOW QUERIES 662 | ``` 663 | 664 | ### SHOW RETENTION POLICIES 665 | 666 | ``` 667 | show_retention_policies = "SHOW RETENTION POLICIES" on_clause . 668 | ``` 669 | 670 | #### Example: 671 | 672 | ```sql 673 | -- show all retention policies on a database 674 | SHOW RETENTION POLICIES ON "mydb" 675 | ``` 676 | 677 | ### SHOW SERIES 678 | 679 | ``` 680 | show_series_stmt = "SHOW SERIES" [ from_clause ] [ where_clause ] [ limit_clause ] [ offset_clause ] . 681 | ``` 682 | 683 | #### Example: 684 | 685 | ```sql 686 | SHOW SERIES FROM "telegraf"."autogen"."cpu" WHERE cpu = 'cpu8' 687 | ``` 688 | 689 | ### SHOW SHARD GROUPS 690 | 691 | ``` 692 | show_shard_groups_stmt = "SHOW SHARD GROUPS" . 693 | ``` 694 | 695 | #### Example: 696 | 697 | ```sql 698 | SHOW SHARD GROUPS 699 | ``` 700 | 701 | ### SHOW SHARDS 702 | 703 | ``` 704 | show_shards_stmt = "SHOW SHARDS" . 705 | ``` 706 | 707 | #### Example: 708 | 709 | ```sql 710 | SHOW SHARDS 711 | ``` 712 | 713 | ### SHOW SUBSCRIPTIONS 714 | 715 | ``` 716 | show_subscriptions_stmt = "SHOW SUBSCRIPTIONS" . 
717 | ``` 718 | 719 | #### Example: 720 | 721 | ```sql 722 | SHOW SUBSCRIPTIONS 723 | ``` 724 | 725 | ### SHOW TAG KEYS 726 | 727 | ``` 728 | show_tag_keys_stmt = "SHOW TAG KEYS" [ from_clause ] [ where_clause ] [ group_by_clause ] 729 | [ limit_clause ] [ offset_clause ] . 730 | ``` 731 | 732 | #### Examples: 733 | 734 | ```sql 735 | -- show all tag keys 736 | SHOW TAG KEYS 737 | 738 | -- show all tag keys from the cpu measurement 739 | SHOW TAG KEYS FROM "cpu" 740 | 741 | -- show all tag keys from the cpu measurement where the region key = 'uswest' 742 | SHOW TAG KEYS FROM "cpu" WHERE "region" = 'uswest' 743 | 744 | -- show all tag keys where the host key = 'serverA' 745 | SHOW TAG KEYS WHERE "host" = 'serverA' 746 | ``` 747 | 748 | ### SHOW TAG VALUES 749 | 750 | ``` 751 | show_tag_values_stmt = "SHOW TAG VALUES" [ from_clause ] with_tag_clause [ where_clause ] 752 | [ group_by_clause ] [ limit_clause ] [ offset_clause ] . 753 | ``` 754 | 755 | #### Examples: 756 | 757 | ```sql 758 | -- show all tag values across all measurements for the region tag 759 | SHOW TAG VALUES WITH KEY = "region" 760 | 761 | -- show tag values from the cpu measurement for the region tag 762 | SHOW TAG VALUES FROM "cpu" WITH KEY = "region" 763 | 764 | -- show tag values across all measurements for all tag keys that do not include the letter c 765 | SHOW TAG VALUES WITH KEY !~ /.*c.*/ 766 | 767 | -- show tag values from the cpu measurement for region & host tag keys where service = 'redis' 768 | SHOW TAG VALUES FROM "cpu" WITH KEY IN ("region", "host") WHERE "service" = 'redis' 769 | ``` 770 | 771 | ### SHOW USERS 772 | 773 | ``` 774 | show_users_stmt = "SHOW USERS" . 775 | ``` 776 | 777 | #### Example: 778 | 779 | ```sql 780 | -- show all users 781 | SHOW USERS 782 | ``` 783 | 784 | ### REVOKE 785 | 786 | ``` 787 | revoke_stmt = "REVOKE" privilege [ on_clause ] "FROM" user_name . 
788 | ``` 789 | 790 | #### Examples: 791 | 792 | ```sql 793 | -- revoke admin privileges from jdoe 794 | REVOKE ALL PRIVILEGES FROM "jdoe" 795 | 796 | -- revoke read privileges from jdoe on mydb 797 | REVOKE READ ON "mydb" FROM "jdoe" 798 | ``` 799 | 800 | ### SELECT 801 | 802 | ``` 803 | select_stmt = "SELECT" fields from_clause [ into_clause ] [ where_clause ] 804 | [ group_by_clause ] [ order_by_clause ] [ limit_clause ] 805 | [ offset_clause ] [ slimit_clause ] [ soffset_clause ] 806 | [ timezone_clause ] . 807 | ``` 808 | 809 | #### Examples: 810 | 811 | ```sql 812 | -- select mean value from the cpu measurement where region = 'uswest' grouped by 10 minute intervals 813 | SELECT mean("value") FROM "cpu" WHERE "region" = 'uswest' GROUP BY time(10m) fill(0) 814 | 815 | -- select from all measurements beginning with cpu into the same measurement name in the cpu_1h retention policy 816 | SELECT mean("value") INTO "cpu_1h".:MEASUREMENT FROM /cpu.*/ 817 | 818 | -- select from measurements grouped by the day with a timezone 819 | SELECT mean("value") FROM "cpu" GROUP BY region, time(1d) fill(0) tz("America/Chicago") 820 | ``` 821 | 822 | ## Clauses 823 | 824 | ``` 825 | from_clause = "FROM" measurements . 826 | 827 | group_by_clause = "GROUP BY" dimensions fill(fill_option). 828 | 829 | into_clause = "INTO" ( measurement | back_ref ). 830 | 831 | limit_clause = "LIMIT" int_lit . 832 | 833 | offset_clause = "OFFSET" int_lit . 834 | 835 | slimit_clause = "SLIMIT" int_lit . 836 | 837 | soffset_clause = "SOFFSET" int_lit . 838 | 839 | timezone_clause = tz(string_lit) . 840 | 841 | on_clause = "ON" db_name . 842 | 843 | order_by_clause = "ORDER BY" sort_fields . 844 | 845 | to_clause = "TO" user_name . 846 | 847 | where_clause = "WHERE" expr . 848 | 849 | with_measurement_clause = "WITH MEASUREMENT" ( "=" measurement | "=~" regex_lit ) . 850 | 851 | with_tag_clause = "WITH KEY" ( "=" tag_key | "!=" tag_key | "=~" regex_lit | "IN (" tag_keys ")" ) . 
852 | ``` 853 | 854 | ## Expressions 855 | 856 | ``` 857 | binary_op = "+" | "-" | "*" | "/" | "%" | "&" | "|" | "^" | "AND" | 858 | "OR" | "=" | "!=" | "<>" | "<" | "<=" | ">" | ">=" . 859 | 860 | expr = unary_expr { binary_op unary_expr } . 861 | 862 | unary_expr = "(" expr ")" | var_ref | time_lit | string_lit | int_lit | 863 | float_lit | bool_lit | duration_lit | regex_lit . 864 | ``` 865 | 866 | ## Other 867 | 868 | ``` 869 | alias = "AS" identifier . 870 | 871 | back_ref = ( policy_name ".:MEASUREMENT" ) | 872 | ( db_name "." [ policy_name ] ".:MEASUREMENT" ) . 873 | 874 | db_name = identifier . 875 | 876 | dimension = expr . 877 | 878 | dimensions = dimension { "," dimension } . 879 | 880 | field_key = identifier . 881 | 882 | field = expr [ alias ] . 883 | 884 | fields = field { "," field } . 885 | 886 | fill_option = "null" | "none" | "previous" | "linear" | int_lit | float_lit . 887 | 888 | host = string_lit . 889 | 890 | measurement = measurement_name | 891 | ( policy_name "." measurement_name ) | 892 | ( db_name "." [ policy_name ] "." measurement_name ) . 893 | 894 | measurements = measurement { "," measurement } . 895 | 896 | measurement_name = identifier | regex_lit . 897 | 898 | password = string_lit . 899 | 900 | policy_name = identifier . 901 | 902 | privilege = "ALL" [ "PRIVILEGES" ] | "READ" | "WRITE" . 903 | 904 | query_id = int_lit . 905 | 906 | query_name = identifier . 907 | 908 | retention_policy = identifier . 909 | 910 | retention_policy_option = retention_policy_duration | 911 | retention_policy_replication | 912 | retention_policy_shard_group_duration | 913 | "DEFAULT" . 914 | 915 | retention_policy_duration = "DURATION" duration_lit . 916 | 917 | retention_policy_replication = "REPLICATION" int_lit . 918 | 919 | retention_policy_shard_group_duration = "SHARD DURATION" duration_lit . 920 | 921 | retention_policy_name = "NAME" identifier . 922 | 923 | series_id = int_lit . 924 | 925 | shard_id = int_lit . 
926 | 927 | sort_field = field_key [ ASC | DESC ] . 928 | 929 | sort_fields = sort_field { "," sort_field } . 930 | 931 | subscription_name = identifier . 932 | 933 | tag_key = identifier . 934 | 935 | tag_keys = tag_key { "," tag_key } . 936 | 937 | user_name = identifier . 938 | 939 | var_ref = measurement . 940 | ``` 941 | 942 | ## Query Engine Internals 943 | 944 | Once you understand the language itself, it's important to know how these 945 | language constructs are implemented in the query engine. This gives you an 946 | intuitive sense for how results will be processed and how to create efficient 947 | queries. 948 | 949 | The life cycle of a query looks like this: 950 | 951 | 1. InfluxQL query string is tokenized and then parsed into an abstract syntax 952 | tree (AST). This is the code representation of the query itself. 953 | 954 | 2. The AST is passed to the `QueryExecutor` which directs queries to the 955 | appropriate handlers. For example, queries related to meta data are executed 956 | by the meta service and `SELECT` statements are executed by the shards 957 | themselves. 958 | 959 | 3. The query engine then determines the shards that match the `SELECT` 960 | statement's time range. From these shards, iterators are created for each 961 | field in the statement. 962 | 963 | 4. Iterators are passed to the emitter which drains them and joins the resulting 964 | points. The emitter's job is to convert simple time/value points into the 965 | more complex result objects that are returned to the client. 966 | 967 | 968 | ### Understanding Iterators 969 | 970 | Iterators are at the heart of the query engine. They provide a simple interface 971 | for looping over a set of points. 
For example, this is an iterator over Float 972 | points: 973 | 974 | ``` 975 | type FloatIterator interface { 976 | Next() (*FloatPoint, error) 977 | } 978 | ``` 979 | 980 | These iterators are created through the `IteratorCreator` interface: 981 | 982 | ``` 983 | type IteratorCreator interface { 984 | CreateIterator(m *Measurement, opt IteratorOptions) (Iterator, error) 985 | } 986 | ``` 987 | 988 | The `IteratorOptions` provide arguments about field selection, time ranges, 989 | and dimensions that the iterator creator can use when planning an iterator. 990 | The `IteratorCreator` interface is used at many levels such as the `Shards`, 991 | `Shard`, and `Engine`. This allows optimizations to be performed when applicable 992 | such as returning a precomputed `COUNT()`. 993 | 994 | Iterators aren't just for reading raw data from storage though. Iterators can be 995 | composed so that they provide additional functionality around an input 996 | iterator. For example, a `DistinctIterator` can compute the distinct values for 997 | each time window for an input iterator. Or a `FillIterator` can generate 998 | additional points that are missing from an input iterator. 999 | 1000 | This composition also lends itself well to aggregation. For example, a statement 1001 | such as this: 1002 | 1003 | ``` 1004 | SELECT MEAN(value) FROM cpu GROUP BY time(10m) 1005 | ``` 1006 | 1007 | In this case, `MEAN(value)` is a `MeanIterator` wrapping an iterator from the 1008 | underlying shards. However, we can add an additional iterator to determine 1009 | the derivative of the mean: 1010 | 1011 | ``` 1012 | SELECT DERIVATIVE(MEAN(value), 20m) FROM cpu GROUP BY time(10m) 1013 | ``` 1014 | 1015 | 1016 | ### Understanding Auxiliary Fields 1017 | 1018 | Because InfluxQL allows users to use selector functions such as `FIRST()`, 1019 | `LAST()`, `MIN()`, and `MAX()`, the engine must provide a way to return related 1020 | data at the same time with the selected point. 
1021 | 1022 | For example, in this query: 1023 | 1024 | ``` 1025 | SELECT FIRST(value), host FROM cpu GROUP BY time(1h) 1026 | ``` 1027 | 1028 | We are selecting the first `value` that occurs every hour but we also want to 1029 | retrieve the `host` associated with that point. Since the `Point` types only 1030 | specify a single typed `Value` for efficiency, we push the `host` into the 1031 | auxiliary fields of the point. These auxiliary fields are attached to the point 1032 | until it is passed to the emitter where the fields get split off to their own 1033 | iterator. 1034 | 1035 | 1036 | ### Built-in Iterators 1037 | 1038 | There are many helper iterators that let us build queries: 1039 | 1040 | * Merge Iterator - This iterator combines one or more iterators into a single 1041 | new iterator of the same type. This iterator guarantees that all points 1042 | within a window will be output before starting the next window but does not 1043 | provide ordering guarantees within the window. This allows for fast access 1044 | for aggregate queries which do not need stronger sorting guarantees. 1045 | 1046 | * Sorted Merge Iterator - This iterator also combines one or more iterators 1047 | into a new iterator of the same type. However, this iterator guarantees 1048 | time ordering of every point. This makes it slower than the `MergeIterator` 1049 | but this ordering guarantee is required for non-aggregate queries which 1050 | return the raw data points. 1051 | 1052 | * Limit Iterator - This iterator limits the number of points per name/tag 1053 | group. This is the implementation of the `LIMIT` & `OFFSET` syntax. 1054 | 1055 | * Fill Iterator - This iterator injects extra points if they are missing from 1056 | the input iterator. It can provide `null` points, points with the previous 1057 | value, or points with a specific value. 
1058 | 1059 | * Buffered Iterator - This iterator provides the ability to "unread" a point 1060 | back onto a buffer so it can be read again next time. This is used extensively 1061 | to provide lookahead for windowing. 1062 | 1063 | * Reduce Iterator - This iterator calls a reduction function for each point in 1064 | a window. When the window is complete then all points for that window are 1065 | output. This is used for simple aggregate functions such as `COUNT()`. 1066 | 1067 | * Reduce Slice Iterator - This iterator collects all points for a window first 1068 | and then passes them all to a reduction function at once. The results are 1069 | returned from the iterator. This is used for aggregate functions such as 1070 | `DERIVATIVE()`. 1071 | 1072 | * Transform Iterator - This iterator calls a transform function for each point 1073 | from an input iterator. This is used for executing binary expressions. 1074 | 1075 | * Dedupe Iterator - This iterator only outputs unique points. It is resource 1076 | intensive so it is only used for small queries such as meta query statements. 1077 | 1078 | 1079 | ### Call Iterators 1080 | 1081 | Function calls in InfluxQL are implemented at two levels. Some calls can be 1082 | wrapped at multiple layers to improve efficiency. For example, a `COUNT()` can 1083 | be performed at the shard level and then multiple `CountIterator`s can be 1084 | wrapped with another `CountIterator` to compute the count of all shards. These 1085 | iterators can be created using `NewCallIterator()`. 1086 | 1087 | Some iterators are more complex or need to be implemented at a higher level. 1088 | For example, the `DERIVATIVE()` needs to retrieve all points for a window first 1089 | before performing the calculation. This iterator is created by the engine itself 1090 | and is never requested to be created by the lower levels. 1091 | 1092 | ### Subqueries 1093 | 1094 | Subqueries are built on top of iterators. 
Most of the work involved in 1095 | supporting subqueries is in organizing how data is streamed to the 1096 | iterators that will process the data. 1097 | 1098 | The final ordering of the stream has to output all points from one 1099 | series before moving to the next series and it also needs to ensure 1100 | those points are printed in order. So there are two separate concepts we 1101 | need to consider when creating an iterator: ordering and grouping. 1102 | 1103 | When an inner query has a different grouping than the outermost query, 1104 | we still need to group together related points into buckets, but we do 1105 | not have to ensure that all points from one buckets are output before 1106 | the points in another bucket. In fact, if we do that, we will be unable 1107 | to perform the grouping for the outer query correctly. Instead, we group 1108 | all points by the outermost query for an interval and then, within that 1109 | interval, we group the points for the inner query. For example, here are 1110 | series keys and times in seconds (fields are omitted since they don't 1111 | matter in this example): 1112 | 1113 | cpu,host=server01 0 1114 | cpu,host=server01 10 1115 | cpu,host=server01 20 1116 | cpu,host=server01 30 1117 | cpu,host=server02 0 1118 | cpu,host=server02 10 1119 | cpu,host=server02 20 1120 | cpu,host=server02 30 1121 | 1122 | With the following query: 1123 | 1124 | SELECT mean(max) FROM (SELECT max(value) FROM cpu GROUP BY host, time(20s)) GROUP BY time(20s) 1125 | 1126 | The final grouping keeps all of the points together which means we need 1127 | to group `server01` with `server02`. 
That means we output the points 1128 | from the underlying engine like this: 1129 | 1130 | cpu,host=server01 0 1131 | cpu,host=server01 10 1132 | cpu,host=server02 0 1133 | cpu,host=server02 10 1134 | cpu,host=server01 20 1135 | cpu,host=server01 30 1136 | cpu,host=server02 20 1137 | cpu,host=server02 30 1138 | 1139 | Within each one of those time buckets, we calculate the `max()` value 1140 | for each unique host so the output stream gets transformed to look like 1141 | this: 1142 | 1143 | cpu,host=server01 0 1144 | cpu,host=server02 0 1145 | cpu,host=server01 20 1146 | cpu,host=server02 20 1147 | 1148 | Then we can process the `mean()` on this stream of data instead and it 1149 | will be output in the correct order. This is true of any order of 1150 | grouping since grouping can only go from more specific to less specific. 1151 | 1152 | When it comes to ordering, unordered data is faster to process, but we 1153 | always need to produce ordered data. When processing a raw query with no 1154 | aggregates, we need to ensure data coming from the engine is ordered so 1155 | the output is ordered. When we have an aggregate, we know one point is 1156 | being emitted for each interval and will always produce ordered output. 1157 | So for aggregates, we can take unordered data as the input and get 1158 | ordered output. Any ordered data as input will always result in ordered 1159 | data so we just need to look at how an iterator processes unordered 1160 | data. 
1161 | 1162 | | | raw query | selector (without group by time) | selector (with group by time) | aggregator | 1163 | |-----------------|------------------|----------------------------------|-------------------------------|----------------| 1164 | | ordered input | ordered output | ordered output | ordered output | ordered output | 1165 | | unordered input | unordered output | unordered output | ordered output | ordered output | 1166 | 1167 | Since we always need ordered output, we just need to work backwards and 1168 | determine which pattern of input gives us ordered output. If both 1169 | ordered and unordered input produce ordered output, we prefer unordered 1170 | input since it is faster. 1171 | 1172 | There are also certain aggregates that require ordered input like 1173 | `median()` and `percentile()`. These functions will explicitly request 1174 | ordered input. It is also important to realize that selectors that are 1175 | grouped by time are the equivalent of an aggregator. It is only 1176 | selectors without a group by time that are different. 1177 | -------------------------------------------------------------------------------- /ast_test.go: -------------------------------------------------------------------------------- 1 | package influxql_test 2 | 3 | import ( 4 | "fmt" 5 | "go/importer" 6 | "math" 7 | "math/rand" 8 | "reflect" 9 | "strings" 10 | "testing" 11 | "time" 12 | 13 | "github.com/influxdata/influxql" 14 | ) 15 | 16 | func BenchmarkQuery_String(b *testing.B) { 17 | p := influxql.NewParser(strings.NewReader(`SELECT foo AS zoo, a AS b FROM bar WHERE value > 10 AND q = 'hello'`)) 18 | q, _ := p.ParseStatement() 19 | for i := 0; i < b.N; i++ { 20 | _ = q.String() 21 | } 22 | } 23 | 24 | // Ensure a value's data type can be retrieved. 
25 | func TestInspectDataType(t *testing.T) { 26 | for i, tt := range []struct { 27 | v interface{} 28 | typ influxql.DataType 29 | }{ 30 | {float64(100), influxql.Float}, 31 | {int64(100), influxql.Integer}, 32 | {int32(100), influxql.Integer}, 33 | {100, influxql.Integer}, 34 | {true, influxql.Boolean}, 35 | {"string", influxql.String}, 36 | {time.Now(), influxql.Time}, 37 | {time.Second, influxql.Duration}, 38 | {nil, influxql.Unknown}, 39 | } { 40 | if typ := influxql.InspectDataType(tt.v); tt.typ != typ { 41 | t.Errorf("%d. %v (%s): unexpected type: %s", i, tt.v, tt.typ, typ) 42 | continue 43 | } 44 | } 45 | } 46 | 47 | func TestDataTypeFromString(t *testing.T) { 48 | for i, tt := range []struct { 49 | s string 50 | typ influxql.DataType 51 | }{ 52 | {s: "float", typ: influxql.Float}, 53 | {s: "integer", typ: influxql.Integer}, 54 | {s: "unsigned", typ: influxql.Unsigned}, 55 | {s: "string", typ: influxql.String}, 56 | {s: "boolean", typ: influxql.Boolean}, 57 | {s: "time", typ: influxql.Time}, 58 | {s: "duration", typ: influxql.Duration}, 59 | {s: "tag", typ: influxql.Tag}, 60 | {s: "field", typ: influxql.AnyField}, 61 | {s: "foobar", typ: influxql.Unknown}, 62 | } { 63 | if typ := influxql.DataTypeFromString(tt.s); tt.typ != typ { 64 | t.Errorf("%d. %s: unexpected type: %s != %s", i, tt.s, tt.typ, typ) 65 | } 66 | } 67 | } 68 | 69 | func TestDataType_String(t *testing.T) { 70 | for i, tt := range []struct { 71 | typ influxql.DataType 72 | v string 73 | }{ 74 | {influxql.Float, "float"}, 75 | {influxql.Integer, "integer"}, 76 | {influxql.Boolean, "boolean"}, 77 | {influxql.String, "string"}, 78 | {influxql.Time, "time"}, 79 | {influxql.Duration, "duration"}, 80 | {influxql.Tag, "tag"}, 81 | {influxql.Unknown, "unknown"}, 82 | } { 83 | if v := tt.typ.String(); tt.v != v { 84 | t.Errorf("%d. 
%v (%s): unexpected string: %s", i, tt.typ, tt.v, v) 85 | } 86 | } 87 | } 88 | 89 | func TestDataType_LessThan(t *testing.T) { 90 | for i, tt := range []struct { 91 | typ influxql.DataType 92 | other influxql.DataType 93 | exp bool 94 | }{ 95 | {typ: influxql.Unknown, other: influxql.Unknown, exp: true}, 96 | {typ: influxql.Unknown, other: influxql.Float, exp: true}, 97 | {typ: influxql.Unknown, other: influxql.Integer, exp: true}, 98 | {typ: influxql.Unknown, other: influxql.Unsigned, exp: true}, 99 | {typ: influxql.Unknown, other: influxql.String, exp: true}, 100 | {typ: influxql.Unknown, other: influxql.Boolean, exp: true}, 101 | {typ: influxql.Unknown, other: influxql.Tag, exp: true}, 102 | {typ: influxql.Float, other: influxql.Unknown, exp: false}, 103 | {typ: influxql.Integer, other: influxql.Unknown, exp: false}, 104 | {typ: influxql.Unsigned, other: influxql.Unknown, exp: false}, 105 | {typ: influxql.String, other: influxql.Unknown, exp: false}, 106 | {typ: influxql.Boolean, other: influxql.Unknown, exp: false}, 107 | {typ: influxql.Tag, other: influxql.Unknown, exp: false}, 108 | {typ: influxql.Float, other: influxql.Float, exp: false}, 109 | {typ: influxql.Float, other: influxql.Integer, exp: false}, 110 | {typ: influxql.Float, other: influxql.Unsigned, exp: false}, 111 | {typ: influxql.Float, other: influxql.String, exp: false}, 112 | {typ: influxql.Float, other: influxql.Boolean, exp: false}, 113 | {typ: influxql.Float, other: influxql.Tag, exp: false}, 114 | {typ: influxql.Integer, other: influxql.Float, exp: true}, 115 | {typ: influxql.Integer, other: influxql.Integer, exp: false}, 116 | {typ: influxql.Integer, other: influxql.Unsigned, exp: false}, 117 | {typ: influxql.Integer, other: influxql.String, exp: false}, 118 | {typ: influxql.Integer, other: influxql.Boolean, exp: false}, 119 | {typ: influxql.Integer, other: influxql.Tag, exp: false}, 120 | {typ: influxql.Unsigned, other: influxql.Float, exp: true}, 121 | {typ: influxql.Unsigned, other: 
influxql.Integer, exp: true}, 122 | {typ: influxql.Unsigned, other: influxql.Unsigned, exp: false}, 123 | {typ: influxql.Unsigned, other: influxql.String, exp: false}, 124 | {typ: influxql.Unsigned, other: influxql.Boolean, exp: false}, 125 | {typ: influxql.Unsigned, other: influxql.Tag, exp: false}, 126 | {typ: influxql.String, other: influxql.Float, exp: true}, 127 | {typ: influxql.String, other: influxql.Integer, exp: true}, 128 | {typ: influxql.String, other: influxql.Unsigned, exp: true}, 129 | {typ: influxql.String, other: influxql.String, exp: false}, 130 | {typ: influxql.String, other: influxql.Boolean, exp: false}, 131 | {typ: influxql.String, other: influxql.Tag, exp: false}, 132 | {typ: influxql.Boolean, other: influxql.Float, exp: true}, 133 | {typ: influxql.Boolean, other: influxql.Integer, exp: true}, 134 | {typ: influxql.Boolean, other: influxql.Unsigned, exp: true}, 135 | {typ: influxql.Boolean, other: influxql.String, exp: true}, 136 | {typ: influxql.Boolean, other: influxql.Boolean, exp: false}, 137 | {typ: influxql.Boolean, other: influxql.Tag, exp: false}, 138 | {typ: influxql.Tag, other: influxql.Float, exp: true}, 139 | {typ: influxql.Tag, other: influxql.Integer, exp: true}, 140 | {typ: influxql.Tag, other: influxql.Unsigned, exp: true}, 141 | {typ: influxql.Tag, other: influxql.String, exp: true}, 142 | {typ: influxql.Tag, other: influxql.Boolean, exp: true}, 143 | {typ: influxql.Tag, other: influxql.Tag, exp: false}, 144 | } { 145 | if got, exp := tt.typ.LessThan(tt.other), tt.exp; got != exp { 146 | t.Errorf("%d. %q.LessThan(%q) = %v; exp = %v", i, tt.typ, tt.other, got, exp) 147 | } 148 | } 149 | } 150 | 151 | // Ensure the SELECT statement can extract GROUP BY interval. 
152 | func TestSelectStatement_GroupByInterval(t *testing.T) { 153 | q := "SELECT sum(value) from foo where time < now() GROUP BY time(10m)" 154 | stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement() 155 | if err != nil { 156 | t.Fatalf("invalid statement: %q: %s", stmt, err) 157 | } 158 | 159 | s := stmt.(*influxql.SelectStatement) 160 | d, err := s.GroupByInterval() 161 | if d != 10*time.Minute { 162 | t.Fatalf("group by interval not equal:\nexp=%s\ngot=%s", 10*time.Minute, d) 163 | } 164 | if err != nil { 165 | t.Fatalf("error parsing group by interval: %s", err.Error()) 166 | } 167 | } 168 | 169 | // Ensure the SELECT statement can have its start and end time set 170 | func TestSelectStatement_SetTimeRange(t *testing.T) { 171 | q := "SELECT sum(value) from foo where time < now() GROUP BY time(10m)" 172 | stmt, err := influxql.NewParser(strings.NewReader(q)).ParseStatement() 173 | if err != nil { 174 | t.Fatalf("invalid statement: %q: %s", stmt, err) 175 | } 176 | 177 | s := stmt.(*influxql.SelectStatement) 178 | start := time.Now().Add(-20 * time.Hour).Round(time.Second).UTC() 179 | end := time.Now().Add(10 * time.Hour).Round(time.Second).UTC() 180 | s.SetTimeRange(start, end) 181 | min, max := MustTimeRange(s.Condition) 182 | 183 | if min != start { 184 | t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) 185 | } 186 | // the end range is actually one nanosecond before the given one since end is exclusive 187 | end = end.Add(-time.Nanosecond) 188 | if max != end { 189 | t.Fatalf("end time wasn't set properly.\n exp: %s\n got: %s", end, max) 190 | } 191 | 192 | // ensure we can set a time on a select that already has one set 193 | start = time.Now().Add(-20 * time.Hour).Round(time.Second).UTC() 194 | end = time.Now().Add(10 * time.Hour).Round(time.Second).UTC() 195 | q = fmt.Sprintf("SELECT sum(value) from foo WHERE time >= %ds and time <= %ds GROUP BY time(10m)", start.Unix(), end.Unix()) 196 | stmt, err = 
influxql.NewParser(strings.NewReader(q)).ParseStatement() 197 | if err != nil { 198 | t.Fatalf("invalid statement: %q: %s", stmt, err) 199 | } 200 | 201 | s = stmt.(*influxql.SelectStatement) 202 | min, max = MustTimeRange(s.Condition) 203 | if start != min || end != max { 204 | t.Fatalf("start and end times weren't equal:\n exp: %s\n got: %s\n exp: %s\n got:%s\n", start, min, end, max) 205 | } 206 | 207 | // update and ensure it saves it 208 | start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC() 209 | end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC() 210 | s.SetTimeRange(start, end) 211 | min, max = MustTimeRange(s.Condition) 212 | 213 | // TODO: right now the SetTimeRange can't override the start time if it's more recent than what they're trying to set it to. 214 | // shouldn't matter for our purposes with continuous queries, but fix this later 215 | 216 | if min != start { 217 | t.Fatalf("start time wasn't set properly.\n exp: %s\n got: %s", start, min) 218 | } 219 | // the end range is actually one nanosecond before the given one since end is exclusive 220 | end = end.Add(-time.Nanosecond) 221 | if max != end { 222 | t.Fatalf("end time wasn't set properly.\n exp: %s\n got: %s", end, max) 223 | } 224 | 225 | // ensure that when we set a time range other where clause conditions are still there 226 | q = "SELECT sum(value) from foo WHERE foo = 'bar' and time < now() GROUP BY time(10m)" 227 | stmt, err = influxql.NewParser(strings.NewReader(q)).ParseStatement() 228 | if err != nil { 229 | t.Fatalf("invalid statement: %q: %s", stmt, err) 230 | } 231 | 232 | s = stmt.(*influxql.SelectStatement) 233 | 234 | // update and ensure it saves it 235 | start = time.Now().Add(-40 * time.Hour).Round(time.Second).UTC() 236 | end = time.Now().Add(20 * time.Hour).Round(time.Second).UTC() 237 | s.SetTimeRange(start, end) 238 | min, max = MustTimeRange(s.Condition) 239 | 240 | if min != start { 241 | t.Fatalf("start time wasn't set properly.\n exp: %s\n got: 
%s", start, min) 242 | } 243 | // the end range is actually one nanosecond before the given one since end is exclusive 244 | end = end.Add(-time.Nanosecond) 245 | if max != end { 246 | t.Fatalf("end time wasn't set properly.\n exp: %s\n got: %s", end, max) 247 | } 248 | 249 | // ensure the where clause is there 250 | hasWhere := false 251 | influxql.WalkFunc(s.Condition, func(n influxql.Node) { 252 | if ex, ok := n.(*influxql.BinaryExpr); ok { 253 | if lhs, ok := ex.LHS.(*influxql.VarRef); ok { 254 | if lhs.Val == "foo" { 255 | if rhs, ok := ex.RHS.(*influxql.StringLiteral); ok { 256 | if rhs.Val == "bar" { 257 | hasWhere = true 258 | } 259 | } 260 | } 261 | } 262 | } 263 | }) 264 | if !hasWhere { 265 | t.Fatal("set time range cleared out the where clause") 266 | } 267 | } 268 | 269 | func TestSelectStatement_HasWildcard(t *testing.T) { 270 | var tests = []struct { 271 | stmt string 272 | wildcard bool 273 | }{ 274 | // No wildcards 275 | { 276 | stmt: `SELECT value FROM cpu`, 277 | wildcard: false, 278 | }, 279 | 280 | // Query wildcard 281 | { 282 | stmt: `SELECT * FROM cpu`, 283 | wildcard: true, 284 | }, 285 | 286 | // No GROUP BY wildcards 287 | { 288 | stmt: `SELECT value FROM cpu GROUP BY host`, 289 | wildcard: false, 290 | }, 291 | 292 | // No GROUP BY wildcards, time only 293 | { 294 | stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`, 295 | wildcard: false, 296 | }, 297 | 298 | // GROUP BY wildcard 299 | { 300 | stmt: `SELECT value FROM cpu GROUP BY *`, 301 | wildcard: true, 302 | }, 303 | 304 | // GROUP BY wildcard with time 305 | { 306 | stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m)`, 307 | wildcard: true, 308 | }, 309 | 310 | // GROUP BY wildcard with explicit 311 | { 312 | stmt: `SELECT value FROM cpu GROUP BY *,host`, 313 | wildcard: true, 314 | }, 315 | 316 | // GROUP BY multiple wildcards 317 | { 318 | stmt: `SELECT value FROM cpu GROUP BY *,*`, 319 | wildcard: true, 320 | }, 321 | 322 | // Combo 
323 | { 324 | stmt: `SELECT * FROM cpu GROUP BY *`, 325 | wildcard: true, 326 | }, 327 | } 328 | 329 | for i, tt := range tests { 330 | // Parse statement. 331 | stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() 332 | if err != nil { 333 | t.Fatalf("invalid statement: %q: %s", tt.stmt, err) 334 | } 335 | 336 | // Test wildcard detection. 337 | if w := stmt.(*influxql.SelectStatement).HasWildcard(); tt.wildcard != w { 338 | t.Errorf("%d. %q: unexpected wildcard detection:\n\nexp=%v\n\ngot=%v\n\n", i, tt.stmt, tt.wildcard, w) 339 | continue 340 | } 341 | } 342 | } 343 | 344 | // Test SELECT statement field rewrite. 345 | func TestSelectStatement_RewriteFields(t *testing.T) { 346 | var tests = []struct { 347 | stmt string 348 | rewrite string 349 | err string 350 | }{ 351 | // No wildcards 352 | { 353 | stmt: `SELECT value FROM cpu`, 354 | rewrite: `SELECT value FROM cpu`, 355 | }, 356 | 357 | // Query wildcard 358 | { 359 | stmt: `SELECT * FROM cpu`, 360 | rewrite: `SELECT host::tag, region::tag, value1::float, value2::integer FROM cpu`, 361 | }, 362 | 363 | // Parser fundamentally prohibits multiple query sources 364 | 365 | // Query wildcard with explicit 366 | { 367 | stmt: `SELECT *,value1 FROM cpu`, 368 | rewrite: `SELECT host::tag, region::tag, value1::float, value2::integer, value1::float FROM cpu`, 369 | }, 370 | 371 | // Query multiple wildcards 372 | { 373 | stmt: `SELECT *,* FROM cpu`, 374 | rewrite: `SELECT host::tag, region::tag, value1::float, value2::integer, host::tag, region::tag, value1::float, value2::integer FROM cpu`, 375 | }, 376 | 377 | // Query wildcards with group by 378 | { 379 | stmt: `SELECT * FROM cpu GROUP BY host`, 380 | rewrite: `SELECT region::tag, value1::float, value2::integer FROM cpu GROUP BY host`, 381 | }, 382 | 383 | // No GROUP BY wildcards 384 | { 385 | stmt: `SELECT value FROM cpu GROUP BY host`, 386 | rewrite: `SELECT value FROM cpu GROUP BY host`, 387 | }, 388 | 389 | // No GROUP BY wildcards, 
time only 390 | { 391 | stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY time(5ms)`, 392 | rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY time(5ms)`, 393 | }, 394 | 395 | // GROUP BY wildcard 396 | { 397 | stmt: `SELECT value FROM cpu GROUP BY *`, 398 | rewrite: `SELECT value FROM cpu GROUP BY host, region`, 399 | }, 400 | 401 | // GROUP BY wildcard with time 402 | { 403 | stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m)`, 404 | rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m)`, 405 | }, 406 | 407 | // GROUP BY wildcard with fill 408 | { 409 | stmt: `SELECT mean(value) FROM cpu where time < now() GROUP BY *,time(1m) fill(0)`, 410 | rewrite: `SELECT mean(value) FROM cpu WHERE time < now() GROUP BY host, region, time(1m) fill(0)`, 411 | }, 412 | 413 | // GROUP BY wildcard with explicit 414 | { 415 | stmt: `SELECT value FROM cpu GROUP BY *,host`, 416 | rewrite: `SELECT value FROM cpu GROUP BY host, region, host`, 417 | }, 418 | 419 | // GROUP BY multiple wildcards 420 | { 421 | stmt: `SELECT value FROM cpu GROUP BY *,*`, 422 | rewrite: `SELECT value FROM cpu GROUP BY host, region, host, region`, 423 | }, 424 | 425 | // Combo 426 | { 427 | stmt: `SELECT * FROM cpu GROUP BY *`, 428 | rewrite: `SELECT value1::float, value2::integer FROM cpu GROUP BY host, region`, 429 | }, 430 | 431 | // Wildcard function with all fields. 
432 | { 433 | stmt: `SELECT mean(*) FROM cpu`, 434 | rewrite: `SELECT mean(value1::float) AS mean_value1, mean(value2::integer) AS mean_value2 FROM cpu`, 435 | }, 436 | 437 | { 438 | stmt: `SELECT distinct(*) FROM strings`, 439 | rewrite: `SELECT distinct(string::string) AS distinct_string, distinct(value::float) AS distinct_value FROM strings`, 440 | }, 441 | 442 | { 443 | stmt: `SELECT distinct(*) FROM bools`, 444 | rewrite: `SELECT distinct(bool::boolean) AS distinct_bool, distinct(value::float) AS distinct_value FROM bools`, 445 | }, 446 | 447 | // Wildcard function with some fields excluded. 448 | { 449 | stmt: `SELECT mean(*) FROM strings`, 450 | rewrite: `SELECT mean(value::float) AS mean_value FROM strings`, 451 | }, 452 | 453 | { 454 | stmt: `SELECT mean(*) FROM bools`, 455 | rewrite: `SELECT mean(value::float) AS mean_value FROM bools`, 456 | }, 457 | 458 | // Wildcard function with an alias. 459 | { 460 | stmt: `SELECT mean(*) AS alias FROM cpu`, 461 | rewrite: `SELECT mean(value1::float) AS alias_value1, mean(value2::integer) AS alias_value2 FROM cpu`, 462 | }, 463 | 464 | // Query regex 465 | { 466 | stmt: `SELECT /1/ FROM cpu`, 467 | rewrite: `SELECT value1::float FROM cpu`, 468 | }, 469 | 470 | { 471 | stmt: `SELECT value1 FROM cpu GROUP BY /h/`, 472 | rewrite: `SELECT value1::float FROM cpu GROUP BY host`, 473 | }, 474 | 475 | // Query regex 476 | { 477 | stmt: `SELECT mean(/1/) FROM cpu`, 478 | rewrite: `SELECT mean(value1::float) AS mean_value1 FROM cpu`, 479 | }, 480 | // Rewrite subquery 481 | { 482 | stmt: `SELECT * FROM (SELECT mean(value1) FROM cpu GROUP BY host) GROUP BY *`, 483 | rewrite: `SELECT mean::float FROM (SELECT mean(value1::float) FROM cpu GROUP BY host) GROUP BY host`, 484 | }, 485 | 486 | // Invalid queries that can't be rewritten should return an error (to 487 | // avoid a panic in the query engine) 488 | { 489 | stmt: `SELECT count(*) / 2 FROM cpu`, 490 | err: `unsupported expression with wildcard: count(*) / 2`, 491 | }, 492 
| 493 | { 494 | stmt: `SELECT * / 2 FROM (SELECT count(*) FROM cpu)`, 495 | err: `unsupported expression with wildcard: * / 2`, 496 | }, 497 | 498 | { 499 | stmt: `SELECT count(/value/) / 2 FROM cpu`, 500 | err: `unsupported expression with regex field: count(/value/) / 2`, 501 | }, 502 | 503 | // This one should be possible though since there's no wildcard in the 504 | // binary expression. 505 | { 506 | stmt: `SELECT value1 + value2, * FROM cpu`, 507 | rewrite: `SELECT value1::float + value2::integer, host::tag, region::tag, value1::float, value2::integer FROM cpu`, 508 | }, 509 | 510 | { 511 | stmt: `SELECT value1 + value2, /value/ FROM cpu`, 512 | rewrite: `SELECT value1::float + value2::integer, value1::float, value2::integer FROM cpu`, 513 | }, 514 | } 515 | 516 | for i, tt := range tests { 517 | // Parse statement. 518 | stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() 519 | if err != nil { 520 | t.Fatalf("invalid statement: %q: %s", tt.stmt, err) 521 | } 522 | 523 | var mapper FieldMapper 524 | mapper.FieldDimensionsFn = func(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { 525 | switch m.Name { 526 | case "cpu": 527 | fields = map[string]influxql.DataType{ 528 | "value1": influxql.Float, 529 | "value2": influxql.Integer, 530 | } 531 | case "strings": 532 | fields = map[string]influxql.DataType{ 533 | "value": influxql.Float, 534 | "string": influxql.String, 535 | } 536 | case "bools": 537 | fields = map[string]influxql.DataType{ 538 | "value": influxql.Float, 539 | "bool": influxql.Boolean, 540 | } 541 | } 542 | dimensions = map[string]struct{}{"host": struct{}{}, "region": struct{}{}} 543 | return 544 | } 545 | 546 | // Rewrite statement. 547 | rw, err := stmt.(*influxql.SelectStatement).RewriteFields(&mapper) 548 | if tt.err != "" { 549 | if err != nil && err.Error() != tt.err { 550 | t.Errorf("%d. 
%q: unexpected error: %s != %s", i, tt.stmt, err.Error(), tt.err) 551 | } else if err == nil { 552 | t.Errorf("%d. %q: expected error", i, tt.stmt) 553 | } 554 | } else { 555 | if err != nil { 556 | t.Errorf("%d. %q: error: %s", i, tt.stmt, err) 557 | } else if rw == nil && tt.err == "" { 558 | t.Errorf("%d. %q: unexpected nil statement", i, tt.stmt) 559 | } else if rw := rw.String(); tt.rewrite != rw { 560 | t.Errorf("%d. %q: unexpected rewrite:\n\nexp=%s\n\ngot=%s\n\n", i, tt.stmt, tt.rewrite, rw) 561 | } 562 | } 563 | } 564 | } 565 | 566 | // Test SELECT statement regex conditions rewrite. 567 | func TestSelectStatement_RewriteRegexConditions(t *testing.T) { 568 | var tests = []struct { 569 | in string 570 | out string 571 | }{ 572 | {in: `SELECT value FROM cpu`, out: `SELECT value FROM cpu`}, 573 | {in: `SELECT value FROM cpu WHERE host = 'server-1'`, out: `SELECT value FROM cpu WHERE host = 'server-1'`}, 574 | {in: `SELECT value FROM cpu WHERE host = 'server-1'`, out: `SELECT value FROM cpu WHERE host = 'server-1'`}, 575 | {in: `SELECT value FROM cpu WHERE host != 'server-1'`, out: `SELECT value FROM cpu WHERE host != 'server-1'`}, 576 | 577 | // Non matching regex 578 | {in: `SELECT value FROM cpu WHERE host =~ /server-1|server-2|server-3/`, out: `SELECT value FROM cpu WHERE host =~ /server-1|server-2|server-3/`}, 579 | {in: `SELECT value FROM cpu WHERE host =~ /server-1/`, out: `SELECT value FROM cpu WHERE host =~ /server-1/`}, 580 | {in: `SELECT value FROM cpu WHERE host !~ /server-1/`, out: `SELECT value FROM cpu WHERE host !~ /server-1/`}, 581 | {in: `SELECT value FROM cpu WHERE host =~ /^server-1/`, out: `SELECT value FROM cpu WHERE host =~ /^server-1/`}, 582 | {in: `SELECT value FROM cpu WHERE host =~ /server-1$/`, out: `SELECT value FROM cpu WHERE host =~ /server-1$/`}, 583 | {in: `SELECT value FROM cpu WHERE host !~ /\^server-1$/`, out: `SELECT value FROM cpu WHERE host !~ /\^server-1$/`}, 584 | {in: `SELECT value FROM cpu WHERE host !~ /\^$/`, out: 
`SELECT value FROM cpu WHERE host !~ /\^$/`}, 585 | {in: `SELECT value FROM cpu WHERE host !~ /^server-1\$/`, out: `SELECT value FROM cpu WHERE host !~ /^server-1\$/`}, 586 | {in: `SELECT value FROM cpu WHERE host =~ /^\$/`, out: `SELECT value FROM cpu WHERE host =~ /^\$/`}, 587 | {in: `SELECT value FROM cpu WHERE host !~ /^a/`, out: `SELECT value FROM cpu WHERE host !~ /^a/`}, 588 | 589 | // These regexes are not supported due to the presence of escaped or meta characters. 590 | {in: `SELECT value FROM cpu WHERE host !~ /^?a$/`, out: `SELECT value FROM cpu WHERE host !~ /^?a$/`}, 591 | {in: `SELECT value FROM cpu WHERE host !~ /^a*$/`, out: `SELECT value FROM cpu WHERE host !~ /^a*$/`}, 592 | {in: `SELECT value FROM cpu WHERE host !~ /^a.b$/`, out: `SELECT value FROM cpu WHERE host !~ /^a.b$/`}, 593 | {in: `SELECT value FROM cpu WHERE host !~ /^ab+$/`, out: `SELECT value FROM cpu WHERE host !~ /^ab+$/`}, 594 | 595 | // These regexes are not supported due to the presence of unsupported regex flags. 596 | {in: `SELECT value FROM cpu WHERE host =~ /(?i)^SeRvEr01$/`, out: `SELECT value FROM cpu WHERE host =~ /(?i)^SeRvEr01$/`}, 597 | 598 | // These regexes are not supported due to large character class(es). 599 | {in: `SELECT value FROM cpu WHERE host =~ /^[^abcd]$/`, out: `SELECT value FROM cpu WHERE host =~ /^[^abcd]$/`}, 600 | 601 | // These regexes all match and will be rewritten. 
602 | {in: `SELECT value FROM cpu WHERE host !~ /^a[2]$/`, out: `SELECT value FROM cpu WHERE host != 'a2'`}, 603 | {in: `SELECT value FROM cpu WHERE host =~ /^server-1$/`, out: `SELECT value FROM cpu WHERE host = 'server-1'`}, 604 | {in: `SELECT value FROM cpu WHERE host !~ /^server-1$/`, out: `SELECT value FROM cpu WHERE host != 'server-1'`}, 605 | {in: `SELECT value FROM cpu WHERE host =~ /^server 1$/`, out: `SELECT value FROM cpu WHERE host = 'server 1'`}, 606 | {in: `SELECT value FROM cpu WHERE host =~ /^$/`, out: `SELECT value FROM cpu WHERE host = ''`}, 607 | {in: `SELECT value FROM cpu WHERE host !~ /^$/`, out: `SELECT value FROM cpu WHERE host != ''`}, 608 | {in: `SELECT value FROM cpu WHERE host =~ /^server-1$/ OR host =~ /^server-2$/`, out: `SELECT value FROM cpu WHERE host = 'server-1' OR host = 'server-2'`}, 609 | {in: `SELECT value FROM cpu WHERE host =~ /^server-1$/ OR host =~ /^server]a$/`, out: `SELECT value FROM cpu WHERE host = 'server-1' OR host = 'server]a'`}, 610 | {in: `SELECT value FROM cpu WHERE host =~ /^hello\?$/`, out: `SELECT value FROM cpu WHERE host = 'hello?'`}, 611 | {in: `SELECT value FROM cpu WHERE host !~ /^\\$/`, out: `SELECT value FROM cpu WHERE host != '\\'`}, 612 | {in: `SELECT value FROM cpu WHERE host !~ /^\\\$$/`, out: `SELECT value FROM cpu WHERE host != '\\$'`}, 613 | // This is supported, but annoying to write and the below queries satisfy this condition. 
614 | //{in: `SELECT value FROM cpu WHERE host =~ /^hello\world$/`, out: `SELECT value FROM cpu WHERE host =~ /^hello\world$/`}, 615 | {in: `SELECT value FROM cpu WHERE host =~ /^(server-1|server-2|server-3)$/`, out: `SELECT value FROM cpu WHERE host = 'server-1' OR host = 'server-2' OR host = 'server-3'`}, 616 | {in: `SELECT value FROM cpu WHERE host !~ /^(foo|bar)$/`, out: `SELECT value FROM cpu WHERE host != 'foo' AND host != 'bar'`}, 617 | {in: `SELECT value FROM cpu WHERE host !~ /^\d$/`, out: `SELECT value FROM cpu WHERE host != '0' AND host != '1' AND host != '2' AND host != '3' AND host != '4' AND host != '5' AND host != '6' AND host != '7' AND host != '8' AND host != '9'`}, 618 | {in: `SELECT value FROM cpu WHERE host !~ /^[a-z]$/`, out: `SELECT value FROM cpu WHERE host != 'a' AND host != 'b' AND host != 'c' AND host != 'd' AND host != 'e' AND host != 'f' AND host != 'g' AND host != 'h' AND host != 'i' AND host != 'j' AND host != 'k' AND host != 'l' AND host != 'm' AND host != 'n' AND host != 'o' AND host != 'p' AND host != 'q' AND host != 'r' AND host != 's' AND host != 't' AND host != 'u' AND host != 'v' AND host != 'w' AND host != 'x' AND host != 'y' AND host != 'z'`}, 619 | 620 | {in: `SELECT value FROM cpu WHERE host =~ /^[ab]{3}$/`, out: `SELECT value FROM cpu WHERE host = 'aaa' OR host = 'aab' OR host = 'aba' OR host = 'abb' OR host = 'baa' OR host = 'bab' OR host = 'bba' OR host = 'bbb'`}, 621 | } 622 | 623 | for i, test := range tests { 624 | stmt, err := influxql.NewParser(strings.NewReader(test.in)).ParseStatement() 625 | if err != nil { 626 | t.Fatalf("[Example %d], %v", i, err) 627 | } 628 | 629 | // Rewrite any supported regex conditions. 630 | stmt.(*influxql.SelectStatement).RewriteRegexConditions() 631 | 632 | // Get the expected rewritten statement. 
633 | expStmt, err := influxql.NewParser(strings.NewReader(test.out)).ParseStatement() 634 | if err != nil { 635 | t.Fatalf("[Example %d], %v", i, err) 636 | } 637 | 638 | // Compare the (potentially) rewritten AST to the expected AST. 639 | if got, exp := stmt, expStmt; !reflect.DeepEqual(got, exp) { 640 | t.Errorf("[Example %d]\nattempting %v\ngot %v\n%s\n\nexpected %v\n%s\n", i+1, test.in, got, mustMarshalJSON(got), exp, mustMarshalJSON(exp)) 641 | } 642 | } 643 | } 644 | 645 | // Test SELECT statement time field rewrite. 646 | func TestSelectStatement_RewriteTimeFields(t *testing.T) { 647 | var tests = []struct { 648 | s string 649 | stmt influxql.Statement 650 | }{ 651 | { 652 | s: `SELECT time, field1 FROM cpu`, 653 | stmt: &influxql.SelectStatement{ 654 | IsRawQuery: true, 655 | Fields: []*influxql.Field{ 656 | {Expr: &influxql.VarRef{Val: "field1"}}, 657 | }, 658 | Sources: []influxql.Source{ 659 | &influxql.Measurement{Name: "cpu"}, 660 | }, 661 | }, 662 | }, 663 | { 664 | s: `SELECT time AS timestamp, field1 FROM cpu`, 665 | stmt: &influxql.SelectStatement{ 666 | IsRawQuery: true, 667 | Fields: []*influxql.Field{ 668 | {Expr: &influxql.VarRef{Val: "field1"}}, 669 | }, 670 | Sources: []influxql.Source{ 671 | &influxql.Measurement{Name: "cpu"}, 672 | }, 673 | TimeAlias: "timestamp", 674 | }, 675 | }, 676 | } 677 | 678 | for i, tt := range tests { 679 | // Parse statement. 680 | stmt, err := influxql.NewParser(strings.NewReader(tt.s)).ParseStatement() 681 | if err != nil { 682 | t.Fatalf("invalid statement: %q: %s", tt.s, err) 683 | } 684 | 685 | // Rewrite statement. 686 | stmt.(*influxql.SelectStatement).RewriteTimeFields() 687 | if !reflect.DeepEqual(tt.stmt, stmt) { 688 | t.Logf("\n# %s\nexp=%s\ngot=%s\n", tt.s, mustMarshalJSON(tt.stmt), mustMarshalJSON(stmt)) 689 | t.Logf("\nSQL exp=%s\nSQL got=%s\n", tt.stmt.String(), stmt.String()) 690 | t.Errorf("%d. 
%q\n\nstmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt) 691 | } 692 | } 693 | } 694 | 695 | // Ensure that the IsRawQuery flag gets set properly 696 | func TestSelectStatement_IsRawQuerySet(t *testing.T) { 697 | var tests = []struct { 698 | stmt string 699 | isRaw bool 700 | }{ 701 | { 702 | stmt: "select * from foo", 703 | isRaw: true, 704 | }, 705 | { 706 | stmt: "select value1,value2 from foo", 707 | isRaw: true, 708 | }, 709 | { 710 | stmt: "select value1,value2 from foo, time(10m)", 711 | isRaw: true, 712 | }, 713 | { 714 | stmt: "select mean(value) from foo where time < now() group by time(5m)", 715 | isRaw: false, 716 | }, 717 | { 718 | stmt: "select mean(value) from foo group by bar", 719 | isRaw: false, 720 | }, 721 | { 722 | stmt: "select mean(value) from foo group by *", 723 | isRaw: false, 724 | }, 725 | { 726 | stmt: "select mean(value) from foo group by *", 727 | isRaw: false, 728 | }, 729 | } 730 | 731 | for _, tt := range tests { 732 | s := MustParseSelectStatement(tt.stmt) 733 | if s.IsRawQuery != tt.isRaw { 734 | t.Errorf("'%s', IsRawQuery should be %v", tt.stmt, tt.isRaw) 735 | } 736 | } 737 | } 738 | 739 | // Ensure binary expression names can be evaluated. 740 | func TestBinaryExprName(t *testing.T) { 741 | for i, tt := range []struct { 742 | expr string 743 | name string 744 | }{ 745 | {expr: `value + 1`, name: `value`}, 746 | {expr: `"user" / total`, name: `user_total`}, 747 | {expr: `("user" + total) / total`, name: `user_total_total`}, 748 | } { 749 | expr := influxql.MustParseExpr(tt.expr) 750 | switch expr := expr.(type) { 751 | case *influxql.BinaryExpr: 752 | name := influxql.BinaryExprName(expr) 753 | if name != tt.name { 754 | t.Errorf("%d. unexpected name %s, got %s", i, name, tt.name) 755 | } 756 | default: 757 | t.Errorf("%d. 
unexpected expr type: %T", i, expr) 758 | } 759 | } 760 | } 761 | 762 | func TestConditionExpr(t *testing.T) { 763 | mustParseTime := func(value string) time.Time { 764 | ts, err := time.Parse(time.RFC3339, value) 765 | if err != nil { 766 | t.Fatalf("unable to parse time: %s", err) 767 | } 768 | return ts 769 | } 770 | now := mustParseTime("2000-01-01T00:00:00Z") 771 | valuer := influxql.NowValuer{Now: now} 772 | 773 | for _, tt := range []struct { 774 | s string 775 | cond string 776 | min, max time.Time 777 | err string 778 | }{ 779 | {s: `host = 'server01'`, cond: `host = 'server01'`}, 780 | {s: `time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T01:00:00Z'`, 781 | min: mustParseTime("2000-01-01T00:00:00Z"), 782 | max: mustParseTime("2000-01-01T01:00:00Z").Add(-1)}, 783 | {s: `host = 'server01' AND (region = 'uswest' AND time >= now() - 10m)`, 784 | cond: `host = 'server01' AND (region = 'uswest')`, 785 | min: mustParseTime("1999-12-31T23:50:00Z")}, 786 | {s: `(host = 'server01' AND region = 'uswest') AND time >= now() - 10m`, 787 | cond: `host = 'server01' AND region = 'uswest'`, 788 | min: mustParseTime("1999-12-31T23:50:00Z")}, 789 | {s: `host = 'server01' AND (time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T01:00:00Z')`, 790 | cond: `host = 'server01'`, 791 | min: mustParseTime("2000-01-01T00:00:00Z"), 792 | max: mustParseTime("2000-01-01T01:00:00Z").Add(-1)}, 793 | {s: `(time >= '2000-01-01T00:00:00Z' AND time < '2000-01-01T01:00:00Z') AND host = 'server01'`, 794 | cond: `host = 'server01'`, 795 | min: mustParseTime("2000-01-01T00:00:00Z"), 796 | max: mustParseTime("2000-01-01T01:00:00Z").Add(-1)}, 797 | {s: `'2000-01-01T00:00:00Z' <= time AND '2000-01-01T01:00:00Z' > time`, 798 | min: mustParseTime("2000-01-01T00:00:00Z"), 799 | max: mustParseTime("2000-01-01T01:00:00Z").Add(-1)}, 800 | {s: `'2000-01-01T00:00:00Z' < time AND '2000-01-01T01:00:00Z' >= time`, 801 | min: mustParseTime("2000-01-01T00:00:00Z").Add(1), 802 | max: 
mustParseTime("2000-01-01T01:00:00Z")}, 803 | {s: `time = '2000-01-01T00:00:00Z'`, 804 | min: mustParseTime("2000-01-01T00:00:00Z"), 805 | max: mustParseTime("2000-01-01T00:00:00Z")}, 806 | {s: `time >= 10s`, min: mustParseTime("1970-01-01T00:00:10Z")}, 807 | {s: `time >= 10000000000`, min: mustParseTime("1970-01-01T00:00:10Z")}, 808 | {s: `time >= 10000000000.0`, min: mustParseTime("1970-01-01T00:00:10Z")}, 809 | {s: `time > now()`, min: now.Add(1)}, 810 | {s: `value`, err: `invalid condition expression: value`}, 811 | {s: `4`, err: `invalid condition expression: 4`}, 812 | {s: `time >= 'today'`, err: `invalid operation: time and *influxql.StringLiteral are not compatible`}, 813 | {s: `time != '2000-01-01T00:00:00Z'`, err: `invalid time comparison operator: !=`}, 814 | // This query makes no logical sense, but it's common enough that we pretend 815 | // it does. Technically, this should be illegal because the AND has higher precedence 816 | // than the OR so the AND only applies to the server02 tag, but a person's intention 817 | // is to have it apply to both and previous versions worked that way. 818 | {s: `host = 'server01' OR host = 'server02' AND time >= now() - 10m`, 819 | cond: `host = 'server01' OR host = 'server02'`, 820 | min: mustParseTime("1999-12-31T23:50:00Z")}, 821 | // TODO(jsternberg): This should be an error, but we can't because the above query 822 | // needs to work. Until we can work a way for the above to work or at least get 823 | // a warning message for people to transition to a correct syntax, the bad behavior 824 | // stays. 
825 | //{s: `host = 'server01' OR (time >= now() - 10m AND host = 'server02')`, err: `cannot use OR with time conditions`}, 826 | {s: `value AND host = 'server01'`, err: `invalid condition expression: value`}, 827 | {s: `host = 'server01' OR (value)`, err: `invalid condition expression: value`}, 828 | {s: `time > '2262-04-11 23:47:17'`, err: `time 2262-04-11T23:47:17Z overflows time literal`}, 829 | {s: `time > '1677-09-20 19:12:43'`, err: `time 1677-09-20T19:12:43Z underflows time literal`}, 830 | {s: `true AND (false OR product = 'xyz')`, 831 | cond: `product = 'xyz'`, 832 | }, 833 | {s: `'a' = 'a'`, cond: ``}, 834 | {s: `value > 0 OR true`, cond: ``}, 835 | {s: `host = 'server01' AND false`, cond: `false`}, 836 | {s: `TIME >= '2000-01-01T00:00:00Z'`, min: mustParseTime("2000-01-01T00:00:00Z")}, 837 | {s: `'2000-01-01T00:00:00Z' <= TIME`, min: mustParseTime("2000-01-01T00:00:00Z")}, 838 | // Remove enclosing parentheses 839 | {s: `(host = 'server01')`, cond: `host = 'server01'`}, 840 | // Preserve nested parentheses 841 | {s: `host = 'server01' AND (region = 'region01' OR region = 'region02')`, 842 | cond: `host = 'server01' AND (region = 'region01' OR region = 'region02')`, 843 | }, 844 | } { 845 | t.Run(tt.s, func(t *testing.T) { 846 | expr, err := influxql.ParseExpr(tt.s) 847 | if err != nil { 848 | t.Fatalf("unexpected error: %s", err) 849 | } 850 | 851 | cond, timeRange, err := influxql.ConditionExpr(expr, &valuer) 852 | if err != nil { 853 | if tt.err == "" { 854 | t.Fatalf("unexpected error: %s", err) 855 | } else if have, want := err.Error(), tt.err; have != want { 856 | t.Fatalf("unexpected error: %s != %s", have, want) 857 | } 858 | } 859 | if cond != nil { 860 | if have, want := cond.String(), tt.cond; have != want { 861 | t.Errorf("unexpected condition:\nhave=%s\nwant=%s", have, want) 862 | } 863 | } else { 864 | if have, want := "", tt.cond; have != want { 865 | t.Errorf("unexpected condition:\nhave=%s\nwant=%s", have, want) 866 | } 867 | } 868 | if 
have, want := timeRange.Min, tt.min; !have.Equal(want) { 869 | t.Errorf("unexpected min time:\nhave=%s\nwant=%s", have, want) 870 | } 871 | if have, want := timeRange.Max, tt.max; !have.Equal(want) { 872 | t.Errorf("unexpected max time:\nhave=%s\nwant=%s", have, want) 873 | } 874 | }) 875 | } 876 | } 877 | 878 | // Ensure an AST node can be rewritten. 879 | func TestRewrite(t *testing.T) { 880 | expr := MustParseExpr(`time > 1 OR foo = 2`) 881 | 882 | // Flip LHS & RHS in all binary expressions. 883 | act := influxql.RewriteFunc(expr, func(n influxql.Node) influxql.Node { 884 | switch n := n.(type) { 885 | case *influxql.BinaryExpr: 886 | return &influxql.BinaryExpr{Op: n.Op, LHS: n.RHS, RHS: n.LHS} 887 | default: 888 | return n 889 | } 890 | }) 891 | 892 | // Verify that everything is flipped. 893 | if act := act.String(); act != `2 = foo OR 1 > time` { 894 | t.Fatalf("unexpected result: %s", act) 895 | } 896 | } 897 | 898 | // Ensure an Expr can be rewritten handling nils. 899 | func TestRewriteExpr(t *testing.T) { 900 | expr := MustParseExpr(`(time > 1 AND time < 10) OR foo = 2`) 901 | 902 | // Remove all time expressions. 903 | act := influxql.RewriteExpr(expr, func(e influxql.Expr) influxql.Expr { 904 | switch e := e.(type) { 905 | case *influxql.BinaryExpr: 906 | if lhs, ok := e.LHS.(*influxql.VarRef); ok && lhs.Val == "time" { 907 | return nil 908 | } 909 | } 910 | return e 911 | }) 912 | 913 | // Verify that everything is flipped. 
914 | if act := act.String(); act != `foo = 2` { 915 | t.Fatalf("unexpected result: %s", act) 916 | } 917 | } 918 | 919 | // Ensure that the String() value of a statement is parseable 920 | func TestParseString(t *testing.T) { 921 | var tests = []struct { 922 | stmt string 923 | }{ 924 | { 925 | stmt: `SELECT "cpu load" FROM myseries`, 926 | }, 927 | { 928 | stmt: `SELECT "cpu load" FROM "my series"`, 929 | }, 930 | { 931 | stmt: `SELECT "cpu\"load" FROM myseries`, 932 | }, 933 | { 934 | stmt: `SELECT "cpu'load" FROM myseries`, 935 | }, 936 | { 937 | stmt: `SELECT "cpu load" FROM "my\"series"`, 938 | }, 939 | { 940 | stmt: `SELECT "field with spaces" FROM "\"ugly\" db"."\"ugly\" rp"."\"ugly\" measurement"`, 941 | }, 942 | { 943 | stmt: `SELECT * FROM myseries`, 944 | }, 945 | { 946 | stmt: `DROP DATABASE "!"`, 947 | }, 948 | { 949 | stmt: `DROP RETENTION POLICY "my rp" ON "a database"`, 950 | }, 951 | { 952 | stmt: `CREATE RETENTION POLICY "my rp" ON "a database" DURATION 1d REPLICATION 1`, 953 | }, 954 | { 955 | stmt: `ALTER RETENTION POLICY "my rp" ON "a database" DEFAULT`, 956 | }, 957 | { 958 | stmt: `SHOW RETENTION POLICIES ON "a database"`, 959 | }, 960 | { 961 | stmt: `SHOW TAG VALUES WITH KEY IN ("a long name", short)`, 962 | }, 963 | { 964 | stmt: `DROP CONTINUOUS QUERY "my query" ON "my database"`, 965 | }, 966 | // See issues https://github.com/influxdata/influxdb/issues/1647 967 | // and https://github.com/influxdata/influxdb/issues/4404 968 | //{ 969 | // stmt: `DELETE FROM "my db"."my rp"."my measurement"`, 970 | //}, 971 | { 972 | stmt: `DROP SUBSCRIPTION "ugly \"subscription\" name" ON "\"my\" db"."\"my\" rp"`, 973 | }, 974 | { 975 | stmt: `CREATE SUBSCRIPTION "ugly \"subscription\" name" ON "\"my\" db"."\"my\" rp" DESTINATIONS ALL 'my host', 'my other host'`, 976 | }, 977 | { 978 | stmt: `SHOW MEASUREMENTS WITH MEASUREMENT =~ /foo/`, 979 | }, 980 | { 981 | stmt: `SHOW MEASUREMENTS WITH MEASUREMENT = "and/or"`, 982 | }, 983 | { 984 | stmt: `DROP 
USER "user with spaces"`, 985 | }, 986 | { 987 | stmt: `GRANT ALL PRIVILEGES ON "db with spaces" TO "user with spaces"`, 988 | }, 989 | { 990 | stmt: `GRANT ALL PRIVILEGES TO "user with spaces"`, 991 | }, 992 | { 993 | stmt: `SHOW GRANTS FOR "user with spaces"`, 994 | }, 995 | { 996 | stmt: `REVOKE ALL PRIVILEGES ON "db with spaces" FROM "user with spaces"`, 997 | }, 998 | { 999 | stmt: `REVOKE ALL PRIVILEGES FROM "user with spaces"`, 1000 | }, 1001 | { 1002 | stmt: `CREATE DATABASE "db with spaces"`, 1003 | }, 1004 | } 1005 | 1006 | for _, tt := range tests { 1007 | // Parse statement. 1008 | stmt, err := influxql.NewParser(strings.NewReader(tt.stmt)).ParseStatement() 1009 | if err != nil { 1010 | t.Fatalf("invalid statement: %q: %s", tt.stmt, err) 1011 | } 1012 | 1013 | stmtCopy, err := influxql.NewParser(strings.NewReader(stmt.String())).ParseStatement() 1014 | if err != nil { 1015 | t.Fatalf("failed to parse string: %v\norig: %v\ngot: %v", err, tt.stmt, stmt.String()) 1016 | } 1017 | 1018 | if !reflect.DeepEqual(stmt, stmtCopy) { 1019 | t.Fatalf("statement changed after stringifying and re-parsing:\noriginal : %v\nre-parsed: %v\n", tt.stmt, stmtCopy.String()) 1020 | } 1021 | } 1022 | } 1023 | 1024 | // Ensure an expression can be reduced. 1025 | func TestEval(t *testing.T) { 1026 | for i, tt := range []struct { 1027 | in string 1028 | out interface{} 1029 | data map[string]interface{} 1030 | }{ 1031 | // Number literals. 
1032 | {in: `1 + 2`, out: int64(3)}, 1033 | {in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: float64(26.5), data: map[string]interface{}{"foo": float64(5)}}, 1034 | {in: `foo / 2`, out: float64(2), data: map[string]interface{}{"foo": float64(4)}}, 1035 | {in: `4 = 4`, out: true}, 1036 | {in: `4 <> 4`, out: false}, 1037 | {in: `6 > 4`, out: true}, 1038 | {in: `4 >= 4`, out: true}, 1039 | {in: `4 < 6`, out: true}, 1040 | {in: `4 <= 4`, out: true}, 1041 | {in: `4 AND 5`, out: nil}, 1042 | {in: `0 = 'test'`, out: false}, 1043 | {in: `1.0 = 1`, out: true}, 1044 | {in: `1.2 = 1`, out: false}, 1045 | {in: `-1 = 9223372036854775808`, out: false}, 1046 | {in: `-1 != 9223372036854775808`, out: true}, 1047 | {in: `-1 < 9223372036854775808`, out: true}, 1048 | {in: `-1 <= 9223372036854775808`, out: true}, 1049 | {in: `-1 > 9223372036854775808`, out: false}, 1050 | {in: `-1 >= 9223372036854775808`, out: false}, 1051 | {in: `9223372036854775808 = -1`, out: false}, 1052 | {in: `9223372036854775808 != -1`, out: true}, 1053 | {in: `9223372036854775808 < -1`, out: false}, 1054 | {in: `9223372036854775808 <= -1`, out: false}, 1055 | {in: `9223372036854775808 > -1`, out: true}, 1056 | {in: `9223372036854775808 >= -1`, out: true}, 1057 | {in: `9223372036854775808 = 9223372036854775808`, out: true}, 1058 | {in: `9223372036854775808 != 9223372036854775808`, out: false}, 1059 | {in: `9223372036854775808 < 9223372036854775808`, out: false}, 1060 | {in: `9223372036854775808 <= 9223372036854775808`, out: true}, 1061 | {in: `9223372036854775808 > 9223372036854775808`, out: false}, 1062 | {in: `9223372036854775808 >= 9223372036854775808`, out: true}, 1063 | {in: `9223372036854775809 = 9223372036854775808`, out: false}, 1064 | {in: `9223372036854775809 != 9223372036854775808`, out: true}, 1065 | {in: `9223372036854775809 < 9223372036854775808`, out: false}, 1066 | {in: `9223372036854775809 <= 9223372036854775808`, out: false}, 1067 | {in: `9223372036854775809 > 9223372036854775808`, out: true}, 
1068 | {in: `9223372036854775809 >= 9223372036854775808`, out: true}, 1069 | {in: `9223372036854775808 / 0`, out: uint64(0)}, 1070 | {in: `9223372036854775808 + 1`, out: uint64(9223372036854775809)}, 1071 | {in: `9223372036854775808 - 1`, out: uint64(9223372036854775807)}, 1072 | {in: `9223372036854775809 - 9223372036854775808`, out: uint64(1)}, 1073 | 1074 | // Boolean literals. 1075 | {in: `true AND false`, out: false}, 1076 | {in: `true OR false`, out: true}, 1077 | {in: `false = 4`, out: false}, 1078 | 1079 | // String literals. 1080 | {in: `'foo' = 'bar'`, out: false}, 1081 | {in: `'foo' = 'foo'`, out: true}, 1082 | {in: `'' = 4`, out: false}, 1083 | 1084 | // Regex literals. 1085 | {in: `'foo' =~ /f.*/`, out: true}, 1086 | {in: `'foo' =~ /b.*/`, out: false}, 1087 | {in: `'foo' !~ /f.*/`, out: false}, 1088 | {in: `'foo' !~ /b.*/`, out: true}, 1089 | 1090 | // Variable references. 1091 | {in: `foo`, out: "bar", data: map[string]interface{}{"foo": "bar"}}, 1092 | {in: `foo = 'bar'`, out: true, data: map[string]interface{}{"foo": "bar"}}, 1093 | {in: `foo = 'bar'`, out: false, data: map[string]interface{}{"foo": nil}}, 1094 | {in: `'bar' = foo`, out: false, data: map[string]interface{}{"foo": nil}}, 1095 | {in: `foo <> 'bar'`, out: true, data: map[string]interface{}{"foo": "xxx"}}, 1096 | {in: `foo =~ /b.*/`, out: true, data: map[string]interface{}{"foo": "bar"}}, 1097 | {in: `foo !~ /b.*/`, out: false, data: map[string]interface{}{"foo": "bar"}}, 1098 | {in: `foo > 2 OR bar > 3`, out: true, data: map[string]interface{}{"foo": float64(4)}}, 1099 | {in: `foo > 2 OR bar > 3`, out: true, data: map[string]interface{}{"bar": float64(4)}}, 1100 | } { 1101 | // Evaluate expression. 1102 | out := influxql.Eval(MustParseExpr(tt.in), tt.data) 1103 | 1104 | // Compare with expected output. 1105 | if !reflect.DeepEqual(tt.out, out) { 1106 | t.Errorf("%d. 
%s: unexpected output:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.in, tt.out, out) 1107 | continue 1108 | } 1109 | } 1110 | } 1111 | 1112 | type EvalFixture map[string]map[string]influxql.DataType 1113 | 1114 | func (e EvalFixture) MapType(measurement *influxql.Measurement, field string) influxql.DataType { 1115 | m := e[measurement.Name] 1116 | if m == nil { 1117 | return influxql.Unknown 1118 | } 1119 | return m[field] 1120 | } 1121 | 1122 | func (e EvalFixture) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { 1123 | switch name { 1124 | case "mean", "median", "integral", "stddev": 1125 | return influxql.Float, nil 1126 | case "count": 1127 | return influxql.Integer, nil 1128 | case "elapsed": 1129 | return influxql.Integer, nil 1130 | default: 1131 | return args[0], nil 1132 | } 1133 | } 1134 | 1135 | func TestEvalType(t *testing.T) { 1136 | for i, tt := range []struct { 1137 | name string 1138 | in string 1139 | typ influxql.DataType 1140 | err string 1141 | data EvalFixture 1142 | }{ 1143 | { 1144 | name: `a single data type`, 1145 | in: `min(value)`, 1146 | typ: influxql.Integer, 1147 | data: EvalFixture{ 1148 | "cpu": map[string]influxql.DataType{ 1149 | "value": influxql.Integer, 1150 | }, 1151 | }, 1152 | }, 1153 | { 1154 | name: `multiple data types`, 1155 | in: `min(value)`, 1156 | typ: influxql.Integer, 1157 | data: EvalFixture{ 1158 | "cpu": map[string]influxql.DataType{ 1159 | "value": influxql.Integer, 1160 | }, 1161 | "mem": map[string]influxql.DataType{ 1162 | "value": influxql.String, 1163 | }, 1164 | }, 1165 | }, 1166 | { 1167 | name: `count() with a float`, 1168 | in: `count(value)`, 1169 | typ: influxql.Integer, 1170 | data: EvalFixture{ 1171 | "cpu": map[string]influxql.DataType{ 1172 | "value": influxql.Float, 1173 | }, 1174 | }, 1175 | }, 1176 | { 1177 | name: `mean() with an integer`, 1178 | in: `mean(value)`, 1179 | typ: influxql.Float, 1180 | data: EvalFixture{ 1181 | "cpu": map[string]influxql.DataType{ 1182 | "value": 
influxql.Integer, 1183 | }, 1184 | }, 1185 | }, 1186 | { 1187 | name: `stddev() with an integer`, 1188 | in: `stddev(value)`, 1189 | typ: influxql.Float, 1190 | data: EvalFixture{ 1191 | "cpu": map[string]influxql.DataType{ 1192 | "value": influxql.Integer, 1193 | }, 1194 | }, 1195 | }, 1196 | { 1197 | name: `value inside a parenthesis`, 1198 | in: `(value)`, 1199 | typ: influxql.Float, 1200 | data: EvalFixture{ 1201 | "cpu": map[string]influxql.DataType{ 1202 | "value": influxql.Float, 1203 | }, 1204 | }, 1205 | }, 1206 | { 1207 | name: `binary expression with a float and integer`, 1208 | in: `v1 + v2`, 1209 | typ: influxql.Float, 1210 | data: EvalFixture{ 1211 | "cpu": map[string]influxql.DataType{ 1212 | "v1": influxql.Float, 1213 | "v2": influxql.Integer, 1214 | }, 1215 | }, 1216 | }, 1217 | { 1218 | name: `integer and unsigned literal`, 1219 | in: `value + 9223372036854775808`, 1220 | err: `type error: value + 9223372036854775808: cannot use + with an integer and unsigned literal`, 1221 | data: EvalFixture{ 1222 | "cpu": map[string]influxql.DataType{ 1223 | "value": influxql.Integer, 1224 | }, 1225 | }, 1226 | }, 1227 | { 1228 | name: `unsigned and integer literal`, 1229 | in: `value + 1`, 1230 | typ: influxql.Unsigned, 1231 | data: EvalFixture{ 1232 | "cpu": map[string]influxql.DataType{ 1233 | "value": influxql.Unsigned, 1234 | }, 1235 | }, 1236 | }, 1237 | { 1238 | name: `incompatible types`, 1239 | in: `v1 + v2`, 1240 | err: `type error: v1 + v2: incompatible types: string and integer`, 1241 | data: EvalFixture{ 1242 | "cpu": map[string]influxql.DataType{ 1243 | "v1": influxql.String, 1244 | "v2": influxql.Integer, 1245 | }, 1246 | }, 1247 | }, 1248 | } { 1249 | sources := make([]influxql.Source, 0, len(tt.data)) 1250 | for src := range tt.data { 1251 | sources = append(sources, &influxql.Measurement{Name: src}) 1252 | } 1253 | 1254 | expr := influxql.MustParseExpr(tt.in) 1255 | valuer := influxql.TypeValuerEval{ 1256 | TypeMapper: tt.data, 1257 | Sources: 
sources, 1258 | } 1259 | typ, err := valuer.EvalType(expr) 1260 | if err != nil { 1261 | if exp, got := tt.err, err.Error(); exp != got { 1262 | t.Errorf("%d. %s: unexpected error:\n\nexp=%#v\n\ngot=%v\n\n", i, tt.name, exp, got) 1263 | } 1264 | } else if typ != tt.typ { 1265 | t.Errorf("%d. %s: unexpected type:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.name, tt.typ, typ) 1266 | } 1267 | } 1268 | } 1269 | 1270 | func Benchmark_NumberLiteral_String(b *testing.B) { 1271 | // Generate test numbers using an exponential distribution. 1272 | // The numbers are heavily weighted towards fractional numbers, followed by a whole numbers. 1273 | // Nan, +Inf, and -Inf round out the test data at much lower percentages. 1274 | tc := struct { 1275 | Fractional int 1276 | Whole int 1277 | NaN int 1278 | PosInf int 1279 | NegInf int 1280 | }{} 1281 | nl := make([]influxql.NumberLiteral, b.N, b.N) 1282 | for i := range nl { 1283 | var n float64 1284 | sel := rand.ExpFloat64() * 10 1285 | switch { 1286 | case sel >= 12.5 || sel <= 5.0: 1287 | n = rand.Float64() 1288 | tc.Fractional++ 1289 | case sel > 12.0: 1290 | n = math.Inf(-1) 1291 | tc.NegInf++ 1292 | case sel > 11.5: 1293 | n = math.Inf(1) 1294 | tc.PosInf++ 1295 | case sel > 11.0: 1296 | n = math.NaN() 1297 | tc.NaN++ 1298 | case sel > 5.0: 1299 | n = float64(rand.Int31()) 1300 | tc.Whole++ 1301 | default: 1302 | panic("Shouldn't get here") 1303 | } 1304 | nl[i] = influxql.NumberLiteral{Val: n} 1305 | } 1306 | fmt.Printf("tc: %+v\n", tc) 1307 | b.ResetTimer() 1308 | 1309 | for i := range nl { 1310 | _ = nl[i].String() 1311 | } 1312 | } 1313 | 1314 | // Ensure an expression can be reduced. 1315 | func TestReduce(t *testing.T) { 1316 | now := mustParseTime("2000-01-01T00:00:00Z") 1317 | 1318 | for i, tt := range []struct { 1319 | in string 1320 | out string 1321 | data influxql.MapValuer 1322 | }{ 1323 | // Number literals. 
1324 | {in: `1 + 2`, out: `3`}, 1325 | {in: `(foo*2) + ( (4/2) + (3 * 5) - 0.5 )`, out: `(foo * 2) + 16.5`}, 1326 | {in: `foo(bar(2 + 3), 4)`, out: `foo(bar(5), 4)`}, 1327 | {in: `4 / 0`, out: `0.0`}, // This is fine. (Fixing probably breaks a lot of queries.) 1328 | {in: `0.0`, out: `0.0`}, // Make sure we don't go from float to int here. 1329 | 1330 | // Fraction fever, for checking expected use of Ryu conversion used when FormatFloat is passed prec=-1. 1331 | {in: `10 / 2`, out: `5.0`}, // whole number 1332 | {in: `1 / 2`, out: `0.5`}, // 1 decimal place 1333 | {in: `1 / 3`, out: `0.3333333333333333`}, // Repeating fraction 1334 | {in: `1 / 4`, out: `0.25`}, // 2 decimal places 1335 | {in: `1 / 6`, out: `0.16666666666666666`}, // Fancier repeating decimal 1336 | {in: `1 / 7`, out: `0.14285714285714285`}, // A more exciting repeating decimal 1337 | {in: `1 / 8`, out: `0.125`}, // 3 decimal places 1338 | {in: `1 / 16`, out: `0.0625`}, // 4 decimal places 1339 | {in: `1 / 32`, out: `0.03125`}, // 5 decimal places 1340 | {in: `1 / 8388608`, out: `0.00000011920928955078125`}, // 23 decimal places, non-repeating decimal, full accuracy 1341 | {in: `1 / 16777216`, out: `0.00000005960464477539063`}, // 23 decimal places, just past the limit of full accuracy with float64 1342 | {in: `1 / 134217728`, out: `0.000000007450580596923828`}, // 23 decimal places, way past the limit of full accuracy with float64 1343 | 1344 | // Fraction fever has been decimalized. This explicitly checks that conversions are reversible. 
1345 | {in: `5.0`, out: `5.0`}, // whole number 1346 | {in: `0.5`, out: `0.5`}, // 1 decimal place 1347 | {in: `0.3333333333333333`, out: `0.3333333333333333`}, // Repeating fraction 1348 | {in: `0.25`, out: `0.25`}, // 2 decimal places 1349 | {in: `0.16666666666666666`, out: `0.16666666666666666`}, // Fancier repeating decimal 1350 | {in: `0.14285714285714285`, out: `0.14285714285714285`}, // A more exciting repeating decimal 1351 | {in: `0.125`, out: `0.125`}, // 3 decimal places 1352 | {in: `0.0625`, out: `0.0625`}, // 4 decimal places 1353 | {in: `0.03125`, out: `0.03125`}, // 5 decimal places 1354 | {in: `0.00000011920928955078125`, out: `0.00000011920928955078125`}, // 23 decimal places, non-repeating decimal, full accuracy 1355 | {in: `0.00000005960464477539063`, out: `0.00000005960464477539063`}, // 23 decimal places, just past the limit of full accuracy with float64 1356 | {in: `0.000000007450580596923828`, out: `0.000000007450580596923828`}, // 23 decimal places, way past the limit of full accuracy with float64 1357 | 1358 | // math.MaxFloat64 1359 | {in: `179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0`, out: `179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0`}, 1360 | 1361 | {in: `2 % 3`, out: `2`}, 1362 | {in: `5 % 2`, out: `1`}, 1363 | {in: `2 % 0`, out: `0`}, 1364 | 1365 | // There are 3 cases where converting a float to a string gets something besides a number: `NaN`, `+Inf`, and `-Inf`. 
1366 | // Check that none are disturbed by our fix to keep floats from tunring into ints. 1367 | {in: `2.5 % 0`, out: `NaN`}, // Make sure our fix to stop floats turning into ints doesn't do something weird with Nan. 1368 | 1369 | // +Inf (to make sure our fix to stop floats turning into ints doesn't do something weird with +Inf) 1370 | {in: `179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0 * 179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0`, out: `+Inf`}, 1371 | 1372 | // -Inf (to make sure our fix to stop floats turning into ints doesn't do something weird with -Inf) 1373 | {in: `-179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0 * 179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000.0`, out: `-Inf`}, 1374 | {in: `254 & 3`, out: `2`}, 1375 | {in: `254 | 3`, out: `255`}, 1376 | {in: `254 ^ 3`, out: `253`}, 1377 | {in: `-3 & 3`, out: `1`}, 1378 | {in: `8 & -3`, out: `8`}, 1379 | {in: `8.5 & -3`, out: `8.5 & -3`}, 1380 | {in: `4 = 4`, out: 
`true`}, 1381 | {in: `4 <> 4`, out: `false`}, 1382 | {in: `6 > 4`, out: `true`}, 1383 | {in: `4 >= 4`, out: `true`}, 1384 | {in: `4 < 6`, out: `true`}, 1385 | {in: `4 <= 4`, out: `true`}, 1386 | {in: `4 AND 5`, out: `4 AND 5`}, 1387 | {in: `-1 = 9223372036854775808`, out: `false`}, 1388 | {in: `-1 != 9223372036854775808`, out: `true`}, 1389 | {in: `-1 < 9223372036854775808`, out: `true`}, 1390 | {in: `-1 <= 9223372036854775808`, out: `true`}, 1391 | {in: `-1 > 9223372036854775808`, out: `false`}, 1392 | {in: `-1 >= 9223372036854775808`, out: `false`}, 1393 | {in: `9223372036854775808 = -1`, out: `false`}, 1394 | {in: `9223372036854775808 != -1`, out: `true`}, 1395 | {in: `9223372036854775808 < -1`, out: `false`}, 1396 | {in: `9223372036854775808 <= -1`, out: `false`}, 1397 | {in: `9223372036854775808 > -1`, out: `true`}, 1398 | {in: `9223372036854775808 >= -1`, out: `true`}, 1399 | {in: `9223372036854775808 = 9223372036854775808`, out: `true`}, 1400 | {in: `9223372036854775808 != 9223372036854775808`, out: `false`}, 1401 | {in: `9223372036854775808 < 9223372036854775808`, out: `false`}, 1402 | {in: `9223372036854775808 <= 9223372036854775808`, out: `true`}, 1403 | {in: `9223372036854775808 > 9223372036854775808`, out: `false`}, 1404 | {in: `9223372036854775808 >= 9223372036854775808`, out: `true`}, 1405 | {in: `9223372036854775809 = 9223372036854775808`, out: `false`}, 1406 | {in: `9223372036854775809 != 9223372036854775808`, out: `true`}, 1407 | {in: `9223372036854775809 < 9223372036854775808`, out: `false`}, 1408 | {in: `9223372036854775809 <= 9223372036854775808`, out: `false`}, 1409 | {in: `9223372036854775809 > 9223372036854775808`, out: `true`}, 1410 | {in: `9223372036854775809 >= 9223372036854775808`, out: `true`}, 1411 | {in: `9223372036854775808 / 0`, out: `0`}, 1412 | {in: `9223372036854775808 + 1`, out: `9223372036854775809`}, 1413 | {in: `9223372036854775808 - 1`, out: `9223372036854775807`}, 1414 | {in: `9223372036854775809 - 9223372036854775808`, out: 
`1`}, 1415 | 1416 | // Boolean literals. 1417 | {in: `true`, out: `true`}, 1418 | {in: `false`, out: `false`}, 1419 | {in: `true AND false`, out: `false`}, 1420 | {in: `true OR false`, out: `true`}, 1421 | {in: `true OR (foo = bar AND 1 > 2)`, out: `true`}, 1422 | {in: `(foo = bar AND 1 > 2) OR true`, out: `true`}, 1423 | {in: `false OR (foo = bar AND 1 > 2)`, out: `false`}, 1424 | {in: `(foo = bar AND 1 > 2) OR false`, out: `false`}, 1425 | {in: `true = false`, out: `false`}, 1426 | {in: `true <> false`, out: `true`}, 1427 | {in: `true + false`, out: `true + false`}, 1428 | 1429 | // Time literals with now(). 1430 | {in: `now() + 2h`, out: `'2000-01-01T02:00:00Z'`}, 1431 | {in: `now() / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`}, 1432 | {in: `4µ + now()`, out: `'2000-01-01T00:00:00.000004Z'`}, 1433 | {in: `now() + 2000000000`, out: `'2000-01-01T00:00:02Z'`}, 1434 | {in: `2000000000 + now()`, out: `'2000-01-01T00:00:02Z'`}, 1435 | {in: `now() - 2000000000`, out: `'1999-12-31T23:59:58Z'`}, 1436 | {in: `now() = now()`, out: `true`}, 1437 | {in: `now() <> now()`, out: `false`}, 1438 | {in: `now() < now() + 1h`, out: `true`}, 1439 | {in: `now() <= now() + 1h`, out: `true`}, 1440 | {in: `now() >= now() - 1h`, out: `true`}, 1441 | {in: `now() > now() - 1h`, out: `true`}, 1442 | {in: `now() - (now() - 60s)`, out: `1m`}, 1443 | {in: `now() AND now()`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`}, 1444 | {in: `946684800000000000 + 2h`, out: `'2000-01-01T02:00:00Z'`}, 1445 | 1446 | // Time literals. 
1447 | {in: `'2000-01-01T00:00:00Z' + 2h`, out: `'2000-01-01T02:00:00Z'`}, 1448 | {in: `'2000-01-01T00:00:00Z' / 2h`, out: `'2000-01-01T00:00:00Z' / 2h`}, 1449 | {in: `4µ + '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:00.000004Z'`}, 1450 | {in: `'2000-01-01T00:00:00Z' + 2000000000`, out: `'2000-01-01T00:00:02Z'`}, 1451 | {in: `2000000000 + '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:02Z'`}, 1452 | {in: `'2000-01-01T00:00:00Z' - 2000000000`, out: `'1999-12-31T23:59:58Z'`}, 1453 | {in: `'2000-01-01T00:00:00Z' = '2000-01-01T00:00:00Z'`, out: `true`}, 1454 | {in: `'2000-01-01T00:00:00.000000000Z' = '2000-01-01T00:00:00Z'`, out: `true`}, 1455 | {in: `'2000-01-01T00:00:00Z' <> '2000-01-01T00:00:00Z'`, out: `false`}, 1456 | {in: `'2000-01-01T00:00:00.000000000Z' <> '2000-01-01T00:00:00Z'`, out: `false`}, 1457 | {in: `'2000-01-01T00:00:00Z' < '2000-01-01T00:00:00Z' + 1h`, out: `true`}, 1458 | {in: `'2000-01-01T00:00:00.000000000Z' < '2000-01-01T00:00:00Z' + 1h`, out: `true`}, 1459 | {in: `'2000-01-01T00:00:00Z' <= '2000-01-01T00:00:00Z' + 1h`, out: `true`}, 1460 | {in: `'2000-01-01T00:00:00.000000000Z' <= '2000-01-01T00:00:00Z' + 1h`, out: `true`}, 1461 | {in: `'2000-01-01T00:00:00Z' > '2000-01-01T00:00:00Z' - 1h`, out: `true`}, 1462 | {in: `'2000-01-01T00:00:00.000000000Z' > '2000-01-01T00:00:00Z' - 1h`, out: `true`}, 1463 | {in: `'2000-01-01T00:00:00Z' >= '2000-01-01T00:00:00Z' - 1h`, out: `true`}, 1464 | {in: `'2000-01-01T00:00:00.000000000Z' >= '2000-01-01T00:00:00Z' - 1h`, out: `true`}, 1465 | {in: `'2000-01-01T00:00:00Z' - ('2000-01-01T00:00:00Z' - 60s)`, out: `1m`}, 1466 | {in: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`, out: `'2000-01-01T00:00:00Z' AND '2000-01-01T00:00:00Z'`}, 1467 | 1468 | // Duration literals. 
1469 | {in: `10m + 1h - 60s`, out: `69m`}, 1470 | {in: `(10m / 2) * 5`, out: `25m`}, 1471 | {in: `60s = 1m`, out: `true`}, 1472 | {in: `60s <> 1m`, out: `false`}, 1473 | {in: `60s < 1h`, out: `true`}, 1474 | {in: `60s <= 1h`, out: `true`}, 1475 | {in: `60s > 12s`, out: `true`}, 1476 | {in: `60s >= 1m`, out: `true`}, 1477 | {in: `60s AND 1m`, out: `1m AND 1m`}, 1478 | {in: `60m / 0`, out: `0s`}, 1479 | {in: `60m + 50`, out: `1h + 50`}, 1480 | 1481 | // String literals. 1482 | {in: `'foo' + 'bar'`, out: `'foobar'`}, 1483 | 1484 | // Variable references. 1485 | {in: `foo`, out: `'bar'`, data: map[string]interface{}{"foo": "bar"}}, 1486 | {in: `foo = 'bar'`, out: `true`, data: map[string]interface{}{"foo": "bar"}}, 1487 | {in: `foo = 'bar'`, out: `false`, data: map[string]interface{}{"foo": nil}}, 1488 | {in: `foo <> 'bar'`, out: `false`, data: map[string]interface{}{"foo": nil}}, 1489 | } { 1490 | // Fold expression. 1491 | expr := influxql.Reduce(MustParseExpr(tt.in), influxql.MultiValuer( 1492 | tt.data, 1493 | &influxql.NowValuer{Now: now}, 1494 | )) 1495 | 1496 | // Compare with expected output. 1497 | if out := expr.String(); tt.out != out { 1498 | t.Errorf("%d. 
%s: unexpected expr:\n\nexp=%s\n\ngot=%s\n\n", i, tt.in, tt.out, out) 1499 | continue 1500 | } 1501 | } 1502 | } 1503 | 1504 | func Test_fieldsNames(t *testing.T) { 1505 | for _, test := range []struct { 1506 | in []string 1507 | out []string 1508 | alias []string 1509 | }{ 1510 | { //case: binary expr(valRef) 1511 | in: []string{"value+value"}, 1512 | out: []string{"value", "value"}, 1513 | alias: []string{"value_value"}, 1514 | }, 1515 | { //case: binary expr + valRef 1516 | in: []string{"value+value", "temperature"}, 1517 | out: []string{"value", "value", "temperature"}, 1518 | alias: []string{"value_value", "temperature"}, 1519 | }, 1520 | { //case: aggregate expr 1521 | in: []string{"mean(value)"}, 1522 | out: []string{"mean"}, 1523 | alias: []string{"mean"}, 1524 | }, 1525 | { //case: binary expr(aggregate expr) 1526 | in: []string{"mean(value) + max(value)"}, 1527 | out: []string{"value", "value"}, 1528 | alias: []string{"mean_max"}, 1529 | }, 1530 | { //case: binary expr(aggregate expr) + valRef 1531 | in: []string{"mean(value) + max(value)", "temperature"}, 1532 | out: []string{"value", "value", "temperature"}, 1533 | alias: []string{"mean_max", "temperature"}, 1534 | }, 1535 | { //case: mixed aggregate and varRef 1536 | in: []string{"mean(value) + temperature"}, 1537 | out: []string{"value", "temperature"}, 1538 | alias: []string{"mean_temperature"}, 1539 | }, 1540 | { //case: ParenExpr(varRef) 1541 | in: []string{"(value)"}, 1542 | out: []string{"value"}, 1543 | alias: []string{"value"}, 1544 | }, 1545 | { //case: ParenExpr(varRef + varRef) 1546 | in: []string{"(value + value)"}, 1547 | out: []string{"value", "value"}, 1548 | alias: []string{"value_value"}, 1549 | }, 1550 | { //case: ParenExpr(aggregate) 1551 | in: []string{"(mean(value))"}, 1552 | out: []string{"value"}, 1553 | alias: []string{"mean"}, 1554 | }, 1555 | { //case: ParenExpr(aggregate + aggregate) 1556 | in: []string{"(mean(value) + max(value))"}, 1557 | out: []string{"value", "value"}, 
1558 | alias: []string{"mean_max"}, 1559 | }, 1560 | } { 1561 | fields := influxql.Fields{} 1562 | for _, s := range test.in { 1563 | expr := MustParseExpr(s) 1564 | fields = append(fields, &influxql.Field{Expr: expr}) 1565 | } 1566 | got := fields.Names() 1567 | if !reflect.DeepEqual(got, test.out) { 1568 | t.Errorf("get fields name:\nexp=%v\ngot=%v\n", test.out, got) 1569 | } 1570 | alias := fields.AliasNames() 1571 | if !reflect.DeepEqual(alias, test.alias) { 1572 | t.Errorf("get fields alias name:\nexp=%v\ngot=%v\n", test.alias, alias) 1573 | } 1574 | } 1575 | 1576 | } 1577 | 1578 | func TestSelect_ColumnNames(t *testing.T) { 1579 | for i, tt := range []struct { 1580 | stmt *influxql.SelectStatement 1581 | columns []string 1582 | }{ 1583 | { 1584 | stmt: &influxql.SelectStatement{ 1585 | Fields: influxql.Fields([]*influxql.Field{ 1586 | {Expr: &influxql.VarRef{Val: "value"}}, 1587 | }), 1588 | }, 1589 | columns: []string{"time", "value"}, 1590 | }, 1591 | { 1592 | stmt: &influxql.SelectStatement{ 1593 | Fields: influxql.Fields([]*influxql.Field{ 1594 | {Expr: &influxql.VarRef{Val: "value"}}, 1595 | {Expr: &influxql.VarRef{Val: "value"}}, 1596 | {Expr: &influxql.VarRef{Val: "value_1"}}, 1597 | }), 1598 | }, 1599 | columns: []string{"time", "value", "value_1", "value_1_1"}, 1600 | }, 1601 | { 1602 | stmt: &influxql.SelectStatement{ 1603 | Fields: influxql.Fields([]*influxql.Field{ 1604 | {Expr: &influxql.VarRef{Val: "value"}}, 1605 | {Expr: &influxql.VarRef{Val: "value_1"}}, 1606 | {Expr: &influxql.VarRef{Val: "value"}}, 1607 | }), 1608 | }, 1609 | columns: []string{"time", "value", "value_1", "value_2"}, 1610 | }, 1611 | { 1612 | stmt: &influxql.SelectStatement{ 1613 | Fields: influxql.Fields([]*influxql.Field{ 1614 | {Expr: &influxql.VarRef{Val: "value"}}, 1615 | {Expr: &influxql.VarRef{Val: "total"}, Alias: "value"}, 1616 | {Expr: &influxql.VarRef{Val: "value"}}, 1617 | }), 1618 | }, 1619 | columns: []string{"time", "value_1", "value", "value_2"}, 1620 | }, 
1621 | { 1622 | stmt: &influxql.SelectStatement{ 1623 | Fields: influxql.Fields([]*influxql.Field{ 1624 | {Expr: &influxql.VarRef{Val: "value"}}, 1625 | }), 1626 | TimeAlias: "timestamp", 1627 | }, 1628 | columns: []string{"timestamp", "value"}, 1629 | }, 1630 | } { 1631 | columns := tt.stmt.ColumnNames() 1632 | if !reflect.DeepEqual(columns, tt.columns) { 1633 | t.Errorf("%d. expected %s, got %s", i, tt.columns, columns) 1634 | } 1635 | } 1636 | } 1637 | 1638 | func TestSelect_Privileges(t *testing.T) { 1639 | stmt := &influxql.SelectStatement{ 1640 | Target: &influxql.Target{ 1641 | Measurement: &influxql.Measurement{Database: "db2"}, 1642 | }, 1643 | Sources: []influxql.Source{ 1644 | &influxql.Measurement{Database: "db0"}, 1645 | &influxql.Measurement{Database: "db1"}, 1646 | }, 1647 | } 1648 | 1649 | exp := influxql.ExecutionPrivileges{ 1650 | influxql.ExecutionPrivilege{Name: "db0", Privilege: influxql.ReadPrivilege}, 1651 | influxql.ExecutionPrivilege{Name: "db1", Privilege: influxql.ReadPrivilege}, 1652 | influxql.ExecutionPrivilege{Name: "db2", Privilege: influxql.WritePrivilege}, 1653 | } 1654 | 1655 | got, err := stmt.RequiredPrivileges() 1656 | if err != nil { 1657 | t.Fatal(err) 1658 | } 1659 | 1660 | if !reflect.DeepEqual(exp, got) { 1661 | t.Errorf("exp: %v, got: %v", exp, got) 1662 | } 1663 | } 1664 | 1665 | func TestSelect_SubqueryPrivileges(t *testing.T) { 1666 | stmt := &influxql.SelectStatement{ 1667 | Target: &influxql.Target{ 1668 | Measurement: &influxql.Measurement{Database: "db2"}, 1669 | }, 1670 | Sources: []influxql.Source{ 1671 | &influxql.Measurement{Database: "db0"}, 1672 | &influxql.SubQuery{ 1673 | Statement: &influxql.SelectStatement{ 1674 | Sources: []influxql.Source{ 1675 | &influxql.Measurement{Database: "db1"}, 1676 | }, 1677 | }, 1678 | }, 1679 | }, 1680 | } 1681 | 1682 | exp := influxql.ExecutionPrivileges{ 1683 | influxql.ExecutionPrivilege{Name: "db0", Privilege: influxql.ReadPrivilege}, 1684 | 
influxql.ExecutionPrivilege{Name: "db1", Privilege: influxql.ReadPrivilege}, 1685 | influxql.ExecutionPrivilege{Name: "db2", Privilege: influxql.WritePrivilege}, 1686 | } 1687 | 1688 | got, err := stmt.RequiredPrivileges() 1689 | if err != nil { 1690 | t.Fatal(err) 1691 | } 1692 | 1693 | if !reflect.DeepEqual(exp, got) { 1694 | t.Errorf("exp: %v, got: %v", exp, got) 1695 | } 1696 | } 1697 | 1698 | func TestShow_Privileges(t *testing.T) { 1699 | for _, c := range []struct { 1700 | stmt influxql.Statement 1701 | exp influxql.ExecutionPrivileges 1702 | }{ 1703 | { 1704 | stmt: &influxql.ShowDatabasesStatement{}, 1705 | exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.NoPrivileges}}, 1706 | }, 1707 | { 1708 | stmt: &influxql.ShowFieldKeysStatement{}, 1709 | exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, 1710 | }, 1711 | { 1712 | stmt: &influxql.ShowMeasurementsStatement{}, 1713 | exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, 1714 | }, 1715 | { 1716 | stmt: &influxql.ShowQueriesStatement{}, 1717 | exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, 1718 | }, 1719 | { 1720 | stmt: &influxql.ShowRetentionPoliciesStatement{}, 1721 | exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, 1722 | }, 1723 | { 1724 | stmt: &influxql.ShowSeriesStatement{}, 1725 | exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, 1726 | }, 1727 | { 1728 | stmt: &influxql.ShowShardGroupsStatement{}, 1729 | exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, 1730 | }, 1731 | { 1732 | stmt: &influxql.ShowShardsStatement{}, 1733 | exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, 1734 | }, 1735 | { 1736 | stmt: &influxql.ShowStatsStatement{}, 1737 | exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, 1738 
| }, 1739 | { 1740 | stmt: &influxql.ShowSubscriptionsStatement{}, 1741 | exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, 1742 | }, 1743 | { 1744 | stmt: &influxql.ShowDiagnosticsStatement{}, 1745 | exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, 1746 | }, 1747 | { 1748 | stmt: &influxql.ShowTagKeysStatement{}, 1749 | exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, 1750 | }, 1751 | { 1752 | stmt: &influxql.ShowTagValuesStatement{}, 1753 | exp: influxql.ExecutionPrivileges{{Admin: false, Privilege: influxql.ReadPrivilege}}, 1754 | }, 1755 | { 1756 | stmt: &influxql.ShowUsersStatement{}, 1757 | exp: influxql.ExecutionPrivileges{{Admin: true, Privilege: influxql.AllPrivileges}}, 1758 | }, 1759 | } { 1760 | got, err := c.stmt.RequiredPrivileges() 1761 | if err != nil { 1762 | t.Fatal(err) 1763 | } 1764 | 1765 | if !reflect.DeepEqual(c.exp, got) { 1766 | t.Errorf("exp: %v, got: %v", c.exp, got) 1767 | } 1768 | } 1769 | } 1770 | 1771 | func TestBoundParameter_String(t *testing.T) { 1772 | stmt := &influxql.SelectStatement{ 1773 | IsRawQuery: true, 1774 | Fields: []*influxql.Field{{ 1775 | Expr: &influxql.VarRef{Val: "value"}}}, 1776 | Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, 1777 | Condition: &influxql.BinaryExpr{ 1778 | Op: influxql.GT, 1779 | LHS: &influxql.VarRef{Val: "value"}, 1780 | RHS: &influxql.BoundParameter{Name: "value"}, 1781 | }, 1782 | } 1783 | 1784 | if got, exp := stmt.String(), `SELECT value FROM cpu WHERE value > $value`; got != exp { 1785 | t.Fatalf("stmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", exp, got) 1786 | } 1787 | 1788 | stmt = &influxql.SelectStatement{ 1789 | IsRawQuery: true, 1790 | Fields: []*influxql.Field{{ 1791 | Expr: &influxql.VarRef{Val: "value"}}}, 1792 | Sources: []influxql.Source{&influxql.Measurement{Name: "cpu"}}, 1793 | Condition: &influxql.BinaryExpr{ 1794 | Op: influxql.GT, 1795 | LHS: 
&influxql.VarRef{Val: "value"}, 1796 | RHS: &influxql.BoundParameter{Name: "multi-word value"}, 1797 | }, 1798 | } 1799 | 1800 | if got, exp := stmt.String(), `SELECT value FROM cpu WHERE value > $"multi-word value"`; got != exp { 1801 | t.Fatalf("stmt mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", exp, got) 1802 | } 1803 | } 1804 | 1805 | // This test checks to ensure that we have given thought to the database 1806 | // context required for security checks. If a new statement is added, this 1807 | // test will fail until it is categorized into the correct bucket below. 1808 | func Test_EnforceHasDefaultDatabase(t *testing.T) { 1809 | pkg, err := importer.Default().Import("github.com/influxdata/influxql") 1810 | if err != nil { 1811 | fmt.Printf("error: %s\n", err.Error()) 1812 | return 1813 | } 1814 | statements := []string{} 1815 | 1816 | // this is a list of statements that do not have a database context 1817 | exemptStatements := []string{ 1818 | "CreateDatabaseStatement", 1819 | "CreateUserStatement", 1820 | "DeleteSeriesStatement", 1821 | "DropDatabaseStatement", 1822 | "DropMeasurementStatement", 1823 | "DropSeriesStatement", 1824 | "DropShardStatement", 1825 | "DropUserStatement", 1826 | "ExplainStatement", 1827 | "GrantAdminStatement", 1828 | "KillQueryStatement", 1829 | "RevokeAdminStatement", 1830 | "SelectStatement", 1831 | "SetPasswordUserStatement", 1832 | "ShowContinuousQueriesStatement", 1833 | "ShowDatabasesStatement", 1834 | "ShowDiagnosticsStatement", 1835 | "ShowGrantsForUserStatement", 1836 | "ShowQueriesStatement", 1837 | "ShowShardGroupsStatement", 1838 | "ShowShardsStatement", 1839 | "ShowStatsStatement", 1840 | "ShowSubscriptionsStatement", 1841 | "ShowUsersStatement", 1842 | } 1843 | 1844 | exists := func(stmt string) bool { 1845 | switch stmt { 1846 | // These are functions with the word statement in them, and can be ignored 1847 | case "Statement", "MustParseStatement", "ParseStatement", "RewriteStatement": 1848 | return true 1849 | default: 1850 
| // check the exempt statements 1851 | for _, s := range exemptStatements { 1852 | if s == stmt { 1853 | return true 1854 | } 1855 | } 1856 | // check the statements that passed the interface test for HasDefaultDatabase 1857 | for _, s := range statements { 1858 | if s == stmt { 1859 | return true 1860 | } 1861 | } 1862 | return false 1863 | } 1864 | } 1865 | 1866 | needsHasDefault := []interface{}{ 1867 | &influxql.AlterRetentionPolicyStatement{}, 1868 | &influxql.CreateContinuousQueryStatement{}, 1869 | &influxql.CreateRetentionPolicyStatement{}, 1870 | &influxql.CreateSubscriptionStatement{}, 1871 | &influxql.DeleteStatement{}, 1872 | &influxql.DropContinuousQueryStatement{}, 1873 | &influxql.DropRetentionPolicyStatement{}, 1874 | &influxql.DropSubscriptionStatement{}, 1875 | &influxql.GrantStatement{}, 1876 | &influxql.RevokeStatement{}, 1877 | &influxql.ShowFieldKeysStatement{}, 1878 | &influxql.ShowFieldKeyCardinalityStatement{}, 1879 | &influxql.ShowMeasurementCardinalityStatement{}, 1880 | &influxql.ShowMeasurementsStatement{}, 1881 | &influxql.ShowRetentionPoliciesStatement{}, 1882 | &influxql.ShowSeriesStatement{}, 1883 | &influxql.ShowSeriesCardinalityStatement{}, 1884 | &influxql.ShowTagKeysStatement{}, 1885 | &influxql.ShowTagKeyCardinalityStatement{}, 1886 | &influxql.ShowTagValuesStatement{}, 1887 | &influxql.ShowTagValuesCardinalityStatement{}, 1888 | } 1889 | 1890 | for _, stmt := range needsHasDefault { 1891 | statements = append(statements, strings.TrimPrefix(fmt.Sprintf("%T", stmt), "*influxql.")) 1892 | if _, ok := stmt.(influxql.HasDefaultDatabase); !ok { 1893 | t.Errorf("%T was expected to declare DefaultDatabase method", stmt) 1894 | } 1895 | 1896 | } 1897 | 1898 | for _, declName := range pkg.Scope().Names() { 1899 | if strings.HasSuffix(declName, "Statement") { 1900 | if !exists(declName) { 1901 | t.Errorf("unchecked statement %s. 
please update this test to determine if this statement needs to declare 'DefaultDatabase'", declName) 1902 | } 1903 | } 1904 | } 1905 | } 1906 | 1907 | // MustTimeRange will parse a time range. Panic on error. 1908 | func MustTimeRange(expr influxql.Expr) (min, max time.Time) { 1909 | _, timeRange, err := influxql.ConditionExpr(expr, nil) 1910 | if err != nil { 1911 | panic(err) 1912 | } 1913 | return timeRange.Min, timeRange.Max 1914 | } 1915 | 1916 | // mustParseTime parses an IS0-8601 string. Panic on error. 1917 | func mustParseTime(s string) time.Time { 1918 | t, err := time.Parse(time.RFC3339, s) 1919 | if err != nil { 1920 | panic(err.Error()) 1921 | } 1922 | return t 1923 | } 1924 | 1925 | // FieldMapper is a mockable implementation of influxql.FieldMapper. 1926 | type FieldMapper struct { 1927 | FieldDimensionsFn func(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) 1928 | } 1929 | 1930 | func (fm *FieldMapper) FieldDimensions(m *influxql.Measurement) (fields map[string]influxql.DataType, dimensions map[string]struct{}, err error) { 1931 | return fm.FieldDimensionsFn(m) 1932 | } 1933 | 1934 | func (fm *FieldMapper) MapType(m *influxql.Measurement, field string) influxql.DataType { 1935 | f, d, err := fm.FieldDimensions(m) 1936 | if err != nil { 1937 | return influxql.Unknown 1938 | } 1939 | 1940 | if typ, ok := f[field]; ok { 1941 | return typ 1942 | } 1943 | if _, ok := d[field]; ok { 1944 | return influxql.Tag 1945 | } 1946 | return influxql.Unknown 1947 | } 1948 | 1949 | func (fm *FieldMapper) CallType(name string, args []influxql.DataType) (influxql.DataType, error) { 1950 | switch name { 1951 | case "mean", "median", "integral", "stddev": 1952 | return influxql.Float, nil 1953 | case "count": 1954 | return influxql.Integer, nil 1955 | case "elapsed": 1956 | return influxql.Integer, nil 1957 | default: 1958 | return args[0], nil 1959 | } 1960 | } 1961 | 1962 | // BenchmarkExprNames benchmarks how 
long it takes to run ExprNames. 1963 | func BenchmarkExprNames(b *testing.B) { 1964 | exprs := make([]string, 100) 1965 | for i := range exprs { 1966 | exprs[i] = fmt.Sprintf("host = 'server%02d'", i) 1967 | } 1968 | condition := MustParseExpr(strings.Join(exprs, " OR ")) 1969 | 1970 | b.ResetTimer() 1971 | b.ReportAllocs() 1972 | 1973 | for i := 0; i < b.N; i++ { 1974 | refs := influxql.ExprNames(condition) 1975 | if have, want := refs, []influxql.VarRef{{Val: "host"}}; !reflect.DeepEqual(have, want) { 1976 | b.Fatalf("unexpected expression names: have=%s want=%s", have, want) 1977 | } 1978 | } 1979 | } 1980 | 1981 | type FunctionValuer struct{} 1982 | 1983 | var _ influxql.CallValuer = FunctionValuer{} 1984 | 1985 | func (FunctionValuer) Value(key string) (interface{}, bool) { 1986 | return nil, false 1987 | } 1988 | 1989 | func (FunctionValuer) Call(name string, args []interface{}) (interface{}, bool) { 1990 | switch name { 1991 | case "abs": 1992 | arg0 := args[0].(float64) 1993 | return math.Abs(arg0), true 1994 | case "pow": 1995 | arg0, arg1 := args[0].(float64), args[1].(int64) 1996 | return math.Pow(arg0, float64(arg1)), true 1997 | default: 1998 | return nil, false 1999 | } 2000 | } 2001 | 2002 | // BenchmarkEval benchmarks how long it takes to run Eval. 
2003 | func BenchmarkEval(b *testing.B) { 2004 | expr := MustParseExpr(`f1 + abs(f2) / pow(f3, 3)`) 2005 | valuer := influxql.ValuerEval{ 2006 | Valuer: influxql.MultiValuer( 2007 | influxql.MapValuer(map[string]interface{}{ 2008 | "f1": float64(15), 2009 | "f2": float64(-3), 2010 | "f3": float64(2), 2011 | }), 2012 | FunctionValuer{}, 2013 | ), 2014 | } 2015 | 2016 | b.ReportAllocs() 2017 | for i := 0; i < b.N; i++ { 2018 | valuer.Eval(expr) 2019 | } 2020 | } 2021 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package influxql implements a parser for the InfluxDB query language. 3 | 4 | InfluxQL is a DML and DDL language for the InfluxDB time series database. 5 | It provides the ability to query for aggregate statistics as well as create 6 | and configure the InfluxDB server. 7 | 8 | See https://docs.influxdata.com/influxdb/latest/query_language/ 9 | for a reference on using InfluxQL. 
10 | 11 | */ 12 | package influxql 13 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/influxdata/influxql 2 | 3 | go 1.18 4 | 5 | require google.golang.org/protobuf v1.33.0 6 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= 2 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= 3 | google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= 4 | google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= 5 | -------------------------------------------------------------------------------- /influxql.go: -------------------------------------------------------------------------------- 1 | package influxql // import "github.com/influxdata/influxql" 2 | 3 | //go:generate protoc --go_opt=paths=source_relative --go_out=. internal/influxdata_influxql_internal.proto 4 | -------------------------------------------------------------------------------- /internal/influxdata_influxql_internal.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // versions: 3 | // protoc-gen-go v1.27.1 4 | // protoc v3.17.3 5 | // source: internal/influxdata_influxql_internal.proto 6 | 7 | package influxql 8 | 9 | import ( 10 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 11 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 12 | reflect "reflect" 13 | sync "sync" 14 | ) 15 | 16 | const ( 17 | // Verify that this generated code is sufficiently up-to-date. 
18 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 19 | // Verify that runtime/protoimpl is sufficiently up-to-date. 20 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 21 | ) 22 | 23 | type Measurements struct { 24 | state protoimpl.MessageState 25 | sizeCache protoimpl.SizeCache 26 | unknownFields protoimpl.UnknownFields 27 | 28 | Items []*Measurement `protobuf:"bytes,1,rep,name=Items" json:"Items,omitempty"` 29 | } 30 | 31 | func (x *Measurements) Reset() { 32 | *x = Measurements{} 33 | if protoimpl.UnsafeEnabled { 34 | mi := &file_internal_influxdata_influxql_internal_proto_msgTypes[0] 35 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 36 | ms.StoreMessageInfo(mi) 37 | } 38 | } 39 | 40 | func (x *Measurements) String() string { 41 | return protoimpl.X.MessageStringOf(x) 42 | } 43 | 44 | func (*Measurements) ProtoMessage() {} 45 | 46 | func (x *Measurements) ProtoReflect() protoreflect.Message { 47 | mi := &file_internal_influxdata_influxql_internal_proto_msgTypes[0] 48 | if protoimpl.UnsafeEnabled && x != nil { 49 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 50 | if ms.LoadMessageInfo() == nil { 51 | ms.StoreMessageInfo(mi) 52 | } 53 | return ms 54 | } 55 | return mi.MessageOf(x) 56 | } 57 | 58 | // Deprecated: Use Measurements.ProtoReflect.Descriptor instead. 
59 | func (*Measurements) Descriptor() ([]byte, []int) { 60 | return file_internal_influxdata_influxql_internal_proto_rawDescGZIP(), []int{0} 61 | } 62 | 63 | func (x *Measurements) GetItems() []*Measurement { 64 | if x != nil { 65 | return x.Items 66 | } 67 | return nil 68 | } 69 | 70 | type Measurement struct { 71 | state protoimpl.MessageState 72 | sizeCache protoimpl.SizeCache 73 | unknownFields protoimpl.UnknownFields 74 | 75 | Database *string `protobuf:"bytes,1,opt,name=Database" json:"Database,omitempty"` 76 | RetentionPolicy *string `protobuf:"bytes,2,opt,name=RetentionPolicy" json:"RetentionPolicy,omitempty"` 77 | Name *string `protobuf:"bytes,3,opt,name=Name" json:"Name,omitempty"` 78 | Regex *string `protobuf:"bytes,4,opt,name=Regex" json:"Regex,omitempty"` 79 | IsTarget *bool `protobuf:"varint,5,opt,name=IsTarget" json:"IsTarget,omitempty"` 80 | } 81 | 82 | func (x *Measurement) Reset() { 83 | *x = Measurement{} 84 | if protoimpl.UnsafeEnabled { 85 | mi := &file_internal_influxdata_influxql_internal_proto_msgTypes[1] 86 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 87 | ms.StoreMessageInfo(mi) 88 | } 89 | } 90 | 91 | func (x *Measurement) String() string { 92 | return protoimpl.X.MessageStringOf(x) 93 | } 94 | 95 | func (*Measurement) ProtoMessage() {} 96 | 97 | func (x *Measurement) ProtoReflect() protoreflect.Message { 98 | mi := &file_internal_influxdata_influxql_internal_proto_msgTypes[1] 99 | if protoimpl.UnsafeEnabled && x != nil { 100 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 101 | if ms.LoadMessageInfo() == nil { 102 | ms.StoreMessageInfo(mi) 103 | } 104 | return ms 105 | } 106 | return mi.MessageOf(x) 107 | } 108 | 109 | // Deprecated: Use Measurement.ProtoReflect.Descriptor instead. 
110 | func (*Measurement) Descriptor() ([]byte, []int) { 111 | return file_internal_influxdata_influxql_internal_proto_rawDescGZIP(), []int{1} 112 | } 113 | 114 | func (x *Measurement) GetDatabase() string { 115 | if x != nil && x.Database != nil { 116 | return *x.Database 117 | } 118 | return "" 119 | } 120 | 121 | func (x *Measurement) GetRetentionPolicy() string { 122 | if x != nil && x.RetentionPolicy != nil { 123 | return *x.RetentionPolicy 124 | } 125 | return "" 126 | } 127 | 128 | func (x *Measurement) GetName() string { 129 | if x != nil && x.Name != nil { 130 | return *x.Name 131 | } 132 | return "" 133 | } 134 | 135 | func (x *Measurement) GetRegex() string { 136 | if x != nil && x.Regex != nil { 137 | return *x.Regex 138 | } 139 | return "" 140 | } 141 | 142 | func (x *Measurement) GetIsTarget() bool { 143 | if x != nil && x.IsTarget != nil { 144 | return *x.IsTarget 145 | } 146 | return false 147 | } 148 | 149 | var File_internal_influxdata_influxql_internal_proto protoreflect.FileDescriptor 150 | 151 | var file_internal_influxdata_influxql_internal_proto_rawDesc = []byte{ 152 | 0x0a, 0x2b, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x69, 0x6e, 0x66, 0x6c, 0x75, 153 | 0x78, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x71, 0x6c, 0x5f, 0x69, 154 | 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x69, 155 | 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x71, 0x6c, 0x22, 0x3b, 0x0a, 0x0c, 0x4d, 0x65, 0x61, 0x73, 0x75, 156 | 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x05, 0x49, 0x74, 0x65, 0x6d, 0x73, 157 | 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x71, 158 | 0x6c, 0x2e, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x49, 159 | 0x74, 0x65, 0x6d, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x0b, 0x4d, 0x65, 0x61, 0x73, 0x75, 0x72, 0x65, 160 | 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 
0x62, 0x61, 0x73, 0x65, 161 | 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 162 | 0x12, 0x28, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 163 | 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 164 | 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 165 | 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 166 | 0x0a, 0x05, 0x52, 0x65, 0x67, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x52, 167 | 0x65, 0x67, 0x65, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x49, 0x73, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 168 | 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x49, 0x73, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 169 | 0x42, 0x20, 0x5a, 0x1e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x69, 170 | 0x6e, 0x66, 0x6c, 0x75, 0x78, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x69, 0x6e, 0x66, 0x6c, 0x75, 0x78, 171 | 0x71, 0x6c, 172 | } 173 | 174 | var ( 175 | file_internal_influxdata_influxql_internal_proto_rawDescOnce sync.Once 176 | file_internal_influxdata_influxql_internal_proto_rawDescData = file_internal_influxdata_influxql_internal_proto_rawDesc 177 | ) 178 | 179 | func file_internal_influxdata_influxql_internal_proto_rawDescGZIP() []byte { 180 | file_internal_influxdata_influxql_internal_proto_rawDescOnce.Do(func() { 181 | file_internal_influxdata_influxql_internal_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_influxdata_influxql_internal_proto_rawDescData) 182 | }) 183 | return file_internal_influxdata_influxql_internal_proto_rawDescData 184 | } 185 | 186 | var file_internal_influxdata_influxql_internal_proto_msgTypes = make([]protoimpl.MessageInfo, 2) 187 | var file_internal_influxdata_influxql_internal_proto_goTypes = []interface{}{ 188 | (*Measurements)(nil), // 0: influxql.Measurements 189 | (*Measurement)(nil), // 1: 
influxql.Measurement 190 | } 191 | var file_internal_influxdata_influxql_internal_proto_depIdxs = []int32{ 192 | 1, // 0: influxql.Measurements.Items:type_name -> influxql.Measurement 193 | 1, // [1:1] is the sub-list for method output_type 194 | 1, // [1:1] is the sub-list for method input_type 195 | 1, // [1:1] is the sub-list for extension type_name 196 | 1, // [1:1] is the sub-list for extension extendee 197 | 0, // [0:1] is the sub-list for field type_name 198 | } 199 | 200 | func init() { file_internal_influxdata_influxql_internal_proto_init() } 201 | func file_internal_influxdata_influxql_internal_proto_init() { 202 | if File_internal_influxdata_influxql_internal_proto != nil { 203 | return 204 | } 205 | if !protoimpl.UnsafeEnabled { 206 | file_internal_influxdata_influxql_internal_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { 207 | switch v := v.(*Measurements); i { 208 | case 0: 209 | return &v.state 210 | case 1: 211 | return &v.sizeCache 212 | case 2: 213 | return &v.unknownFields 214 | default: 215 | return nil 216 | } 217 | } 218 | file_internal_influxdata_influxql_internal_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { 219 | switch v := v.(*Measurement); i { 220 | case 0: 221 | return &v.state 222 | case 1: 223 | return &v.sizeCache 224 | case 2: 225 | return &v.unknownFields 226 | default: 227 | return nil 228 | } 229 | } 230 | } 231 | type x struct{} 232 | out := protoimpl.TypeBuilder{ 233 | File: protoimpl.DescBuilder{ 234 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 235 | RawDescriptor: file_internal_influxdata_influxql_internal_proto_rawDesc, 236 | NumEnums: 0, 237 | NumMessages: 2, 238 | NumExtensions: 0, 239 | NumServices: 0, 240 | }, 241 | GoTypes: file_internal_influxdata_influxql_internal_proto_goTypes, 242 | DependencyIndexes: file_internal_influxdata_influxql_internal_proto_depIdxs, 243 | MessageInfos: file_internal_influxdata_influxql_internal_proto_msgTypes, 244 | }.Build() 245 | 
File_internal_influxdata_influxql_internal_proto = out.File 246 | file_internal_influxdata_influxql_internal_proto_rawDesc = nil 247 | file_internal_influxdata_influxql_internal_proto_goTypes = nil 248 | file_internal_influxdata_influxql_internal_proto_depIdxs = nil 249 | } 250 | -------------------------------------------------------------------------------- /internal/influxdata_influxql_internal.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto2"; 2 | package influxql; 3 | option go_package = "github.com/influxdata/influxql"; 4 | 5 | message Measurements { 6 | repeated Measurement Items = 1; 7 | } 8 | 9 | message Measurement { 10 | optional string Database = 1; 11 | optional string RetentionPolicy = 2; 12 | optional string Name = 3; 13 | optional string Regex = 4; 14 | optional bool IsTarget = 5; 15 | } 16 | -------------------------------------------------------------------------------- /params.go: -------------------------------------------------------------------------------- 1 | package influxql 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | "time" 9 | ) 10 | 11 | // Value represents a value that can be bound 12 | // to a parameter when parsing the query. 13 | type Value interface { 14 | TokenType() Token 15 | Value() string 16 | } 17 | 18 | type ( 19 | // Identifier is an identifier value. 20 | Identifier string 21 | 22 | // StringValue is a string literal. 23 | StringValue string 24 | 25 | // RegexValue is a regexp literal. 26 | RegexValue string 27 | 28 | // NumberValue is a number literal. 29 | NumberValue float64 30 | 31 | // IntegerValue is an integer literal. 32 | IntegerValue int64 33 | 34 | // BooleanValue is a boolean literal. 35 | BooleanValue bool 36 | 37 | // DurationValue is a duration literal. 38 | DurationValue string 39 | 40 | // ErrorValue is a special value that returns an error during parsing 41 | // when it is used. 
42 | ErrorValue string 43 | ) 44 | 45 | // BindValue will bind an interface value to its influxql value. 46 | // This method of binding values only supports literals. 47 | func BindValue(v interface{}) Value { 48 | if jv, ok := v.(json.Number); ok { 49 | var err error 50 | v, err = jsonNumberToValue(jv) 51 | if err != nil { 52 | return ErrorValue(err.Error()) 53 | } 54 | } 55 | 56 | switch v := v.(type) { 57 | case float64: 58 | return NumberValue(v) 59 | case int64: 60 | return IntegerValue(v) 61 | case string: 62 | return StringValue(v) 63 | case bool: 64 | return BooleanValue(v) 65 | case map[string]interface{}: 66 | return bindObjectValue(v) 67 | default: 68 | s := fmt.Sprintf("unable to bind parameter with type %T", v) 69 | return ErrorValue(s) 70 | } 71 | } 72 | 73 | // bindObjectValue will bind an object to a value. 74 | func bindObjectValue(m map[string]interface{}) Value { 75 | if len(m) != 1 { 76 | return ErrorValue("bound object parameter value must have exactly one entry") 77 | } 78 | 79 | var ( 80 | k string 81 | v interface{} 82 | ) 83 | for k, v = range m { 84 | // Nothing done here. 
85 | } 86 | 87 | if jv, ok := v.(json.Number); ok { 88 | var err error 89 | v, err = jsonNumberToValue(jv) 90 | if err != nil { 91 | return ErrorValue(err.Error()) 92 | } 93 | } 94 | 95 | switch k { 96 | case "ident", "identifier": 97 | s, ok := v.(string) 98 | if !ok { 99 | return ErrorValue("identifier must be a string value") 100 | } 101 | return Identifier(s) 102 | case "regex": 103 | s, ok := v.(string) 104 | if !ok { 105 | return ErrorValue("regex literal must be a string value") 106 | } 107 | return RegexValue(s) 108 | case "string": 109 | s, ok := v.(string) 110 | if !ok { 111 | return ErrorValue("string literal must be a string value") 112 | } 113 | return StringValue(s) 114 | case "float", "number": 115 | switch f := v.(type) { 116 | case float64: 117 | return NumberValue(f) 118 | case int64: 119 | return NumberValue(f) 120 | default: 121 | return ErrorValue("number literal must be a float value") 122 | } 123 | case "int", "integer": 124 | i, ok := v.(int64) 125 | if !ok { 126 | return ErrorValue("integer literal must be an integer value") 127 | } 128 | return IntegerValue(i) 129 | case "duration": 130 | switch d := v.(type) { 131 | case string: 132 | return DurationValue(d) 133 | case int64: 134 | return DurationValue(FormatDuration(time.Duration(d))) 135 | default: 136 | return ErrorValue("duration literal must be a string or integer value") 137 | } 138 | default: 139 | return ErrorValue(fmt.Sprintf("unknown bind object type: %s", k)) 140 | } 141 | } 142 | 143 | func (v Identifier) TokenType() Token { return IDENT } 144 | func (v Identifier) Value() string { return string(v) } 145 | func (v StringValue) TokenType() Token { return STRING } 146 | func (v StringValue) Value() string { return string(v) } 147 | func (v RegexValue) TokenType() Token { return REGEX } 148 | func (v RegexValue) Value() string { return string(v) } 149 | func (v NumberValue) TokenType() Token { return NUMBER } 150 | func (v NumberValue) Value() string { return 
strconv.FormatFloat(float64(v), 'f', -1, 64) } 151 | func (v IntegerValue) TokenType() Token { return INTEGER } 152 | func (v IntegerValue) Value() string { return strconv.FormatInt(int64(v), 10) } 153 | func (v BooleanValue) TokenType() Token { 154 | if v { 155 | return TRUE 156 | } else { 157 | return FALSE 158 | } 159 | } 160 | func (v BooleanValue) Value() string { return "" } 161 | func (v DurationValue) TokenType() Token { return DURATIONVAL } 162 | func (v DurationValue) Value() string { return string(v) } 163 | func (e ErrorValue) TokenType() Token { return BOUNDPARAM } 164 | func (e ErrorValue) Value() string { return string(e) } 165 | 166 | func jsonNumberToValue(v json.Number) (interface{}, error) { 167 | if strings.Contains(string(v), ".") { 168 | f, err := v.Float64() 169 | if err != nil { 170 | return nil, err 171 | } 172 | return f, nil 173 | } else { 174 | i, err := v.Int64() 175 | if err != nil { 176 | return nil, err 177 | } 178 | return i, nil 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /parse_tree.go: -------------------------------------------------------------------------------- 1 | package influxql 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | var Language = &ParseTree{} 8 | 9 | type ParseTree struct { 10 | Handlers map[Token]func(*Parser) (Statement, error) 11 | Tokens map[Token]*ParseTree 12 | Keys []string 13 | } 14 | 15 | // With passes the current parse tree to a function to allow nested functions. 16 | func (t *ParseTree) With(fn func(*ParseTree)) { 17 | fn(t) 18 | } 19 | 20 | // Group groups together a set of related handlers with a common token prefix. 21 | func (t *ParseTree) Group(tokens ...Token) *ParseTree { 22 | for _, tok := range tokens { 23 | // Look for the parse tree for this token. 24 | if subtree := t.Tokens[tok]; subtree != nil { 25 | t = subtree 26 | continue 27 | } 28 | 29 | // No subtree exists yet. Verify that we don't have a conflicting 30 | // statement. 
31 | if _, conflict := t.Handlers[tok]; conflict { 32 | panic(fmt.Sprintf("conflict for token %s", tok)) 33 | } 34 | 35 | // Create the new parse tree and register it inside of this one for 36 | // later reference. 37 | newT := &ParseTree{} 38 | if t.Tokens == nil { 39 | t.Tokens = make(map[Token]*ParseTree) 40 | } 41 | t.Tokens[tok] = newT 42 | t.Keys = append(t.Keys, tok.String()) 43 | t = newT 44 | } 45 | return t 46 | } 47 | 48 | // Handle registers a handler to be invoked when seeing the given token. 49 | func (t *ParseTree) Handle(tok Token, fn func(*Parser) (Statement, error)) { 50 | // Verify that there is no conflict for this token in this parse tree. 51 | if _, conflict := t.Tokens[tok]; conflict { 52 | panic(fmt.Sprintf("conflict for token %s", tok)) 53 | } 54 | 55 | if _, conflict := t.Handlers[tok]; conflict { 56 | panic(fmt.Sprintf("conflict for token %s", tok)) 57 | } 58 | 59 | if t.Handlers == nil { 60 | t.Handlers = make(map[Token]func(*Parser) (Statement, error)) 61 | } 62 | t.Handlers[tok] = fn 63 | t.Keys = append(t.Keys, tok.String()) 64 | } 65 | 66 | // Parse parses a statement using the language defined in the parse tree. 67 | func (t *ParseTree) Parse(p *Parser) (Statement, error) { 68 | for { 69 | tok, pos, lit := p.ScanIgnoreWhitespace() 70 | if subtree := t.Tokens[tok]; subtree != nil { 71 | t = subtree 72 | continue 73 | } 74 | 75 | if stmt := t.Handlers[tok]; stmt != nil { 76 | return stmt(p) 77 | } 78 | 79 | // There were no registered handlers. Return the valid tokens in the order they were added. 
80 | return nil, newParseError(tokstr(tok, lit), t.Keys, pos) 81 | } 82 | } 83 | 84 | func (t *ParseTree) Clone() *ParseTree { 85 | newT := &ParseTree{} 86 | if t.Handlers != nil { 87 | newT.Handlers = make(map[Token]func(*Parser) (Statement, error), len(t.Handlers)) 88 | for tok, handler := range t.Handlers { 89 | newT.Handlers[tok] = handler 90 | } 91 | } 92 | 93 | if t.Tokens != nil { 94 | newT.Tokens = make(map[Token]*ParseTree, len(t.Tokens)) 95 | for tok, subtree := range t.Tokens { 96 | newT.Tokens[tok] = subtree.Clone() 97 | } 98 | } 99 | return newT 100 | } 101 | 102 | func init() { 103 | Language.Handle(SELECT, func(p *Parser) (Statement, error) { 104 | return p.parseSelectStatement(targetNotRequired) 105 | }) 106 | Language.Handle(DELETE, func(p *Parser) (Statement, error) { 107 | return p.parseDeleteStatement() 108 | }) 109 | Language.Group(SHOW).With(func(show *ParseTree) { 110 | show.Group(CONTINUOUS).Handle(QUERIES, func(p *Parser) (Statement, error) { 111 | return p.parseShowContinuousQueriesStatement() 112 | }) 113 | show.Handle(DATABASES, func(p *Parser) (Statement, error) { 114 | return p.parseShowDatabasesStatement() 115 | }) 116 | show.Handle(DIAGNOSTICS, func(p *Parser) (Statement, error) { 117 | return p.parseShowDiagnosticsStatement() 118 | }) 119 | show.Group(FIELD).With(func(field *ParseTree) { 120 | field.Handle(KEY, func(p *Parser) (Statement, error) { 121 | return p.parseShowFieldKeyCardinalityStatement() 122 | }) 123 | field.Handle(KEYS, func(p *Parser) (Statement, error) { 124 | return p.parseShowFieldKeysStatement() 125 | }) 126 | }) 127 | show.Group(GRANTS).Handle(FOR, func(p *Parser) (Statement, error) { 128 | return p.parseGrantsForUserStatement() 129 | }) 130 | show.Group(MEASUREMENT).Handle(EXACT, func(p *Parser) (Statement, error) { 131 | return p.parseShowMeasurementCardinalityStatement(true) 132 | }) 133 | show.Group(MEASUREMENT).Handle(CARDINALITY, func(p *Parser) (Statement, error) { 134 | return 
p.parseShowMeasurementCardinalityStatement(false) 135 | }) 136 | show.Handle(MEASUREMENTS, func(p *Parser) (Statement, error) { 137 | return p.parseShowMeasurementsStatement() 138 | }) 139 | show.Handle(QUERIES, func(p *Parser) (Statement, error) { 140 | return p.parseShowQueriesStatement() 141 | }) 142 | show.Group(RETENTION).Handle(POLICIES, func(p *Parser) (Statement, error) { 143 | return p.parseShowRetentionPoliciesStatement() 144 | }) 145 | show.Handle(SERIES, func(p *Parser) (Statement, error) { 146 | return p.parseShowSeriesStatement() 147 | }) 148 | show.Group(SHARD).Handle(GROUPS, func(p *Parser) (Statement, error) { 149 | return p.parseShowShardGroupsStatement() 150 | }) 151 | show.Handle(SHARDS, func(p *Parser) (Statement, error) { 152 | return p.parseShowShardsStatement() 153 | }) 154 | show.Handle(STATS, func(p *Parser) (Statement, error) { 155 | return p.parseShowStatsStatement() 156 | }) 157 | show.Handle(SUBSCRIPTIONS, func(p *Parser) (Statement, error) { 158 | return p.parseShowSubscriptionsStatement() 159 | }) 160 | show.Group(TAG).With(func(tag *ParseTree) { 161 | tag.Handle(KEY, func(p *Parser) (Statement, error) { 162 | return p.parseShowTagKeyCardinalityStatement() 163 | }) 164 | tag.Handle(KEYS, func(p *Parser) (Statement, error) { 165 | return p.parseShowTagKeysStatement() 166 | }) 167 | tag.Handle(VALUES, func(p *Parser) (Statement, error) { 168 | return p.parseShowTagValuesStatement() 169 | }) 170 | }) 171 | show.Handle(USERS, func(p *Parser) (Statement, error) { 172 | return p.parseShowUsersStatement() 173 | }) 174 | }) 175 | Language.Group(CREATE).With(func(create *ParseTree) { 176 | create.Group(CONTINUOUS).Handle(QUERY, func(p *Parser) (Statement, error) { 177 | return p.parseCreateContinuousQueryStatement() 178 | }) 179 | create.Handle(DATABASE, func(p *Parser) (Statement, error) { 180 | return p.parseCreateDatabaseStatement() 181 | }) 182 | create.Handle(USER, func(p *Parser) (Statement, error) { 183 | return 
p.parseCreateUserStatement() 184 | }) 185 | create.Group(RETENTION).Handle(POLICY, func(p *Parser) (Statement, error) { 186 | return p.parseCreateRetentionPolicyStatement() 187 | }) 188 | create.Handle(SUBSCRIPTION, func(p *Parser) (Statement, error) { 189 | return p.parseCreateSubscriptionStatement() 190 | }) 191 | }) 192 | Language.Group(DROP).With(func(drop *ParseTree) { 193 | drop.Group(CONTINUOUS).Handle(QUERY, func(p *Parser) (Statement, error) { 194 | return p.parseDropContinuousQueryStatement() 195 | }) 196 | drop.Handle(DATABASE, func(p *Parser) (Statement, error) { 197 | return p.parseDropDatabaseStatement() 198 | }) 199 | drop.Handle(MEASUREMENT, func(p *Parser) (Statement, error) { 200 | return p.parseDropMeasurementStatement() 201 | }) 202 | drop.Group(RETENTION).Handle(POLICY, func(p *Parser) (Statement, error) { 203 | return p.parseDropRetentionPolicyStatement() 204 | }) 205 | drop.Handle(SERIES, func(p *Parser) (Statement, error) { 206 | return p.parseDropSeriesStatement() 207 | }) 208 | drop.Handle(SHARD, func(p *Parser) (Statement, error) { 209 | return p.parseDropShardStatement() 210 | }) 211 | drop.Handle(SUBSCRIPTION, func(p *Parser) (Statement, error) { 212 | return p.parseDropSubscriptionStatement() 213 | }) 214 | drop.Handle(USER, func(p *Parser) (Statement, error) { 215 | return p.parseDropUserStatement() 216 | }) 217 | }) 218 | Language.Handle(EXPLAIN, func(p *Parser) (Statement, error) { 219 | return p.parseExplainStatement() 220 | }) 221 | Language.Handle(GRANT, func(p *Parser) (Statement, error) { 222 | return p.parseGrantStatement() 223 | }) 224 | Language.Handle(REVOKE, func(p *Parser) (Statement, error) { 225 | return p.parseRevokeStatement() 226 | }) 227 | Language.Group(ALTER, RETENTION).Handle(POLICY, func(p *Parser) (Statement, error) { 228 | return p.parseAlterRetentionPolicyStatement() 229 | }) 230 | Language.Group(SET, PASSWORD).Handle(FOR, func(p *Parser) (Statement, error) { 231 | return p.parseSetPasswordUserStatement() 232 
| }) 233 | Language.Group(KILL).Handle(QUERY, func(p *Parser) (Statement, error) { 234 | return p.parseKillQueryStatement() 235 | }) 236 | } 237 | -------------------------------------------------------------------------------- /parse_tree_test.go: -------------------------------------------------------------------------------- 1 | package influxql_test 2 | 3 | import ( 4 | "reflect" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/influxdata/influxql" 9 | ) 10 | 11 | func TestParseTree_Clone(t *testing.T) { 12 | // Clone the default language parse tree and add a new syntax node. 13 | language := influxql.Language.Clone() 14 | language.Group(influxql.CREATE).Handle(influxql.STATS, func(p *influxql.Parser) (influxql.Statement, error) { 15 | return &influxql.ShowStatsStatement{}, nil 16 | }) 17 | 18 | // Create a parser with CREATE STATS and parse the statement. 19 | parser := influxql.NewParser(strings.NewReader(`CREATE STATS`)) 20 | stmt, err := language.Parse(parser) 21 | if err != nil { 22 | t.Fatalf("unexpected error: %s", err) 23 | } else if !reflect.DeepEqual(stmt, &influxql.ShowStatsStatement{}) { 24 | t.Fatalf("unexpected statement returned from parser: %s", stmt) 25 | } 26 | 27 | // Recreate the parser and try parsing with the original parsing. This should fail. 28 | parser = influxql.NewParser(strings.NewReader(`CREATE STATS`)) 29 | if _, err := parser.ParseStatement(); err == nil { 30 | t.Fatal("expected error") 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /sanitize.go: -------------------------------------------------------------------------------- 1 | package influxql 2 | 3 | import ( 4 | "regexp" 5 | "strings" 6 | ) 7 | 8 | var ( 9 | sanitizeSetPassword = regexp.MustCompile(`(?i)password\s+for[^=]*=\s+(["']?[^\s"]+["']?)`) 10 | 11 | sanitizeCreatePassword = regexp.MustCompile(`(?i)with\s+password\s+(["']?[^\s"]+["']?)`) 12 | ) 13 | 14 | // Sanitize attempts to sanitize passwords out of a raw query. 
15 | // It looks for patterns that may be related to the SET PASSWORD and CREATE USER 16 | // statements and will redact the password that should be there. It will attempt 17 | // to redact information from common invalid queries too, but it's not guaranteed 18 | // to succeed on improper queries. 19 | // 20 | // This function works on the raw query and attempts to retain the original input 21 | // as much as possible. 22 | func Sanitize(query string) string { 23 | if matches := sanitizeSetPassword.FindAllStringSubmatchIndex(query, -1); matches != nil { 24 | var buf strings.Builder 25 | i := 0 26 | for _, match := range matches { 27 | buf.WriteString(query[i:match[2]]) 28 | buf.WriteString("[REDACTED]") 29 | i = match[3] 30 | } 31 | buf.WriteString(query[i:]) 32 | query = buf.String() 33 | } 34 | 35 | if matches := sanitizeCreatePassword.FindAllStringSubmatchIndex(query, -1); matches != nil { 36 | var buf strings.Builder 37 | i := 0 38 | for _, match := range matches { 39 | buf.WriteString(query[i:match[2]]) 40 | buf.WriteString("[REDACTED]") 41 | i = match[3] 42 | } 43 | buf.WriteString(query[i:]) 44 | query = buf.String() 45 | } 46 | return query 47 | } 48 | -------------------------------------------------------------------------------- /sanitize_test.go: -------------------------------------------------------------------------------- 1 | package influxql_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/influxdata/influxql" 7 | ) 8 | 9 | func TestSanitize(t *testing.T) { 10 | var tests = []struct { 11 | s string 12 | stmt string 13 | }{ 14 | // Proper statements that should be redacted. 15 | { 16 | s: `create user "admin" with password 'admin'`, 17 | stmt: `create user "admin" with password [REDACTED]`, 18 | }, 19 | { 20 | s: `set password for "admin" = 'admin'`, 21 | stmt: `set password for "admin" = [REDACTED]`, 22 | }, 23 | 24 | // Common invalid statements that should still be redacted. 
25 | { 26 | s: `create user "admin" with password "admin"`, 27 | stmt: `create user "admin" with password [REDACTED]`, 28 | }, 29 | { 30 | s: `set password for "admin" = "admin"`, 31 | stmt: `set password for "admin" = [REDACTED]`, 32 | }, 33 | } 34 | 35 | for i, tt := range tests { 36 | stmt := influxql.Sanitize(tt.s) 37 | if tt.stmt != stmt { 38 | t.Errorf("%d. %q\n\nsanitize mismatch:\n\nexp=%#v\n\ngot=%#v\n\n", i, tt.s, tt.stmt, stmt) 39 | } 40 | } 41 | } 42 | 43 | func BenchmarkSanitize(b *testing.B) { 44 | b.ReportAllocs() 45 | q := `create user "admin" with password 'admin'; set password for "admin" = 'admin'` 46 | for i := 0; i < b.N; i++ { 47 | influxql.Sanitize(q) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /scanner.go: -------------------------------------------------------------------------------- 1 | package influxql 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "strings" 10 | ) 11 | 12 | // Scanner represents a lexical scanner for InfluxQL. 13 | type Scanner struct { 14 | r *reader 15 | } 16 | 17 | // NewScanner returns a new instance of Scanner. 18 | func NewScanner(r io.Reader) *Scanner { 19 | return &Scanner{r: &reader{r: bufio.NewReader(r)}} 20 | } 21 | 22 | // Scan returns the next token and position from the underlying reader. 23 | // Also returns the literal text read for strings, numbers, and duration tokens 24 | // since these token types can have different literal representations. 25 | func (s *Scanner) Scan() (tok Token, pos Pos, lit string) { 26 | // Read next code point. 27 | ch0, pos := s.r.read() 28 | 29 | // If we see whitespace then consume all contiguous whitespace. 30 | // If we see a letter, or certain acceptable special characters, then consume 31 | // as an ident or reserved word. 
32 | if isWhitespace(ch0) { 33 | return s.scanWhitespace() 34 | } else if isLetter(ch0) || ch0 == '_' { 35 | s.r.unread() 36 | return s.scanIdent(true) 37 | } else if isDigit(ch0) { 38 | return s.scanNumber() 39 | } 40 | 41 | // Otherwise parse individual characters. 42 | switch ch0 { 43 | case eof: 44 | return EOF, pos, "" 45 | case '"': 46 | s.r.unread() 47 | return s.scanIdent(true) 48 | case '\'': 49 | return s.scanString() 50 | case '.': 51 | ch1, _ := s.r.read() 52 | s.r.unread() 53 | if isDigit(ch1) { 54 | return s.scanNumber() 55 | } 56 | return DOT, pos, "" 57 | case '$': 58 | tok, _, lit = s.scanIdent(false) 59 | if tok != IDENT { 60 | return tok, pos, "$" + lit 61 | } 62 | return BOUNDPARAM, pos, "$" + lit 63 | case '+': 64 | return ADD, pos, "" 65 | case '-': 66 | ch1, _ := s.r.read() 67 | if ch1 == '-' { 68 | s.skipUntilNewline() 69 | return COMMENT, pos, "" 70 | } 71 | s.r.unread() 72 | return SUB, pos, "" 73 | case '*': 74 | return MUL, pos, "" 75 | case '/': 76 | ch1, _ := s.r.read() 77 | if ch1 == '*' { 78 | if err := s.skipUntilEndComment(); err != nil { 79 | return ILLEGAL, pos, "" 80 | } 81 | return COMMENT, pos, "" 82 | } else { 83 | s.r.unread() 84 | } 85 | return DIV, pos, "" 86 | case '%': 87 | return MOD, pos, "" 88 | case '&': 89 | return BITWISE_AND, pos, "" 90 | case '|': 91 | return BITWISE_OR, pos, "" 92 | case '^': 93 | return BITWISE_XOR, pos, "" 94 | case '=': 95 | if ch1, _ := s.r.read(); ch1 == '~' { 96 | return EQREGEX, pos, "" 97 | } 98 | s.r.unread() 99 | return EQ, pos, "" 100 | case '!': 101 | if ch1, _ := s.r.read(); ch1 == '=' { 102 | return NEQ, pos, "" 103 | } else if ch1 == '~' { 104 | return NEQREGEX, pos, "" 105 | } 106 | s.r.unread() 107 | case '>': 108 | if ch1, _ := s.r.read(); ch1 == '=' { 109 | return GTE, pos, "" 110 | } 111 | s.r.unread() 112 | return GT, pos, "" 113 | case '<': 114 | if ch1, _ := s.r.read(); ch1 == '=' { 115 | return LTE, pos, "" 116 | } else if ch1 == '>' { 117 | return NEQ, pos, "" 118 | } 
119 | s.r.unread() 120 | return LT, pos, "" 121 | case '(': 122 | return LPAREN, pos, "" 123 | case ')': 124 | return RPAREN, pos, "" 125 | case ',': 126 | return COMMA, pos, "" 127 | case ';': 128 | return SEMICOLON, pos, "" 129 | case ':': 130 | if ch1, _ := s.r.read(); ch1 == ':' { 131 | return DOUBLECOLON, pos, "" 132 | } 133 | s.r.unread() 134 | return COLON, pos, "" 135 | } 136 | 137 | return ILLEGAL, pos, string(ch0) 138 | } 139 | 140 | // scanWhitespace consumes the current rune and all contiguous whitespace. 141 | func (s *Scanner) scanWhitespace() (tok Token, pos Pos, lit string) { 142 | // Create a buffer and read the current character into it. 143 | var buf strings.Builder 144 | ch, pos := s.r.curr() 145 | _, _ = buf.WriteRune(ch) 146 | 147 | // Read every subsequent whitespace character into the buffer. 148 | // Non-whitespace characters and EOF will cause the loop to exit. 149 | for { 150 | ch, _ = s.r.read() 151 | if ch == eof { 152 | break 153 | } else if !isWhitespace(ch) { 154 | s.r.unread() 155 | break 156 | } else { 157 | _, _ = buf.WriteRune(ch) 158 | } 159 | } 160 | 161 | return WS, pos, buf.String() 162 | } 163 | 164 | // skipUntilNewline skips characters until it reaches a newline. 165 | func (s *Scanner) skipUntilNewline() { 166 | for { 167 | if ch, _ := s.r.read(); ch == '\n' || ch == eof { 168 | return 169 | } 170 | } 171 | } 172 | 173 | // skipUntilEndComment skips characters until it reaches a '*/' symbol. 174 | func (s *Scanner) skipUntilEndComment() error { 175 | for { 176 | if ch1, _ := s.r.read(); ch1 == '*' { 177 | // We might be at the end. 178 | star: 179 | ch2, _ := s.r.read() 180 | if ch2 == '/' { 181 | return nil 182 | } else if ch2 == '*' { 183 | // We are back in the state machine since we see a star. 
184 | goto star 185 | } else if ch2 == eof { 186 | return io.EOF 187 | } 188 | } else if ch1 == eof { 189 | return io.EOF 190 | } 191 | } 192 | } 193 | 194 | func (s *Scanner) scanIdent(lookup bool) (tok Token, pos Pos, lit string) { 195 | // Save the starting position of the identifier. 196 | _, pos = s.r.read() 197 | s.r.unread() 198 | 199 | var buf strings.Builder 200 | for { 201 | if ch, _ := s.r.read(); ch == eof { 202 | break 203 | } else if ch == '"' { 204 | tok0, pos0, lit0 := s.scanString() 205 | if tok0 == BADSTRING || tok0 == BADESCAPE { 206 | return tok0, pos0, lit0 207 | } 208 | return IDENT, pos, lit0 209 | } else if isIdentChar(ch) { 210 | s.r.unread() 211 | buf.WriteString(ScanBareIdent(s.r)) 212 | } else { 213 | s.r.unread() 214 | break 215 | } 216 | } 217 | lit = buf.String() 218 | 219 | // If the literal matches a keyword then return that keyword. 220 | if lookup { 221 | if tok = Lookup(lit); tok != IDENT { 222 | return tok, pos, "" 223 | } 224 | } 225 | return IDENT, pos, lit 226 | } 227 | 228 | // scanString consumes a contiguous string of non-quote characters. 229 | // Quote characters can be consumed if they're first escaped with a backslash. 230 | func (s *Scanner) scanString() (tok Token, pos Pos, lit string) { 231 | s.r.unread() 232 | _, pos = s.r.curr() 233 | 234 | var err error 235 | lit, err = ScanString(s.r) 236 | if err == errBadString { 237 | return BADSTRING, pos, lit 238 | } else if err == errBadEscape { 239 | _, pos = s.r.curr() 240 | return BADESCAPE, pos, lit 241 | } 242 | return STRING, pos, lit 243 | } 244 | 245 | // ScanRegex consumes a token to find escapes 246 | func (s *Scanner) ScanRegex() (tok Token, pos Pos, lit string) { 247 | _, pos = s.r.curr() 248 | 249 | // Start & end sentinels. 250 | start, end := '/', '/' 251 | // Valid escape chars. 
252 | escapes := map[rune]rune{'/': '/'} 253 | 254 | b, err := ScanDelimited(s.r, start, end, escapes, true) 255 | 256 | if err == errBadEscape { 257 | _, pos = s.r.curr() 258 | return BADESCAPE, pos, lit 259 | } else if err != nil { 260 | return BADREGEX, pos, lit 261 | } 262 | return REGEX, pos, string(b) 263 | } 264 | 265 | // scanNumber consumes anything that looks like the start of a number. 266 | func (s *Scanner) scanNumber() (tok Token, pos Pos, lit string) { 267 | var buf strings.Builder 268 | 269 | // Check if the initial rune is a ".". 270 | ch, pos := s.r.curr() 271 | if ch == '.' { 272 | // Peek and see if the next rune is a digit. 273 | ch1, _ := s.r.read() 274 | s.r.unread() 275 | if !isDigit(ch1) { 276 | return ILLEGAL, pos, "." 277 | } 278 | 279 | // Unread the full stop so we can read it later. 280 | s.r.unread() 281 | } else { 282 | s.r.unread() 283 | } 284 | 285 | // Read as many digits as possible. 286 | _, _ = buf.WriteString(s.scanDigits()) 287 | 288 | // If next code points are a full stop and digit then consume them. 289 | isDecimal := false 290 | if ch0, _ := s.r.read(); ch0 == '.' { 291 | isDecimal = true 292 | if ch1, _ := s.r.read(); isDigit(ch1) { 293 | _, _ = buf.WriteRune(ch0) 294 | _, _ = buf.WriteRune(ch1) 295 | _, _ = buf.WriteString(s.scanDigits()) 296 | } else { 297 | s.r.unread() 298 | } 299 | } else { 300 | s.r.unread() 301 | } 302 | 303 | // Read as a duration or integer if it doesn't have a fractional part. 304 | if !isDecimal { 305 | // If the next rune is a letter then this is a duration token. 306 | if ch0, _ := s.r.read(); isLetter(ch0) || ch0 == 'µ' { 307 | _, _ = buf.WriteRune(ch0) 308 | for { 309 | ch1, _ := s.r.read() 310 | if !isLetter(ch1) && ch1 != 'µ' { 311 | s.r.unread() 312 | break 313 | } 314 | _, _ = buf.WriteRune(ch1) 315 | } 316 | 317 | // Continue reading digits and letters as part of this token. 
318 | for { 319 | if ch0, _ := s.r.read(); isLetter(ch0) || ch0 == 'µ' || isDigit(ch0) { 320 | _, _ = buf.WriteRune(ch0) 321 | } else { 322 | s.r.unread() 323 | break 324 | } 325 | } 326 | return DURATIONVAL, pos, buf.String() 327 | } else { 328 | s.r.unread() 329 | return INTEGER, pos, buf.String() 330 | } 331 | } 332 | return NUMBER, pos, buf.String() 333 | } 334 | 335 | // scanDigits consumes a contiguous series of digits. 336 | func (s *Scanner) scanDigits() string { 337 | var buf strings.Builder 338 | for { 339 | ch, _ := s.r.read() 340 | if !isDigit(ch) { 341 | s.r.unread() 342 | break 343 | } 344 | _, _ = buf.WriteRune(ch) 345 | } 346 | return buf.String() 347 | } 348 | 349 | // isWhitespace returns true if the rune is a space, tab, or newline. 350 | func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' } 351 | 352 | // isLetter returns true if the rune is a letter. 353 | func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') } 354 | 355 | // isDigit returns true if the rune is a digit. 356 | func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') } 357 | 358 | // isIdentChar returns true if the rune can be used in an unquoted identifier. 359 | func isIdentChar(ch rune) bool { return isLetter(ch) || isDigit(ch) || ch == '_' } 360 | 361 | // isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifier. 362 | func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' } 363 | 364 | // bufScanner represents a wrapper for scanner to add a buffer. 365 | // It provides a fixed-length circular buffer that can be unread. 366 | type bufScanner struct { 367 | s *Scanner 368 | i int // buffer index 369 | n int // buffer size 370 | buf [3]struct { 371 | tok Token 372 | pos Pos 373 | lit string 374 | } 375 | } 376 | 377 | // newBufScanner returns a new buffered scanner for a reader. 
378 | func newBufScanner(r io.Reader) *bufScanner { 379 | return &bufScanner{s: NewScanner(r)} 380 | } 381 | 382 | // Scan reads the next token from the scanner. 383 | func (s *bufScanner) Scan() (tok Token, pos Pos, lit string) { 384 | return s.scanFunc(s.s.Scan) 385 | } 386 | 387 | // ScanRegex reads a regex token from the scanner. 388 | func (s *bufScanner) ScanRegex() (tok Token, pos Pos, lit string) { 389 | return s.scanFunc(s.s.ScanRegex) 390 | } 391 | 392 | // scanFunc uses the provided function to scan the next token. 393 | func (s *bufScanner) scanFunc(scan func() (Token, Pos, string)) (tok Token, pos Pos, lit string) { 394 | // If we have unread tokens then read them off the buffer first. 395 | if s.n > 0 { 396 | s.n-- 397 | return s.curr() 398 | } 399 | 400 | // Move buffer position forward and save the token. 401 | s.i = (s.i + 1) % len(s.buf) 402 | buf := &s.buf[s.i] 403 | buf.tok, buf.pos, buf.lit = scan() 404 | 405 | return s.curr() 406 | } 407 | 408 | // Unscan pushes the previously scanned token back onto the buffer. 409 | func (s *bufScanner) Unscan() { s.n++ } 410 | 411 | // curr returns the last read token. 412 | func (s *bufScanner) curr() (tok Token, pos Pos, lit string) { 413 | buf := &s.buf[(s.i-s.n+len(s.buf))%len(s.buf)] 414 | return buf.tok, buf.pos, buf.lit 415 | } 416 | 417 | // reader represents a buffered rune reader used by the scanner. 418 | // It provides a fixed-length circular buffer that can be unread. 419 | type reader struct { 420 | r io.RuneScanner 421 | i int // buffer index 422 | n int // buffer char count 423 | pos Pos // last read rune position 424 | buf [3]struct { 425 | ch rune 426 | pos Pos 427 | } 428 | eof bool // true if reader has ever seen eof. 429 | } 430 | 431 | // ReadRune reads the next rune from the reader. 432 | // This is a wrapper function to implement the io.RuneReader interface. 433 | // Note that this function does not return size. 
434 | func (r *reader) ReadRune() (ch rune, size int, err error) { 435 | ch, _ = r.read() 436 | if ch == eof { 437 | err = io.EOF 438 | } 439 | return 440 | } 441 | 442 | // UnreadRune pushes the previously read rune back onto the buffer. 443 | // This is a wrapper function to implement the io.RuneScanner interface. 444 | func (r *reader) UnreadRune() error { 445 | r.unread() 446 | return nil 447 | } 448 | 449 | // read reads the next rune from the reader. 450 | func (r *reader) read() (ch rune, pos Pos) { 451 | // If we have unread characters then read them off the buffer first. 452 | if r.n > 0 { 453 | r.n-- 454 | return r.curr() 455 | } 456 | 457 | // Read next rune from underlying reader. 458 | // Any error (including io.EOF) should return as EOF. 459 | ch, _, err := r.r.ReadRune() 460 | if err != nil { 461 | ch = eof 462 | } else if ch == '\r' { 463 | if ch, _, err := r.r.ReadRune(); err != nil { 464 | // nop 465 | } else if ch != '\n' { 466 | _ = r.r.UnreadRune() 467 | } 468 | ch = '\n' 469 | } 470 | 471 | // Save character and position to the buffer. 472 | r.i = (r.i + 1) % len(r.buf) 473 | buf := &r.buf[r.i] 474 | buf.ch, buf.pos = ch, r.pos 475 | 476 | // Update position. 477 | // Only count EOF once. 478 | if ch == '\n' { 479 | r.pos.Line++ 480 | r.pos.Char = 0 481 | } else if !r.eof { 482 | r.pos.Char++ 483 | } 484 | 485 | // Mark the reader as EOF. 486 | // This is used so we don't double count EOF characters. 487 | if ch == eof { 488 | r.eof = true 489 | } 490 | 491 | return r.curr() 492 | } 493 | 494 | // unread pushes the previously read rune back onto the buffer. 495 | func (r *reader) unread() { 496 | r.n++ 497 | } 498 | 499 | // curr returns the last read character and position. 500 | func (r *reader) curr() (ch rune, pos Pos) { 501 | i := (r.i - r.n + len(r.buf)) % len(r.buf) 502 | buf := &r.buf[i] 503 | return buf.ch, buf.pos 504 | } 505 | 506 | // eof is a marker code point to signify that the reader can't read any more. 
507 | const eof = rune(0) 508 | 509 | // ScanDelimited reads a delimited set of runes 510 | func ScanDelimited(r io.RuneScanner, start, end rune, escapes map[rune]rune, escapesPassThru bool) ([]byte, error) { 511 | // Scan start delimiter. 512 | if ch, _, err := r.ReadRune(); err != nil { 513 | return nil, err 514 | } else if ch != start { 515 | return nil, fmt.Errorf("expected %s; found %s", string(start), string(ch)) 516 | } 517 | 518 | var buf bytes.Buffer 519 | for { 520 | ch0, _, err := r.ReadRune() 521 | if ch0 == end { 522 | return buf.Bytes(), nil 523 | } else if err != nil { 524 | return buf.Bytes(), err 525 | } else if ch0 == '\n' { 526 | return nil, errors.New("delimited text contains new line") 527 | } else if ch0 == '\\' { 528 | // If the next character is an escape then write the escaped char. 529 | // If it's not a valid escape then return an error. 530 | ch1, _, err := r.ReadRune() 531 | if err != nil { 532 | return nil, err 533 | } 534 | 535 | c, ok := escapes[ch1] 536 | if !ok { 537 | if escapesPassThru { 538 | // Unread ch1 (char after the \) 539 | _ = r.UnreadRune() 540 | // Write ch0 (\) to the output buffer. 541 | _, _ = buf.WriteRune(ch0) 542 | continue 543 | } else { 544 | buf.Reset() 545 | _, _ = buf.WriteRune(ch0) 546 | _, _ = buf.WriteRune(ch1) 547 | return buf.Bytes(), errBadEscape 548 | } 549 | } 550 | 551 | _, _ = buf.WriteRune(c) 552 | } else { 553 | _, _ = buf.WriteRune(ch0) 554 | } 555 | } 556 | } 557 | 558 | // ScanString reads a quoted string from a rune reader. 
559 | func ScanString(r io.RuneScanner) (string, error) { 560 | ending, _, err := r.ReadRune() 561 | if err != nil { 562 | return "", errBadString 563 | } 564 | 565 | var buf strings.Builder 566 | for { 567 | ch0, _, err := r.ReadRune() 568 | if ch0 == ending { 569 | return buf.String(), nil 570 | } else if err != nil || ch0 == '\n' { 571 | return buf.String(), errBadString 572 | } else if ch0 == '\\' { 573 | // If the next character is an escape then write the escaped char. 574 | // If it's not a valid escape then return an error. 575 | ch1, _, _ := r.ReadRune() 576 | if ch1 == 'n' { 577 | _, _ = buf.WriteRune('\n') 578 | } else if ch1 == '\\' { 579 | _, _ = buf.WriteRune('\\') 580 | } else if ch1 == '"' { 581 | _, _ = buf.WriteRune('"') 582 | } else if ch1 == '\'' { 583 | _, _ = buf.WriteRune('\'') 584 | } else { 585 | return string(ch0) + string(ch1), errBadEscape 586 | } 587 | } else { 588 | _, _ = buf.WriteRune(ch0) 589 | } 590 | } 591 | } 592 | 593 | var errBadString = errors.New("bad string") 594 | var errBadEscape = errors.New("bad escape") 595 | 596 | // ScanBareIdent reads bare identifier from a rune reader. 597 | func ScanBareIdent(r io.RuneScanner) string { 598 | // Read every ident character into the buffer. 599 | // Non-ident characters and EOF will cause the loop to exit. 600 | var buf strings.Builder 601 | for { 602 | ch, _, err := r.ReadRune() 603 | if err != nil { 604 | break 605 | } else if !isIdentChar(ch) { 606 | r.UnreadRune() 607 | break 608 | } else { 609 | _, _ = buf.WriteRune(ch) 610 | } 611 | } 612 | return buf.String() 613 | } 614 | 615 | // IsRegexOp returns true if the operator accepts a regex operand. 
616 | func IsRegexOp(t Token) bool { 617 | return (t == EQREGEX || t == NEQREGEX) 618 | } 619 | -------------------------------------------------------------------------------- /scanner_test.go: -------------------------------------------------------------------------------- 1 | package influxql_test 2 | 3 | import ( 4 | "reflect" 5 | "strings" 6 | "testing" 7 | 8 | "github.com/influxdata/influxql" 9 | ) 10 | 11 | // Ensure the scanner can scan tokens correctly. 12 | func TestScanner_Scan(t *testing.T) { 13 | var tests = []struct { 14 | s string 15 | tok influxql.Token 16 | lit string 17 | pos influxql.Pos 18 | }{ 19 | // Special tokens (EOF, ILLEGAL, WS) 20 | {s: ``, tok: influxql.EOF}, 21 | {s: `#`, tok: influxql.ILLEGAL, lit: `#`}, 22 | {s: ` `, tok: influxql.WS, lit: " "}, 23 | {s: "\t", tok: influxql.WS, lit: "\t"}, 24 | {s: "\n", tok: influxql.WS, lit: "\n"}, 25 | {s: "\r", tok: influxql.WS, lit: "\n"}, 26 | {s: "\r\n", tok: influxql.WS, lit: "\n"}, 27 | {s: "\rX", tok: influxql.WS, lit: "\n"}, 28 | {s: "\n\r", tok: influxql.WS, lit: "\n\n"}, 29 | {s: " \n\t \r\n\t", tok: influxql.WS, lit: " \n\t \n\t"}, 30 | {s: " foo", tok: influxql.WS, lit: " "}, 31 | 32 | // Numeric operators 33 | {s: `+`, tok: influxql.ADD}, 34 | {s: `-`, tok: influxql.SUB}, 35 | {s: `*`, tok: influxql.MUL}, 36 | {s: `/`, tok: influxql.DIV}, 37 | {s: `%`, tok: influxql.MOD}, 38 | 39 | // Logical operators 40 | {s: `AND`, tok: influxql.AND}, 41 | {s: `and`, tok: influxql.AND}, 42 | {s: `OR`, tok: influxql.OR}, 43 | {s: `or`, tok: influxql.OR}, 44 | 45 | {s: `=`, tok: influxql.EQ}, 46 | {s: `<>`, tok: influxql.NEQ}, 47 | {s: `! 
`, tok: influxql.ILLEGAL, lit: "!"}, 48 | {s: `<`, tok: influxql.LT}, 49 | {s: `<=`, tok: influxql.LTE}, 50 | {s: `>`, tok: influxql.GT}, 51 | {s: `>=`, tok: influxql.GTE}, 52 | 53 | // Misc tokens 54 | {s: `(`, tok: influxql.LPAREN}, 55 | {s: `)`, tok: influxql.RPAREN}, 56 | {s: `,`, tok: influxql.COMMA}, 57 | {s: `;`, tok: influxql.SEMICOLON}, 58 | {s: `.`, tok: influxql.DOT}, 59 | {s: `=~`, tok: influxql.EQREGEX}, 60 | {s: `!~`, tok: influxql.NEQREGEX}, 61 | {s: `:`, tok: influxql.COLON}, 62 | {s: `::`, tok: influxql.DOUBLECOLON}, 63 | 64 | // Identifiers 65 | {s: `foo`, tok: influxql.IDENT, lit: `foo`}, 66 | {s: `_foo`, tok: influxql.IDENT, lit: `_foo`}, 67 | {s: `Zx12_3U_-`, tok: influxql.IDENT, lit: `Zx12_3U_`}, 68 | {s: `"foo"`, tok: influxql.IDENT, lit: `foo`}, 69 | {s: `"foo\\bar"`, tok: influxql.IDENT, lit: `foo\bar`}, 70 | {s: `"foo\bar"`, tok: influxql.BADESCAPE, lit: `\b`, pos: influxql.Pos{Line: 0, Char: 5}}, 71 | {s: `"foo\"bar\""`, tok: influxql.IDENT, lit: `foo"bar"`}, 72 | {s: `test"`, tok: influxql.BADSTRING, lit: "", pos: influxql.Pos{Line: 0, Char: 3}}, 73 | {s: `"test`, tok: influxql.BADSTRING, lit: `test`}, 74 | {s: `$host`, tok: influxql.BOUNDPARAM, lit: `$host`}, 75 | {s: `$"host param"`, tok: influxql.BOUNDPARAM, lit: `$host param`}, 76 | 77 | {s: `true`, tok: influxql.TRUE}, 78 | {s: `false`, tok: influxql.FALSE}, 79 | 80 | // Strings 81 | {s: `'testing 123!'`, tok: influxql.STRING, lit: `testing 123!`}, 82 | {s: `'foo\nbar'`, tok: influxql.STRING, lit: "foo\nbar"}, 83 | {s: `'foo\\bar'`, tok: influxql.STRING, lit: "foo\\bar"}, 84 | {s: `'test`, tok: influxql.BADSTRING, lit: `test`}, 85 | {s: "'test\nfoo", tok: influxql.BADSTRING, lit: `test`}, 86 | {s: `'test\g'`, tok: influxql.BADESCAPE, lit: `\g`, pos: influxql.Pos{Line: 0, Char: 6}}, 87 | 88 | // Numbers 89 | {s: `100`, tok: influxql.INTEGER, lit: `100`}, 90 | {s: `100.23`, tok: influxql.NUMBER, lit: `100.23`}, 91 | {s: `.23`, tok: influxql.NUMBER, lit: `.23`}, 92 | //{s: `.`, tok: 
influxql.ILLEGAL, lit: `.`}, 93 | {s: `10.3s`, tok: influxql.NUMBER, lit: `10.3`}, 94 | 95 | // Durations 96 | {s: `10u`, tok: influxql.DURATIONVAL, lit: `10u`}, 97 | {s: `10µ`, tok: influxql.DURATIONVAL, lit: `10µ`}, 98 | {s: `10ms`, tok: influxql.DURATIONVAL, lit: `10ms`}, 99 | {s: `1s`, tok: influxql.DURATIONVAL, lit: `1s`}, 100 | {s: `10m`, tok: influxql.DURATIONVAL, lit: `10m`}, 101 | {s: `10h`, tok: influxql.DURATIONVAL, lit: `10h`}, 102 | {s: `10d`, tok: influxql.DURATIONVAL, lit: `10d`}, 103 | {s: `10w`, tok: influxql.DURATIONVAL, lit: `10w`}, 104 | {s: `10x`, tok: influxql.DURATIONVAL, lit: `10x`}, // non-duration unit, but scanned as a duration value 105 | 106 | // Keywords 107 | {s: `ALL`, tok: influxql.ALL}, 108 | {s: `ALTER`, tok: influxql.ALTER}, 109 | {s: `AS`, tok: influxql.AS}, 110 | {s: `ASC`, tok: influxql.ASC}, 111 | {s: `BEGIN`, tok: influxql.BEGIN}, 112 | {s: `BY`, tok: influxql.BY}, 113 | {s: `CREATE`, tok: influxql.CREATE}, 114 | {s: `CONTINUOUS`, tok: influxql.CONTINUOUS}, 115 | {s: `DATABASE`, tok: influxql.DATABASE}, 116 | {s: `DATABASES`, tok: influxql.DATABASES}, 117 | {s: `DEFAULT`, tok: influxql.DEFAULT}, 118 | {s: `DELETE`, tok: influxql.DELETE}, 119 | {s: `DESC`, tok: influxql.DESC}, 120 | {s: `DROP`, tok: influxql.DROP}, 121 | {s: `DURATION`, tok: influxql.DURATION}, 122 | {s: `END`, tok: influxql.END}, 123 | {s: `EVERY`, tok: influxql.EVERY}, 124 | {s: `EXPLAIN`, tok: influxql.EXPLAIN}, 125 | {s: `FIELD`, tok: influxql.FIELD}, 126 | {s: `FROM`, tok: influxql.FROM}, 127 | {s: `GRANT`, tok: influxql.GRANT}, 128 | {s: `GROUP`, tok: influxql.GROUP}, 129 | {s: `GROUPS`, tok: influxql.GROUPS}, 130 | {s: `INSERT`, tok: influxql.INSERT}, 131 | {s: `INTO`, tok: influxql.INTO}, 132 | {s: `KEY`, tok: influxql.KEY}, 133 | {s: `KEYS`, tok: influxql.KEYS}, 134 | {s: `KILL`, tok: influxql.KILL}, 135 | {s: `LIMIT`, tok: influxql.LIMIT}, 136 | {s: `SHOW`, tok: influxql.SHOW}, 137 | {s: `SHARD`, tok: influxql.SHARD}, 138 | {s: `SHARDS`, tok: 
influxql.SHARDS}, 139 | {s: `MEASUREMENT`, tok: influxql.MEASUREMENT}, 140 | {s: `MEASUREMENTS`, tok: influxql.MEASUREMENTS}, 141 | {s: `OFFSET`, tok: influxql.OFFSET}, 142 | {s: `ON`, tok: influxql.ON}, 143 | {s: `ORDER`, tok: influxql.ORDER}, 144 | {s: `PASSWORD`, tok: influxql.PASSWORD}, 145 | {s: `POLICY`, tok: influxql.POLICY}, 146 | {s: `POLICIES`, tok: influxql.POLICIES}, 147 | {s: `PRIVILEGES`, tok: influxql.PRIVILEGES}, 148 | {s: `QUERIES`, tok: influxql.QUERIES}, 149 | {s: `QUERY`, tok: influxql.QUERY}, 150 | {s: `READ`, tok: influxql.READ}, 151 | {s: `REPLICATION`, tok: influxql.REPLICATION}, 152 | {s: `RESAMPLE`, tok: influxql.RESAMPLE}, 153 | {s: `RETENTION`, tok: influxql.RETENTION}, 154 | {s: `REVOKE`, tok: influxql.REVOKE}, 155 | {s: `SELECT`, tok: influxql.SELECT}, 156 | {s: `SERIES`, tok: influxql.SERIES}, 157 | {s: `TAG`, tok: influxql.TAG}, 158 | {s: `TO`, tok: influxql.TO}, 159 | {s: `USER`, tok: influxql.USER}, 160 | {s: `USERS`, tok: influxql.USERS}, 161 | {s: `VALUES`, tok: influxql.VALUES}, 162 | {s: `WHERE`, tok: influxql.WHERE}, 163 | {s: `WITH`, tok: influxql.WITH}, 164 | {s: `WRITE`, tok: influxql.WRITE}, 165 | {s: `explain`, tok: influxql.EXPLAIN}, // case insensitive 166 | {s: `seLECT`, tok: influxql.SELECT}, // case insensitive 167 | } 168 | 169 | for i, tt := range tests { 170 | s := influxql.NewScanner(strings.NewReader(tt.s)) 171 | tok, pos, lit := s.Scan() 172 | if tt.tok != tok { 173 | t.Errorf("%d. %q token mismatch: exp=%q got=%q <%q>", i, tt.s, tt.tok, tok, lit) 174 | } else if tt.pos.Line != pos.Line || tt.pos.Char != pos.Char { 175 | t.Errorf("%d. %q pos mismatch: exp=%#v got=%#v", i, tt.s, tt.pos, pos) 176 | } else if tt.lit != lit { 177 | t.Errorf("%d. %q literal mismatch: exp=%q got=%q", i, tt.s, tt.lit, lit) 178 | } 179 | } 180 | } 181 | 182 | // Ensure the scanner can scan a series of tokens correctly. 
183 | func TestScanner_Scan_Multi(t *testing.T) { 184 | type result struct { 185 | tok influxql.Token 186 | pos influxql.Pos 187 | lit string 188 | } 189 | exp := []result{ 190 | {tok: influxql.SELECT, pos: influxql.Pos{Line: 0, Char: 0}, lit: ""}, 191 | {tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 6}, lit: " "}, 192 | {tok: influxql.IDENT, pos: influxql.Pos{Line: 0, Char: 7}, lit: "value"}, 193 | {tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 12}, lit: " "}, 194 | {tok: influxql.FROM, pos: influxql.Pos{Line: 0, Char: 13}, lit: ""}, 195 | {tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 17}, lit: " "}, 196 | {tok: influxql.IDENT, pos: influxql.Pos{Line: 0, Char: 18}, lit: "myseries"}, 197 | {tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 26}, lit: " "}, 198 | {tok: influxql.WHERE, pos: influxql.Pos{Line: 0, Char: 27}, lit: ""}, 199 | {tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 32}, lit: " "}, 200 | {tok: influxql.IDENT, pos: influxql.Pos{Line: 0, Char: 33}, lit: "a"}, 201 | {tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 34}, lit: " "}, 202 | {tok: influxql.EQ, pos: influxql.Pos{Line: 0, Char: 35}, lit: ""}, 203 | {tok: influxql.WS, pos: influxql.Pos{Line: 0, Char: 36}, lit: " "}, 204 | {tok: influxql.STRING, pos: influxql.Pos{Line: 0, Char: 36}, lit: "b"}, 205 | {tok: influxql.EOF, pos: influxql.Pos{Line: 0, Char: 40}, lit: ""}, 206 | } 207 | 208 | // Create a scanner. 209 | v := `SELECT value from myseries WHERE a = 'b'` 210 | s := influxql.NewScanner(strings.NewReader(v)) 211 | 212 | // Continually scan until we reach the end. 213 | var act []result 214 | for { 215 | tok, pos, lit := s.Scan() 216 | act = append(act, result{tok, pos, lit}) 217 | if tok == influxql.EOF { 218 | break 219 | } 220 | } 221 | 222 | // Verify the token counts match. 223 | if len(exp) != len(act) { 224 | t.Fatalf("token count mismatch: exp=%d, got=%d", len(exp), len(act)) 225 | } 226 | 227 | // Verify each token matches. 
228 | for i := range exp { 229 | if !reflect.DeepEqual(exp[i], act[i]) { 230 | t.Fatalf("%d. token mismatch:\n\nexp=%#v\n\ngot=%#v", i, exp[i], act[i]) 231 | } 232 | } 233 | } 234 | 235 | // Ensure the library can correctly scan strings. 236 | func TestScanString(t *testing.T) { 237 | var tests = []struct { 238 | in string 239 | out string 240 | err string 241 | }{ 242 | {in: `""`, out: ``}, 243 | {in: `"foo bar"`, out: `foo bar`}, 244 | {in: `'foo bar'`, out: `foo bar`}, 245 | {in: `"foo\nbar"`, out: "foo\nbar"}, 246 | {in: `"foo\\bar"`, out: `foo\bar`}, 247 | {in: `"foo\"bar"`, out: `foo"bar`}, 248 | {in: `'foo\'bar'`, out: `foo'bar`}, 249 | 250 | {in: `"foo` + "\n", out: `foo`, err: "bad string"}, // newline in string 251 | {in: `"foo`, out: `foo`, err: "bad string"}, // unclosed quotes 252 | {in: `"foo\xbar"`, out: `\x`, err: "bad escape"}, // invalid escape 253 | } 254 | 255 | for i, tt := range tests { 256 | out, err := influxql.ScanString(strings.NewReader(tt.in)) 257 | if tt.err != errstring(err) { 258 | t.Errorf("%d. %s: error: exp=%s, got=%s", i, tt.in, tt.err, err) 259 | } else if tt.out != out { 260 | t.Errorf("%d. %s: out: exp=%s, got=%s", i, tt.in, tt.out, out) 261 | } 262 | } 263 | } 264 | 265 | // Test scanning regex 266 | func TestScanRegex(t *testing.T) { 267 | var tests = []struct { 268 | in string 269 | tok influxql.Token 270 | lit string 271 | err string 272 | }{ 273 | {in: `/^payments\./`, tok: influxql.REGEX, lit: `^payments\.`}, 274 | {in: `/foo\/bar/`, tok: influxql.REGEX, lit: `foo/bar`}, 275 | {in: `/foo\\/bar/`, tok: influxql.REGEX, lit: `foo\/bar`}, 276 | {in: `/foo\\bar/`, tok: influxql.REGEX, lit: `foo\\bar`}, 277 | {in: `/http\:\/\/www\.example\.com/`, tok: influxql.REGEX, lit: `http\://www\.example\.com`}, 278 | } 279 | 280 | for i, tt := range tests { 281 | s := influxql.NewScanner(strings.NewReader(tt.in)) 282 | tok, _, lit := s.ScanRegex() 283 | if tok != tt.tok { 284 | t.Errorf("%d. 
%s: error:\n\texp=%s\n\tgot=%s\n", i, tt.in, tt.tok.String(), tok.String()) 285 | } 286 | if lit != tt.lit { 287 | t.Errorf("%d. %s: error:\n\texp=%s\n\tgot=%s\n", i, tt.in, tt.lit, lit) 288 | } 289 | } 290 | } 291 | -------------------------------------------------------------------------------- /token.go: -------------------------------------------------------------------------------- 1 | package influxql 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | // Token is a lexical token of the InfluxQL language. 8 | type Token int 9 | 10 | // These are a comprehensive list of InfluxQL language tokens. 11 | const ( 12 | // ILLEGAL Token, EOF, WS are Special InfluxQL tokens. 13 | ILLEGAL Token = iota 14 | EOF 15 | WS 16 | COMMENT 17 | 18 | literalBeg 19 | // IDENT and the following are InfluxQL literal tokens. 20 | IDENT // main 21 | BOUNDPARAM // $param 22 | NUMBER // 12345.67 23 | INTEGER // 12345 24 | DURATIONVAL // 13h 25 | STRING // "abc" 26 | BADSTRING // "abc 27 | BADESCAPE // \q 28 | TRUE // true 29 | FALSE // false 30 | REGEX // Regular expressions 31 | BADREGEX // `.* 32 | literalEnd 33 | 34 | operatorBeg 35 | // ADD and the following are InfluxQL Operators 36 | ADD // + 37 | SUB // - 38 | MUL // * 39 | DIV // / 40 | MOD // % 41 | BITWISE_AND // & 42 | BITWISE_OR // | 43 | BITWISE_XOR // ^ 44 | 45 | AND // AND 46 | OR // OR 47 | 48 | EQ // = 49 | NEQ // != 50 | EQREGEX // =~ 51 | NEQREGEX // !~ 52 | LT // < 53 | LTE // <= 54 | GT // > 55 | GTE // >= 56 | operatorEnd 57 | 58 | LPAREN // ( 59 | RPAREN // ) 60 | COMMA // , 61 | COLON // : 62 | DOUBLECOLON // :: 63 | SEMICOLON // ; 64 | DOT // . 
65 | 66 | keywordBeg 67 | // ALL and the following are InfluxQL Keywords 68 | ALL 69 | ALTER 70 | ANALYZE 71 | ANY 72 | AS 73 | ASC 74 | BEGIN 75 | BY 76 | CARDINALITY 77 | CREATE 78 | CONTINUOUS 79 | DATABASE 80 | DATABASES 81 | DEFAULT 82 | DELETE 83 | DESC 84 | DESTINATIONS 85 | DIAGNOSTICS 86 | DISTINCT 87 | DROP 88 | DURATION 89 | END 90 | EVERY 91 | EXACT 92 | EXPLAIN 93 | FIELD 94 | FOR 95 | FROM 96 | FUTURE 97 | GRANT 98 | GRANTS 99 | GROUP 100 | GROUPS 101 | IN 102 | INF 103 | INSERT 104 | INTO 105 | KEY 106 | KEYS 107 | KILL 108 | LIMIT 109 | MEASUREMENT 110 | MEASUREMENTS 111 | NAME 112 | OFFSET 113 | ON 114 | ORDER 115 | PASSWORD 116 | PAST 117 | POLICY 118 | POLICIES 119 | PRIVILEGES 120 | QUERIES 121 | QUERY 122 | READ 123 | REPLICATION 124 | RESAMPLE 125 | RETENTION 126 | REVOKE 127 | SELECT 128 | SERIES 129 | SET 130 | SHOW 131 | SHARD 132 | SHARDS 133 | SLIMIT 134 | SOFFSET 135 | STATS 136 | SUBSCRIPTION 137 | SUBSCRIPTIONS 138 | TAG 139 | TO 140 | USER 141 | USERS 142 | VALUES 143 | VERBOSE 144 | WHERE 145 | WITH 146 | WRITE 147 | keywordEnd 148 | ) 149 | 150 | var tokens = [...]string{ 151 | ILLEGAL: "ILLEGAL", 152 | EOF: "EOF", 153 | WS: "WS", 154 | 155 | IDENT: "IDENT", 156 | NUMBER: "NUMBER", 157 | DURATIONVAL: "DURATIONVAL", 158 | STRING: "STRING", 159 | BADSTRING: "BADSTRING", 160 | BADESCAPE: "BADESCAPE", 161 | TRUE: "TRUE", 162 | FALSE: "FALSE", 163 | REGEX: "REGEX", 164 | 165 | ADD: "+", 166 | SUB: "-", 167 | MUL: "*", 168 | DIV: "/", 169 | MOD: "%", 170 | BITWISE_AND: "&", 171 | BITWISE_OR: "|", 172 | BITWISE_XOR: "^", 173 | 174 | AND: "AND", 175 | OR: "OR", 176 | 177 | EQ: "=", 178 | NEQ: "!=", 179 | EQREGEX: "=~", 180 | NEQREGEX: "!~", 181 | LT: "<", 182 | LTE: "<=", 183 | GT: ">", 184 | GTE: ">=", 185 | 186 | LPAREN: "(", 187 | RPAREN: ")", 188 | COMMA: ",", 189 | COLON: ":", 190 | DOUBLECOLON: "::", 191 | SEMICOLON: ";", 192 | DOT: ".", 193 | 194 | ALL: "ALL", 195 | ALTER: "ALTER", 196 | ANALYZE: "ANALYZE", 197 | ANY: "ANY", 198 | 
AS: "AS", 199 | ASC: "ASC", 200 | BEGIN: "BEGIN", 201 | BY: "BY", 202 | CARDINALITY: "CARDINALITY", 203 | CREATE: "CREATE", 204 | CONTINUOUS: "CONTINUOUS", 205 | DATABASE: "DATABASE", 206 | DATABASES: "DATABASES", 207 | DEFAULT: "DEFAULT", 208 | DELETE: "DELETE", 209 | DESC: "DESC", 210 | DESTINATIONS: "DESTINATIONS", 211 | DIAGNOSTICS: "DIAGNOSTICS", 212 | DISTINCT: "DISTINCT", 213 | DROP: "DROP", 214 | DURATION: "DURATION", 215 | END: "END", 216 | EVERY: "EVERY", 217 | EXACT: "EXACT", 218 | EXPLAIN: "EXPLAIN", 219 | FIELD: "FIELD", 220 | FOR: "FOR", 221 | FROM: "FROM", 222 | FUTURE: "FUTURE", 223 | GRANT: "GRANT", 224 | GRANTS: "GRANTS", 225 | GROUP: "GROUP", 226 | GROUPS: "GROUPS", 227 | IN: "IN", 228 | INF: "INF", 229 | INSERT: "INSERT", 230 | INTO: "INTO", 231 | KEY: "KEY", 232 | KEYS: "KEYS", 233 | KILL: "KILL", 234 | LIMIT: "LIMIT", 235 | MEASUREMENT: "MEASUREMENT", 236 | MEASUREMENTS: "MEASUREMENTS", 237 | NAME: "NAME", 238 | OFFSET: "OFFSET", 239 | ON: "ON", 240 | ORDER: "ORDER", 241 | PASSWORD: "PASSWORD", 242 | PAST: "PAST", 243 | POLICY: "POLICY", 244 | POLICIES: "POLICIES", 245 | PRIVILEGES: "PRIVILEGES", 246 | QUERIES: "QUERIES", 247 | QUERY: "QUERY", 248 | READ: "READ", 249 | REPLICATION: "REPLICATION", 250 | RESAMPLE: "RESAMPLE", 251 | RETENTION: "RETENTION", 252 | REVOKE: "REVOKE", 253 | SELECT: "SELECT", 254 | SERIES: "SERIES", 255 | SET: "SET", 256 | SHOW: "SHOW", 257 | SHARD: "SHARD", 258 | SHARDS: "SHARDS", 259 | SLIMIT: "SLIMIT", 260 | SOFFSET: "SOFFSET", 261 | STATS: "STATS", 262 | SUBSCRIPTION: "SUBSCRIPTION", 263 | SUBSCRIPTIONS: "SUBSCRIPTIONS", 264 | TAG: "TAG", 265 | TO: "TO", 266 | USER: "USER", 267 | USERS: "USERS", 268 | VALUES: "VALUES", 269 | VERBOSE: "VERBOSE", 270 | WHERE: "WHERE", 271 | WITH: "WITH", 272 | WRITE: "WRITE", 273 | } 274 | 275 | var keywords map[string]Token 276 | 277 | func init() { 278 | keywords = make(map[string]Token) 279 | for tok := keywordBeg + 1; tok < keywordEnd; tok++ { 280 | 
keywords[strings.ToLower(tokens[tok])] = tok 281 | } 282 | for _, tok := range []Token{AND, OR} { 283 | keywords[strings.ToLower(tokens[tok])] = tok 284 | } 285 | keywords["true"] = TRUE 286 | keywords["false"] = FALSE 287 | } 288 | 289 | // String returns the string representation of the token. 290 | func (tok Token) String() string { 291 | if tok >= 0 && tok < Token(len(tokens)) { 292 | return tokens[tok] 293 | } 294 | return "" 295 | } 296 | 297 | // Precedence returns the operator precedence of the binary operator token. 298 | func (tok Token) Precedence() int { 299 | switch tok { 300 | case OR: 301 | return 1 302 | case AND: 303 | return 2 304 | case EQ, NEQ, EQREGEX, NEQREGEX, LT, LTE, GT, GTE: 305 | return 3 306 | case ADD, SUB, BITWISE_OR, BITWISE_XOR: 307 | return 4 308 | case MUL, DIV, MOD, BITWISE_AND: 309 | return 5 310 | } 311 | return 0 312 | } 313 | 314 | // isOperator returns true for operator tokens. 315 | func (tok Token) isOperator() bool { return tok > operatorBeg && tok < operatorEnd } 316 | 317 | // tokstr returns a literal if provided, otherwise returns the token string. 318 | func tokstr(tok Token, lit string) string { 319 | if lit != "" { 320 | return lit 321 | } 322 | return tok.String() 323 | } 324 | 325 | // Lookup returns the token associated with a given string. 326 | func Lookup(ident string) Token { 327 | if tok, ok := keywords[strings.ToLower(ident)]; ok { 328 | return tok 329 | } 330 | return IDENT 331 | } 332 | 333 | // Pos specifies the line and character position of a token. 334 | // The Char and Line are both zero-based indexes. 
335 | type Pos struct { 336 | Line int 337 | Char int 338 | } 339 | -------------------------------------------------------------------------------- /utils.go: -------------------------------------------------------------------------------- 1 | package influxql 2 | 3 | // This package modified from the corresponding flux code for predicates in flux/semantic/utils.go 4 | 5 | // ConjunctionsToExprSlice finds all children of AndOperators that are not themselves AndOperators, 6 | // and returns them in a slice. If the root node of expr is not an AndOperator, just returns expr. 7 | // 8 | // AND 9 | // / \ 10 | // AND r => {p, q, r} 11 | // / \ 12 | // p q 13 | // 14 | func ConjunctionsToExprSlice(expr Expr) []Expr { 15 | if e, ok := expr.(*BinaryExpr); ok && e.Op == AND { 16 | exprSlice := make([]Expr, 0, 2) 17 | exprSlice = append(exprSlice, ConjunctionsToExprSlice(e.LHS)...) 18 | exprSlice = append(exprSlice, ConjunctionsToExprSlice(e.RHS)...) 19 | return exprSlice 20 | } 21 | // grouping should already be taken care of by the tree 22 | if e, ok := expr.(*ParenExpr); ok { 23 | return ConjunctionsToExprSlice(e.Expr) 24 | } 25 | 26 | return []Expr{expr} 27 | } 28 | 29 | // ExprsToConjunction accepts a variable number of expressions and ANDs them 30 | // together into a single expression. 31 | // 32 | // AND 33 | // / \ 34 | // {p, q, r} => AND r 35 | // / \ 36 | // p q 37 | // 38 | func ExprsToConjunction(exprs ...Expr) Expr { 39 | if len(exprs) == 0 { 40 | return nil 41 | } 42 | 43 | expr := exprs[0] 44 | for _, e := range exprs[1:] { 45 | expr = &BinaryExpr{ 46 | LHS: expr, 47 | RHS: e, 48 | Op: AND, 49 | } 50 | } 51 | 52 | return expr 53 | } 54 | 55 | // PartitionExpr accepts a predicate expression, separates it into components that have been 56 | // logically ANDed together, and applies partitionFn to them. 
Returns two expressions: one AND tree 57 | // of the expressions for which partitionFn returned true, and an AND tree of expressions for which 58 | // partitionFn returned false. 59 | // 60 | // Suppose partitonFn returns true for p and r, and false for q: 61 | // 62 | // AND passExpr failExpr 63 | // / \ 64 | // AND r => AND q 65 | // / \ / \ 66 | // p q p r 67 | // 68 | func PartitionExpr(expr Expr, partitionFn func(expression Expr) (bool, error)) (passExpr, failExpr Expr, err error) { 69 | exprSlice := ConjunctionsToExprSlice(expr) 70 | var passSlice, failSlice []Expr 71 | for _, e := range exprSlice { 72 | b, err := partitionFn(e) 73 | if err != nil { 74 | return nil, nil, err 75 | } 76 | if b { 77 | passSlice = append(passSlice, e) 78 | } else { 79 | failSlice = append(failSlice, e) 80 | } 81 | } 82 | 83 | passExpr = ExprsToConjunction(passSlice...) 84 | failExpr = ExprsToConjunction(failSlice...) 85 | return passExpr, failExpr, nil 86 | } 87 | --------------------------------------------------------------------------------