├── parser ├── testdata │ ├── basic │ │ ├── use_database.sql │ │ ├── quantile_functions.sql │ │ ├── format │ │ │ ├── use_database.sql │ │ │ ├── quantile_functions.sql │ │ │ ├── set_statement.sql │ │ │ └── settings_statement.sql │ │ ├── output │ │ │ └── use_database.sql.golden.json │ │ ├── set_statement.sql │ │ └── settings_statement.sql │ ├── ddl │ │ ├── show_tables.sql │ │ ├── show_databases.sql │ │ ├── desc_table_without_table_keyword.sql │ │ ├── show_create_table.sql │ │ ├── show_databases_limit.sql │ │ ├── desc_table_with_table_keyword.sql │ │ ├── show_databases_format.sql │ │ ├── show_databases_ilike.sql │ │ ├── show_databases_like.sql │ │ ├── create_database.sql │ │ ├── describe_table_without_table_keyword.sql │ │ ├── show_databases_not_like.sql │ │ ├── describe_table_with_table_keyword.sql │ │ ├── drop_database.sql │ │ ├── drop_table_basic.sql │ │ ├── show_databases_not_ilike.sql │ │ ├── show_databases_format_string.sql │ │ ├── truncate_table_basic.sql │ │ ├── show_databases_outfile.sql │ │ ├── alter_table_detach_partition.sql │ │ ├── alter_table_modify_column.sql │ │ ├── alter_table_modify_column_remove.sql │ │ ├── check.sql │ │ ├── create_function_simple.sql │ │ ├── alter_table_replace_partition.sql │ │ ├── alter_table_delete.sql │ │ ├── alter_table_remove_ttl.sql │ │ ├── alter_table_drop_index.sql │ │ ├── alter_table_reset_setting.sql │ │ ├── format │ │ │ ├── show_tables.sql │ │ │ ├── show_databases.sql │ │ │ ├── desc_table.sql │ │ │ ├── describe_table.sql │ │ │ ├── desc_table_without_table_keyword.sql │ │ │ ├── describe_table_without_table_keyword.sql │ │ │ ├── show_create_table.sql │ │ │ ├── show_databases_limit.sql │ │ │ ├── desc_table_with_table_keyword.sql │ │ │ ├── show_databases_like.sql │ │ │ ├── describe_table_with_table_keyword.sql │ │ │ ├── show_databases_format.sql │ │ │ ├── show_databases_ilike.sql │ │ │ ├── create_database.sql │ │ │ ├── show_databases_not_ilike.sql │ │ │ ├── show_databases_not_like.sql │ │ │ ├── drop_database.sql │ │ │ ├── 
drop_table_basic.sql │ │ │ ├── show_databases_format_string.sql │ │ │ ├── truncate_table_basic.sql │ │ │ ├── show_databases_outfile.sql │ │ │ ├── alter_table_detach_partition.sql │ │ │ ├── alter_table_modify_column_remove.sql │ │ │ ├── alter_table_modify_column.sql │ │ │ ├── alter_table_replace_partition.sql │ │ │ ├── create_function_simple.sql │ │ │ ├── check.sql │ │ │ ├── alter_table_delete.sql │ │ │ ├── alter_table_remove_ttl.sql │ │ │ ├── alter_table_freeze_no_specify_partition.sql │ │ │ ├── alter_table_reset_setting.sql │ │ │ ├── drop_table_with_on_clsuter.sql │ │ │ ├── alter_table_drop_index.sql │ │ │ ├── alter_table_clear_index.sql │ │ │ ├── alter_table_clear_projection.sql │ │ │ ├── alter_table_clear_column.sql │ │ │ ├── alter_table_rename_column.sql │ │ │ ├── create_table_codec_no_args.sql │ │ │ ├── drop_table_with_no_delay.sql │ │ │ ├── alter_table_drop_projection.sql │ │ │ ├── alter_table_drop_partition.sql │ │ │ ├── alter_table_drop_column.sql │ │ │ ├── show_databases_comprehensive.sql │ │ │ ├── alter_table_freeze_partition.sql │ │ │ ├── systems.sql │ │ │ ├── truncate_temporary_table_on_clsuter.sql │ │ │ ├── alter_table_add_column.sql │ │ │ ├── alter_table_update.sql │ │ │ ├── alter_table_update_with_cluster.sql │ │ │ ├── alter_table_modify_setting.sql │ │ │ ├── create_database_replicated.sql │ │ │ ├── create_table_with_qbit.sql │ │ │ ├── alter_table_materialize_index.sql │ │ │ ├── alter_table_delete_with_cluster.sql │ │ │ ├── alter_table_reset_multiple_settings.sql │ │ │ ├── alter_table_update_in_partition.sql │ │ │ ├── create_live_view_basic.sql │ │ │ ├── alter_table_materialize_projection.sql │ │ │ ├── create_view_basic.sql │ │ │ ├── alter_table_add_projection.sql │ │ │ ├── alter_table_drop_detach_partition.sql │ │ │ ├── create_table_json_typehints.sql │ │ │ ├── alter_table_attach_partition.sql │ │ │ ├── create_table_with_null_engine.sql │ │ │ ├── create_distributed_table.sql │ │ │ ├── create_materialized_view_with_gcs.sql │ │ │ ├── 
create_view_on_cluster_with_uuid.sql │ │ │ ├── drop_role.sql │ │ │ ├── create_table_with_keyword_partition_by.sql │ │ │ ├── create_table_with_projection.sql │ │ │ ├── create_mv_with_order_by.sql │ │ │ ├── create_materialized_view_with_refresh.sql │ │ │ ├── create_table_as_remote_function.sql │ │ │ ├── create_table_with_sample_by.sql │ │ │ ├── attach_table_basic.sql │ │ │ ├── create_materialized_view_with_definer.sql │ │ │ ├── create_table_with_on_clsuter.sql │ │ │ ├── create_table_with_uuid.sql │ │ │ ├── create_table_with_nullable.sql │ │ │ ├── create_dictionary_basic.sql │ │ │ ├── create_table_with_enum_fields.sql │ │ │ └── create_materialized_view_with_empty_table_schema.sql │ │ ├── alter_table_clear_index.sql │ │ ├── alter_table_freeze_no_specify_partition.sql │ │ ├── drop_table_with_on_clsuter.sql │ │ ├── alter_table_clear_column.sql │ │ ├── alter_table_clear_projection.sql │ │ ├── alter_table_drop_projection.sql │ │ ├── drop_table_with_no_delay.sql │ │ ├── systems.sql │ │ ├── alter_table_drop_column.sql │ │ ├── alter_table_drop_partition.sql │ │ ├── alter_table_rename_column.sql │ │ ├── create_table_codec_no_args.sql │ │ ├── show_databases_comprehensive.sql │ │ ├── alter_table_freeze_partition.sql │ │ ├── alter_table_add_column.sql │ │ ├── alter_table_update.sql │ │ ├── alter_table_modify_setting.sql │ │ ├── alter_table_update_with_cluster.sql │ │ ├── truncate_temporary_table_on_clsuter.sql │ │ ├── create_database_replicated.sql │ │ ├── alter_table_delete_with_cluster.sql │ │ ├── alter_table_materialize_index.sql │ │ ├── create_table_with_qbit.sql │ │ ├── alter_table_reset_multiple_settings.sql │ │ ├── alter_table_update_in_partition.sql │ │ ├── create_live_view_basic.sql │ │ ├── alter_table_materialize_projection.sql │ │ ├── create_view_basic.sql │ │ ├── alter_table_add_projection.sql │ │ ├── alter_table_drop_detach_partition.sql │ │ ├── alter_table_attach_partition.sql │ │ ├── create_table_json_typehints.sql │ │ ├── create_table_with_null_engine.sql │ │ ├── 
drop_role.sql │ │ ├── create_materialized_view_with_gcs.sql │ │ ├── create_distributed_table.sql │ │ ├── create_view_on_cluster_with_uuid.sql │ │ ├── output │ │ │ ├── drop_database.sql.golden.json │ │ │ ├── show_tables.sql.golden.json │ │ │ ├── show_databases.sql.golden.json │ │ │ ├── create_database.sql.golden.json │ │ │ ├── desc_table_without_table_keyword.sql.golden.json │ │ │ ├── desc_table_with_table_keyword.sql.golden.json │ │ │ ├── describe_table_without_table_keyword.sql.golden.json │ │ │ ├── describe_table_with_table_keyword.sql.golden.json │ │ │ ├── show_databases_format.sql.golden.json │ │ │ ├── show_databases_ilike.sql.golden.json │ │ │ ├── show_databases_like.sql.golden.json │ │ │ ├── show_databases_not_ilike.sql.golden.json │ │ │ ├── show_databases_not_like.sql.golden.json │ │ │ ├── show_databases_format_string.sql.golden.json │ │ │ ├── show_databases_limit.sql.golden.json │ │ │ ├── show_databases_outfile.sql.golden.json │ │ │ ├── show_create_table.sql.golden.json │ │ │ ├── truncate_table_basic.sql.golden.json │ │ │ ├── systems.sql.golden.json │ │ │ ├── drop_table_basic.sql.golden.json │ │ │ ├── alter_table_reset_setting.sql.golden.json │ │ │ ├── truncate_temporary_table_on_clsuter.sql.golden.json │ │ │ ├── show_databases_comprehensive.sql.golden.json │ │ │ ├── drop_table_with_on_clsuter.sql.golden.json │ │ │ ├── drop_table_with_no_delay.sql.golden.json │ │ │ ├── alter_table_remove_ttl.sql.golden.json │ │ │ ├── check.sql.golden.json │ │ │ ├── alter_table_freeze_no_specify_partition.sql.golden.json │ │ │ ├── alter_table_detach_partition.sql.golden.json │ │ │ ├── alter_table_replace_partition.sql.golden.json │ │ │ ├── alter_table_rename_column.sql.golden.json │ │ │ ├── alter_table_drop_column.sql.golden.json │ │ │ ├── alter_table_drop_index.sql.golden.json │ │ │ ├── alter_table_drop_projection.sql.golden.json │ │ │ ├── alter_table_freeze_partition.sql.golden.json │ │ │ ├── alter_table_materialize_index.sql.golden.json │ │ │ ├── 
alter_table_reset_multiple_settings.sql.golden.json │ │ │ ├── alter_table_delete.sql.golden.json │ │ │ ├── alter_table_clear_column.sql.golden.json │ │ │ ├── alter_table_clear_index.sql.golden.json │ │ │ ├── alter_table_clear_projection.sql.golden.json │ │ │ └── alter_table_materialize_projection.sql.golden.json │ │ ├── create_table_with_keyword_partition_by.sql │ │ ├── create_mv_with_order_by.sql │ │ ├── create_table_with_projection.sql │ │ ├── create_materialized_view_with_refresh.sql │ │ ├── create_table_with_sample_by.sql │ │ ├── create_table_as_remote_function.sql │ │ ├── attach_table_basic.sql │ │ ├── create_materialized_view_with_definer.sql │ │ ├── create_table_with_on_clsuter.sql │ │ ├── create_table_with_uuid.sql │ │ ├── create_table_with_nullable.sql │ │ ├── create_table_with_enum_fields.sql │ │ ├── create_dictionary_basic.sql │ │ ├── optimize.sql │ │ ├── create_materialized_view_with_empty_table_schema.sql │ │ ├── create_dictionary_with_comment.sql │ │ ├── alter_table_add_index.sql │ │ ├── create_mv_with_not_op.sql │ │ ├── grant_privilege.sql │ │ ├── create_materialized_view_basic.sql │ │ ├── rename.sql │ │ ├── create_or_replace.sql │ │ ├── bug_001.sql │ │ ├── create_dictionary_comprehensive.sql │ │ ├── create_table_with_codec_delta.sql │ │ ├── create_table_with_ttl_policy.sql │ │ └── create_table_basic.sql │ ├── query │ │ ├── select_expr.sql │ │ ├── select_keyword_alias_no_as.sql │ │ ├── select_with_placeholder.sql │ │ ├── select_with_join_only.sql │ │ ├── select_with_single_quote_table.sql │ │ ├── select_with_distinct_keyword.sql │ │ ├── select_with_variable.sql │ │ ├── compatible │ │ │ └── 1_stateful │ │ │ │ ├── 00001_count_hits.sql │ │ │ │ ├── 00002_count_visits.sql │ │ │ │ ├── 00048_min_max.sql │ │ │ │ ├── 00005_filtering.sql │ │ │ │ ├── 00006_agregates.sql │ │ │ │ ├── 00021_1_select_with_in.sql │ │ │ │ ├── 00021_2_select_with_in.sql │ │ │ │ ├── 00011_sorting.sql │ │ │ │ ├── 00177_select_from_gcs.sql │ │ │ │ ├── 00008_uniq.sql │ │ │ │ ├── 
00032_aggregate_key64.sql │ │ │ │ ├── 00085_monotonic_evaluation_segfault.sql │ │ │ │ ├── 00014_filtering_arrays.sql │ │ │ │ ├── 00033_aggregate_key_string.sql │ │ │ │ ├── 00015_totals_and_no_aggregate_functions.sql │ │ │ │ ├── 00030_array_enumerate_uniq.sql │ │ │ │ ├── 00039_primary_key.sql │ │ │ │ ├── 00060_move_to_prewhere_and_sets.sql │ │ │ │ ├── 00021_3_select_with_in.sql │ │ │ │ ├── 00055_index_and_not.sql │ │ │ │ ├── 00059_merge_sorting_empty_array_joined.sql │ │ │ │ ├── 00068_subquery_in_prewhere.sql │ │ │ │ ├── 00160_decode_xml_component.sql │ │ │ │ ├── 00045_uniq_upto.sql │ │ │ │ ├── 00142_system_columns.sql │ │ │ │ ├── 00007_uniq.sql │ │ │ │ ├── 00013_sorting_of_nested.sql │ │ │ │ ├── 00035_aggregate_keys128.sql │ │ │ │ ├── 00036_aggregate_hashed.sql │ │ │ │ ├── 00017_aggregation_uninitialized_memory.sql │ │ │ │ ├── 00016_any_if_distributed_cond_always_false.sql │ │ │ │ ├── 00034_aggregate_key_fixed_string.sql │ │ │ │ ├── 00012_sorting_distributed.sql │ │ │ │ ├── 00053_replicate_segfault.sql │ │ │ │ ├── 00086_array_reduce.sql │ │ │ │ ├── 00009_uniq_distributed.sql │ │ │ │ ├── 00073_uniq_array.sql │ │ │ │ ├── 00069_duplicate_aggregation_keys.sql │ │ │ │ ├── 00076_system_columns_bytes.sql │ │ │ │ ├── 00037_uniq_state_merge1.sql │ │ │ │ ├── 00046_uniq_upto_distributed.sql │ │ │ │ ├── 00141_transform.sql │ │ │ │ ├── 00083_array_filter.sql │ │ │ │ ├── 00144_functions_of_aggregation_states.sql │ │ │ │ ├── 00020_distinct_order_by_distributed.sql │ │ │ │ ├── 00052_group_by_in.sql │ │ │ │ ├── 00172_early_constant_folding.sql │ │ │ │ ├── 00080_array_join_and_union.sql │ │ │ │ ├── 00143_transform_non_const_default.sql │ │ │ │ ├── 00004_top_counters.sql │ │ │ │ ├── 00153_aggregate_arena_race.sql │ │ │ │ ├── 00066_sorting_distributed_many_replicas.sql │ │ │ │ ├── 00038_uniq_state_merge2.sql │ │ │ │ ├── 00050_min_max.sql │ │ │ │ ├── 00147_global_in_aggregate_function.sql │ │ │ │ ├── 00088_global_in_one_shard_and_rows_before_limit.sql │ │ │ │ ├── 
00022_merge_prewhere.sql │ │ │ │ ├── 00047_bar.sql │ │ │ │ ├── 00097_constexpr_in_index.sql │ │ │ │ ├── 00162_mmap_compression_none.sql │ │ │ │ ├── 00067_union_all.sql │ │ │ │ ├── 00049_max_string_if.sql │ │ │ │ ├── 00176_distinct_limit_by_limit_bug_43377.sql │ │ │ │ ├── 00146_aggregate_function_uniq.sql │ │ │ │ ├── 00023_totals_limit.sql │ │ │ │ ├── 00056_view.sql │ │ │ │ ├── 00093_prewhere_array_join.sql │ │ │ │ ├── 00043_any_left_join.sql │ │ │ │ ├── 00051_min_max_array.sql │ │ │ │ ├── 00095_hyperscan_profiler.sql │ │ │ │ ├── 00062_loyalty.sql │ │ │ │ ├── 00075_left_array_join.sql │ │ │ │ ├── 00139_like.sql │ │ │ │ ├── 00084_external_aggregation.sql │ │ │ │ ├── 00042_any_left_join.sql │ │ │ │ ├── 00149_quantiles_timing_distributed.sql │ │ │ │ ├── 00087_where_0.sql │ │ │ │ ├── 00094_order_by_array_join_limit.sql │ │ │ │ ├── 00044_any_left_join_string.sql │ │ │ │ ├── 00078_group_by_arrays.sql │ │ │ │ ├── 00010_quantiles_segfault.sql │ │ │ │ ├── 00154_avro.sql │ │ │ │ ├── 00079_array_join_not_used_joined_column.sql │ │ │ │ ├── 00171_grouping_aggregated_transform_bug.sql │ │ │ │ ├── 00156_max_execution_speed_sample_merge.sql │ │ │ │ ├── 00166_explain_estimate.sql │ │ │ │ ├── 00031_array_enumerate_uniq.sql │ │ │ │ ├── 00167_read_bytes_from_fs.sql │ │ │ │ ├── 00150_quantiles_timing_precision.sql │ │ │ │ ├── 00061_storage_buffer.sql │ │ │ │ ├── 00164_quantileBfloat16.sql │ │ │ │ ├── 00173_group_by_use_nulls.sql │ │ │ │ ├── 00065_loyalty_with_storage_join.sql │ │ │ │ └── 00151_order_by_read_in_order.sql │ │ ├── select_with_distinct.sql │ │ ├── select_order_by_timestamp.sql │ │ ├── select_simple_field_alias.sql │ │ ├── select_simple_with_top_clause.sql │ │ ├── select_when_condition.sql │ │ ├── select_with_number_field.sql │ │ ├── select_with_string_expr.sql │ │ ├── select_with_distinct_on_keyword.sql │ │ ├── select_column_alias_string.sql │ │ ├── select_with_multi_union.sql │ │ ├── format │ │ │ ├── select_expr.sql │ │ │ ├── select_keyword_alias_no_as.sql │ │ │ ├── 
select_with_placeholder.sql │ │ │ ├── select_with_single_quote_table.sql │ │ │ ├── select_with_join_only.sql │ │ │ ├── select_with_distinct_keyword.sql │ │ │ ├── select_simple_with_top_clause.sql │ │ │ ├── select_with_variable.sql │ │ │ ├── select_order_by_timestamp.sql │ │ │ ├── select_simple_field_alias.sql │ │ │ ├── select_when_condition.sql │ │ │ ├── select_with_distinct.sql │ │ │ ├── select_with_string_expr.sql │ │ │ ├── select_with_number_field.sql │ │ │ ├── select_with_multi_line_comment.sql │ │ │ ├── select_with_distinct_on_keyword.sql │ │ │ ├── select_with_literal_table_name.sql │ │ │ ├── select_with_multi_union.sql │ │ │ ├── select_column_alias_string.sql │ │ │ ├── select_simple_with_bracket.sql │ │ │ ├── select_with_keyword_placeholder.sql │ │ │ ├── select_without_from_where.sql │ │ │ ├── select_with_multi_union_distinct.sql │ │ │ ├── select_table_alias_without_keyword.sql │ │ │ ├── select_simple_with_group_by_with_cube_totals.sql │ │ │ ├── select_with_multi_except.sql │ │ │ ├── select_with_union_distinct.sql │ │ │ ├── select_simple_with_is_null.sql │ │ │ ├── select_table_function_with_query.sql │ │ │ ├── set_simple.sql │ │ │ ├── select_cast.sql │ │ │ ├── select_item_with_modifiers.sql │ │ │ ├── select_with_left_join.sql │ │ │ ├── select_simple_with_cte_with_column_aliases.sql │ │ │ ├── select_simple_with_with_clause.sql │ │ │ ├── select_order_by_with_fill_staleness.sql │ │ │ ├── select_order_by_with_fill_basic.sql │ │ │ ├── select_simple_with_is_not_null.sql │ │ │ ├── select_order_by_with_fill_step.sql │ │ │ ├── select_order_by_with_fill_from_to.sql │ │ │ ├── select_order_by_with_fill_interpolate_no_columns.sql │ │ │ ├── select_with_keyword_in_group_by.sql │ │ │ ├── select_case_when_exists.sql │ │ │ ├── query_with_expr_compare.sql │ │ │ ├── select_order_by_with_fill_interpolate.sql │ │ │ ├── select_json_type.sql │ │ │ ├── create_window_view.sql │ │ │ ├── select_with_group_by.sql │ │ │ ├── select_simple.sql │ │ │ ├── select_with_multi_join.sql │ │ │ ├── 
select_with_query_parameter.sql │ │ │ └── access_tuple_with_dot.sql │ │ ├── select_simple_with_bracket.sql │ │ ├── select_with_literal_table_name.sql │ │ ├── select_with_keyword_placeholder.sql │ │ ├── select_without_from_where.sql │ │ ├── select_with_multi_union_distinct.sql │ │ ├── select_table_alias_without_keyword.sql │ │ ├── select_with_multi_line_comment.sql │ │ ├── select_simple_with_group_by_with_cube_totals.sql │ │ ├── select_with_multi_except.sql │ │ ├── select_with_union_distinct.sql │ │ ├── select_cast.sql │ │ ├── select_simple_with_is_null.sql │ │ ├── select_item_with_modifiers.sql │ │ ├── set_simple.sql │ │ ├── select_table_function_with_query.sql │ │ ├── select_order_by_with_fill_staleness.sql │ │ ├── select_order_by_with_fill_basic.sql │ │ ├── select_simple_with_is_not_null.sql │ │ ├── select_simple_with_cte_with_column_aliases.sql │ │ ├── select_simple_with_with_clause.sql │ │ ├── select_order_by_with_fill_from_to.sql │ │ ├── select_order_by_with_fill_step.sql │ │ ├── select_with_left_join.sql │ │ ├── select_with_keyword_in_group_by.sql │ │ ├── select_order_by_with_fill_interpolate_no_columns.sql │ │ ├── query_with_expr_compare.sql │ │ ├── select_case_when_exists.sql │ │ ├── select_order_by_with_fill_interpolate.sql │ │ ├── select_json_type.sql │ │ ├── select_simple.sql │ │ ├── create_window_view.sql │ │ ├── select_with_query_parameter.sql │ │ ├── access_tuple_with_dot.sql │ │ ├── select_with_group_by.sql │ │ ├── select_with_multi_join.sql │ │ ├── select_with_window_function.sql │ │ ├── select_extract_with_regex.sql │ │ ├── select_window_params.sql │ │ ├── select_with_settings_additional_table_filters.sql │ │ └── select_window_cte.sql │ └── dml │ │ ├── delete_from.sql │ │ ├── insert_select_without_from.sql │ │ ├── insert_with_keyword_placeholder.sql │ │ ├── alter_table_with_modify_remove_ttl.sql │ │ ├── alter_table_modify_query.sql │ │ ├── alter_table_with_modify_ttl.sql │ │ ├── insert_with_select.sql │ │ ├── format │ │ ├── delete_from.sql │ │ ├── 
insert_select_without_from.sql │ │ ├── insert_with_keyword_placeholder.sql │ │ ├── alter_table_with_modify_remove_ttl.sql │ │ ├── insert_with_select.sql │ │ ├── alter_table_with_modify_ttl.sql │ │ ├── alter_table_modify_query.sql │ │ ├── create_column_with_ttl.sql │ │ ├── alter_table_with_comment.sql │ │ ├── insert_with_format.sql │ │ ├── insert_with_placeholder.sql │ │ └── insert_values.sql │ │ ├── create_column_with_ttl.sql │ │ ├── alter_table_with_comment.sql │ │ ├── insert_with_format.sql │ │ ├── insert_with_placeholder.sql │ │ ├── insert_values.sql │ │ └── output │ │ ├── delete_from.sql.golden.json │ │ └── alter_table_with_modify_remove_ttl.sql.golden.json ├── type.go ├── helper.go ├── set_test.go └── set.go ├── .gitignore ├── go.mod └── Makefile /parser/testdata/basic/use_database.sql: -------------------------------------------------------------------------------- 1 | USE test; -------------------------------------------------------------------------------- /parser/testdata/ddl/show_tables.sql: -------------------------------------------------------------------------------- 1 | SHOW TABLES -------------------------------------------------------------------------------- /parser/testdata/query/select_expr.sql: -------------------------------------------------------------------------------- 1 | SELECT 1+1 -------------------------------------------------------------------------------- /parser/testdata/ddl/show_databases.sql: -------------------------------------------------------------------------------- 1 | SHOW DATABASES -------------------------------------------------------------------------------- /parser/testdata/ddl/desc_table_without_table_keyword.sql: -------------------------------------------------------------------------------- 1 | DESC mytable -------------------------------------------------------------------------------- /parser/testdata/ddl/show_create_table.sql: -------------------------------------------------------------------------------- 1 
| SHOW CREATE TABLE mytable -------------------------------------------------------------------------------- /parser/testdata/ddl/show_databases_limit.sql: -------------------------------------------------------------------------------- 1 | SHOW DATABASES LIMIT 10 -------------------------------------------------------------------------------- /parser/testdata/ddl/desc_table_with_table_keyword.sql: -------------------------------------------------------------------------------- 1 | DESC TABLE mytable -------------------------------------------------------------------------------- /parser/testdata/ddl/show_databases_format.sql: -------------------------------------------------------------------------------- 1 | SHOW DATABASES FORMAT JSON -------------------------------------------------------------------------------- /parser/testdata/ddl/show_databases_ilike.sql: -------------------------------------------------------------------------------- 1 | SHOW DATABASES ILIKE 'Test%' -------------------------------------------------------------------------------- /parser/testdata/ddl/show_databases_like.sql: -------------------------------------------------------------------------------- 1 | SHOW DATABASES LIKE 'test%' -------------------------------------------------------------------------------- /parser/testdata/ddl/create_database.sql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE IF NOT EXISTS `test` -------------------------------------------------------------------------------- /parser/testdata/ddl/describe_table_without_table_keyword.sql: -------------------------------------------------------------------------------- 1 | DESCRIBE mytable -------------------------------------------------------------------------------- /parser/testdata/ddl/show_databases_not_like.sql: -------------------------------------------------------------------------------- 1 | SHOW DATABASES NOT LIKE 'temp%' 
-------------------------------------------------------------------------------- /parser/testdata/ddl/describe_table_with_table_keyword.sql: -------------------------------------------------------------------------------- 1 | DESCRIBE TABLE mytable -------------------------------------------------------------------------------- /parser/testdata/ddl/drop_database.sql: -------------------------------------------------------------------------------- 1 | DROP DATABASE IF EXISTS datbase_name; 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/drop_table_basic.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS test.table_name; 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/show_databases_not_ilike.sql: -------------------------------------------------------------------------------- 1 | SHOW DATABASES NOT ILIKE 'Temp%' -------------------------------------------------------------------------------- /parser/testdata/query/select_keyword_alias_no_as.sql: -------------------------------------------------------------------------------- 1 | SELECT 'Joe' name FROM users -------------------------------------------------------------------------------- /parser/testdata/query/select_with_placeholder.sql: -------------------------------------------------------------------------------- 1 | SELECT * FROM t0 WHERE id = ?; -------------------------------------------------------------------------------- /parser/testdata/ddl/show_databases_format_string.sql: -------------------------------------------------------------------------------- 1 | SHOW DATABASES FORMAT 'TabSeparated' -------------------------------------------------------------------------------- /parser/testdata/dml/delete_from.sql: -------------------------------------------------------------------------------- 1 | DELETE FROM hits WHERE Title LIKE 
'%hello%'; 2 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_join_only.sql: -------------------------------------------------------------------------------- 1 | SELECT * FROM "t1" JOIN "t2" ON true 2 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_single_quote_table.sql: -------------------------------------------------------------------------------- 1 | SELECT * FROM 'test_table' 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/truncate_table_basic.sql: -------------------------------------------------------------------------------- 1 | TRUNCATE TABLE IF EXISTS test.table_name; 2 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_distinct_keyword.sql: -------------------------------------------------------------------------------- 1 | SELECT DISTINCT record_id FROM records -------------------------------------------------------------------------------- /parser/testdata/query/select_with_variable.sql: -------------------------------------------------------------------------------- 1 | WITH $abc AS (SELECT 1 AS a) SELECT * FROM $abc -------------------------------------------------------------------------------- /parser/testdata/basic/quantile_functions.sql: -------------------------------------------------------------------------------- 1 | SELECT quantile(0.9)(x), quantiles(0.5, 0.9)(x); 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/show_databases_outfile.sql: -------------------------------------------------------------------------------- 1 | SHOW DATABASES INTO OUTFILE '/tmp/databases.txt' -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00001_count_hits.sql: 
-------------------------------------------------------------------------------- 1 | SELECT count() FROM test.hits 2 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_distinct.sql: -------------------------------------------------------------------------------- 1 | SELECT count(DISTINCT(RECORD_ID)) FROM RECORD_TABLE -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_detach_partition.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE db.test DETACH PARTITION '2021-10-01'; -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_modify_column.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE t1 MODIFY COLUMN f1 String COMMENT 'test'; -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_modify_column_remove.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE t1 MODIFY COLUMN f1 REMOVE COMMENT; -------------------------------------------------------------------------------- /parser/testdata/ddl/check.sql: -------------------------------------------------------------------------------- 1 | CHECK TABLE test_table; 2 | CHECK TABLE test_table PARTITION 'col'; 3 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_function_simple.sql: -------------------------------------------------------------------------------- 1 | CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b; -------------------------------------------------------------------------------- /parser/testdata/dml/insert_select_without_from.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO t (c) SELECT 1 WHERE 1 
= 1; 2 | 3 | -------------------------------------------------------------------------------- /parser/testdata/query/select_order_by_timestamp.sql: -------------------------------------------------------------------------------- 1 | SELECT Timestamp FROM events ORDER BY Timestamp; -------------------------------------------------------------------------------- /parser/testdata/query/select_simple_field_alias.sql: -------------------------------------------------------------------------------- 1 | SELECT field0, field1 as x, field2 y from events; -------------------------------------------------------------------------------- /parser/testdata/query/select_simple_with_top_clause.sql: -------------------------------------------------------------------------------- 1 | SELECT TOP 10 my_column FROM tableName; 2 | -------------------------------------------------------------------------------- /parser/testdata/query/select_when_condition.sql: -------------------------------------------------------------------------------- 1 | select case when false then 'hello' else 'world' end; -------------------------------------------------------------------------------- /parser/testdata/query/select_with_number_field.sql: -------------------------------------------------------------------------------- 1 | SELECT foo, bar.1, foo.2 FROM foo ARRAY JOIN m as bar -------------------------------------------------------------------------------- /parser/testdata/query/select_with_string_expr.sql: -------------------------------------------------------------------------------- 1 | WITH "abc" AS (SELECT 1 AS a) SELECT * FROM "abc" 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_replace_partition.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE t2 REPLACE PARTITION 'partition' FROM t1; -------------------------------------------------------------------------------- 
/parser/testdata/dml/insert_with_keyword_placeholder.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO t (c) VALUES ({name :String}); 2 | 3 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00002_count_visits.sql: -------------------------------------------------------------------------------- 1 | SELECT sum(Sign) FROM test.visits 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_delete.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.events DELETE WHERE created_at < '2023-01-01'; 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_remove_ttl.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.events ON CLUSTER 'default_cluster' REMOVE TTL; -------------------------------------------------------------------------------- /parser/testdata/query/select_with_distinct_on_keyword.sql: -------------------------------------------------------------------------------- 1 | SELECT DISTINCT ON(album,artist) record_id FROM records -------------------------------------------------------------------------------- /parser/testdata/basic/format/use_database.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | USE test; 3 | 4 | -- Format SQL: 5 | USE test; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_drop_index.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.event_local ON CLUSTER 'default_cluster' DROP INDEX f1; -------------------------------------------------------------------------------- 
/parser/testdata/ddl/alter_table_reset_setting.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE example_table RESET SETTING max_part_loading_threads; -------------------------------------------------------------------------------- /parser/testdata/ddl/format/show_tables.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SHOW TABLES 3 | 4 | -- Format SQL: 5 | SHOW TABLES; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00048_min_max.sql: -------------------------------------------------------------------------------- 1 | SELECT min(EventDate), max(EventDate) FROM test.hits 2 | -------------------------------------------------------------------------------- /parser/testdata/query/select_column_alias_string.sql: -------------------------------------------------------------------------------- 1 | SELECT 'abc' as "value2"; 2 | 3 | SELECT $abc, a$$bc, abc$$; 4 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_multi_union.sql: -------------------------------------------------------------------------------- 1 | SELECT 1 AS v1 UNION ALL SELECT 2 AS v2 UNION ALL SELECT 3 AS v3 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_clear_index.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE my_table CLEAR INDEX my_index_name IN PARTITION partition_name; -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_freeze_no_specify_partition.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.events ON CLUSTER 'default_cluster' freeze; 
-------------------------------------------------------------------------------- /parser/testdata/ddl/drop_table_with_on_clsuter.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS test.table_name ON CLUSTER 'default_cluster'; 2 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_expr.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT 1+1 3 | 4 | -- Format SQL: 5 | SELECT 1 + 1; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/select_simple_with_bracket.sql: -------------------------------------------------------------------------------- 1 | SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res, f1["abc"] as f2 2 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_literal_table_name.sql: -------------------------------------------------------------------------------- 1 | select table_name from "information_schema"."tables" limit 1; 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_clear_column.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE my_table CLEAR COLUMN my_column_name IN PARTITION partition_name; -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_clear_projection.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE my_table CLEAR PROJECTION hello IN PARTITION partition_name; -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_drop_projection.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.event_local 
ON CLUSTER 'default_cluster' DROP PROJECTION f1; -------------------------------------------------------------------------------- /parser/testdata/ddl/drop_table_with_no_delay.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS test.table_name ON CLUSTER 'default_cluster' NO DELAY; 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/show_databases.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SHOW DATABASES 3 | 4 | -- Format SQL: 5 | SHOW DATABASES; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/systems.sql: -------------------------------------------------------------------------------- 1 | SYSTEM FLUSH LOGS; 2 | SYSTEM DROP UNCOMPRESSED CACHE; 3 | SYSTEM DROP FILESYSTEM CACHE; 4 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00005_filtering.sql: -------------------------------------------------------------------------------- 1 | SELECT count() FROM test.hits WHERE AdvEngineID != 0 2 | 3 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_keyword_placeholder.sql: -------------------------------------------------------------------------------- 1 | SELECT {name :String}; 2 | SELECT toString({name :String}); 3 | 4 | -------------------------------------------------------------------------------- /parser/testdata/query/select_without_from_where.sql: -------------------------------------------------------------------------------- 1 | SELECT 1 WHERE 1 = 1; 2 | SELECT {p :UInt8} WHERE {p :UInt8} = 1; 3 | 4 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_drop_column.sql: 
-------------------------------------------------------------------------------- 1 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' DROP COLUMN IF EXISTS f1; -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_drop_partition.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.events ON CLUSTER 'default_cluster' drop partition '2023-07-18'; -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_rename_column.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE my_table RENAME COLUMN old_column_name TO new_column_name; 2 | 3 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_codec_no_args.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE shark_attacks ( 2 | timestamp DateTime CODEC(DoubleDelta), 3 | ); -------------------------------------------------------------------------------- /parser/testdata/ddl/show_databases_comprehensive.sql: -------------------------------------------------------------------------------- 1 | SHOW DATABASES LIKE 'prod%' LIMIT 5 INTO OUTFILE '/tmp/prod_dbs.txt' FORMAT JSON -------------------------------------------------------------------------------- /parser/testdata/query/select_with_multi_union_distinct.sql: -------------------------------------------------------------------------------- 1 | SELECT 1 AS v1 UNION DISTINCT SELECT 2 AS v2 UNION DISTINCT SELECT 3 AS v3 -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_freeze_partition.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.events ON CLUSTER 'default_cluster' freeze partition '2023-07-18';; 
-------------------------------------------------------------------------------- /parser/testdata/ddl/format/desc_table.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | DESC TABLE mytable 3 | 4 | -- Format SQL: 5 | DESCRIBE TABLE mytable; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00006_agregates.sql: -------------------------------------------------------------------------------- 1 | SELECT sum(AdvEngineID), count(), avg(ResolutionWidth) FROM test.hits 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00021_1_select_with_in.sql: -------------------------------------------------------------------------------- 1 | select sum(Sign) from test.visits where CounterID in (942285); 2 | -------------------------------------------------------------------------------- /parser/testdata/query/select_table_alias_without_keyword.sql: -------------------------------------------------------------------------------- 1 | SELECT t1.Timestamp FROM my_table t1 INNER JOIN my_other_table t2 ON t1.a=t2.b -------------------------------------------------------------------------------- /parser/testdata/query/select_with_multi_line_comment.sql: -------------------------------------------------------------------------------- 1 | select 2 | -- first line 3 | -- second line 4 | * 5 | from 6 | t0 -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_add_column.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD COLUMN f1 String AFTER f0; 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_update.sql: 
-------------------------------------------------------------------------------- 1 | ALTER TABLE test.users UPDATE status = 'active', updated_at = now() WHERE status = 'pending'; 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/describe_table.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | DESCRIBE TABLE mytable 3 | 4 | -- Format SQL: 5 | DESCRIBE TABLE mytable; 6 | -------------------------------------------------------------------------------- /parser/testdata/dml/alter_table_with_modify_remove_ttl.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE infra.flow_processed_emails_local ON CLUSTER default_cluster REMOVE TTL; -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_modify_setting.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE example_table MODIFY SETTING max_part_loading_threads=8, max_parts_in_total=50000; -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_update_with_cluster.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE db.table ON CLUSTER cluster1 UPDATE column1 = column1 + 1 WHERE id > 100; 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/desc_table_without_table_keyword.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | DESC mytable 3 | 4 | -- Format SQL: 5 | DESCRIBE mytable; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/truncate_temporary_table_on_clsuter.sql: -------------------------------------------------------------------------------- 1 | TRUNCATE TEMPORARY TABLE 
IF EXISTS test.table_name ON CLUSTER 'default_cluster'; 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00021_2_select_with_in.sql: -------------------------------------------------------------------------------- 1 | select sum(Sign) from test.visits where CounterID in (942285, 577322); 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_database_replicated.sql: -------------------------------------------------------------------------------- 1 | CREATE DATABASE IF NOT EXISTS `test` ENGINE=Replicated('/root/test_local', 'shard', 'replica'); 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/describe_table_without_table_keyword.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | DESCRIBE mytable 3 | 4 | -- Format SQL: 5 | DESCRIBE mytable; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/show_create_table.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SHOW CREATE TABLE mytable 3 | 4 | -- Format SQL: 5 | SHOW CREATE TABLE mytable; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/show_databases_limit.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SHOW DATABASES LIMIT 10 3 | 4 | -- Format SQL: 5 | SHOW DATABASES LIMIT 10; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_delete_with_cluster.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.events ON CLUSTER 'default_cluster' DELETE WHERE id = 123 AND status = 'deleted'; 2 | 
-------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_materialize_index.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE visits_order MATERIALIZE INDEX IF EXISTS user_name_index IN PARTITION '20240403'; 2 | 3 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_with_qbit.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE test.qbit_example ( 2 | id UInt32, 3 | vec QBit(Float32, 8) 4 | ) ENGINE = Memory; 5 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/desc_table_with_table_keyword.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | DESC TABLE mytable 3 | 4 | -- Format SQL: 5 | DESCRIBE TABLE mytable; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/show_databases_like.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SHOW DATABASES LIKE 'test%' 3 | 4 | -- Format SQL: 5 | SHOW DATABASES LIKE 'test%'; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00011_sorting.sql: -------------------------------------------------------------------------------- 1 | SELECT EventTime::DateTime('Asia/Dubai') FROM test.hits ORDER BY EventTime DESC LIMIT 10 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00177_select_from_gcs.sql: -------------------------------------------------------------------------------- 1 | SELECT * FROM gcs(gcs_creds,url='https://storage.googleapis.com/some-bucket/some-path/'); 
-------------------------------------------------------------------------------- /parser/testdata/query/select_simple_with_group_by_with_cube_totals.sql: -------------------------------------------------------------------------------- 1 | SELECT a, COUNT(b) FROM group_by_all GROUP BY CUBE(a) WITH CUBE WITH TOTALS ORDER BY a; -------------------------------------------------------------------------------- /parser/type.go: -------------------------------------------------------------------------------- 1 | package parser 2 | 3 | var intervalUnits = NewSet("MILLISECOND", "SECOND", "MINUTE", "HOUR", "DAY", "WEEK", "MONTH", "QUARTER", "YEAR") 4 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_reset_multiple_settings.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE example_table RESET SETTING max_part_loading_threads, max_parts_in_total, another_setting; -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_update_in_partition.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.users UPDATE status = 'inactive' IN PARTITION '2024-01-01' WHERE status = 'active'; 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_live_view_basic.sql: -------------------------------------------------------------------------------- 1 | CREATE LIVE VIEW my_live_view 2 | WITH TIMEOUT 10 TO my_destination(id String) 3 | AS SELECT id FROM my_table; 4 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/describe_table_with_table_keyword.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | DESCRIBE TABLE mytable 3 | 4 | -- Format SQL: 5 | DESCRIBE TABLE mytable; 6 | 
-------------------------------------------------------------------------------- /parser/testdata/ddl/format/show_databases_format.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SHOW DATABASES FORMAT JSON 3 | 4 | -- Format SQL: 5 | SHOW DATABASES FORMAT 'JSON'; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/show_databases_ilike.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SHOW DATABASES ILIKE 'Test%' 3 | 4 | -- Format SQL: 5 | SHOW DATABASES ILIKE 'Test%'; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_multi_except.sql: -------------------------------------------------------------------------------- 1 | SELECT number FROM numbers(1, 10) EXCEPT SELECT number FROM numbers(3, 6) EXCEPT SELECT number FROM numbers(8, 9) -------------------------------------------------------------------------------- /parser/testdata/dml/alter_table_modify_query.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.some_mv ON CLUSTER cluster MODIFY QUERY SELECT field1, field2 FROM test.some_table WHERE count >= 3; -------------------------------------------------------------------------------- /parser/testdata/dml/alter_table_with_modify_ttl.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE infra.flow_processed_emails_local ON CLUSTER default_cluster MODIFY TTL created_at + INTERVAL 3 YEAR; -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00008_uniq.sql: -------------------------------------------------------------------------------- 1 | SELECT uniq(UserID), uniqIf(UserID, CounterID = 800784), uniqIf(FUniqID, RegionID = 213) FROM test.hits 2 | 
-------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_materialize_projection.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE visits_order MATERIALIZE PROJECTION IF EXISTS user_name_projection IN PARTITION '20240403'; 2 | 3 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_view_basic.sql: -------------------------------------------------------------------------------- 1 | CREATE VIEW IF NOT EXISTS my_view(col1 String, col2 String) 2 | AS 3 | SELECT 4 | id, 5 | name 6 | FROM 7 | my_table; -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_database.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE DATABASE IF NOT EXISTS `test` 3 | 4 | -- Format SQL: 5 | CREATE DATABASE IF NOT EXISTS `test`; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/show_databases_not_ilike.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SHOW DATABASES NOT ILIKE 'Temp%' 3 | 4 | -- Format SQL: 5 | SHOW DATABASES NOT ILIKE 'Temp%'; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/show_databases_not_like.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SHOW DATABASES NOT LIKE 'temp%' 3 | 4 | -- Format SQL: 5 | SHOW DATABASES NOT LIKE 'temp%'; 6 | -------------------------------------------------------------------------------- /parser/testdata/dml/insert_with_select.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO test.visits_null 2 | SELECT 3 | CounterID, 4 | StartDate, 5 | Sign, 6 | UserID 7 | FROM 
test.visits; -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00032_aggregate_key64.sql: -------------------------------------------------------------------------------- 1 | SELECT SearchEngineID AS k1, count() AS c FROM test.hits GROUP BY k1 ORDER BY c DESC, k1 LIMIT 10 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00085_monotonic_evaluation_segfault.sql: -------------------------------------------------------------------------------- 1 | SELECT any(0) FROM test.visits WHERE (toInt32(toDateTime(StartDate))) > 1000000000; 2 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_keyword_alias_no_as.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT 'Joe' name FROM users 3 | 4 | -- Format SQL: 5 | SELECT 'Joe' AS name FROM users; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_placeholder.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT * FROM t0 WHERE id = ?; 3 | 4 | -- Format SQL: 5 | SELECT * FROM t0 WHERE id = ?; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_union_distinct.sql: -------------------------------------------------------------------------------- 1 | SELECT replica_name FROM system.ha_replicas UNION DISTINCT SELECT replica_name FROM system.ha_unique_replicas format JSON -------------------------------------------------------------------------------- /parser/testdata/ddl/format/drop_database.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | DROP DATABASE IF EXISTS datbase_name; 3 | 4 | 5 | -- 
Format SQL: 6 | DROP DATABASE IF EXISTS datbase_name; 7 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00014_filtering_arrays.sql: -------------------------------------------------------------------------------- 1 | SELECT GeneralInterests FROM test.hits WHERE AdvEngineID != 0 ORDER BY GeneralInterests DESC LIMIT 10 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00033_aggregate_key_string.sql: -------------------------------------------------------------------------------- 1 | SELECT SearchPhrase AS k1, count() AS c FROM test.hits GROUP BY k1 ORDER BY c DESC, k1 LIMIT 10 2 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_single_quote_table.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT * FROM 'test_table' 3 | 4 | 5 | -- Format SQL: 6 | SELECT * FROM 'test_table'; 7 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/drop_table_basic.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | DROP TABLE IF EXISTS test.table_name; 3 | 4 | 5 | -- Format SQL: 6 | DROP TABLE IF EXISTS test.table_name; 7 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00015_totals_and_no_aggregate_functions.sql: -------------------------------------------------------------------------------- 1 | SELECT AdvEngineID FROM test.hits GROUP BY AdvEngineID WITH TOTALS ORDER BY AdvEngineID 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00030_array_enumerate_uniq.sql: 
-------------------------------------------------------------------------------- 1 | SELECT max(arrayJoin(arrayEnumerateUniq(arrayMap(x -> intDiv(x, 10), URLCategories)))) FROM test.hits 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00039_primary_key.sql: -------------------------------------------------------------------------------- 1 | SELECT count() FROM test.hits WHERE CounterID < 10000; 2 | SELECT count() FROM test.hits WHERE 10000 > CounterID; 3 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00060_move_to_prewhere_and_sets.sql: -------------------------------------------------------------------------------- 1 | SET optimize_move_to_prewhere = 1; 2 | SELECT uniq(URL) FROM test.hits WHERE TraficSourceID IN (7); 3 | -------------------------------------------------------------------------------- /parser/testdata/query/select_cast.sql: -------------------------------------------------------------------------------- 1 | select cast(1 as Float64) as value; 2 | select cast(1, 'Float64') as value; 3 | select (1 as Float64) as value; 4 | select 1::Float64 as value; -------------------------------------------------------------------------------- /parser/testdata/query/select_simple_with_is_null.sql: -------------------------------------------------------------------------------- 1 | SELECT f0,f1,f2,f3 as a0 2 | FROM test.events_local 3 | WHERE (f0 IN ('foo', 'bar', 'test')) AND (f1 = 'testing') AND f2 IS NULL -------------------------------------------------------------------------------- /parser/testdata/ddl/format/show_databases_format_string.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SHOW DATABASES FORMAT 'TabSeparated' 3 | 4 | -- Format SQL: 5 | SHOW DATABASES FORMAT 'TabSeparated'; 6 | 
-------------------------------------------------------------------------------- /parser/testdata/dml/format/delete_from.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | DELETE FROM hits WHERE Title LIKE '%hello%'; 3 | 4 | 5 | -- Format SQL: 6 | DELETE FROM hits WHERE Title LIKE '%hello%'; 7 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00021_3_select_with_in.sql: -------------------------------------------------------------------------------- 1 | select 1 IN (1, 2, 3); 2 | 3 | SELECT count() FROM remote('localhost', test, hits) WHERE CounterID IN (598875); 4 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00055_index_and_not.sql: -------------------------------------------------------------------------------- 1 | SELECT count() FROM test.hits WHERE NOT (EventDate >= toDate('2015-01-01') AND EventDate < toDate('2015-02-01')) 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00059_merge_sorting_empty_array_joined.sql: -------------------------------------------------------------------------------- 1 | SELECT CounterID FROM test.visits ARRAY JOIN Goals.ID WHERE CounterID = 942285 ORDER BY CounterID 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00068_subquery_in_prewhere.sql: -------------------------------------------------------------------------------- 1 | SELECT count() FROM test.hits PREWHERE UserID IN (SELECT UserID FROM test.hits WHERE CounterID = 800784); 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00160_decode_xml_component.sql: 
-------------------------------------------------------------------------------- 1 | SELECT sum(DISTINCT sipHash64(decodeXMLComponent(Title) AS decoded)) FROM test.hits WHERE Title != decoded; 2 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_join_only.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT * FROM "t1" JOIN "t2" ON true 3 | 4 | 5 | -- Format SQL: 6 | SELECT * FROM "t1" JOIN "t2" ON true; 7 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/truncate_table_basic.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | TRUNCATE TABLE IF EXISTS test.table_name; 3 | 4 | 5 | -- Format SQL: 6 | TRUNCATE TABLE IF EXISTS test.table_name; 7 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_distinct_keyword.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT DISTINCT record_id FROM records 3 | 4 | -- Format SQL: 5 | SELECT DISTINCT record_id FROM records; 6 | -------------------------------------------------------------------------------- /parser/testdata/dml/format/insert_select_without_from.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | INSERT INTO t (c) SELECT 1 WHERE 1 = 1; 3 | 4 | 5 | 6 | -- Format SQL: 7 | INSERT INTO t (c) SELECT 1 WHERE 1 = 1; 8 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00045_uniq_upto.sql: -------------------------------------------------------------------------------- 1 | SELECT RegionID, uniqExact(UserID) AS u1, uniqUpTo(10)(UserID) AS u2 FROM test.visits GROUP BY RegionID HAVING u1 <= 11 AND u1 != u2 
-------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00142_system_columns.sql: -------------------------------------------------------------------------------- 1 | SELECT table, name, type, default_kind, default_expression FROM system.columns WHERE database = 'test' AND table = 'hits' 2 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_simple_with_top_clause.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT TOP 10 my_column FROM tableName; 3 | 4 | 5 | -- Format SQL: 6 | SELECT TOP 10 my_column FROM tableName; 7 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/show_databases_outfile.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SHOW DATABASES INTO OUTFILE '/tmp/databases.txt' 3 | 4 | -- Format SQL: 5 | SHOW DATABASES INTO OUTFILE '/tmp/databases.txt'; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00007_uniq.sql: -------------------------------------------------------------------------------- 1 | SELECT RegionID, uniq(UserID) AS u FROM test.hits WHERE CounterID = 800784 GROUP BY RegionID ORDER BY u DESC, RegionID LIMIT 10 -- nothing 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00013_sorting_of_nested.sql: -------------------------------------------------------------------------------- 1 | SELECT ParsedParams.Key1 FROM test.visits FINAL WHERE VisitID != 0 AND notEmpty(ParsedParams.Key1) ORDER BY VisitID LIMIT 10 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00035_aggregate_keys128.sql: 
-------------------------------------------------------------------------------- 1 | SELECT SearchEngineID AS k1, AdvEngineID AS k2, count() AS c FROM test.hits GROUP BY k1, k2 ORDER BY c DESC, k1, k2 LIMIT 10 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00036_aggregate_hashed.sql: -------------------------------------------------------------------------------- 1 | SELECT SearchEngineID AS k1, SearchPhrase AS k2, count() AS c FROM test.hits GROUP BY k1, k2 ORDER BY c DESC, k1, k2 LIMIT 10 2 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_variable.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | WITH $abc AS (SELECT 1 AS a) SELECT * FROM $abc 3 | 4 | -- Format SQL: 5 | WITH $abc AS (SELECT 1 AS a) SELECT * FROM $abc; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/select_item_with_modifiers.sql: -------------------------------------------------------------------------------- 1 | SELECT c0 REPLACE(c0 AS c1) FROM t0; 2 | SELECT * REPLACE(i + 1 AS i) FROM t1; 3 | SELECT * REPLACE(i + 1 AS i) EXCEPT (j) APPLY(sum) from t2; 4 | -------------------------------------------------------------------------------- /parser/testdata/query/set_simple.sql: -------------------------------------------------------------------------------- 1 | SET max_threads = 1, max_insert_threads = 0, max_block_size = 8192, min_insert_block_size_rows = 8192, min_insert_block_size_bytes = 1048576; -- lower memory usage -------------------------------------------------------------------------------- /parser/testdata/basic/format/quantile_functions.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT quantile(0.9)(x), quantiles(0.5, 0.9)(x); 3 | 4 | 5 | -- Format 
SQL: 6 | SELECT quantile(0.9)(x), quantiles(0.5, 0.9)(x); 7 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_add_projection.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE visits_order 2 | ADD PROJECTION IF NOT EXISTS user_name_projection 3 | (SELECT * GROUP BY user_name ORDER BY user_name) AFTER a.user_id; 4 | -------------------------------------------------------------------------------- /parser/testdata/dml/format/insert_with_keyword_placeholder.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | INSERT INTO t (c) VALUES ({name :String}); 3 | 4 | 5 | 6 | -- Format SQL: 7 | INSERT INTO t (c) VALUES ({name:String}); 8 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00017_aggregation_uninitialized_memory.sql: -------------------------------------------------------------------------------- 1 | SELECT DISTINCT (URLHierarchy(URL)[1]) AS q, 'x' AS w FROM test.hits WHERE CounterID = 14917930 ORDER BY URL 2 | 3 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_order_by_timestamp.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT Timestamp FROM events ORDER BY Timestamp; 3 | 4 | -- Format SQL: 5 | SELECT Timestamp FROM events ORDER BY Timestamp; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_drop_detach_partition.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE app_utc_00.app_message_as_notification_organization_sent_stats_i_d_local DROP DETACHED PARTITION '2022-05-24' SETTINGS allow_drop_detached = 1; 
-------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_detach_partition.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE db.test DETACH PARTITION '2021-10-01'; 3 | 4 | -- Format SQL: 5 | ALTER TABLE db.test DETACH PARTITION '2021-10-01'; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_modify_column_remove.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE t1 MODIFY COLUMN f1 REMOVE COMMENT; 3 | 4 | -- Format SQL: 5 | ALTER TABLE t1 MODIFY COLUMN f1 REMOVE COMMENT; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_simple_field_alias.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT field0, field1 as x, field2 y from events; 3 | 4 | -- Format SQL: 5 | SELECT field0, field1 AS x, field2 AS y FROM events; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_when_condition.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | select case when false then 'hello' else 'world' end; 3 | 4 | -- Format SQL: 5 | SELECT CASE WHEN false THEN 'hello' ELSE 'world' END; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_distinct.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT count(DISTINCT(RECORD_ID)) FROM RECORD_TABLE 3 | 4 | -- Format SQL: 5 | SELECT count(DISTINCT (RECORD_ID)) FROM RECORD_TABLE; 6 | -------------------------------------------------------------------------------- 
/parser/testdata/query/format/select_with_string_expr.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | WITH "abc" AS (SELECT 1 AS a) SELECT * FROM "abc" 3 | 4 | 5 | -- Format SQL: 6 | WITH "abc" AS (SELECT 1 AS a) SELECT * FROM "abc"; 7 | -------------------------------------------------------------------------------- /parser/testdata/query/select_table_function_with_query.sql: -------------------------------------------------------------------------------- 1 | SELECT 1, (SELECT 70) AS `power`, number 2 | FROM 3 | numbers( 4 | plus( 5 | ifNull((SELECT 1 AS bin_count, 1), 6 | 1) 7 | ) 8 | ) -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_modify_column.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE t1 MODIFY COLUMN f1 String COMMENT 'test'; 3 | 4 | -- Format SQL: 5 | ALTER TABLE t1 MODIFY COLUMN f1 String COMMENT 'test'; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00016_any_if_distributed_cond_always_false.sql: -------------------------------------------------------------------------------- 1 | -- Tags: distributed 2 | 3 | 4 | SELECT anyIf(SearchPhrase, CounterID = -1) FROM remote('127.0.0.{1,2}:9000', test, hits) 5 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00034_aggregate_key_fixed_string.sql: -------------------------------------------------------------------------------- 1 | SELECT toFixedString(substring(SearchPhrase, 1, 17), 17) AS k1, count() AS c FROM test.hits GROUP BY k1 ORDER BY c DESC, k1 LIMIT 10 2 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_number_field.sql: 
-------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT foo, bar.1, foo.2 FROM foo ARRAY JOIN m as bar 3 | 4 | -- Format SQL: 5 | SELECT foo, bar.1, foo.2 FROM foo ARRAY JOIN m AS bar; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/select_order_by_with_fill_staleness.sql: -------------------------------------------------------------------------------- 1 | SELECT number as key, 5 * number value, 'original' AS source 2 | FROM numbers(16) 3 | WHERE (number % 5) == 0 4 | ORDER BY key WITH FILL STALENESS 11; 5 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_attach_partition.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test ATTACH PARTITION '20210114'; 2 | ALTER TABLE test ATTACH PARTITION '20210114' FROM test1; 3 | ALTER TABLE test ATTACH PARTITION ID '20210114'; 4 | 5 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_json_typehints.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE t ( 2 | j JSON(message String, a.b UInt64, max_dynamic_paths=0, SKIP x, SKIP REGEXP 're') 3 | ) ENGINE = MergeTree 4 | ORDER BY tuple(); 5 | 6 | 7 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_replace_partition.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE t2 REPLACE PARTITION 'partition' FROM t1; 3 | 4 | -- Format SQL: 5 | ALTER TABLE t2 REPLACE PARTITION 'partition' FROM t1; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_function_simple.sql: 
-------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE FUNCTION linear_equation AS (x, k, b) -> k*x + b; 3 | 4 | -- Format SQL: 5 | CREATE FUNCTION linear_equation AS (x, k, b) -> k * x + b; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/select_order_by_with_fill_basic.sql: -------------------------------------------------------------------------------- 1 | SELECT n, source FROM ( 2 | SELECT toFloat32(number % 10) AS n, 'original' AS source 3 | FROM numbers(10) WHERE number % 3 = 1 4 | ) ORDER BY n WITH FILL; 5 | -------------------------------------------------------------------------------- /parser/testdata/query/select_simple_with_is_not_null.sql: -------------------------------------------------------------------------------- 1 | SELECT f0,f1,f2,f3 as a0 2 | FROM test.events_local 3 | WHERE (f0 IN ('foo', 'bar', 'test')) 4 | AND (f1 = 'testing') 5 | AND f2 IS NULL 6 | AND f3 IS NOT NULL -------------------------------------------------------------------------------- /parser/testdata/ddl/format/check.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CHECK TABLE test_table; 3 | CHECK TABLE test_table PARTITION 'col'; 4 | 5 | 6 | -- Format SQL: 7 | CHECK TABLE test_table; 8 | CHECK TABLE test_table PARTITION 'col'; 9 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00012_sorting_distributed.sql: -------------------------------------------------------------------------------- 1 | -- Tags: distributed 2 | 3 | 4 | SELECT EventTime::DateTime('Asia/Dubai') FROM remote('127.0.0.{1,2}', test, hits) ORDER BY EventTime DESC LIMIT 10 5 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00053_replicate_segfault.sql: 
-------------------------------------------------------------------------------- 1 | -- Tags: replica 2 | 3 | SELECT count() > 0 FROM (SELECT ParsedParams.Key1 AS p FROM test.visits WHERE arrayAll(y -> arrayExists(x -> y != x, p), p)) 4 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_multi_line_comment.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | select 3 | -- first line 4 | -- second line 5 | * 6 | from 7 | t0 8 | 9 | -- Format SQL: 10 | SELECT * FROM t0; 11 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_delete.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.events DELETE WHERE created_at < '2023-01-01'; 3 | 4 | 5 | -- Format SQL: 6 | ALTER TABLE test.events DELETE WHERE created_at < '2023-01-01'; 7 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_distinct_on_keyword.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT DISTINCT ON(album,artist) record_id FROM records 3 | 4 | -- Format SQL: 5 | SELECT DISTINCT ON (album, artist) record_id FROM records; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_remove_ttl.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.events ON CLUSTER 'default_cluster' REMOVE TTL; 3 | 4 | -- Format SQL: 5 | ALTER TABLE test.events ON CLUSTER 'default_cluster' REMOVE TTL; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00086_array_reduce.sql: 
-------------------------------------------------------------------------------- 1 | SELECT arrayFilter(x -> x != 1, arrayMap((a, b) -> a = b, GeneralInterests, arrayReduce('groupArray', GeneralInterests))) AS res FROM test.hits WHERE length(res) != 0; 2 | -------------------------------------------------------------------------------- /parser/testdata/query/select_simple_with_cte_with_column_aliases.sql: -------------------------------------------------------------------------------- 1 | WITH 2 | test(f1, f2, f3) AS (SELECT f4, f5, f6 FROM sales) 3 | SELECT 4 | f1 AS new_f1, 5 | f2 AS new_f2, 6 | f3 AS new_f3 7 | FROM 8 | test; 9 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_freeze_no_specify_partition.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.events ON CLUSTER 'default_cluster' freeze; 3 | 4 | -- Format SQL: 5 | ALTER TABLE test.events ON CLUSTER 'default_cluster' FREEZE; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_reset_setting.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE example_table RESET SETTING max_part_loading_threads; 3 | 4 | -- Format SQL: 5 | ALTER TABLE example_table RESET SETTING max_part_loading_threads; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00009_uniq_distributed.sql: -------------------------------------------------------------------------------- 1 | -- Tags: distributed 2 | 3 | 4 | SELECT uniq(UserID), uniqIf(UserID, CounterID = 800784), uniqIf(FUniqID, RegionID = 213) FROM remote('127.0.0.{1,2}', test, hits) 5 | -------------------------------------------------------------------------------- 
/parser/testdata/query/format/select_with_literal_table_name.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | select table_name from "information_schema"."tables" limit 1; 3 | 4 | 5 | -- Format SQL: 6 | SELECT table_name FROM "information_schema"."tables" LIMIT 1; 7 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_multi_union.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT 1 AS v1 UNION ALL SELECT 2 AS v2 UNION ALL SELECT 3 AS v3 3 | 4 | 5 | -- Format SQL: 6 | SELECT 1 AS v1 UNION ALL SELECT 2 AS v2 UNION ALL SELECT 3 AS v3; 7 | -------------------------------------------------------------------------------- /parser/testdata/query/select_simple_with_with_clause.sql: -------------------------------------------------------------------------------- 1 | WITH 2 | cte1 AS (SELECT f1 FROM t1), 3 | cte2 AS (SELECT f2 FROM t2) 4 | SELECT 5 | cte1.f1, 6 | cte2.f2, 7 | t3.f3 8 | FROM 9 | t3,cte1,cte2 10 | 11 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_with_null_engine.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE logs.t0 on cluster default 2 | ( 3 | `trace_id` String CODEC(ZSTD(1)), 4 | INDEX trace_id_bloom_idx trace_id TYPE bloom_filter(0.01) GRANULARITY 64 5 | ) ENGINE = Null(); -------------------------------------------------------------------------------- /parser/testdata/ddl/format/drop_table_with_on_clsuter.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | DROP TABLE IF EXISTS test.table_name ON CLUSTER 'default_cluster'; 3 | 4 | 5 | -- Format SQL: 6 | DROP TABLE IF EXISTS test.table_name ON CLUSTER 'default_cluster'; 7 | 
-------------------------------------------------------------------------------- /parser/testdata/dml/create_column_with_ttl.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE example1 ( 2 | timestamp DateTime, 3 | x UInt32 TTL timestamp + INTERVAL 1 MONTH, 4 | y UInt32 TTL timestamp + INTERVAL 1 WEEK 5 | ) 6 | ENGINE = MergeTree 7 | ORDER BY tuple() -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00073_uniq_array.sql: -------------------------------------------------------------------------------- 1 | SELECT EventDate, uniqExact(UserID), length(groupUniqArray(UserID)), arrayUniq(groupArray(UserID)) FROM test.hits WHERE CounterID = 1704509 GROUP BY EventDate ORDER BY EventDate; 2 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_column_alias_string.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT 'abc' as "value2"; 3 | 4 | SELECT $abc, a$$bc, abc$$; 5 | 6 | 7 | -- Format SQL: 8 | SELECT 'abc' AS "value2"; 9 | SELECT $abc, a$$bc, abc$$; 10 | -------------------------------------------------------------------------------- /parser/testdata/query/select_order_by_with_fill_from_to.sql: -------------------------------------------------------------------------------- 1 | SELECT n, source FROM ( 2 | SELECT toFloat32(number % 10) AS n, 'original' AS source 3 | FROM numbers(10) WHERE number % 3 = 1 4 | ) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5; 5 | -------------------------------------------------------------------------------- /parser/testdata/query/select_order_by_with_fill_step.sql: -------------------------------------------------------------------------------- 1 | SELECT date, value FROM ( 2 | SELECT toDate('2020-01-01') + INTERVAL number DAY AS date, number AS value 3 | FROM numbers(5) 4 | ) ORDER BY 
date WITH FILL STEP INTERVAL 1 DAY; 5 | -------------------------------------------------------------------------------- /parser/testdata/ddl/drop_role.sql: -------------------------------------------------------------------------------- 1 | DROP ROLE IF EXISTS r1_01293, r2_01293, r3_01293, r4_01293, r5_01293, r6_01293, r7_01293, r8_01293, r9_01293; 2 | DROP ROLE IF EXISTS r2_01293_renamed; 3 | DROP ROLE IF EXISTS r1_01293@'%', 'r2_01293@%.myhost.com'; 4 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_drop_index.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.event_local ON CLUSTER 'default_cluster' DROP INDEX f1; 3 | 4 | -- Format SQL: 5 | ALTER TABLE test.event_local ON CLUSTER 'default_cluster' DROP INDEX f1; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00069_duplicate_aggregation_keys.sql: -------------------------------------------------------------------------------- 1 | SELECT URL, EventDate, max(URL) FROM test.hits WHERE CounterID = 1704509 AND UserID = 4322253409885123546 GROUP BY URL, EventDate, EventDate ORDER BY URL, EventDate; 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00076_system_columns_bytes.sql: -------------------------------------------------------------------------------- 1 | -- NOTE: database = currentDatabase() is not mandatory 2 | SELECT sum(data_compressed_bytes) > 0, sum(data_uncompressed_bytes) > 0, sum(marks_bytes) > 0 FROM system.columns; 3 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_simple_with_bracket.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT arrayConcat([1, 2], [3, 
4], [5, 6]) AS res, f1["abc"] as f2 3 | 4 | 5 | -- Format SQL: 6 | SELECT arrayConcat([1, 2], [3, 4], [5, 6]) AS res, f1["abc"] AS f2; 7 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_clear_index.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE my_table CLEAR INDEX my_index_name IN PARTITION partition_name; 3 | 4 | -- Format SQL: 5 | ALTER TABLE my_table CLEAR INDEX my_index_name IN PARTITION partition_name; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_clear_projection.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE my_table CLEAR PROJECTION hello IN PARTITION partition_name; 3 | 4 | -- Format SQL: 5 | ALTER TABLE my_table CLEAR PROJECTION hello IN PARTITION partition_name; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_keyword_placeholder.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT {name :String}; 3 | SELECT toString({name :String}); 4 | 5 | 6 | 7 | -- Format SQL: 8 | SELECT {name: String}; 9 | SELECT toString({name: String}); 10 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_left_join.sql: -------------------------------------------------------------------------------- 1 | WITH 2 | t1 AS 3 | ( 4 | SELECT 1 AS value 5 | ), 6 | t2 AS 7 | ( 8 | SELECT 2 AS value 9 | ) 10 | SELECT * 11 | FROM t1 12 | LEFT JOIN t2 ON true -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_clear_column.sql: -------------------------------------------------------------------------------- 1 | -- Origin 
SQL: 2 | ALTER TABLE my_table CLEAR COLUMN my_column_name IN PARTITION partition_name; 3 | 4 | -- Format SQL: 5 | ALTER TABLE my_table CLEAR COLUMN my_column_name IN PARTITION partition_name; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_rename_column.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE my_table RENAME COLUMN old_column_name TO new_column_name; 3 | 4 | 5 | 6 | -- Format SQL: 7 | ALTER TABLE my_table RENAME COLUMN old_column_name TO new_column_name; 8 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_table_codec_no_args.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE TABLE shark_attacks ( 3 | timestamp DateTime CODEC(DoubleDelta), 4 | ); 5 | 6 | -- Format SQL: 7 | CREATE TABLE shark_attacks (timestamp DateTime CODEC(DoubleDelta)); 8 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/drop_table_with_no_delay.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | DROP TABLE IF EXISTS test.table_name ON CLUSTER 'default_cluster' NO DELAY; 3 | 4 | 5 | -- Format SQL: 6 | DROP TABLE IF EXISTS test.table_name ON CLUSTER 'default_cluster' NO DELAY; 7 | -------------------------------------------------------------------------------- /parser/testdata/dml/alter_table_with_comment.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD COLUMN a.f1 String default '' comment 'test' ; 2 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD COLUMN hello String default ''; 3 | -------------------------------------------------------------------------------- 
/parser/testdata/query/compatible/1_stateful/00037_uniq_state_merge1.sql: -------------------------------------------------------------------------------- 1 | SELECT k, any(u) AS u, uniqMerge(us) AS us FROM (SELECT domain(URL) AS k, uniq(UserID) AS u, uniqState(UserID) AS us FROM test.hits GROUP BY k) GROUP BY k ORDER BY u DESC, k ASC LIMIT 100 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00046_uniq_upto_distributed.sql: -------------------------------------------------------------------------------- 1 | -- Tags: distributed 2 | 3 | SELECT RegionID, uniqExact(UserID) AS u1, uniqUpTo(10)(UserID) AS u2 FROM remote('127.0.0.{1,2}', test, visits) GROUP BY RegionID HAVING u1 <= 11 AND u1 != u2 -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00141_transform.sql: -------------------------------------------------------------------------------- 1 | SELECT transform(SearchEngineID, [2, 3], ['Яндекс', 'Google'], 'Остальные') AS title, count() AS c FROM test.hits WHERE SearchEngineID != 0 GROUP BY title HAVING c > 0 ORDER BY c DESC LIMIT 10; 2 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_without_from_where.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT 1 WHERE 1 = 1; 3 | SELECT {p :UInt8} WHERE {p :UInt8} = 1; 4 | 5 | 6 | 7 | -- Format SQL: 8 | SELECT 1 WHERE 1 = 1; 9 | SELECT {p: UInt8} WHERE {p: UInt8} = 1; 10 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_drop_projection.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.event_local ON CLUSTER 'default_cluster' DROP PROJECTION f1; 3 | 4 | -- Format SQL: 5 | ALTER TABLE 
test.event_local ON CLUSTER 'default_cluster' DROP PROJECTION f1; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00083_array_filter.sql: -------------------------------------------------------------------------------- 1 | SELECT sum(length(ParsedParams.Key1)) FROM test.hits WHERE notEmpty(ParsedParams.Key1); 2 | SELECT sum(length(ParsedParams.ValueDouble)) FROM test.hits WHERE notEmpty(ParsedParams.ValueDouble); 3 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00144_functions_of_aggregation_states.sql: -------------------------------------------------------------------------------- 1 | SELECT EventDate, finalizeAggregation(state), runningAccumulate(state) FROM (SELECT EventDate, uniqState(UserID) AS state FROM test.hits GROUP BY EventDate ORDER BY EventDate); 2 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_multi_union_distinct.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT 1 AS v1 UNION DISTINCT SELECT 2 AS v2 UNION DISTINCT SELECT 3 AS v3 3 | 4 | -- Format SQL: 5 | SELECT 1 AS v1 UNION DISTINCT SELECT 2 AS v2 UNION DISTINCT SELECT 3 AS v3; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_keyword_in_group_by.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | toStartOfInterval(timestamp, toIntervalMinute(1)) AS interval, 3 | column_name 4 | FROM table 5 | WHERE true 6 | GROUP BY (interval, column_name) 7 | ORDER BY (interval AS i, column_name) ASC -------------------------------------------------------------------------------- /parser/testdata/basic/output/use_database.sql.golden.json: 
-------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "UsePos": 0, 4 | "StatementEnd": 8, 5 | "Database": { 6 | "Name": "test", 7 | "QuoteType": 1, 8 | "NamePos": 4, 9 | "NameEnd": 8 10 | } 11 | } 12 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/create_materialized_view_with_gcs.sql: -------------------------------------------------------------------------------- 1 | CREATE MATERIALIZED VIEW database_name.view_name 2 | REFRESH EVERY 5 MINUTE TO database_name.table_name AS 3 | SELECT * FROM gcs(gcs_creds,url='https://storage.googleapis.com/some-bucket/some-path/'); -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_drop_partition.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.events ON CLUSTER 'default_cluster' drop partition '2023-07-18'; 3 | 4 | -- Format SQL: 5 | ALTER TABLE test.events ON CLUSTER 'default_cluster' DROP PARTITION '2023-07-18'; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00020_distinct_order_by_distributed.sql: -------------------------------------------------------------------------------- 1 | -- Tags: distributed 2 | 3 | SET max_rows_to_sort = 10000; 4 | SELECT count() FROM (SELECT DISTINCT PredLastVisit AS x FROM remote('127.0.0.{1,2}', test, visits) ORDER BY VisitID); 5 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00052_group_by_in.sql: -------------------------------------------------------------------------------- 1 | select StartDate, TraficSourceID in (0) ? 
'type_in' : 'other' as traf_type, sum(Sign) 2 | from test.visits 3 | where CounterID = 842440 4 | group by StartDate, traf_type ORDER BY StartDate, traf_type 5 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00172_early_constant_folding.sql: -------------------------------------------------------------------------------- 1 | -- Tags: no-parallel-replicas 2 | 3 | set max_threads=10; 4 | EXPLAIN PIPELINE SELECT count(JavaEnable) FROM test.hits WHERE WatchID = 1 OR Title = 'next' OR URL = 'prev' OR URL = '???' OR 1; 5 | -------------------------------------------------------------------------------- /parser/testdata/query/select_order_by_with_fill_interpolate_no_columns.sql: -------------------------------------------------------------------------------- 1 | SELECT n, value FROM ( 2 | SELECT toFloat32(number % 10) AS n, number AS value 3 | FROM numbers(10) WHERE number % 3 = 1 4 | ) ORDER BY n WITH FILL FROM 0 TO 10 STEP 1 5 | INTERPOLATE; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_drop_column.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' DROP COLUMN IF EXISTS f1; 3 | 4 | -- Format SQL: 5 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' DROP COLUMN IF EXISTS f1; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/show_databases_comprehensive.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SHOW DATABASES LIKE 'prod%' LIMIT 5 INTO OUTFILE '/tmp/prod_dbs.txt' FORMAT JSON 3 | 4 | -- Format SQL: 5 | SHOW DATABASES LIKE 'prod%' LIMIT 5 INTO OUTFILE '/tmp/prod_dbs.txt' FORMAT 'JSON'; 6 | 
-------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_freeze_partition.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.events ON CLUSTER 'default_cluster' freeze partition '2023-07-18';; 3 | 4 | -- Format SQL: 5 | ALTER TABLE test.events ON CLUSTER 'default_cluster' FREEZE PARTITION '2023-07-18'; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/systems.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SYSTEM FLUSH LOGS; 3 | SYSTEM DROP UNCOMPRESSED CACHE; 4 | SYSTEM DROP FILESYSTEM CACHE; 5 | 6 | 7 | -- Format SQL: 8 | SYSTEM FLUSH LOGS; 9 | SYSTEM DROP UNCOMPRESSED CACHE; 10 | SYSTEM DROP FILESYSTEM CACHE; 11 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00080_array_join_and_union.sql: -------------------------------------------------------------------------------- 1 | SELECT count() FROM (SELECT Goals.ID FROM test.visits ARRAY JOIN Goals WHERE CounterID = 842440 LIMIT 10 UNION ALL SELECT Goals.ID FROM test.visits ARRAY JOIN Goals WHERE CounterID = 842440 LIMIT 10); 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00143_transform_non_const_default.sql: -------------------------------------------------------------------------------- 1 | SELECT transform(SearchEngineID, [2, 3], ['Яндекс', 'Google'], PageCharset) AS title, count() AS c FROM test.hits WHERE SearchEngineID != 0 GROUP BY title HAVING c > 0 ORDER BY c DESC LIMIT 10; 2 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_distributed_table.sql: -------------------------------------------------------------------------------- 1 
| create table test.event_all 2 | ON CLUSTER 'default_cluster' 3 | AS test.evnets_local 4 | ENGINE = Distributed( 5 | default_cluster, 6 | test, 7 | events_local, 8 | rand() 9 | ) SETTINGS fsync_after_insert=0; 10 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/truncate_temporary_table_on_clsuter.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | TRUNCATE TEMPORARY TABLE IF EXISTS test.table_name ON CLUSTER 'default_cluster'; 3 | 4 | 5 | -- Format SQL: 6 | TRUNCATE TEMPORARY TABLE IF EXISTS test.table_name ON CLUSTER 'default_cluster'; 7 | -------------------------------------------------------------------------------- /parser/testdata/dml/format/alter_table_with_modify_remove_ttl.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE infra.flow_processed_emails_local ON CLUSTER default_cluster REMOVE TTL; 3 | 4 | -- Format SQL: 5 | ALTER TABLE infra.flow_processed_emails_local ON CLUSTER default_cluster REMOVE TTL; 6 | -------------------------------------------------------------------------------- /parser/testdata/dml/insert_with_format.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO `_test_1345# $.ДБ`.`2. Таблица №2`; 2 | INSERT INTO "db"."table_name" (col1, col2) VALUES (1, 2); 3 | INSERT INTO `_test_1345# $.ДБ`.`2. 
Таблица №2` (col1, col2); 4 | INSERT INTO table_name (col1, col2) VALUES (1, 2) FORMAT Native; -------------------------------------------------------------------------------- /parser/testdata/query/format/select_table_alias_without_keyword.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT t1.Timestamp FROM my_table t1 INNER JOIN my_other_table t2 ON t1.a=t2.b 3 | 4 | -- Format SQL: 5 | SELECT t1.Timestamp FROM my_table AS t1 INNER JOIN my_other_table AS t2 ON t1.a = t2.b; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_add_column.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD COLUMN f1 String AFTER f0; 3 | 4 | 5 | -- Format SQL: 6 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD COLUMN f1 String AFTER f0; 7 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_update.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.users UPDATE status = 'active', updated_at = now() WHERE status = 'pending'; 3 | 4 | 5 | -- Format SQL: 6 | ALTER TABLE test.users UPDATE status = 'active', updated_at = now() WHERE status = 'pending'; 7 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_update_with_cluster.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE db.table ON CLUSTER cluster1 UPDATE column1 = column1 + 1 WHERE id > 100; 3 | 4 | 5 | -- Format SQL: 6 | ALTER TABLE db.table ON CLUSTER cluster1 UPDATE column1 = column1 + 1 WHERE id > 100; 7 | 
-------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_modify_setting.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE example_table MODIFY SETTING max_part_loading_threads=8, max_parts_in_total=50000; 3 | 4 | -- Format SQL: 5 | ALTER TABLE example_table MODIFY SETTING max_part_loading_threads=8, max_parts_in_total=50000; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00004_top_counters.sql: -------------------------------------------------------------------------------- 1 | SELECT CounterID, count() AS c FROM test.hits GROUP BY CounterID ORDER BY c DESC LIMIT 10; 2 | SELECT CounterID, count() AS c FROM test.hits GROUP BY CounterID ORDER BY c DESC LIMIT 10 SETTINGS optimize_aggregation_in_order = 1 3 | -------------------------------------------------------------------------------- /parser/testdata/query/query_with_expr_compare.sql: -------------------------------------------------------------------------------- 1 | SELECT date, path, splitByChar('/', path)[2] AS path_b 2 | FROM( 3 | SELECT 'pathA/pathB/pathC' AS path, '2024-09-10' AS date 4 | ) 5 | WHERE toDate(date) BETWEEN '2024-09-01' AND '2024-09-30' 6 | AND splitByChar('/', path)[1] = 'pathA' -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00153_aggregate_arena_race.sql: -------------------------------------------------------------------------------- 1 | -- Tags: race 2 | 3 | create temporary table dest00153 (`s` AggregateFunction(groupUniqArray, String)) engine Memory; 4 | insert into dest00153 select groupUniqArrayState(RefererDomain) from test.hits group by URLDomain; 5 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_view_on_cluster_with_uuid.sql: 
-------------------------------------------------------------------------------- 1 | CREATE VIEW IF NOT EXISTS cluster_name.my_view 2 | UUID '3493e374-e2bb-481b-b493-e374e2bb981b' 3 | ON CLUSTER 'my_cluster' 4 | AS ( 5 | SELECT 6 | column1, 7 | column2 8 | FROM 9 | my_other_table 10 | ); -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_database_replicated.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE DATABASE IF NOT EXISTS `test` ENGINE=Replicated('/root/test_local', 'shard', 'replica'); 3 | 4 | 5 | -- Format SQL: 6 | CREATE DATABASE IF NOT EXISTS `test` ENGINE = Replicated('/root/test_local', 'shard', 'replica'); 7 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_table_with_qbit.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE TABLE test.qbit_example ( 3 | id UInt32, 4 | vec QBit(Float32, 8) 5 | ) ENGINE = Memory; 6 | 7 | 8 | -- Format SQL: 9 | CREATE TABLE test.qbit_example (id UInt32, vec QBit(Float32, 8)) ENGINE = Memory; 10 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00066_sorting_distributed_many_replicas.sql: -------------------------------------------------------------------------------- 1 | -- Tags: replica, distributed, no-random-settings 2 | 3 | 4 | SET max_parallel_replicas = 2; 5 | SELECT EventTime::DateTime('Asia/Dubai') FROM remote('127.0.0.{1|2}', test, hits) ORDER BY EventTime DESC LIMIT 10 6 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_simple_with_group_by_with_cube_totals.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT a, COUNT(b) FROM group_by_all GROUP 
BY CUBE(a) WITH CUBE WITH TOTALS ORDER BY a; 3 | 4 | -- Format SQL: 5 | SELECT a, COUNT(b) FROM group_by_all GROUP BY CUBE(a) WITH CUBE WITH TOTALS ORDER BY a; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_materialize_index.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE visits_order MATERIALIZE INDEX IF EXISTS user_name_index IN PARTITION '20240403'; 3 | 4 | 5 | 6 | -- Format SQL: 7 | ALTER TABLE visits_order MATERIALIZE INDEX IF EXISTS user_name_index IN PARTITION '20240403'; 8 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00038_uniq_state_merge2.sql: -------------------------------------------------------------------------------- 1 | SELECT topLevelDomain(concat('http://', k)) AS tld, sum(u) AS u, uniqMerge(us) AS us FROM (SELECT domain(URL) AS k, uniq(UserID) AS u, uniqState(UserID) AS us FROM test.hits GROUP BY k) GROUP BY tld ORDER BY u DESC, tld ASC LIMIT 100 2 | -------------------------------------------------------------------------------- /parser/testdata/query/select_case_when_exists.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | *, 3 | CASE 4 | WHEN EXISTS(SELECT 1 5 | FROM table_name 6 | WHERE col1 = '999999999') 7 | THEN 'then' 8 | ELSE 'else' 9 | END as check_result 10 | FROM table_name 11 | WHERE col1 = '123456789' 12 | -------------------------------------------------------------------------------- /parser/testdata/query/select_order_by_with_fill_interpolate.sql: -------------------------------------------------------------------------------- 1 | SELECT n, source, inter FROM ( 2 | SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter 3 | FROM numbers(10) WHERE number % 3 = 1 4 | ) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5 5 | INTERPOLATE 
(inter AS inter + 1); 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_delete_with_cluster.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.events ON CLUSTER 'default_cluster' DELETE WHERE id = 123 AND status = 'deleted'; 3 | 4 | 5 | -- Format SQL: 6 | ALTER TABLE test.events ON CLUSTER 'default_cluster' DELETE WHERE id = 123 AND status = 'deleted'; 7 | -------------------------------------------------------------------------------- /parser/testdata/dml/insert_with_placeholder.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO t0(user_id, message, timestamp, metric) VALUES 2 | (?, ?, ?, ?), 3 | (?, ?, ?, ?), 4 | (?, ?, ?, ?), 5 | (?, ?, ?, ?) 6 | ; 7 | 8 | INSERT INTO test_with_typed_columns (id, created_at) 9 | VALUES ({id: Int32}, {created_at: DateTime64(6)}); -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_reset_multiple_settings.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE example_table RESET SETTING max_part_loading_threads, max_parts_in_total, another_setting; 3 | 4 | -- Format SQL: 5 | ALTER TABLE example_table RESET SETTING max_part_loading_threads, max_parts_in_total, another_setting; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_update_in_partition.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.users UPDATE status = 'inactive' IN PARTITION '2024-01-01' WHERE status = 'active'; 3 | 4 | 5 | -- Format SQL: 6 | ALTER TABLE test.users UPDATE status = 'inactive' IN PARTITION '2024-01-01' WHERE status = 'active'; 7 | 
-------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_live_view_basic.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE LIVE VIEW my_live_view 3 | WITH TIMEOUT 10 TO my_destination(id String) 4 | AS SELECT id FROM my_table; 5 | 6 | 7 | -- Format SQL: 8 | CREATE LIVE VIEW my_live_view WITH TIMEOUT 10 TO my_destination (id String) AS SELECT id FROM my_table; 9 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/drop_database.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "DropPos": 0, 4 | "StatementEnd": 36, 5 | "Name": { 6 | "Name": "datbase_name", 7 | "QuoteType": 1, 8 | "NamePos": 24, 9 | "NameEnd": 36 10 | }, 11 | "IfExists": true, 12 | "OnCluster": null 13 | } 14 | ] -------------------------------------------------------------------------------- /parser/testdata/dml/format/insert_with_select.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | INSERT INTO test.visits_null 3 | SELECT 4 | CounterID, 5 | StartDate, 6 | Sign, 7 | UserID 8 | FROM test.visits; 9 | 10 | -- Format SQL: 11 | INSERT INTO test.visits_null SELECT CounterID, StartDate, Sign, UserID FROM test.visits; 12 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00050_min_max.sql: -------------------------------------------------------------------------------- 1 | SELECT CounterID, min(WatchID), max(WatchID) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; 2 | SELECT CounterID, min(WatchID), max(WatchID) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20 SETTINGS optimize_aggregation_in_order = 1 3 | -------------------------------------------------------------------------------- 
/parser/testdata/query/select_json_type.sql: -------------------------------------------------------------------------------- 1 | SELECT a, a.b, a.b.c.d.e; 2 | SELECT JSON_TYPE('{"a": 1, "b": {"c": 2}}', '$.b'); 3 | SELECT CAST(some, 'String') AS value; 4 | SELECT CAST(some.long, 'String') AS value; 5 | SELECT CAST(some.long.json, 'String') AS value; 6 | SELECT CAST(some.long.json.path, 'String') AS value; 7 | 8 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_materialize_projection.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE visits_order MATERIALIZE PROJECTION IF EXISTS user_name_projection IN PARTITION '20240403'; 3 | 4 | 5 | 6 | -- Format SQL: 7 | ALTER TABLE visits_order MATERIALIZE PROJECTION IF EXISTS user_name_projection IN PARTITION '20240403'; 8 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_view_basic.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE VIEW IF NOT EXISTS my_view(col1 String, col2 String) 3 | AS 4 | SELECT 5 | id, 6 | name 7 | FROM 8 | my_table; 9 | 10 | -- Format SQL: 11 | CREATE VIEW IF NOT EXISTS my_view (col1 String, col2 String) AS SELECT id, name FROM my_table; 12 | -------------------------------------------------------------------------------- /parser/testdata/dml/format/alter_table_with_modify_ttl.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE infra.flow_processed_emails_local ON CLUSTER default_cluster MODIFY TTL created_at + INTERVAL 3 YEAR; 3 | 4 | -- Format SQL: 5 | ALTER TABLE infra.flow_processed_emails_local ON CLUSTER default_cluster MODIFY TTL created_at + INTERVAL 3 YEAR; 6 | -------------------------------------------------------------------------------- 
/parser/testdata/query/format/select_with_multi_except.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT number FROM numbers(1, 10) EXCEPT SELECT number FROM numbers(3, 6) EXCEPT SELECT number FROM numbers(8, 9) 3 | 4 | -- Format SQL: 5 | SELECT number FROM numbers(1, 10) EXCEPT SELECT number FROM numbers(3, 6) EXCEPT SELECT number FROM numbers(8, 9); 6 | -------------------------------------------------------------------------------- /parser/testdata/dml/format/alter_table_modify_query.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.some_mv ON CLUSTER cluster MODIFY QUERY SELECT field1, field2 FROM test.some_table WHERE count >= 3; 3 | 4 | -- Format SQL: 5 | ALTER TABLE test.some_mv ON CLUSTER cluster MODIFY QUERY SELECT field1, field2 FROM test.some_table WHERE count >= 3; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00147_global_in_aggregate_function.sql: -------------------------------------------------------------------------------- 1 | -- Tags: global 2 | 3 | SELECT sum(UserID GLOBAL IN (SELECT UserID FROM remote('127.0.0.{1,2}', test.hits))) FROM remote('127.0.0.{1,2}', test.hits); 4 | SELECT sum(UserID GLOBAL IN (SELECT UserID FROM test.hits)) FROM remote('127.0.0.{1,2}', test.hits); 5 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/show_tables.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ShowPos": 0, 4 | "StatementEnd": 12, 5 | "ShowType": "TABLES", 6 | "Target": null, 7 | "NotLike": false, 8 | "LikeType": "", 9 | "LikePattern": null, 10 | "Limit": null, 11 | "OutFile": null, 12 | "Format": null 13 | } 14 | ] -------------------------------------------------------------------------------- 
/parser/testdata/ddl/output/show_databases.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ShowPos": 0, 4 | "StatementEnd": 15, 5 | "ShowType": "DATABASES", 6 | "Target": null, 7 | "NotLike": false, 8 | "LikeType": "", 9 | "LikePattern": null, 10 | "Limit": null, 11 | "OutFile": null, 12 | "Format": null 13 | } 14 | ] -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00088_global_in_one_shard_and_rows_before_limit.sql: -------------------------------------------------------------------------------- 1 | -- Tags: shard 2 | 3 | SET output_format_write_statistics = 0; 4 | SELECT EventDate, count() FROM remote('127.0.0.1', test.hits) WHERE UserID GLOBAL IN (SELECT UserID FROM test.hits) GROUP BY EventDate ORDER BY EventDate LIMIT 5 FORMAT JSONCompact; 5 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_with_keyword_partition_by.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE test.events_local UUID 'dad17568-b070-49d0-9ad1-7568b07029d0' ( 2 | `date` Date, 3 | `f1` String, 4 | `f2` String, 5 | `f3` UInt64 6 | ) ENGINE = ReplacingMergeTree 7 | PARTITION BY date 8 | ORDER BY (f1, f2) 9 | SETTINGS index_granularity = 8192; -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_union_distinct.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT replica_name FROM system.ha_replicas UNION DISTINCT SELECT replica_name FROM system.ha_unique_replicas format JSON 3 | 4 | -- Format SQL: 5 | SELECT replica_name FROM system.ha_replicas UNION DISTINCT SELECT replica_name FROM system.ha_unique_replicas FORMAT JSON; 6 | -------------------------------------------------------------------------------- 
/parser/testdata/basic/set_statement.sql: -------------------------------------------------------------------------------- 1 | SET allow_suspicious_low_cardinality_types = true; 2 | 3 | SET max_block_size = 65536; 4 | 5 | SET output_format_json_quote_64bit_integers = 'true'; 6 | 7 | SET max_threads = 8, max_memory_usage = 10000000000, enable_optimize_predicate_expression = false; 8 | 9 | SET allow_experimental_analyzer = true; 10 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_mv_with_order_by.sql: -------------------------------------------------------------------------------- 1 | CREATE MATERIALIZED VIEW IF NOT EXISTS test_mv 2 | ENGINE = ReplacingMergeTree() 3 | PRIMARY KEY (id) 4 | ORDER BY (id) 5 | AS 6 | SELECT * FROM test_table; 7 | 8 | CREATE MATERIALIZED VIEW IF NOT EXISTS test_mv 9 | ENGINE = ReplacingMergeTree() 10 | PRIMARY KEY (id) 11 | AS 12 | SELECT * FROM test_table; -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00022_merge_prewhere.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS test.merge_hits; 2 | CREATE TABLE IF NOT EXISTS test.merge_hits AS test.hits ENGINE = Merge(test, '^hits$'); 3 | SELECT count() FROM test.merge_hits WHERE AdvEngineID = 2; 4 | SELECT count() FROM test.merge_hits PREWHERE AdvEngineID = 2; 5 | DROP TABLE test.merge_hits; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00047_bar.sql: -------------------------------------------------------------------------------- 1 | SELECT CounterID, count() AS c, bar(c, 0, 523264) FROM test.hits GROUP BY CounterID ORDER BY c DESC, CounterID ASC LIMIT 100; 2 | SELECT CounterID, count() AS c, bar(c, 0, 523264) FROM test.hits GROUP BY CounterID ORDER BY c DESC, CounterID ASC LIMIT 100 SETTINGS 
optimize_aggregation_in_order = 1 3 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00097_constexpr_in_index.sql: -------------------------------------------------------------------------------- 1 | -- Even in presense of OR, we evaluate the "0 IN (1, 2, 3)" as a constant expression therefore it does not prevent the index analysis. 2 | 3 | SELECT count() FROM test.hits WHERE CounterID IN (14917930, 33034174) OR 0 IN (1, 2, 3) SETTINGS max_rows_to_read = 1000000, force_primary_key = 1; 4 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_simple_with_is_null.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT f0,f1,f2,f3 as a0 3 | FROM test.events_local 4 | WHERE (f0 IN ('foo', 'bar', 'test')) AND (f1 = 'testing') AND f2 IS NULL 5 | 6 | -- Format SQL: 7 | SELECT f0, f1, f2, f3 AS a0 FROM test.events_local WHERE (f0 IN ('foo', 'bar', 'test')) AND (f1 = 'testing') AND f2 IS NULL; 8 | -------------------------------------------------------------------------------- /parser/testdata/query/select_simple.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | f0, coalesce(f1, f2) AS f3, row_number() 3 | OVER (PARTITION BY f0 ORDER BY f1 ASC) AS rn 4 | FROM test.events_local 5 | WHERE (f0 IN ('foo', 'bar', 'test')) AND (f1 = 'testing') AND (f2 NOT LIKE 'testing2') 6 | AND f3 NOT IN ('a', 'b', 'c') 7 | 8 | 9 | GROUP BY f0, f1 10 | 11 | Limit 100, 10 By f0; -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00162_mmap_compression_none.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS hits_none; 2 | CREATE TABLE hits_none (Title String CODEC(NONE)) ENGINE = MergeTree ORDER BY tuple(); 3 | 
INSERT INTO hits_none SELECT Title FROM test.hits; 4 | 5 | SET min_bytes_to_use_mmap_io = 1; 6 | SELECT sum(length(Title)) FROM hits_none; 7 | 8 | DROP TABLE hits_none; 9 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/create_database.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "CreatePos": 0, 4 | "StatementEnd": 35, 5 | "Name": { 6 | "Name": "test", 7 | "QuoteType": 3, 8 | "NamePos": 31, 9 | "NameEnd": 35 10 | }, 11 | "IfNotExists": true, 12 | "OnCluster": null, 13 | "Engine": null, 14 | "Comment": null 15 | } 16 | ] -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00067_union_all.sql: -------------------------------------------------------------------------------- 1 | SELECT * FROM 2 | ( 3 | SELECT UserID AS id, 1 AS event 4 | FROM remote('127.0.0.{1,2}', test, hits) 5 | ORDER BY id DESC 6 | LIMIT 10 7 | UNION ALL 8 | SELECT FUniqID AS id, 2 AS event 9 | FROM remote('127.0.0.{1,2}', test, hits) 10 | ORDER BY id DESC 11 | LIMIT 10 12 | ) 13 | ORDER BY id, event; 14 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_table_function_with_query.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT 1, (SELECT 70) AS `power`, number 3 | FROM 4 | numbers( 5 | plus( 6 | ifNull((SELECT 1 AS bin_count, 1), 7 | 1) 8 | ) 9 | ) 10 | 11 | -- Format SQL: 12 | SELECT 1, (SELECT 70) AS `power`, number FROM numbers(plus(ifNull((SELECT 1 AS bin_count, 1), 1))); 13 | -------------------------------------------------------------------------------- /parser/testdata/basic/settings_statement.sql: -------------------------------------------------------------------------------- 1 | SETTINGS allow_suspicious_low_cardinality_types = true; 2 | 3 | SETTINGS 
max_block_size = 65536; 4 | 5 | SETTINGS output_format_json_quote_64bit_integers = 'true'; 6 | 7 | SETTINGS max_threads = 8, max_memory_usage = 10000000000, enable_optimize_predicate_expression = false; 8 | 9 | SETTINGS allow_experimental_analyzer = true; 10 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00049_max_string_if.sql: -------------------------------------------------------------------------------- 1 | SELECT CounterID, count(), maxIf(SearchPhrase, notEmpty(SearchPhrase)) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; 2 | SELECT CounterID, count(), maxIf(SearchPhrase, notEmpty(SearchPhrase)) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20 SETTINGS optimize_aggregation_in_order = 1 3 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00176_distinct_limit_by_limit_bug_43377.sql: -------------------------------------------------------------------------------- 1 | SELECT count() 2 | FROM 3 | ( 4 | SELECT DISTINCT 5 | Title, 6 | SearchPhrase 7 | FROM test.hits 8 | WHERE (SearchPhrase != '') AND (NOT match(Title, '[а-яА-ЯёЁ]')) AND (NOT match(SearchPhrase, '[а-яА-ЯёЁ]')) 9 | LIMIT 1 BY Title 10 | LIMIT 10 11 | ); 12 | -------------------------------------------------------------------------------- /parser/testdata/query/create_window_view.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE VIEW asdf AS 2 | SELECT id, 3 | price * 1.5 AS computed_value, 4 | row_number() OVER ( 5 | PARTITION BY category 6 | ORDER BY created_at 7 | RANGE BETWEEN 3600 PRECEDING AND CURRENT ROW 8 | ) AS rn 9 | FROM source_table 10 | WHERE date >= '2023-01-01'; 11 | -------------------------------------------------------------------------------- /parser/testdata/query/format/set_simple.sql: 
-------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SET max_threads = 1, max_insert_threads = 0, max_block_size = 8192, min_insert_block_size_rows = 8192, min_insert_block_size_bytes = 1048576; -- lower memory usage 3 | 4 | -- Format SQL: 5 | SET max_threads=1, max_insert_threads=0, max_block_size=8192, min_insert_block_size_rows=8192, min_insert_block_size_bytes=1048576; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_with_projection.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE events 2 | ( 3 | `event_time` DateTime, 4 | `event_id` UInt64, 5 | `user_id` UInt64, 6 | `huge_string` String, 7 | PROJECTION order_by_user_id 8 | ( 9 | SELECT 10 | _part_offset 11 | ORDER BY user_id 12 | ) 13 | ) 14 | ENGINE = MergeTree() 15 | ORDER BY (event_id); -------------------------------------------------------------------------------- /parser/testdata/ddl/output/desc_table_without_table_keyword.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "DescribePos": 0, 4 | "StatementEnd": 12, 5 | "DescribeType": "", 6 | "Target": { 7 | "Database": null, 8 | "Table": { 9 | "Name": "mytable", 10 | "QuoteType": 1, 11 | "NamePos": 5, 12 | "NameEnd": 12 13 | } 14 | } 15 | } 16 | ] -------------------------------------------------------------------------------- /parser/testdata/query/format/select_cast.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | select cast(1 as Float64) as value; 3 | select cast(1, 'Float64') as value; 4 | select (1 as Float64) as value; 5 | select 1::Float64 as value; 6 | 7 | -- Format SQL: 8 | SELECT CAST(1 AS Float64) AS value; 9 | SELECT CAST(1, 'Float64') AS value; 10 | SELECT (1 AS Float64) AS value; 11 | SELECT 1::Float64 AS value; 12 | 
-------------------------------------------------------------------------------- /parser/testdata/ddl/output/desc_table_with_table_keyword.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "DescribePos": 0, 4 | "StatementEnd": 18, 5 | "DescribeType": "TABLE", 6 | "Target": { 7 | "Database": null, 8 | "Table": { 9 | "Name": "mytable", 10 | "QuoteType": 1, 11 | "NamePos": 11, 12 | "NameEnd": 18 13 | } 14 | } 15 | } 16 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/describe_table_without_table_keyword.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "DescribePos": 0, 4 | "StatementEnd": 16, 5 | "DescribeType": "", 6 | "Target": { 7 | "Database": null, 8 | "Table": { 9 | "Name": "mytable", 10 | "QuoteType": 1, 11 | "NamePos": 9, 12 | "NameEnd": 16 13 | } 14 | } 15 | } 16 | ] -------------------------------------------------------------------------------- /parser/testdata/query/select_with_query_parameter.sql: -------------------------------------------------------------------------------- 1 | SET param_a = 13; 2 | SET param_b = 'str'; 3 | SET param_c = '2022-08-04 18:30:53'; 4 | SET param_d = {'10': [11, 12], '13': [14, 15]}; 5 | 6 | SELECT 7 | {a: UInt32}, 8 | {b: String}, 9 | {c: DateTime}, 10 | {d: Map(String, Array(UInt8))}; 11 | 12 | SELECT * FROM clickhouse WHERE tenant_id = {tenant_id: String}; 13 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_add_projection.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE visits_order 3 | ADD PROJECTION IF NOT EXISTS user_name_projection 4 | (SELECT * GROUP BY user_name ORDER BY user_name) AFTER a.user_id; 5 | 6 | 7 | -- Format SQL: 8 | ALTER TABLE visits_order ADD PROJECTION IF NOT EXISTS 
user_name_projection (SELECT * GROUP BY user_name ORDER BY user_name) AFTER a.user_id; 9 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/describe_table_with_table_keyword.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "DescribePos": 0, 4 | "StatementEnd": 22, 5 | "DescribeType": "TABLE", 6 | "Target": { 7 | "Database": null, 8 | "Table": { 9 | "Name": "mytable", 10 | "QuoteType": 1, 11 | "NamePos": 15, 12 | "NameEnd": 22 13 | } 14 | } 15 | } 16 | ] -------------------------------------------------------------------------------- /parser/testdata/query/access_tuple_with_dot.sql: -------------------------------------------------------------------------------- 1 | SELECT tuple('a','b','c').3, .1234; 2 | 3 | SELECT toTypeName( tuple('a' as first,'b' as second ,'c' as third)::Tuple(first String,second String,third String)), 4 | (tuple('a' as first,'b' as second ,'c' as third)::Tuple(first String,second String,third String)).second, 5 | tuple('a','b','c').3, 6 | tupleElement(tuple('a','b','c'),1) -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | 17 | .vscode 18 | .idea 19 | .DS_Store 20 | 21 | # Binary 22 | main 23 | clickhouse-sql-parser -------------------------------------------------------------------------------- /parser/testdata/query/format/select_item_with_modifiers.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT c0 REPLACE(c0 AS c1) FROM t0; 3 | SELECT * 
REPLACE(i + 1 AS i) FROM t1; 4 | SELECT * REPLACE(i + 1 AS i) EXCEPT (j) APPLY(sum) from t2; 5 | 6 | 7 | -- Format SQL: 8 | SELECT c0 REPLACE(c0 AS c1) FROM t0; 9 | SELECT * REPLACE(i + 1 AS i) FROM t1; 10 | SELECT * REPLACE(i + 1 AS i) EXCEPT(j) APPLY(sum) FROM t2; 11 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_group_by.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | datacenter, 3 | distro, 4 | SUM (quantity) AS qty 5 | FROM 6 | servers 7 | GROUP BY 8 | GROUPING SETS( 9 | (datacenter,distro), 10 | (datacenter), 11 | (distro), 12 | () 13 | ); 14 | 15 | SELECT 16 | datacenter, 17 | distro, 18 | SUM (quantity) AS qty 19 | FROM 20 | servers 21 | GROUP BY ALL; -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/AfterShip/clickhouse-sql-parser 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/sebdah/goldie/v2 v2.5.3 7 | github.com/stretchr/testify v1.8.4 8 | ) 9 | 10 | require ( 11 | github.com/davecgh/go-spew v1.1.1 // indirect 12 | github.com/pmezard/go-difflib v1.0.0 // indirect 13 | github.com/sergi/go-diff v1.0.0 // indirect 14 | gopkg.in/yaml.v3 v3.0.1 // indirect 15 | ) 16 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_left_join.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | WITH 3 | t1 AS 4 | ( 5 | SELECT 1 AS value 6 | ), 7 | t2 AS 8 | ( 9 | SELECT 2 AS value 10 | ) 11 | SELECT * 12 | FROM t1 13 | LEFT JOIN t2 ON true 14 | 15 | -- Format SQL: 16 | WITH t1 AS (SELECT 1 AS value), t2 AS (SELECT 2 AS value) SELECT * FROM t1 LEFT JOIN t2 ON true; 17 | -------------------------------------------------------------------------------- 
/parser/testdata/query/format/select_simple_with_cte_with_column_aliases.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | WITH 3 | test(f1, f2, f3) AS (SELECT f4, f5, f6 FROM sales) 4 | SELECT 5 | f1 AS new_f1, 6 | f2 AS new_f2, 7 | f3 AS new_f3 8 | FROM 9 | test; 10 | 11 | 12 | -- Format SQL: 13 | WITH test(f1, f2, f3) AS (SELECT f4, f5, f6 FROM sales) SELECT f1 AS new_f1, f2 AS new_f2, f3 AS new_f3 FROM test; 14 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_simple_with_with_clause.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | WITH 3 | cte1 AS (SELECT f1 FROM t1), 4 | cte2 AS (SELECT f2 FROM t2) 5 | SELECT 6 | cte1.f1, 7 | cte2.f2, 8 | t3.f3 9 | FROM 10 | t3,cte1,cte2 11 | 12 | 13 | 14 | -- Format SQL: 15 | WITH cte1 AS (SELECT f1 FROM t1), cte2 AS (SELECT f2 FROM t2) SELECT cte1.f1, cte2.f2, t3.f3 FROM t3,cte1,cte2; 16 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_drop_detach_partition.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE app_utc_00.app_message_as_notification_organization_sent_stats_i_d_local DROP DETACHED PARTITION '2022-05-24' SETTINGS allow_drop_detached = 1; 3 | 4 | -- Format SQL: 5 | ALTER TABLE app_utc_00.app_message_as_notification_organization_sent_stats_i_d_local DROP DETACHED PARTITION '2022-05-24' SETTINGS allow_drop_detached=1; 6 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_table_json_typehints.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE TABLE t ( 3 | j JSON(message String, a.b UInt64, max_dynamic_paths=0, SKIP x, SKIP REGEXP 're') 4 | ) ENGINE = MergeTree 
5 | ORDER BY tuple(); 6 | 7 | 8 | 9 | 10 | -- Format SQL: 11 | CREATE TABLE t (j JSON(max_dynamic_paths=0, message String, a.b UInt64, SKIP x, SKIP REGEXP 're')) ENGINE = MergeTree ORDER BY tuple(); 12 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_order_by_with_fill_staleness.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT number as key, 5 * number value, 'original' AS source 3 | FROM numbers(16) 4 | WHERE (number % 5) == 0 5 | ORDER BY key WITH FILL STALENESS 11; 6 | 7 | 8 | -- Format SQL: 9 | SELECT number AS key, 5 * number AS value, 'original' AS source FROM numbers(16) WHERE (number % 5) == 0 ORDER BY key WITH FILL STALENESS 11; 10 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_order_by_with_fill_basic.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT n, source FROM ( 3 | SELECT toFloat32(number % 10) AS n, 'original' AS source 4 | FROM numbers(10) WHERE number % 3 = 1 5 | ) ORDER BY n WITH FILL; 6 | 7 | 8 | -- Format SQL: 9 | SELECT n, source FROM (SELECT toFloat32(number % 10) AS n, 'original' AS source FROM numbers(10) WHERE number % 3 = 1) ORDER BY n WITH FILL; 10 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_multi_join.sql: -------------------------------------------------------------------------------- 1 | with t1 as ( 2 | select 'value1' as value 3 | ), t2 as ( 4 | select 'value2' as value 5 | ), t3 as ( 6 | select 'value3' as value 7 | ) 8 | select 9 | t1.value as value1, 10 | t2.value as value2, 11 | t3.value as value3 12 | from 13 | t1 14 | join t2 on true 15 | join t3 16 | join t4 on true 17 | join t5 18 | -------------------------------------------------------------------------------- 
/parser/testdata/ddl/create_materialized_view_with_refresh.sql: -------------------------------------------------------------------------------- 1 | CREATE MATERIALIZED VIEW fresh_mv 2 | REFRESH EVERY 1 HOUR OFFSET 10 MINUTE 3 | RANDOMIZE FOR 1 SECOND 4 | DEPENDS ON table_v5 5 | SETTINGS 6 | randomize_for = 1, 7 | randomize_offset = 10, 8 | randomize_period = 1 9 | APPEND TO target_table_name 10 | EMPTY 11 | AS SELECT 12 | `field_1`, 13 | `field_2`, 14 | `field_3`, 15 | FROM table_v5 -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00146_aggregate_function_uniq.sql: -------------------------------------------------------------------------------- 1 | SELECT RegionID, uniqHLL12(WatchID) AS X FROM remote('127.0.0.{1,2}', test, hits) GROUP BY RegionID HAVING X > 100000 ORDER BY RegionID ASC; 2 | SELECT RegionID, uniqCombined(WatchID) AS X FROM remote('127.0.0.{1,2}', test, hits) GROUP BY RegionID HAVING X > 100000 ORDER BY RegionID ASC; 3 | SELECT abs(uniq(WatchID) - uniqExact(WatchID)) FROM test.hits; 4 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_simple_with_is_not_null.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT f0,f1,f2,f3 as a0 3 | FROM test.events_local 4 | WHERE (f0 IN ('foo', 'bar', 'test')) 5 | AND (f1 = 'testing') 6 | AND f2 IS NULL 7 | AND f3 IS NOT NULL 8 | 9 | -- Format SQL: 10 | SELECT f0, f1, f2, f3 AS a0 FROM test.events_local WHERE (f0 IN ('foo', 'bar', 'test')) AND (f1 = 'testing') AND f2 IS NULL AND f3 IS NOT NULL; 11 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00023_totals_limit.sql: -------------------------------------------------------------------------------- 1 | SET output_format_write_statistics = 0; 2 | SELECT goals_alias.ID AS 
`ym:s:goalDimension`, uniqIf(UserID, (UserID != 0) AND (`_uniq_Goals` = 1)) FROM test.visits ARRAY JOIN Goals AS goals_alias, arrayEnumerateUniq(Goals.ID) AS `_uniq_Goals` WHERE (CounterID = 842440) GROUP BY `ym:s:goalDimension` WITH TOTALS ORDER BY `ym:s:goalDimension` LIMIT 0, 1 FORMAT JSONCompact; 3 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/alter_table_attach_partition.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test ATTACH PARTITION '20210114'; 3 | ALTER TABLE test ATTACH PARTITION '20210114' FROM test1; 4 | ALTER TABLE test ATTACH PARTITION ID '20210114'; 5 | 6 | 7 | 8 | -- Format SQL: 9 | ALTER TABLE test ATTACH PARTITION '20210114'; 10 | ALTER TABLE test ATTACH PARTITION '20210114' FROM test1; 11 | ALTER TABLE test ATTACH PARTITION '20210114'; 12 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/show_databases_format.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ShowPos": 0, 4 | "StatementEnd": 27, 5 | "ShowType": "DATABASES", 6 | "Target": null, 7 | "NotLike": false, 8 | "LikeType": "", 9 | "LikePattern": null, 10 | "Limit": null, 11 | "OutFile": null, 12 | "Format": { 13 | "LiteralPos": 22, 14 | "LiteralEnd": 26, 15 | "Literal": "JSON" 16 | } 17 | } 18 | ] -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00056_view.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS test.view; 2 | CREATE VIEW test.view AS SELECT CounterID, count() AS c FROM test.hits GROUP BY CounterID; 3 | SELECT count() FROM test.view; 4 | SELECT c, count() FROM test.view GROUP BY c ORDER BY count() DESC LIMIT 10; 5 | SELECT * FROM test.view ORDER BY c DESC LIMIT 10; 6 | SELECT * 
FROM test.view SAMPLE 0.1 ORDER BY c DESC LIMIT 10; 7 | DROP TABLE test.view; 8 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_with_sample_by.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE default.test UUID '87887901-e33c-497e-8788-7901e33c997e' 2 | ( 3 | `f0` DateTime, 4 | `f1` UInt32, 5 | `f3` UInt32 6 | ) 7 | ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}/{shard}/default/test', '{replica}') 8 | PARTITION BY toYYYYMM(timestamp) 9 | ORDER BY (contractid, toDate(timestamp), userid) 10 | SAMPLE BY userid 11 | SETTINGS index_granularity = 8192; -------------------------------------------------------------------------------- /parser/testdata/ddl/output/show_databases_ilike.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ShowPos": 0, 4 | "StatementEnd": 29, 5 | "ShowType": "DATABASES", 6 | "Target": null, 7 | "NotLike": false, 8 | "LikeType": "ILIKE", 9 | "LikePattern": { 10 | "LiteralPos": 22, 11 | "LiteralEnd": 27, 12 | "Literal": "Test%" 13 | }, 14 | "Limit": null, 15 | "OutFile": null, 16 | "Format": null 17 | } 18 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/show_databases_like.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ShowPos": 0, 4 | "StatementEnd": 28, 5 | "ShowType": "DATABASES", 6 | "Target": null, 7 | "NotLike": false, 8 | "LikeType": "LIKE", 9 | "LikePattern": { 10 | "LiteralPos": 21, 11 | "LiteralEnd": 26, 12 | "Literal": "test%" 13 | }, 14 | "Limit": null, 15 | "OutFile": null, 16 | "Format": null 17 | } 18 | ] -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00093_prewhere_array_join.sql: 
-------------------------------------------------------------------------------- 1 | SELECT arrayJoin([SearchEngineID]) AS search_engine, URL FROM test.hits WHERE SearchEngineID != 0 AND search_engine != 0 FORMAT Null; 2 | 3 | SELECT 4 | arrayJoin([0]) AS browser, 5 | arrayJoin([SearchEngineID]) AS search_engine, 6 | URL 7 | FROM test.hits 8 | WHERE 1 AND (SearchEngineID != 0) AND (browser != 0) AND (search_engine != 0) 9 | FORMAT Null; 10 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/show_databases_not_ilike.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ShowPos": 0, 4 | "StatementEnd": 33, 5 | "ShowType": "DATABASES", 6 | "Target": null, 7 | "NotLike": true, 8 | "LikeType": "ILIKE", 9 | "LikePattern": { 10 | "LiteralPos": 26, 11 | "LiteralEnd": 31, 12 | "Literal": "Temp%" 13 | }, 14 | "Limit": null, 15 | "OutFile": null, 16 | "Format": null 17 | } 18 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/show_databases_not_like.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ShowPos": 0, 4 | "StatementEnd": 32, 5 | "ShowType": "DATABASES", 6 | "Target": null, 7 | "NotLike": true, 8 | "LikeType": "LIKE", 9 | "LikePattern": { 10 | "LiteralPos": 25, 11 | "LiteralEnd": 30, 12 | "Literal": "temp%" 13 | }, 14 | "Limit": null, 15 | "OutFile": null, 16 | "Format": null 17 | } 18 | ] -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00043_any_left_join.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | EventDate, 3 | count() AS hits, 4 | any(visits) 5 | FROM test.hits ANY LEFT JOIN 6 | ( 7 | SELECT 8 | StartDate AS EventDate, 9 | sum(Sign) AS visits 10 | FROM test.visits 11 | GROUP BY EventDate 
12 | ) USING EventDate 13 | GROUP BY EventDate 14 | ORDER BY hits DESC 15 | LIMIT 10 16 | SETTINGS joined_subquery_requires_alias = 0; 17 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_as_remote_function.sql: -------------------------------------------------------------------------------- 1 | -- CREATE TABLE with columns AS table function (remoteSecure) 2 | CREATE TABLE test_remote 3 | ( 4 | id UInt64, 5 | name String, 6 | value Int32 7 | ) 8 | AS remoteSecure('host.example.com', 'source_db', 'source_table', 'user', 'password'); 9 | 10 | -- Simpler test case with remote() 11 | CREATE TABLE test_table (id UInt64, name String) AS remote('localhost', 'db', 'source_table'); 12 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/show_databases_format_string.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ShowPos": 0, 4 | "StatementEnd": 37, 5 | "ShowType": "DATABASES", 6 | "Target": null, 7 | "NotLike": false, 8 | "LikeType": "", 9 | "LikePattern": null, 10 | "Limit": null, 11 | "OutFile": null, 12 | "Format": { 13 | "LiteralPos": 23, 14 | "LiteralEnd": 35, 15 | "Literal": "TabSeparated" 16 | } 17 | } 18 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/show_databases_limit.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ShowPos": 0, 4 | "StatementEnd": 24, 5 | "ShowType": "DATABASES", 6 | "Target": null, 7 | "NotLike": false, 8 | "LikeType": "", 9 | "LikePattern": null, 10 | "Limit": { 11 | "NumPos": 21, 12 | "NumEnd": 23, 13 | "Literal": "10", 14 | "Base": 10 15 | }, 16 | "OutFile": null, 17 | "Format": null 18 | } 19 | ] -------------------------------------------------------------------------------- 
/parser/testdata/ddl/output/show_databases_outfile.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ShowPos": 0, 4 | "StatementEnd": 49, 5 | "ShowType": "DATABASES", 6 | "Target": null, 7 | "NotLike": false, 8 | "LikeType": "", 9 | "LikePattern": null, 10 | "Limit": null, 11 | "OutFile": { 12 | "LiteralPos": 29, 13 | "LiteralEnd": 47, 14 | "Literal": "/tmp/databases.txt" 15 | }, 16 | "Format": null 17 | } 18 | ] -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00051_min_max_array.sql: -------------------------------------------------------------------------------- 1 | SELECT CounterID, count(), max(GoalsReached), min(GoalsReached), minIf(GoalsReached, notEmpty(GoalsReached)) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20; 2 | SELECT CounterID, count(), max(GoalsReached), min(GoalsReached), minIf(GoalsReached, notEmpty(GoalsReached)) FROM test.hits GROUP BY CounterID ORDER BY count() DESC LIMIT 20 SETTINGS optimize_aggregation_in_order = 1 3 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00095_hyperscan_profiler.sql: -------------------------------------------------------------------------------- 1 | -- Tags: no-debug, use-vectorscan 2 | 3 | -- Check that server does not get segfault due to bad stack unwinding from Hyperscan 4 | 5 | SET query_profiler_cpu_time_period_ns = 1000000; 6 | SET query_profiler_real_time_period_ns = 1000000; 7 | 8 | SELECT count() FROM test.hits WHERE multiFuzzyMatchAny(URL, 2, ['about/address', 'for_woman', '^https?://lm-company.ruy/$', 'ultimateguitar.com']); 9 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_order_by_with_fill_step.sql: -------------------------------------------------------------------------------- 1 | -- Origin 
SQL: 2 | SELECT date, value FROM ( 3 | SELECT toDate('2020-01-01') + INTERVAL number DAY AS date, number AS value 4 | FROM numbers(5) 5 | ) ORDER BY date WITH FILL STEP INTERVAL 1 DAY; 6 | 7 | 8 | -- Format SQL: 9 | SELECT date, value FROM (SELECT toDate('2020-01-01') + INTERVAL number DAY AS date, number AS value FROM numbers(5)) ORDER BY date WITH FILL STEP INTERVAL 1 DAY; 10 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_table_with_null_engine.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE TABLE logs.t0 on cluster default 3 | ( 4 | `trace_id` String CODEC(ZSTD(1)), 5 | INDEX trace_id_bloom_idx trace_id TYPE bloom_filter(0.01) GRANULARITY 64 6 | ) ENGINE = Null(); 7 | 8 | -- Format SQL: 9 | CREATE TABLE logs.t0 ON CLUSTER default (`trace_id` String CODEC(ZSTD(1)), INDEX trace_id_bloom_idx trace_id TYPE bloom_filter(0.01) GRANULARITY 64) ENGINE = Null(); 10 | -------------------------------------------------------------------------------- /parser/testdata/dml/format/create_column_with_ttl.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE TABLE example1 ( 3 | timestamp DateTime, 4 | x UInt32 TTL timestamp + INTERVAL 1 MONTH, 5 | y UInt32 TTL timestamp + INTERVAL 1 WEEK 6 | ) 7 | ENGINE = MergeTree 8 | ORDER BY tuple() 9 | 10 | -- Format SQL: 11 | CREATE TABLE example1 (timestamp DateTime, x UInt32 TTL timestamp + INTERVAL 1 MONTH, y UInt32 TTL timestamp + INTERVAL 1 WEEK) ENGINE = MergeTree ORDER BY tuple(); 12 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_order_by_with_fill_from_to.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT n, source FROM ( 3 | SELECT toFloat32(number % 10) AS n, 'original' AS source 4 | FROM 
numbers(10) WHERE number % 3 = 1 5 | ) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5; 6 | 7 | 8 | -- Format SQL: 9 | SELECT n, source FROM (SELECT toFloat32(number % 10) AS n, 'original' AS source FROM numbers(10) WHERE number % 3 = 1) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5; 10 | -------------------------------------------------------------------------------- /parser/testdata/dml/insert_values.sql: -------------------------------------------------------------------------------- 1 | INSERT INTO helloworld.my_first_table (user_id, message, timestamp, metric) VALUES 2 | (101, 'Hello, ClickHouse!', now(), -1.0 ), 3 | (102, 'Insert a lot of rows per batch', yesterday(), 1.41421 ), 4 | (102, 'Sort your data based on your commonly-used queries', today(), 2.718 ), 5 | (101, 'Granules are the smallest chunks of data read', now() + 5, 3.14159 ) -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00062_loyalty.sql: -------------------------------------------------------------------------------- 1 | SELECT loyalty, count() AS c, bar(log(c + 1) * 1000, 0, log(6000) * 1000, 80) FROM (SELECT UserID, toInt8((yandex > google ? 
yandex / (yandex + google) : -google / (yandex + google)) * 10) AS loyalty FROM (SELECT UserID, sum(SearchEngineID = 2) AS yandex, sum(SearchEngineID = 3) AS google FROM test.hits WHERE SearchEngineID = 2 OR SearchEngineID = 3 GROUP BY UserID HAVING yandex + google > 10)) GROUP BY loyalty ORDER BY loyalty -------------------------------------------------------------------------------- /parser/testdata/ddl/attach_table_basic.sql: -------------------------------------------------------------------------------- 1 | ATTACH TABLE IF NOT EXISTS test.events_local ON CLUSTER 'default_cluster' ( 2 | f0 String, 3 | f1 String, 4 | f2 String, 5 | f3 Datetime, 6 | f4 Datetime, 7 | f5 Map(String,String), 8 | f6 String, 9 | f7 Datetime DEFAULT now() 10 | ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/test/events_local', '{replica}') 11 | TTL f3 + INTERVAL 6 MONTH 12 | PARTITION BY toYYYYMMDD(f3) 13 | ORDER BY (f0,f1,f2); -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00075_left_array_join.sql: -------------------------------------------------------------------------------- 1 | SELECT UserID, EventTime::DateTime('Asia/Dubai'), pp.Key1, pp.Key2, ParsedParams.Key1 FROM test.hits ARRAY JOIN ParsedParams AS pp WHERE CounterID = 1704509 ORDER BY UserID, EventTime, pp.Key1, pp.Key2 LIMIT 100; 2 | SELECT UserID, EventTime::DateTime('Asia/Dubai'), pp.Key1, pp.Key2, ParsedParams.Key1 FROM test.hits LEFT ARRAY JOIN ParsedParams AS pp WHERE CounterID = 1704509 ORDER BY UserID, EventTime, pp.Key1, pp.Key2 LIMIT 100; 3 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_order_by_with_fill_interpolate_no_columns.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT n, value FROM ( 3 | SELECT toFloat32(number % 10) AS n, number AS value 4 | FROM numbers(10) WHERE number 
% 3 = 1 5 | ) ORDER BY n WITH FILL FROM 0 TO 10 STEP 1 6 | INTERPOLATE; 7 | 8 | 9 | -- Format SQL: 10 | SELECT n, value FROM (SELECT toFloat32(number % 10) AS n, number AS value FROM numbers(10) WHERE number % 3 = 1) ORDER BY n WITH FILL FROM 0 TO 10 STEP 1 INTERPOLATE; 11 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_materialized_view_with_definer.sql: -------------------------------------------------------------------------------- 1 | CREATE MATERIALIZED VIEW fresh_mv 2 | REFRESH EVERY 1 HOUR OFFSET 10 MINUTE APPEND TO events_export 3 | ( 4 | `timestamp` DateTime64(9), 5 | `field_1` String, 6 | `field_2` String, 7 | ) 8 | DEFINER = default SQL SECURITY DEFINER 9 | AS (SELECT 10 | timestamp, 11 | field_1, 12 | field_2, 13 | FROM event_table 14 | WHERE toStartOfHour(timestamp) = toStartOfHour(now() - toIntervalHour(1))) 15 | COMMENT 'Test comment' 16 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_with_on_clsuter.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS test.events_local ON CLUSTER 'default_cluster' ( 2 | f0 String, 3 | f1 String, 4 | f2 String, 5 | f3 Datetime, 6 | f4 Datetime, 7 | f5 Map(String,String), 8 | f6 String, 9 | f7 Datetime DEFAULT now() 10 | ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/test/events_local', '{replica}') 11 | TTL f3 + INTERVAL 6 MONTH 12 | PARTITION BY toYYYYMMDD(f3) 13 | ORDER BY (f0,f1,f2); -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_keyword_in_group_by.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT 3 | toStartOfInterval(timestamp, toIntervalMinute(1)) AS interval, 4 | column_name 5 | FROM table 6 | WHERE true 7 | GROUP BY (interval, column_name) 8 | ORDER BY 
(interval AS i, column_name) ASC 9 | 10 | -- Format SQL: 11 | SELECT toStartOfInterval(timestamp, toIntervalMinute(1)) AS interval, column_name FROM table WHERE true GROUP BY (interval, column_name) ORDER BY (interval AS i, column_name) ASC; 12 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_with_uuid.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS test.events_local UUID '1234' ON CLUSTER 'default_cluster' ( 2 | f0 String, 3 | f1 String, 4 | f2 String, 5 | f3 Datetime, 6 | f4 Datetime, 7 | f5 Map(String,String), 8 | f6 String, 9 | f7 Datetime DEFAULT now() 10 | ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/test/events_local', '{replica}') 11 | TTL f3 + INTERVAL 6 MONTH 12 | PARTITION BY toYYYYMMDD(f3) 13 | ORDER BY (f0,f1,f2); -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00139_like.sql: -------------------------------------------------------------------------------- 1 | /* Note that queries are written as the user doesn't really understand that the symbol _ has special meaning in LIKE pattern. 
*/ 2 | SELECT count() FROM test.hits WHERE URL LIKE '%/avtomobili_s_probegom/_%__%__%__%'; 3 | SELECT count() FROM test.hits WHERE URL LIKE '/avtomobili_s_probegom/_%__%__%__%'; 4 | SELECT count() FROM test.hits WHERE URL LIKE '%_/avtomobili_s_probegom/_%__%__%__%'; 5 | SELECT count() FROM test.hits WHERE URL LIKE '%avtomobili%'; 6 | -------------------------------------------------------------------------------- /parser/helper.go: -------------------------------------------------------------------------------- 1 | package parser 2 | 3 | /* IsDigit reports whether c is an ASCII decimal digit ('0'-'9'). */ func IsDigit(c byte) bool { 4 | return '0' <= c && c <= '9' 5 | } 6 | 7 | /* IsHexDigit reports whether c is an ASCII hexadecimal digit: '0'-'9', 'a'-'f', or 'A'-'F'. */ func IsHexDigit(c byte) bool { 8 | return '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' 9 | } 10 | 11 | /* IsIdentStart reports whether c may begin an identifier: an ASCII letter or underscore. Note '$' and digits are deliberately excluded here (compare IsIdentPart). */ func IsIdentStart(c byte) bool { 12 | return 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' 13 | } 14 | 15 | /* IsIdentPart reports whether c may appear after the first character of an identifier: an ASCII letter, digit, underscore, or dollar sign. */ func IsIdentPart(c byte) bool { 16 | return '0' <= c && c <= '9' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || c == '_' || c == '$' 17 | } 18 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_distributed_table.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | create table test.event_all 3 | ON CLUSTER 'default_cluster' 4 | AS test.evnets_local 5 | ENGINE = Distributed( 6 | default_cluster, 7 | test, 8 | events_local, 9 | rand() 10 | ) SETTINGS fsync_after_insert=0; 11 | 12 | 13 | -- Format SQL: 14 | CREATE TABLE test.event_all ON CLUSTER 'default_cluster' AS test.evnets_local ENGINE = Distributed(default_cluster, test, events_local, rand()) SETTINGS fsync_after_insert=0; 15 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_materialized_view_with_gcs.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE MATERIALIZED VIEW database_name.view_name 3 | REFRESH EVERY
5 MINUTE TO database_name.table_name AS 4 | SELECT * FROM gcs(gcs_creds,url='https://storage.googleapis.com/some-bucket/some-path/'); 5 | 6 | -- Format SQL: 7 | CREATE MATERIALIZED VIEW database_name.view_name REFRESH EVERY 5 MINUTE TO database_name.table_name AS SELECT * FROM gcs(gcs_creds, url='https://storage.googleapis.com/some-bucket/some-path/'); 8 | -------------------------------------------------------------------------------- /parser/testdata/dml/format/alter_table_with_comment.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD COLUMN a.f1 String default '' comment 'test' ; 3 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD COLUMN hello String default ''; 4 | 5 | 6 | -- Format SQL: 7 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD COLUMN a.f1 String DEFAULT '' COMMENT 'test'; 8 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD COLUMN hello String DEFAULT ''; 9 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_view_on_cluster_with_uuid.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE VIEW IF NOT EXISTS cluster_name.my_view 3 | UUID '3493e374-e2bb-481b-b493-e374e2bb981b' 4 | ON CLUSTER 'my_cluster' 5 | AS ( 6 | SELECT 7 | column1, 8 | column2 9 | FROM 10 | my_other_table 11 | ); 12 | 13 | -- Format SQL: 14 | CREATE VIEW IF NOT EXISTS cluster_name.my_view UUID '3493e374-e2bb-481b-b493-e374e2bb981b' ON CLUSTER 'my_cluster' AS (SELECT column1, column2 FROM my_other_table); 15 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00084_external_aggregation.sql: -------------------------------------------------------------------------------- 1 | SET max_bytes_before_external_group_by = 
200000000; 2 | 3 | SET max_memory_usage = 1500000000; 4 | SET max_threads = 12; 5 | SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u DESC, URL LIMIT 10; 6 | 7 | SET max_memory_usage = 300000000; 8 | SET max_threads = 2; 9 | SET aggregation_memory_efficient_merge_threads = 1; 10 | SELECT URL, uniq(SearchPhrase) AS u FROM test.hits GROUP BY URL ORDER BY u DESC, URL LIMIT 10; 11 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_with_nullable.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE test.`.inner.752391fb-44cc-4dd5-b523-91fb44cc9dd5` 2 | UUID '27673372-7973-44f5-a767-33727973c4f5' ( 3 | `f0` String, 4 | `f1` String, 5 | `f2` LowCardinality(String), 6 | `f3` LowCardinality(String), 7 | `f4` DateTime64(3), 8 | `f5` Nullable(DateTime64(3)), 9 | `succeed_at` Nullable(DateTime64(3)) 10 | ) ENGINE = MergeTree 11 | PARTITION BY xxHash32(tag_id) % 20 12 | ORDER BY label_id 13 | SETTINGS index_granularity = 8192; 14 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/drop_role.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | DROP ROLE IF EXISTS r1_01293, r2_01293, r3_01293, r4_01293, r5_01293, r6_01293, r7_01293, r8_01293, r9_01293; 3 | DROP ROLE IF EXISTS r2_01293_renamed; 4 | DROP ROLE IF EXISTS r1_01293@'%', 'r2_01293@%.myhost.com'; 5 | 6 | 7 | -- Format SQL: 8 | DROP ROLE IF EXISTS r1_01293, r2_01293, r3_01293, r4_01293, r5_01293, r6_01293, r7_01293, r8_01293, r9_01293; 9 | DROP ROLE IF EXISTS r2_01293_renamed; 10 | DROP ROLE IF EXISTS r1_01293@'%', 'r2_01293@%.myhost.com'; 11 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_case_when_exists.sql: -------------------------------------------------------------------------------- 1 | -- 
Origin SQL: 2 | SELECT 3 | *, 4 | CASE 5 | WHEN EXISTS(SELECT 1 6 | FROM table_name 7 | WHERE col1 = '999999999') 8 | THEN 'then' 9 | ELSE 'else' 10 | END as check_result 11 | FROM table_name 12 | WHERE col1 = '123456789' 13 | 14 | 15 | -- Format SQL: 16 | SELECT *, CASE WHEN EXISTS(SELECT 1 FROM table_name WHERE col1 = '999999999') THEN 'then' ELSE 'else' END AS check_result FROM table_name WHERE col1 = '123456789'; 17 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_with_enum_fields.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE t0 on cluster default_cluster 2 | ( 3 | `method` Enum8('GET'=1 , 'POST'=2, 'HEAD'=3, 'PUT'=4,'PATCH'=5, 'DELETE'=6, 'CONNECT'=7, 'OPTIONS'=8, 'TRACE'=9) CODEC(ZSTD(1)), 4 | `timestamp` DateTime64(3) CODEC(DoubleDelta, ZSTD) 5 | ) 6 | ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}', '{replica}') 7 | PARTITION BY toDate(timestamp) 8 | ORDER BY (method,timestamp) 9 | TTL toDate(timestamp) + toIntervalDay(3) 10 | SETTINGS index_granularity = 8192; -------------------------------------------------------------------------------- /parser/testdata/ddl/output/show_create_table.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ShowPos": 0, 4 | "StatementEnd": 26, 5 | "ShowType": "CREATE TABLE", 6 | "Target": { 7 | "Database": null, 8 | "Table": { 9 | "Name": "mytable", 10 | "QuoteType": 1, 11 | "NamePos": 18, 12 | "NameEnd": 25 13 | } 14 | }, 15 | "NotLike": false, 16 | "LikeType": "", 17 | "LikePattern": null, 18 | "Limit": null, 19 | "OutFile": null, 20 | "Format": null 21 | } 22 | ] -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00042_any_left_join.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | EventDate, 3 | 
hits, 4 | visits 5 | FROM 6 | ( 7 | SELECT 8 | EventDate, 9 | count() AS hits 10 | FROM test.hits 11 | GROUP BY EventDate 12 | ) ANY LEFT JOIN 13 | ( 14 | SELECT 15 | StartDate AS EventDate, 16 | sum(Sign) AS visits 17 | FROM test.visits 18 | GROUP BY EventDate 19 | ) USING EventDate 20 | ORDER BY hits DESC 21 | LIMIT 10 22 | SETTINGS joined_subquery_requires_alias = 0; 23 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00149_quantiles_timing_distributed.sql: -------------------------------------------------------------------------------- 1 | -- Tags: distributed 2 | 3 | SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID); 4 | SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1; 5 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00087_where_0.sql: -------------------------------------------------------------------------------- 1 | SET max_rows_to_read = 1000; 2 | SELECT CounterID, uniq(UserID) FROM test.hits WHERE 0 != 0 GROUP BY CounterID; 3 | SELECT CounterID, uniq(UserID) FROM test.hits WHERE 0 != 0 GROUP BY CounterID SETTINGS optimize_aggregation_in_order = 1; 4 | SELECT CounterID, uniq(UserID) FROM test.hits WHERE 0 AND CounterID = 1704509 GROUP BY CounterID; 5 | SELECT CounterID, uniq(UserID) FROM test.hits WHERE 0 AND CounterID = 1704509 GROUP BY CounterID SETTINGS optimize_aggregation_in_order = 1; 6 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00094_order_by_array_join_limit.sql: 
-------------------------------------------------------------------------------- 1 | SELECT `ParsedParams.Key2` AS x 2 | FROM test.hits 3 | ARRAY JOIN ParsedParams AS PP 4 | ORDER BY x ASC 5 | LIMIT 2; 6 | 7 | SELECT arrayJoin(`ParsedParams.Key2`) AS x FROM test.hits ORDER BY x ASC LIMIT 2; 8 | WITH arrayJoin(`ParsedParams.Key2`) AS pp SELECT ParsedParams.Key2 AS x FROM test.hits ORDER BY x ASC LIMIT 2; 9 | WITH arrayJoin(`ParsedParams.Key2`) AS pp SELECT ParsedParams.Key2 AS x FROM test.hits WHERE NOT ignore(pp) ORDER BY x ASC LIMIT 2; 10 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00044_any_left_join_string.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | domain, 3 | hits, 4 | visits 5 | FROM 6 | ( 7 | SELECT 8 | domain(URL) AS domain, 9 | count() AS hits 10 | FROM test.hits 11 | GROUP BY domain 12 | ) ANY LEFT JOIN 13 | ( 14 | SELECT 15 | domain(StartURL) AS domain, 16 | sum(Sign) AS visits 17 | FROM test.visits 18 | GROUP BY domain 19 | ) USING domain 20 | ORDER BY hits DESC 21 | LIMIT 10 22 | SETTINGS joined_subquery_requires_alias = 0; 23 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/truncate_table_basic.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "TruncatePos": 0, 4 | "StatementEnd": 40, 5 | "IsTemporary": false, 6 | "IfExists": true, 7 | "Name": { 8 | "Database": { 9 | "Name": "test", 10 | "QuoteType": 1, 11 | "NamePos": 25, 12 | "NameEnd": 29 13 | }, 14 | "Table": { 15 | "Name": "table_name", 16 | "QuoteType": 1, 17 | "NamePos": 30, 18 | "NameEnd": 40 19 | } 20 | }, 21 | "OnCluster": null 22 | } 23 | ] -------------------------------------------------------------------------------- /parser/testdata/query/format/query_with_expr_compare.sql: 
-------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT date, path, splitByChar('/', path)[2] AS path_b 3 | FROM( 4 | SELECT 'pathA/pathB/pathC' AS path, '2024-09-10' AS date 5 | ) 6 | WHERE toDate(date) BETWEEN '2024-09-01' AND '2024-09-30' 7 | AND splitByChar('/', path)[1] = 'pathA' 8 | 9 | -- Format SQL: 10 | SELECT date, path, splitByChar('/', path)[2] AS path_b FROM (SELECT 'pathA/pathB/pathC' AS path, '2024-09-10' AS date) WHERE toDate(date) BETWEEN '2024-09-01' AND '2024-09-30' AND splitByChar('/', path)[1] = 'pathA'; 11 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/systems.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "SystemPos": 0, 4 | "Expr": { 5 | "FlushPos": 7, 6 | "StatementEnd": 17, 7 | "Logs": true, 8 | "Distributed": null 9 | } 10 | }, 11 | { 12 | "SystemPos": 19, 13 | "Expr": { 14 | "DropPos": 26, 15 | "StatementEnd": 49, 16 | "Type": "UNCOMPRESSED CACHE" 17 | } 18 | }, 19 | { 20 | "SystemPos": 51, 21 | "Expr": { 22 | "DropPos": 58, 23 | "StatementEnd": 79, 24 | "Type": "FILESYSTEM CACHE" 25 | } 26 | } 27 | ] -------------------------------------------------------------------------------- /parser/testdata/dml/format/insert_with_format.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | INSERT INTO `_test_1345# $.ДБ`.`2. Таблица №2`; 3 | INSERT INTO "db"."table_name" (col1, col2) VALUES (1, 2); 4 | INSERT INTO `_test_1345# $.ДБ`.`2. Таблица №2` (col1, col2); 5 | INSERT INTO table_name (col1, col2) VALUES (1, 2) FORMAT Native; 6 | 7 | -- Format SQL: 8 | INSERT INTO `_test_1345# $.ДБ`.`2. Таблица №2`; 9 | INSERT INTO "db"."table_name" (col1, col2) VALUES (1, 2); 10 | INSERT INTO `_test_1345# $.ДБ`.`2. 
Таблица №2` (col1, col2); 11 | INSERT INTO table_name (col1, col2) VALUES (1, 2); 12 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00078_group_by_arrays.sql: -------------------------------------------------------------------------------- 1 | SELECT GoalsReached AS k, count() AS c FROM test.hits GROUP BY k ORDER BY c DESC LIMIT 10; 2 | SELECT GeneralInterests AS k1, GoalsReached AS k2, count() AS c FROM test.hits GROUP BY k1, k2 ORDER BY c DESC LIMIT 10; 3 | SELECT ParsedParams.Key1 AS k1, GeneralInterests AS k2, count() AS c FROM test.hits GROUP BY k1, k2 ORDER BY c DESC LIMIT 10; 4 | SELECT ParsedParams.Key1 AS k1, GeneralInterests AS k2, count() AS c FROM test.hits WHERE notEmpty(k1) AND notEmpty(k2) GROUP BY k1, k2 ORDER BY c DESC LIMIT 10; 5 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_dictionary_basic.sql: -------------------------------------------------------------------------------- 1 | CREATE DICTIONARY test.my_dict ( 2 | id UInt64, 3 | name String DEFAULT '', 4 | value Float64 EXPRESSION toFloat64OrZero(name), 5 | parent_id UInt64 HIERARCHICAL, 6 | is_active UInt8 INJECTIVE, 7 | object_id UInt64 IS_OBJECT_ID 8 | ) 9 | PRIMARY KEY id 10 | SOURCE(MYSQL( 11 | host 'localhost' 12 | port 3306 13 | user 'default' 14 | password '' 15 | db 'test' 16 | table 'dict_table' 17 | )) 18 | LIFETIME(MIN 1000 MAX 2000) 19 | LAYOUT(HASHED()) 20 | SETTINGS(max_block_size = 8192); -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00010_quantiles_segfault.sql: -------------------------------------------------------------------------------- 1 | SELECT URL AS `ym:ah:URL`, sum((NOT DontCountHits AND NOT Refresh)), quantilesTimingIf(0.1, 0.5, 0.9)((DOMCompleteTiming + LoadEventEndTiming), DOMCompleteTiming != -1 AND LoadEventEndTiming != -1) as t FROM 
remote('127.0.0.{1,2}', test, hits) WHERE (CounterID = 800784) AND (((DontCountHits = 0) OR (IsNotBounce = 1)) AND (URL != '')) GROUP BY `ym:ah:URL` WITH TOTALS HAVING (sum((NOT DontCountHits AND NOT Refresh)) > 0) AND (count() > 0) ORDER BY sum((NOT DontCountHits AND NOT Refresh)) DESC, URL LIMIT 0, 1 2 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00154_avro.sql: -------------------------------------------------------------------------------- 1 | -- Tags: no-fasttest 2 | 3 | DROP TABLE IF EXISTS test.avro; 4 | 5 | SET max_threads = 1, max_insert_threads = 0, max_block_size = 8192, min_insert_block_size_rows = 8192, min_insert_block_size_bytes = 1048576; -- lower memory usage 6 | 7 | CREATE TABLE test.avro AS test.hits ENGINE = File(Avro); 8 | INSERT INTO test.avro SELECT * FROM test.hits LIMIT 10000; 9 | 10 | SELECT sum(cityHash64(*)) FROM (SELECT * FROM test.hits LIMIT 10000); 11 | SELECT sum(cityHash64(*)) FROM test.avro; 12 | 13 | DROP TABLE test.avro; 14 | -------------------------------------------------------------------------------- /parser/testdata/ddl/optimize.sql: -------------------------------------------------------------------------------- 1 | OPTIMIZE TABLE table DEDUPLICATE; -- all columns 2 | OPTIMIZE TABLE table DEDUPLICATE BY *; -- excludes MATERIALIZED and ALIAS columns 3 | OPTIMIZE TABLE table DEDUPLICATE BY colX,colY,colZ; 4 | OPTIMIZE TABLE table DEDUPLICATE BY * EXCEPT colX; 5 | OPTIMIZE TABLE table DEDUPLICATE BY * EXCEPT (colX, colY); 6 | OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex'); 7 | OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex') EXCEPT colX; 8 | OPTIMIZE TABLE table DEDUPLICATE BY COLUMNS('column-matched-by-regex') EXCEPT (colX, colY); -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00079_array_join_not_used_joined_column.sql: 
-------------------------------------------------------------------------------- 1 | SELECT PP.Key1 AS `ym:s:paramsLevel1`, sum(arrayAll(`x_1` -> `x_1`= '', ParsedParams.Key2)) AS `ym:s:visits` FROM test.hits ARRAY JOIN ParsedParams AS `PP` WHERE CounterID = 1704509 GROUP BY `ym:s:paramsLevel1` ORDER BY PP.Key1, `ym:s:visits` LIMIT 0, 100; 2 | SELECT PP.Key1 AS x1, ParsedParams.Key2 AS x2 FROM test.hits ARRAY JOIN ParsedParams AS PP WHERE CounterID = 1704509 ORDER BY x1, x2 LIMIT 10; 3 | SELECT ParsedParams.Key2 AS x FROM test.hits ARRAY JOIN ParsedParams AS PP ORDER BY x DESC LIMIT 10; 4 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_materialized_view_with_empty_table_schema.sql: -------------------------------------------------------------------------------- 1 | CREATE MATERIALIZED VIEW test.t0 on cluster default_cluster 2 | ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/{layer}-{shard}/test/t0', '{replica}') 3 | PARTITION BY toYYYYMM(f0) 4 | ORDER BY (f0) 5 | POPULATE AS 6 | select f0,f1,f2,coalesce(f0,f1) as f333 7 | from 8 | (select 9 | f0,f1,f2, 10 | ROW_NUMBER() over(partition by f0 order by coalesce(f1,f2)) as rn 11 | from test.t 12 | where f3 in ('foo', 'bar', 'test') 13 | and env ='test' 14 | ) as tmp 15 | where rn = 1; -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00171_grouping_aggregated_transform_bug.sql: -------------------------------------------------------------------------------- 1 | -- Tags: distributed 2 | 3 | SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming != -1 GROUP BY CounterID) SETTINGS max_block_size = 63169; 4 | SELECT sum(cityHash64(*)) FROM (SELECT CounterID, quantileTiming(0.5)(SendTiming), count() FROM remote('127.0.0.{1,2,3,4,5,6,7,8,9,10}', test.hits) WHERE SendTiming 
!= -1 GROUP BY CounterID) SETTINGS optimize_aggregation_in_order = 1, max_block_size = 63169; 5 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_order_by_with_fill_interpolate.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT n, source, inter FROM ( 3 | SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter 4 | FROM numbers(10) WHERE number % 3 = 1 5 | ) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5 6 | INTERPOLATE (inter AS inter + 1); 7 | 8 | 9 | -- Format SQL: 10 | SELECT n, source, inter FROM (SELECT toFloat32(number % 10) AS n, 'original' AS source, number AS inter FROM numbers(10) WHERE number % 3 = 1) ORDER BY n WITH FILL FROM 0 TO 5.51 STEP 0.5 INTERPOLATE (inter AS inter + 1); 11 | -------------------------------------------------------------------------------- /parser/testdata/dml/format/insert_with_placeholder.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | INSERT INTO t0(user_id, message, timestamp, metric) VALUES 3 | (?, ?, ?, ?), 4 | (?, ?, ?, ?), 5 | (?, ?, ?, ?), 6 | (?, ?, ?, ?) 
7 | ; 8 | 9 | INSERT INTO test_with_typed_columns (id, created_at) 10 | VALUES ({id: Int32}, {created_at: DateTime64(6)}); 11 | 12 | -- Format SQL: 13 | INSERT INTO t0 (user_id, message, timestamp, metric) VALUES (?, ?, ?, ?), (?, ?, ?, ?), (?, ?, ?, ?), (?, ?, ?, ?); 14 | INSERT INTO test_with_typed_columns (id, created_at) VALUES ({id:Int32}, {created_at:DateTime64(6)}); 15 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/drop_table_basic.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "DropPos": 0, 4 | "StatementEnd": 36, 5 | "DropTarget": "TABLE", 6 | "Name": { 7 | "Database": { 8 | "Name": "test", 9 | "QuoteType": 1, 10 | "NamePos": 21, 11 | "NameEnd": 25 12 | }, 13 | "Table": { 14 | "Name": "table_name", 15 | "QuoteType": 1, 16 | "NamePos": 26, 17 | "NameEnd": 36 18 | } 19 | }, 20 | "IfExists": true, 21 | "OnCluster": null, 22 | "IsTemporary": false, 23 | "Modifier": "" 24 | } 25 | ] -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | PROGRAM=clickhouse-sql-parser 3 | PKG_FILES=`go list ./... | sed -e 's=github.com/AfterShip/clickhouse-sql-parser/=./='` 4 | 5 | CCCOLOR="\033[37;1m" 6 | MAKECOLOR="\033[32;1m" 7 | ENDCOLOR="\033[0m" 8 | 9 | all: $(PROGRAM) 10 | 11 | .PHONY: all 12 | 13 | $(PROGRAM): 14 | go build -o $(PROGRAM) main.go 15 | 16 | test: 17 | @go test -v ./... -covermode=atomic -coverprofile=coverage.out -race -compatible 18 | 19 | update_test: 20 | @go test -v ./... 
-update -race -compatible 21 | 22 | lint: 23 | @printf $(CCCOLOR)"GolangCI Lint...\n"$(ENDCOLOR) 24 | @golangci-lint run --timeout 20m0s 25 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00156_max_execution_speed_sample_merge.sql: -------------------------------------------------------------------------------- 1 | SET max_execution_speed = 4000000, timeout_before_checking_execution_speed = 0; 2 | 3 | CREATE TEMPORARY TABLE times (t DateTime); 4 | 5 | INSERT INTO times SELECT now(); 6 | SELECT count() FROM test.hits SAMPLE 1 / 2; 7 | INSERT INTO times SELECT now(); 8 | 9 | SELECT max(t) - min(t) >= 1 FROM times; 10 | TRUNCATE TABLE times; 11 | 12 | INSERT INTO times SELECT now(); 13 | SELECT count() FROM merge(test, '^hits$') SAMPLE 1 / 2; 14 | INSERT INTO times SELECT now(); 15 | 16 | SELECT max(t) - min(t) >= 1 FROM times; 17 | -------------------------------------------------------------------------------- /parser/testdata/query/select_with_window_function.sql: -------------------------------------------------------------------------------- 1 | SELECT aggregation_target AS aggregation_target, 2 | timestamp AS timestamp, 3 | step_0 AS step_0, 4 | latest_0 AS latest_0, 5 | step_1 AS step_1, 6 | latest_1 AS latest_1, 7 | step_2 AS step_2, 8 | min(latest_2) OVER (PARTITION BY aggregation_target 9 | ORDER BY timestamp DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 0 PRECEDING) AS latest_2, 10 | min(latest_1) OVER w AS latest_1 11 | FROM t0 12 | WINDOW w AS (PARTITION BY aggregation_target 13 | ORDER BY timestamp DESC ROWS BETWEEN UNBOUNDED PRECEDING AND 0 PRECEDING); -------------------------------------------------------------------------------- /parser/set_test.go: -------------------------------------------------------------------------------- 1 | package parser 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/require" 7 | ) 8 | 9 | func TestSet(t *testing.T) { 10 | s := 
NewSet[int](1, 2, 3) 11 | 12 | if !s.Contains(1) { 13 | t.Errorf("Set should contain 1") 14 | } 15 | 16 | if s.Contains(4) { 17 | t.Errorf("Set should not contain 4") 18 | } 19 | 20 | s.Add(4) 21 | 22 | if !s.Contains(4) { 23 | t.Errorf("Set should contain 4") 24 | } 25 | 26 | s.Remove(4) 27 | 28 | if s.Contains(4) { 29 | t.Errorf("Set should not contain 4") 30 | } 31 | 32 | require.Equal(t, 3, len(s.Members())) 33 | } 34 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00166_explain_estimate.sql: -------------------------------------------------------------------------------- 1 | -- Tags: no-replicated-database 2 | -- Tag no-replicated-database: Requires investigation 3 | 4 | EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID = 29103473; 5 | EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID != 29103473; 6 | EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID > 29103473; 7 | EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID < 29103473; 8 | EXPLAIN ESTIMATE SELECT count() FROM test.hits WHERE CounterID = 29103473 UNION ALL SELECT count() FROM test.hits WHERE CounterID = 1704509; 9 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00031_array_enumerate_uniq.sql: -------------------------------------------------------------------------------- 1 | SELECT UserID, arrayEnumerateUniq(groupArray(SearchPhrase)) AS arr 2 | FROM 3 | ( 4 | SELECT UserID, SearchPhrase 5 | FROM test.hits 6 | WHERE CounterID = 1704509 AND UserID IN 7 | ( 8 | SELECT UserID 9 | FROM test.hits 10 | WHERE notEmpty(SearchPhrase) AND CounterID = 1704509 11 | GROUP BY UserID 12 | HAVING count() > 1 13 | ) 14 | ORDER BY UserID, WatchID 15 | ) 16 | WHERE notEmpty(SearchPhrase) 17 | GROUP BY UserID 18 | HAVING length(arr) > 1 19 | ORDER BY UserID 20 | LIMIT 20 21 | 
-------------------------------------------------------------------------------- /parser/testdata/query/select_extract_with_regex.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | COUNT(1), SRC_TYPE, NODE_CLASS, PORT, CLIENT_PORT 3 | FROM 4 | test.table 5 | WHERE 6 | app_id = 999118646 7 | AND toUnixTimestamp(timestamp) >= 1740366695 8 | AND toUnixTimestamp(timestamp) <= 1740377495 9 | GROUP BY 10 | CASE 11 | WHEN length(extract(instance, '((\\d+\\.){3}\\d+)')) > 0 THEN instance 12 | ELSE '空' 13 | END, 14 | CASE 15 | WHEN length(extract(client_ip, '((\\d+\\.){3}\\d+)')) > 0 THEN client_ip 16 | ELSE '空' 17 | END, 18 | src_type, 19 | node_class, 20 | port, 21 | client_port 22 | LIMIT 10000 23 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_table_with_keyword_partition_by.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE TABLE test.events_local UUID 'dad17568-b070-49d0-9ad1-7568b07029d0' ( 3 | `date` Date, 4 | `f1` String, 5 | `f2` String, 6 | `f3` UInt64 7 | ) ENGINE = ReplacingMergeTree 8 | PARTITION BY date 9 | ORDER BY (f1, f2) 10 | SETTINGS index_granularity = 8192; 11 | 12 | -- Format SQL: 13 | CREATE TABLE test.events_local UUID 'dad17568-b070-49d0-9ad1-7568b07029d0' (`date` Date, `f1` String, `f2` String, `f3` UInt64) ENGINE = ReplacingMergeTree ORDER BY (f1, f2) PARTITION BY date SETTINGS index_granularity=8192; 14 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_dictionary_with_comment.sql: -------------------------------------------------------------------------------- 1 | CREATE DICTIONARY test.my_dict ( 2 | id UInt64, 3 | name String DEFAULT '', 4 | value Float64 EXPRESSION toFloat64OrZero(name), 5 | parent_id UInt64 HIERARCHICAL, 6 | is_active UInt8 INJECTIVE, 7 | object_id UInt64 IS_OBJECT_ID 8 | ) 9 | 
PRIMARY KEY id 10 | SOURCE(MYSQL( 11 | host 'localhost' 12 | port 3306 13 | user 'default' 14 | password '' 15 | db 'test' 16 | table 'dict_table' 17 | )) 18 | LIFETIME(MIN 1000 MAX 2000) 19 | LAYOUT(HASHED()) 20 | SETTINGS(max_block_size = 8192) 21 | COMMENT 'This is a test dictionary with comment'; 22 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_table_with_projection.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE TABLE events 3 | ( 4 | `event_time` DateTime, 5 | `event_id` UInt64, 6 | `user_id` UInt64, 7 | `huge_string` String, 8 | PROJECTION order_by_user_id 9 | ( 10 | SELECT 11 | _part_offset 12 | ORDER BY user_id 13 | ) 14 | ) 15 | ENGINE = MergeTree() 16 | ORDER BY (event_id); 17 | 18 | -- Format SQL: 19 | CREATE TABLE events (`event_time` DateTime, `event_id` UInt64, `user_id` UInt64, `huge_string` String, PROJECTION order_by_user_id (SELECT _part_offset ORDER BY user_id)) ENGINE = MergeTree() ORDER BY (event_id); 20 | -------------------------------------------------------------------------------- /parser/testdata/ddl/alter_table_add_index.sql: -------------------------------------------------------------------------------- 1 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX my_index(f0) TYPE minmax GRANULARITY 1024; 2 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX api_id_idx api_id TYPE set(100) GRANULARITY 2; 3 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX arr_idx arr TYPE bloom_filter(0.01) GRANULARITY 3; 4 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX content_idx content TYPE tokenbf_v1(30720, 2, 0) GRANULARITY 1; 5 | ALTER TABLE test.events_local ON CLUSTER 'default_cluster' ADD INDEX output_idx output TYPE ngrambf_v1(3, 10000, 2, 1) GRANULARITY 2; 6 | 
-------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00167_read_bytes_from_fs.sql: -------------------------------------------------------------------------------- 1 | -- Tags: no-random-settings 2 | 3 | SELECT sum(cityHash64(*)) FROM test.hits SETTINGS max_threads=40; 4 | 5 | -- We had a bug which lead to additional compressed data read. test.hits compressed size is about 1.2Gb, but we read more then 3Gb. 6 | -- Small additional reads still possible, so we compare with about 1.5Gb. 7 | SYSTEM FLUSH LOGS; 8 | 9 | SELECT ProfileEvents['ReadBufferFromFileDescriptorReadBytes'] < 1500000000 from system.query_log where query = 'SELECT sum(cityHash64(*)) FROM test.hits SETTINGS max_threads=40;' and current_database = currentDatabase() and type = 'QueryFinish'; 10 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_json_type.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT a, a.b, a.b.c.d.e; 3 | SELECT JSON_TYPE('{"a": 1, "b": {"c": 2}}', '$.b'); 4 | SELECT CAST(some, 'String') AS value; 5 | SELECT CAST(some.long, 'String') AS value; 6 | SELECT CAST(some.long.json, 'String') AS value; 7 | SELECT CAST(some.long.json.path, 'String') AS value; 8 | 9 | 10 | 11 | -- Format SQL: 12 | SELECT a, a.b, a.b.c.d.e; 13 | SELECT JSON_TYPE('{"a": 1, "b": {"c": 2}}', '$.b'); 14 | SELECT CAST(some, 'String') AS value; 15 | SELECT CAST(some.long, 'String') AS value; 16 | SELECT CAST(some.long.json, 'String') AS value; 17 | SELECT CAST(some.long.json.path, 'String') AS value; 18 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_mv_with_order_by.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE MATERIALIZED VIEW IF NOT EXISTS test_mv 3 | ENGINE = 
ReplacingMergeTree() 4 | PRIMARY KEY (id) 5 | ORDER BY (id) 6 | AS 7 | SELECT * FROM test_table; 8 | 9 | CREATE MATERIALIZED VIEW IF NOT EXISTS test_mv 10 | ENGINE = ReplacingMergeTree() 11 | PRIMARY KEY (id) 12 | AS 13 | SELECT * FROM test_table; 14 | 15 | -- Format SQL: 16 | CREATE MATERIALIZED VIEW IF NOT EXISTS test_mv ENGINE = ReplacingMergeTree() ORDER BY (id) PRIMARY KEY (id) AS SELECT * FROM test_table; 17 | CREATE MATERIALIZED VIEW IF NOT EXISTS test_mv ENGINE = ReplacingMergeTree() PRIMARY KEY (id) AS SELECT * FROM test_table; 18 | -------------------------------------------------------------------------------- /parser/testdata/query/format/create_window_view.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE OR REPLACE VIEW asdf AS 3 | SELECT id, 4 | price * 1.5 AS computed_value, 5 | row_number() OVER ( 6 | PARTITION BY category 7 | ORDER BY created_at 8 | RANGE BETWEEN 3600 PRECEDING AND CURRENT ROW 9 | ) AS rn 10 | FROM source_table 11 | WHERE date >= '2023-01-01'; 12 | 13 | 14 | -- Format SQL: 15 | CREATE OR REPLACE VIEW asdf AS SELECT id, price * 1.5 AS computed_value, row_number() OVER (PARTITION BY category ORDER BY created_at RANGE BETWEEN 3600 PRECEDING AND CURRENT ROW) AS rn FROM source_table WHERE date >= '2023-01-01'; 16 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_group_by.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT 3 | datacenter, 4 | distro, 5 | SUM (quantity) AS qty 6 | FROM 7 | servers 8 | GROUP BY 9 | GROUPING SETS( 10 | (datacenter,distro), 11 | (datacenter), 12 | (distro), 13 | () 14 | ); 15 | 16 | SELECT 17 | datacenter, 18 | distro, 19 | SUM (quantity) AS qty 20 | FROM 21 | servers 22 | GROUP BY ALL; 23 | 24 | -- Format SQL: 25 | SELECT datacenter, distro, SUM(quantity) AS qty FROM servers GROUP BY GROUPING 
SETS((datacenter, distro), (datacenter), (distro), ()); 26 | SELECT datacenter, distro, SUM(quantity) AS qty FROM servers GROUP BY ALL; 27 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_mv_with_not_op.sql: -------------------------------------------------------------------------------- 1 | CREATE MATERIALIZED VIEW infra_bm.view_name 2 | ON CLUSTER 'default_cluster' TO infra_bm.table_name 3 | ( 4 | `f1` DateTime64(3), 5 | `f2` String, 6 | `f3` String, 7 | `f4` String, 8 | `f5` String, 9 | `f6` Int64 10 | ) AS 11 | SELECT f1, 12 | f2, 13 | visitParamExtractString(properties, 'f3') AS f3, 14 | visitParamExtractString(properties, 'f4') AS f4, 15 | visitParamExtractString(properties, 'f5') AS f5, 16 | visitParamExtractInt(properties, 'f6') AS f6 17 | FROM infra_bm.table_name1 18 | WHERE infra_bm.table_name1.event = 'test-event' AND 19 | NOT isZeroOrNull(f2) AND f6-2 > 0 -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00150_quantiles_timing_precision.sql: -------------------------------------------------------------------------------- 1 | SELECT CounterID, quantileTiming(0.5)(SendTiming) AS qt, least(30000, quantileExact(0.5)(SendTiming)) AS qe, count() AS c, round(abs(qt - qe) / greatest(qt, qe) AS diff, 3) AS rounded_diff FROM test.hits WHERE SendTiming != -1 GROUP BY CounterID HAVING diff != 0 ORDER BY diff DESC; 2 | SELECT CounterID, quantileTiming(0.5)(SendTiming) AS qt, least(30000, quantileExact(0.5)(SendTiming)) AS qe, count() AS c, round(abs(qt - qe) / greatest(qt, qe) AS diff, 3) AS rounded_diff FROM test.hits WHERE SendTiming != -1 GROUP BY CounterID HAVING diff != 0 ORDER BY diff DESC SETTINGS optimize_aggregation_in_order = 1; 3 | -------------------------------------------------------------------------------- /parser/testdata/basic/format/set_statement.sql: 
-------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SET allow_suspicious_low_cardinality_types = true; 3 | 4 | SET max_block_size = 65536; 5 | 6 | SET output_format_json_quote_64bit_integers = 'true'; 7 | 8 | SET max_threads = 8, max_memory_usage = 10000000000, enable_optimize_predicate_expression = false; 9 | 10 | SET allow_experimental_analyzer = true; 11 | 12 | 13 | -- Format SQL: 14 | SET allow_suspicious_low_cardinality_types=true; 15 | SET max_block_size=65536; 16 | SET output_format_json_quote_64bit_integers='true'; 17 | SET max_threads=8, max_memory_usage=10000000000, enable_optimize_predicate_expression=false; 18 | SET allow_experimental_analyzer=true; 19 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00061_storage_buffer.sql: -------------------------------------------------------------------------------- 1 | DROP TABLE IF EXISTS test.hits_dst; 2 | DROP TABLE IF EXISTS test.hits_buffer; 3 | 4 | CREATE TABLE test.hits_dst AS test.hits; 5 | CREATE TABLE test.hits_buffer AS test.hits_dst ENGINE = Buffer(test, hits_dst, 8, 1, 10, 10000, 100000, 10000000, 100000000); 6 | 7 | INSERT INTO test.hits_buffer SELECT * FROM test.hits WHERE CounterID = 800784; 8 | SELECT count() FROM test.hits_buffer; 9 | SELECT count() FROM test.hits_dst; 10 | 11 | OPTIMIZE TABLE test.hits_buffer; 12 | SELECT count() FROM test.hits_buffer; 13 | SELECT count() FROM test.hits_dst; 14 | 15 | DROP TABLE test.hits_dst; 16 | DROP TABLE test.hits_buffer; 17 | -------------------------------------------------------------------------------- /parser/testdata/query/format/select_simple.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT 3 | f0, coalesce(f1, f2) AS f3, row_number() 4 | OVER (PARTITION BY f0 ORDER BY f1 ASC) AS rn 5 | FROM test.events_local 6 | WHERE (f0 IN ('foo', 'bar', 'test')) AND 
(f1 = 'testing') AND (f2 NOT LIKE 'testing2') 7 | AND f3 NOT IN ('a', 'b', 'c') 8 | 9 | 10 | GROUP BY f0, f1 11 | 12 | Limit 100, 10 By f0; 13 | 14 | -- Format SQL: 15 | SELECT f0, coalesce(f1, f2) AS f3, row_number() OVER (PARTITION BY f0 ORDER BY f1 ASC) AS rn FROM test.events_local WHERE (f0 IN ('foo', 'bar', 'test')) AND (f1 = 'testing') AND (f2 NOT LIKE 'testing2') AND f3 NOT IN ('a', 'b', 'c') GROUP BY f0, f1 LIMIT 10 OFFSET 100 BY f0; 16 | -------------------------------------------------------------------------------- /parser/testdata/ddl/grant_privilege.sql: -------------------------------------------------------------------------------- 1 | GRANT SELECT(x,y) ON db.table TO john; 2 | GRANT SELECT(x,y) ON db.table TO john WITH GRANT OPTION WITH ADMIN OPTION; 3 | GRANT SELECT(x,y) ON db.* TO john; 4 | GRANT SELECT(x,y) ON *.table TO john; 5 | GRANT SELECT(x,y) ON *.* TO john; 6 | GRANT SELECT(x,y) ON *.table TO CURRENT_USER; 7 | GRANT SELECT(x,y) ON *.table TO CURRENT_USER,john,mary; 8 | GRANT ALL ON *.* TO admin_role WITH GRANT OPTION; 9 | GRANT SELECT,INSERT ON database.table_1 TO table_1_select_role; 10 | GRANT SELECT(x, y, z),INSERT ON database.table_1 TO table_1_select_role; 11 | GRANT SELECT, dictGet ON *.* TO select_all_role; 12 | GRANT ADMIN OPTION ON *.* TO select_all_role; 13 | 14 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00164_quantileBfloat16.sql: -------------------------------------------------------------------------------- 1 | SELECT CounterID AS k, quantileBFloat16(0.5)(ResolutionWidth) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; 2 | SELECT CounterID AS k, quantilesBFloat16(0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth) FROM test.hits GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; 3 | 4 | 5 | SELECT CounterID AS k, quantileBFloat16(0.5)(ResolutionWidth) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, 
CounterID LIMIT 10; 6 | SELECT CounterID AS k, quantilesBFloat16(0.1, 0.5, 0.9, 0.99, 0.999)(ResolutionWidth) FROM remote('127.0.0.{1,2}', test.hits) GROUP BY k ORDER BY count() DESC, CounterID LIMIT 10; 7 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_materialized_view_basic.sql: -------------------------------------------------------------------------------- 1 | CREATE 2 | MATERIALIZED VIEW infra_bm.view_name 3 | ON CLUSTER 'default_cluster' TO infra_bm.table_name 4 | ( 5 | `f1` DateTime64(3), 6 | `f2` String, 7 | `f3` String, 8 | `f4` String, 9 | `f5` String, 10 | `f6` Int64 11 | ) AS 12 | SELECT f1, 13 | f2, 14 | visitParamExtractString(properties, 'f3') AS f3, 15 | visitParamExtractString(properties, 'f4') AS f4, 16 | visitParamExtractString(properties, 'f5') AS f5, 17 | visitParamExtractInt(properties, 'f6') AS f6 18 | FROM 19 | infra_bm.table_name1 20 | WHERE 21 | infra_bm.table_name1.event = 'test-event' 22 | COMMENT 'Comment for table'; -------------------------------------------------------------------------------- /parser/testdata/ddl/rename.sql: -------------------------------------------------------------------------------- 1 | -- rename table 2 | RENAME TABLE t1 TO t11; 3 | RENAME TABLE t1 TO t11 ON CLUSTER 'default_cluster'; 4 | RENAME TABLE t1 TO t11, t2 TO t22; 5 | RENAME TABLE t1 TO t11, t2 TO t22 ON CLUSTER 'default_cluster'; 6 | -- rename dictionary 7 | RENAME DICTIONARY t1 TO t11; 8 | RENAME DICTIONARY t1 TO t11 ON CLUSTER 'default_cluster'; 9 | RENAME DICTIONARY t1 TO t11, t2 TO t22; 10 | RENAME DICTIONARY t1 TO t11, t2 TO t22 ON CLUSTER 'default_cluster'; 11 | -- rename database 12 | RENAME DATABASE t1 TO t11; 13 | RENAME DATABASE t1 TO t11 ON CLUSTER 'default_cluster'; 14 | RENAME DATABASE t1 TO t11, t2 TO t22; 15 | RENAME DATABASE t1 TO t11, t2 TO t22 ON CLUSTER 'default_cluster'; 16 | -------------------------------------------------------------------------------- 
/parser/testdata/query/format/select_with_multi_join.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | with t1 as ( 3 | select 'value1' as value 4 | ), t2 as ( 5 | select 'value2' as value 6 | ), t3 as ( 7 | select 'value3' as value 8 | ) 9 | select 10 | t1.value as value1, 11 | t2.value as value2, 12 | t3.value as value3 13 | from 14 | t1 15 | join t2 on true 16 | join t3 17 | join t4 on true 18 | join t5 19 | 20 | 21 | -- Format SQL: 22 | WITH t1 AS (SELECT 'value1' AS value), t2 AS (SELECT 'value2' AS value), t3 AS (SELECT 'value3' AS value) SELECT t1.value AS value1, t2.value AS value2, t3.value AS value3 FROM t1 JOIN t2 ON true JOIN t3 JOIN t4 ON true JOIN t5; 23 | -------------------------------------------------------------------------------- /parser/testdata/basic/format/settings_statement.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SETTINGS allow_suspicious_low_cardinality_types = true; 3 | 4 | SETTINGS max_block_size = 65536; 5 | 6 | SETTINGS output_format_json_quote_64bit_integers = 'true'; 7 | 8 | SETTINGS max_threads = 8, max_memory_usage = 10000000000, enable_optimize_predicate_expression = false; 9 | 10 | SETTINGS allow_experimental_analyzer = true; 11 | 12 | 13 | -- Format SQL: 14 | SET allow_suspicious_low_cardinality_types=true; 15 | SET max_block_size=65536; 16 | SET output_format_json_quote_64bit_integers='true'; 17 | SET max_threads=8, max_memory_usage=10000000000, enable_optimize_predicate_expression=false; 18 | SET allow_experimental_analyzer=true; 19 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_or_replace.sql: -------------------------------------------------------------------------------- 1 | -- It's a short link events table 2 | /** 3 | * @name Short link events 4 | * @description It's a short link events table 5 | */ 6 | CREATE OR REPLACE TABLE IF NOT 
EXISTS test.events_local ( 7 | f0 String, 8 | f1 String CODEC(ZSTD(1)), 9 | f2 VARCHAR(255), 10 | ) ENGINE = MergeTree 11 | PRIMARY KEY (f0, f1, f2) 12 | PARTITION BY toYYYYMMDD(f1) 13 | TTL f1 + INTERVAL 6 MONTH 14 | ORDER BY (f1,f2) 15 | COMMENT 'Comment for table'; 16 | 17 | CREATE OR REPLACE VIEW IF NOT EXISTS my_view(col1 String, col2 String) 18 | AS 19 | SELECT 20 | id, 21 | name 22 | FROM 23 | my_table; 24 | 25 | CREATE OR REPLACE FUNCTION IF NOT EXISTS my_function AS (x, y) -> x + y; -------------------------------------------------------------------------------- /parser/testdata/query/select_window_params.sql: -------------------------------------------------------------------------------- 1 | -- Parameters in WHERE and in window frames (UInt32 & String; both spacing styles; shorthand frame) 2 | SELECT sum(x) OVER (ORDER BY y ROWS BETWEEN {start:UInt32} PRECEDING AND CURRENT ROW) AS total1, 3 | avg(x) OVER (ORDER BY y ROWS BETWEEN CURRENT ROW AND {end:UInt32} FOLLOWING) AS avg1, 4 | count(*) OVER (ORDER BY y RANGE BETWEEN {range_start:UInt32} PRECEDING AND {range_end:UInt32} FOLLOWING) AS cnt1, 5 | sum(x) OVER (ROWS {window_size :UInt32} PRECEDING) AS rows_shorthand 6 | FROM t 7 | WHERE category = {category :String} 8 | AND type = {type:String}; 9 | -------------------------------------------------------------------------------- /parser/testdata/ddl/bug_001.sql: -------------------------------------------------------------------------------- 1 | CREATE MATERIALIZED VIEW IF NOT EXISTS db.table 2 | ON CLUSTER 'default_cluster' TO db.table_mv 3 | AS 4 | SELECT 5 | event_ts, 6 | org_id, 7 | visitParamExtractString(properties, 'x') AS x, 8 | visitParamExtractString(properties, 'y') AS y, 9 | visitParamExtractString(properties, 'z') AS z, 10 | visitParamExtractString(properties, 'a') AS a, 11 | visitParamExtractString(properties, 'b') AS b, 12 | visitParamExtractString(properties, 'c') AS c, 13 | visitParamExtractString(properties, 'd') AS d, 14 | 
visitParamExtractInt(properties, 'e') AS e, 15 | visitParamExtractInt(properties, 'f') AS f 16 | FROM db.table 17 | WHERE db.table.event = 'hello'; -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_reset_setting.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 64, 5 | "TableIdentifier": { 6 | "Database": null, 7 | "Table": { 8 | "Name": "example_table", 9 | "QuoteType": 1, 10 | "NamePos": 12, 11 | "NameEnd": 25 12 | } 13 | }, 14 | "OnCluster": null, 15 | "AlterExprs": [ 16 | { 17 | "ResetPos": 26, 18 | "StatementEnd": 64, 19 | "Settings": [ 20 | { 21 | "Name": "max_part_loading_threads", 22 | "QuoteType": 1, 23 | "NamePos": 40, 24 | "NameEnd": 64 25 | } 26 | ] 27 | } 28 | ] 29 | } 30 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/truncate_temporary_table_on_clsuter.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "TruncatePos": 0, 4 | "StatementEnd": 78, 5 | "IsTemporary": true, 6 | "IfExists": true, 7 | "Name": { 8 | "Database": { 9 | "Name": "test", 10 | "QuoteType": 1, 11 | "NamePos": 35, 12 | "NameEnd": 39 13 | }, 14 | "Table": { 15 | "Name": "table_name", 16 | "QuoteType": 1, 17 | "NamePos": 40, 18 | "NameEnd": 50 19 | } 20 | }, 21 | "OnCluster": { 22 | "OnPos": 51, 23 | "Expr": { 24 | "LiteralPos": 63, 25 | "LiteralEnd": 78, 26 | "Literal": "default_cluster" 27 | } 28 | } 29 | } 30 | ] -------------------------------------------------------------------------------- /parser/testdata/query/format/select_with_query_parameter.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SET param_a = 13; 3 | SET param_b = 'str'; 4 | SET param_c = '2022-08-04 18:30:53'; 5 | SET param_d = {'10': [11, 12], '13': [14, 15]}; 
6 | 7 | SELECT 8 | {a: UInt32}, 9 | {b: String}, 10 | {c: DateTime}, 11 | {d: Map(String, Array(UInt8))}; 12 | 13 | SELECT * FROM clickhouse WHERE tenant_id = {tenant_id: String}; 14 | 15 | 16 | -- Format SQL: 17 | SET param_a=13; 18 | SET param_b='str'; 19 | SET param_c='2022-08-04 18:30:53'; 20 | SET param_d={'10': [11, 12], '13': [14, 15]}; 21 | SELECT {a: UInt32}, {b: String}, {c: DateTime}, {d: Map(String, Array(UInt8))}; 22 | SELECT * FROM clickhouse WHERE tenant_id = {tenant_id: String}; 23 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_dictionary_comprehensive.sql: -------------------------------------------------------------------------------- 1 | CREATE OR REPLACE DICTIONARY test.comprehensive_dict 2 | UUID '12345678-1234-1234-1234-123456789012' 3 | ON CLUSTER production_cluster 4 | ( 5 | id UInt64, 6 | name String DEFAULT '', 7 | value Float64 EXPRESSION toFloat64OrZero(name), 8 | parent_id UInt64 HIERARCHICAL, 9 | is_active UInt8 INJECTIVE, 10 | object_id UInt64 IS_OBJECT_ID 11 | ) 12 | PRIMARY KEY id 13 | SOURCE(MYSQL( 14 | host 'localhost' 15 | port 3306 16 | user 'root' 17 | password 'secret' 18 | db 'test_db' 19 | table 'dictionary_table' 20 | )) 21 | LIFETIME(MIN 1000 MAX 2000) 22 | LAYOUT(HASHED()) 23 | SETTINGS(max_block_size = 8192, max_insert_block_size = 1048576); -------------------------------------------------------------------------------- /parser/testdata/dml/output/delete_from.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "DeletePos": 0, 4 | "Table": { 5 | "Database": null, 6 | "Table": { 7 | "Name": "hits", 8 | "QuoteType": 1, 9 | "NamePos": 12, 10 | "NameEnd": 16 11 | } 12 | }, 13 | "OnCluster": null, 14 | "WhereExpr": { 15 | "LeftExpr": { 16 | "Name": "Title", 17 | "QuoteType": 1, 18 | "NamePos": 23, 19 | "NameEnd": 28 20 | }, 21 | "Operation": "LIKE", 22 | "RightExpr": { 23 | "LiteralPos": 35, 24 | 
"LiteralEnd": 42, 25 | "Literal": "%hello%" 26 | }, 27 | "HasGlobal": false, 28 | "HasNot": false 29 | } 30 | } 31 | ] -------------------------------------------------------------------------------- /parser/set.go: -------------------------------------------------------------------------------- 1 | package parser 2 | 3 | type Set[T comparable] struct { 4 | m map[T]struct{} 5 | } 6 | 7 | func NewSet[T comparable](members ...T) *Set[T] { 8 | m := make(map[T]struct{}) 9 | for _, member := range members { 10 | m[member] = struct{}{} 11 | } 12 | return &Set[T]{m: m} 13 | } 14 | 15 | func (s *Set[T]) Add(member T) { 16 | s.m[member] = struct{}{} 17 | } 18 | 19 | func (s *Set[T]) Remove(member T) { 20 | delete(s.m, member) 21 | } 22 | 23 | func (s *Set[T]) Contains(member T) bool { 24 | _, ok := s.m[member] 25 | return ok 26 | } 27 | 28 | func (s *Set[T]) Members() []T { 29 | members := make([]T, 0, len(s.m)) 30 | for member := range s.m { 31 | members = append(members, member) 32 | } 33 | return members 34 | } 35 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_materialized_view_with_refresh.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE MATERIALIZED VIEW fresh_mv 3 | REFRESH EVERY 1 HOUR OFFSET 10 MINUTE 4 | RANDOMIZE FOR 1 SECOND 5 | DEPENDS ON table_v5 6 | SETTINGS 7 | randomize_for = 1, 8 | randomize_offset = 10, 9 | randomize_period = 1 10 | APPEND TO target_table_name 11 | EMPTY 12 | AS SELECT 13 | `field_1`, 14 | `field_2`, 15 | `field_3`, 16 | FROM table_v5 17 | 18 | -- Format SQL: 19 | CREATE MATERIALIZED VIEW fresh_mv REFRESH EVERY 1 HOUR OFFSET 10 MINUTE RANDOMIZE FOR 1 SECOND DEPENDS ON table_v5 SETTINGS randomize_for=1, randomize_offset=10, randomize_period=1 APPEND TO target_table_name EMPTY AS SELECT `field_1`, `field_2`, `field_3`, FROM AS table_v5; 20 | 
-------------------------------------------------------------------------------- /parser/testdata/ddl/output/show_databases_comprehensive.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "ShowPos": 0, 4 | "StatementEnd": 81, 5 | "ShowType": "DATABASES", 6 | "Target": null, 7 | "NotLike": false, 8 | "LikeType": "LIKE", 9 | "LikePattern": { 10 | "LiteralPos": 21, 11 | "LiteralEnd": 26, 12 | "Literal": "prod%" 13 | }, 14 | "Limit": { 15 | "NumPos": 34, 16 | "NumEnd": 35, 17 | "Literal": "5", 18 | "Base": 10 19 | }, 20 | "OutFile": { 21 | "LiteralPos": 50, 22 | "LiteralEnd": 67, 23 | "Literal": "/tmp/prod_dbs.txt" 24 | }, 25 | "Format": { 26 | "LiteralPos": 76, 27 | "LiteralEnd": 80, 28 | "Literal": "JSON" 29 | } 30 | } 31 | ] -------------------------------------------------------------------------------- /parser/testdata/query/select_with_settings_additional_table_filters.sql: -------------------------------------------------------------------------------- 1 | SELECT * FROM test_table SETTINGS additional_table_filters={'test_table': 'status = 1'}; 2 | 3 | SELECT * FROM test_table SETTINGS additional_table_filters={'test_table': 'value = \'test\''}; 4 | 5 | SELECT * FROM test_table SETTINGS additional_table_filters={'test_table': 'value = ''test'''}; 6 | 7 | SELECT * FROM test_table 8 | SETTINGS additional_table_filters={'test_table': 'id IN (\'a\', \'b\') AND status = \'active\''} 9 | FORMAT JSON; 10 | 11 | SELECT number, x, y FROM (SELECT number FROM system.numbers LIMIT 5) f 12 | ANY LEFT JOIN (SELECT x, y FROM table_1) s ON f.number = s.x 13 | SETTINGS additional_table_filters={'system.numbers':'number != 3', 'table_1':'x != 2'}; 14 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_table_as_remote_function.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | -- CREATE TABLE with 
columns AS table function (remoteSecure) 3 | CREATE TABLE test_remote 4 | ( 5 | id UInt64, 6 | name String, 7 | value Int32 8 | ) 9 | AS remoteSecure('host.example.com', 'source_db', 'source_table', 'user', 'password'); 10 | 11 | -- Simpler test case with remote() 12 | CREATE TABLE test_table (id UInt64, name String) AS remote('localhost', 'db', 'source_table'); 13 | 14 | 15 | -- Format SQL: 16 | CREATE TABLE test_remote (id UInt64, name String, value Int32) AS remoteSecure('host.example.com', 'source_db', 'source_table', 'user', 'password'); 17 | CREATE TABLE test_table (id UInt64, name String) AS remote('localhost', 'db', 'source_table'); 18 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/drop_table_with_on_clsuter.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "DropPos": 0, 4 | "StatementEnd": 65, 5 | "DropTarget": "TABLE", 6 | "Name": { 7 | "Database": { 8 | "Name": "test", 9 | "QuoteType": 1, 10 | "NamePos": 21, 11 | "NameEnd": 25 12 | }, 13 | "Table": { 14 | "Name": "table_name", 15 | "QuoteType": 1, 16 | "NamePos": 26, 17 | "NameEnd": 36 18 | } 19 | }, 20 | "IfExists": true, 21 | "OnCluster": { 22 | "OnPos": 37, 23 | "Expr": { 24 | "LiteralPos": 49, 25 | "LiteralEnd": 64, 26 | "Literal": "default_cluster" 27 | } 28 | }, 29 | "IsTemporary": false, 30 | "Modifier": "" 31 | } 32 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/drop_table_with_no_delay.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "DropPos": 0, 4 | "StatementEnd": 74, 5 | "DropTarget": "TABLE", 6 | "Name": { 7 | "Database": { 8 | "Name": "test", 9 | "QuoteType": 1, 10 | "NamePos": 21, 11 | "NameEnd": 25 12 | }, 13 | "Table": { 14 | "Name": "table_name", 15 | "QuoteType": 1, 16 | "NamePos": 26, 17 | "NameEnd": 36 18 | } 19 | }, 20 | 
"IfExists": true, 21 | "OnCluster": { 22 | "OnPos": 37, 23 | "Expr": { 24 | "LiteralPos": 49, 25 | "LiteralEnd": 64, 26 | "Literal": "default_cluster" 27 | } 28 | }, 29 | "IsTemporary": false, 30 | "Modifier": "NO DELAY" 31 | } 32 | ] -------------------------------------------------------------------------------- /parser/testdata/query/format/access_tuple_with_dot.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | SELECT tuple('a','b','c').3, .1234; 3 | 4 | SELECT toTypeName( tuple('a' as first,'b' as second ,'c' as third)::Tuple(first String,second String,third String)), 5 | (tuple('a' as first,'b' as second ,'c' as third)::Tuple(first String,second String,third String)).second, 6 | tuple('a','b','c').3, 7 | tupleElement(tuple('a','b','c'),1) 8 | 9 | -- Format SQL: 10 | SELECT tuple('a', 'b', 'c').3, .1234; 11 | SELECT toTypeName(tuple('a' AS first, 'b' AS second, 'c' AS third)::Tuple(first String, second String, third String)), (tuple('a' AS first, 'b' AS second, 'c' AS third)::Tuple(first String, second String, third String)).second, tuple('a', 'b', 'c').3, tupleElement(tuple('a', 'b', 'c'), 1); 12 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_remove_ttl.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 63, 5 | "TableIdentifier": { 6 | "Database": { 7 | "Name": "test", 8 | "QuoteType": 1, 9 | "NamePos": 12, 10 | "NameEnd": 16 11 | }, 12 | "Table": { 13 | "Name": "events", 14 | "QuoteType": 1, 15 | "NamePos": 17, 16 | "NameEnd": 23 17 | } 18 | }, 19 | "OnCluster": { 20 | "OnPos": 24, 21 | "Expr": { 22 | "LiteralPos": 36, 23 | "LiteralEnd": 51, 24 | "Literal": "default_cluster" 25 | } 26 | }, 27 | "AlterExprs": [ 28 | { 29 | "RemovePos": 53, 30 | "StatementEnd": 63 31 | } 32 | ] 33 | } 34 | ] 
-------------------------------------------------------------------------------- /parser/testdata/ddl/output/check.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "CheckPos": 0, 4 | "Table": { 5 | "Database": null, 6 | "Table": { 7 | "Name": "test_table", 8 | "QuoteType": 1, 9 | "NamePos": 12, 10 | "NameEnd": 22 11 | } 12 | }, 13 | "Partition": null 14 | }, 15 | { 16 | "CheckPos": 24, 17 | "Table": { 18 | "Database": null, 19 | "Table": { 20 | "Name": "test_table", 21 | "QuoteType": 1, 22 | "NamePos": 36, 23 | "NameEnd": 46 24 | } 25 | }, 26 | "Partition": { 27 | "PartitionPos": 47, 28 | "Expr": { 29 | "LiteralPos": 58, 30 | "LiteralEnd": 61, 31 | "Literal": "col" 32 | }, 33 | "ID": null, 34 | "All": false 35 | } 36 | } 37 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_table_with_sample_by.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE TABLE default.test UUID '87887901-e33c-497e-8788-7901e33c997e' 3 | ( 4 | `f0` DateTime, 5 | `f1` UInt32, 6 | `f3` UInt32 7 | ) 8 | ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}/{shard}/default/test', '{replica}') 9 | PARTITION BY toYYYYMM(timestamp) 10 | ORDER BY (contractid, toDate(timestamp), userid) 11 | SAMPLE BY userid 12 | SETTINGS index_granularity = 8192; 13 | 14 | -- Format SQL: 15 | CREATE TABLE default.test UUID '87887901-e33c-497e-8788-7901e33c997e' (`f0` DateTime, `f1` UInt32, `f3` UInt32) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}/{shard}/default/test', '{replica}') ORDER BY (contractid, toDate(timestamp), userid) PARTITION BY toYYYYMM(timestamp) SAMPLE BY userid SETTINGS index_granularity=8192; 16 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_freeze_no_specify_partition.sql.golden.json: 
-------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 59, 5 | "TableIdentifier": { 6 | "Database": { 7 | "Name": "test", 8 | "QuoteType": 1, 9 | "NamePos": 12, 10 | "NameEnd": 16 11 | }, 12 | "Table": { 13 | "Name": "events", 14 | "QuoteType": 1, 15 | "NamePos": 17, 16 | "NameEnd": 23 17 | } 18 | }, 19 | "OnCluster": { 20 | "OnPos": 24, 21 | "Expr": { 22 | "LiteralPos": 36, 23 | "LiteralEnd": 51, 24 | "Literal": "default_cluster" 25 | } 26 | }, 27 | "AlterExprs": [ 28 | { 29 | "FreezePos": 53, 30 | "StatementEnd": 59, 31 | "Partition": null 32 | } 33 | ] 34 | } 35 | ] -------------------------------------------------------------------------------- /parser/testdata/dml/output/alter_table_with_modify_remove_ttl.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 83, 5 | "TableIdentifier": { 6 | "Database": { 7 | "Name": "infra", 8 | "QuoteType": 1, 9 | "NamePos": 12, 10 | "NameEnd": 17 11 | }, 12 | "Table": { 13 | "Name": "flow_processed_emails_local", 14 | "QuoteType": 1, 15 | "NamePos": 18, 16 | "NameEnd": 45 17 | } 18 | }, 19 | "OnCluster": { 20 | "OnPos": 46, 21 | "Expr": { 22 | "Name": "default_cluster", 23 | "QuoteType": 1, 24 | "NamePos": 57, 25 | "NameEnd": 72 26 | } 27 | }, 28 | "AlterExprs": [ 29 | { 30 | "RemovePos": 73, 31 | "StatementEnd": 83 32 | } 33 | ] 34 | } 35 | ] -------------------------------------------------------------------------------- /parser/testdata/dml/format/insert_values.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | INSERT INTO helloworld.my_first_table (user_id, message, timestamp, metric) VALUES 3 | (101, 'Hello, ClickHouse!', now(), -1.0 ), 4 | (102, 'Insert a lot of rows per batch', yesterday(), 1.41421 ), 5 | (102, 'Sort your data based on your commonly-used queries', today(), 2.718 ), 6 | (101, 
'Granules are the smallest chunks of data read', now() + 5, 3.14159 ) 7 | 8 | -- Format SQL: 9 | INSERT INTO helloworld.my_first_table (user_id, message, timestamp, metric) VALUES (101, 'Hello, ClickHouse!', now(), -1.0), (102, 'Insert a lot of rows per batch', yesterday(), 1.41421), (102, 'Sort your data based on your commonly-used queries', today(), 2.718), (101, 'Granules are the smallest chunks of data read', now() + 5, 3.14159); 10 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00173_group_by_use_nulls.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | CounterID AS k, 3 | quantileBFloat16(0.5)(ResolutionWidth) 4 | FROM remote('127.0.0.{1,2}', test, hits) 5 | GROUP BY k 6 | ORDER BY 7 | count() DESC, 8 | CounterID ASC 9 | LIMIT 10 10 | SETTINGS group_by_use_nulls = 1; 11 | 12 | SELECT 13 | CounterID AS k, 14 | quantileBFloat16(0.5)(ResolutionWidth) 15 | FROM test.hits 16 | GROUP BY k 17 | ORDER BY 18 | count() DESC, 19 | CounterID ASC 20 | LIMIT 10 21 | SETTINGS group_by_use_nulls = 1 FORMAT Null; 22 | 23 | -- { echoOn } 24 | set allow_experimental_analyzer = 1; 25 | 26 | SELECT 27 | CounterID AS k, 28 | quantileBFloat16(0.5)(ResolutionWidth) 29 | FROM remote('127.0.0.{1,2}', test, hits) 30 | GROUP BY k 31 | ORDER BY 32 | count() DESC, 33 | CounterID ASC 34 | LIMIT 10 35 | SETTINGS group_by_use_nulls = 1; 36 | -------------------------------------------------------------------------------- /parser/testdata/query/select_window_cte.sql: -------------------------------------------------------------------------------- 1 | WITH 2 | monthly AS ( 3 | SELECT toStartOfMonth(date) AS month, 4 | department, 5 | avg(salary) AS avg_salary 6 | FROM salary_table 7 | WHERE year = 2023 8 | GROUP BY month, department 9 | ), 10 | ranked AS ( 11 | SELECT month, 12 | department, 13 | avg_salary, 14 | row_number() OVER (PARTITION BY department ORDER BY 
avg_salary DESC) AS dept_rank 15 | FROM monthly 16 | ) 17 | SELECT month, 18 | department, 19 | avg_salary, 20 | lag(avg_salary, 1, 0) OVER ( 21 | PARTITION BY department 22 | ORDER BY month 23 | ROWS BETWEEN 1 PRECEDING AND CURRENT ROW 24 | ) AS prev_month_avg 25 | FROM ranked 26 | WHERE dept_rank <= 5 27 | ORDER BY month, department; 28 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_detach_partition.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 48, 5 | "TableIdentifier": { 6 | "Database": { 7 | "Name": "db", 8 | "QuoteType": 1, 9 | "NamePos": 12, 10 | "NameEnd": 14 11 | }, 12 | "Table": { 13 | "Name": "test", 14 | "QuoteType": 1, 15 | "NamePos": 15, 16 | "NameEnd": 19 17 | } 18 | }, 19 | "OnCluster": null, 20 | "AlterExprs": [ 21 | { 22 | "DetachPos": 27, 23 | "Partition": { 24 | "PartitionPos": 27, 25 | "Expr": { 26 | "LiteralPos": 38, 27 | "LiteralEnd": 48, 28 | "Literal": "2021-10-01" 29 | }, 30 | "ID": null, 31 | "All": false 32 | }, 33 | "Settings": null 34 | } 35 | ] 36 | } 37 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/format/attach_table_basic.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | ATTACH TABLE IF NOT EXISTS test.events_local ON CLUSTER 'default_cluster' ( 3 | f0 String, 4 | f1 String, 5 | f2 String, 6 | f3 Datetime, 7 | f4 Datetime, 8 | f5 Map(String,String), 9 | f6 String, 10 | f7 Datetime DEFAULT now() 11 | ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/test/events_local', '{replica}') 12 | TTL f3 + INTERVAL 6 MONTH 13 | PARTITION BY toYYYYMMDD(f3) 14 | ORDER BY (f0,f1,f2); 15 | 16 | -- Format SQL: 17 | CREATE TABLE IF NOT EXISTS test.events_local ON CLUSTER 'default_cluster' (f0 String, f1 String, f2 String, f3 Datetime, f4 
Datetime, f5 Map(String, String), f6 String, f7 Datetime DEFAULT now()) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/test/events_local', '{replica}') ORDER BY (f0, f1, f2) PARTITION BY toYYYYMMDD(f3) TTL f3 + INTERVAL 6 MONTH; 18 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_materialized_view_with_definer.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE MATERIALIZED VIEW fresh_mv 3 | REFRESH EVERY 1 HOUR OFFSET 10 MINUTE APPEND TO events_export 4 | ( 5 | `timestamp` DateTime64(9), 6 | `field_1` String, 7 | `field_2` String, 8 | ) 9 | DEFINER = default SQL SECURITY DEFINER 10 | AS (SELECT 11 | timestamp, 12 | field_1, 13 | field_2, 14 | FROM event_table 15 | WHERE toStartOfHour(timestamp) = toStartOfHour(now() - toIntervalHour(1))) 16 | COMMENT 'Test comment' 17 | 18 | 19 | -- Format SQL: 20 | CREATE MATERIALIZED VIEW fresh_mv REFRESH EVERY 1 HOUR OFFSET 10 MINUTE APPEND TO events_export (`timestamp` DateTime64(9), `field_1` String, `field_2` String) DEFINER = default SQL SECURITY DEFINER AS (SELECT timestamp, field_1, field_2, FROM AS event_table WHERE toStartOfHour(timestamp) = toStartOfHour(now() - toIntervalHour(1))) COMMENT 'Test comment'; 21 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_with_codec_delta.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS test_local 2 | ( 3 | `id` UInt64 CODEC(Delta, ZSTD(1)), 4 | `api_id` UInt64 CODEC(ZSTD(1)), 5 | `app_id` UInt64 CODEC(Delta(9), ZSTD(1)), 6 | `device_id` UInt64 CODEC(DoubleDelta, ZSTD(1)), 7 | `guage` Float64 CODEC(Gorilla, LZ4), 8 | `value` UInt64 CODEC(T64, LZ4), 9 | `timestamp` DateTime64(9) CODEC(ZSTD(1)), 10 | INDEX timestamp_index(timestamp) TYPE minmax GRANULARITY 4 11 | ) 12 | ENGINE = 
ReplicatedMergeTree('/root/test_local', '{replica}') 13 | PARTITION BY toStartOfHour(`timestamp`) 14 | ORDER BY (toUnixTimestamp64Nano(`timestamp`), `api_id`) 15 | TTL toStartOfHour(`timestamp`) + INTERVAL 7 DAY,toStartOfHour(`timestamp`) + INTERVAL 2 DAY 16 | SETTINGS execute_merges_on_single_replica_time_threshold=1200, index_granularity=16384, max_bytes_to_merge_at_max_space_in_pool=64424509440, storage_policy='main', ttl_only_drop_parts=1; 17 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_table_with_on_clsuter.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE TABLE IF NOT EXISTS test.events_local ON CLUSTER 'default_cluster' ( 3 | f0 String, 4 | f1 String, 5 | f2 String, 6 | f3 Datetime, 7 | f4 Datetime, 8 | f5 Map(String,String), 9 | f6 String, 10 | f7 Datetime DEFAULT now() 11 | ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/test/events_local', '{replica}') 12 | TTL f3 + INTERVAL 6 MONTH 13 | PARTITION BY toYYYYMMDD(f3) 14 | ORDER BY (f0,f1,f2); 15 | 16 | -- Format SQL: 17 | CREATE TABLE IF NOT EXISTS test.events_local ON CLUSTER 'default_cluster' (f0 String, f1 String, f2 String, f3 Datetime, f4 Datetime, f5 Map(String, String), f6 String, f7 Datetime DEFAULT now()) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/test/events_local', '{replica}') ORDER BY (f0, f1, f2) PARTITION BY toYYYYMMDD(f3) TTL f3 + INTERVAL 6 MONTH; 18 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_table_with_uuid.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE TABLE IF NOT EXISTS test.events_local UUID '1234' ON CLUSTER 'default_cluster' ( 3 | f0 String, 4 | f1 String, 5 | f2 String, 6 | f3 Datetime, 7 | f4 Datetime, 8 | f5 Map(String,String), 9 | f6 String, 10 | f7 Datetime 
DEFAULT now() 11 | ) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/test/events_local', '{replica}') 12 | TTL f3 + INTERVAL 6 MONTH 13 | PARTITION BY toYYYYMMDD(f3) 14 | ORDER BY (f0,f1,f2); 15 | 16 | -- Format SQL: 17 | CREATE TABLE IF NOT EXISTS test.events_local UUID '1234' ON CLUSTER 'default_cluster' (f0 String, f1 String, f2 String, f3 Datetime, f4 Datetime, f5 Map(String, String), f6 String, f7 Datetime DEFAULT now()) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}/test/events_local', '{replica}') ORDER BY (f0, f1, f2) PARTITION BY toYYYYMMDD(f3) TTL f3 + INTERVAL 6 MONTH; 18 | -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00065_loyalty_with_storage_join.sql: -------------------------------------------------------------------------------- 1 | USE test; 2 | 3 | DROP TABLE IF EXISTS join; 4 | CREATE TABLE join (UserID UInt64, loyalty Int8) ENGINE = Join(SEMI, LEFT, UserID); 5 | 6 | INSERT INTO join 7 | SELECT 8 | UserID, 9 | toInt8(if((sum(SearchEngineID = 2) AS yandex) > (sum(SearchEngineID = 3) AS google), 10 | yandex / (yandex + google), 11 | -google / (yandex + google)) * 10) AS loyalty 12 | FROM hits 13 | WHERE (SearchEngineID = 2) OR (SearchEngineID = 3) 14 | GROUP BY UserID 15 | HAVING (yandex + google) > 10; 16 | 17 | SELECT 18 | loyalty, 19 | count() 20 | FROM hits SEMI LEFT JOIN join USING UserID 21 | GROUP BY loyalty 22 | ORDER BY loyalty ASC; 23 | 24 | DETACH TABLE join; 25 | ATTACH TABLE join; 26 | 27 | SELECT 28 | loyalty, 29 | count() 30 | FROM hits SEMI LEFT JOIN join USING UserID 31 | GROUP BY loyalty 32 | ORDER BY loyalty ASC; 33 | 34 | DROP TABLE join; 35 | -------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_with_ttl_policy.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE tab 2 | ( 3 | d DateTime, 4 | a Int 5 | ) 
6 | ENGINE = MergeTree 7 | PARTITION BY toYYYYMM(d) 8 | ORDER BY d 9 | TTL d + INTERVAL 1 MONTH DELETE, 10 | d + INTERVAL 1 WEEK TO VOLUME 'aaa', 11 | d + INTERVAL 2 WEEK TO DISK 'bbb'; 12 | 13 | 14 | CREATE TABLE table_with_where 15 | ( 16 | d DateTime, 17 | a Int 18 | ) 19 | ENGINE = MergeTree 20 | PARTITION BY toYYYYMM(d) 21 | ORDER BY d 22 | TTL d + INTERVAL 1 MONTH DELETE WHERE toDayOfWeek(d) = 1; 23 | 24 | CREATE TABLE table_for_recompression 25 | ( 26 | d DateTime, 27 | key UInt64, 28 | value String 29 | ) ENGINE MergeTree() 30 | ORDER BY tuple() 31 | PARTITION BY key 32 | TTL d + INTERVAL 1 MONTH RECOMPRESS CODEC(ZSTD(17)), d + INTERVAL 1 YEAR RECOMPRESS CODEC(LZ4HC(10)) 33 | SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0, allow_experimental_replacing_merge_with_cleanup = true; 34 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_table_with_nullable.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE TABLE test.`.inner.752391fb-44cc-4dd5-b523-91fb44cc9dd5` 3 | UUID '27673372-7973-44f5-a767-33727973c4f5' ( 4 | `f0` String, 5 | `f1` String, 6 | `f2` LowCardinality(String), 7 | `f3` LowCardinality(String), 8 | `f4` DateTime64(3), 9 | `f5` Nullable(DateTime64(3)), 10 | `succeed_at` Nullable(DateTime64(3)) 11 | ) ENGINE = MergeTree 12 | PARTITION BY xxHash32(tag_id) % 20 13 | ORDER BY label_id 14 | SETTINGS index_granularity = 8192; 15 | 16 | 17 | -- Format SQL: 18 | CREATE TABLE test.`.inner.752391fb-44cc-4dd5-b523-91fb44cc9dd5` UUID '27673372-7973-44f5-a767-33727973c4f5' (`f0` String, `f1` String, `f2` LowCardinality(String), `f3` LowCardinality(String), `f4` DateTime64(3), `f5` Nullable(DateTime64(3)), `succeed_at` Nullable(DateTime64(3))) ENGINE = MergeTree ORDER BY label_id PARTITION BY xxHash32(tag_id) % 20 SETTINGS index_granularity=8192; 19 | 
-------------------------------------------------------------------------------- /parser/testdata/ddl/create_table_basic.sql: -------------------------------------------------------------------------------- 1 | -- It's a short link events table 2 | /** 3 | * @name Short link events 4 | * @description It's a short link events table 5 | */ 6 | CREATE TABLE IF NOT EXISTS test.events_local ( 7 | f0 String, 8 | f1 String CODEC(ZSTD(1)), 9 | f2 VARCHAR(255), 10 | f3 Datetime, 11 | f4 Datetime, 12 | f5 Map(String,String), 13 | f6 String, 14 | f7 Nested ( 15 | f70 UInt32, 16 | f71 UInt32, 17 | f72 DateTime, 18 | f73 Int64, 19 | f74 Int64, 20 | f75 String 21 | ), 22 | f8 Datetime DEFAULT now(), 23 | f9 String MATERIALIZED toString(f7['f70']), 24 | f10 String ALIAS f11, 25 | f12 JSON(max_dynamic_types=10, max_dynamic_paths=3, SKIP a, SKIP a.b.c, SKIP REGEXP 'hello'), 26 | ) ENGINE = MergeTree 27 | PRIMARY KEY (f0, f1, f2) 28 | PARTITION BY toYYYYMMDD(f3) 29 | TTL f3 + INTERVAL 6 MONTH 30 | ORDER BY (f1,f2,f3) 31 | COMMENT 'Comment for table'; -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_replace_partition.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 52, 5 | "TableIdentifier": { 6 | "Database": null, 7 | "Table": { 8 | "Name": "t2", 9 | "QuoteType": 1, 10 | "NamePos": 12, 11 | "NameEnd": 14 12 | } 13 | }, 14 | "OnCluster": null, 15 | "AlterExprs": [ 16 | { 17 | "ReplacePos": 15, 18 | "Partition": { 19 | "PartitionPos": 23, 20 | "Expr": { 21 | "LiteralPos": 34, 22 | "LiteralEnd": 43, 23 | "Literal": "partition" 24 | }, 25 | "ID": null, 26 | "All": false 27 | }, 28 | "Table": { 29 | "Database": null, 30 | "Table": { 31 | "Name": "t1", 32 | "QuoteType": 1, 33 | "NamePos": 50, 34 | "NameEnd": 52 35 | } 36 | } 37 | } 38 | ] 39 | } 40 | ] 
-------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_dictionary_basic.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE DICTIONARY test.my_dict ( 3 | id UInt64, 4 | name String DEFAULT '', 5 | value Float64 EXPRESSION toFloat64OrZero(name), 6 | parent_id UInt64 HIERARCHICAL, 7 | is_active UInt8 INJECTIVE, 8 | object_id UInt64 IS_OBJECT_ID 9 | ) 10 | PRIMARY KEY id 11 | SOURCE(MYSQL( 12 | host 'localhost' 13 | port 3306 14 | user 'default' 15 | password '' 16 | db 'test' 17 | table 'dict_table' 18 | )) 19 | LIFETIME(MIN 1000 MAX 2000) 20 | LAYOUT(HASHED()) 21 | SETTINGS(max_block_size = 8192); 22 | 23 | -- Format SQL: 24 | CREATE DICTIONARY test.my_dict (id UInt64, name String DEFAULT '', value Float64 EXPRESSION toFloat64OrZero(name), parent_id UInt64 HIERARCHICAL, is_active UInt8 INJECTIVE, object_id UInt64 IS_OBJECT_ID) PRIMARY KEY id SOURCE(MYSQL(host 'localhost' port 3306 user 'default' password '' db 'test' table 'dict_table')) LIFETIME(MIN 1000 MAX 2000) LAYOUT(HASHED()) SETTINGS(max_block_size=8192); 25 | -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_table_with_enum_fields.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE TABLE t0 on cluster default_cluster 3 | ( 4 | `method` Enum8('GET'=1 , 'POST'=2, 'HEAD'=3, 'PUT'=4,'PATCH'=5, 'DELETE'=6, 'CONNECT'=7, 'OPTIONS'=8, 'TRACE'=9) CODEC(ZSTD(1)), 5 | `timestamp` DateTime64(3) CODEC(DoubleDelta, ZSTD) 6 | ) 7 | ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}', '{replica}') 8 | PARTITION BY toDate(timestamp) 9 | ORDER BY (method,timestamp) 10 | TTL toDate(timestamp) + toIntervalDay(3) 11 | SETTINGS index_granularity = 8192; 12 | 13 | -- Format SQL: 14 | CREATE TABLE t0 ON CLUSTER default_cluster (`method` Enum8('GET'=1, 'POST'=2, 
'HEAD'=3, 'PUT'=4, 'PATCH'=5, 'DELETE'=6, 'CONNECT'=7, 'OPTIONS'=8, 'TRACE'=9) CODEC(ZSTD(1)), `timestamp` DateTime64(3) CODEC(DoubleDelta, ZSTD)) ENGINE = ReplicatedMergeTree('/clickhouse/tables/{layer}-{shard}', '{replica}') ORDER BY (method, timestamp) PARTITION BY toDate(timestamp) TTL toDate(timestamp) + toIntervalDay(3) SETTINGS index_granularity=8192; 15 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_rename_column.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 69, 5 | "TableIdentifier": { 6 | "Database": null, 7 | "Table": { 8 | "Name": "my_table", 9 | "QuoteType": 1, 10 | "NamePos": 12, 11 | "NameEnd": 20 12 | } 13 | }, 14 | "OnCluster": null, 15 | "AlterExprs": [ 16 | { 17 | "RenamePos": 21, 18 | "IfExists": false, 19 | "OldColumnName": { 20 | "Ident": { 21 | "Name": "old_column_name", 22 | "QuoteType": 1, 23 | "NamePos": 35, 24 | "NameEnd": 50 25 | }, 26 | "DotIdent": null 27 | }, 28 | "NewColumnName": { 29 | "Ident": { 30 | "Name": "new_column_name", 31 | "QuoteType": 1, 32 | "NamePos": 54, 33 | "NameEnd": 69 34 | }, 35 | "DotIdent": null 36 | } 37 | } 38 | ] 39 | } 40 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_drop_column.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 83, 5 | "TableIdentifier": { 6 | "Database": { 7 | "Name": "test", 8 | "QuoteType": 1, 9 | "NamePos": 12, 10 | "NameEnd": 16 11 | }, 12 | "Table": { 13 | "Name": "events_local", 14 | "QuoteType": 1, 15 | "NamePos": 17, 16 | "NameEnd": 29 17 | } 18 | }, 19 | "OnCluster": { 20 | "OnPos": 30, 21 | "Expr": { 22 | "LiteralPos": 42, 23 | "LiteralEnd": 57, 24 | "Literal": "default_cluster" 25 | } 26 | }, 27 | "AlterExprs": [ 28 | { 29 | 
"DropPos": 59, 30 | "ColumnName": { 31 | "Ident": { 32 | "Name": "f1", 33 | "QuoteType": 1, 34 | "NamePos": 81, 35 | "NameEnd": 83 36 | }, 37 | "DotIdent": null 38 | }, 39 | "IfExists": true 40 | } 41 | ] 42 | } 43 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_drop_index.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 71, 5 | "TableIdentifier": { 6 | "Database": { 7 | "Name": "test", 8 | "QuoteType": 1, 9 | "NamePos": 12, 10 | "NameEnd": 16 11 | }, 12 | "Table": { 13 | "Name": "event_local", 14 | "QuoteType": 1, 15 | "NamePos": 17, 16 | "NameEnd": 28 17 | } 18 | }, 19 | "OnCluster": { 20 | "OnPos": 29, 21 | "Expr": { 22 | "LiteralPos": 41, 23 | "LiteralEnd": 56, 24 | "Literal": "default_cluster" 25 | } 26 | }, 27 | "AlterExprs": [ 28 | { 29 | "DropPos": 58, 30 | "IndexName": { 31 | "Ident": { 32 | "Name": "f1", 33 | "QuoteType": 1, 34 | "NamePos": 69, 35 | "NameEnd": 71 36 | }, 37 | "DotIdent": null 38 | }, 39 | "IfExists": false 40 | } 41 | ] 42 | } 43 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_drop_projection.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 76, 5 | "TableIdentifier": { 6 | "Database": { 7 | "Name": "test", 8 | "QuoteType": 1, 9 | "NamePos": 12, 10 | "NameEnd": 16 11 | }, 12 | "Table": { 13 | "Name": "event_local", 14 | "QuoteType": 1, 15 | "NamePos": 17, 16 | "NameEnd": 28 17 | } 18 | }, 19 | "OnCluster": { 20 | "OnPos": 29, 21 | "Expr": { 22 | "LiteralPos": 41, 23 | "LiteralEnd": 56, 24 | "Literal": "default_cluster" 25 | } 26 | }, 27 | "AlterExprs": [ 28 | { 29 | "DropPos": 58, 30 | "ProjectionName": { 31 | "Ident": { 32 | "Name": "f1", 33 | "QuoteType": 1, 34 | "NamePos": 74, 35 | 
"NameEnd": 76 36 | }, 37 | "DotIdent": null 38 | }, 39 | "IfExists": false 40 | } 41 | ] 42 | } 43 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/format/create_materialized_view_with_empty_table_schema.sql: -------------------------------------------------------------------------------- 1 | -- Origin SQL: 2 | CREATE MATERIALIZED VIEW test.t0 on cluster default_cluster 3 | ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/{layer}-{shard}/test/t0', '{replica}') 4 | PARTITION BY toYYYYMM(f0) 5 | ORDER BY (f0) 6 | POPULATE AS 7 | select f0,f1,f2,coalesce(f0,f1) as f333 8 | from 9 | (select 10 | f0,f1,f2, 11 | ROW_NUMBER() over(partition by f0 order by coalesce(f1,f2)) as rn 12 | from test.t 13 | where f3 in ('foo', 'bar', 'test') 14 | and env ='test' 15 | ) as tmp 16 | where rn = 1; 17 | 18 | -- Format SQL: 19 | CREATE MATERIALIZED VIEW test.t0 ON CLUSTER default_cluster ENGINE = ReplicatedAggregatingMergeTree('/clickhouse/{layer}-{shard}/test/t0', '{replica}') ORDER BY (f0) PARTITION BY toYYYYMM(f0) POPULATE AS SELECT f0, f1, f2, coalesce(f0, f1) AS f333 FROM (SELECT f0, f1, f2, ROW_NUMBER() OVER (PARTITION BY f0 ORDER BY coalesce(f1, f2)) AS rn FROM test.t WHERE f3 IN ('foo', 'bar', 'test') AND env = 'test') AS tmp WHERE rn = 1; 20 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_freeze_partition.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 81, 5 | "TableIdentifier": { 6 | "Database": { 7 | "Name": "test", 8 | "QuoteType": 1, 9 | "NamePos": 12, 10 | "NameEnd": 16 11 | }, 12 | "Table": { 13 | "Name": "events", 14 | "QuoteType": 1, 15 | "NamePos": 17, 16 | "NameEnd": 23 17 | } 18 | }, 19 | "OnCluster": { 20 | "OnPos": 24, 21 | "Expr": { 22 | "LiteralPos": 36, 23 | "LiteralEnd": 51, 24 | "Literal": "default_cluster" 25 | } 26 | }, 27 
| "AlterExprs": [ 28 | { 29 | "FreezePos": 53, 30 | "StatementEnd": 81, 31 | "Partition": { 32 | "PartitionPos": 60, 33 | "Expr": { 34 | "LiteralPos": 71, 35 | "LiteralEnd": 81, 36 | "Literal": "2023-07-18" 37 | }, 38 | "ID": null, 39 | "All": false 40 | } 41 | } 42 | ] 43 | } 44 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_materialize_index.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 91, 5 | "TableIdentifier": { 6 | "Database": null, 7 | "Table": { 8 | "Name": "visits_order", 9 | "QuoteType": 1, 10 | "NamePos": 12, 11 | "NameEnd": 24 12 | } 13 | }, 14 | "OnCluster": null, 15 | "AlterExprs": [ 16 | { 17 | "MaterializedPos": 25, 18 | "StatementEnd": 91, 19 | "IfExists": true, 20 | "IndexName": { 21 | "Ident": { 22 | "Name": "user_name_index", 23 | "QuoteType": 1, 24 | "NamePos": 53, 25 | "NameEnd": 68 26 | }, 27 | "DotIdent": null 28 | }, 29 | "Partition": { 30 | "PartitionPos": 72, 31 | "Expr": { 32 | "LiteralPos": 83, 33 | "LiteralEnd": 91, 34 | "Literal": "20240403" 35 | }, 36 | "ID": null, 37 | "All": false 38 | } 39 | } 40 | ] 41 | } 42 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_reset_multiple_settings.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 101, 5 | "TableIdentifier": { 6 | "Database": null, 7 | "Table": { 8 | "Name": "example_table", 9 | "QuoteType": 1, 10 | "NamePos": 12, 11 | "NameEnd": 25 12 | } 13 | }, 14 | "OnCluster": null, 15 | "AlterExprs": [ 16 | { 17 | "ResetPos": 26, 18 | "StatementEnd": 101, 19 | "Settings": [ 20 | { 21 | "Name": "max_part_loading_threads", 22 | "QuoteType": 1, 23 | "NamePos": 40, 24 | "NameEnd": 64 25 | }, 26 | { 27 | "Name": "max_parts_in_total", 28 | 
"QuoteType": 1, 29 | "NamePos": 66, 30 | "NameEnd": 84 31 | }, 32 | { 33 | "Name": "another_setting", 34 | "QuoteType": 1, 35 | "NamePos": 86, 36 | "NameEnd": 101 37 | } 38 | ] 39 | } 40 | ] 41 | } 42 | ] -------------------------------------------------------------------------------- /parser/testdata/query/compatible/1_stateful/00151_order_by_read_in_order.sql: -------------------------------------------------------------------------------- 1 | SET optimize_read_in_order = 1; 2 | SELECT CounterID FROM test.hits ORDER BY CounterID DESC LIMIT 50; 3 | SELECT CounterID FROM test.hits ORDER BY CounterID LIMIT 50; 4 | SELECT CounterID FROM test.hits ORDER BY CounterID, EventDate LIMIT 50; 5 | SELECT EventDate FROM test.hits ORDER BY CounterID, EventDate LIMIT 50; 6 | SELECT EventDate FROM test.hits ORDER BY CounterID, EventDate DESC LIMIT 50; 7 | SELECT CounterID FROM test.hits ORDER BY CounterID, EventDate DESC LIMIT 50; 8 | SELECT CounterID FROM test.hits ORDER BY CounterID DESC, EventDate DESC LIMIT 50; 9 | SELECT EventDate FROM test.hits ORDER BY CounterID DESC, EventDate DESC LIMIT 50; 10 | 11 | SELECT CounterID, EventDate FROM test.hits ORDER BY CounterID, EventDate LIMIT 50; 12 | SELECT CounterID, EventDate FROM test.hits ORDER BY CounterID, EventDate DESC LIMIT 50; 13 | SELECT CounterID, EventDate FROM test.hits ORDER BY CounterID DESC, EventDate LIMIT 50; 14 | SELECT CounterID, EventDate FROM test.hits ORDER BY CounterID DESC, EventDate DESC LIMIT 50; 15 | -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_delete.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 61, 5 | "TableIdentifier": { 6 | "Database": { 7 | "Name": "test", 8 | "QuoteType": 1, 9 | "NamePos": 12, 10 | "NameEnd": 16 11 | }, 12 | "Table": { 13 | "Name": "events", 14 | "QuoteType": 1, 15 | "NamePos": 17, 16 | "NameEnd": 23 17 | 
} 18 | }, 19 | "OnCluster": null, 20 | "AlterExprs": [ 21 | { 22 | "DeletePos": 24, 23 | "StatementEnd": 61, 24 | "WhereClause": { 25 | "LeftExpr": { 26 | "Name": "created_at", 27 | "QuoteType": 1, 28 | "NamePos": 37, 29 | "NameEnd": 47 30 | }, 31 | "Operation": "\u003c", 32 | "RightExpr": { 33 | "LiteralPos": 51, 34 | "LiteralEnd": 61, 35 | "Literal": "2023-01-01" 36 | }, 37 | "HasGlobal": false, 38 | "HasNot": false 39 | } 40 | } 41 | ] 42 | } 43 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_clear_column.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 76, 5 | "TableIdentifier": { 6 | "Database": null, 7 | "Table": { 8 | "Name": "my_table", 9 | "QuoteType": 1, 10 | "NamePos": 12, 11 | "NameEnd": 20 12 | } 13 | }, 14 | "OnCluster": null, 15 | "AlterExprs": [ 16 | { 17 | "ClearPos": 21, 18 | "StatementEnd": 76, 19 | "IfExists": false, 20 | "ColumnName": { 21 | "Ident": { 22 | "Name": "my_column_name", 23 | "QuoteType": 1, 24 | "NamePos": 34, 25 | "NameEnd": 48 26 | }, 27 | "DotIdent": null 28 | }, 29 | "PartitionExpr": { 30 | "PartitionPos": 52, 31 | "Expr": { 32 | "Name": "partition_name", 33 | "QuoteType": 1, 34 | "NamePos": 62, 35 | "NameEnd": 76 36 | }, 37 | "ID": null, 38 | "All": false 39 | } 40 | } 41 | ] 42 | } 43 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_clear_index.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 74, 5 | "TableIdentifier": { 6 | "Database": null, 7 | "Table": { 8 | "Name": "my_table", 9 | "QuoteType": 1, 10 | "NamePos": 12, 11 | "NameEnd": 20 12 | } 13 | }, 14 | "OnCluster": null, 15 | "AlterExprs": [ 16 | { 17 | "ClearPos": 21, 18 | "StatementEnd": 74, 19 | "IfExists": false, 20 | 
"IndexName": { 21 | "Ident": { 22 | "Name": "my_index_name", 23 | "QuoteType": 1, 24 | "NamePos": 33, 25 | "NameEnd": 46 26 | }, 27 | "DotIdent": null 28 | }, 29 | "PartitionExpr": { 30 | "PartitionPos": 50, 31 | "Expr": { 32 | "Name": "partition_name", 33 | "QuoteType": 1, 34 | "NamePos": 60, 35 | "NameEnd": 74 36 | }, 37 | "ID": null, 38 | "All": false 39 | } 40 | } 41 | ] 42 | } 43 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_clear_projection.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 71, 5 | "TableIdentifier": { 6 | "Database": null, 7 | "Table": { 8 | "Name": "my_table", 9 | "QuoteType": 1, 10 | "NamePos": 12, 11 | "NameEnd": 20 12 | } 13 | }, 14 | "OnCluster": null, 15 | "AlterExprs": [ 16 | { 17 | "ClearPos": 21, 18 | "StatementEnd": 71, 19 | "IfExists": false, 20 | "ProjectionName": { 21 | "Ident": { 22 | "Name": "hello", 23 | "QuoteType": 1, 24 | "NamePos": 38, 25 | "NameEnd": 43 26 | }, 27 | "DotIdent": null 28 | }, 29 | "PartitionExpr": { 30 | "PartitionPos": 47, 31 | "Expr": { 32 | "Name": "partition_name", 33 | "QuoteType": 1, 34 | "NamePos": 57, 35 | "NameEnd": 71 36 | }, 37 | "ID": null, 38 | "All": false 39 | } 40 | } 41 | ] 42 | } 43 | ] -------------------------------------------------------------------------------- /parser/testdata/ddl/output/alter_table_materialize_projection.sql.golden.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "AlterPos": 0, 4 | "StatementEnd": 101, 5 | "TableIdentifier": { 6 | "Database": null, 7 | "Table": { 8 | "Name": "visits_order", 9 | "QuoteType": 1, 10 | "NamePos": 12, 11 | "NameEnd": 24 12 | } 13 | }, 14 | "OnCluster": null, 15 | "AlterExprs": [ 16 | { 17 | "MaterializedPos": 25, 18 | "StatementEnd": 101, 19 | "IfExists": true, 20 | "ProjectionName": { 21 | "Ident": { 22 | 
"Name": "user_name_projection", 23 | "QuoteType": 1, 24 | "NamePos": 58, 25 | "NameEnd": 78 26 | }, 27 | "DotIdent": null 28 | }, 29 | "Partition": { 30 | "PartitionPos": 82, 31 | "Expr": { 32 | "LiteralPos": 93, 33 | "LiteralEnd": 101, 34 | "Literal": "20240403" 35 | }, 36 | "ID": null, 37 | "All": false 38 | } 39 | } 40 | ] 41 | } 42 | ] --------------------------------------------------------------------------------