├── .gitignore ├── LICENSE ├── README.md ├── backend ├── dbtoaster │ ├── CHANGELOG │ ├── LICENSE │ ├── README │ ├── bin │ │ ├── dbtoaster │ │ ├── dbtoaster_frontend_linux │ │ └── dbtoaster_frontend_macos │ ├── examples │ │ ├── code │ │ │ └── main.cpp │ │ ├── data │ │ │ ├── finance.csv │ │ │ ├── mddb │ │ │ │ ├── angles.csv │ │ │ │ ├── atommeta.csv │ │ │ │ ├── atompositions.csv │ │ │ │ ├── bonds.csv │ │ │ │ ├── buckets.csv │ │ │ │ ├── conformationpoints.csv │ │ │ │ ├── dihedrals.csv │ │ │ │ ├── dimensions.csv │ │ │ │ ├── improperdihedrals.csv │ │ │ │ └── nonbonded.csv │ │ │ ├── simple │ │ │ │ ├── r.dat │ │ │ │ ├── s.dat │ │ │ │ └── t.dat │ │ │ └── tpch │ │ │ │ ├── customer.csv │ │ │ │ ├── lineitem.csv │ │ │ │ ├── nation.csv │ │ │ │ ├── orders.csv │ │ │ │ ├── part.csv │ │ │ │ ├── partsupp.csv │ │ │ │ ├── region.csv │ │ │ │ └── supplier.csv │ │ └── queries │ │ │ ├── finance │ │ │ ├── axfinder.sql │ │ │ ├── brokerspread.sql │ │ │ ├── brokervariance.sql │ │ │ ├── missedtrades.sql │ │ │ ├── pricespread.sql │ │ │ └── vwap.sql │ │ │ ├── mddb │ │ │ ├── README │ │ │ ├── query1.sql │ │ │ ├── query2.sql │ │ │ └── schemas.sql │ │ │ ├── simple │ │ │ ├── r_aggcomparison.sql │ │ │ ├── r_aggofnested.sql │ │ │ ├── r_aggofnestedagg.sql │ │ │ ├── r_agtb.sql │ │ │ ├── r_agtbexists.sql │ │ │ ├── r_avg.sql │ │ │ ├── r_bigsumstar.sql │ │ │ ├── r_btimesa.sql │ │ │ ├── r_btimesacorrelated.sql │ │ │ ├── r_case.sql │ │ │ ├── r_case2.sql │ │ │ ├── r_count.sql │ │ │ ├── r_count_of_one.sql │ │ │ ├── r_count_of_one_prime.sql │ │ │ ├── r_deepscoping.sql │ │ │ ├── r_distinct.sql │ │ │ ├── r_distincttest.sql │ │ │ ├── r_divb.sql │ │ │ ├── r_existsnestedagg.sql │ │ │ ├── r_gbasumb.sql │ │ │ ├── r_gtealldynamic.sql │ │ │ ├── r_gtesomedynamic.sql │ │ │ ├── r_gtsomedynamic.sql │ │ │ ├── r_impossibleineq.sql │ │ │ ├── r_indynamic.sql │ │ │ ├── r_ineqandeq.sql │ │ │ ├── r_inlist.sql │ │ │ ├── r_instatic.sql │ │ │ ├── r_lift_of_count.sql │ │ │ ├── r_ltallagg.sql │ │ │ ├── r_ltallavg.sql │ │ │ ├── r_ltallcorravg.sql │ │ │ ├── r_ltalldynamic.sql │ │ │ ├── r_multinest.sql │ │ │ ├── r_multiors.sql │ │ │ ├── r_natselfjoin.sql │ │ │ ├── r_nestedfrom.sql │ │ │ ├── r_nestedrename.sql │ │ │ ├── r_nogroupby.sql │ │ │ ├── r_nonjoineq.sql │ │ │ ├── r_possibleineq.sql │ │ │ ├── r_possibleineqwitheq.sql │ │ │ ├── r_selectstar.sql │ │ │ ├── r_simplenest.sql │ │ │ ├── r_smallstar.sql │ │ │ ├── r_starofnested.sql │ │ │ ├── r_starofnestedagg.sql │ │ │ ├── r_sum_gb_all_out_of_aggregate.sql │ │ │ ├── r_sum_gb_out_of_aggregate.sql │ │ │ ├── r_sum_out_of_aggregate.sql │ │ │ ├── r_sumadivsumb.sql │ │ │ ├── r_sumdivgrp.sql │ │ │ ├── r_sumnestedintarget.sql │ │ │ ├── r_sumnestedintargetwitheq.sql │ │ │ ├── r_sumoutsideofagg.sql │ │ │ ├── r_sumstar.sql │ │ │ ├── r_union.sql │ │ │ ├── r_unique_counts_by_a.sql │ │ │ ├── rr_ormyself.sql │ │ │ ├── rs.sql │ │ │ ├── rs_cmpnest.sql │ │ │ ├── rs_column_mapping_1.sql │ │ │ ├── rs_column_mapping_2.sql │ │ │ ├── rs_column_mapping_3.sql │ │ │ ├── rs_eqineq.sql │ │ │ ├── rs_example1.sql │ │ │ ├── rs_example2.sql │ │ │ ├── rs_ineqonnestedagg.sql │ │ │ ├── rs_inequality.sql │ │ │ ├── rs_ineqwithnestedagg.sql │ │ │ ├── rs_joinon.sql │ │ │ ├── rs_joinwithnestedagg.sql │ │ │ ├── rs_natjoin.sql │ │ │ ├── rs_natjoinineq.sql │ │ │ ├── rs_natjoinnooverlap.sql │ │ │ ├── rs_natjoinpartstar.sql │ │ │ ├── rs_selectconstcmp.sql │ │ │ ├── rs_selectpartstar.sql │ │ │ ├── rs_selectstar.sql │ │ │ ├── rs_simple.sql │ │ │ ├── rs_streamvtable.sql │ │ │ ├── rs_stringjoin.sql │ │ │ ├── rst.sql │ │ │ ├── rstar.sql │ │ │ └── 
rtt_or_with_stars.sql │ │ │ └── tpch │ │ │ ├── query1.sql │ │ │ ├── query10.sql │ │ │ ├── query11.sql │ │ │ ├── query11a.sql │ │ │ ├── query12.sql │ │ │ ├── query13.sql │ │ │ ├── query14.sql │ │ │ ├── query15.sql │ │ │ ├── query16.sql │ │ │ ├── query17.sql │ │ │ ├── query17a.sql │ │ │ ├── query18.sql │ │ │ ├── query18a.sql │ │ │ ├── query19.sql │ │ │ ├── query2.sql │ │ │ ├── query20.sql │ │ │ ├── query21.sql │ │ │ ├── query22.sql │ │ │ ├── query22a.sql │ │ │ ├── query3.sql │ │ │ ├── query4.sql │ │ │ ├── query5.sql │ │ │ ├── query6.sql │ │ │ ├── query7.sql │ │ │ ├── query8.sql │ │ │ ├── query9.sql │ │ │ └── schemas.sql │ └── lib │ │ ├── dbt_c++ │ │ ├── Aggregator.hpp │ │ ├── ExecutionProfiler.h │ │ ├── GenericEntry.hpp │ │ ├── MB1.h │ │ ├── Predicate.h │ │ ├── ScExtra.h │ │ ├── SpinLock.h │ │ ├── TPCC.h │ │ ├── Transaction.h │ │ ├── TransactionManager.h │ │ ├── Version.h │ │ ├── benchHashCmp.cpp │ │ ├── circular_buffer.hpp │ │ ├── event.cpp │ │ ├── event.hpp │ │ ├── filepath.hpp │ │ ├── hash.hpp │ │ ├── hpds │ │ │ ├── KDouble.cpp │ │ │ ├── KDouble.hpp │ │ │ ├── charpool.hpp │ │ │ ├── macro.hpp │ │ │ ├── pool.hpp │ │ │ ├── pstring.cpp │ │ │ ├── pstring.hpp │ │ │ └── pstringops.hpp │ │ ├── iprogram.cpp │ │ ├── iprogram.hpp │ │ ├── libdbtoaster.a │ │ ├── main.cpp │ │ ├── makefile │ │ ├── mmap │ │ │ ├── cmmap.hpp │ │ │ ├── mmap.cpp │ │ │ ├── mmap.hpp │ │ │ ├── mmap1.hpp │ │ │ ├── mmap2.hpp │ │ │ └── pool.hpp │ │ ├── optionparser.hpp │ │ ├── program_base.cpp │ │ ├── program_base.hpp │ │ ├── runtime.cpp │ │ ├── runtime.hpp │ │ ├── serialization.hpp │ │ ├── smhasher │ │ │ ├── MurmurHash2.cpp │ │ │ └── MurmurHash2.hpp │ │ ├── standard_adaptors.cpp │ │ ├── standard_adaptors.hpp │ │ ├── standard_functions.cpp │ │ ├── standard_functions.hpp │ │ ├── statistics.hpp │ │ ├── statistics_split.cpp │ │ ├── statistics_split.hpp │ │ ├── streams.cpp │ │ ├── streams.hpp │ │ ├── types.h │ │ └── util.hpp │ │ └── dbt_scala │ │ ├── akka-actor_2.11-2.5.4.jar │ │ ├── config-1.3.1.jar │ │ ├── dbtoaster-2.3-lms.jar │ │ ├── dbtoaster-core_2.11-2.3.jar │ │ ├── dbtoaster-lms_2.11-2.3.jar │ │ ├── dbtoaster-pardis_2.11-2.3.jar │ │ ├── dbtoaster-spark_2.11-2.3.jar │ │ ├── dbtoaster-sstore_2.11-2.3.jar │ │ ├── lms_2.11-0.3-SNAPSHOT.jar │ │ ├── sc-pardis-compiler_2.11-0.1.4-SNAPSHOT.jar │ │ ├── sc-pardis-core-compiler_2.11-0.1.4-SNAPSHOT.jar │ │ ├── sc-pardis-library_2.11-0.1.4-SNAPSHOT.jar │ │ ├── sc-pardis-quasi-core_2.11-0.1.4-SNAPSHOT.jar │ │ ├── sc-pardis-quasi_2.11-0.1.4-SNAPSHOT.jar │ │ ├── sc-shared_2.11-0.1.4-SNAPSHOT.jar │ │ ├── scala-library-2.11.11.jar │ │ ├── scala-library-2.11.2.jar │ │ ├── scala-parser-combinators_2.11-1.0.4.jar │ │ ├── scala-parser-combinators_2.11-1.0.6.jar │ │ ├── scala-reflect-2.11.11.jar │ │ ├── scala-reflect-2.11.2.jar │ │ ├── scala-xml_2.11-1.0.6.jar │ │ ├── scala-yinyang_2.11-0.2.0.jar │ │ ├── scalariform_2.11-0.2.3.jar │ │ ├── squid-sc-backend-macros_2.11-0.1-SNAPSHOT.jar │ │ └── squid-sc-backend_2.11-0.1-SNAPSHOT.jar └── lib │ ├── functions.hpp │ ├── hash.hpp │ ├── macro.hpp │ ├── memory.hpp │ ├── mmap.hpp │ ├── pool.hpp │ ├── serialization.hpp │ ├── string.hpp │ └── types.hpp ├── bin ├── compile_frontend.sh ├── run_backend.sh └── run_frontend.sh ├── examples ├── data │ ├── housing-4-normalised │ │ ├── Demographics.tbl │ │ ├── House.tbl │ │ ├── Institution.tbl │ │ ├── Restaurant.tbl │ │ ├── Shop.tbl │ │ └── Transport.tbl │ └── tpch0.01 │ │ ├── customer.csv │ │ ├── lineitem.csv │ │ ├── nation.csv │ │ ├── orders.csv │ │ ├── part.csv │ │ ├── partsupp.csv │ │ ├── region.csv │ │ └── 
supplier.csv ├── makefile ├── queries │ ├── favorita │ │ ├── favorita.txt │ │ ├── favorita_regression_categorical.sql │ │ ├── favorita_regression_categorical_general.sql │ │ ├── favorita_regression_continuous.sql │ │ └── favorita_regression_continuous_general.sql │ ├── housing │ │ ├── housing.txt │ │ ├── housing_avg.sql │ │ ├── housing_factorized_join.sql │ │ ├── housing_listing_join.sql │ │ ├── housing_regression.sql │ │ └── housing_sum.sql │ ├── retailer │ │ ├── retailer.txt │ │ ├── retailer_factorized_join.sql │ │ ├── retailer_factorized_join_INVENTORY.sql │ │ ├── retailer_listing_join.sql │ │ ├── retailer_listing_join_INVENTORY.sql │ │ ├── retailer_regression.sql │ │ ├── retailer_regression_INVENTORY.sql │ │ ├── retailer_regression_categorical.sql │ │ ├── retailer_regression_categorical_INVENTORY.sql │ │ ├── retailer_regression_categorical_general.sql │ │ ├── retailer_regression_continuous_general.sql │ │ ├── retailer_sum.sql │ │ └── retailer_sum_INVENTORY.sql │ ├── simple │ │ ├── rst.txt │ │ ├── rst2.txt │ │ ├── rst_RT.sql │ │ ├── rst_RT_complex.sql │ │ └── rst_datacube.sql │ └── tpch │ │ ├── tpch_FQ1.txt │ │ ├── tpch_FQ1_factorized_join.sql │ │ ├── tpch_FQ1_listing_join.sql │ │ ├── tpch_FQ2.txt │ │ ├── tpch_FQ2_factorized_join.sql │ │ ├── tpch_FQ2_listing_join.sql │ │ ├── tpch_FQ3.txt │ │ ├── tpch_FQ3_factorized_join.sql │ │ ├── tpch_FQ3_listing_join.sql │ │ ├── tpch_FQ4.txt │ │ ├── tpch_FQ4_factorized_join.sql │ │ ├── tpch_FQ4_listing_join.sql │ │ ├── tpch_query01.sql │ │ ├── tpch_query01.txt │ │ ├── tpch_query03.sql │ │ ├── tpch_query03.txt │ │ ├── tpch_query06.sql │ │ ├── tpch_query06.txt │ │ ├── tpch_query10.sql │ │ ├── tpch_query10.txt │ │ ├── tpch_query12.sql │ │ ├── tpch_query12.txt │ │ ├── tpch_query14.sql │ │ └── tpch_query14.txt └── src │ ├── application │ ├── application.hpp │ ├── favorita │ │ ├── application_favorita.hpp │ │ ├── application_favorita_base.hpp │ │ └── application_favorita_regression.hpp │ ├── housing │ │ ├── application_housing.hpp │ │ ├── application_housing_base.hpp │ │ ├── application_housing_factorized_join.hpp │ │ ├── application_housing_listing_join.hpp │ │ └── application_housing_regression.hpp │ ├── retailer │ │ ├── application_retailer.hpp │ │ ├── application_retailer_base.hpp │ │ ├── application_retailer_factorized_join.hpp │ │ ├── application_retailer_listing_join.hpp │ │ └── application_retailer_regression.hpp │ ├── simple │ │ ├── application_simple.hpp │ │ └── application_simple_base.hpp │ └── tpch │ │ ├── application_tpch.hpp │ │ ├── application_tpch_FQ1_factorized_join.hpp │ │ ├── application_tpch_FQ1_listing_join.hpp │ │ ├── application_tpch_FQ2_factorized_join.hpp │ │ ├── application_tpch_FQ2_listing_join.hpp │ │ ├── application_tpch_FQ3_factorized_join.hpp │ │ ├── application_tpch_FQ3_listing_join.hpp │ │ ├── application_tpch_FQ4_factorized_join.hpp │ │ ├── application_tpch_FQ4_listing_join.hpp │ │ └── application_tpch_base.hpp │ ├── lib │ ├── bgd_solver.hpp │ ├── csvreader.hpp │ ├── dispatcher.hpp │ ├── relation.hpp │ └── stopwatch.hpp │ ├── main.cpp │ └── ring │ ├── container.hpp │ ├── dictionary.hpp │ ├── ring_avg.hpp │ ├── ring_cofactor_degree1.hpp │ ├── ring_cofactor_degree1_categorical.hpp │ ├── ring_cofactor_degree2.hpp │ ├── ring_cofactor_general.hpp │ ├── ring_datacube.hpp │ ├── ring_factorized.hpp │ ├── ring_relational.hpp │ ├── ring_relational_opt.hpp │ ├── ring_tpch_query01.hpp │ ├── ring_tpch_query12.hpp │ └── ring_tpch_query14.hpp └── frontend ├── build.sbt ├── project ├── build.properties └── plugins.sbt └── src └── main ├── 
main.iml └── scala └── fdbresearch ├── CodeGenerator.scala ├── Driver.scala ├── Main.scala ├── Optimizer.scala ├── core ├── AST.scala ├── SQLToM3Compiler.scala └── Types.scala ├── parsing ├── M3Parser.scala ├── Parser.scala ├── SQLParser.scala └── VariableOrderParser.scala ├── tree ├── Tree.scala ├── VariableOrder.scala └── ViewTree.scala └── util ├── Logger.scala └── Utils.scala /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .idea/ 3 | frontend/project 4 | frontend/target 5 | examples/bin/ 6 | examples/generated/ 7 | 8 | -------------------------------------------------------------------------------- /backend/dbtoaster/README: -------------------------------------------------------------------------------- 1 | Welcome to the DBToaster Binary Distribution 2 | 3 | The layout of this distribution is as follows: 4 | 5 | LICENSE The DBToaster distribution end user license agreement 6 | README This README file 7 | bin/ The dbtoaster compiler binary 8 | docs/ Documentation for DBToaster 9 | examples/code Examples of how to integrate DBToaster with 10 | your own code 11 | examples/data Data for the example queries 12 | examples/queries Example queries in DBT-SQL 13 | examples/queries/simple Illustrations and tests of various features 14 | examples/queries/tpch The full TPC-H benchmark, adapted for DBToaster 15 | examples/queries/finance An algorithmic trading-oriented benchmark 16 | lib/ DBToaster support libraries. These are required 17 | for compiling code generated by dbtoaster -------------------------------------------------------------------------------- /backend/dbtoaster/bin/dbtoaster_frontend_linux: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/bin/dbtoaster_frontend_linux -------------------------------------------------------------------------------- /backend/dbtoaster/bin/dbtoaster_frontend_macos: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/bin/dbtoaster_frontend_macos -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/mddb/angles.csv: -------------------------------------------------------------------------------- 1 | 1,8,10,12,77.5,120 2 | 1,4,6,7,65,108.5 3 | 1,2,4,6,77.5,120 4 | 1,1,2,4,20,117.5 5 | 1,4,6,8,45,111.6 6 | 1,7,6,8,70,106.5 7 | 1,6,8,10,20,117.5 8 | 1,2,4,5,30,120 9 | 1,11,10,12,35,120 10 | 1,1,2,3,85,121.5 11 | 1,6,8,9,85,121.5 12 | 1,3,2,4,65,121 13 | 1,5,4,6,35,120 14 | 1,8,10,11,30,120 15 | 1,9,8,10,65,121 16 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/mddb/atommeta.csv: -------------------------------------------------------------------------------- 1 | 1,6,CH1E,CA,2,ALA,ALAD 2 | 1,5,H,H,2,ALA,ALAD 3 | 1,8,C,C,2,ALA,ALAD 4 | 1,7,CH3E,CB,2,ALA,ALAD 5 | 1,10,NH1,N,3,CBX,ALAD 6 | 1,9,O,O,2,ALA,ALAD 7 | 1,12,CH3E,CA,3,CBX,ALAD 8 | 1,11,H,H,3,CBX,ALAD 9 | 1,2,C,C,1,AMN,ALAD 10 | 1,1,CH3E,CL,1,AMN,ALAD 11 | 1,4,NH1,N,2,ALA,ALAD 12 | 1,3,O,O,1,AMN,ALAD 13 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/mddb/bonds.csv: -------------------------------------------------------------------------------- 1 | 1,10,12,422,1.49 2 |
1,6,8,405,1.52 3 | 1,2,4,471,1.33 4 | 1,4,6,422,1.45 5 | 1,4,5,405,0.98 6 | 1,8,10,471,1.33 7 | 1,10,11,405,0.98 8 | 1,1,2,405,1.52 9 | 1,6,7,225,1.52 10 | 1,2,3,580,1.23 11 | 1,8,9,580,1.23 12 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/mddb/dihedrals.csv: -------------------------------------------------------------------------------- 1 | 1,4,6,8,10,0,3,0 2 | 1,6,8,10,12,8.2,2,180 3 | 1,1,2,4,6,8.2,2,180 4 | 1,2,4,6,8,0.3,3,0 5 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/mddb/dimensions.csv: -------------------------------------------------------------------------------- 1 | 2,4,6,8,1 2 | 4,6,8,10,2 3 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/mddb/improperdihedrals.csv: -------------------------------------------------------------------------------- 1 | 1,2,1,4,3,100,0 2 | 1,8,6,10,9,100,0 3 | 1,4,2,6,5,45,0 4 | 1,10,8,12,11,45,0 5 | 1,6,4,8,7,55,35.26439 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/simple/r.dat: -------------------------------------------------------------------------------- 1 | 1,3 2 | 4,2 3 | 2,1 4 | 5,3 5 | 3,4 6 | 2,3 7 | 5,5 8 | 4,5 9 | 2,3 10 | 4,2 -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/simple/s.dat: -------------------------------------------------------------------------------- 1 | 4,3 2 | 3,2 3 | 4,1 4 | 5,2 5 | 2,3 6 | 4,5 7 | 2,4 8 | 3,1 9 | 1,5 10 | 4,5 -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/simple/t.dat: -------------------------------------------------------------------------------- 1 | 5,4 2 | 4,3 3 | 5,3 4 | 4,4 5 | 4,4 6 | 1,3 7 | 3,1 8 | 2,2 9 | 1,4 10 | 3,2 -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/tpch/customer.csv: -------------------------------------------------------------------------------- 1 | 1|Customer#000000001|IVhzIApeRb ot,c,E|15|25-989-741-2988|711.56|BUILDING|to the even, regular platelets. regular, ironic epitaphs nag e 2 | 2|Customer#000000002|XSTf4,NCwDVaWNe6tEgvwfmRchLXak|13|23-768-687-3665|121.65|AUTOMOBILE|l accounts. blithely ironic theodolites integrate boldly: caref 3 | 3|Customer#000000003|MG9kdTD2WBHm|1|11-719-748-3364|7498.12|AUTOMOBILE| deposits eat slyly ironic, even instructions. express foxes detect slyly. blithely even accounts abov 4 | 4|Customer#000000004|XxVSJsLAGtn|4|14-128-190-5944|2866.83|MACHINERY| requests. final, regular ideas sleep final accou 5 | 5|Customer#000000005|KvpyuHCplrB84WgAiGV6sYpZq7Tj|3|13-750-942-6364|794.47|HOUSEHOLD|n accounts will have to unwind. foxes cajole accor 6 | 6|Customer#000000006|sKZz0CsnMD7mp4Xd0YrBvx,LREYKUWAh yVn|20|30-114-968-4951|7638.57|AUTOMOBILE|tions. even deposits boost according to the slyly bold packages. final accounts cajole requests. furious 7 | 7|Customer#000000007|TcGe5gaZNgVePxU5kRrvXBfkasDTea|18|28-190-982-9759|9561.95|AUTOMOBILE|ainst the ironic, express theodolites. express, even pinto beans among the exp 8 | 8|Customer#000000008|I0B10bB0AymmC, 0PrRYBCP1yGJ8xcBPmWhl5|17|27-147-574-9335|6819.74|BUILDING|among the slyly regular theodolites kindle blithely courts. 
carefully even theodolites haggle slyly along the ide 9 | 9|Customer#000000009|xKiAFTjUsCuxfeleNqefumTrjS|8|18-338-906-3675|8324.07|FURNITURE|r theodolites according to the requests wake thinly excuses: pending requests haggle furiousl 10 | 10|Customer#000000010|6LrEaV6KR6PLVcgl2ArL Q3rqzLzcT1 v2|5|15-741-346-9870|2753.54|HOUSEHOLD|es regular deposits haggle. fur 11 | 11|Customer#000000011|PkWS 3HlXqwTuzrKg633BEi|23|33-464-151-3439|-272.60|BUILDING|ckages. requests sleep slyly. quickly even pinto beans promise above the slyly regular pinto beans. 12 | 12|Customer#000000012|9PWKuhzT4Zr1Q|13|23-791-276-1263|3396.49|HOUSEHOLD| to the carefully final braids. blithely regular requests nag. ironic theodolites boost quickly along 13 | 13|Customer#000000013|nsXQu0oVjD7PM659uC3SRSp|3|13-761-547-5974|3857.34|BUILDING|ounts sleep carefully after the close frays. carefully bold notornis use ironic requests. blithely 14 | 14|Customer#000000014|KXkletMlL2JQEA |1|11-845-129-3851|5266.30|FURNITURE|, ironic packages across the unus 15 | 15|Customer#000000015|YtWggXoOLdwdo7b0y,BZaGUQMLJMX1Y,EC,6Dn|23|33-687-542-7601|2788.52|HOUSEHOLD| platelets. regular deposits detect asymptotes. blithely unusual packages nag slyly at the fluf 16 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/tpch/nation.csv: -------------------------------------------------------------------------------- 1 | 0|ALGERIA|0| haggle. carefully final deposits detect slyly agai 2 | 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon 3 | 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special 4 | 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold 5 | 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d 6 | 5|ETHIOPIA|0|ven packages wake quickly. regu 7 | 6|FRANCE|3|refully final requests. regular, ironi 8 | 7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco 9 | 8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun 10 | 9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull 11 | 10|IRAN|4|efully alongside of the slyly final dependencies. 12 | 11|IRAQ|4|nic deposits boost atop the quickly final requests? quickly regula 13 | 12|JAPAN|2|ously. final, express gifts cajole a 14 | 13|JORDAN|4|ic deposits are blithely about the carefully regular pa 15 | 14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t 16 | 15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets? 17 | 16|MOZAMBIQUE|0|s. ironic, unusual asymptotes wake blithely r 18 | 17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun 19 | 18|CHINA|2|c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos 20 | 19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account 21 | 20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely 22 | 21|VIETNAM|2|hely enticingly express accounts. 
even, final 23 | 22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint 24 | 23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull 25 | 24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be 26 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/tpch/part.csv: -------------------------------------------------------------------------------- 1 | 1|goldenrod lace spring peru powder|Manufacturer#1|Brand#13|PROMO BURNISHED COPPER|7|JUMBO PKG|901.00|ly. slyly ironi 2 | 2|blush rosy metallic lemon navajo|Manufacturer#1|Brand#13|LARGE BRUSHED BRASS|1|LG CASE|902.00|lar accounts amo 3 | 3|dark green antique puff wheat|Manufacturer#4|Brand#42|STANDARD POLISHED BRASS|21|WRAP CASE|903.00|egular deposits hag 4 | 4|chocolate metallic smoke ghost drab|Manufacturer#3|Brand#34|SMALL PLATED BRASS|14|MED DRUM|904.00|p furiously r 5 | 5|forest blush chiffon thistle chocolate|Manufacturer#3|Brand#32|STANDARD POLISHED TIN|15|SM PKG|905.00| wake carefully 6 | 6|white ivory azure firebrick black|Manufacturer#2|Brand#24|PROMO PLATED STEEL|4|MED BAG|906.00|sual a 7 | 7|blue blanched tan indian olive|Manufacturer#1|Brand#11|SMALL PLATED COPPER|45|SM BAG|907.00|lyly. ex 8 | 8|ivory khaki cream midnight rosy|Manufacturer#4|Brand#44|PROMO BURNISHED TIN|41|LG DRUM|908.00|eposi 9 | 9|thistle rose moccasin light floral|Manufacturer#4|Brand#43|SMALL BURNISHED STEEL|12|WRAP CASE|909.00|ironic foxe 10 | 10|floral moccasin royal powder burnished|Manufacturer#5|Brand#54|LARGE BURNISHED STEEL|44|LG CAN|910.01|ithely final deposit 11 | 11|chocolate turquoise sandy snow misty|Manufacturer#2|Brand#25|STANDARD BURNISHED NICKEL|43|WRAP BOX|911.01|ng gr 12 | 12|peru ivory olive powder frosted|Manufacturer#3|Brand#33|MEDIUM ANODIZED STEEL|25|JUMBO CASE|912.01| quickly 13 | 13|ghost blue olive sky gainsboro|Manufacturer#5|Brand#55|MEDIUM BURNISHED NICKEL|1|JUMBO PACK|913.01|osits. 14 | 14|linen seashell burnished blue gainsboro|Manufacturer#1|Brand#13|SMALL POLISHED STEEL|28|JUMBO BOX|914.01|kages c 15 | 15|navajo dark sky turquoise royal|Manufacturer#1|Brand#15|LARGE ANODIZED BRASS|45|LG CASE|915.01|usual ac 16 | 16|deep brown turquoise dim papaya|Manufacturer#3|Brand#32|PROMO PLATED TIN|2|MED PACK|916.01|unts a 17 | 17|burnished navy orange dodger cream|Manufacturer#4|Brand#43|ECONOMY BRUSHED STEEL|16|LG BOX|917.01| regular accounts 18 | 18|spring indian forest khaki midnight|Manufacturer#1|Brand#11|SMALL BURNISHED STEEL|42|JUMBO PACK|918.01|s cajole slyly a 19 | 19|dodger forest floral cream black|Manufacturer#2|Brand#23|SMALL ANODIZED NICKEL|33|WRAP BOX|919.01| pending acc 20 | 20|bisque salmon dark blanched linen|Manufacturer#1|Brand#12|LARGE POLISHED NICKEL|48|MED BAG|920.02|are across the asympt 21 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/tpch/region.csv: -------------------------------------------------------------------------------- 1 | 0|AFRICA|lar deposits. blithely final packages cajole. regular waters are final requests. regular accounts are according to 2 | 1|AMERICA|hs use ironic, even requests. s 3 | 2|ASIA|ges. thinly even pinto beans ca 4 | 3|EUROPE|ly final courts cajole furiously final excuse 5 | 4|MIDDLE EAST|uickly special accounts cajole carefully blithely close requests. 
carefully final asymptotes haggle furiousl 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/data/tpch/supplier.csv: -------------------------------------------------------------------------------- 1 | 1|Supplier#000000001| N kD4on9OM Ipw3,gf0JBoQDd7tgrzrddZ|17|27-918-335-1736|5755.94|each slyly above the careful 2 | 2|Supplier#000000002|89eJ5ksX3ImxJQBvxObC,|5|15-679-861-2259|4032.68| slyly bold instructions. idle dependen 3 | 3|Supplier#000000003|q1,G3Pj6OjIuUYfUoH18BFTKP5aU9bEV3|1|11-383-516-1199|4192.40|blithely silent requests after the express dependencies are sl 4 | 4|Supplier#000000004|Bk7ah4CK8SYQTepEmvMkkgMwg|15|25-843-787-7479|4641.08|riously even requests above the exp 5 | 5|Supplier#000000005|Gcdm2rJRzl5qlTVzc|11|21-151-690-3663|-283.84|. slyly regular pinto bea 6 | 6|Supplier#000000006|tQxuVm7s7CnK|14|24-696-997-4969|1365.79|final accounts. regular dolphins use against the furiously ironic decoys. 7 | 7|Supplier#000000007|s,4TicNGB4uO6PaSqNBUq|23|33-990-965-2201|6820.35|s unwind silently furiously regular courts. final requests are deposits. requests wake quietly blit 8 | 8|Supplier#000000008|9Sq4bBH2FQEmaFOocY45sRTxo6yuoG|17|27-498-742-3860|7627.85|al pinto beans. asymptotes haggl 9 | 9|Supplier#000000009|1KhUgZegwM3ua7dsYmekYBsK|10|20-403-398-8662|5302.37|s. unusual, even requests along the furiously regular pac 10 | 10|Supplier#000000010|Saygah3gYWMp72i PY|24|34-852-489-8585|3891.91|ing waters. regular requests ar 11 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/finance/axfinder.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM bids(t FLOAT, id INT, broker_id INT, volume FLOAT, price FLOAT) 2 | FROM FILE 'examples/data/finance.csv' 3 | LINE DELIMITED orderbook (book := 'bids', brokers := '10', 4 | deterministic := 'yes'); 5 | 6 | CREATE STREAM asks(t FLOAT, id INT, broker_id INT, volume FLOAT, price FLOAT) 7 | FROM FILE 'examples/data/finance.csv' 8 | LINE DELIMITED orderbook (book := 'asks', brokers := '10', 9 | deterministic := 'yes'); 10 | 11 | SELECT b.broker_id, SUM(a.volume + (-1 * b.volume)) AS axfinder 12 | FROM bids b, asks a 13 | WHERE b.broker_id = a.broker_id 14 | AND ( (a.price + ((-1) * b.price) > 1000) OR 15 | (b.price + ((-1) * a.price) > 1000) ) 16 | GROUP BY b.broker_id; 17 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/finance/brokerspread.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM bids(t FLOAT, id INT, broker_id INT, volume FLOAT, price FLOAT) 2 | FROM FILE 'examples/data/finance.csv' 3 | LINE DELIMITED orderbook (book := 'bids', brokers := '10', 4 | deterministic := 'yes'); 5 | 6 | SELECT x.broker_id, SUM((x.volume * x.price) - (y.volume * y.price)) AS bsp 7 | FROM bids x, bids y 8 | WHERE x.broker_id = y.broker_id AND x.t > y.t 9 | GROUP BY x.broker_id; 10 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/finance/brokervariance.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM bids(t FLOAT, id INT, broker_id INT, volume FLOAT, price FLOAT) 2 | FROM FILE 'examples/data/finance.csv' 3 | LINE DELIMITED orderbook (book := 'bids', brokers := '10', 4 | deterministic := 'yes'); 5 | 6 | SELECT x.broker_id, SUM(x.volume * 
x.price * y.volume * y.price * 0.5) AS bsv 7 | FROM bids x, bids y 8 | WHERE x.broker_id = y.broker_id 9 | GROUP BY x.broker_id; 10 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/finance/missedtrades.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM bids(t FLOAT, id INT, broker_id INT, volume FLOAT, price FLOAT) 2 | FROM FILE 'examples/data/finance.csv' 3 | LINE DELIMITED orderbook (book := 'bids', brokers := '10', 4 | deterministic := 'yes'); 5 | 6 | CREATE STREAM asks(t FLOAT, id INT, broker_id INT, volume FLOAT, price FLOAT) 7 | FROM FILE 'examples/data/finance.csv' 8 | LINE DELIMITED orderbook (book := 'asks', brokers := '10', 9 | deterministic := 'yes'); 10 | 11 | SELECT b.broker_id, SUM((a.price * a.volume) + (-1 * b.price * b.volume)) AS mst 12 | FROM bids b, asks a 13 | WHERE 0.25 * (SELECT SUM(a1.volume) FROM asks a1) > 14 | (SELECT SUM(a2.volume) FROM asks a2 WHERE a2.price > a.price) 15 | AND 0.25 * (SELECT SUM(b1.volume) FROM bids b1) > 16 | (SELECT SUM(b2.volume) FROM bids b2 WHERE b2.price > b.price) 17 | GROUP BY b.broker_id; 18 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/finance/pricespread.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM bids(t FLOAT, id INT, broker_id INT, volume FLOAT, price FLOAT) 2 | FROM FILE 'examples/data/finance.csv' 3 | LINE DELIMITED orderbook (book := 'bids', brokers := '10', 4 | deterministic := 'yes'); 5 | 6 | CREATE STREAM asks(t FLOAT, id INT, broker_id INT, volume FLOAT, price FLOAT) 7 | FROM FILE 'examples/data/finance.csv' 8 | LINE DELIMITED orderbook (book := 'asks', brokers := '10', 9 | deterministic := 'yes'); 10 | 11 | SELECT SUM(a.price + (-1 * b.price)) AS psp 12 | FROM bids b, asks a 13 | WHERE ( b.volume > 0.0001 * (SELECT SUM(b1.volume) FROM bids b1) ) 14 | AND ( a.volume > 0.0001 * (SELECT SUM(a1.volume) FROM asks a1) ); 15 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/finance/vwap.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM bids(t FLOAT, id INT, broker_id INT, volume FLOAT, price FLOAT) 2 | FROM FILE 'examples/data/finance.csv' 3 | LINE DELIMITED orderbook (book := 'bids', brokers := '10', 4 | deterministic := 'yes'); 5 | 6 | SELECT SUM(b1.price * b1.volume) AS vwap 7 | FROM bids b1 8 | WHERE 0.25 * 9 | (SELECT SUM(b3.volume) FROM bids b3) 10 | > 11 | (SELECT SUM(b2.volume) FROM bids b2 WHERE b2.price > b1.price); 12 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/mddb/README: -------------------------------------------------------------------------------- 1 | MDDB queries might take longer to compile! 
2 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/mddb/query1.sql: -------------------------------------------------------------------------------- 1 | INCLUDE 'examples/queries/mddb/schemas.sql'; 2 | 3 | select P.trj_id, P.t, avg(vec_length(P.x-P2.x, P.y-P2.y, P.z-P2.z)) as rdf 4 | from AtomPositions P, AtomMeta M, 5 | AtomPositions P2, AtomMeta M2 6 | where P.trj_id = P2.trj_id 7 | and P.t = P2.t 8 | and P.atom_id = M.atom_id 9 | and P2.atom_id = M2.atom_id 10 | and M.residue_name = 'LYS' 11 | and M.atom_name = 'NZ' 12 | and M2.residue_name = 'TIP3' 13 | and M2.atom_name = 'OH2' 14 | group by P.trj_id, P.t; 15 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/mddb/query2.sql: -------------------------------------------------------------------------------- 1 | INCLUDE 'examples/queries/mddb/schemas.sql'; 2 | 3 | select P1.trj_id, P1.t, 4 | dihedral_angle(P1.x,P1.y,P1.z, 5 | P2.x,P2.y,P2.z, 6 | P3.x,P3.y,P3.z, 7 | P4.x,P4.y,P4.z) 8 | as phi_psi, 9 | DM.dim_id 10 | from Dihedrals D, Dimensions DM, 11 | AtomPositions P1, AtomPositions P2, AtomPositions P3, AtomPositions P4, 12 | AtomMeta M1, AtomMeta M2, AtomMeta M3, AtomMeta M4 13 | where P1.t = P2.t and P1.t = P3.t and P1.t = P4.t 14 | and P1.trj_id = P2.trj_id and P1.trj_id = P3.trj_id and P1.trj_id = P4.trj_id 15 | and (D.atom_id1 = M1.atom_id and M1.atom_id = P1.atom_id) 16 | and (D.atom_id2 = M2.atom_id and M2.atom_id = P2.atom_id) 17 | and (D.atom_id3 = M3.atom_id and M3.atom_id = P3.atom_id) 18 | and (D.atom_id4 = M4.atom_id and M4.atom_id = P4.atom_id) 19 | and (D.atom_id1 = DM.atom_id1 and D.atom_id2 = DM.atom_id2 and 20 | D.atom_id3 = DM.atom_id3 and D.atom_id4 = DM.atom_id4) 21 | and ( (M1.atom_name = 'N' and M2.atom_name = 'CA' and M3.atom_name = 'C') 22 | or (M2.atom_name = 'N' and M3.atom_name = 'CA' and M4.atom_name = 'C') ) 23 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/mddb/schemas.sql: -------------------------------------------------------------------------------- 1 | create stream AtomPositions ( 2 | trj_id int, 3 | t int, 4 | atom_id int, 5 | x float, 6 | y float, 7 | z float 8 | ) FROM FILE 'examples/data/mddb/atompositions.csv' 9 | LINE DELIMITED CSV; 10 | 11 | create table AtomMeta ( 12 | protein_id int, 13 | atom_id int, 14 | atom_type varchar(100), 15 | atom_name varchar(100), 16 | residue_id int, 17 | residue_name varchar(100), 18 | segment_name varchar(100) 19 | ) FROM FILE 'examples/data/mddb/atommeta.csv' 20 | LINE DELIMITED CSV; 21 | 22 | create table Bonds ( 23 | protein_id int, 24 | atom_id1 int, 25 | atom_id2 int, 26 | bond_const float, 27 | bond_length float 28 | ) FROM FILE 'examples/data/mddb/bonds.csv' 29 | LINE DELIMITED CSV; 30 | 31 | create table Angles ( 32 | protein_id int, 33 | atom_id1 int, 34 | atom_id2 int, 35 | atom_id3 int, 36 | angle_const float, 37 | angle float 38 | ) FROM FILE 'examples/data/mddb/angles.csv' 39 | LINE DELIMITED CSV; 40 | 41 | create table Dihedrals ( 42 | protein_id int, 43 | atom_id1 int, 44 | atom_id2 int, 45 | atom_id3 int, 46 | atom_id4 int, 47 | force_const float, 48 | n float, 49 | delta float 50 | ) FROM FILE 'examples/data/mddb/dihedrals.csv' 51 | LINE DELIMITED CSV; 52 | 53 | create table ImproperDihedrals ( 54 | protein_id int, 55 | atom_id1 int, 56 | atom_id2 int, 57 | atom_id3 int, 58 | atom_id4 int, 59 | force_const float, 60 | delta float 61 | ) 
FROM FILE 'examples/data/mddb/improperdihedrals.csv' 62 | LINE DELIMITED CSV; 63 | 64 | create table NonBonded ( 65 | protein_id int, 66 | atom_id1 int, 67 | atom_id2 int, 68 | atom_ty1 varchar(100), 69 | atom_ty2 varchar(100), 70 | rmin float, 71 | eps float, 72 | acoef float, 73 | bcoef float, 74 | charge1 float, 75 | charge2 float 76 | ) FROM FILE 'examples/data/mddb/nonbonded.csv' 77 | LINE DELIMITED CSV; 78 | 79 | create table ConformationPoints ( 80 | trj_id int, 81 | t int, 82 | point_id int 83 | ) FROM FILE 'examples/data/mddb/conformationpoints.csv' 84 | LINE DELIMITED CSV; 85 | 86 | create table Dimensions ( 87 | atom_id1 int, 88 | atom_id2 int, 89 | atom_id3 int, 90 | atom_id4 int, 91 | dim_id int 92 | ) FROM FILE 'examples/data/mddb/dimensions.csv' 93 | LINE DELIMITED CSV; 94 | 95 | create table Buckets ( 96 | dim_id int, 97 | bucket_id int, 98 | bucket_start float, 99 | bucket_end float 100 | ) FROM FILE 'examples/data/mddb/buckets.csv' 101 | LINE DELIMITED CSV; 102 | 103 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_aggcomparison.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT * 5 | FROM R r1, (SELECT SUM(r3.B) AS C FROM R r3) S 6 | WHERE r1.A > (SELECT SUM(C) FROM R r2); 7 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_aggofnested.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT COUNT(*) FROM (SELECT * FROM R) n; 5 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_aggofnestedagg.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT SUM(1) FROM (SELECT SUM(1) FROM R) r; 5 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_agtb.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT r1.A as BOB, r2.B as JOE 5 | FROM R r1, R r2 6 | WHERE r1.B > r2.A 7 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_agtbexists.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT r1.A, SUM(1) 5 | FROM R r1 6 | WHERE EXISTS (SELECT r2.B FROM R r2 WHERE r1.A > r2.A) 7 | GROUP BY r1.A 8 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_avg.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT AVG(A) FROM R; 5 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_bigsumstar.sql: 
-------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT r1.A, SUM(r1.B + r2.B + r3.B + r4.B) 5 | FROM R r1, R r2, R r3, R r4 6 | WHERE r1.A = r2.A 7 | AND r2.A = r3.A 8 | AND r3.A = r4.A 9 | GROUP BY r1.A 10 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_btimesa.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A float, B float) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT A, SUM(B * (SELECT SUM(r2.A) FROM R r2)) FROM R r1 GROUP BY A 5 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_btimesacorrelated.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A float, B float) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT A, SUM(B * (SELECT SUM(r2.A) FROM R r2 WHERE r1.A = r2.A)) 5 | FROM R r1 GROUP BY A 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_case.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT total.YEAR, SUM(CASE total.NAME WHEN 3 THEN total.VOLUME ELSE 0 END) / 5 | LISTMAX(1, SUM(total.VOLUME)) AS mkt_share_1 6 | FROM ( 7 | SELECT R.A as YEAR, R.B as NAME, R.B AS VOLUME 8 | FROM R 9 | ) total 10 | GROUP BY total.YEAR; 11 | 12 | SELECT R.A, SUM(CASE R.B WHEN 3 THEN R.B ELSE 0 END) / 13 | LISTMAX(1, SUM(R.B)) AS mkt_share_2 14 | FROM R 15 | GROUP BY R.A; 16 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_case2.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int, C int) 2 | FROM FILE './r.dat' LINE DELIMITED csv; 3 | 4 | SELECT total.YEAR, SUM(CASE total.NAME WHEN 42 THEN total.VOLUME ELSE 0 END) / 5 | LISTMAX(1, SUM(total.VOLUME)) AS mkt_share_1 6 | FROM ( 7 | SELECT R.A as YEAR, R.B as NAME, R.C AS VOLUME 8 | FROM R 9 | ) total 10 | GROUP BY total.YEAR; 11 | 12 | SELECT R.A, SUM(CASE R.B WHEN 42 THEN R.C ELSE 0 END) / 13 | LISTMAX(1, SUM(R.C)) AS mkt_share_2 14 | FROM R 15 | GROUP BY R.A; 16 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_count.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT SUM(1) FROM R; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_count_of_one.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT C 5 | FROM (SELECT R.A, COUNT(*) as C FROM R GROUP BY A) s 6 | WHERE s.A = 3 7 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_count_of_one_prime.sql: 
-------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT * 5 | FROM (SELECT R.A, COUNT(*) as C FROM R GROUP BY R.A) s 6 | WHERE s.A = 3 7 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_deepscoping.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT r3.A 6 | FROM (SELECT r1.A FROM R r1, 7 | (SELECT r2.A FROM R r2) r2b WHERE r1.A=r2b.A) r3, 8 | R r4 9 | WHERE r3.A = r4.A 10 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_distinct.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT A, B FROM R; 5 | 6 | SELECT DISTINCT A, B FROM R; 7 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_distincttest.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT * FROM R; 5 | SELECT COUNT(*) FROM R; 6 | SELECT COUNT(DISTINCT) FROM R; 7 | SELECT COUNT(DISTINCT B) FROM R; 8 | SELECT COUNT(DISTINCT B) FROM R GROUP BY A; 9 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_divb.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT (100000*SUM(1))/B FROM R GROUP BY B; 6 | 7 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_existsnestedagg.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT r1.A, SUM(1) 6 | FROM R r1 7 | WHERE EXISTS 8 | ( SELECT 1 9 | FROM (SELECT SUM(r2.B) as B FROM R r2 WHERE r2.A = r1.B) n1 10 | WHERE r1.A *3 < n1.B ) 11 | GROUP BY r1.A 12 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_gbasumb.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | csv (); 4 | 5 | SELECT A, A*SUM(B) FROM R GROUP BY A; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_gtealldynamic.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM R r2 WHERE r2.B >= ALL (SELECT r1.A FROM R r1); 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_gtesomedynamic.sql: -------------------------------------------------------------------------------- 1 | CREATE 
STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM R r2 WHERE r2.B >= SOME (SELECT r1.A FROM R r1); 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_gtsomedynamic.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM R r2 WHERE r2.B > SOME (SELECT r1.A FROM R r1); 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_impossibleineq.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM R WHERE R.A < R.B AND R.A > R.B; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_indynamic.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | csv (); 4 | 5 | SELECT * FROM R r2 WHERE r2.B IN (SELECT r1.A FROM R r1); 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_ineqandeq.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT SUM(A) FROM R WHERE A = B AND A <= B 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_inlist.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT * FROM R WHERE A IN LIST (1, 2, 3); 5 | SELECT * FROM R WHERE NOT A IN LIST (1, 2, 3); 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_instatic.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM R WHERE B IN (SELECT 1); 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_lift_of_count.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | csv (); 4 | 5 | SELECT r2.C FROM ( 6 | SELECT r1.A, COUNT(*) AS C FROM R r1 GROUP BY r1.A 7 | ) r2; 8 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_ltallagg.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM R r2 WHERE r2.B < ALL (SELECT SUM(r1.A) FROM R r1); 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_ltallavg.sql: 
-------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM R r2 WHERE r2.B < ALL (SELECT SUM(r1.A) / 10 FROM R r1); 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_ltallcorravg.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM R r2 6 | WHERE r2.A < ALL ( 7 | SELECT AVG(r1.A) FROM R r1 WHERE r1.B = r2.B 8 | ); 9 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_ltalldynamic.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM R r2 WHERE r2.B < ALL (SELECT r1.A FROM R r1); 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_multinest.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * 6 | FROM 7 | ( SELECT y.D, SUM(y.C) 8 | FROM 9 | ( SELECT x.C, SUM(x.A) AS D 10 | FROM 11 | ( SELECT r1.A, SUM(r1.B) AS C 12 | FROM R r1 13 | GROUP BY r1.A ) x 14 | GROUP BY x.C 15 | ) y 16 | GROUP BY D 17 | ) z; 18 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_multiors.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT sum(B) AS agg FROM R WHERE (R.A=1) OR (R.A=2) OR (R.A=3); 5 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_natselfjoin.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT R1.* FROM R r1 NATURAL JOIN R r2; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_nestedfrom.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT p.A, sum(p.value) AS Q FROM ( 5 | SELECT R.A, sum(R.B) AS value FROM R GROUP BY R.A 6 | ) p 7 | WHERE p.value > 5 8 | GROUP BY p.A; 9 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_nestedrename.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | SELECT foo 5 | FROM (SELECT R.A AS foo, COUNT(*) FROM R GROUP BY foo) q; 6 | 7 | SELECT foo 8 | FROM (SELECT R.A AS foo, COUNT(*) FROM R GROUP BY R.A) q; 9 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_nogroupby.sql: 
-------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT A FROM R; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_nonjoineq.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT A FROM R WHERE A <= B AND A=B; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_possibleineq.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM R WHERE R.A <= R.B AND R.A >= R.B; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_possibleineqwitheq.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM R WHERE R.A = R.B AND R.A <= R.B AND R.A >= R.B; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_selectstar.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM R; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_simplenest.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT A FROM R r1 WHERE EXISTS (SELECT R2.A FROM R r2); 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_smallstar.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT r1.B, SUM(r1.A * r2.A) 6 | FROM R r1, R r2 7 | WHERE r1.B = r2.B 8 | GROUP BY r1.B 9 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_starofnested.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT * FROM (SELECT * FROM R) n; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_starofnestedagg.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | csv (); 4 | 5 | SELECT * FROM (SELECT COUNT(*) FROM R) n; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_sum_gb_all_out_of_aggregate.sql: 
-------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT A+SUM(1) FROM R GROUP BY A,B 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_sum_gb_out_of_aggregate.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT A+SUM(B) FROM R GROUP BY A 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_sum_out_of_aggregate.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT 1+SUM(A) FROM R 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_sumadivsumb.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A float, B float) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | /* We insert a LISTMAX to support incremental computation. */ 6 | 7 | SELECT SUM(A)/LISTMAX(1, 1+SUM(B)) FROM R 8 | 9 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_sumdivgrp.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT A, SUM(B)/A FROM R GROUP BY A; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_sumnestedintarget.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT SUM((SELECT SUM(1) FROM R r1)) FROM R r2; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_sumnestedintargetwitheq.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT SUM((SELECT SUM(1) FROM R r2 WHERE r1.A = r2.A)) FROM R r1; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_sumoutsideofagg.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | csv (); 4 | 5 | SELECT A, A+SUM(B) FROM R GROUP BY A; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_sumstar.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT r1.B, SUM(r1.A + r2.A) 6 | FROM R r1, R r2 7 | WHERE r1.B = r2.B 8 | GROUP BY r1.B 9 | -------------------------------------------------------------------------------- 
/backend/dbtoaster/examples/queries/simple/r_union.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT R.A AS C, SUM(R.B) AS D 6 | FROM R GROUP BY R.A 7 | UNION 8 | SELECT R.B AS C, SUM(R.A) AS D FROM R GROUP BY R.B 9 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/r_unique_counts_by_a.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT r3.C FROM ( 6 | SELECT r2.C, COUNT(*) FROM ( 7 | SELECT r1.A, COUNT(*) AS C FROM R r1 GROUP BY r1.A 8 | ) r2 GROUP BY C 9 | ) r3; 10 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/rr_ormyself.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | SELECT r1.A FROM R r1, R r2 WHERE (r1.A = r2.A or r1.A = r2.A) and r1.B = r2.B; 6 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/rs.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | CREATE STREAM S(B int, C int) 5 | FROM FILE 'examples/data/simple/s.dat' LINE DELIMITED csv; 6 | 7 | SELECT sum(A*C) AS agg1, sum(A+C) AS agg2 FROM R,S WHERE R.B=S.B; 8 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/rs_cmpnest.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | CREATE STREAM S(B int, C int) 6 | FROM FILE 'examples/data/simple/s.dat' LINE DELIMITED 7 | CSV (); 8 | 9 | SELECT R.A,R.B FROM R WHERE R.A < ( SELECT SUM(S.C) FROM S WHERE R.B = S.B ); 10 | 11 | /* 12 | SELECT sum(A+B) FROM R; 13 | 14 | SELECT sum(R.A) FROM R WHERE R.B = (SELECT sum(S.C) FROM S); 15 | */ 16 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/rs_column_mapping_1.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | CREATE STREAM S(C int, D int) 5 | FROM FILE 'examples/data/simple/s.dat' LINE DELIMITED csv; 6 | 7 | SELECT r1.B FROM S s, R r1, R r2 WHERE s.C=r1.A AND s.D = r2.A; 8 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/rs_column_mapping_2.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | CREATE STREAM S(C int, D int) 5 | FROM FILE 'examples/data/simple/s.dat' LINE DELIMITED csv; 6 | 7 | SELECT r1.A, r1.B FROM S s, R r1, R r2 WHERE s.C=r1.A AND s.D = r2.A; 8 | -------------------------------------------------------------------------------- 
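rs.sql above maintains two aggregates over the equi-join of R and S on B. As a rough illustration of how such a join aggregate can be kept up to date incrementally instead of recomputed from scratch, the hand-written sketch below maintains SUM(A*C) under insertions by keeping one partial sum per join key for each relation. It is only an approximation of the idea under assumed data structures (plain hash maps, integer columns), not the code the compiler actually emits.

#include <iostream>
#include <unordered_map>

// Hand-written sketch of incrementally maintaining
//   SELECT SUM(A*C) FROM R, S WHERE R.B = S.B
// under insertions: for each join key B we keep SUM(A) over R and
// SUM(C) over S, so an insertion touches only the matching bucket.
struct JoinAgg {
    std::unordered_map<int, long> sumA_byB;  // B -> SUM(A) over R
    std::unordered_map<int, long> sumC_byB;  // B -> SUM(C) over S
    long result = 0;                         // SUM(A*C) over the join

    void insertR(int a, int b) {
        result += a * sumC_byB[b];           // new R tuple joins existing S tuples
        sumA_byB[b] += a;
    }
    void insertS(int b, int c) {
        result += sumA_byB[b] * c;           // new S tuple joins existing R tuples
        sumC_byB[b] += c;
    }
};

int main() {
    JoinAgg q;
    q.insertR(1, 10);                        // R(A=1, B=10)
    q.insertS(10, 5);                        // S(B=10, C=5) -> contributes 1*5
    q.insertR(3, 10);                        // R(A=3, B=10) -> contributes 3*5
    std::cout << "agg1 = " << q.result << "\n";  // prints 20
    return 0;
}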
/backend/dbtoaster/examples/queries/simple/rs_column_mapping_3.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED csv; 3 | 4 | CREATE STREAM S(C int, D int) 5 | FROM FILE 'examples/data/simple/s.dat' LINE DELIMITED csv; 6 | 7 | SELECT r1.A, r1.B FROM S s, R r1, R r2 WHERE s.C=r2.A AND s.D = r2.A; 8 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/rs_eqineq.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | CREATE STREAM S(B int, C int) 6 | FROM FILE 'examples/data/simple/s.dat' LINE DELIMITED 7 | CSV (); 8 | 9 | SELECT * FROM R,S WHERE R.B = S.B AND R.A < S.C 10 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/rs_example1.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | CREATE STREAM S(B int, C int) 6 | FROM FILE 'examples/data/simple/s.dat' LINE DELIMITED 7 | CSV (); 8 | 9 | SELECT SUM(r.A*s.C) as RESULT FROM R r, S s WHERE r.B = s.B; 10 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/rs_example2.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | CREATE STREAM S(B int, C int) 6 | FROM FILE 'examples/data/simple/s.dat' LINE DELIMITED 7 | CSV (); 8 | 9 | SELECT r.B, SUM(r.A*s.C) as RESULT_1, SUM(r.A+s.C) as RESULT_2 FROM R r, S s WHERE r.B = s.B GROUP BY r.B; 10 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/rs_ineqonnestedagg.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | CSV (); 4 | 5 | CREATE STREAM S(B int, C int) 6 | FROM FILE 'examples/data/simple/s.dat' LINE DELIMITED 7 | CSV (); 8 | 9 | SELECT A FROM R r, (SELECT s2.B, COUNT(*) AS CNT FROM S s2 GROUP BY s2.B) s WHERE r.B = s.B AND r.A < CNT; 10 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/simple/rs_inequality.sql: -------------------------------------------------------------------------------- 1 | CREATE STREAM R(A int, B int) 2 | FROM FILE 'examples/data/simple/r.dat' LINE DELIMITED 3 | csv (); 4 | 5 | CREATE STREAM S(C int, D int) 6 | FROM FILE 'examples/data/simple/s.dat' LINE DELIMITED 7 | csv (); 8 | 9 | SELECT sum(A*D) FROM R,S WHERE R.B= DATE('1993-10-01') 14 | AND o.orderdate < DATE('1994-01-01') 15 | AND l.returnflag = 'R' 16 | AND c.nationkey = n.nationkey 17 | GROUP BY c.custkey, c.name, c.acctbal, c.phone, n.name, c.address, c.comment 18 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query11.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT p.partkey, SUM(p.value) AS QUERY11 4 
| FROM 5 | ( 6 | SELECT ps.partkey, sum(ps.supplycost * ps.availqty) AS value 7 | FROM partsupp ps, supplier s, nation n 8 | WHERE ps.suppkey = s.suppkey 9 | AND s.nationkey = n.nationkey 10 | AND n.name = 'GERMANY' 11 | GROUP BY ps.partkey 12 | ) p 13 | WHERE p.value > ( 14 | SELECT sum(ps.supplycost * ps.availqty) * 0.001 15 | FROM partsupp ps, supplier s, nation n 16 | WHERE ps.suppkey = s.suppkey 17 | AND s.nationkey = n.nationkey 18 | AND n.name = 'GERMANY' 19 | ) 20 | GROUP BY p.partkey; 21 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query11a.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT ps.partkey, SUM(ps.supplycost * ps.availqty) AS query11a 4 | FROM partsupp ps, supplier s 5 | WHERE ps.suppkey = s.suppkey 6 | GROUP BY ps.partkey; 7 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query12.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT l.shipmode, 4 | SUM(CASE WHEN o.orderpriority IN LIST ('1-URGENT', '2-HIGH') 5 | THEN 1 ELSE 0 END) AS high_line_count, 6 | SUM(CASE WHEN o.orderpriority NOT IN LIST ('1-URGENT', '2-HIGH') 7 | THEN 1 ELSE 0 END) AS low_line_count 8 | FROM orders o, lineitem l 9 | WHERE o.orderkey = l.orderkey 10 | AND (l.shipmode IN LIST ('MAIL', 'SHIP')) 11 | AND l.commitdate < l.receiptdate 12 | AND l.shipdate < l.commitdate 13 | AND l.receiptdate >= DATE('1994-01-01') 14 | AND l.receiptdate < DATE('1995-01-01') 15 | GROUP BY l.shipmode; 16 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query13.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT c_count, COUNT(*) AS custdist 4 | FROM ( 5 | SELECT c.custkey AS c_custkey, COUNT(o.orderkey) AS c_count 6 | FROM customer c, orders o 7 | WHERE c.custkey = o.custkey 8 | AND (o.comment NOT LIKE '%special%requests%') 9 | GROUP BY c.custkey 10 | ) c_orders 11 | GROUP BY c_count; 12 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query14.sql: -------------------------------------------------------------------------------- 1 | /* We insert a LISTMAX to support incremental computation. For this particular 2 | query, this is safe, because if the denominator equals 0, then the numerator 3 | of the division is also guaranteed to be 0. */ 4 | 5 | INCLUDE './examples/queries/tpch/schemas.sql'; 6 | 7 | SELECT (100.00 * SUM(CASE WHEN (p.type LIKE 'PROMO%') 8 | THEN l.extendedprice * (1 - l.discount) ELSE 0 END) / 9 | LISTMAX(1, SUM(l.extendedprice * (1 - l.discount)))) AS 10 | promo_revenue 11 | FROM lineitem l, part p 12 | WHERE l.partkey = p.partkey 13 | AND l.shipdate >= DATE('1995-09-01') 14 | AND l.shipdate < DATE('1995-10-01') 15 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query15.sql: -------------------------------------------------------------------------------- 1 | /* We change the query to be more "incrementality friendly". In order to avoid 2 | issues with floating points, we cast the keys to integers. 
*/ 3 | 4 | INCLUDE './examples/queries/tpch/schemas.sql'; 5 | 6 | SELECT s.suppkey, s.name, s.address, s.phone, r1.total_revenue as total_revenue 7 | FROM supplier s, 8 | (SELECT l.suppkey AS supplier_no, 9 | SUM(l.extendedprice * (1 - l.discount)) AS total_revenue 10 | FROM lineitem l 11 | WHERE l.shipdate >= DATE('1996-01-01') 12 | AND l.shipdate < DATE('1996-04-01') 13 | GROUP BY l.suppkey) AS r1 14 | WHERE 15 | s.suppkey = r1.supplier_no 16 | AND (NOT EXISTS (SELECT 1 17 | FROM (SELECT l.suppkey, 18 | SUM(l.extendedprice * (1 - l.discount)) 19 | AS total_revenue 20 | FROM lineitem l 21 | WHERE l.shipdate >= DATE('1996-01-01') 22 | AND l.shipdate < DATE('1996-04-01') 23 | GROUP BY l.suppkey) AS r2 24 | WHERE r2.total_revenue > r1.total_revenue) ); 25 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query16.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT p.brand, 4 | p.type, 5 | p.size, 6 | COUNT(DISTINCT ps.suppkey) AS supplier_cnt 7 | FROM partsupp ps, part p 8 | WHERE p.partkey = ps.partkey 9 | AND p.brand <> 'Brand#45' 10 | AND (p.type NOT LIKE 'MEDIUM POLISHED%') 11 | AND (p.size IN LIST (49, 14, 23, 45, 19, 3, 36, 9)) 12 | AND (ps.suppkey NOT IN ( 13 | SELECT s.suppkey 14 | FROM supplier s 15 | WHERE s.comment LIKE '%Customer%Complaints%' 16 | )) 17 | GROUP BY p.brand, p.type, p.size; 18 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query17.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT SUM(l.extendedprice) / 7.0 AS avg_yearly 4 | FROM lineitem l, part p 5 | WHERE p.partkey = l.partkey 6 | AND p.brand = 'Brand#23' 7 | AND p.container = 'MED BOX' 8 | AND l.quantity < ( 9 | SELECT 0.2 * AVG(l2.quantity) 10 | FROM lineitem l2 11 | WHERE l2.partkey = p.partkey 12 | ) 13 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query17a.sql: -------------------------------------------------------------------------------- 1 | /* Result: 2 | sum 3 | 4 | 898778.73 5 | (1 row) 6 | */ 7 | 8 | INCLUDE './examples/queries/tpch/schemas.sql'; 9 | 10 | SELECT sum(l.extendedprice) AS query17 11 | FROM lineitem l, part p 12 | WHERE p.partkey = l.partkey 13 | AND l.quantity < 0.005 * 14 | (SELECT sum(l2.quantity) 15 | FROM lineitem l2 WHERE l2.partkey = p.partkey); 16 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query18.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT c.name, c.custkey, o.orderkey, o.orderdate, o.totalprice, 4 | sum(l.quantity) AS query18 5 | FROM customer c, orders o, lineitem l 6 | WHERE o.orderkey IN 7 | ( SELECT l3.orderkey FROM ( 8 | SELECT l2.orderkey, SUM(l2.quantity) AS QTY 9 | FROM lineitem l2 GROUP BY l2.orderkey 10 | ) l3 11 | WHERE QTY > 100 12 | ) 13 | AND c.custkey = o.custkey 14 | AND o.orderkey = l.orderkey 15 | GROUP BY c.name, c.custkey, o.orderkey, o.orderdate, o.totalprice; 16 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query18a.sql: 
-------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT c.custkey, sum(l1.quantity) AS query18 4 | FROM customer c, orders o, lineitem l1 5 | WHERE 1 <= 6 | (SELECT sum(1) FROM lineitem l2 7 | WHERE l1.orderkey = l2.orderkey 8 | AND 100 < (SELECT sum(l3.quantity) FROM lineitem l3 9 | WHERE l2.orderkey = l3.orderkey)) 10 | AND c.custkey = o.custkey 11 | AND o.orderkey = l1.orderkey 12 | GROUP BY c.custkey; 13 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query19.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT SUM(l.extendedprice * (1 - l.discount) ) AS revenue 4 | FROM lineitem l, part p 5 | WHERE 6 | ( 7 | p.partkey = l.partkey 8 | AND p.brand = 'Brand#12' 9 | AND ( p.container IN LIST ( 'SM CASE', 'SM BOX', 'SM PACK', 'SM PKG') ) 10 | AND l.quantity >= 1 AND l.quantity <= 1 + 10 11 | AND ( p.size BETWEEN 1 AND 5 ) 12 | AND (l.shipmode IN LIST ('AIR', 'AIR REG') ) 13 | AND l.shipinstruct = 'DELIVER IN PERSON' 14 | ) 15 | OR 16 | ( 17 | p.partkey = l.partkey 18 | AND p.brand = 'Brand#23' 19 | AND ( p.container IN LIST ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK') ) 20 | AND l.quantity >= 10 AND l.quantity <= 10 + 10 21 | AND ( p.size BETWEEN 1 AND 10 ) 22 | AND ( l.shipmode IN LIST ('AIR', 'AIR REG') ) 23 | AND l.shipinstruct = 'DELIVER IN PERSON' 24 | ) 25 | OR 26 | ( 27 | p.partkey = l.partkey 28 | AND p.brand = 'Brand#34' 29 | AND ( p.container IN LIST ( 'LG CASE', 'LG BOX', 'LG PACK', 'LG PKG') ) 30 | AND l.quantity >= 20 AND l.quantity <= 20 + 10 31 | AND ( p.size BETWEEN 1 AND 15 ) 32 | AND ( l.shipmode IN LIST ('AIR', 'AIR REG') ) 33 | AND l.shipinstruct = 'DELIVER IN PERSON' 34 | ); 35 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query2.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT s.acctbal, s.name, n.name, p.partkey, p.mfgr, s.address, s.phone, 4 | s.comment 5 | FROM part p, supplier s, partsupp ps, nation n, region r 6 | WHERE p.partkey = ps.partkey 7 | AND s.suppkey = ps.suppkey 8 | AND p.size = 15 9 | AND (p.type LIKE '%BRASS') 10 | AND s.nationkey = n.nationkey 11 | AND n.regionkey = r.regionkey 12 | AND r.name = 'EUROPE' 13 | AND (NOT EXISTS (SELECT 1 14 | FROM partsupp ps2, supplier s2, nation n2, region r2 15 | WHERE p.partkey = ps2.partkey 16 | AND s2.suppkey = ps2.suppkey 17 | AND s2.nationkey = n2.nationkey 18 | AND n2.regionkey = r2.regionkey 19 | AND r2.name = 'EUROPE' 20 | AND ps2.supplycost < ps.supplycost)); 21 | 22 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query20.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT s.name, s.address 4 | FROM supplier s, nation n 5 | WHERE s.suppkey IN 6 | ( SELECT ps.suppkey 7 | FROM partsupp ps 8 | WHERE ps.partkey IN ( SELECT p.partkey 9 | FROM part p 10 | WHERE p.name like 'forest%' ) 11 | AND ps.availqty > ( SELECT 0.5 * SUM(l.quantity) 12 | FROM lineitem l 13 | WHERE l.partkey = ps.partkey 14 | AND l.suppkey = ps.suppkey 15 | AND l.shipdate >= DATE('1994-01-01') 16 | AND l.shipdate < 
DATE('1995-01-01') ) ) 17 | AND s.nationkey = n.nationkey 18 | AND n.name = 'CANADA'; 19 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query21.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT s.name, COUNT(*) AS numwait 4 | FROM supplier s, lineitem l1, orders o, nation n 5 | WHERE s.suppkey = l1.suppkey 6 | AND o.orderkey = l1.orderkey 7 | AND o.orderstatus = 'F' 8 | AND l1.receiptdate > l1.commitdate 9 | AND (EXISTS (SELECT * FROM lineitem l2 10 | WHERE l2.orderkey = l1.orderkey 11 | AND l2.suppkey <> l1.suppkey)) 12 | AND (NOT EXISTS (SELECT * FROM lineitem l3 13 | WHERE l3.orderkey = l1.orderkey 14 | AND l3.suppkey <> l1.suppkey 15 | AND l3.receiptdate > l3.commitdate)) 16 | AND s.nationkey = n.nationkey 17 | AND n.name = 'SAUDI ARABIA' 18 | GROUP BY s.name; 19 | 20 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query22.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT cntrycode, 4 | COUNT(*) AS numcust, 5 | SUM(custsale.acctbal) AS totalacctbal 6 | FROM ( 7 | SELECT SUBSTRING(c.phone, 0, 2) AS cntrycode, 8 | c.acctbal 9 | FROM customer c 10 | WHERE (SUBSTRING(c.phone, 0, 2) IN LIST 11 | ('13', '31', '23', '29', '30', '18', '17')) 12 | AND c.acctbal > ( 13 | SELECT AVG(c2.acctbal) 14 | FROM customer c2 15 | WHERE c2.acctbal > 0.00 16 | AND (SUBSTRING(c2.phone, 0, 2) IN LIST 17 | ('13', '31', '23', '29', '30', '18', '17'))) 18 | AND (NOT EXISTS (SELECT * FROM orders o WHERE o.custkey = c.custkey)) 19 | ) as custsale 20 | GROUP BY cntrycode 21 | 22 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query22a.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT c1.nationkey, sum(c1.acctbal) AS query22 4 | FROM customer c1 5 | WHERE c1.acctbal < 6 | (SELECT sum(c2.acctbal) FROM customer c2 WHERE c2.acctbal > 0) 7 | AND 0 = (SELECT sum(1) FROM orders o WHERE o.custkey = c1.custkey) 8 | GROUP BY c1.nationkey 9 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query3.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT ORDERS.orderkey, 4 | ORDERS.orderdate, 5 | ORDERS.shippriority, 6 | SUM(extendedprice * (1 - discount)) AS query3 7 | FROM CUSTOMER, ORDERS, LINEITEM 8 | WHERE CUSTOMER.mktsegment = 'BUILDING' 9 | AND ORDERS.custkey = CUSTOMER.custkey 10 | AND LINEITEM.orderkey = ORDERS.orderkey 11 | AND ORDERS.orderdate < DATE('1995-03-15') 12 | AND LINEITEM.shipdate > DATE('1995-03-15') 13 | GROUP BY ORDERS.orderkey, ORDERS.orderdate, ORDERS.shippriority; 14 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query4.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT o.orderpriority, COUNT(*) AS order_count 4 | FROM orders o 5 | WHERE o.orderdate >= DATE('1993-07-01') 6 | AND o.orderdate < DATE('1993-10-01') 7 | AND (EXISTS ( 8 | 
SELECT * FROM lineitem l 9 | WHERE l.orderkey = o.orderkey 10 | AND l.commitdate < l.receiptdate 11 | )) 12 | GROUP BY o.orderpriority; 13 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query5.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT n.name, SUM(l.extendedprice * (1 - l.discount)) AS revenue 4 | FROM customer c, orders o, lineitem l, supplier s, nation n, region r 5 | WHERE c.custkey = o.custkey 6 | AND l.orderkey = o.orderkey 7 | AND l.suppkey = s.suppkey 8 | AND c.nationkey = s.nationkey 9 | AND s.nationkey = n.nationkey 10 | AND n.regionkey = r.regionkey 11 | AND r.name = 'ASIA' 12 | AND o.orderdate >= DATE('1994-01-01') 13 | AND o.orderdate < DATE('1994-01-01') + interval '1' year 14 | GROUP BY n.name 15 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query6.sql: -------------------------------------------------------------------------------- 1 | /* Note that this query will fail to produce the correct answer on the OCaml 2 | interpreter due to a floating point error in OCaml itself. Specifically, 3 | in OCaml, 0.06+0.01 <> 0.07. This can not be helped. */ 4 | 5 | INCLUDE './examples/queries/tpch/schemas.sql'; 6 | 7 | SELECT SUM(l.extendedprice*l.discount) AS revenue 8 | FROM lineitem l 9 | WHERE l.shipdate >= DATE('1994-01-01') 10 | AND l.shipdate < DATE('1995-01-01') 11 | AND (l.discount BETWEEN (0.06 - 0.01) AND (0.06 + 0.01)) 12 | AND l.quantity < 24; 13 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query7.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT supp_nation, cust_nation, l_year, SUM(volume) as revenue 4 | FROM ( 5 | SELECT n1.name AS supp_nation, 6 | n2.name AS cust_nation, 7 | (DATE_PART('year', l.shipdate)) AS l_year, 8 | l.extendedprice * (1 - l.discount) AS volume 9 | FROM supplier s, lineitem l, orders o, customer c, nation n1, nation n2 10 | WHERE s.suppkey = l.suppkey 11 | AND o.orderkey = l.orderkey 12 | AND c.custkey = o.custkey 13 | AND s.nationkey = n1.nationkey 14 | AND c.nationkey = n2.nationkey 15 | AND ( 16 | (n1.name = 'FRANCE' and n2.name = 'GERMANY') 17 | OR 18 | (n1.name = 'GERMANY' and n2.name = 'FRANCE') 19 | ) 20 | AND (l.shipdate BETWEEN DATE('1995-01-01') AND DATE('1996-12-31') ) 21 | ) AS shipping 22 | GROUP BY supp_nation, cust_nation, l_year; 23 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query8.sql: -------------------------------------------------------------------------------- 1 | /* We insert a LISTMAX to support incremental computation. For this particular 2 | query, this is safe, because if total.volume == 0, then the numerator of the 3 | division is also guaranteed to be 0. 
*/ 4 | 5 | INCLUDE './examples/queries/tpch/schemas.sql'; 6 | 7 | SELECT total.o_year, 8 | (SUM(CASE total.name WHEN 'BRAZIL' THEN total.volume ELSE 0 END) / 9 | LISTMAX(1, SUM(total.volume))) AS mkt_share 10 | FROM 11 | ( 12 | SELECT n2.name, 13 | DATE_PART('year', o.orderdate) AS o_year, 14 | l.extendedprice * (1-l.discount) AS volume 15 | FROM part p, supplier s, lineitem l, orders o, customer c, nation n1, 16 | nation n2, region r 17 | WHERE p.partkey = l.partkey 18 | AND s.suppkey = l.suppkey 19 | AND l.orderkey = o.orderkey 20 | AND o.custkey = c.custkey 21 | AND c.nationkey = n1.nationkey 22 | AND n1.regionkey = r.regionkey 23 | AND r.name = 'AMERICA' 24 | AND s.nationkey = n2.nationkey 25 | AND (o.orderdate BETWEEN DATE('1995-01-01') AND DATE('1996-12-31')) 26 | AND p.type = 'ECONOMY ANODIZED STEEL' 27 | ) total 28 | GROUP BY total.o_year; 29 | -------------------------------------------------------------------------------- /backend/dbtoaster/examples/queries/tpch/query9.sql: -------------------------------------------------------------------------------- 1 | INCLUDE './examples/queries/tpch/schemas.sql'; 2 | 3 | SELECT nation, o_year, SUM(amount) AS sum_profit 4 | FROM ( 5 | SELECT n.name AS nation, 6 | EXTRACT(year from o.orderdate) AS o_year, 7 | ((l.extendedprice * (1 - l.discount)) - (ps.supplycost * l.quantity)) 8 | AS amount 9 | FROM part p, supplier s, lineitem l, partsupp ps, orders o, nation n 10 | WHERE s.suppkey = l.suppkey 11 | AND ps.suppkey = l.suppkey 12 | AND ps.partkey = l.partkey 13 | AND p.partkey = l.partkey 14 | AND o.orderkey = l.orderkey 15 | AND s.nationkey = n.nationkey 16 | AND (p.name LIKE '%green%') 17 | ) AS profit 18 | GROUP BY nation, o_year; 19 | -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/ScExtra.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef SCEXTRA_H 3 | #define SCEXTRA_H 4 | #include 5 | #include 6 | #include 7 | #include "hpds/pstring.hpp" 8 | #include "program_base.hpp" 9 | #include "GenericEntry.hpp" 10 | #include "Aggregator.hpp" 11 | #ifdef CONCURRENT 12 | #include "types.h" 13 | #include "Version.h" 14 | #endif 15 | using namespace std; 16 | using namespace dbtoaster; 17 | #define EXPAND(x) #x 18 | #define STRINGIFY(x) EXPAND(x) 19 | #define CHECK_STAT(x) cerr << STRINGIFY(x) << " -> "; x.getBucketStats() 20 | #define GET_RUN_STAT(x, f) f << "\"" << STRINGIFY(x) << "\" : ["; x.getSizeStats(f); f << "]"; 21 | #define GET_RUN_STAT_P(x, f)\ 22 | f << "\"" << STRINGIFY(x) << "\" : [";\ 23 | partitions[0].x.getSizeStats(f);\ 24 | for(int i=1; i 5 | 6 | class __attribute__((aligned(64))) SpinLock { 7 | std::atomic_flag lock_; 8 | public: 9 | 10 | SpinLock() { 11 | lock_.clear(); 12 | } 13 | 14 | inline void lock() { 15 | while (lock_.test_and_set(std::memory_order_acquire)); 16 | } 17 | 18 | inline void unlock() { 19 | lock_.clear(std::memory_order_release); 20 | } 21 | 22 | inline bool try_lock() { 23 | return !lock_.test_and_set(std::memory_order_acquire); 24 | } 25 | 26 | }; 27 | 28 | #endif 29 | -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/Transaction.h: -------------------------------------------------------------------------------- 1 | #ifndef TRANSACTION_H 2 | #define TRANSACTION_H 3 | #include "types.h" 4 | 5 | struct ALIGN Transaction { 6 | VBase* undoBufferHead; 7 | PRED* predicateHead; 8 | static TransactionManager& tm; 9 | timestamp startTS; 10 | 
volatile timestamp commitTS; 11 | Transaction * prevCommitted; 12 | uint8_t threadId; 13 | 14 | uint8_t ptype; 15 | Transaction* failedBecauseOf; 16 | Transaction() { 17 | failedBecauseOf = nullptr; 18 | threadId = 0; 19 | commitTS = initCommitTS; 20 | undoBufferHead = nullptr; 21 | predicateHead = nullptr; 22 | prevCommitted = nullptr; 23 | } 24 | 25 | void reset() { 26 | threadId = 0; 27 | commitTS = initCommitTS; 28 | undoBufferHead = nullptr; 29 | predicateHead = nullptr; 30 | prevCommitted = nullptr; 31 | } 32 | }; 33 | 34 | #endif /* TRANSACTION_H */ 35 | 36 | -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/event.cpp: -------------------------------------------------------------------------------- 1 | #include "event.hpp" 2 | 3 | namespace dbtoaster { 4 | 5 | std::string event_name[] = { 6 | std::string("insert"), 7 | std::string("delete"), 8 | std::string("batch_update"), 9 | std::string("system_ready") 10 | }; 11 | bool compare_event_timestamp_order (event_t const & p1, event_t const & p2) 12 | { 13 | return p1.event_order < p2.event_order; 14 | } 15 | } -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/event.hpp: -------------------------------------------------------------------------------- 1 | /* 2 | * event.hpp 3 | * 4 | * Created on: May 8, 2012 5 | * Author: daniel 6 | */ 7 | 8 | #ifndef DBTOASTER_EVENT_H 9 | #define DBTOASTER_EVENT_H 10 | 11 | #include 12 | #include 13 | #include 14 | 15 | namespace dbtoaster { 16 | 17 | typedef int date; 18 | 19 | /** 20 | * Type definitions of data-structures used for representing events. 21 | */ 22 | 23 | enum event_type { 24 | delete_tuple = 0, 25 | insert_tuple, 26 | batch_update, 27 | system_ready_event 28 | }; 29 | 30 | typedef int relation_id_t; 31 | typedef std::vector> event_args_t; 32 | 33 | extern std::string event_name[]; 34 | 35 | /** 36 | * Data-structure used for representing a event consisting of: event type, 37 | * relation identifier corresponding to the stream/table it relates to and 38 | * finally, the tuple associated with event. 
39 | */ 40 | struct event_t 41 | { 42 | event_type type; 43 | relation_id_t id; 44 | unsigned int event_order; 45 | event_args_t data; 46 | 47 | event_t(const event_t& other) 48 | : type(other.type), id(other.id), event_order(other.event_order), data(other.data) 49 | {} 50 | 51 | event_t(event_type t, relation_id_t i, unsigned int ord, event_args_t& d) 52 | : type(t), id(i), event_order(ord), data(d) 53 | {} 54 | }; 55 | 56 | bool compare_event_timestamp_order (event_t const & p1, event_t const & p2); 57 | 58 | struct event_timestamp_order 59 | { 60 | bool operator()(event_t const & p1, event_t const & p2) { 61 | return compare_event_timestamp_order(p1, p2); 62 | } 63 | }; 64 | 65 | 66 | } 67 | 68 | #endif /* DBTOASTER_DBT_EVENT_H */ 69 | -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/hash.hpp: -------------------------------------------------------------------------------- 1 | #include "hpds/macro.hpp" 2 | #include "hpds/KDouble.hpp" 3 | 4 | namespace dbtoaster { 5 | template 6 | FORCE_INLINE void hash_combine(std::size_t& seed, const T& v) 7 | { 8 | seed ^= hash_value(v) + 0x9e3779b9 + (seed<<6) + (seed>>2); 9 | } 10 | 11 | std::hash double_hasher; 12 | std::hash string_hasher; 13 | 14 | union Cast 15 | { 16 | double d; 17 | long l; 18 | }; 19 | volatile Cast c; 20 | inline int float2int( double d ) 21 | { 22 | c.d = d + 6755399441055744.0; 23 | return c.l; 24 | } 25 | 26 | union CastLLD 27 | { 28 | long double d; 29 | long l; 30 | }; 31 | volatile Cast cLLD; 32 | inline int longDouble2int( long double d ) 33 | { 34 | cLLD.d = d + 6755399441055744.0; 35 | return cLLD.l; 36 | } 37 | 38 | template <> 39 | FORCE_INLINE void hash_combine(std::size_t& seed, const int& v) 40 | { 41 | seed ^= v + 0x9e3779b9 + (seed<<6) + (seed>>2); 42 | } 43 | 44 | template <> 45 | FORCE_INLINE void hash_combine(std::size_t& seed, const long& v) 46 | { 47 | seed ^= v + 0x9e3779b9 + (seed<<6) + (seed>>2); 48 | } 49 | 50 | template <> 51 | FORCE_INLINE void hash_combine(std::size_t& seed, const std::string& v) 52 | { 53 | seed ^= string_hasher(v) + 0x9e3779b9 + (seed<<6) + (seed>>2); 54 | } 55 | 56 | template <> 57 | FORCE_INLINE void hash_combine(std::size_t& seed, const float& v) 58 | { 59 | seed ^= float2int(v) + 0x9e3779b9 + (seed<<6) + (seed>>2); 60 | } 61 | 62 | #if DOUBLE_TYPE_SYM == DOUBLE_TYPE_KAHAN_DOUBLE 63 | 64 | template <> 65 | FORCE_INLINE void hash_combine(std::size_t& seed, const KDouble& v) 66 | { 67 | seed ^= float2int(v.sum) + 0x9e3779b9 + (seed<<6) + (seed>>2); 68 | } 69 | 70 | #elif DOUBLE_TYPE_SYM == DOUBLE_TYPE_BOOST 71 | 72 | template <> 73 | FORCE_INLINE void hash_combine(std::size_t& seed, const cpp_dec_float_1000& v) 74 | { 75 | seed ^= static_cast(v) + 0x9e3779b9 + (seed<<6) + (seed>>2); 76 | } 77 | 78 | #elif DOUBLE_TYPE_SYM == DOUBLE_TYPE_STD_LONG_DOUBLE 79 | 80 | template <> 81 | FORCE_INLINE void hash_combine(std::size_t& seed, const long double& v) 82 | { 83 | seed ^= longDouble2int(v) + 0x9e3779b9 + (seed<<6) + (seed>>2); 84 | } 85 | 86 | #else 87 | 88 | template <> 89 | FORCE_INLINE void hash_combine(std::size_t& seed, const double& v) 90 | { 91 | seed ^= float2int(v) + 0x9e3779b9 + (seed<<6) + (seed>>2); 92 | } 93 | 94 | #endif 95 | 96 | template <> 97 | FORCE_INLINE void hash_combine(std::size_t& seed, const char& v) 98 | { 99 | seed ^= v + 0x9e3779b9 + (seed<<6) + (seed>>2); 100 | } 101 | 102 | } -------------------------------------------------------------------------------- 
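The hash_combine specializations in dbt_c++/hash.hpp above all follow the same seed-mixing pattern: XOR the field's hash plus the golden-ratio constant 0x9e3779b9 plus two shifted copies of the seed. The standalone fragment below shows how such a combiner folds the fields of a composite key into one hash value; it is an illustration under the assumption of std::hash for the per-field hashes and a made-up key layout, not the library's exact code path.

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>

// Self-contained illustration of the seed-mixing pattern used by the
// hash_combine specializations above; the key layout is hypothetical.
inline void combine(std::size_t& seed, std::size_t h) {
    seed ^= h + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

struct Key {                     // e.g. a (custkey, name) group-by key
    long custkey;
    std::string name;
};

std::size_t hash_key(const Key& k) {
    std::size_t seed = 0;
    combine(seed, std::hash<long>()(k.custkey));
    combine(seed, std::hash<std::string>()(k.name));
    return seed;
}

int main() {
    Key k{42, "Customer#000000042"};
    std::cout << "hash = " << hash_key(k) << "\n";
    return 0;
}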
/backend/dbtoaster/lib/dbt_c++/hpds/KDouble.cpp: -------------------------------------------------------------------------------- 1 | // The MIT License (MIT) 2 | 3 | // Copyright (c) 2014 Mohammad Dashti 4 | // (www.mdashti.com - mohammad.dashti [at] epfl [dot] ch - mdashti [at] gmail [dot] com) 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in all 14 | // copies or substantial portions of the Software. 15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | 24 | #include "KDouble.hpp" 25 | 26 | namespace dbtoaster { 27 | 28 | size_t precision = 7; // significative numbers (7 to pass r_sumdivgrp, 10 otherwise) 29 | double KDouble::diff_p = std::pow(0.1,precision); 30 | 31 | } 32 | -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/hpds/macro.hpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | //----------------------------------------------------------------------------- 4 | // Microsoft Visual Studio 5 | 6 | #if defined(_MSC_VER) 7 | 8 | #define FORCE_INLINE __forceinline 9 | #define NEVER_INLINE __declspec(noinline) 10 | 11 | //----------------------------------------------------------------------------- 12 | // Other compilers 13 | 14 | #else // defined(_MSC_VER) 15 | 16 | #define FORCE_INLINE inline __attribute__((always_inline)) 17 | #define NEVER_INLINE __attribute__((noinline)) 18 | 19 | #endif // !defined(_MSC_VER) -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/hpds/pstring.cpp: -------------------------------------------------------------------------------- 1 | // The MIT License (MIT) 2 | 3 | // Copyright (c) 2014 Mohammad Dashti 4 | // (www.mdashti.com - mohammad.dashti [at] epfl [dot] ch - mdashti [at] gmail [dot] com) 5 | 6 | // Permission is hereby granted, free of charge, to any person obtaining a copy 7 | // of this software and associated documentation files (the "Software"), to deal 8 | // in the Software without restriction, including without limitation the rights 9 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | // copies of the Software, and to permit persons to whom the Software is 11 | // furnished to do so, subject to the following conditions: 12 | 13 | // The above copyright notice and this permission notice shall be included in all 14 | // copies or substantial portions of the Software. 
15 | 16 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | // SOFTWARE. 23 | 24 | #include "pstring.hpp" 25 | 26 | 27 | //global operators 28 | #ifdef USE_POOL 29 | CharPool<> PString::pool_; 30 | #endif //USE_POOL -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/hpds/pstringops.hpp: -------------------------------------------------------------------------------- 1 | #include "pstring.hpp" 2 | 3 | 4 | inline bool operator==(const char *str1, const PString &str2) 5 | { 6 | return (strcmp(str1,str2.data_) == 0); 7 | } 8 | 9 | inline bool operator!=(const char *str1, const PString &str2) 10 | { 11 | return (strcmp(str1,str2.data_) != 0); 12 | } 13 | 14 | std::ostream& operator<< (std::ostream& o, PString const& str) 15 | { 16 | return o << "\"" << str.data_ << "\""; 17 | } 18 | 19 | FORCE_INLINE size_t hash_value(PString const& str) 20 | { 21 | return MurmurHash2(str.data_,(str.size_-1)*sizeof(char),0); 22 | } -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/libdbtoaster.a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_c++/libdbtoaster.a -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/makefile: -------------------------------------------------------------------------------- 1 | #include ../../makefile.inc 2 | #include ../../makefile.local 3 | 4 | ifndef BOOST_INC_DIR 5 | BOOST_INC_DIR := /usr/local/include 6 | endif 7 | 8 | ifndef BOOST_LIB_DIR 9 | BOOST_LIB_DIR := /usr/local/lib 10 | endif 11 | 12 | HDR_FILES := smhasher/MurmurHash2.hpp \ 13 | hpds/pstring.hpp \ 14 | hpds/KDouble.hpp \ 15 | event.hpp \ 16 | iprogram.hpp \ 17 | program_base.hpp \ 18 | runtime.hpp \ 19 | standard_adaptors.hpp \ 20 | standard_functions.hpp \ 21 | statistics.hpp \ 22 | streams.hpp \ 23 | util.hpp \ 24 | 25 | 26 | SRC_FILES := smhasher/MurmurHash2.cpp \ 27 | hpds/pstring.cpp \ 28 | hpds/KDouble.cpp \ 29 | event.cpp \ 30 | iprogram.cpp \ 31 | program_base.cpp \ 32 | runtime.cpp \ 33 | standard_adaptors.cpp \ 34 | standard_functions.cpp \ 35 | streams.cpp 36 | 37 | 38 | FILES := $(HDR_FILES) $(SRC_FILES) 39 | OBJ_FILES := $(patsubst %.cpp,bin/%.o,$(SRC_FILES)) 40 | 41 | G++ := g++ 42 | LIB_OBJ := libdbtoaster.a 43 | TARGET:=$(shell which $(G++) &>/dev/null && echo $(LIB_OBJ) || echo warn) 44 | 45 | all: $(TARGET) 46 | 47 | warn: $(FILES) 48 | @echo 49 | @echo "------------------------------------------------------------" 50 | @echo "Warning: C++ library will not be built: $(G++) not found!" 
51 | @echo "------------------------------------------------------------" 52 | @echo 53 | 54 | 55 | $(LIB_OBJ) : $(OBJ_FILES) 56 | @echo "Linking $@" 57 | @ar cr $@ $^ 58 | 59 | $(OBJ_FILES) : bin/%.o : %.cpp $(HDR_FILES) 60 | @mkdir -p ./bin 61 | @mkdir -p ./bin/hpds 62 | @mkdir -p ./bin/smhasher 63 | @echo Compiling $< 64 | @$(G++) -I$(BOOST_INC_DIR) -L$(BOOST_LIB_DIR) -Wall -std=c++11 $(CPP_FLAGS) $(patsubst %,-I %,$(CPP_HDR_PATH)) -O3 -o $(patsubst %.cpp,bin/%.o,$<) -c $< 65 | 66 | clean: 67 | rm -rf bin $(LIB_OBJ) 68 | 69 | .PHONY: all clean 70 | -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/mmap/mmap.hpp: -------------------------------------------------------------------------------- 1 | #ifdef SC_GENERATED //using SC 2 | #include "ScExtra.h" 3 | 4 | #ifdef CONCURRENT 5 | #include "cmmap.hpp" // For SC concurrent CPP 6 | #else 7 | #include "mmap2.hpp" // For SC CPP 8 | #endif 9 | #else 10 | #include "mmap1.hpp" //For vanilla CPP 11 | #endif 12 | -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/smhasher/MurmurHash2.hpp: -------------------------------------------------------------------------------- 1 | //----------------------------------------------------------------------------- 2 | // MurmurHash2 was written by Austin Appleby, and is placed in the public 3 | // domain. The author hereby disclaims copyright to this source code. 4 | 5 | #ifndef _MURMURHASH2_H_ 6 | #define _MURMURHASH2_H_ 7 | 8 | //----------------------------------------------------------------------------- 9 | // Platform-specific functions and macros 10 | 11 | // Microsoft Visual Studio 12 | 13 | #if defined(_MSC_VER) && (_MSC_VER < 1600) 14 | 15 | typedef unsigned char uint8_t; 16 | typedef unsigned int uint32_t; 17 | typedef unsigned __int64 uint64_t; 18 | 19 | // Other compilers 20 | 21 | #else // defined(_MSC_VER) 22 | 23 | #include 24 | 25 | #endif // !defined(_MSC_VER) 26 | 27 | //----------------------------------------------------------------------------- 28 | 29 | uint32_t MurmurHash2 ( const void * key, int len, uint32_t seed ); 30 | uint64_t MurmurHash64A ( const void * key, int len, uint64_t seed ); 31 | uint64_t MurmurHash64B ( const void * key, int len, uint64_t seed ); 32 | uint32_t MurmurHash2A ( const void * key, int len, uint32_t seed ); 33 | uint32_t MurmurHashNeutral2 ( const void * key, int len, uint32_t seed ); 34 | uint32_t MurmurHashAligned2 ( const void * key, int len, uint32_t seed ); 35 | 36 | //----------------------------------------------------------------------------- 37 | 38 | #endif // _MURMURHASH2_H_ 39 | 40 | -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/streams.hpp: -------------------------------------------------------------------------------- 1 | #ifndef DBTOASTER_STREAMS_H 2 | #define DBTOASTER_STREAMS_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #include 16 | 17 | #include "runtime.hpp" 18 | #include "event.hpp" 19 | 20 | 21 | namespace dbtoaster { 22 | namespace streams { 23 | 24 | // Adaptor and stream interfaces. 
25 | 26 | struct stream_adaptor 27 | { 28 | stream_adaptor() {} 29 | 30 | virtual void read_adaptor_events(char* data, std::shared_ptr > eventList, std::shared_ptr > eventQue) = 0; 31 | }; 32 | 33 | // Framing 34 | enum frame_type { fixed_size, delimited, variable_size }; 35 | struct frame_descriptor { 36 | frame_type type; 37 | int size; 38 | std::string delimiter; 39 | int off_to_size; 40 | int off_to_end; 41 | frame_descriptor() : type(delimited), size(0), delimiter("\n") {} 42 | frame_descriptor(std::string d) : type(delimited), size(0), delimiter(d) {} 43 | frame_descriptor(int sz) : type(fixed_size), size(sz) {} 44 | frame_descriptor(int os, int oe) 45 | : type(variable_size), size(0), off_to_size(os), off_to_end(oe) 46 | {} 47 | }; 48 | 49 | // Sources 50 | struct source 51 | { 52 | frame_descriptor frame_info; 53 | std::shared_ptr adaptor; 54 | 55 | source(frame_descriptor& f, std::shared_ptr a); 56 | 57 | // Process adaptors in the first stage, accumulating and returning 58 | // stream events 59 | virtual void read_source_events(std::shared_ptr > eventList, std::shared_ptr > eventQue) = 0; 60 | 61 | virtual void init_source() = 0; 62 | }; 63 | 64 | struct dbt_file_source : public source 65 | { 66 | typedef std::ifstream file_stream; 67 | std::shared_ptr source_stream; 68 | 69 | dbt_file_source(const std::string& path, frame_descriptor& f, std::shared_ptr a); 70 | 71 | void read_source_events(std::shared_ptr > eventList, std::shared_ptr > eventQue); 72 | 73 | void init_source() {} 74 | }; 75 | 76 | struct source_multiplexer 77 | { 78 | std::vector > inputs; 79 | std::shared_ptr current; 80 | int step, remaining, block; 81 | std::shared_ptr > eventList; 82 | std::shared_ptr > eventQue; 83 | 84 | source_multiplexer(int seed, int st); 85 | source_multiplexer(int seed, int st, std::set >& s); 86 | 87 | void add_source(std::shared_ptr s); 88 | void remove_source(std::shared_ptr s); 89 | 90 | void init_source(size_t batch_size, size_t parallel, bool is_table); 91 | }; 92 | 93 | } 94 | } 95 | 96 | #endif 97 | 98 | -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/types.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef TYPES_H 3 | #define TYPES_H 4 | 5 | #define ALIGN alignas(64) 6 | 7 | struct ALIGN Transaction; 8 | struct ALIGN TransactionManager; 9 | struct PRED; 10 | struct VBase; 11 | struct ALIGN MBase; 12 | struct ALIGN EBase; 13 | struct Program; 14 | 15 | 16 | typedef uint64_t timestamp; 17 | const timestamp mask = 1LL << 63; 18 | const timestamp nonAccessibleMemory = mask + 100; 19 | const timestamp initCommitTS = mask + 5; 20 | 21 | #define isTempTS(ts) (ts&mask) //to check if timestamp is temporary or a proper commit ts 22 | #define PTRtoTS(t) ((timestamp) t ^ mask) // generate temporary timestamp for transaction from its pointer 23 | #define TStoPTR(ts) ((Transaction*) (ts ^ mask)) //get transaction pointer from its temporary timestamp 24 | 25 | #define aligned_malloc(x) (x*)aligned_alloc(alignof(x), sizeof(x)) 26 | template 27 | FORCE_INLINE bool isMarked(T t) { 28 | return ((size_t) t & mask); 29 | } 30 | 31 | template 32 | FORCE_INLINE T mark(T t) { 33 | return (T) ((size_t) t | mask); 34 | } 35 | 36 | template 37 | FORCE_INLINE T unmark(T t) { 38 | return (T) ((size_t) t & ~mask); 39 | } 40 | 41 | typedef std::bitset<32> col_type; 42 | 43 | enum TransactionReturnStatus : char { 44 | SUCCESS, ABORT, WW_ABORT, COMMIT_FAILURE 45 | }; 46 | 47 | //enum Operation : 
char { 48 | // NOOP, INSERT, DELETE, UPDATE, INVALID 49 | //}; 50 | 51 | enum OperationReturnStatus : char { 52 | OP_SUCCESS, NO_KEY, WW_VALUE 53 | }; 54 | 55 | FORCE_INLINE OperationReturnStatus OR(TransactionReturnStatus op) { 56 | return op == WW_ABORT ? WW_VALUE : NO_KEY; 57 | } 58 | 59 | #define setAffinity(thread_id)\ 60 | cpu_set_t cpuset;\ 61 | CPU_ZERO(&cpuset);\ 62 | CPU_SET(thread_id+1, &cpuset);\ 63 | auto s = sched_setaffinity(0, sizeof (cpu_set_t), &cpuset);\ 64 | if (s != 0)\ 65 | throw std::runtime_error("Cannot set affinity"); 66 | 67 | #define setSched(type)\ 68 | sched_param param;\ 69 | param.__sched_priority = sched_get_priority_max(type);\ 70 | s = sched_setscheduler(0, type, ¶m);\ 71 | if (s != 0)\ 72 | cerr << "Cannot set scheduler" << endl; 73 | 74 | #ifndef NUMTHREADS 75 | #define NUMTHREADS 5 76 | #endif 77 | 78 | #define MAX_IDXES_PER_TBL 3 79 | 80 | 81 | const uint numThreads = NUMTHREADS; 82 | #define EXEC_PROFILE 1 83 | #endif /* TYPES_H */ 84 | 85 | -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_c++/util.hpp: -------------------------------------------------------------------------------- 1 | #ifndef DBTOASTER_UTIL_H 2 | #define DBTOASTER_UTIL_H 3 | 4 | #include 5 | 6 | namespace dbtoaster { 7 | namespace util { 8 | // Misc function object helpers. 9 | struct fold_hash { 10 | typedef std::size_t result_type; 11 | template 12 | std::size_t operator()(std::size_t current, const T& arg) { 13 | hash_combine(current, arg); 14 | return(current); 15 | } 16 | }; 17 | } 18 | 19 | template 20 | std::list singleton(T elem) { 21 | std::list sing; 22 | sing.push_back(elem); 23 | return sing; 24 | } 25 | } 26 | 27 | #endif 28 | -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/akka-actor_2.11-2.5.4.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/akka-actor_2.11-2.5.4.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/config-1.3.1.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/config-1.3.1.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/dbtoaster-2.3-lms.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/dbtoaster-2.3-lms.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/dbtoaster-core_2.11-2.3.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/dbtoaster-core_2.11-2.3.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/dbtoaster-lms_2.11-2.3.jar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/dbtoaster-lms_2.11-2.3.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/dbtoaster-pardis_2.11-2.3.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/dbtoaster-pardis_2.11-2.3.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/dbtoaster-spark_2.11-2.3.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/dbtoaster-spark_2.11-2.3.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/dbtoaster-sstore_2.11-2.3.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/dbtoaster-sstore_2.11-2.3.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/lms_2.11-0.3-SNAPSHOT.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/lms_2.11-0.3-SNAPSHOT.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/sc-pardis-compiler_2.11-0.1.4-SNAPSHOT.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/sc-pardis-compiler_2.11-0.1.4-SNAPSHOT.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/sc-pardis-core-compiler_2.11-0.1.4-SNAPSHOT.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/sc-pardis-core-compiler_2.11-0.1.4-SNAPSHOT.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/sc-pardis-library_2.11-0.1.4-SNAPSHOT.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/sc-pardis-library_2.11-0.1.4-SNAPSHOT.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/sc-pardis-quasi-core_2.11-0.1.4-SNAPSHOT.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/sc-pardis-quasi-core_2.11-0.1.4-SNAPSHOT.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/sc-pardis-quasi_2.11-0.1.4-SNAPSHOT.jar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/sc-pardis-quasi_2.11-0.1.4-SNAPSHOT.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/sc-shared_2.11-0.1.4-SNAPSHOT.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/sc-shared_2.11-0.1.4-SNAPSHOT.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/scala-library-2.11.11.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/scala-library-2.11.11.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/scala-library-2.11.2.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/scala-library-2.11.2.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/scala-parser-combinators_2.11-1.0.4.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/scala-parser-combinators_2.11-1.0.4.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/scala-parser-combinators_2.11-1.0.6.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/scala-parser-combinators_2.11-1.0.6.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/scala-reflect-2.11.11.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/scala-reflect-2.11.11.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/scala-reflect-2.11.2.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/scala-reflect-2.11.2.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/scala-xml_2.11-1.0.6.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/scala-xml_2.11-1.0.6.jar -------------------------------------------------------------------------------- /backend/dbtoaster/lib/dbt_scala/scala-yinyang_2.11-0.2.0.jar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/scala-yinyang_2.11-0.2.0.jar
--------------------------------------------------------------------------------
/backend/dbtoaster/lib/dbt_scala/scalariform_2.11-0.2.3.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/scalariform_2.11-0.2.3.jar
--------------------------------------------------------------------------------
/backend/dbtoaster/lib/dbt_scala/squid-sc-backend-macros_2.11-0.1-SNAPSHOT.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/squid-sc-backend-macros_2.11-0.1-SNAPSHOT.jar
--------------------------------------------------------------------------------
/backend/dbtoaster/lib/dbt_scala/squid-sc-backend_2.11-0.1-SNAPSHOT.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fdbresearch/FIVM/ef0ebd6e72e4ce8abc792b097d4784b0d9771256/backend/dbtoaster/lib/dbt_scala/squid-sc-backend_2.11-0.1-SNAPSHOT.jar
--------------------------------------------------------------------------------
/backend/lib/functions.hpp:
--------------------------------------------------------------------------------
1 | //===----------------------------------------------------------------------===//
2 | //
3 | // Copyright (c) 2010-2017 EPFL DATA Lab (http://data.epfl.ch)
4 | //
5 | // Modified by FDB Research Group, University of Oxford
6 | //
7 | // https://fdbresearch.github.io/
8 | //
9 | //===----------------------------------------------------------------------===//
10 | #ifndef DBTOASTER_FUNCTIONS_HPP
11 | #define DBTOASTER_FUNCTIONS_HPP
12 | 
13 | #include <cstdio>     // sscanf
14 | #include <cstdlib>    // exit
15 | #include <iostream>   // std::cerr
16 | #include <stdexcept>  // std::invalid_argument
17 | #include <string>
18 | #include <regex.h>    // POSIX regex API used by Upreg_match
19 | #include "macro.hpp"
20 | 
21 | using namespace std;
22 | 
23 | namespace dbtoaster {
24 | 
25 | // Conversion helpers
26 | date str2date(const char* c) {
27 |     unsigned int y, m, d;
28 |     if (sscanf(c, "%u-%u-%u", &y, &m, &d) < 3 || m > 12 || d > 31) {
29 |         throw std::invalid_argument(std::string("invalid date string ") + c);
30 |     }
31 |     return (y % 10000) * 10000 + (m % 100) * 100 + (d % 100);
32 | }
33 | 
34 | date str2date(const STRING_TYPE& s) { return str2date(s.c_str()); }
35 | 
36 | date Udate(const char* c) { return str2date(c); }
37 | 
38 | date Udate(const STRING_TYPE& s) { return str2date(s.c_str()); }
39 | 
40 | FORCE_INLINE long Ulistmax(long v1, long v2) { return ((v1 > v2) ? v1 : v2 ); }
41 | 
42 | FORCE_INLINE DOUBLE_TYPE Ulistmax(DOUBLE_TYPE v1, DOUBLE_TYPE v2) { return ((v1 > v2) ? v1 : v2); }
43 | 
44 | FORCE_INLINE DOUBLE_TYPE Udiv(DOUBLE_TYPE x) { return (x != 0.0 ? 1.0 / x : 0.0); }
45 | 
46 | FORCE_INLINE long Udate_year(date d) { return (d / 10000) % 10000; }
47 | 
48 | FORCE_INLINE long Udate_month(date d) { return (d / 100) % 100; }
49 | 
50 | FORCE_INLINE long Udate_day(date d) { return d % 100; }
51 | 
52 | FORCE_INLINE int Upreg_match(const regex_t& preg, const STRING_TYPE& s) {
53 |     int ret = regexec(&preg, s.c_str(), 0, NULL, 0);
54 |     if (ret == 0) return 1;
55 |     else if (ret == REG_NOMATCH) return 0;
56 | 
57 |     std::cerr << "Error evaluating regular expression." << std::endl;
58 |     exit(-1);
59 | }
60 | 
61 | FORCE_INLINE STRING_TYPE Usubstring(const STRING_TYPE &s, uint32_t start, uint32_t len) {
62 |     return s.substr(start, len);
63 | }
64 | }
65 | 
66 | #endif /* DBTOASTER_FUNCTIONS_HPP */
--------------------------------------------------------------------------------
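Note on the conversion helpers above: str2date packs a calendar date into a single integer as year * 10000 + month * 100 + day, which is why Udate_year, Udate_month, and Udate_day reduce to integer division and remainder, and why a later date always compares greater than an earlier one (so predicates such as shipdate <= DATE('1997-09-01') in the TPC-H queries further down become plain integer comparisons). The stand-alone C++ sketch below only illustrates the encoding; str2date_demo is a hypothetical helper that mirrors the arithmetic of functions.hpp, since the header itself additionally relies on the project's STRING_TYPE, DOUBLE_TYPE, and date typedefs.

#include <cassert>

// Mirrors the yyyymmdd packing performed by dbtoaster::str2date (illustration only).
static long str2date_demo(unsigned y, unsigned m, unsigned d) {
    return (y % 10000) * 10000 + (m % 100) * 100 + (d % 100);
}

int main() {
    long d = str2date_demo(1997, 9, 1);   // corresponds to str2date("1997-09-01")
    assert(d == 19970901);
    assert((d / 10000) % 10000 == 1997);  // same arithmetic as Udate_year
    assert((d / 100) % 100 == 9);         // same arithmetic as Udate_month
    assert(d % 100 == 1);                 // same arithmetic as Udate_day
    return 0;
}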
/backend/lib/macro.hpp:
--------------------------------------------------------------------------------
1 | //===----------------------------------------------------------------------===//
2 | //
3 | // Copyright (c) 2010-2017 EPFL DATA Lab (http://data.epfl.ch)
4 | //
5 | // Modified by FDB Research Group, University of Oxford
6 | //
7 | // https://fdbresearch.github.io/
8 | //
9 | //===----------------------------------------------------------------------===//
10 | #ifndef DBTOASTER_MACRO_HPP
11 | #define DBTOASTER_MACRO_HPP
12 | 
13 | #define STRING(s) #s
14 | 
15 | //-----------------------------------------------------------------------------
16 | // Microsoft Visual Studio
17 | 
18 | #if defined(_MSC_VER)
19 | 
20 | typedef unsigned int uint32_t;
21 | 
22 | #define INLINE inline
23 | #define FORCE_INLINE __forceinline
24 | #define NEVER_INLINE __declspec(noinline)
25 | 
26 | //-----------------------------------------------------------------------------
27 | // Other compilers
28 | 
29 | #else // defined(_MSC_VER)
30 | 
31 | #include <stdint.h>   // uint32_t on non-MSVC compilers
32 | 
33 | // Force inlining currently disabled as it introduces bugs in GCC 6.3.0 and 5.4.0
34 | #define FORCE_INLINE inline //__attribute__((always_inline))
35 | #define NEVER_INLINE __attribute__((noinline))
36 | 
37 | #endif // !defined(_MSC_VER)
38 | 
39 | #endif /* DBTOASTER_MACRO_HPP */
--------------------------------------------------------------------------------
/backend/lib/memory.hpp:
--------------------------------------------------------------------------------
1 | //===----------------------------------------------------------------------===//
2 | //
3 | // Copyright (c) 2010-2017 EPFL DATA Lab (http://data.epfl.ch)
4 | //
5 | // Modified by FDB Research Group, University of Oxford
6 | //
7 | // https://fdbresearch.github.io/
8 | //
9 | //===----------------------------------------------------------------------===//
10 | #ifndef DBTOASTER_MEMORY_HPP
11 | #define DBTOASTER_MEMORY_HPP
12 | 
13 | #include <gperftools/heap-profiler.h>   // HeapProfilerStart / HeapProfilerStop / HeapProfilerDump
14 | 
15 | static void startHeapProfiler(const char* prefix) {
16 |     HeapProfilerStart(prefix);
17 | }
18 | 
19 | static void stopHeapProfiler() {
20 |     HeapProfilerStop();
21 | }
22 | 
23 | static void dumpHeapProfile(const char* prefix) {
24 |     HeapProfilerDump(prefix);
25 | }
26 | 
27 | /*
28 | // #include <malloc.h>   // mallinfo / malloc_stats
29 | 
30 | static void display_mallinfo() {
31 |     // IMPORTANT: malloc_stats is used for experiments on Ubuntu but doesn't work on Mac
32 |     malloc_stats();
33 | 
34 |     // Here is an alternative to malloc_stats, which incidentally gives some strange results
35 |     struct mallinfo mi;
36 | 
37 |     mi = mallinfo();
38 | 
39 |     cout << "Total allocated space (uordblks): " << mi.uordblks << " "
40 |          << "Total free space (fordblks): " << mi.fordblks << " "
41 |          << "Total non-mmapped bytes (arena): " << mi.arena << " "
42 |          << "Bytes in mapped regions (hblkhd): " << mi.hblkhd << " "
43 |          << "Max. 
total allocated space (usmblks): " << mi.usmblks; 44 | 45 | // printf("Total non-mmapped bytes (arena): %d\n", mi.arena); 46 | // printf("# of free chunks (ordblks): %d\n", mi.ordblks); 47 | // printf("# of free fastbin blocks (smblks): %d\n", mi.smblks); 48 | // printf("# of mapped regions (hblks): %d\n", mi.hblks); 49 | // printf("Bytes in mapped regions (hblkhd): %d\n", mi.hblkhd); 50 | // printf("Max. total allocated space (usmblks): %d\n", mi.usmblks); 51 | // printf("Free bytes held in fastbins (fsmblks): %d\n", mi.fsmblks); 52 | // printf("Total allocated space (uordblks): %d\n", mi.uordblks); 53 | // printf("Total free space (fordblks): %d\n", mi.fordblks); 54 | // printf("Topmost releasable block (keepcost): %d\n", mi.keepcost); 55 | } 56 | */ 57 | 58 | #endif /* DBTOASTER_MEMORY_HPP */ -------------------------------------------------------------------------------- /bin/compile_frontend.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BIN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | FRONTEND_DIR=${BIN_DIR}/../frontend 5 | 6 | echo "Compiling F-IVM frontend..." 7 | cd ${FRONTEND_DIR} 8 | sbt assembly 9 | cd - 10 | echo "Done." -------------------------------------------------------------------------------- /bin/run_backend.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BIN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | BACKEND_DIR=${BIN_DIR}/../backend 5 | BACKEND_BIN=${BACKEND_DIR}/dbtoaster/bin/dbtoaster 6 | BACKEND_FLAGS="-l cpp -O3 -xhashmap -xruntime -F HEURISTICS-DECOMPOSE-OVER-TABLES" 7 | 8 | ${BACKEND_BIN} ${BACKEND_FLAGS} $@ 9 | -------------------------------------------------------------------------------- /bin/run_frontend.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FIVM_VERSION=1.0 4 | SCALA_VERSION=2.12 5 | 6 | BIN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 7 | FRONTEND_DIR=${BIN_DIR}/../frontend 8 | FRONTEND_BIN=${FRONTEND_DIR}/target/scala-${SCALA_VERSION}/FIVM-assembly-${FIVM_VERSION}.jar 9 | 10 | if [ ! -f ${FRONTEND_BIN} ] 11 | then 12 | ${BIN_DIR}/compile_frontend.sh 13 | fi 14 | 15 | scala $FRONTEND_BIN $@ -------------------------------------------------------------------------------- /examples/data/tpch0.01/nation.csv: -------------------------------------------------------------------------------- 1 | 0|ALGERIA|0| haggle. carefully final deposits detect slyly agai| 2 | 1|ARGENTINA|1|al foxes promise slyly according to the regular accounts. bold requests alon| 3 | 2|BRAZIL|1|y alongside of the pending deposits. carefully special packages are about the ironic forges. slyly special | 4 | 3|CANADA|1|eas hang ironic, silent packages. slyly regular packages are furiously over the tithes. fluffily bold| 5 | 4|EGYPT|4|y above the carefully unusual theodolites. final dugouts are quickly across the furiously regular d| 6 | 5|ETHIOPIA|0|ven packages wake quickly. regu| 7 | 6|FRANCE|3|refully final requests. regular, ironi| 8 | 7|GERMANY|3|l platelets. regular accounts x-ray: unusual, regular acco| 9 | 8|INDIA|2|ss excuses cajole slyly across the packages. deposits print aroun| 10 | 9|INDONESIA|2| slyly express asymptotes. regular deposits haggle slyly. carefully ironic hockey players sleep blithely. carefull| 11 | 10|IRAN|4|efully alongside of the slyly final dependencies. | 12 | 11|IRAQ|4|nic deposits boost atop the quickly final requests? 
quickly regula| 13 | 12|JAPAN|2|ously. final, express gifts cajole a| 14 | 13|JORDAN|4|ic deposits are blithely about the carefully regular pa| 15 | 14|KENYA|0| pending excuses haggle furiously deposits. pending, express pinto beans wake fluffily past t| 16 | 15|MOROCCO|0|rns. blithely bold courts among the closely regular packages use furiously bold platelets?| 17 | 16|MOZAMBIQUE|0|s. ironic, unusual asymptotes wake blithely r| 18 | 17|PERU|1|platelets. blithely pending dependencies use fluffily across the even pinto beans. carefully silent accoun| 19 | 18|CHINA|2|c dependencies. furiously express notornis sleep slyly regular accounts. ideas sleep. depos| 20 | 19|ROMANIA|3|ular asymptotes are about the furious multipliers. express dependencies nag above the ironically ironic account| 21 | 20|SAUDI ARABIA|4|ts. silent requests haggle. closely express packages sleep across the blithely| 22 | 21|VIETNAM|2|hely enticingly express accounts. even, final | 23 | 22|RUSSIA|3| requests against the platelets use never according to the quickly regular pint| 24 | 23|UNITED KINGDOM|3|eans boost carefully special requests. accounts are. carefull| 25 | 24|UNITED STATES|1|y final packages. slow foxes cajole quickly. quickly silent platelets breach ironic accounts. unusual pinto be| 26 | -------------------------------------------------------------------------------- /examples/data/tpch0.01/region.csv: -------------------------------------------------------------------------------- 1 | 0|AFRICA|lar deposits. blithely final packages cajole. regular waters are final requests. regular accounts are according to | 2 | 1|AMERICA|hs use ironic, even requests. s| 3 | 2|ASIA|ges. thinly even pinto beans ca| 4 | 3|EUROPE|ly final courts cajole furiously final excuse| 5 | 4|MIDDLE EAST|uickly special accounts cajole carefully blithely close requests. 
carefully final asymptotes haggle furiousl| 6 | -------------------------------------------------------------------------------- /examples/queries/favorita/favorita.txt: -------------------------------------------------------------------------------- 1 | 18 6 2 | 0 date int -1 {} 0 3 | 1 store int 0 {0} 0 4 | 2 item int 1 {0,1} 0 5 | 3 unit_sales double 2 {0,1,2} 0 6 | 4 onpromotion int 3 {0,1,2,3} 0 7 | 5 oilprize double 0 {0} 0 8 | 6 holiday_type int 0 {0} 0 9 | 7 locale int 6 {0,6} 0 10 | 8 locale_id int 7 {0,6,7} 0 11 | 9 transferred int 8 {0,6,7,8} 0 12 | 10 transactions int 1 {0,1} 0 13 | 11 city int 1 {1} 1 14 | 12 state int 11 {1,11} 0 15 | 13 store_type int 12 {1,11,12} 0 16 | 14 cluster int 13 {1,11,12,13} 0 17 | 15 family int 2 {2} 1 18 | 16 itemclass int 15 {2,15} 0 19 | 17 perishable int 16 {2,15,16} 0 20 | SALES 4 date,store,item,unit_sales,onpromotion 21 | OIL 5 date,oilprize 22 | HOLIDAY 9 date,holiday_type,locale,locale_id,transferred 23 | TRANSACTIONS 10 date,store,transactions 24 | STORES 14 store,city,state,store_type,cluster 25 | ITEMS 17 item,family,itemclass,perishable 26 | -------------------------------------------------------------------------------- /examples/queries/favorita/favorita_regression_categorical.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'favorita.txt'; 2 | 3 | CREATE TYPE RingCofactorMixed 4 | FROM FILE 'ring/ring_cofactor_degree1_categorical.hpp' 5 | WITH PARAMETER SCHEMA (dynamic_min, dynamic_sum, dynamic_sum); 6 | 7 | CREATE STREAM SALES(date int, store int, item int, unit_sales double, onpromotion int) 8 | FROM FILE './datasets/favorita/Sales.csv' LINE DELIMITED CSV(delimiter := ','); 9 | 10 | CREATE STREAM OIL(date int, oilprize double) 11 | FROM FILE './datasets/favorita/Oil.csv' LINE DELIMITED CSV(delimiter := ','); 12 | 13 | CREATE STREAM HOLIDAY(date int, holiday_type int, locale int, locale_id int, transferred int) 14 | FROM FILE './datasets/favorita/Holidays.csv' LINE DELIMITED CSV(delimiter := ','); 15 | 16 | CREATE STREAM TRANSACTIONS(date int, store int, transactions int) 17 | FROM FILE './datasets/favorita/Transactions.csv' LINE DELIMITED CSV(delimiter := ','); 18 | 19 | CREATE STREAM STORES(store int, city int, state int, store_type int, cluster int) 20 | FROM FILE './datasets/favorita/Stores.csv' LINE DELIMITED CSV(delimiter := ','); 21 | 22 | CREATE STREAM ITEMS(item int, family int, itemclass int, perishable int) 23 | FROM FILE './datasets/favorita/Stores.csv' LINE DELIMITED CSV(delimiter := ','); 24 | 25 | SELECT SUM( 26 | [liftCont<0>: RingCofactorMixed<0,1,0>](unit_sales) * 27 | [liftCat<1>: RingCofactorMixed<1,0,1>](onpromotion) * 28 | [liftCat<2>: RingCofactorMixed<2,0,3>](family, itemclass, perishable) * 29 | [liftCont<5>: RingCofactorMixed<5,1,0>](transactions) * 30 | [liftCat<6>: RingCofactorMixed<6,0,4>](city, state, store_type, cluster) * 31 | [liftCont<10>: RingCofactorMixed<10,1,0>](oilprize) * 32 | [liftCat<11>: RingCofactorMixed<11,0,4>](holiday_type, locale, locale_id, transferred) 33 | ) 34 | FROM Sales NATURAL JOIN Oil NATURAL JOIN Holiday NATURAL JOIN Transactions NATURAL JOIN Stores NATURAL JOIN Items; 35 | -------------------------------------------------------------------------------- /examples/queries/favorita/favorita_regression_categorical_general.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'favorita.txt'; 2 | 3 | CREATE TYPE RingCofactorGeneral 4 | FROM FILE 
'ring/ring_cofactor_general.hpp' 5 | WITH PARAMETER SCHEMA (dynamic_min, dynamic_sum, dynamic_sum); 6 | 7 | CREATE STREAM SALES(date int, store int, item int, unit_sales double, onpromotion int) 8 | FROM FILE './datasets/favorita/Sales.csv' LINE DELIMITED CSV(delimiter := ','); 9 | 10 | CREATE STREAM OIL(date int, oilprize double) 11 | FROM FILE './datasets/favorita/Oil.csv' LINE DELIMITED CSV(delimiter := ','); 12 | 13 | CREATE STREAM HOLIDAY(date int, holiday_type int, locale int, locale_id int, transferred int) 14 | FROM FILE './datasets/favorita/Holidays.csv' LINE DELIMITED CSV(delimiter := ','); 15 | 16 | CREATE STREAM TRANSACTIONS(date int, store int, transactions int) 17 | FROM FILE './datasets/favorita/Transactions.csv' LINE DELIMITED CSV(delimiter := ','); 18 | 19 | CREATE STREAM STORES(store int, city int, state int, store_type int, cluster int) 20 | FROM FILE './datasets/favorita/Stores.csv' LINE DELIMITED CSV(delimiter := ','); 21 | 22 | CREATE STREAM ITEMS(item int, family int, itemclass int, perishable int) 23 | FROM FILE './datasets/favorita/Stores.csv' LINE DELIMITED CSV(delimiter := ','); 24 | 25 | SELECT SUM( 26 | [liftCont<0>: RingCofactorGeneral<0,1,0>](unit_sales) * 27 | [liftCat<1>: RingCofactorGeneral<1,0,1>](onpromotion) * 28 | [liftCat<2>: RingCofactorGeneral<2,0,3>](family, itemclass, perishable) * 29 | [liftCont<5>: RingCofactorGeneral<5,1,0>](transactions) * 30 | [liftCat<6>: RingCofactorGeneral<6,0,4>](city, state, store_type, cluster) * 31 | [liftCont<10>: RingCofactorGeneral<10,1,0>](oilprize) * 32 | [liftCat<11>: RingCofactorGeneral<11,0,4>](holiday_type, locale, locale_id, transferred) 33 | ) 34 | FROM Sales NATURAL JOIN Oil NATURAL JOIN Holiday NATURAL JOIN Transactions NATURAL JOIN Stores NATURAL JOIN Items; 35 | -------------------------------------------------------------------------------- /examples/queries/favorita/favorita_regression_continuous.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'favorita.txt'; 2 | 3 | CREATE TYPE RingCofactor 4 | FROM FILE 'ring/ring_cofactor_degree1.hpp' 5 | WITH PARAMETER SCHEMA (dynamic_min, static, dynamic_sum); 6 | 7 | CREATE STREAM SALES(date int, store int, item int, unit_sales double, onpromotion int) 8 | FROM FILE './datasets/favorita/Sales.csv' LINE DELIMITED CSV(delimiter := ','); 9 | 10 | CREATE STREAM OIL(date int, oilprize double) 11 | FROM FILE './datasets/favorita/Oil.csv' LINE DELIMITED CSV(delimiter := ','); 12 | 13 | CREATE STREAM HOLIDAY(date int, holiday_type int, locale int, locale_id int, transferred int) 14 | FROM FILE './datasets/favorita/Holidays.csv' LINE DELIMITED CSV(delimiter := ','); 15 | 16 | CREATE STREAM TRANSACTIONS(date int, store int, transactions int) 17 | FROM FILE './datasets/favorita/Transactions.csv' LINE DELIMITED CSV(delimiter := ','); 18 | 19 | CREATE STREAM STORES(store int, city int, state int, store_type int, cluster int) 20 | FROM FILE './datasets/favorita/Stores.csv' LINE DELIMITED CSV(delimiter := ','); 21 | 22 | CREATE STREAM ITEMS(item int, family int, itemclass int, perishable int) 23 | FROM FILE './datasets/favorita/Stores.csv' LINE DELIMITED CSV(delimiter := ','); 24 | 25 | SELECT SUM( 26 | [lift<0>: RingCofactor<0,double,2>](unit_sales, onpromotion) * 27 | [lift<2>: RingCofactor<2,double,3>](family, itemclass, perishable) * 28 | [lift<5>: RingCofactor<5,double,1>](transactions) * 29 | [lift<6>: RingCofactor<6,double,4>](city, state, store_type, cluster) * 30 | [lift<10>: 
RingCofactor<10,double,1>](oilprize) * 31 | [lift<11>: RingCofactor<11,double,4>](holiday_type, locale, locale_id, transferred) 32 | ) 33 | FROM Sales NATURAL JOIN Oil NATURAL JOIN Holiday NATURAL JOIN Transactions NATURAL JOIN Stores NATURAL JOIN Items; 34 | -------------------------------------------------------------------------------- /examples/queries/favorita/favorita_regression_continuous_general.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'favorita.txt'; 2 | 3 | CREATE TYPE RingCofactorGeneral 4 | FROM FILE 'ring/ring_cofactor_general.hpp' 5 | WITH PARAMETER SCHEMA (dynamic_min, dynamic_sum, dynamic_sum); 6 | 7 | CREATE STREAM SALES(date int, store int, item int, unit_sales double, onpromotion int) 8 | FROM FILE './datasets/favorita/Sales.csv' LINE DELIMITED CSV(delimiter := ','); 9 | 10 | CREATE STREAM OIL(date int, oilprize double) 11 | FROM FILE './datasets/favorita/Oil.csv' LINE DELIMITED CSV(delimiter := ','); 12 | 13 | CREATE STREAM HOLIDAY(date int, holiday_type int, locale int, locale_id int, transferred int) 14 | FROM FILE './datasets/favorita/Holidays.csv' LINE DELIMITED CSV(delimiter := ','); 15 | 16 | CREATE STREAM TRANSACTIONS(date int, store int, transactions int) 17 | FROM FILE './datasets/favorita/Transactions.csv' LINE DELIMITED CSV(delimiter := ','); 18 | 19 | CREATE STREAM STORES(store int, city int, state int, store_type int, cluster int) 20 | FROM FILE './datasets/favorita/Stores.csv' LINE DELIMITED CSV(delimiter := ','); 21 | 22 | CREATE STREAM ITEMS(item int, family int, itemclass int, perishable int) 23 | FROM FILE './datasets/favorita/Stores.csv' LINE DELIMITED CSV(delimiter := ','); 24 | 25 | SELECT SUM( 26 | [liftCont<0>: RingCofactorGeneral<0,2,0>](unit_sales, onpromotion) * 27 | [liftCont<2>: RingCofactorGeneral<2,3,0>](family, itemclass, perishable) * 28 | [liftCont<5>: RingCofactorGeneral<5,1,0>](transactions) * 29 | [liftCont<6>: RingCofactorGeneral<6,4,0>](city, state, store_type, cluster) * 30 | [liftCont<10>: RingCofactorGeneral<10,1,0>](oilprize) * 31 | [liftCont<11>: RingCofactorGeneral<11,4,0>](holiday_type, locale, locale_id, transferred) 32 | ) 33 | FROM Sales NATURAL JOIN Oil NATURAL JOIN Holiday NATURAL JOIN Transactions NATURAL JOIN Stores NATURAL JOIN Items; 34 | -------------------------------------------------------------------------------- /examples/queries/housing/housing.txt: -------------------------------------------------------------------------------- 1 | 27 6 2 | 0 postcode double -1 {} 0 3 | 1 house double 0 {0} 0 4 | 2 flat double 1 {0,1} 0 5 | 3 unknown double 2 {0,1,2} 0 6 | 4 parking double 3 {0,1,2,3} 0 7 | 5 nbbedrooms double 4 {0,1,2,3,4} 0 8 | 6 nbbathrooms double 5 {0,1,2,3,4,5} 0 9 | 7 garden double 6 {0,1,2,3,4,5,6} 0 10 | 8 kitchensize double 7 {0,1,2,3,4,5,6,7} 0 11 | 9 livingarea double 8 {0,1,2,3,4,5,6,7,8} 0 12 | 10 price double 9 {0,1,2,3,4,5,6,7,8,9} 0 13 | 11 sainsburys double 0 {0} 0 14 | 12 tesco double 11 {0,11} 0 15 | 13 ms double 12 {0,11,12} 0 16 | 14 pricerangeshop double 13 {0,11,12,13} 0 17 | 15 openinghoursshop double 14 {0,11,12,13,14} 0 18 | 16 typeeducation double 0 {0} 0 19 | 17 sizeinstitution double 16 {0,16} 0 20 | 18 pricerangerest double 0 {0} 0 21 | 19 openinghoursrest double 18 {0,18} 0 22 | 20 unemployment double 0 {0} 0 23 | 21 nbhospitals double 20 {0,20} 0 24 | 22 crimesperyear double 21 {0,20,21} 0 25 | 23 averagesalary double 22 {0,20,21,22} 0 26 | 24 nbbuslines double 0 {0} 0 27 | 25 nbtrainstations double 24 
{0,24} 0 28 | 26 distancecitycentre double 25 {0,24,25} 0 29 | HOUSE 10 postcode,livingarea,price,nbbedrooms,nbbathrooms,kitchensize,house,flat,unknown,garden,parking 30 | SHOP 15 postcode,openinghoursshop,pricerangeshop,sainsburys,tesco,ms 31 | INSTITUTION 17 postcode,typeeducation,sizeinstitution 32 | RESTAURANT 19 postcode,openinghoursrest,pricerangerest 33 | DEMOGRAPHICS 23 postcode,averagesalary,crimesperyear,unemployment,nbhospitals 34 | TRANSPORT 26 postcode,nbbuslines,nbtrainstations,distancecitycentre -------------------------------------------------------------------------------- /examples/queries/housing/housing_avg.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'housing.txt'; 2 | 3 | CREATE TYPE RingAvg 4 | FROM FILE 'ring/ring_avg.hpp'; 5 | 6 | CREATE STREAM HOUSE(postcode double, livingarea double, price double, nbbedrooms double, nbbathrooms double, kitchensize double, house double, flat double, unknown double, garden double, parking double) 7 | FROM FILE './datasets/housing/House.tbl' LINE DELIMITED CSV(delimiter := '|'); 8 | 9 | CREATE STREAM SHOP(postcode double, openinghoursshop double, pricerangeshop double, sainsburys double, tesco double, ms double) 10 | FROM FILE './datasets/housing/Shop.tbl' LINE DELIMITED CSV(delimiter := '|'); 11 | 12 | CREATE STREAM INSTITUTION(postcode double, typeeducation double, sizeinstitution double) 13 | FROM FILE './datasets/housing/Institution.tbl' LINE DELIMITED CSV(delimiter := '|'); 14 | 15 | CREATE STREAM RESTAURANT(postcode double, openinghoursrest double, pricerangerest double) 16 | FROM FILE './datasets/housing/Restaurant.tbl' LINE DELIMITED CSV(delimiter := '|'); 17 | 18 | CREATE STREAM DEMOGRAPHICS(postcode double, averagesalary double, crimesperyear double, unemployment double, nbhospitals double) 19 | FROM FILE './datasets/housing/Demographics.tbl' LINE DELIMITED CSV(delimiter := '|'); 20 | 21 | CREATE STREAM TRANSPORT(postcode double, nbbuslines double, nbtrainstations double, distancecitycentre double) 22 | FROM FILE './datasets/housing/Transport.tbl' LINE DELIMITED CSV(delimiter := '|'); 23 | 24 | SELECT SUM([lift: RingAvg](HOUSE.postcode * HOUSE.postcode)) 25 | FROM HOUSE NATURAL JOIN SHOP NATURAL JOIN INSTITUTION NATURAL JOIN RESTAURANT NATURAL JOIN DEMOGRAPHICS NATURAL JOIN TRANSPORT; -------------------------------------------------------------------------------- /examples/queries/housing/housing_sum.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'housing.txt'; 2 | 3 | CREATE STREAM HOUSE(postcode double, livingarea double, price double, nbbedrooms double, nbbathrooms double, kitchensize double, house double, flat double, unknown double, garden double, parking double) 4 | FROM FILE './datasets/housing/House.tbl' LINE DELIMITED CSV(delimiter := '|'); 5 | 6 | CREATE STREAM SHOP(postcode double, openinghoursshop double, pricerangeshop double, sainsburys double, tesco double, ms double) 7 | FROM FILE './datasets/housing/Shop.tbl' LINE DELIMITED CSV(delimiter := '|'); 8 | 9 | CREATE STREAM INSTITUTION(postcode double, typeeducation double, sizeinstitution double) 10 | FROM FILE './datasets/housing/Institution.tbl' LINE DELIMITED CSV(delimiter := '|'); 11 | 12 | CREATE STREAM RESTAURANT(postcode double, openinghoursrest double, pricerangerest double) 13 | FROM FILE './datasets/housing/Restaurant.tbl' LINE DELIMITED CSV(delimiter := '|'); 14 | 15 | CREATE STREAM DEMOGRAPHICS(postcode double, 
averagesalary double, crimesperyear double, unemployment double, nbhospitals double) 16 | FROM FILE './datasets/housing/Demographics.tbl' LINE DELIMITED CSV(delimiter := '|'); 17 | 18 | CREATE STREAM TRANSPORT(postcode double, nbbuslines double, nbtrainstations double, distancecitycentre double) 19 | FROM FILE './datasets/housing/Transport.tbl' LINE DELIMITED CSV(delimiter := '|'); 20 | 21 | SELECT SUM(HOUSE.postcode*HOUSE.postcode) 22 | FROM HOUSE NATURAL JOIN SHOP NATURAL JOIN INSTITUTION NATURAL JOIN RESTAURANT NATURAL JOIN DEMOGRAPHICS NATURAL JOIN TRANSPORT; -------------------------------------------------------------------------------- /examples/queries/retailer/retailer.txt: -------------------------------------------------------------------------------- 1 | 43 5 2 | 0 locn int -1 {} 0 3 | 1 dateid int 0 {0} 0 4 | 2 ksn int 1 {0,1} 0 5 | 3 inventoryunits int 2 {0,1,2} 0 6 | 4 zip int 0 {0} 0 7 | 5 rgn_cd int 4 {0,4} 0 8 | 6 clim_zn_nbr int 5 {0,4,5} 0 9 | 7 tot_area_sq_ft int 6 {0,4,5,6} 0 10 | 8 sell_area_sq_ft int 7 {0,4,5,6,7} 0 11 | 9 avghhi int 8 {0,4,5,6,7,8} 0 12 | 10 supertargetdistance double 9 {0,4,5,6,7,8,9} 0 13 | 11 supertargetdrivetime double 10 {0,4,5,6,7,8,9,10} 0 14 | 12 targetdistance double 11 {0,4,5,6,7,8,9,10,11} 0 15 | 13 targetdrivetime double 12 {0,4,5,6,7,8,9,10,11,12} 0 16 | 14 walmartdistance double 13 {0,4,5,6,7,8,9,10,11,12,13} 0 17 | 15 walmartdrivetime double 14 {0,4,5,6,7,8,9,10,11,12,13,14} 0 18 | 16 walmartsupercenterdistance double 15 {0,4,5,6,7,8,9,10,11,12,13,14,15} 0 19 | 17 walmartsupercenterdrivetime double 16 {0,4,5,6,7,8,9,10,11,12,13,14,15,16} 0 20 | 18 population int 4 {4} 1 21 | 19 white int 18 {4,18} 0 22 | 20 asian int 19 {4,18,19} 0 23 | 21 pacific int 20 {4,18,19,20} 0 24 | 22 blackafrican int 21 {4,18,19,20,21} 0 25 | 23 medianage double 22 {4,18,19,20,21,22} 0 26 | 24 occupiedhouseunits int 23 {4,18,19,20,21,22,23} 0 27 | 25 houseunits int 24 {4,18,19,20,21,22,23,24} 0 28 | 26 families int 25 {4,18,19,20,21,22,23,24,25} 0 29 | 27 households int 26 {4,18,19,20,21,22,23,24,25,26} 0 30 | 28 husbwife int 27 {4,18,19,20,21,22,23,24,25,26,27} 0 31 | 29 males int 28 {4,18,19,20,21,22,23,24,25,26,27,28} 0 32 | 30 females int 29 {4,18,19,20,21,22,23,24,25,26,27,28,29} 0 33 | 31 householdschildren int 30 {4,18,19,20,21,22,23,24,25,26,27,28,29,30} 0 34 | 32 hispanic int 31 {4,18,19,20,21,22,23,24,25,26,27,28,29,30,31} 0 35 | 33 subcategory byte 2 {2} 1 36 | 34 category byte 33 {2,33} 0 37 | 35 categoryCluster byte 34 {2,33,34} 0 38 | 36 prize double 35 {2,33,34,35} 0 39 | 37 rain byte 1 {0,1} 0 40 | 38 snow byte 37 {0,1,37} 0 41 | 39 maxtemp int 38 {0,1,37,38} 0 42 | 40 mintemp int 39 {0,1,37,38,39} 0 43 | 41 meanwind double 40 {0,1,37,38,39,40} 0 44 | 42 thunder byte 41 {0,1,37,38,39,40,41} 0 45 | INVENTORY 3 locn,dateid,ksn,inventoryunits 46 | LOCATION 17 locn,zip,rgn_cd,clim_zn_nbr,tot_area_sq_ft,sell_area_sq_ft,avghhi,supertargetdistance,supertargetdrivetime,targetdistance,targetdrivetime,walmartdistance,walmartdrivetime,walmartsupercenterdistance,walmartsupercenterdrivetime 47 | CENSUS 32 zip,population,white,asian,pacific,blackafrican,medianage,occupiedhouseunits,houseunits,families,households,husbwife,males,females,householdschildren,hispanic 48 | ITEM 36 ksn,subcategory,category,categoryCluster,prize 49 | WEATHER 42 locn,dateid,rain,snow,maxtemp,mintemp,meanwind,thunder 50 | -------------------------------------------------------------------------------- /examples/queries/retailer/retailer_regression_categorical.sql: 
-------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'retailer.txt'; 2 | 3 | CREATE TYPE RingCofactorMixed 4 | FROM FILE 'ring/ring_cofactor_degree1_categorical.hpp' 5 | WITH PARAMETER SCHEMA (dynamic_min, dynamic_sum, dynamic_sum); 6 | 7 | CREATE STREAM INVENTORY(locn int, dateid int, ksn int, inventoryunits int) 8 | FROM FILE './datasets/retailer/Inventory.tbl' LINE DELIMITED CSV(delimiter := '|'); 9 | 10 | CREATE STREAM LOCATION(locn int, zip int, rgn_cd int, clim_zn_nbr int, tot_area_sq_ft int, sell_area_sq_ft int, avghhi int, supertargetdistance double, supertargetdrivetime double, targetdistance double, targetdrivetime double, walmartdistance double, walmartdrivetime double, walmartsupercenterdistance double, walmartsupercenterdrivetime double) 11 | FROM FILE './datasets/retailer/Location.tbl' LINE DELIMITED CSV(delimiter := '|'); 12 | 13 | CREATE STREAM CENSUS(zip int, population int, white int, asian int, pacific int, blackafrican int, medianage double, occupiedhouseunits int, houseunits int, families int, households int, husbwife int, males int, females int, householdschildren int, hispanic int) 14 | FROM FILE './datasets/retailer/Census.tbl' LINE DELIMITED CSV(delimiter := '|'); 15 | 16 | CREATE STREAM ITEM(ksn int, subcategory byte, category byte, categoryCluster byte, prize double) 17 | FROM FILE './datasets/retailer/Item.tbl' LINE DELIMITED CSV(delimiter := '|'); 18 | 19 | CREATE STREAM WEATHER(locn int, dateid int, rain byte, snow byte, maxtemp int, mintemp int, meanwind double, thunder byte) 20 | FROM FILE './datasets/retailer/Weather.tbl' LINE DELIMITED CSV(delimiter := '|'); 21 | 22 | SELECT SUM( 23 | [liftCont<0>: RingCofactorMixed<0,1,0>](inventoryunits) * 24 | [liftCont<1>: RingCofactorMixed<1,1,0>](prize) * 25 | [liftCat<2>: RingCofactorMixed<2,0,3>](subcategory, category, categoryCluster) * 26 | [liftCont<5>: RingCofactorMixed<5,3,0>](maxtemp, mintemp, meanwind) * 27 | [liftCat<8>: RingCofactorMixed<8,0,3>](rain, snow, thunder) * 28 | [liftCont<11>: RingCofactorMixed<11,13,0>](rgn_cd, clim_zn_nbr, tot_area_sq_ft, sell_area_sq_ft, avghhi, supertargetdistance, supertargetdrivetime, targetdistance, targetdrivetime, walmartdistance, walmartdrivetime, walmartsupercenterdistance, walmartsupercenterdrivetime) * 29 | [liftCont<24>: RingCofactorMixed<24,15,0>](population, white, asian, pacific, blackafrican, medianage, occupiedhouseunits, houseunits, families, households, husbwife, males, females, householdschildren, hispanic) 30 | ) 31 | FROM INVENTORY NATURAL JOIN LOCATION NATURAL JOIN CENSUS NATURAL JOIN ITEM NATURAL JOIN WEATHER; 32 | -------------------------------------------------------------------------------- /examples/queries/retailer/retailer_regression_categorical_INVENTORY.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'retailer.txt'; 2 | 3 | CREATE TYPE RingCofactorMixed 4 | FROM FILE 'ring/ring_cofactor_degree1_categorical.hpp' 5 | WITH PARAMETER SCHEMA (dynamic_min, dynamic_sum, dynamic_sum); 6 | 7 | CREATE STREAM INVENTORY(locn int, dateid int, ksn int, inventoryunits int) 8 | FROM FILE './datasets/retailer/Inventory.tbl' LINE DELIMITED CSV(delimiter := '|'); 9 | 10 | CREATE TABLE LOCATION(locn int, zip int, rgn_cd int, clim_zn_nbr int, tot_area_sq_ft int, sell_area_sq_ft int, avghhi int, supertargetdistance double, supertargetdrivetime double, targetdistance double, targetdrivetime double, walmartdistance double, walmartdrivetime double, 
walmartsupercenterdistance double, walmartsupercenterdrivetime double) 11 | FROM FILE './datasets/retailer/Location.tbl' LINE DELIMITED CSV(delimiter := '|'); 12 | 13 | CREATE TABLE CENSUS(zip int, population int, white int, asian int, pacific int, blackafrican int, medianage double, occupiedhouseunits int, houseunits int, families int, households int, husbwife int, males int, females int, householdschildren int, hispanic int) 14 | FROM FILE './datasets/retailer/Census.tbl' LINE DELIMITED CSV(delimiter := '|'); 15 | 16 | CREATE TABLE ITEM(ksn int, subcategory byte, category byte, categoryCluster byte, prize double) 17 | FROM FILE './datasets/retailer/Item.tbl' LINE DELIMITED CSV(delimiter := '|'); 18 | 19 | CREATE TABLE WEATHER(locn int, dateid int, rain byte, snow byte, maxtemp int, mintemp int, meanwind double, thunder byte) 20 | FROM FILE './datasets/retailer/Weather.tbl' LINE DELIMITED CSV(delimiter := '|'); 21 | 22 | SELECT SUM( 23 | [liftCont<0>: RingCofactorMixed<0,1,0>](inventoryunits) * 24 | [liftCont<1>: RingCofactorMixed<1,1,0>](prize) * 25 | [liftCat<2>: RingCofactorMixed<2,0,3>](subcategory, category, categoryCluster) * 26 | [liftCont<5>: RingCofactorMixed<5,3,0>](maxtemp, mintemp, meanwind) * 27 | [liftCat<8>: RingCofactorMixed<8,0,3>](rain, snow, thunder) * 28 | [liftCont<11>: RingCofactorMixed<11,13,0>](rgn_cd, clim_zn_nbr, tot_area_sq_ft, sell_area_sq_ft, avghhi, supertargetdistance, supertargetdrivetime, targetdistance, targetdrivetime, walmartdistance, walmartdrivetime, walmartsupercenterdistance, walmartsupercenterdrivetime) * 29 | [liftCont<24>: RingCofactorMixed<24,15,0>](population, white, asian, pacific, blackafrican, medianage, occupiedhouseunits, houseunits, families, households, husbwife, males, females, householdschildren, hispanic) 30 | ) 31 | FROM INVENTORY NATURAL JOIN LOCATION NATURAL JOIN CENSUS NATURAL JOIN ITEM NATURAL JOIN WEATHER; 32 | -------------------------------------------------------------------------------- /examples/queries/retailer/retailer_regression_categorical_general.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'retailer.txt'; 2 | 3 | CREATE TYPE RingCofactorGeneral 4 | FROM FILE 'ring/ring_cofactor_general.hpp' 5 | WITH PARAMETER SCHEMA (dynamic_min, dynamic_sum, dynamic_sum); 6 | 7 | CREATE STREAM INVENTORY(locn int, dateid int, ksn int, inventoryunits int) 8 | FROM FILE './datasets/retailer/Inventory.tbl' LINE DELIMITED CSV(delimiter := '|'); 9 | 10 | CREATE STREAM LOCATION(locn int, zip int, rgn_cd int, clim_zn_nbr int, tot_area_sq_ft int, sell_area_sq_ft int, avghhi int, supertargetdistance double, supertargetdrivetime double, targetdistance double, targetdrivetime double, walmartdistance double, walmartdrivetime double, walmartsupercenterdistance double, walmartsupercenterdrivetime double) 11 | FROM FILE './datasets/retailer/Location.tbl' LINE DELIMITED CSV(delimiter := '|'); 12 | 13 | CREATE STREAM CENSUS(zip int, population int, white int, asian int, pacific int, blackafrican int, medianage double, occupiedhouseunits int, houseunits int, families int, households int, husbwife int, males int, females int, householdschildren int, hispanic int) 14 | FROM FILE './datasets/retailer/Census.tbl' LINE DELIMITED CSV(delimiter := '|'); 15 | 16 | CREATE STREAM ITEM(ksn int, subcategory byte, category byte, categoryCluster byte, prize double) 17 | FROM FILE './datasets/retailer/Item.tbl' LINE DELIMITED CSV(delimiter := '|'); 18 | 19 | CREATE STREAM WEATHER(locn int, dateid 
int, rain byte, snow byte, maxtemp int, mintemp int, meanwind double, thunder byte) 20 | FROM FILE './datasets/retailer/Weather.tbl' LINE DELIMITED CSV(delimiter := '|'); 21 | 22 | SELECT SUM( 23 | [liftCont<0>: RingCofactorGeneral<0,1,0>](inventoryunits) * 24 | [liftCont<1>: RingCofactorGeneral<1,1,0>](prize) * 25 | [liftCat<2>: RingCofactorGeneral<2,0,3>](subcategory, category, categoryCluster) * 26 | [liftCont<5>: RingCofactorGeneral<5,3,0>](maxtemp, mintemp, meanwind) * 27 | [liftCat<8>: RingCofactorGeneral<8,0,3>](rain, snow, thunder) * 28 | [liftCont<11>: RingCofactorGeneral<11,13,0>](rgn_cd, clim_zn_nbr, tot_area_sq_ft, sell_area_sq_ft, avghhi, supertargetdistance, supertargetdrivetime, targetdistance, targetdrivetime, walmartdistance, walmartdrivetime, walmartsupercenterdistance, walmartsupercenterdrivetime) * 29 | [liftCont<24>: RingCofactorGeneral<24,15,0>](population, white, asian, pacific, blackafrican, medianage, occupiedhouseunits, houseunits, families, households, husbwife, males, females, householdschildren, hispanic) 30 | ) 31 | FROM INVENTORY NATURAL JOIN LOCATION NATURAL JOIN CENSUS NATURAL JOIN ITEM NATURAL JOIN WEATHER; 32 | -------------------------------------------------------------------------------- /examples/queries/retailer/retailer_regression_continuous_general.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'retailer.txt'; 2 | 3 | CREATE TYPE RingCofactorGeneral 4 | FROM FILE 'ring/ring_cofactor_general.hpp' 5 | WITH PARAMETER SCHEMA (dynamic_min, dynamic_sum, dynamic_sum); 6 | 7 | CREATE STREAM INVENTORY(locn int, dateid int, ksn int, inventoryunits int) 8 | FROM FILE './datasets/retailer/Inventory.tbl' LINE DELIMITED CSV(delimiter := '|'); 9 | 10 | CREATE STREAM LOCATION(locn int, zip int, rgn_cd int, clim_zn_nbr int, tot_area_sq_ft int, sell_area_sq_ft int, avghhi int, supertargetdistance double, supertargetdrivetime double, targetdistance double, targetdrivetime double, walmartdistance double, walmartdrivetime double, walmartsupercenterdistance double, walmartsupercenterdrivetime double) 11 | FROM FILE './datasets/retailer/Location.tbl' LINE DELIMITED CSV(delimiter := '|'); 12 | 13 | CREATE STREAM CENSUS(zip int, population int, white int, asian int, pacific int, blackafrican int, medianage double, occupiedhouseunits int, houseunits int, families int, households int, husbwife int, males int, females int, householdschildren int, hispanic int) 14 | FROM FILE './datasets/retailer/Census.tbl' LINE DELIMITED CSV(delimiter := '|'); 15 | 16 | CREATE STREAM ITEM(ksn int, subcategory byte, category byte, categoryCluster byte, prize double) 17 | FROM FILE './datasets/retailer/Item.tbl' LINE DELIMITED CSV(delimiter := '|'); 18 | 19 | CREATE STREAM WEATHER(locn int, dateid int, rain byte, snow byte, maxtemp int, mintemp int, meanwind double, thunder byte) 20 | FROM FILE './datasets/retailer/Weather.tbl' LINE DELIMITED CSV(delimiter := '|'); 21 | 22 | SELECT SUM( 23 | [liftcont<0>: RingCofactorGeneral<0,1,0>](inventoryunits) * 24 | [liftcont<1>: RingCofactorGeneral<1,4,0>](subcategory, category, categoryCluster, prize) * 25 | [liftcont<5>: RingCofactorGeneral<5,6,0>](rain, snow, maxtemp, mintemp, meanwind, thunder) * 26 | [liftcont<11>: RingCofactorGeneral<11,13,0>](rgn_cd, clim_zn_nbr, tot_area_sq_ft, sell_area_sq_ft, avghhi, supertargetdistance, supertargetdrivetime, targetdistance, targetdrivetime, walmartdistance, walmartdrivetime, walmartsupercenterdistance, walmartsupercenterdrivetime) * 27 | 
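-- Each lift factor maps one block of attributes into the regression cofactor ring.
-- The template arguments of RingCofactorGeneral appear to encode <offset of the block among all lifted attributes, number of continuous attributes, number of categorical attributes>;
-- e.g. the factor below, <24,15,0>, lifts the 15 census attributes, which start at offset 24 = 1+4+6+13 attributes lifted before them.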
[liftcont<24>: RingCofactorGeneral<24,15,0>](population, white, asian, pacific, blackafrican, medianage, occupiedhouseunits, houseunits, families, households, husbwife, males, females, householdschildren, hispanic) 28 | ) 29 | FROM INVENTORY NATURAL JOIN LOCATION NATURAL JOIN CENSUS NATURAL JOIN ITEM NATURAL JOIN WEATHER; 30 | -------------------------------------------------------------------------------- /examples/queries/retailer/retailer_sum.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'retailer.txt'; 2 | 3 | CREATE STREAM INVENTORY(locn int, dateid int, ksn int, inventoryunits int) 4 | FROM FILE './datasets/retailer/Inventory.tbl' LINE DELIMITED CSV(delimiter := '|'); 5 | 6 | CREATE STREAM LOCATION(locn int, zip int, rgn_cd int, clim_zn_nbr int, tot_area_sq_ft int, sell_area_sq_ft int, avghhi int, supertargetdistance double, supertargetdrivetime double, targetdistance double, targetdrivetime double, walmartdistance double, walmartdrivetime double, walmartsupercenterdistance double, walmartsupercenterdrivetime double) 7 | FROM FILE './datasets/retailer/Location.tbl' LINE DELIMITED CSV(delimiter := '|'); 8 | 9 | CREATE STREAM CENSUS(zip int, population int, white int, asian int, pacific int, blackafrican int, medianage double, occupiedhouseunits int, houseunits int, families int, households int, husbwife int, males int, females int, householdschildren int, hispanic int) 10 | FROM FILE './datasets/retailer/Census.tbl' LINE DELIMITED CSV(delimiter := '|'); 11 | 12 | CREATE STREAM ITEM(ksn int, subcategory byte, category byte, categoryCluster byte, prize double) 13 | FROM FILE './datasets/retailer/Item.tbl' LINE DELIMITED CSV(delimiter := '|'); 14 | 15 | CREATE STREAM WEATHER(locn int, dateid int, rain byte, snow byte, maxtemp int, mintemp int, meanwind double, thunder byte) 16 | FROM FILE './datasets/retailer/Weather.tbl' LINE DELIMITED CSV(delimiter := '|'); 17 | 18 | SELECT SUM(inventoryunits * inventoryunits) 19 | FROM INVENTORY NATURAL JOIN LOCATION NATURAL JOIN CENSUS NATURAL JOIN ITEM NATURAL JOIN WEATHER; 20 | -------------------------------------------------------------------------------- /examples/queries/retailer/retailer_sum_INVENTORY.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'retailer.txt'; 2 | 3 | CREATE STREAM INVENTORY(locn int, dateid int, ksn int, inventoryunits int) 4 | FROM FILE './datasets/retailer/Inventory.tbl' LINE DELIMITED CSV(delimiter := '|'); 5 | 6 | CREATE TABLE LOCATION(locn int, zip int, rgn_cd int, clim_zn_nbr int, tot_area_sq_ft int, sell_area_sq_ft int, avghhi int, supertargetdistance double, supertargetdrivetime double, targetdistance double, targetdrivetime double, walmartdistance double, walmartdrivetime double, walmartsupercenterdistance double, walmartsupercenterdrivetime double) 7 | FROM FILE './datasets/retailer/Location.tbl' LINE DELIMITED CSV(delimiter := '|'); 8 | 9 | CREATE TABLE CENSUS(zip int, population int, white int, asian int, pacific int, blackafrican int, medianage double, occupiedhouseunits int, houseunits int, families int, households int, husbwife int, males int, females int, householdschildren int, hispanic int) 10 | FROM FILE './datasets/retailer/Census.tbl' LINE DELIMITED CSV(delimiter := '|'); 11 | 12 | CREATE TABLE ITEM(ksn int, subcategory byte, category byte, categoryCluster byte, prize double) 13 | FROM FILE './datasets/retailer/Item.tbl' LINE DELIMITED CSV(delimiter := '|'); 14 | 15 | 
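-- Note: in this _INVENTORY variant only INVENTORY is declared as a STREAM; LOCATION, CENSUS, ITEM, and WEATHER (below) are static TABLEs,
-- so the generated program loads them once and performs incremental view maintenance only for updates to INVENTORY.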
CREATE TABLE WEATHER(locn int, dateid int, rain byte, snow byte, maxtemp int, mintemp int, meanwind double, thunder byte) 16 | FROM FILE './datasets/retailer/Weather.tbl' LINE DELIMITED CSV(delimiter := '|'); 17 | 18 | SELECT SUM(inventoryunits * inventoryunits) 19 | FROM INVENTORY NATURAL JOIN LOCATION NATURAL JOIN CENSUS NATURAL JOIN ITEM NATURAL JOIN WEATHER; 20 | -------------------------------------------------------------------------------- /examples/queries/simple/rst.txt: -------------------------------------------------------------------------------- 1 | 5 3 2 | 0 A int -1 {} 0 3 | 1 B float 0 {0} 0 4 | 2 C int 0 {0} 0 5 | 3 D float 2 {2} 0 6 | 4 E float 2 {0,2} 0 7 | 5 extra int 4 {0,2,4} 0 8 | R 1 A,B 9 | T 3 C,D 10 | S 5 A,C,E,extra -------------------------------------------------------------------------------- /examples/queries/simple/rst2.txt: -------------------------------------------------------------------------------- 1 | 5 3 2 | 0 A int -1 {} 0 3 | 1 B int 0 {0} 0 4 | 2 C int 0 {0} 0 5 | 3 D int 2 {2} 0 6 | 4 E int 2 {0,2} 0 7 | R 1 A,B 8 | T 3 C,D 9 | S 4 A,C,E -------------------------------------------------------------------------------- /examples/queries/simple/rst_RT.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'rst.txt'; 2 | 3 | CREATE STREAM R(A int, B float) FROM FILE 'R.dat' LINE DELIMITED CSV; 4 | CREATE TABLE S(A int, C int, E float, extra int) FROM FILE 'S.dat' LINE DELIMITED CSV; 5 | CREATE STREAM T(C int, D float) FROM FILE 'T.dat' LINE DELIMITED CSV; 6 | 7 | SELECT SUM(A*B*C*D) FROM R NATURAL JOIN S NATURAL JOIN T; -------------------------------------------------------------------------------- /examples/queries/simple/rst_RT_complex.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'rst.txt'; 2 | 3 | CREATE STREAM R(A int, B float) FROM FILE 'R.dat' LINE DELIMITED CSV; 4 | CREATE TABLE S(A int, C int, E float, extra int) FROM FILE 'S.dat' LINE DELIMITED CSV; 5 | CREATE STREAM T(C int, D float) FROM FILE 'T.dat' LINE DELIMITED CSV; 6 | 7 | SELECT SUM((A+B*extra)*B*D) FROM R NATURAL JOIN S NATURAL JOIN T; -------------------------------------------------------------------------------- /examples/queries/simple/rst_datacube.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'rst2.txt'; 2 | 3 | CREATE TYPE DataCube 4 | FROM FILE 'ring/ring_datacube.hpp' 5 | WITH PARAMETER SCHEMA (dynamic_concat); 6 | 7 | CREATE STREAM R(A int, B int) FROM FILE 'R.tbl' LINE DELIMITED CSV; 8 | CREATE STREAM S(A int, C int, E int) FROM FILE 'S.tbl' LINE DELIMITED CSV; 9 | CREATE STREAM T(C int, D int) FROM FILE 'T.tbl' LINE DELIMITED CSV; 10 | 11 | SELECT SUM( 12 | [liftgroupby<0>: DataCube<[0,int]>](A) * 13 | [liftgroupby<1>: DataCube<[1,int]>](C) * 14 | [liftgroupby<2>: DataCube<[2,int]>](D) 15 | ) 16 | FROM R NATURAL JOIN S NATURAL JOIN T; -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_FQ1.txt: -------------------------------------------------------------------------------- 1 | /* 2 | DTREE FOR FQ1 3 | --- 4 | OK 5 | / \ 6 | PK Orders(...) 7 | / \ 8 | SK Part(...) 9 | / \ 10 | Lineitem(...) PartSupp(...) 
11 | */ 12 | 13 | 35 4 14 | 0 orderkey int -1 {} 0 15 | 1 partkey int 0 {0} 0 16 | 2 suppkey int 1 {0,1} 0 17 | 3 l_linenumber int 2 {0,1,2} 0 18 | 4 l_quantity double 3 {0,1,2,3} 0 19 | 5 l_extendedprice double 4 {0,1,2,3,4} 0 20 | 6 l_discount double 5 {0,1,2,3,4,5} 0 21 | 7 l_tax double 6 {0,1,2,3,4,5,6} 0 22 | 8 l_returnflag char 7 {0,1,2,3,4,5,6,7} 0 23 | 9 l_linestatus char 8 {0,1,2,3,4,5,6,7,8} 0 24 | 10 l_shipdate date 9 {0,1,2,3,4,5,6,7,8,9} 0 25 | 11 l_commitdate date 10 {0,1,2,3,4,5,6,7,8,9,10} 0 26 | 12 l_receiptdate date 11 {0,1,2,3,4,5,6,7,8,9,10,11} 0 27 | 13 l_shipinstruct string 12 {0,1,2,3,4,5,6,7,8,9,10,11,12} 0 28 | 14 l_shipmode string 13 {0,1,2,3,4,5,6,7,8,9,10,11,12,13} 0 29 | 15 l_comment string 14 {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14} 0 30 | 16 ps_availqty int 2 {1,2} 0 31 | 17 ps_supplycost double 16 {1,2,16} 0 32 | 18 ps_comment string 17 {1,2,16,17} 0 33 | 19 p_name string 1 {1} 0 34 | 20 p_mfgr string 19 {1,19} 0 35 | 21 p_brand string 20 {1,19,20} 0 36 | 22 p_type string 21 {1,19,20,21} 0 37 | 23 p_size int 22 {1,19,20,21,22} 0 38 | 24 p_container string 23 {1,19,20,21,22,23} 0 39 | 25 p_retailprice double 24 {1,19,20,21,22,23,24} 0 40 | 26 p_comment string 25 {1,19,20,21,22,23,24,25} 0 41 | 27 o_custkey int 0 {0} 0 42 | 28 o_orderstatus char 27 {0,27} 0 43 | 29 o_totalprice double 28 {0,27,28} 0 44 | 30 o_orderdate date 29 {0,27,28,29} 0 45 | 31 o_shippriority int 30 {0,27,28,29,30} 0 46 | 32 o_orderpriority string 31 {0,27,28,29,30,31} 0 47 | 33 o_clerk string 32 {0,27,28,29,30,31,32} 0 48 | 34 o_comment string 33 {0,27,28,29,30,31,32,33} 0 49 | 50 | LINEITEM 15 orderkey, partkey, suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment 51 | PARTSUPP 18 partkey, suppkey, ps_availqty, ps_supplycost, ps_comment 52 | PART 26 partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment 53 | ORDERS 34 orderkey, o_custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_clerk, o_shippriority, o_comment -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_FQ2.txt: -------------------------------------------------------------------------------- 1 | /* 2 | DTREE FOR FQ2 3 | --- 4 | OK 5 | / \ 6 | PK CK 7 | / \ / \ 8 | Lineitem(...) Part(...) NK Orders(...) 9 | / \ 10 | Customer(...) Nation(...) 
11 | */ 12 | 13 | 42 5 14 | 0 orderkey int -1 {} 0 15 | 1 partkey int 0 {0} 0 16 | 2 l_suppkey int 1 {0,1} 0 17 | 3 l_linenumber int 2 {0,1,2} 0 18 | 4 l_quantity double 3 {0,1,2,3} 0 19 | 5 l_extendedprice double 4 {0,1,2,3,4} 0 20 | 6 l_discount double 5 {0,1,2,3,4,5} 0 21 | 7 l_tax double 6 {0,1,2,3,4,5,6} 0 22 | 8 l_returnflag char 7 {0,1,2,3,4,5,6,7} 0 23 | 9 l_linestatus char 8 {0,1,2,3,4,5,6,7,8} 0 24 | 10 l_shipdate date 9 {0,1,2,3,4,5,6,7,8,9} 0 25 | 11 l_commitdate date 10 {0,1,2,3,4,5,6,7,8,9,10} 0 26 | 12 l_receiptdate date 11 {0,1,2,3,4,5,6,7,8,9,10,11} 0 27 | 13 l_shipinstruct string 12 {0,1,2,3,4,5,6,7,8,9,10,11,12} 0 28 | 14 l_shipmode string 13 {0,1,2,3,4,5,6,7,8,9,10,11,12,13} 0 29 | 15 l_comment string 14 {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14} 0 30 | 16 p_name string 1 {1} 0 31 | 17 p_mfgr string 16 {1,16} 0 32 | 18 p_brand string 17 {1,16,17} 0 33 | 19 p_type string 18 {1,16,17,18} 0 34 | 20 p_size int 19 {1,16,17,18,19} 0 35 | 21 p_container string 20 {1,16,17,18,19,20} 0 36 | 22 p_retailprice double 21 {1,16,17,18,19,20,21} 0 37 | 23 p_comment string 22 {1,16,17,18,19,20,21,22} 0 38 | 24 custkey int 0 {0} 0 39 | 25 nationkey int 24 {24} 0 40 | 26 c_name string 25 {24,25} 0 41 | 27 c_address string 26 {24,25,26} 0 42 | 28 c_phone string 27 {24,25,26,27} 0 43 | 29 c_acctbal double 28 {24,25,26,27,28} 0 44 | 30 c_mktsegment string 29 {24,25,26,27,28,29} 0 45 | 31 c_comment string 30 {24,25,26,27,28,29,30} 0 46 | 32 n_name string 25 {25} 0 47 | 33 n_regionkey int 32 {25,32} 0 48 | 34 n_comment string 33 {25,32,33} 0 49 | 35 o_orderstatus char 24 {0,24} 0 50 | 36 o_totalprice double 35 {0,24,35} 0 51 | 37 o_orderdate date 36 {0,24,35,36} 0 52 | 38 o_shippriority int 37 {0,24,35,36,37} 0 53 | 39 o_orderpriority string 38 {0,24,35,36,37,38} 0 54 | 40 o_clerk string 39 {0,24,35,36,37,38,39} 0 55 | 41 o_comment string 40 {0,24,35,36,37,38,39,40} 0 56 | 57 | LINEITEM 15 orderkey, partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment 58 | PART 23 partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment 59 | CUSTOMER 31 custkey, c_name, c_address, nationkey, c_phone, c_acctbal, c_mktsegment, c_comment 60 | NATION 34 nationkey, n_name, n_regionkey, n_comment 61 | ORDERS 41 orderkey, custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_clerk, o_shippriority, o_comment -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_FQ3.txt: -------------------------------------------------------------------------------- 1 | /* 2 | DTREE FOR FQ3 3 | --- 4 | OK 5 | / \ 6 | PK CK 7 | / / \ 8 | SK C(...) O(...) 9 | / | \ 10 | LI(...) PS(...) S(...) 
11 | */ 12 | 13 | 40 5 14 | 0 orderkey int -1 {} 0 15 | 1 partkey int 0 {0} 0 16 | 2 suppkey int 1 {0,1} 0 17 | 3 l_linenumber int 2 {0,1,2} 0 18 | 4 l_quantity double 3 {0,1,2,3} 0 19 | 5 l_extendedprice double 4 {0,1,2,3,4} 0 20 | 6 l_discount double 5 {0,1,2,3,4,5} 0 21 | 7 l_tax double 6 {0,1,2,3,4,5,6} 0 22 | 8 l_returnflag char 7 {0,1,2,3,4,5,6,7} 0 23 | 9 l_linestatus char 8 {0,1,2,3,4,5,6,7,8} 0 24 | 10 l_shipdate date 9 {0,1,2,3,4,5,6,7,8,9} 0 25 | 11 l_commitdate date 10 {0,1,2,3,4,5,6,7,8,9,10} 0 26 | 12 l_receiptdate date 11 {0,1,2,3,4,5,6,7,8,9,10,11} 0 27 | 13 l_shipinstruct string 12 {0,1,2,3,4,5,6,7,8,9,10,11,12} 0 28 | 14 l_shipmode string 13 {0,1,2,3,4,5,6,7,8,9,10,11,12,13} 0 29 | 15 l_comment string 14 {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14} 0 30 | 16 ps_availqty int 2 {1,2} 0 31 | 17 ps_supplycost double 16 {1,2,16} 0 32 | 18 ps_comment string 17 {1,2,16,17} 0 33 | 19 s_name string 2 {2} 0 34 | 20 s_address string 19 {2,19} 0 35 | 21 s_nationkey int 20 {2,19,20} 0 36 | 22 s_phone string 21 {2,19,20,21} 0 37 | 23 s_acctbal double 22 {2,19,20,21,22} 0 38 | 24 s_comment string 23 {2,19,20,21,22,23} 0 39 | 25 custkey int 0 {0} 0 40 | 26 c_name string 25 {25} 0 41 | 27 c_address string 26 {25,26} 0 42 | 28 c_nationkey int 27 {25,26,27} 0 43 | 29 c_phone string 28 {25,26,27,28} 0 44 | 30 c_acctbal double 29 {25,26,27,28,29} 0 45 | 31 c_mktsegment string 30 {25,26,27,28,29,30} 0 46 | 32 c_comment string 31 {25,26,27,28,29,30,31} 0 47 | 33 o_orderstatus char 25 {0,25} 0 48 | 34 o_totalprice double 33 {0,25,33} 0 49 | 35 o_orderdate date 34 {0,25,33,34} 0 50 | 36 o_shippriority int 35 {0,25,33,34,35} 0 51 | 37 o_orderpriority string 36 {0,25,33,34,35,36} 0 52 | 38 o_clerk string 37 {0,25,33,34,35,36,37} 0 53 | 39 o_comment string 38 {0,25,33,34,35,36,37,38} 0 54 | 55 | LINEITEM 15 orderkey, partkey, suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment 56 | PARTSUPP 18 partkey, suppkey, ps_availqty, ps_supplycost, ps_comment 57 | SUPPLIER 24 suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment 58 | CUSTOMER 32 custkey, c_name, c_address, c_nationkey, c_phone, c_acctbal, c_mktsegment, c_comment 59 | ORDERS 39 orderkey, custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_clerk, o_shippriority, o_comment -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_FQ4.txt: -------------------------------------------------------------------------------- 1 | /* 2 | DTREE FOR FQ4 3 | --- 4 | SK 5 | / \ 6 | PK S(...) 7 | / \ 8 | LI(...) PS(...) 
9 | */ 10 | 11 | 25 3 12 | 0 suppkey int -1 {} 0 13 | 1 partkey int 0 {0} 0 14 | 2 l_orderkey int 1 {0,1} 0 15 | 3 l_linenumber int 2 {0,1,2} 0 16 | 4 l_quantity double 3 {0,1,2,3} 0 17 | 5 l_extendedprice double 4 {0,1,2,3,4} 0 18 | 6 l_discount double 5 {0,1,2,3,4,5} 0 19 | 7 l_tax double 6 {0,1,2,3,4,5,6} 0 20 | 8 l_returnflag char 7 {0,1,2,3,4,5,6,7} 0 21 | 9 l_linestatus char 8 {0,1,2,3,4,5,6,7,8} 0 22 | 10 l_shipdate date 9 {0,1,2,3,4,5,6,7,8,9} 0 23 | 11 l_commitdate date 10 {0,1,2,3,4,5,6,7,8,9,10} 0 24 | 12 l_receiptdate date 11 {0,1,2,3,4,5,6,7,8,9,10,11} 0 25 | 13 l_shipinstruct string 12 {0,1,2,3,4,5,6,7,8,9,10,11,12} 0 26 | 14 l_shipmode string 13 {0,1,2,3,4,5,6,7,8,9,10,11,12,13} 0 27 | 15 l_comment string 14 {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14} 0 28 | 16 ps_availqty int 1 {0,1} 0 29 | 17 ps_supplycost double 16 {0,1,16} 0 30 | 18 ps_comment string 17 {0,1,16,17} 0 31 | 19 s_name string 0 {0} 0 32 | 20 s_address string 19 {0,19} 0 33 | 21 s_nationkey int 20 {0,19,20} 0 34 | 22 s_phone string 21 {0,19,20,21} 0 35 | 23 s_acctbal double 22 {0,19,20,21,22} 0 36 | 24 s_comment string 23 {0,19,20,21,22,23} 0 37 | 38 | LINEITEM 15 l_orderkey, partkey, suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment 39 | PARTSUPP 18 partkey, suppkey, ps_availqty, ps_supplycost, ps_comment 40 | SUPPLIER 24 suppkey, s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_FQ4_factorized_join.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'tpch_FQ4.txt'; 2 | 3 | CREATE DISTRIBUTED TYPE RingFactorizedRelation 4 | FROM FILE 'ring/ring_factorized.hpp' 5 | WITH PARAMETER SCHEMA (dynamic_min); 6 | 7 | CREATE STREAM LINEITEM ( 8 | l_orderkey INT, 9 | partkey INT, 10 | suppkey INT, 11 | l_linenumber INT, 12 | l_quantity DECIMAL, 13 | l_extendedprice DECIMAL, 14 | l_discount DECIMAL, 15 | l_tax DECIMAL, 16 | l_returnflag CHAR(1), 17 | l_linestatus CHAR(1), 18 | l_shipdate DATE, 19 | l_commitdate DATE, 20 | l_receiptdate DATE, 21 | l_shipinstruct CHAR(25), 22 | l_shipmode CHAR(10), 23 | l_comment VARCHAR(44) 24 | ) 25 | FROM FILE './datasets/tpch/lineitem.csv' 26 | LINE DELIMITED CSV (delimiter := '|'); 27 | 28 | CREATE STREAM PARTSUPP ( 29 | partkey INT, 30 | suppkey INT, 31 | ps_availqty INT, 32 | ps_supplycost DECIMAL, 33 | ps_comment VARCHAR(199) 34 | ) 35 | FROM FILE './datasets/tpch/partsupp.csv' 36 | LINE DELIMITED CSV (delimiter := '|'); 37 | 38 | CREATE STREAM SUPPLIER ( 39 | suppkey INT, 40 | s_name CHAR(25), 41 | s_address VARCHAR(40), 42 | s_nationkey INT, 43 | s_phone CHAR(15), 44 | s_acctbal DECIMAL, 45 | s_comment VARCHAR(101) 46 | ) 47 | FROM FILE './datasets/tpch/supplier.csv' 48 | LINE DELIMITED CSV (delimiter := '|'); 49 | 50 | SELECT SUM( 51 | [lift<0>: RingFactorizedRelation<[0, INT]>](suppkey) * 52 | [lift<1>: RingFactorizedRelation<[1, INT]>](partkey) * 53 | [lift<2>: RingFactorizedRelation<[2, INT, INT, DECIMAL, DECIMAL, DECIMAL, DECIMAL, CHAR(1), CHAR(1), DATE, DATE, DATE, CHAR(25), CHAR(10), VARCHAR(44)]>](l_orderkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) * 54 | [lift<16>: RingFactorizedRelation<[16, INT, DECIMAL, VARCHAR(199)]>](ps_availqty, 
ps_supplycost, ps_comment) * 55 | [lift<19>: RingFactorizedRelation<[19, CHAR(25), VARCHAR(40), INT, CHAR(15), DECIMAL, VARCHAR(101)]>](s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment) 56 | ) 57 | FROM LINEITEM NATURAL JOIN PARTSUPP NATURAL JOIN SUPPLIER; -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_FQ4_listing_join.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'tpch_FQ4.txt'; 2 | 3 | CREATE TYPE RingRelation 4 | FROM FILE 'ring/ring_relational_opt.hpp' 5 | WITH PARAMETER SCHEMA (dynamic_concat); 6 | 7 | CREATE STREAM LINEITEM ( 8 | l_orderkey INT, 9 | partkey INT, 10 | suppkey INT, 11 | l_linenumber INT, 12 | l_quantity DECIMAL, 13 | l_extendedprice DECIMAL, 14 | l_discount DECIMAL, 15 | l_tax DECIMAL, 16 | l_returnflag CHAR(1), 17 | l_linestatus CHAR(1), 18 | l_shipdate DATE, 19 | l_commitdate DATE, 20 | l_receiptdate DATE, 21 | l_shipinstruct CHAR(25), 22 | l_shipmode CHAR(10), 23 | l_comment VARCHAR(44) 24 | ) 25 | FROM FILE './datasets/tpch/lineitem.csv' 26 | LINE DELIMITED CSV (delimiter := '|'); 27 | 28 | CREATE STREAM PARTSUPP ( 29 | partkey INT, 30 | suppkey INT, 31 | ps_availqty INT, 32 | ps_supplycost DECIMAL, 33 | ps_comment VARCHAR(199) 34 | ) 35 | FROM FILE './datasets/tpch/partsupp.csv' 36 | LINE DELIMITED CSV (delimiter := '|'); 37 | 38 | CREATE STREAM SUPPLIER ( 39 | suppkey INT, 40 | s_name CHAR(25), 41 | s_address VARCHAR(40), 42 | s_nationkey INT, 43 | s_phone CHAR(15), 44 | s_acctbal DECIMAL, 45 | s_comment VARCHAR(101) 46 | ) 47 | FROM FILE './datasets/tpch/supplier.csv' 48 | LINE DELIMITED CSV (delimiter := '|'); 49 | 50 | SELECT SUM( 51 | [lift<0>: RingRelation<[0, INT]>](suppkey) * 52 | [lift<1>: RingRelation<[1, INT]>](partkey) * 53 | [lift<2>: RingRelation<[2, INT, INT, DECIMAL, DECIMAL, DECIMAL, DECIMAL, CHAR(1), CHAR(1), DATE, DATE, DATE, CHAR(25), CHAR(10), VARCHAR(44)]>](l_orderkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment) * 54 | [lift<16>: RingRelation<[16, INT, DECIMAL, VARCHAR(199)]>](ps_availqty, ps_supplycost, ps_comment) * 55 | [lift<19>: RingRelation<[19, CHAR(25), VARCHAR(40), INT, CHAR(15), DECIMAL, VARCHAR(101)]>](s_name, s_address, s_nationkey, s_phone, s_acctbal, s_comment) 56 | ) 57 | FROM LINEITEM NATURAL JOIN PARTSUPP NATURAL JOIN SUPPLIER; -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_query01.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'tpch_query01.txt'; 2 | 3 | CREATE TYPE TPCH1Payload 4 | FROM FILE 'ring/ring_tpch_query01.hpp'; 5 | 6 | CREATE STREAM LINEITEM ( 7 | orderkey INT, 8 | partkey INT, 9 | suppkey INT, 10 | linenumber INT, 11 | quantity DECIMAL, 12 | extendedprice DECIMAL, 13 | discount DECIMAL, 14 | tax DECIMAL, 15 | returnflag CHAR(1), 16 | linestatus CHAR(1), 17 | shipdate DATE, 18 | commitdate DATE, 19 | receiptdate DATE, 20 | shipinstruct CHAR(25), 21 | shipmode CHAR(10), 22 | comment VARCHAR(44) 23 | ) 24 | FROM FILE './datasets/tpch/lineitem.csv' 25 | LINE DELIMITED CSV (delimiter := '|'); 26 | 27 | SELECT returnflag, linestatus, 28 | SUM([lift: TPCH1Payload]( 29 | quantity, 30 | extendedprice, 31 | extendedprice * (1-discount), 32 | extendedprice * (1-discount)*(1+tax), 33 | discount 34 | )) 35 | FROM lineitem 36 | WHERE 
shipdate <= DATE('1997-09-01') 37 | GROUP BY returnflag, linestatus; -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_query01.txt: -------------------------------------------------------------------------------- 1 | 16 1 2 | 0 returnflag char -1 {} 0 3 | 1 linestatus char 0 {0} 0 4 | 2 orderkey int 1 {0,1} 0 5 | 3 partkey int 2 {0,1,2} 0 6 | 4 suppkey int 3 {0,1,2,3} 0 7 | 5 linenumber int 4 {0,1,2,3,4} 0 8 | 6 quantity double 5 {0,1,2,3,4,5} 0 9 | 7 extendedprice double 6 {0,1,2,3,4,5,6} 0 10 | 8 discount double 7 {0,1,2,3,4,5,6,7} 0 11 | 9 tax double 8 {0,1,2,3,4,5,6,7,8} 0 12 | 10 shipdate date 9 {0,1,2,3,4,5,6,7,8,9} 0 13 | 11 commitdate date 10 {0,1,2,3,4,5,6,7,8,9,10} 0 14 | 12 receiptdate date 11 {0,1,2,3,4,5,6,7,8,9,10,11} 0 15 | 13 shipinstruct string 12 {0,1,2,3,4,5,6,7,8,9,10,11,12} 0 16 | 14 shipmode string 13 {0,1,2,3,4,5,6,7,8,9,10,11,12,13} 0 17 | 15 comment string 14 {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14} 0 18 | LINEITEM 15 orderkey,partkey,suppkey,linenumber,quantity,extendedprice,discount,tax,returnflag,linestatus,shipdate,commitdate,receiptdate,shipinstruct,shipmode,comment -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_query03.sql: -------------------------------------------------------------------------------- 1 | -- Unsupported features for this query 2 | -- ORDER BY (ignored) 3 | -- LIMIT (ignored) 4 | 5 | IMPORT DTREE FROM FILE 'tpch_query03.txt'; 6 | 7 | CREATE STREAM LINEITEM ( 8 | orderkey INT, 9 | l_partkey INT, 10 | l_suppkey INT, 11 | l_linenumber INT, 12 | l_quantity DECIMAL, 13 | l_extendedprice DECIMAL, 14 | l_discount DECIMAL, 15 | l_tax DECIMAL, 16 | l_returnflag CHAR(1), 17 | l_linestatus CHAR(1), 18 | l_shipdate DATE, 19 | l_commitdate DATE, 20 | l_receiptdate DATE, 21 | l_shipinstruct CHAR(25), 22 | l_shipmode CHAR(10), 23 | l_comment VARCHAR(44) 24 | ) 25 | FROM FILE '../../experiments/data/tpch/standard/lineitem.csv' 26 | LINE DELIMITED CSV (delimiter := '|'); 27 | 28 | CREATE STREAM ORDERS ( 29 | orderkey INT, 30 | custkey INT, 31 | o_orderstatus CHAR(1), 32 | o_totalprice DECIMAL, 33 | o_orderdate DATE, 34 | o_orderpriority CHAR(15), 35 | o_clerk CHAR(15), 36 | o_shippriority INT, 37 | o_comment VARCHAR(79) 38 | ) 39 | FROM FILE '../../experiments/data/tpch/standard/orders.csv' 40 | LINE DELIMITED CSV (delimiter := '|'); 41 | 42 | CREATE STREAM CUSTOMER ( 43 | custkey INT, 44 | c_name VARCHAR(25), 45 | c_address VARCHAR(40), 46 | c_nationkey INT, 47 | c_phone CHAR(15), 48 | c_acctbal DECIMAL, 49 | c_mktsegment CHAR(10), 50 | c_comment VARCHAR(117) 51 | ) 52 | FROM FILE '../../experiments/data/tpch/standard/customer.csv' 53 | LINE DELIMITED CSV (delimiter := '|'); 54 | 55 | SELECT orderkey, 56 | o_orderdate, 57 | o_shippriority, 58 | SUM(l_extendedprice * (1 - l_discount)) 59 | FROM CUSTOMER NATURAL JOIN ORDERS NATURAL JOIN LINEITEM 60 | WHERE c_mktsegment = 'BUILDING' 61 | AND o_orderdate < DATE('1995-03-15') 62 | AND l_shipdate > DATE('1995-03-15') 63 | GROUP BY orderkey, o_orderdate, o_shippriority; 64 | -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_query03.txt: -------------------------------------------------------------------------------- 1 | 31 3 2 | 0 orderkey int -1 {} 0 3 | 1 l_partkey int 0 {0} 0 4 | 2 l_suppkey int 1 {0,1} 0 5 | 3 l_linenumber int 2 {0,1,2} 0 6 | 4 l_quantity double 3 {0,1,2,3} 0 7 | 5 l_extendedprice double 4 {0,1,2,3,4} 0 8 | 6 l_discount 
double 5 {0,1,2,3,4,5} 0 9 | 7 l_tax double 6 {0,1,2,3,4,5,6} 0 10 | 8 l_returnflag char 7 {0,1,2,3,4,5,6,7} 0 11 | 9 l_linestatus char 8 {0,1,2,3,4,5,6,7,8} 0 12 | 10 l_shipdate date 9 {0,1,2,3,4,5,6,7,8,9} 0 13 | 11 l_commitdate date 10 {0,1,2,3,4,5,6,7,8,9,10} 0 14 | 12 l_receiptdate date 11 {0,1,2,3,4,5,6,7,8,9,10,11} 0 15 | 13 l_shipinstruct string 12 {0,1,2,3,4,5,6,7,8,9,10,11,12} 0 16 | 14 l_shipmode string 13 {0,1,2,3,4,5,6,7,8,9,10,11,12,13} 0 17 | 15 l_comment string 14 {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14} 0 18 | 16 o_orderdate date 0 {0} 0 19 | 17 o_shippriority int 16 {0,16} 0 20 | 18 custkey int 17 {0,16,17} 0 21 | 19 o_orderstatus char 18 {0,16,17,18} 0 22 | 20 o_totalprice double 19 {0,16,17,18,19} 0 23 | 21 o_orderpriority string 20 {0,16,17,18,19,20} 0 24 | 22 o_clerk string 21 {0,16,17,18,19,20,21} 0 25 | 23 o_comment string 22 {0,16,17,18,19,20,21,22} 0 26 | 24 c_name string 18 {18} 0 27 | 25 c_address string 24 {18,24} 0 28 | 26 c_nationkey int 25 {18,24,25} 0 29 | 27 c_phone string 26 {18,24,25,26} 0 30 | 28 c_acctbal double 27 {18,24,25,26,27} 0 31 | 29 c_mktsegment string 28 {18,24,25,26,27,28} 0 32 | 30 c_comment string 29 {18,24,25,26,27,28,29} 0 33 | LINEITEM 15 orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment 34 | ORDERS 23 orderkey, custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_clerk, o_shippriority, o_comment 35 | CUSTOMER 30 custkey, c_name, c_address, c_nationkey, c_phone, c_acctbal, c_mktsegment, c_comment -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_query06.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'tpch_query06.txt'; 2 | 3 | CREATE STREAM LINEITEM ( 4 | l_orderkey INT, 5 | l_partkey INT, 6 | l_suppkey INT, 7 | l_linenumber INT, 8 | l_quantity DECIMAL, 9 | l_extendedprice DECIMAL, 10 | l_discount DECIMAL, 11 | l_tax DECIMAL, 12 | l_returnflag CHAR(1), 13 | l_linestatus CHAR(1), 14 | l_shipdate DATE, 15 | l_commitdate DATE, 16 | l_receiptdate DATE, 17 | l_shipinstruct CHAR(25), 18 | l_shipmode CHAR(10), 19 | l_comment VARCHAR(44) 20 | ) 21 | FROM FILE './datasets/tpch/lineitem.csv' 22 | LINE DELIMITED CSV (delimiter := '|'); 23 | 24 | SELECT SUM(l_extendedprice * l_discount) 25 | FROM lineitem 26 | WHERE l_shipdate >= DATE('1994-01-01') 27 | AND l_shipdate < DATE('1995-01-01') 28 | AND (l_discount BETWEEN (0.06 - 0.01) AND (0.06 + 0.01)) 29 | AND l_quantity < 24; -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_query06.txt: -------------------------------------------------------------------------------- 1 | 16 1 2 | 0 l_orderkey int -1 {} 0 3 | 1 l_partkey int 0 {0} 0 4 | 2 l_suppkey int 1 {0,1} 0 5 | 3 l_linenumber int 2 {0,1,2} 0 6 | 4 l_quantity double 3 {0,1,2,3} 0 7 | 5 l_extendedprice double 4 {0,1,2,3,4} 0 8 | 6 l_discount double 5 {0,1,2,3,4,5} 0 9 | 7 l_tax double 6 {0,1,2,3,4,5,6} 0 10 | 8 l_returnflag char 7 {0,1,2,3,4,5,6,7} 0 11 | 9 l_linestatus char 8 {0,1,2,3,4,5,6,7,8} 0 12 | 10 l_shipdate date 9 {0,1,2,3,4,5,6,7,8,9} 0 13 | 11 l_commitdate date 10 {0,1,2,3,4,5,6,7,8,9,10} 0 14 | 12 l_receiptdate date 11 {0,1,2,3,4,5,6,7,8,9,10,11} 0 15 | 13 l_shipinstruct string 12 {0,1,2,3,4,5,6,7,8,9,10,11,12} 0 16 | 14 l_shipmode string 13 {0,1,2,3,4,5,6,7,8,9,10,11,12,13} 0 17 | 15 
l_comment string 14 {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14} 0 18 | LINEITEM 15 l_orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_query10.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'tpch_query10.txt'; 2 | 3 | CREATE STREAM LINEITEM ( 4 | orderkey INT, 5 | l_partkey INT, 6 | l_suppkey INT, 7 | l_linenumber INT, 8 | l_quantity DECIMAL, 9 | l_extendedprice DECIMAL, 10 | l_discount DECIMAL, 11 | l_tax DECIMAL, 12 | l_returnflag CHAR(1), 13 | l_linestatus CHAR(1), 14 | l_shipdate DATE, 15 | l_commitdate DATE, 16 | l_receiptdate DATE, 17 | l_shipinstruct CHAR(25), 18 | l_shipmode CHAR(10), 19 | l_comment VARCHAR(44) 20 | ) 21 | FROM FILE './datasets/tpch/lineitem.csv' 22 | LINE DELIMITED CSV (delimiter := '|'); 23 | 24 | 25 | CREATE STREAM ORDERS ( 26 | orderkey INT, 27 | custkey INT, 28 | o_orderstatus CHAR(1), 29 | o_totalprice DECIMAL, 30 | o_orderdate DATE, 31 | o_orderpriority CHAR(15), 32 | o_clerk CHAR(15), 33 | o_shippriority INT, 34 | o_comment VARCHAR(79) 35 | ) 36 | FROM FILE './datasets/tpch/orders.csv' 37 | LINE DELIMITED CSV (delimiter := '|'); 38 | 39 | CREATE STREAM CUSTOMER ( 40 | custkey INT, 41 | c_name VARCHAR(25), 42 | c_address VARCHAR(40), 43 | nationkey INT, 44 | c_phone CHAR(15), 45 | c_acctbal DECIMAL, 46 | c_mktsegment CHAR(10), 47 | c_comment VARCHAR(117) 48 | ) 49 | FROM FILE './datasets/tpch/customer.csv' 50 | LINE DELIMITED CSV (delimiter := '|'); 51 | 52 | CREATE TABLE NATION ( 53 | nationkey INT, 54 | n_name CHAR(25), 55 | n_regionkey INT, 56 | n_comment VARCHAR(152) 57 | ) 58 | FROM FILE './datasets/tpch/nation.csv' 59 | LINE DELIMITED CSV (delimiter := '|'); 60 | 61 | SELECT custkey, c_name, 62 | c_acctbal, 63 | n_name, 64 | c_address, 65 | c_phone, 66 | c_comment, 67 | SUM(l_extendedprice * (1 - l_discount)) 68 | FROM customer NATURAL JOIN orders NATURAL JOIN lineitem NATURAL JOIN nation 69 | WHERE o_orderdate >= DATE('1993-10-01') 70 | AND o_orderdate < DATE('1994-01-01') 71 | AND l_returnflag = 'R' 72 | GROUP BY custkey, c_name, c_acctbal, c_phone, n_name, c_address, c_comment; 73 | -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_query10.txt: -------------------------------------------------------------------------------- 1 | 34 4 2 | 0 orderkey int -1 {} 0 3 | 1 l_partkey int 0 {0} 0 4 | 2 l_suppkey int 1 {0,1} 0 5 | 3 l_linenumber int 2 {0,1,2} 0 6 | 4 l_quantity double 3 {0,1,2,3} 0 7 | 5 l_extendedprice double 4 {0,1,2,3,4} 0 8 | 6 l_discount double 5 {0,1,2,3,4,5} 0 9 | 7 l_tax double 6 {0,1,2,3,4,5,6} 0 10 | 8 l_returnflag char 7 {0,1,2,3,4,5,6,7} 0 11 | 9 l_linestatus char 8 {0,1,2,3,4,5,6,7,8} 0 12 | 10 l_shipdate date 9 {0,1,2,3,4,5,6,7,8,9} 0 13 | 11 l_commitdate date 10 {0,1,2,3,4,5,6,7,8,9,10} 0 14 | 12 l_receiptdate date 11 {0,1,2,3,4,5,6,7,8,9,10,11} 0 15 | 13 l_shipinstruct string 12 {0,1,2,3,4,5,6,7,8,9,10,11,12} 0 16 | 14 l_shipmode string 13 {0,1,2,3,4,5,6,7,8,9,10,11,12,13} 0 17 | 15 l_comment string 14 {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14} 0 18 | 16 custkey int 0 {0} 0 19 | 17 o_orderstatus char 16 {0,16} 0 20 | 18 o_totalprice double 17 {0,16,17} 0 21 | 19 o_orderdate date 18 {0,16,17,18} 0 22 | 20 o_shippriority int 19 {0,16,17,18,19} 0 23 | 21 
o_orderpriority string 20 {0,16,17,18,19,20} 0 24 | 22 o_clerk string 21 {0,16,17,18,19,20,21} 0 25 | 23 o_comment string 22 {0,16,17,18,19,20,21,22} 0 26 | 24 nationkey int 16 {16} 0 27 | 25 c_name string 24 {16,24} 0 28 | 26 c_address string 25 {16,24,25} 0 29 | 27 c_phone string 26 {16,24,25,26} 0 30 | 28 c_acctbal double 27 {16,24,25,26,27} 0 31 | 29 c_comment string 28 {16,24,25,26,27,28} 0 32 | 30 c_mktsegment string 29 {16,24,25,26,27,28,29} 0 33 | 31 n_name string 24 {24} 0 34 | 32 n_regionkey int 31 {24,31} 0 35 | 33 n_comment string 32 {24,31,32} 0 36 | LINEITEM 15 orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment 37 | ORDERS 23 orderkey, custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_clerk, o_shippriority, o_comment 38 | CUSTOMER 30 custkey, c_name, c_address, nationkey, c_phone, c_acctbal, c_mktsegment, c_comment 39 | NATION 33 nationkey, n_name, n_regionkey, n_comment -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_query12.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'tpch_query12.txt'; 2 | 3 | CREATE TYPE TPCH12Payload 4 | FROM FILE 'ring/ring_tpch_query12.hpp'; 5 | 6 | CREATE STREAM LINEITEM ( 7 | orderkey INT, 8 | l_partkey INT, 9 | l_suppkey INT, 10 | l_linenumber INT, 11 | l_quantity DECIMAL, 12 | l_extendedprice DECIMAL, 13 | l_discount DECIMAL, 14 | l_tax DECIMAL, 15 | l_returnflag CHAR(1), 16 | l_linestatus CHAR(1), 17 | l_shipdate DATE, 18 | l_commitdate DATE, 19 | l_receiptdate DATE, 20 | l_shipinstruct CHAR(25), 21 | l_shipmode CHAR(10), 22 | l_comment VARCHAR(44) 23 | ) 24 | FROM FILE './datasets/tpch/lineitem.csv' 25 | LINE DELIMITED CSV (delimiter := '|'); 26 | 27 | 28 | CREATE STREAM ORDERS ( 29 | orderkey INT, 30 | o_custkey INT, 31 | o_orderstatus CHAR(1), 32 | o_totalprice DECIMAL, 33 | o_orderdate DATE, 34 | o_orderpriority CHAR(15), 35 | o_clerk CHAR(15), 36 | o_shippriority INT, 37 | o_comment VARCHAR(79) 38 | ) 39 | FROM FILE './datasets/tpch/orders.csv' 40 | LINE DELIMITED CSV (delimiter := '|'); 41 | 42 | SELECT l_shipmode, SUM([lift: TPCH12Payload](o_orderpriority)) 43 | FROM lineitem NATURAL JOIN orders 44 | WHERE l_shipmode INLIST ('MAIL', 'SHIP') 45 | AND l_commitdate < l_receiptdate 46 | AND l_shipdate < l_commitdate 47 | AND l_receiptdate >= DATE('1994-01-01') 48 | AND l_receiptdate < DATE('1995-01-01') 49 | GROUP BY l_shipmode; -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_query12.txt: -------------------------------------------------------------------------------- 1 | 24 2 2 | 0 orderkey int -1 {} 0 3 | 1 l_partkey int 0 {0} 0 4 | 2 l_suppkey int 1 {0,1} 0 5 | 3 l_linenumber int 2 {0,1,2} 0 6 | 4 l_quantity double 3 {0,1,2,3} 0 7 | 5 l_extendedprice double 4 {0,1,2,3,4} 0 8 | 6 l_discount double 5 {0,1,2,3,4,5} 0 9 | 7 l_tax double 6 {0,1,2,3,4,5,6} 0 10 | 8 l_returnflag char 7 {0,1,2,3,4,5,6,7} 0 11 | 9 l_linestatus char 8 {0,1,2,3,4,5,6,7,8} 0 12 | 10 l_shipdate date 9 {0,1,2,3,4,5,6,7,8,9} 0 13 | 11 l_commitdate date 10 {0,1,2,3,4,5,6,7,8,9,10} 0 14 | 12 l_receiptdate date 11 {0,1,2,3,4,5,6,7,8,9,10,11} 0 15 | 13 l_shipinstruct string 12 {0,1,2,3,4,5,6,7,8,9,10,11,12} 0 16 | 14 l_shipmode string 13 {0,1,2,3,4,5,6,7,8,9,10,11,12,13} 0 17 | 15 l_comment string 14 
{0,1,2,3,4,5,6,7,8,9,10,11,12,13,14} 0 18 | 16 o_orderpriority string 0 {0} 0 19 | 17 o_custkey int 16 {0,16} 0 20 | 18 o_orderstatus char 17 {0,16,17} 0 21 | 19 o_totalprice double 18 {0,16,17,18} 0 22 | 20 o_orderdate date 19 {0,16,17,18,19} 0 23 | 21 o_shippriority int 20 {0,16,17,18,19,20} 0 24 | 22 o_clerk string 21 {0,16,17,18,19,20,21} 0 25 | 23 o_comment string 22 {0,16,17,18,19,20,21,22} 0 26 | LINEITEM 15 orderkey, l_partkey, l_suppkey, l_linenumber, l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment 27 | ORDERS 23 orderkey, o_custkey, o_orderstatus, o_totalprice, o_orderdate, o_orderpriority, o_clerk, o_shippriority, o_comment -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_query14.sql: -------------------------------------------------------------------------------- 1 | IMPORT DTREE FROM FILE 'tpch_query14.txt'; 2 | 3 | CREATE TYPE TPCH14Payload 4 | FROM FILE 'ring/ring_tpch_query14.hpp'; 5 | 6 | CREATE STREAM LINEITEM ( 7 | l_orderkey INT, 8 | partkey INT, 9 | l_suppkey INT, 10 | l_linenumber INT, 11 | l_quantity DECIMAL, 12 | l_extendedprice DECIMAL, 13 | l_discount DECIMAL, 14 | l_tax DECIMAL, 15 | l_returnflag CHAR(1), 16 | l_linestatus CHAR(1), 17 | l_shipdate DATE, 18 | l_commitdate DATE, 19 | l_receiptdate DATE, 20 | l_shipinstruct CHAR(25), 21 | l_shipmode CHAR(10), 22 | l_comment VARCHAR(44) 23 | ) 24 | FROM FILE './datasets/tpch/lineitem.csv' 25 | LINE DELIMITED CSV (delimiter := '|'); 26 | 27 | CREATE STREAM PART ( 28 | partkey INT, 29 | p_name VARCHAR(55), 30 | p_mfgr CHAR(25), 31 | p_brand CHAR(10), 32 | p_type VARCHAR(25), 33 | p_size INT, 34 | p_container CHAR(10), 35 | p_retailprice DECIMAL, 36 | p_comment VARCHAR(23) 37 | ) 38 | FROM FILE './datasets/tpch/part.csv' 39 | LINE DELIMITED CSV (delimiter := '|'); 40 | 41 | SELECT SUM([liftpart: TPCH14Payload](p_type) * 42 | [liftlineitem: TPCH14Payload](l_extendedprice * (1 - l_discount))) 43 | FROM lineitem NATURAL JOIN part 44 | WHERE l_shipdate >= DATE('1995-09-01') 45 | AND l_shipdate < DATE('1995-10-01'); -------------------------------------------------------------------------------- /examples/queries/tpch/tpch_query14.txt: -------------------------------------------------------------------------------- 1 | 24 2 2 | 0 partkey int -1 {} 0 3 | 1 l_orderkey int 0 {0} 0 4 | 2 l_suppkey int 1 {0,1} 0 5 | 3 l_linenumber int 2 {0,1,2} 0 6 | 4 l_quantity double 3 {0,1,2,3} 0 7 | 5 l_extendedprice double 4 {0,1,2,3,4} 0 8 | 6 l_discount double 5 {0,1,2,3,4,5} 0 9 | 7 l_tax double 6 {0,1,2,3,4,5,6} 0 10 | 8 l_returnflag char 7 {0,1,2,3,4,5,6,7} 0 11 | 9 l_linestatus char 8 {0,1,2,3,4,5,6,7,8} 0 12 | 10 l_shipdate date 9 {0,1,2,3,4,5,6,7,8,9} 0 13 | 11 l_commitdate date 10 {0,1,2,3,4,5,6,7,8,9,10} 0 14 | 12 l_receiptdate date 11 {0,1,2,3,4,5,6,7,8,9,10,11} 0 15 | 13 l_shipinstruct string 12 {0,1,2,3,4,5,6,7,8,9,10,11,12} 0 16 | 14 l_shipmode string 13 {0,1,2,3,4,5,6,7,8,9,10,11,12,13} 0 17 | 15 l_comment string 14 {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14} 0 18 | 16 p_name string 0 {0} 0 19 | 17 p_mfgr string 16 {0,16} 0 20 | 18 p_brand string 17 {0,16,17} 0 21 | 19 p_type string 18 {0,16,17,18} 0 22 | 20 p_size int 19 {0,16,17,18,19} 0 23 | 21 p_container string 20 {0,16,17,18,19,20} 0 24 | 22 p_retailprice double 21 {0,16,17,18,19,20,21} 0 25 | 23 p_comment string 22 {0,16,17,18,19,20,21,22} 0 26 | LINEITEM 15 l_orderkey, partkey, l_suppkey, l_linenumber, 
l_quantity, l_extendedprice, l_discount, l_tax, l_returnflag, l_linestatus, l_shipdate, l_commitdate, l_receiptdate, l_shipinstruct, l_shipmode, l_comment 27 | PART 23 partkey, p_name, p_mfgr, p_brand, p_type, p_size, p_container, p_retailprice, p_comment -------------------------------------------------------------------------------- /examples/src/application/favorita/application_favorita.hpp: -------------------------------------------------------------------------------- 1 | #ifndef APPLICATION_FAVORITA_HPP 2 | #define APPLICATION_FAVORITA_HPP 3 | 4 | #include 5 | #include "application_favorita_base.hpp" 6 | 7 | void Application::on_snapshot(dbtoaster::data_t& data) { 8 | struct timeval tp; 9 | gettimeofday(&tp, nullptr); 10 | std::cout << data.tN << " tuples processed at " 11 | << tp.tv_sec * 1000 + tp.tv_usec / 1000 12 | << " ms" << std::endl; 13 | DUMP_HEAP_PROFILE 14 | } 15 | 16 | void Application::on_begin_processing(dbtoaster::data_t& data) { 17 | START_HEAP_PROFILE 18 | } 19 | 20 | void Application::on_end_processing(dbtoaster::data_t& data, bool print_result) { 21 | STOP_HEAP_PROFILE 22 | 23 | if (print_result) { 24 | data.serialize(std::cout, 0); 25 | } 26 | } 27 | 28 | #endif /* APPLICATION_FAVORITA_HPP */ -------------------------------------------------------------------------------- /examples/src/application/housing/application_housing.hpp: -------------------------------------------------------------------------------- 1 | #ifndef APPLICATION_HOUSING_HPP 2 | #define APPLICATION_HOUSING_HPP 3 | 4 | #include 5 | #include "application_housing_base.hpp" 6 | 7 | void Application::on_snapshot(dbtoaster::data_t& data) { 8 | struct timeval tp; 9 | gettimeofday(&tp, nullptr); 10 | std::cout << data.tN << " tuples processed at " 11 | << tp.tv_sec * 1000 + tp.tv_usec / 1000 12 | << " ms" << std::endl; 13 | DUMP_HEAP_PROFILE 14 | } 15 | 16 | void Application::on_begin_processing(dbtoaster::data_t& data) { 17 | START_HEAP_PROFILE 18 | } 19 | 20 | void Application::on_end_processing(dbtoaster::data_t& data, bool print_result) { 21 | STOP_HEAP_PROFILE 22 | 23 | if (print_result) { 24 | data.serialize(std::cout, 0); 25 | } 26 | } 27 | 28 | #endif /* APPLICATION_HOUSING_HPP */ -------------------------------------------------------------------------------- /examples/src/application/housing/application_housing_listing_join.hpp: -------------------------------------------------------------------------------- 1 | #ifndef APPLICATION_HOUSING_LISTING_JOIN_HPP 2 | #define APPLICATION_HOUSING_LISTING_JOIN_HPP 3 | 4 | #include "application_housing_base.hpp" 5 | 6 | void Application::on_snapshot(dbtoaster::data_t& data) { 7 | on_end_processing(data, false); 8 | } 9 | 10 | void Application::on_begin_processing(dbtoaster::data_t& data) { 11 | 12 | } 13 | 14 | void Application::on_end_processing(dbtoaster::data_t& data, bool print_result) { 15 | 16 | cout << endl << "Enumerating listing join result... 
" << endl; 17 | 18 | const auto& v_postcode_HSIRDT1 = data.get_V_postcode_HSIRDT1(); 19 | 20 | size_t output_size = v_postcode_HSIRDT1.store.size(); 21 | size_t total_multiplicity = 0; 22 | 23 | for (auto &t : v_postcode_HSIRDT1.store) { 24 | if (print_result) cout << t.first << " -> " << t.second << endl; 25 | total_multiplicity += t.second; 26 | } 27 | 28 | cout << "Number of output tuples: " << output_size << endl; 29 | cout << "Total multiplicity: " << total_multiplicity << endl; 30 | } 31 | 32 | #endif /* APPLICATION_HOUSING_LISTING_JOIN_HPP */ -------------------------------------------------------------------------------- /examples/src/application/retailer/application_retailer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef APPLICATION_RETAILER_HPP 2 | #define APPLICATION_RETAILER_HPP 3 | 4 | #include 5 | #include "application_retailer_base.hpp" 6 | 7 | void Application::on_snapshot(dbtoaster::data_t& data) { 8 | struct timeval tp; 9 | gettimeofday(&tp, nullptr); 10 | std::cout << data.tN << " tuples processed at " 11 | << tp.tv_sec * 1000 + tp.tv_usec / 1000 12 | << " ms" << std::endl; 13 | DUMP_HEAP_PROFILE 14 | } 15 | 16 | void Application::on_begin_processing(dbtoaster::data_t& data) { 17 | START_HEAP_PROFILE 18 | } 19 | 20 | void Application::on_end_processing(dbtoaster::data_t& data, bool print_result) { 21 | STOP_HEAP_PROFILE 22 | 23 | if (print_result) { 24 | data.serialize(std::cout, 0); 25 | } 26 | } 27 | 28 | #endif /* APPLICATION_RETAILER_HPP */ -------------------------------------------------------------------------------- /examples/src/application/retailer/application_retailer_listing_join.hpp: -------------------------------------------------------------------------------- 1 | #ifndef APPLICATION_RETAILER_LISTING_JOIN_HPP 2 | #define APPLICATION_RETAILER_LISTING_JOIN_HPP 3 | 4 | #include "application_retailer_base.hpp" 5 | 6 | void Application::on_snapshot(dbtoaster::data_t& data) { 7 | on_end_processing(data, false); 8 | } 9 | 10 | void Application::on_begin_processing(dbtoaster::data_t& data) { 11 | 12 | } 13 | 14 | void Application::on_end_processing(dbtoaster::data_t& data, bool print_result) { 15 | 16 | cout << endl << "Enumerating listing join result... 
" << endl; 17 | 18 | const auto& viewIIWLC = data.get_V_locn_IIWLC1(); 19 | 20 | size_t output_size = viewIIWLC.store.size(); 21 | size_t total_multiplicity = 0; 22 | 23 | for (auto &t : viewIIWLC.store) { 24 | if (print_result) cout << t.first << " -> " << t.second << endl; 25 | total_multiplicity += t.second; 26 | } 27 | 28 | cout << "Number of output tuples: " << output_size << endl; 29 | cout << "Total multiplicity: " << total_multiplicity << endl; 30 | } 31 | 32 | #endif /* APPLICATION_RETAILER_LISTING_JOIN_HPP */ -------------------------------------------------------------------------------- /examples/src/application/simple/application_simple.hpp: -------------------------------------------------------------------------------- 1 | #ifndef APPLICATION_TPCH_HPP 2 | #define APPLICATION_TPCH_HPP 3 | 4 | #include "application_simple_base.hpp" 5 | 6 | void Application::on_snapshot(dbtoaster::data_t& data) { 7 | on_end_processing(data, false); 8 | } 9 | 10 | void Application::on_begin_processing(dbtoaster::data_t& data) { 11 | 12 | } 13 | 14 | void Application::on_end_processing(dbtoaster::data_t& data, bool print_result) { 15 | if (print_result) { 16 | data.serialize(std::cout, 0); 17 | } 18 | } 19 | 20 | #endif /* APPLICATION_TPCH_HPP */ -------------------------------------------------------------------------------- /examples/src/application/tpch/application_tpch.hpp: -------------------------------------------------------------------------------- 1 | #ifndef APPLICATION_TPCH_HPP 2 | #define APPLICATION_TPCH_HPP 3 | 4 | #include "application_tpch_base.hpp" 5 | 6 | void Application::on_snapshot(dbtoaster::data_t& data) { 7 | on_end_processing(data, false); 8 | } 9 | 10 | void Application::on_begin_processing(dbtoaster::data_t& data) { 11 | 12 | } 13 | 14 | void Application::on_end_processing(dbtoaster::data_t& data, bool print_result) { 15 | if (print_result) { 16 | data.serialize(std::cout, 0); 17 | } 18 | } 19 | 20 | #endif /* APPLICATION_TPCH_HPP */ -------------------------------------------------------------------------------- /examples/src/application/tpch/application_tpch_FQ1_listing_join.hpp: -------------------------------------------------------------------------------- 1 | #ifndef APPLICATION_TPCH_FQ1_LISTING_JOIN_HPP 2 | #define APPLICATION_TPCH_FQ1_LISTING_JOIN_HPP 3 | 4 | #include "application_tpch_base.hpp" 5 | 6 | void Application::on_snapshot(dbtoaster::data_t& data) { 7 | on_end_processing(data, false); 8 | } 9 | 10 | void Application::on_begin_processing(dbtoaster::data_t& data) { 11 | 12 | } 13 | 14 | void Application::on_end_processing(dbtoaster::data_t& data, bool print_result) { 15 | 16 | cout << endl << "Enumerating listing join result... 
" << endl; 17 | 18 | const auto& v_orderkey_LPPO1 = data.get_V_orderkey_LPPO1(); 19 | 20 | size_t output_size = v_orderkey_LPPO1.store.size(); 21 | size_t total_multiplicity = 0; 22 | 23 | for (auto &t : v_orderkey_LPPO1.store) { 24 | if (print_result) cout << t.first << " -> " << t.second << endl; 25 | total_multiplicity += t.second; 26 | } 27 | 28 | cout << "Number of output tuples: " << output_size << endl; 29 | cout << "Total multiplicity: " << total_multiplicity << endl; 30 | } 31 | 32 | #endif /* APPLICATION_TPCH_FQ1_LISTING_JOIN_HPP */ -------------------------------------------------------------------------------- /examples/src/application/tpch/application_tpch_FQ2_listing_join.hpp: -------------------------------------------------------------------------------- 1 | #ifndef APPLICATION_TPCH_FQ2_LISTING_JOIN_HPP 2 | #define APPLICATION_TPCH_FQ2_LISTING_JOIN_HPP 3 | 4 | #include "application_tpch_base.hpp" 5 | 6 | void Application::on_snapshot(dbtoaster::data_t& data) { 7 | on_end_processing(data, false); 8 | } 9 | 10 | void Application::on_begin_processing(dbtoaster::data_t& data) { 11 | 12 | } 13 | 14 | void Application::on_end_processing(dbtoaster::data_t& data, bool print_result) { 15 | 16 | cout << endl << "Enumerating listing join result... " << endl; 17 | 18 | const auto& v_orderkey_LPCNO1 = data.get_V_orderkey_LPCNO1(); 19 | 20 | size_t output_size = v_orderkey_LPCNO1.store.size(); 21 | size_t total_multiplicity = 0; 22 | 23 | for (auto &t : v_orderkey_LPCNO1.store) { 24 | if (print_result) cout << t.first << " -> " << t.second << endl; 25 | total_multiplicity += t.second; 26 | } 27 | 28 | cout << "Number of output tuples: " << output_size << endl; 29 | cout << "Total multiplicity: " << total_multiplicity << endl; 30 | } 31 | 32 | #endif /* APPLICATION_TPCH_FQ2_LISTING_JOIN_HPP */ -------------------------------------------------------------------------------- /examples/src/application/tpch/application_tpch_FQ3_listing_join.hpp: -------------------------------------------------------------------------------- 1 | #ifndef APPLICATION_TPCH_FQ3_LISTING_JOIN_HPP 2 | #define APPLICATION_TPCH_FQ3_LISTING_JOIN_HPP 3 | 4 | #include "application_tpch_base.hpp" 5 | 6 | void Application::on_snapshot(dbtoaster::data_t& data) { 7 | on_end_processing(data, false); 8 | } 9 | 10 | void Application::on_begin_processing(dbtoaster::data_t& data) { 11 | 12 | } 13 | 14 | void Application::on_end_processing(dbtoaster::data_t& data, bool print_result) { 15 | 16 | cout << endl << "Enumerating listing join result... 
" << endl; 17 | 18 | const auto& v_orderkey_LPSCO1 = data.get_V_orderkey_LPSCO1(); 19 | 20 | size_t output_size = v_orderkey_LPSCO1.store.size(); 21 | size_t total_multiplicity = 0; 22 | 23 | for (auto &t : v_orderkey_LPSCO1.store) { 24 | if (print_result) cout << t.first << " -> " << t.second << endl; 25 | total_multiplicity += t.second; 26 | } 27 | 28 | cout << "Number of output tuples: " << output_size << endl; 29 | cout << "Total multiplicity: " << total_multiplicity << endl; 30 | } 31 | 32 | #endif /* APPLICATION_TPCH_FQ3_LISTING_JOIN_HPP */ -------------------------------------------------------------------------------- /examples/src/application/tpch/application_tpch_FQ4_listing_join.hpp: -------------------------------------------------------------------------------- 1 | #ifndef APPLICATION_TPCH_FQ4_LISTING_JOIN_HPP 2 | #define APPLICATION_TPCH_FQ4_LISTING_JOIN_HPP 3 | 4 | #include "application_tpch_base.hpp" 5 | 6 | void Application::on_snapshot(dbtoaster::data_t& data) { 7 | on_end_processing(data, false); 8 | } 9 | 10 | void Application::on_begin_processing(dbtoaster::data_t& data) { 11 | 12 | } 13 | 14 | void Application::on_end_processing(dbtoaster::data_t& data, bool print_result) { 15 | 16 | cout << endl << "Enumerating listing join result... " << endl; 17 | 18 | const auto& v_suppkey_LPS1 = data.get_V_suppkey_LPS1(); 19 | 20 | size_t output_size = v_suppkey_LPS1.store.size(); 21 | size_t total_multiplicity = 0; 22 | 23 | for (auto &t : v_suppkey_LPS1.store) { 24 | if (print_result) cout << t.first << " -> " << t.second << endl; 25 | total_multiplicity += t.second; 26 | } 27 | 28 | cout << "Number of output tuples: " << output_size << endl; 29 | cout << "Total multiplicity: " << total_multiplicity << endl; 30 | } 31 | 32 | #endif /* APPLICATION_TPCH_FQ4_LISTING_JOIN_HPP */ -------------------------------------------------------------------------------- /examples/src/lib/csvreader.hpp: -------------------------------------------------------------------------------- 1 | #ifndef FIVM_CSVREADER_HPP 2 | #define FIVM_CSVREADER_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | using namespace std; 10 | 11 | class CSVAdaptor { 12 | public: 13 | CSVAdaptor(char del) : delimiter(del) { } 14 | 15 | std::string const& operator[](std::size_t index) const { 16 | return data[index]; 17 | } 18 | 19 | std::size_t size() const { 20 | return data.size(); 21 | } 22 | 23 | void readNextRow(std::istream& str) { 24 | data.clear(); 25 | 26 | std::string line; 27 | std::getline(str, line); 28 | 29 | std::stringstream lineStream(line); 30 | std::string cell; 31 | 32 | while (std::getline(lineStream, cell, delimiter)) { 33 | data.push_back(cell); 34 | } 35 | } 36 | 37 | std::vector data; 38 | 39 | private: 40 | char delimiter; 41 | }; 42 | 43 | std::istream& operator>>(std::istream& str, CSVAdaptor& data) { 44 | data.readNextRow(str); 45 | return str; 46 | } 47 | 48 | template 49 | void readFromFile(std::vector& data, const std::string& path, char delimiter) { 50 | data.clear(); 51 | 52 | std::ifstream file(path); 53 | if (!file) { 54 | cerr << "ERROR: " << path << " doesn't exist" << endl; 55 | return; 56 | } 57 | 58 | CSVAdaptor row(delimiter); 59 | while (file >> row) { 60 | T tmp(row.data, 1L); 61 | data.push_back(tmp); 62 | } 63 | 64 | file.close(); 65 | } 66 | 67 | template 68 | void readFromBinaryFile(std::vector& data, const std::string& path) { 69 | data.clear(); 70 | 71 | std::ifstream file(path, std::ios::in | std::ios::binary); 72 | 73 | size_t length; 74 | 
file.read((char*) &length, sizeof(size_t)); 75 | data.reserve(length); 76 | 77 | T tmp; 78 | for (size_t i = 0; i < length; i++) { 79 | tmp.readFrom(file); 80 | data.push_back(tmp); 81 | } 82 | 83 | file.close(); 84 | } 85 | 86 | template 87 | void writeToBinaryFile(std::vector& data, const std::string& path) { 88 | std::ofstream file(path, std::ios::out | std::ios::binary); 89 | 90 | size_t length = data.size(); 91 | file.write((char*) &length, sizeof(size_t)); 92 | for (T t : data) t.writeTo(file); 93 | 94 | file.close(); 95 | } 96 | 97 | #endif /* FIVM_CSVREADER_HPP */ -------------------------------------------------------------------------------- /examples/src/lib/relation.hpp: -------------------------------------------------------------------------------- 1 | #ifndef RELATION_HPP 2 | #define RELATION_HPP 3 | 4 | #include 5 | #include 6 | #include 7 | #include "dispatcher.hpp" 8 | #include "csvreader.hpp" 9 | 10 | class IRelation { 11 | protected: 12 | string name; 13 | string path; 14 | char delimiter; 15 | bool static_table; 16 | 17 | public: 18 | IRelation(string _name, string _path, char _delimiter, bool _static_table) 19 | : name(_name), path(_path), delimiter(_delimiter), static_table(_static_table) { } 20 | 21 | virtual ~IRelation() { } 22 | 23 | string get_name() { return name; } 24 | 25 | bool is_static() { return static_table; } 26 | 27 | virtual size_t size() = 0; 28 | 29 | virtual void load() = 0; 30 | 31 | virtual void clear() = 0; 32 | 33 | virtual Dispatcher* create_dispatcher(dbtoaster::data_t& data) = 0; 34 | }; 35 | 36 | template 37 | class Relation : public IRelation { 38 | protected: 39 | std::vector tuples; 40 | 41 | public: 42 | Relation(string name, string path, char delimiter, bool static_table) 43 | : IRelation(name, path, delimiter, static_table) { } 44 | 45 | size_t size() { return tuples.size(); } 46 | 47 | void load() { readFromFile(tuples, path, delimiter); } 48 | 49 | void clear() { tuples.clear(); } 50 | }; 51 | 52 | template 53 | class EventDispatchableRelation : public Relation { 54 | protected: 55 | typedef std::function Func; 56 | typedef std::function DataFunc; 57 | 58 | DataFunc data_func; 59 | Func func; 60 | 61 | public: 62 | EventDispatchableRelation(string name, string path, char delimiter, bool static_table, DataFunc f) 63 | : Relation(name, path, delimiter, static_table), data_func(f), func(nullptr) { } 64 | 65 | Dispatcher* create_dispatcher(dbtoaster::data_t& data) { 66 | func = data_func(data); 67 | return new EventDispatcher(this->tuples, func); 68 | } 69 | }; 70 | 71 | #ifdef BATCH_SIZE 72 | 73 | template 74 | class BatchDispatchableRelation : public Relation { 75 | protected: 76 | typedef typename std::vector::iterator Iterator; 77 | typedef std::function Func; 78 | typedef std::function DataFunc; 79 | 80 | DataFunc data_func; 81 | Func func; 82 | 83 | public: 84 | BatchDispatchableRelation(string name, string path, char delimiter, bool static_table, DataFunc f) 85 | : Relation(name, path, delimiter, static_table), data_func(f), func(nullptr) { } 86 | 87 | Dispatcher* create_dispatcher(dbtoaster::data_t& data) { 88 | func = data_func(data); 89 | return new BatchDispatcher(this->tuples, func); 90 | } 91 | }; 92 | 93 | #endif 94 | 95 | #endif /* RELATION_HPP */ -------------------------------------------------------------------------------- /examples/src/lib/stopwatch.hpp: -------------------------------------------------------------------------------- 1 | #ifndef FIVM_STOPWATCH_HPP 2 | #define FIVM_STOPWATCH_HPP 3 | 4 | #include 5 | 6 
| class Stopwatch { 7 | private: 8 | long startTime; 9 | long endTime; 10 | 11 | public: 12 | Stopwatch() : startTime(0), endTime(0) { } 13 | 14 | void restart() { 15 | timeval start_time; 16 | gettimeofday(&start_time, NULL); 17 | startTime = start_time.tv_sec * 1000 + start_time.tv_usec / 1000; 18 | } 19 | 20 | void stop() { 21 | timeval end_time; 22 | gettimeofday(&end_time, NULL); 23 | endTime = end_time.tv_sec * 1000 + end_time.tv_usec / 1000; 24 | } 25 | 26 | long elapsedTimeInMilliSeconds() { 27 | return endTime - startTime; 28 | } 29 | }; 30 | #endif /* FIVM_STOPWATCH_HPP */ 31 | -------------------------------------------------------------------------------- /examples/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "application/application.hpp" 4 | 5 | int main(int argc, char** argv) { 6 | 7 | int opt_num_runs = 1; 8 | bool opt_print_result = true; 9 | for (int i = 0; i < argc; i++) { 10 | if (strcmp(argv[i], "--num-runs") == 0 || strcmp(argv[i], "-r") == 0) { 11 | opt_num_runs = atoi(argv[i + 1]); 12 | } 13 | opt_print_result = opt_print_result && (strcmp(argv[i], "--no-output") != 0); 14 | } 15 | 16 | #ifndef __APPLE__ 17 | cpu_set_t mask; 18 | CPU_ZERO(&mask); 19 | CPU_SET(0, &mask); 20 | sched_setaffinity(0, sizeof(mask), &mask); 21 | #endif 22 | 23 | Application app; 24 | app.run(opt_num_runs, opt_print_result); 25 | 26 | return 0; 27 | } -------------------------------------------------------------------------------- /examples/src/ring/dictionary.hpp: -------------------------------------------------------------------------------- 1 | #ifndef DBTOASTER_DICTIONARY_HPP 2 | #define DBTOASTER_DICTIONARY_HPP 3 | 4 | #include 5 | 6 | struct Tuple { 7 | union { 8 | uint64_t key; 9 | uint32_t slots[2]; /* composite key can consist of two 4B keys */ 10 | }; 11 | double value; 12 | }; 13 | static_assert(sizeof(Tuple) == 16, "size of Tuple not 16 bytes"); 14 | 15 | struct Dictionary { 16 | std::vector tuples; 17 | 18 | Dictionary() { } 19 | 20 | explicit Dictionary(size_t n) { 21 | tuples.reserve(n); 22 | } 23 | 24 | explicit Dictionary(Tuple t) : tuples { t } { } 25 | 26 | inline bool isEmpty() const { return tuples.empty(); } 27 | 28 | Dictionary& operator+=(const Dictionary& other) { 29 | for (size_t i = 0; i < other.tuples.size(); i++) { 30 | bool found = false; 31 | for (size_t j = 0; !found && j < tuples.size(); j++) { 32 | if (tuples[j].key == other.tuples[i].key) { 33 | tuples[j].value += other.tuples[i].value; 34 | found = true; 35 | } 36 | } 37 | if (!found) { 38 | tuples.push_back(other.tuples[i]); 39 | } 40 | } 41 | return *this; 42 | } 43 | 44 | Dictionary operator*(const Dictionary& other) const { 45 | if (other.isEmpty()) return Dictionary(); 46 | return multiply(other); 47 | } 48 | 49 | Dictionary multiply(const Dictionary& other) const { 50 | Dictionary r(tuples.size() * other.tuples.size()); 51 | for (size_t i = 0; i < tuples.size(); i++) { 52 | for (size_t j = 0; j < other.tuples.size(); j++) { 53 | Tuple t = { 54 | .slots = { tuples[i].slots[0], other.tuples[j].slots[0] }, 55 | .value = tuples[i].value * other.tuples[j].value 56 | }; 57 | r.tuples.push_back(t); 58 | } 59 | } 60 | return r; 61 | } 62 | 63 | Dictionary operator*(double alpha) const { 64 | if (alpha == 0.0) return Dictionary(); 65 | return multiply(alpha); 66 | } 67 | 68 | Dictionary multiply(double alpha) const { 69 | Dictionary r(tuples.size()); 70 | for (size_t i = 0; i < tuples.size(); i++) { 71 | Tuple t = { 72 | .key = 
tuples[i].key, 73 | .value = tuples[i].value * alpha 74 | }; 75 | r.tuples.push_back(t); 76 | } 77 | return r; 78 | } 79 | 80 | template 81 | void serialize(Archive & ar, const unsigned int version) const { 82 | ar << "{ "; 83 | for (auto &t : tuples) { 84 | ar << t.key << " -> " << t.value << ", "; 85 | } 86 | ar << " }"; 87 | } 88 | 89 | }; 90 | 91 | #endif /* DBTOASTER_DICTIONARY_HPP */ 92 | -------------------------------------------------------------------------------- /examples/src/ring/ring_avg.hpp: -------------------------------------------------------------------------------- 1 | #ifndef RINGAVG_HPP 2 | #define RINGAVG_HPP 3 | 4 | #include "types.hpp" 5 | #include "serialization.hpp" 6 | 7 | using namespace dbtoaster; 8 | 9 | struct RingAvg { 10 | int count; 11 | DOUBLE_TYPE sum; 12 | 13 | static RingAvg zero; 14 | 15 | explicit RingAvg() : count(0), sum(0.0) { } 16 | 17 | explicit RingAvg(int c, DOUBLE_TYPE s) : count(c), sum(s) { } 18 | 19 | inline bool isZero() const { return count == 0; } 20 | 21 | RingAvg& operator+=(const RingAvg &r) { 22 | this->count += r.count; 23 | this->sum += r.sum; 24 | return *this; 25 | } 26 | 27 | RingAvg operator*(const RingAvg &other) { 28 | return RingAvg(count * other.count, sum * other.count + other.sum * count); 29 | } 30 | 31 | template 32 | void serialize(Archive& ar, const unsigned int version) const { 33 | ar << ELEM_SEPARATOR; 34 | DBT_SERIALIZATION_NVP(ar, count); 35 | ar << ELEM_SEPARATOR; 36 | DBT_SERIALIZATION_NVP(ar, sum); 37 | } 38 | }; 39 | 40 | RingAvg operator*(int alpha, const RingAvg &r) { 41 | return RingAvg(alpha * r.count, alpha * r.sum); 42 | } 43 | 44 | RingAvg Ulift(DOUBLE_TYPE a) { 45 | return RingAvg(1, a); 46 | } 47 | 48 | #endif /* RINGAVG_HPP */ -------------------------------------------------------------------------------- /examples/src/ring/ring_tpch_query12.hpp: -------------------------------------------------------------------------------- 1 | #ifndef RINGTPCH12_HPP 2 | #define RINGTPCH12_HPP 3 | 4 | #include "types.hpp" 5 | #include "serialization.hpp" 6 | 7 | using namespace dbtoaster; 8 | 9 | const STRING_TYPE c1 = STRING_TYPE("1-URGENT"); 10 | const STRING_TYPE c2 = STRING_TYPE("2-HIGH"); 11 | 12 | struct TPCH12Payload { 13 | long count; 14 | long high_line_count; 15 | long low_line_count; 16 | 17 | explicit TPCH12Payload() : count(0), high_line_count(0), low_line_count(0) { } 18 | 19 | explicit TPCH12Payload(long c, const STRING_TYPE& order_priority) : count(c) { 20 | high_line_count = ((order_priority == c1) || (order_priority == c2)); 21 | low_line_count = !((order_priority == c1) || (order_priority == c2)); 22 | } 23 | 24 | inline bool isZero() const { return count == 0; } 25 | 26 | TPCH12Payload& operator+=(const TPCH12Payload& other) { 27 | if (other.isZero()) return *this; 28 | count += other.count; 29 | high_line_count += other.high_line_count; 30 | low_line_count += other.low_line_count; 31 | return *this; 32 | } 33 | 34 | TPCH12Payload operator*(const TPCH12Payload& other) const { 35 | if (isZero() || other.isZero()) return TPCH12Payload(); 36 | 37 | TPCH12Payload r; 38 | r.count = count * other.count; 39 | r.high_line_count = count * other.high_line_count + other.count * high_line_count; 40 | r.low_line_count = count * other.low_line_count + other.count * low_line_count; 41 | return r; 42 | } 43 | 44 | TPCH12Payload operator*(long int alpha) const { 45 | if (alpha == 1L) return *this; 46 | return multiply(alpha); 47 | } 48 | 49 | TPCH12Payload multiply(long int alpha) const { 50 | TPCH12Payload 
r; 51 | r.count = alpha * count; 52 | r.high_line_count = alpha * high_line_count; 53 | r.low_line_count = alpha * low_line_count; 54 | return r; 55 | } 56 | 57 | FORCE_INLINE void clear() { 58 | count = 0L; 59 | high_line_count = 0L; 60 | low_line_count = 0L; 61 | } 62 | 63 | template 64 | void serialize(Archive& ar, const unsigned int version) const { 65 | ar << ELEM_SEPARATOR << "\t"; 66 | DBT_SERIALIZATION_NVP(ar, high_line_count); 67 | ar << ELEM_SEPARATOR << "\t"; 68 | DBT_SERIALIZATION_NVP(ar, low_line_count); 69 | } 70 | }; 71 | 72 | TPCH12Payload operator*(long int alpha, const TPCH12Payload& p) { 73 | if (alpha == 1L) return p; 74 | return p.multiply(alpha); 75 | } 76 | 77 | TPCH12Payload Ulift(const STRING_TYPE& order_priority) { 78 | return TPCH12Payload(1, order_priority); 79 | } 80 | 81 | #endif /* RINGTPCH12_HPP */ -------------------------------------------------------------------------------- /frontend/build.sbt: -------------------------------------------------------------------------------- 1 | name := "FIVM" 2 | 3 | version := "1.0" 4 | 5 | scalaVersion := "2.12.6" 6 | 7 | mainClass in (Compile, run) := Some("fdbresearch.Main") 8 | 9 | libraryDependencies ++= Seq( 10 | "org.scala-lang.modules" %% "scala-parser-combinators" % "1.1.1", 11 | "com.github.scopt" %% "scopt" % "3.7.1", 12 | "org.slf4j" % "slf4j-api" % "1.7.25", 13 | "org.slf4j" % "slf4j-simple" % "1.7.25" 14 | ) 15 | -------------------------------------------------------------------------------- /frontend/project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version=1.2.7 2 | -------------------------------------------------------------------------------- /frontend/project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.14.9") 2 | -------------------------------------------------------------------------------- /frontend/src/main/main.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /frontend/src/main/scala/fdbresearch/Driver.scala: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // Factorized IVM (F-IVM) 4 | // 5 | // https://fdbresearch.github.io/ 6 | // 7 | // Copyright (c) 2018-2019, FDB Research Group, University of Oxford 8 | // 9 | //===----------------------------------------------------------------------===// 10 | package fdbresearch 11 | 12 | import fdbresearch.tree.{VariableOrderNode, VariableOrderRelation, Tree, ViewTree} 13 | import fdbresearch.core.{SQL, SQLToM3Compiler, Source} 14 | import fdbresearch.parsing.M3Parser 15 | import fdbresearch.util.Logger 16 | 17 | class Driver { 18 | 19 | import fdbresearch.tree.VariableOrder._ 20 | 21 | // TODO: allow definitions of unused streams and tables 22 | 23 | /** 24 | * Check if all SQL sources have consistent schemas with DTree relations 25 | */ 26 | private def checkSchemas(sqlSources: List[Source], relations: List[VariableOrderRelation]): Unit = { 27 | val rm = relations.map { r => 28 | r.name -> r.keys.map(v => (v.name, v.tp)).toSet 29 | }.toMap 30 | val diff = sqlSources.flatMap { s => 31 | val f1 = s.schema.fields.toSet 32 | val f2 = rm(s.schema.name) 33 | f1.diff(f2).union(f2.diff(f1)) 34 | } 35 
| assert(diff.isEmpty, "Inconsistent schemas in SQL and DTree files:\n" + diff.mkString("\n")) 36 | } 37 | 38 | /** 39 | * Resolve missing types in SQL system 40 | */ 41 | private def resolveTypes(s: SQL.System): SQL.System = { 42 | val vm = s.sources.flatMap(_.schema.fields.map(x => x._1 -> x._2)).toMap 43 | s.replace { 44 | case SQL.Field(n, t, tp) => 45 | assert(tp == null || tp == vm(n)) 46 | SQL.Field(n, t, vm(n)) 47 | }.asInstanceOf[SQL.System] 48 | } 49 | 50 | def compile(sql: SQL.System, dtree: Tree[VariableOrderNode], batchUpdates: Boolean): String = { 51 | 52 | checkSchemas(sql.sources, dtree.getRelations) 53 | 54 | Logger.instance.debug("CHECK SCHEMAS: OK") 55 | 56 | val typedSQL = resolveTypes(sql) 57 | val (sumFn, _, whCond, gb) = SQLToM3Compiler.compile(typedSQL) 58 | 59 | Logger.instance.debug("BUILDING VIEW TREE:") 60 | 61 | val viewtree = ViewTree(dtree, gb.toSet, sumFn, whCond) 62 | 63 | Logger.instance.debug("\n\nVIEW TREE:\n" + viewtree) 64 | 65 | val cg = new CodeGenerator(viewtree, sql.typeDefs, sql.sources, batchUpdates) 66 | val m3 = cg.generateM3 67 | Logger.instance.debug("\n\nORIGINAL M3\n" + m3) 68 | 69 | val optM3 = Optimizer.optimize(m3) 70 | Logger.instance.debug("\n\nOPTIMIZED M3\n" + optM3) 71 | 72 | // test that the output can be parsed by the M3 parser 73 | val checkedM3 = new M3Parser().apply(optM3.toString) 74 | Logger.instance.debug("M3 SYNTAX CHECKED") 75 | 76 | checkedM3.toString 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /frontend/src/main/scala/fdbresearch/tree/Tree.scala: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // Factorized IVM (F-IVM) 4 | // 5 | // https://fdbresearch.github.io/ 6 | // 7 | // Copyright (c) 2018-2019, FDB Research Group, University of Oxford 8 | // 9 | //===----------------------------------------------------------------------===// 10 | package fdbresearch.tree 11 | 12 | /** 13 | * Generic tree implementation 14 | * 15 | * @author Milos Nikolic 16 | */ 17 | class Tree[A](val node: A, private var _parent: Option[Tree[A]], 18 | childrenFactory: Tree[A] => List[Tree[A]]) extends Iterable[A] { 19 | 20 | def parent: Option[Tree[A]] = _parent 21 | 22 | def setParent(p: Tree[A]) = _parent = Some(p) 23 | 24 | val children: List[Tree[A]] = childrenFactory(this) 25 | 26 | def isRoot: Boolean = parent.isEmpty 27 | 28 | def leftSiblings: List[Tree[A]] = 29 | parent.map(_.children.takeWhile(_ != this)).getOrElse(Nil) 30 | 31 | def rightSiblings: List[Tree[A]] = 32 | parent.map(_.children.dropWhile(_ != this).tail).getOrElse(Nil) 33 | 34 | // Number of nodes including itself 35 | val treeSize: Int = children.map(_.treeSize).sum + 1 36 | 37 | override def toString: String = node.toString + "{" + isRoot + "}" + (children match { 38 | case Nil => "" 39 | case hd :: Nil => " - " + hd 40 | case _ => " - { " + children.mkString(", ") + " }" 41 | }) 42 | 43 | def iterator: Iterator[A] = Iterator.single(node) ++ children.flatMap(_.iterator) 44 | 45 | def post_order_traversal: List[A] = 46 | children.flatMap(_.post_order_traversal) ++ Iterator.single(node) 47 | 48 | def map[B](f: A => B): Tree[B] = map2(t => f(t.node)) 49 | 50 | def mapWithPostChildren[B](f: (A, List[Tree[B]]) => B): Tree[B] = 51 | map2WithPostChildren { (t, l) => f(t.node, l) } 52 | 53 | def map2[B](f: Tree[A] => B): Tree[B] = { 54 | def createChildTrees(children: List[Tree[A]])(parent: 
Tree[B]): List[Tree[B]] = 55 | children.map(n => new Tree(f(n), Some(parent), createChildTrees(n.children))) 56 | 57 | new Tree(f(this), None, createChildTrees(children)) 58 | } 59 | 60 | def map2WithPostChildren[B](f: (Tree[A], List[Tree[B]]) => B): Tree[B] = { 61 | def bottomUp(tree: Tree[A]): Tree[B] = { 62 | val newChildren = tree.children.map(bottomUp) 63 | new Tree(f(tree, newChildren), None, p => { 64 | newChildren.foreach(_._parent = Some(p)); newChildren 65 | }) 66 | } 67 | bottomUp(this) 68 | } 69 | 70 | // // More expensive but immutable implementation 71 | // def map2WithPostChildren2[B](f: (Tree[A], List[Tree[B]]) => B): Tree[B] = { 72 | // def create(tree: Tree[A]): Option[Tree[B]] => Tree[B] = 73 | // parent => { 74 | // val postChildren = tree.children.map(create) 75 | // new Tree(f(tree, postChildren.map(g => g(None))), parent, p => postChildren.map(g => g(Some(p)))) 76 | // } 77 | // create(this)(None) 78 | // } 79 | } 80 | -------------------------------------------------------------------------------- /frontend/src/main/scala/fdbresearch/util/Logger.scala: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // Factorized IVM (F-IVM) 4 | // 5 | // https://fdbresearch.github.io/ 6 | // 7 | // Copyright (c) 2018-2019, FDB Research Group, University of Oxford 8 | // 9 | //===----------------------------------------------------------------------===// 10 | package fdbresearch.util 11 | 12 | import org.slf4j.LoggerFactory 13 | 14 | object Logger { 15 | 16 | System.setProperty(org.slf4j.impl.SimpleLogger.DEFAULT_LOG_LEVEL_KEY, "INFO") 17 | 18 | val instance = LoggerFactory.getLogger("fdbresearch.fivm") 19 | } 20 | -------------------------------------------------------------------------------- /frontend/src/main/scala/fdbresearch/util/Utils.scala: -------------------------------------------------------------------------------- 1 | //===----------------------------------------------------------------------===// 2 | // 3 | // Factorized IVM (F-IVM) 4 | // 5 | // https://fdbresearch.github.io/ 6 | // 7 | // Copyright (c) 2018-2019, FDB Research Group, University of Oxford 8 | // 9 | //===----------------------------------------------------------------------===// 10 | package fdbresearch.util 11 | 12 | object Utils { 13 | 14 | // Fresh variables name provider 15 | private val counter = scala.collection.mutable.HashMap[String, Int]() 16 | 17 | def fresh(name: String = "x"): String = { 18 | val c = counter.getOrElse(name, 0) + 1 19 | counter.put(name, c) 20 | name + c 21 | } 22 | 23 | def freshClear(): Unit = counter.clear 24 | 25 | // Indent text by n*2 spaces (and trim trailing space) 26 | def ind(s: String, n: Int = 1): String = { 27 | val i = " " * n 28 | i + s.replaceAll("\n? *$", "").replaceAll("\n", "\n" + i) 29 | } 30 | } 31 | --------------------------------------------------------------------------------
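A note on the C++ listings above (the application_*.hpp files, csvreader.hpp, relation.hpp, stopwatch.hpp, main.cpp and ring/dictionary.hpp): the angle-bracket parts of several #include directives, template headers and std::vector element types appear to have been stripped when the sources were flattened into this dump, so fragments such as "#include 5 |" and "template 49 |" are truncation artifacts rather than the original code. The sketch below reconstructs the affected declarations from how the surrounding code uses them; the header and type choices are inferred assumptions, not text taken from the repository.

// Probable reconstruction of the declarations whose angle-bracket text was
// lost in the listings above. All choices are inferred from usage
// (gettimeofday, std::ifstream, std::stringstream, std::function, ...),
// not copied from the repository.

#include <sys/time.h>   // application_*.hpp, stopwatch.hpp: gettimeofday, struct timeval
#include <iostream>     // csvreader.hpp: std::cout, std::cerr, std::endl
#include <fstream>      // csvreader.hpp: std::ifstream, std::ofstream
#include <sstream>      // csvreader.hpp: std::stringstream
#include <string>       // csvreader.hpp, relation.hpp: std::string, std::getline
#include <vector>       // csvreader.hpp, relation.hpp, dictionary.hpp: std::vector
#include <functional>   // relation.hpp: std::function
#include <cstring>      // main.cpp: strcmp (main.cpp may also need <sched.h> and <cstdlib>)
#include <cstdint>      // dictionary.hpp: uint64_t, uint32_t

// Likely shapes of the truncated declarations:
//   std::vector<std::string> data;                      // CSVAdaptor's row buffer
//   template <class T> void readFromFile(std::vector<T>& data, const std::string& path, char delimiter);
//   template <class T> void readFromBinaryFile(std::vector<T>& data, const std::string& path);
//   template <class T> void writeToBinaryFile(std::vector<T>& data, const std::string& path);
//   template <class T> class Relation : public IRelation { ... };
//   template <class T> class EventDispatchableRelation : public Relation<T> { ... };
//   template <class T> class BatchDispatchableRelation : public Relation<T> { ... };
//   template <class Archive> void serialize(Archive& ar, const unsigned int version) const;
//   std::vector<Tuple> tuples;                           // Dictionary's storage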
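The variable-order files above (tpch_FQ4.txt, tpch_query01.txt, tpch_query03.txt, tpch_query06.txt, tpch_query10.txt, tpch_query12.txt, tpch_query14.txt) share one layout: an optional /* ... */ banner, a header line giving the number of attributes and the number of relations, one line per attribute of the form "id name type parent-id {key-set} flag" (parent-id is -1 at the root; the braces list the ids bound above the node), and finally one line per relation giving its name, an attribute id (apparently the relation's lowest node in the order) and its column list. The reader below is only an illustrative sketch of that inferred layout: the struct and function names are hypothetical, the trailing flag field is read but not interpreted, and any leading comment banner is assumed to have been stripped beforehand. It is not the parser used by the F-IVM frontend.

// Illustrative reader for the variable-order *.txt format shown above.
// The layout is inferred from the examples; all names here are hypothetical,
// and a leading /* ... */ banner is assumed to have been removed already.
#include <fstream>
#include <sstream>
#include <string>
#include <vector>

struct OrderNode {            // "id name type parent {keys} flag"
    int id;
    std::string name, type;
    int parent;               // -1 marks the root of the order
    std::vector<int> keys;    // ids listed inside the braces
    int flag;                 // trailing field; meaning not inferred here
};

struct OrderRelation {        // "NAME anchorId col, col, ..."
    std::string name;
    int anchor;
    std::vector<std::string> columns;   // column names (leading blanks not trimmed)
};

void readVariableOrder(const std::string& path,
                       std::vector<OrderNode>& nodes,
                       std::vector<OrderRelation>& relations) {
    std::ifstream in(path);
    int numAttrs = 0, numRels = 0;
    in >> numAttrs >> numRels;

    for (int i = 0; i < numAttrs; i++) {
        OrderNode n;
        std::string keySet;
        in >> n.id >> n.name >> n.type >> n.parent >> keySet >> n.flag;
        // keySet looks like "{0,1,2}"; drop the braces and split on ','
        std::stringstream ks(keySet.substr(1, keySet.size() - 2));
        std::string item;
        while (std::getline(ks, item, ','))
            if (!item.empty()) n.keys.push_back(std::stoi(item));
        nodes.push_back(n);
    }

    std::string line;
    std::getline(in, line);                 // consume the rest of the last attribute line
    while (std::getline(in, line)) {
        if (line.empty()) continue;
        std::stringstream ls(line);
        OrderRelation r;
        ls >> r.name >> r.anchor;
        std::string col;
        while (std::getline(ls, col, ','))
            r.columns.push_back(col);
        relations.push_back(r);
    }
}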
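readFromFile and the binary helpers in csvreader.hpp place implicit requirements on the tuple type T: a constructor taking the split string fields of one row plus a multiplicity (readFromFile passes 1L), a default constructor, and readFrom/writeTo methods for the binary round trip. The record below is a hypothetical illustration of that interface only; it is not one of the repository's tuple types.

// Hypothetical record type showing the interface expected by readFromFile,
// readFromBinaryFile and writeToBinaryFile in csvreader.hpp above.
#include <cstdlib>
#include <fstream>
#include <string>
#include <vector>

struct PartSuppRecord {
    long partkey = 0, suppkey = 0, availqty = 0;
    long multiplicity = 0;

    PartSuppRecord() = default;                      // needed by readFromBinaryFile

    // readFromFile constructs T from one CSV row (already split on the
    // delimiter) and a multiplicity: T tmp(row.data, 1L);
    PartSuppRecord(const std::vector<std::string>& f, long m)
        : partkey(std::atol(f[0].c_str())),
          suppkey(std::atol(f[1].c_str())),
          availqty(std::atol(f[2].c_str())),
          multiplicity(m) { }

    // Used by the binary round-trip helpers (fixed-size fields only).
    void readFrom(std::ifstream& in) { in.read(reinterpret_cast<char*>(this), sizeof(*this)); }
    void writeTo(std::ofstream& out) { out.write(reinterpret_cast<const char*>(this), sizeof(*this)); }
};

// Possible usage (paths are placeholders):
//   std::vector<PartSuppRecord> tuples;
//   readFromFile(tuples, "./datasets/tpch/partsupp.csv", '|');
//   writeToBinaryFile(tuples, "partsupp.bin");
//   readFromBinaryFile(tuples, "partsupp.bin");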
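The Dictionary type in ring/dictionary.hpp implements a small sum/product ring over keyed tuples: operator+= merges two dictionaries by adding the values of equal keys, and operator* pairs every tuple of one side with every tuple of the other, packing the two 4-byte key slots into one composite 8-byte key and multiplying the values. A small worked example of that arithmetic, assuming the definitions above and an include path of "dictionary.hpp":

// Worked example for Dictionary from ring/dictionary.hpp above.
// Values play the role of multiplicities; the include path is assumed.
#include <cstdio>
#include "dictionary.hpp"

int main() {
    Dictionary r(Tuple{ .slots = {1, 0}, .value = 2.0 });   // key 1, multiplicity 2
    Dictionary s(Tuple{ .slots = {7, 0}, .value = 3.0 });   // key 7, multiplicity 3

    Dictionary rs = r * s;     // one tuple: composite key (1,7), value 2 * 3 = 6
    Dictionary rs2 = r * s;
    rs += rs2;                 // equal composite keys, values add: (1,7) -> 12

    Dictionary half = rs * 0.5;   // scalar multiply: (1,7) -> 6
    std::printf("%zu tuple(s), value %.1f\n",
                half.tuples.size(), half.tuples[0].value);
    return 0;
}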
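RingAvg in ring_avg.hpp carries a (count, sum) pair from which an average can be derived as sum/count: += adds componentwise, the ring product multiplies the counts and cross-multiplies each sum with the other side's count, and multiplying by an integer multiplicity scales both components. A short worked example of that arithmetic, assuming the definitions above and that DOUBLE_TYPE is a plain double:

// Worked example for RingAvg from ring_avg.hpp above.
// Assumes DOUBLE_TYPE is double; the include path is assumed.
#include <cstdio>
#include "ring_avg.hpp"

int main() {
    RingAvg a = Ulift(10.0);    // lift one value: (count 1, sum 10)
    a += Ulift(20.0);           // (count 2, sum 30)

    RingAvg b = Ulift(5.0);     // (count 1, sum 5)

    // Product: (2, 30) * (1, 5) = (2*1, 30*1 + 5*2) = (2, 40)
    RingAvg ab = a * b;

    // Scaling by an integer multiplicity scales both components: (6, 120)
    RingAvg scaled = 3 * ab;

    double avg = scaled.sum / scaled.count;    // 120 / 6 = 20
    std::printf("count=%d sum=%.1f avg=%.1f\n", scaled.count, scaled.sum, avg);
    return 0;
}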