├── .gitignore ├── .gitmodules ├── CMakeLists.txt ├── LICENSE ├── README.md ├── cmake └── FindTBB.cmake ├── customize.cpp ├── evaluation ├── README.md ├── build_order_table.py ├── build_parameterstudy_table.py ├── build_pareto_table.py ├── build_scalability_table.py ├── compute_orders_for_all_partitioners.sh ├── configurable_inertialflowcutter_order.py ├── flowcutter_cut.py ├── flowcutter_order.py ├── inertialflow_cut.py ├── inertialflow_order.py ├── inertialflowcutter_cut.py ├── inertialflowcutter_order.py ├── kahip_cut.py ├── kahip_order.py ├── metis_cut.py ├── metis_order.sh ├── order_experiments.py ├── order_running_time.csv ├── parameterstudy.py ├── parameterstudy_configs.csv └── toplevel_cut_experiments.py ├── inertialflowcutter_order.py ├── query.cpp └── src ├── array_id_func.h ├── back_arc.h ├── chain.h ├── connected_components.h ├── console.cpp ├── contraction_graph.h ├── count_range.h ├── csv.h ├── cut.h ├── dijkstra.h ├── dinic.h ├── edmond_karp.h ├── fancy_input.cpp ├── fancy_input.h ├── file_utility.cpp ├── file_utility.h ├── filter.h ├── flow_cutter.h ├── flow_cutter_accelerated.h ├── flow_cutter_config.h ├── flow_cutter_config.h.gen.py ├── flow_cutter_config.h.template ├── flow_cutter_dinic.h ├── ford_fulkerson.h ├── geo_pos.cpp ├── geo_pos.h ├── heap.h ├── histogram.h ├── id_func.h ├── id_func_traits.h ├── id_multi_func.h ├── id_sort.h ├── id_string.h ├── inertial_flow.h ├── inverse_vector.h ├── io_helper.h ├── kaHIP_interface.h ├── list_graph.cpp ├── list_graph.h ├── min_fill_in.h ├── min_max.h ├── multi_arc.h ├── my_kahip.h ├── node_flow_cutter.h ├── permutation.cpp ├── permutation.h ├── preorder.h ├── range.h ├── separator.h ├── sort_arc.h ├── timer.h ├── timestamp_id_func.h ├── tiny_id_func.h ├── tree_node_ranking.h ├── triangle_count.h ├── union_find.h └── vector_io.h /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | 
-------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "extern/KaHIP"] 2 | path = extern/KaHIP 3 | url = https://github.com/schulzchristian/KaHIP.git 4 | [submodule "extern/RoutingKit"] 5 | path = extern/RoutingKit 6 | url = https://github.com/RoutingKit/RoutingKit.git 7 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required (VERSION 3.1) 2 | 3 | project (InertialFlowCutter) 4 | option (USE_KAHIP "Link KaHiP and at kahip related commands to console." ON) 5 | 6 | set (CMAKE_CXX_STANDARD 14) 7 | set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wpedantic -Werror=return-type -Wno-stringop-truncation -pthread") 8 | set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -lm") 9 | 10 | list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") 11 | find_package(TBB REQUIRED) 12 | include_directories(SYSTEM ${TBB_INCLUDE_DIRS}) 13 | 14 | # load submodules 15 | find_package(Git QUIET) 16 | if(GIT_FOUND AND EXISTS "${PROJECT_SOURCE_DIR}/.git") 17 | # Update submodules as needed 18 | option(GIT_SUBMODULE "Check submodules during build" ON) 19 | if(GIT_SUBMODULE) 20 | message(STATUS "Submodule update") 21 | execute_process(COMMAND ${GIT_EXECUTABLE} submodule update --init --recursive 22 | WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} 23 | RESULT_VARIABLE GIT_SUBMOD_RESULT) 24 | if(NOT GIT_SUBMOD_RESULT EQUAL "0") 25 | message(FATAL_ERROR "git submodule update --init failed with ${GIT_SUBMOD_RESULT}, please checkout submodules") 26 | endif() 27 | endif() 28 | endif() 29 | 30 | if(NOT EXISTS "${PROJECT_SOURCE_DIR}/extern/KaHIP/CMakeLists.txt") 31 | message(FATAL_ERROR "The submodules were not downloaded! GIT_SUBMODULE was turned off or failed. 
Please update submodules and try again.") 32 | endif() 33 | 34 | find_package (OpenMP REQUIRED) 35 | find_library (READLINE NAMES libreadline.a libreadline readline) 36 | if (NOT READLINE) 37 | message (FATAL_ERROR "readline library not found.") 38 | endif() 39 | 40 | if (USE_KAHIP) 41 | add_subdirectory (extern/KaHIP EXCLUDE_FROM_ALL) 42 | endif() 43 | 44 | set (SOURCE_FILES 45 | src/console.cpp 46 | src/fancy_input.cpp 47 | src/permutation.cpp 48 | src/list_graph.cpp 49 | src/file_utility.cpp 50 | src/geo_pos.cpp) 51 | 52 | add_custom_target ( 53 | routingkit 54 | COMMAND make 55 | WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/extern/RoutingKit 56 | ) 57 | 58 | 59 | #add_custom_target (routingkit DEPENDS ${CMAKE_SOURCE_DIR}/extern/RoutingKit/lib/libroutingkit.a ) 60 | 61 | add_executable (console ${SOURCE_FILES}) 62 | add_executable (customize customize.cpp) 63 | add_executable (query query.cpp) 64 | 65 | target_include_directories (customize PRIVATE extern/RoutingKit/include) 66 | target_link_libraries (customize ${CMAKE_SOURCE_DIR}/extern/RoutingKit/lib/libroutingkit.a OpenMP::OpenMP_CXX) 67 | add_dependencies (customize routingkit) 68 | 69 | target_include_directories (query PRIVATE extern/RoutingKit/include) 70 | target_link_libraries (query ${CMAKE_SOURCE_DIR}/extern/RoutingKit/lib/libroutingkit.a OpenMP::OpenMP_CXX) 71 | add_dependencies (query routingkit) 72 | 73 | if (USE_KAHIP) 74 | target_compile_definitions(console PUBLIC USE_KAHIP) 75 | target_link_libraries (console PUBLIC interface_static) 76 | endif() 77 | target_link_libraries(console PUBLIC ${READLINE}) 78 | target_link_libraries(console PUBLIC ${TBB_LIBRARIES}) 79 | if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin") 80 | target_link_libraries(console PUBLIC tinfo) 81 | endif() 82 | 83 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2019, 
KIT ITI Algorithmics Group 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # InertialFlowCutter 2 | C++ Implementation and evaluation scripts for the InertialFlowCutter algorithm to compute Customizable Contraction Hierarchy orders. 
3 | 4 | If you use this code in a publication, please cite our TR https://arxiv.org/abs/1906.11811 5 | 6 | All code in src/ other than flow_cutter_accelerated.h, ford_fulkerson.h was written by Ben Strasser. We modified separator.h, min_fill_in.h and inertial_flow.h. Find his code at https://github.com/kit-algo/flow-cutter. 7 | 8 | To compile the code you need a recent C++14 ready compiler (e.g. g++ version >= 8.2) and the Intel Threading Building Blocks (TBB) library. 9 | You also need the readline library https://tiswww.case.edu/php/chet/readline/rltop.html. 10 | 11 | We use KaHiP and RoutingKit as submodules. For these, you will also need OpenMP and MPI (for KaHiP, even though we don't use the distributed partitioner). 12 | If you don't want to use them, you can disable them in the CMakeLists.txt. 13 | 14 | 15 | ## Building 16 | 17 | Run 18 | 19 | ```shell 20 | mkdir build && cd build 21 | cmake -DCMAKE_BUILD_TYPE=Release .. 22 | make 23 | ``` 24 | from the top-level directory of this repository. 25 | 26 | ### Building on macOS 27 | If you want to build the RoutingKit query and customization on macOS, additional steps are required. As macOS does not support `aligned_alloc` you need to modify `extern/RoutingKit/generate_make_file` and change line 10 to 28 | ``` 29 | compiler_options = ["-Wall", "-DNDEBUG", "-march=native", "-ffast-math", "-std=c++11", "-O3", "-DROUTING_KIT_NO_ALIGNED_ALLOC"] 30 | ``` 31 | Then, run `generate_make_file` to build a suitable Makefile. 32 | 33 | When running cmake, add the flags `-DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++`. This ensures that gcc is used, as CMake fails to properly autodetect gcc on macOS. 34 | 35 | ## Computing a Contraction Order 36 | 37 | Currently, we expect the input graph in RoutingKit's format, i.e. a directory containing five files `first_out`, `head`, `travel_time`, `latitude`, `longitude` in RoutingKit's binary vector format. 
38 | The first two represent the graph in CSR format, the third contains the metric information, the fourth and fifth the geo-coordinates. 39 | You can convert from Metis and Dimacs format using tools in this repository, see evaluation/README.md for further details; or write your own converter. 40 | 41 | Run 42 | ```shell 43 | python3 inertialflowcutter_order.py 44 | ``` 45 | to obtain a CCH order and store it at `` using the default parameters suggested in the paper. 46 | The order is again in the RoutingKit binary vector format. 47 | You can instead get it in text format by uncommenting line 44. 48 | For more parallelism (yes, please), increase the thread_count parameter in line 36. 49 | You can specify the number of cutters in line 32, however more than 8 do not seem particularly useful. 50 | -------------------------------------------------------------------------------- /customize.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | 8 | int main(int argc, char **argv) { 9 | if (argc != 6) { 10 | std::cout << "Usage: " << argv[0] << " first_out head order metric cores" << std::endl; 11 | return 1; 12 | } 13 | 14 | std::string first_out_file = argv[1]; 15 | std::string head_file = argv[2]; 16 | std::string order_file = argv[3]; 17 | std::string weight_file = argv[4]; 18 | int cores = std::stoi(argv[5]); 19 | 20 | std::vector first_out = RoutingKit::load_vector(first_out_file); 21 | std::vector tail = RoutingKit::invert_inverse_vector(first_out); 22 | std::vector head = RoutingKit::load_vector(head_file); 23 | std::vector node_order = RoutingKit::load_vector(order_file); 24 | std::vector weight = RoutingKit::load_vector(weight_file); 25 | 26 | //std::cout << "read input " << first_out.size() << " " << tail.size() << " " << head.size() << " " << node_order.size() << " " << weight.size() << std::endl; 27 | RoutingKit::CustomizableContractionHierarchy 
cch(node_order, tail, head); 28 | //std::cout << "built CCH" << std::endl; 29 | RoutingKit::CustomizableContractionHierarchyMetric metric(cch, weight); 30 | //std::cout << "built metric" << std::endl; 31 | double time = -RoutingKit::get_micro_time(); 32 | if (cores > 1) { 33 | RoutingKit::CustomizableContractionHierarchyParallelization parallel_custom(cch); 34 | parallel_custom.customize(metric, static_cast(cores)); 35 | } 36 | else { 37 | metric.customize(); 38 | } 39 | time += RoutingKit::get_micro_time(); 40 | std::cout << time << std::endl; 41 | return 0; 42 | } 43 | -------------------------------------------------------------------------------- /evaluation/README.md: -------------------------------------------------------------------------------- 1 | # Running Experiments 2 | 3 | Disclaimer: The evaluation scripts are not particularly clean and contain a lot of duplication because they were hacked together quickly. 4 | 5 | ## General Setup 6 | 1. Every script references an experiment folder. Make sure the path is set appropriately before you run a script. The folder should contain all the necessary files, i.e., graphs, contraction orders, logs for the specific script. Just putting everything in one folder works fine, e.g., this folder. 7 | 8 | 2. Everything related to computing contraction orders is controlled from the one monolithic console binary. Yes, we know. It emulates a terminal that can run a bunch of commands, as specified in console.cpp. 9 | ```shell 10 | ./console interactive 11 | ``` 12 | starts the console in interactive mode. You can list all commands via the help command. Autocompletion works if you install the readline library https://tiswww.case.edu/php/chet/readline/rltop.html. 13 | 14 | In the current state of the evaluation setup, the graphs and orders are expected in RoutingKit's binary vector format. 
For every graph we expect a directory containing five files `first_out`, `head`, `travel_time`, `latitude`, `longitude` in binary vector format. 15 | The first two represent the graph in CSR format, the third contains the metric information, the fourth and fifth the geo-coordinates. 16 | Get the graphs in Dimacs format, including geo-coordinates, at http://users.diag.uniroma1.it/challenge9/download.shtml. 17 | Convert from Dimacs format to RoutingKit format by starting the console and loading the graph in one format (load_dimacs_graph, load_dimacs_geo_pos) and saving in another (save_routingkit_unweighted_graph, save_routingkit_latitude, save_routingkit_longitude). 18 | The scripts expect the four graphs from the paper in directories named col, cal, europe, usa. 19 | 20 | 3. Get python3. 21 | 22 | ## Metis 23 | For reproducing the experiments, you should get Metis from http://glaros.dtc.umn.edu/gkhome/metis/metis/download and put `ndmetis` and `gpmetis`in this directory. If you don't want to do that, just remove Metis from the list of partitioners, where appropriate. 24 | 25 | ## Top-Level Cut Experiments 26 | 1. Build toplevel cut stats using 27 | ```shell 28 | python3 toplevel_cut_experiments.py 29 | ``` 30 | Only non-existing files will be generated in the format `Graph.Partitioner.cut` for all graphs and partitioners. 31 | 32 | 2. Build pareto experiment table using 33 | ```shell 34 | python3 build_pareto_table.py 35 | ``` 36 | for the specified graph. The latex table will be written to stdout. 37 | If you did not obtain Metis or KaHiP, you should remove them from the partitioners list in both scripts. 38 | 39 | ## Contraction Order Experiments 40 | 41 | 1. 
Compute contraction orders on the europe graph with all considered partitioners (Metis, Inertial Flow, FlowCutter, KaHiP and InertialFlowCutter) by calling the super-script, which calls the scripts for the different partitioners 42 | ```shell 43 | chmod +x metis_order.sh compute_order_for_all_partitioners.sh 44 | ./compute_orders_for_all_partitioners.sh europe 45 | ``` 46 | This will take a while, especially if you run FlowCutter20/100 and KaHiP. 47 | If you did not get Metis or KaHiP, just remove them from the script. 48 | The orders are stored in RoutingKit's binary vector format, with filenames in the format `Graph.Partitioner.order`, as well as a log with a `.log`suffix. 49 | 50 | 2. Generate random test queries 51 | ```shell 52 | ./../extern/RoutingKit/bin/generate_test_queries 53 | ``` 54 | `` must be `graph.q.s`, e.g. `europe.q.s` for the europe graph. Similarly `europe.q.t` for ``. 55 | 56 | 3. Run customizations and queries: `python3 order_experiments.py` and generate the table from the paper `python3 build_order_table.py order_experiments.csv order_table.tex` 57 | If an order file is missing, the script will issue a warning and continue. You can repeatedly call the script and it will only run configurations that were not run before. If you want to rerun certain configurations, delete the corresponding lines in `order_experiments.csv` 58 | 59 | ## Parameter Study 60 | 61 | 1. Run 62 | ```shell 63 | python3 parameterstudy.py 64 | python3 build_parameterstudy_table.py parameterstudy.csv parameterstudy_table.tex 65 | ``` 66 | 67 | This will take a while since it runs 42 different configurations of InertialFlowCutter. 
68 | -------------------------------------------------------------------------------- /evaluation/build_order_table.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import pandas as pd 3 | import re 4 | 5 | def is_number(num): 6 | pattern = re.compile(r'^[-+]?[-0-9]\d*\.\d*|[-+]?\.?[0-9]\d*$') 7 | result = pattern.match(num) 8 | if result: 9 | return True 10 | else: 11 | return False 12 | 13 | 14 | df = pd.read_csv(sys.argv[1]) 15 | f = open(sys.argv[2], 'w') 16 | 17 | #Graph/Algo, search space, Arcs in CCH/Triangles/Treewidth, running times order/customization/queries 18 | print( r"""\begin{tabular}{ *{2}{c} *{4}{r} *{3}{r} *{3}{r} } 19 | \toprule 20 | & & \multicolumn{4}{c}{Search Space} & CCH & & Up. & \multicolumn{3}{c}{Running times} \\ 21 | \cmidrule(lr){3-6} \cmidrule(lr){10-12} 22 | & & \multicolumn{2}{c}{Nodes} & \multicolumn{2}{c}{Arcs {[}$\cdot10^{3}${]}} & Arcs & \#Tri. & Tw. & Order & Cust. & Query \\ 23 | \cmidrule(lr){3-4} \cmidrule(lr){5-6} 24 | & & Avg. & Max.& Avg. & Max. & {[}$\cdot10^{6}${]} & {[}$\cdot10^{6}${]} & Bd. 
& {[}s{]} & {[}ms{]} & {[}$\mu$s{]} 25 | """, file=f) 26 | highlight_min = True 27 | columns = ['partitioner', 28 | 'average_elimination_tree_depth', 'elimination_tree_height', 'average_arcs_in_search_space', 'maximum_arcs_in_search_space', 29 | 'super_graph_upward_arc_count', 'number_of_triangles_in_super_graph', 'upper_tree_width_bound', 30 | 'order_running_time', 'median_customization_time', 'avg_query_time'] 31 | 32 | 33 | df["super_graph_upward_arc_count"] = df["super_graph_upward_arc_count"].map(lambda x : x/100000) 34 | df["number_of_triangles_in_super_graph"] = df["number_of_triangles_in_super_graph"].map(lambda x : x/100000) 35 | df["average_arcs_in_search_space"] = df["average_arcs_in_search_space"].map(lambda x : x/1000) 36 | df["maximum_arcs_in_search_space"] = df["maximum_arcs_in_search_space"].map(lambda x : x/1000) 37 | 38 | for column in columns: 39 | if df[column].dtype == 'float': 40 | df[column] = df[column].map(lambda x : round(x,1)) 41 | 42 | graph_names = {"col" : "Col", "cal" : "Cal", "europe" : "Eur" , "usa" : "USA"} 43 | df["graph"] = df["graph"].map(graph_names) 44 | 45 | #Why did Ben call kahip_v0_71 K0.61? 
46 | partitioner_names = {"flowcutter3" : "F3", "flowcutter20" : "F20", "flowcutter100" : "F100", "inertial_flow" : "I", 47 | "metis" : "M", "inertialflowcutter" : "IFC", "kahip_v0_71" : "K0.61", "kahip_v1_00_cut" : "K1.00", "kahip_v2_11" : "K2.11", 48 | "inertialflowcutter4" : "IFC4", "inertialflowcutter8" : "IFC8", "inertialflowcutter12" : "IFC12", "inertialflowcutter16" : "IFC16"} 49 | df["partitioner"] = df["partitioner"].map(partitioner_names) 50 | 51 | 52 | graph = "nonsense" 53 | print(r"\\ ", end='', file=f) 54 | for row in range(len(df)): 55 | new_graph = False 56 | if df.loc[row, "graph"] != graph: 57 | graph = df.loc[row,"graph"] 58 | new_graph = True 59 | gdf = df[df["graph"] == graph].copy() 60 | num_algos = len(gdf.index) 61 | if new_graph: 62 | print(r"\midrule", file=f) 63 | print(r"\multirow{" + str(num_algos) + r"}{*}{\begin{sideways}" + graph + r" \end{sideways}}", end='', file=f) 64 | #for i in range(num_algos): 65 | for column in columns: 66 | print(' & ', end='', file=f) 67 | value = gdf.loc[row, column] 68 | formatted_value = value 69 | 70 | if is_number(str(value)): 71 | comp = re.compile('([0-9]{3}(?=[0-9]))') 72 | formatted_value = re.sub( comp, '\\g<0>,\\\\', str(value)[::-1] )[::-1] 73 | 74 | if column != 'partitioner' and highlight_min and value == gdf[column].min() and column in columns[1:]: 75 | print(r"\bfseries{", formatted_value, r"}", sep='', end='', file=f) 76 | else: 77 | print(formatted_value, end='', file=f) 78 | print('\n', r"\\", sep='', file=f) 79 | #row += 1 80 | print(r"\bottomrule", file=f) 81 | print(r"\end{tabular}", file=f) 82 | f.close() 83 | -------------------------------------------------------------------------------- /evaluation/build_parameterstudy_table.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import re 3 | import pandas as pd 4 | 5 | def is_number(num): 6 | pattern = re.compile(r'^[-+]?[-0-9]\d*\.\d*|[-+]?\.?[0-9]\d*$') 7 | result = 
pattern.match(num) 8 | if result: 9 | return True 10 | else: 11 | return False 12 | 13 | color = "cyan" 14 | def get_color(value, column): 15 | col_min = column.min() 16 | col_max = column.max() 17 | if col_max == col_min: 18 | return f"{color}!{100}" 19 | bucket_size = (col_max - col_min) / 10 20 | bucket = (value - col_min) // bucket_size 21 | bucket = min(bucket, 10) 22 | return f"{color}!{100 - bucket*10}" 23 | 24 | df = pd.read_csv(sys.argv[1]) 25 | 26 | f = open(sys.argv[2], 'w') 27 | 28 | #config, search space, Arcs in CCH/Triangles/Treewidth, running times order/customization/queries 29 | print( r"""\begin{tabular}{ *{4}{c} *{4}{r} *{3}{r} *{3}{r} } 30 | \toprule 31 | & & & & \multicolumn{4}{c}{Search Space} & CCH & & Up. & \multicolumn{3}{c}{Running times} \\ 32 | \cmidrule(lr){5-8} \cmidrule(lr){12-14} 33 | \multicolumn{4}{c}{Configuration} & \multicolumn{2}{c}{Nodes} & \multicolumn{2}{c}{Arcs {[}$\cdot10^{3}${]}} & Arcs & \#Tri. & Tw. & Order & Cust. & Query \\ 34 | \cmidrule(lr){1-4} \cmidrule(lr){5-6} \cmidrule(lr){7-8} 35 | $\alpha$ & $\delta$ & $\gamma_a$ & $\gamma_o$ & Avg. & Max.& Avg. & Max. & {[}$\cdot10^{6}${]} & {[}$\cdot10^{6}${]} & Bd. 
& {[}s{]} & {[}ms{]} & {[}$\mu$s{]}\\ 36 | \midrule 37 | """, file=f) 38 | highlight_min = True 39 | heatmap = True 40 | columns = ['initial_assimilated_fraction', 'bulk_step_fraction', 'bulk_assimilation_threshold', 'bulk_assimilation_order_threshold', 41 | 'average_elimination_tree_depth', 'elimination_tree_height', 'average_arcs_in_search_space', 'maximum_arcs_in_search_space', 42 | 'super_graph_upward_arc_count', 'number_of_triangles_in_super_graph', 'upper_tree_width_bound', 43 | 'order_running_time', 'median_customization_time', 'avg_query_time'] 44 | 45 | df["super_graph_upward_arc_count"] = df["super_graph_upward_arc_count"].map(lambda x : x/100000) 46 | df["number_of_triangles_in_super_graph"] = df["number_of_triangles_in_super_graph"].map(lambda x : x/100000) 47 | df["average_arcs_in_search_space"] = df["average_arcs_in_search_space"].map(lambda x : x/1000) 48 | df["maximum_arcs_in_search_space"] = df["maximum_arcs_in_search_space"].map(lambda x : x/1000) 49 | 50 | for column in columns[4:]: 51 | if df[column].dtype == 'float': 52 | df[column] = df[column].map(lambda x : round(x,1)) 53 | 54 | for row in range(0, len(df.index)): 55 | for column in columns: 56 | if column != columns[0]: 57 | print(' & ', end='', file=f) 58 | value = df.loc[row, column] 59 | 60 | formatted_value = value 61 | if column == "order_running_time" or column == "median_customization_time": 62 | formatted_value = int(round(value,0)) 63 | if is_number(str(value)): 64 | comp = re.compile('([0-9]{3}(?=[0-9]))') 65 | formatted_value = re.sub( comp, '\\g<0>,\\\\', str(formatted_value)[::-1] )[::-1] 66 | 67 | if heatmap and column in columns[4:]: 68 | print(r"\cellcolor{", get_color(value, df[column]), r"}", sep='', end='', file=f) 69 | if highlight_min and value == df[column].min() and column in columns[4:]: 70 | print(r"\bfseries{", formatted_value, r"}", sep='', end='', file=f) 71 | else: 72 | print(formatted_value, end='', file=f) 73 | print(r"\\", file=f) 74 | 75 | 
print(r"\bottomrule", file=f) 76 | print(r"\end{tabular}", file=f) 77 | -------------------------------------------------------------------------------- /evaluation/build_pareto_table.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import pandas as pd 3 | import re 4 | import sys 5 | from io import StringIO 6 | from itertools import * 7 | 8 | experiments_folder = "" 9 | #experiments_folder = "./" 10 | 11 | def output_file(G, P): 12 | return experiments_folder + G + "." + P + ".cut" 13 | 14 | def format_bool(value): 15 | if value: 16 | return r"$\bullet$" 17 | else: 18 | return r"$\circ$" 19 | 20 | G = sys.argv[1] 21 | partitioners = ["inertialflowcutter4", "inertialflowcutter8", "inertialflowcutter12", "flowcutter3", "flowcutter20", "kahip_v2_11", "metis", "inertial_flow"] 22 | imbalances = [0.0, 0.01, 0.03, 0.05, 0.1, 0.2, 0.3, 0.5, 0.7, 0.9] 23 | partitioner_names = {"flowcutter3" : "F3", "flowcutter20" : "F20", "flowcutter100" : "F100", "inertial_flow" : "I", 24 | "metis" : "M", "inertialflowcutter" : "IFC", "kahip_v0_71" : "K0.61", "kahip_v1_00_cut" : "K1.00", 25 | "kahip_v2_11" : "K2.11", 26 | "inertialflowcutter4" : "IFC4", "inertialflowcutter8" : "IFC8", "inertialflowcutter12" : "IFC12"} 27 | data = {P: pd.read_csv(output_file(G, P)) for P in partitioners} 28 | 29 | def is_number(num): 30 | pattern = re.compile(r'^[-+]?[-0-9]\d*\.\d*|[-+]?\.?[0-9]\d*$') 31 | result = pattern.match(num) 32 | if result: 33 | return True 34 | else: 35 | return False 36 | 37 | def make_header(heading1, heading2): 38 | def midrule(start, end): 39 | return "\cmidrule(lr){{{}-{}}}".format(start, end) 40 | span = len(partitioners) 41 | header = f"\multirow{{2}}{{*}}{{\\rotatebox[origin=c]{{90}}{{$\max\epsilon$}}}} & \multicolumn{{{span}}}{{c}}{{{heading1}}} & \multicolumn{{{span}}}{{c}}{{{heading2}}}\\\\" + "\n" 42 | header += midrule(2, 2 + span - 1) + " " + midrule(2 + span, 2 + span + span - 1) + "\n" 43 | for P in 
partitioners: 44 | header += f"& {partitioner_names[P]}" 45 | for P in partitioners: 46 | header += f"& {partitioner_names[P]}" 47 | header += "\\\\\n" 48 | header += r"\midrule" 49 | return header 50 | 51 | def table_content(key1, format1, key2, format2): 52 | 53 | 54 | output = StringIO() 55 | for eps in imbalances: 56 | unbalanced = set() 57 | output.write(f"{int(eps * 100)}") 58 | for i,P in enumerate(partitioners): 59 | value = data[P][key1][imbalances.index(eps)] 60 | if key1 == "achieved_epsilon" and value > eps: 61 | #output.write("& " + r"\textcolor{red}{\cancel{" + f"{100*value:.3f}" + r"}}") 62 | output.write("& " + r"\textcolor{red}{\cancel{" + f"{format1(value)}" + r"}}") 63 | unbalanced.add(i) 64 | else: 65 | output.write(f"& {format1(value)}") 66 | for i,P in enumerate(partitioners): 67 | value = data[P][key2][imbalances.index(eps)] 68 | if i in unbalanced: 69 | output.write("& " + r"\textcolor{red}{\cancel{" + f"{format2(value)}" + r"}}") 70 | else: 71 | output.write(f"& {format2(value)}") 72 | output.write('\\\\\n') 73 | return output.getvalue() 74 | 75 | col_str = r"R{\mycolwidth}" * len(partitioners) * 2 76 | 77 | print( 78 | r""" 79 | \newcolumntype{R}[1]{>{\raggedleft\arraybackslash}p{#1}} 80 | \setlength\arraycolsep{50pt} 81 | \setlength\tabcolsep{2pt} 82 | 83 | \setlength\tabcolsep{3pt} 84 | \setlength\mycolwidth{0.74cm} 85 | \setlength\mysmallcolwidth{0.8cm} 86 | 87 | \begin{tabular}{r""", end='') 88 | print(col_str, end='') 89 | print("}\n\\toprule") 90 | print(make_header(r"Achieved $\epsilon$ [\%]", r"Cut Size")) 91 | print(table_content("achieved_epsilon", lambda x: r"${<0.1}$" if round(100*x,1) == 0.0 and x != 0.0 else f"{100*x:.1f}", "cut_size", lambda x: x)) 92 | print(r"\midrule") 93 | print(make_header(r"Are sides connected?", r"Running Time [s]")) 94 | print(table_content("connected", format_bool, "running_time", lambda x: f"{x:.1f}")) 95 | print(r"\bottomrule") 96 | print(r"\end{tabular}") 97 | 
-------------------------------------------------------------------------------- /evaluation/build_scalability_table.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import pandas as pd 3 | import re 4 | 5 | def format_running_time(r): 6 | r = r.replace("running time : ", "") 7 | r = r.replace("musec", "") 8 | return round(int(r) / 1000000, 1) 9 | 10 | graph_names = {"col" : "Col", "cal" : "Cal", "europe" : "Eur" , "usa" : "USA"} 11 | df = pd.read_csv(sys.argv[1]) 12 | df["running_time_sec"] = df["running_time_musec"].map(format_running_time) 13 | df["graph"] = df["graph"].map(graph_names) 14 | 15 | 16 | print(r"\begin{tabular}{ll *{5}{r}}") 17 | print(r"\toprule") 18 | 19 | print(r"\multirow{2}{*}{Graph} & & \multicolumn{5}{c}{Cores} \\") 20 | print(r"\cmidrule(lr){3-7}") 21 | print(r" & & ",end='') 22 | print(" & ".join(map(str,[1,2,4,8,16]))) 23 | print(r"\\") 24 | 25 | 26 | 27 | for G in ["Col","Cal","Eur","USA"]: 28 | print(r"\midrule") 29 | print(r"\multirow{2}{*}{" + G + r"}", end='') 30 | print(" & Time [s]", end='') 31 | for T in [1,2,4,8,16]: 32 | time = float(df[(df.graph==G) & (df.cores==T)].running_time_sec) 33 | print(" &", time, end='') #slow but who cares 34 | print(r"\\") 35 | seq = time = float(df[(df.graph==G) & (df.cores==1)].running_time_sec) 36 | print(" & Speedup", end='') 37 | for T in [1,2,4,8,16]: 38 | time = float(df[(df.graph==G) & (df.cores==T)].running_time_sec) 39 | print(" &", round(seq / time, 1), end='') 40 | print(r"\\") 41 | print(r"\bottomrule") 42 | print(r"\end{tabular}") 43 | -------------------------------------------------------------------------------- /evaluation/compute_orders_for_all_partitioners.sh: -------------------------------------------------------------------------------- 1 | G=$1 2 | ./metis_order.sh $G >> order_running_time.csv 3 | for cutters in 4 8 12 16; do python3 inertialflowcutter_order.py $G $cutters >> order_running_time.csv; done 4 | python3 
inertialflow_order.py $G >> order_running_time.csv 5 | for cutters in 3 20 100; do python3 flowcutter_order.py $G $cutters >> order_running_time.csv; done 6 | python3 kahip_order.py $G >> order_running_time.csv 7 | -------------------------------------------------------------------------------- /evaluation/configurable_inertialflowcutter_order.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | def save_inertialflowcutter_cch_order(config, console, graph_path, order_path, info_log): 4 | args = [console] 5 | 6 | args.append("load_routingkit_unweighted_graph") 7 | args.append(graph_path + "first_out") 8 | args.append(graph_path + "head") 9 | 10 | args.append("load_routingkit_longitude") 11 | args.append(graph_path + "longitude") 12 | args.append("load_routingkit_latitude") 13 | args.append(graph_path + "latitude") 14 | 15 | args.append("add_back_arcs") 16 | args.append("remove_multi_arcs") 17 | args.append("remove_loops") 18 | 19 | args.append("flow_cutter_set") 20 | args.append("random_seed") 21 | args.append("5489") 22 | args.append("reorder_nodes_at_random") 23 | args.append("reorder_nodes_in_preorder") 24 | args.append("sort_arcs") 25 | 26 | args.append("flow_cutter_set") 27 | args.append("max_cut_size") 28 | args.append("100000000") 29 | 30 | args.append("flow_cutter_set") 31 | args.append("distance_ordering_cutter_count") 32 | args.append(str(config.hop_distance_cutters)) 33 | 34 | args.append("flow_cutter_set") 35 | args.append("geo_pos_ordering_cutter_count") 36 | args.append(str(config.geo_distance_cutters)) 37 | 38 | args.append("flow_cutter_set") 39 | args.append("bulk_assimilation_threshold") 40 | args.append(str(config.bulk_assimilation_threshold)) 41 | 42 | args.append("flow_cutter_set") 43 | args.append("bulk_assimilation_order_threshold") 44 | args.append(str(config.bulk_assimilation_order_threshold)) 45 | 46 | args.append("flow_cutter_set") 47 | args.append("bulk_step_fraction") 48 | 
args.append(str(config.bulk_step_fraction)) 49 | 50 | args.append("flow_cutter_set") 51 | args.append("initial_assimilated_fraction") 52 | args.append(str(config.initial_assimilated_fraction)) 53 | 54 | args.append("report_time") 55 | args.append("reorder_nodes_in_accelerated_flow_cutter_cch_order") 56 | args.append("do_not_report_time") 57 | args.append("examine_chordal_supergraph") 58 | 59 | #args.append("save_permutation_of_nodes_since_last_file_load") 60 | args.append("save_routingkit_node_permutation_since_last_load") 61 | args.append(order_path) 62 | with open(info_log, 'w') as f: 63 | subprocess.run(args, universal_newlines=True, stdout=f) 64 | -------------------------------------------------------------------------------- /evaluation/flowcutter_cut.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import io 3 | import pandas as pd 4 | 5 | def flowcutter_pareto(console, graph_path, cutters): 6 | args = [console] 7 | 8 | args.append("load_routingkit_unweighted_graph") 9 | args.append(graph_path + "first_out") 10 | args.append(graph_path + "head") 11 | 12 | args.append("add_back_arcs") 13 | args.append("remove_multi_arcs") 14 | args.append("remove_loops") 15 | 16 | args.append("flow_cutter_set") 17 | args.append("random_seed") 18 | args.append("5489") 19 | args.append("reorder_nodes_at_random") 20 | args.append("reorder_nodes_in_preorder") 21 | args.append("sort_arcs") 22 | 23 | args.append("flow_cutter_set") 24 | args.append("ReportCuts") 25 | args.append("no") 26 | 27 | args.append("flow_cutter_set") 28 | args.append("cutter_count") 29 | args.append(str(cutters)) 30 | 31 | args.append("flow_cutter_enum_cuts") 32 | args.append("-") 33 | 34 | 35 | output = subprocess.check_output(args, universal_newlines=True) 36 | rename = {' time' : 'time'} 37 | return pd.read_csv(io.StringIO(output)).rename(rename, axis='columns') 38 | 39 | 40 | 
-------------------------------------------------------------------------------- /evaluation/flowcutter_order.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import re 3 | import sys 4 | 5 | experiments_folder = "" 6 | 7 | binary_path = "./../build/" 8 | console = binary_path + "console" 9 | 10 | def graph_path(G): 11 | return experiments_folder + G + "/" 12 | 13 | def order_path(G,ncutters): 14 | return experiments_folder + G + ".flowcutter" + str(ncutters) + ".order" 15 | 16 | def log_path(G,ncutters): 17 | return order_path(G,ncutters) + ".log" 18 | 19 | def save_flowcutter_cch_order(console, ncutters, graph_path, order_path, info_log): 20 | args = [console] 21 | 22 | args.append("load_routingkit_unweighted_graph") 23 | args.append(graph_path + "first_out") 24 | args.append(graph_path + "head") 25 | 26 | args.append("add_back_arcs") 27 | args.append("remove_multi_arcs") 28 | args.append("remove_loops") 29 | 30 | args.append("flow_cutter_set") 31 | args.append("random_seed") 32 | args.append("5489") 33 | 34 | args.append("reorder_nodes_at_random") 35 | args.append("reorder_nodes_in_preorder") 36 | args.append("sort_arcs") 37 | 38 | args.append("flow_cutter_set") 39 | args.append("cutter_count") 40 | args.append(str(ncutters)) 41 | 42 | args.append("report_time") 43 | args.append("reorder_nodes_in_flow_cutter_cch_order") 44 | args.append("do_not_report_time") 45 | args.append("examine_chordal_supergraph") 46 | 47 | #args.append("save_permutation_of_nodes_since_last_file_load") 48 | args.append("save_routingkit_node_permutation_since_last_load") 49 | args.append(order_path) 50 | 51 | with open(info_log, 'w') as f: 52 | subprocess.run(args, universal_newlines=True, stdout=f) 53 | 54 | 55 | def parse_order_log(log_path): 56 | log = open(log_path) 57 | row_dict = dict() 58 | for l in log: 59 | m = re.match(r"^\s*([a-zA-Z_ ]+) : ([0-9.]+)[^0-9]*$", l) 60 | assert(m) 61 | name = m.group(1).replace(" ", "_") 
62 | value = m.group(2) 63 | if '.' in value: 64 | value = float(value) 65 | else: 66 | value = int(value) 67 | if "running_time" in name: 68 | name = "order_running_time" 69 | value /= 1000000 #in seconds 70 | row_dict[name] = value 71 | log.close() 72 | return row_dict 73 | 74 | def main(): 75 | G = sys.argv[1] 76 | q = int(sys.argv[2]) 77 | P = "flowcutter" + str(q) 78 | save_flowcutter_cch_order(console, q, graph_path(G), order_path(G,q), log_path(G,q)) 79 | order_time = parse_order_log(log_path(G,q))["order_running_time"] 80 | print(P, G, round(order_time, 3), sep=',') 81 | 82 | if __name__ == '__main__': 83 | main() 84 | -------------------------------------------------------------------------------- /evaluation/inertialflow_cut.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import re 3 | 4 | def inertialflow_cut(console, graph_path, balance): 5 | args = [console] 6 | 7 | args.append("load_routingkit_unweighted_graph") 8 | args.append(graph_path + "first_out") 9 | args.append(graph_path + "head") 10 | 11 | args.append("load_routingkit_longitude") 12 | args.append(graph_path + "longitude") 13 | args.append("load_routingkit_latitude") 14 | args.append(graph_path + "latitude") 15 | 16 | args.append("add_back_arcs") 17 | args.append("remove_multi_arcs") 18 | args.append("remove_loops") 19 | 20 | args.append("flow_cutter_set") 21 | args.append("random_seed") 22 | args.append("5489") 23 | args.append("reorder_nodes_at_random") 24 | args.append("reorder_nodes_in_preorder") 25 | args.append("sort_arcs") 26 | 27 | args.append("report_time") 28 | args.append("inertial_flow_cut") 29 | args.append(str(balance)) 30 | args.append("do_not_report_time") 31 | args.append("examine_node_color_cut") 32 | 33 | output = subprocess.check_output(args, universal_newlines=True) 34 | row_dict = {} 35 | for l in output.splitlines(): 36 | m = re.match(r"^\s*([a-zA-Z_ ]+) : ([0-9.]+)[^0-9]*$", l) 37 | assert(m) 38 | name = 
m.group(1).replace(" ", "_") 39 | value = m.group(2) 40 | if '.' in value: 41 | value = float(value) 42 | else: 43 | value = int(value) 44 | if "running_time" in name: 45 | value /= 1000000 #in seconds 46 | row_dict[name] = value 47 | return row_dict 48 | -------------------------------------------------------------------------------- /evaluation/inertialflow_order.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import re 3 | import sys 4 | 5 | experiments_folder = "" 6 | 7 | binary_path = "./../build/" 8 | console = binary_path + "console" 9 | 10 | def graph_path(G): 11 | return experiments_folder + G + "/" 12 | 13 | def order_path(G): 14 | return experiments_folder + G + ".inertial_flow.order" 15 | 16 | def log_path(G): 17 | return order_path(G) + ".log" 18 | 19 | def save_inertial_flow_cch_order(console, graph_path, order_path, info_log): 20 | args = [console] 21 | 22 | args.append("load_routingkit_unweighted_graph") 23 | args.append(graph_path + "first_out") 24 | args.append(graph_path + "head") 25 | 26 | args.append("load_routingkit_longitude") 27 | args.append(graph_path + "longitude") 28 | args.append("load_routingkit_latitude") 29 | args.append(graph_path + "latitude") 30 | 31 | args.append("flow_cutter_set") 32 | args.append("random_seed") 33 | args.append("5489") 34 | 35 | args.append("add_back_arcs") 36 | args.append("remove_multi_arcs") 37 | args.append("remove_loops") 38 | 39 | args.append("reorder_nodes_at_random") 40 | args.append("reorder_nodes_in_preorder") 41 | args.append("sort_arcs") 42 | 43 | args.append("report_time") 44 | args.append("reorder_nodes_in_inertial_flow_ford_fulkerson_nested_dissection_order") 45 | args.append("0.2") 46 | args.append("do_not_report_time") 47 | args.append("examine_chordal_supergraph") 48 | 49 | args.append("save_routingkit_node_permutation_since_last_load") 50 | args.append(order_path) 51 | 52 | with open(info_log, 'w') as f: 53 | subprocess.run(args, 
universal_newlines=True, stdout=f) 54 | 55 | 56 | def parse_order_log(log_path): 57 | log = open(log_path) 58 | row_dict = dict() 59 | for l in log: 60 | m = re.match(r"^\s*([a-zA-Z_ ]+) : ([0-9.]+)[^0-9]*$", l) 61 | assert(m) 62 | name = m.group(1).replace(" ", "_") 63 | value = m.group(2) 64 | if '.' in value: 65 | value = float(value) 66 | else: 67 | value = int(value) 68 | if "running_time" in name: 69 | name = "order_running_time" 70 | value /= 1000000 #in seconds 71 | row_dict[name] = value 72 | log.close() 73 | return row_dict 74 | 75 | def main(): 76 | G = sys.argv[1] 77 | P = "inertial_flow" 78 | save_inertial_flow_cch_order(console, graph_path(G), order_path(G), log_path(G)) 79 | order_time = parse_order_log(log_path(G))["order_running_time"] 80 | print(P, G, round(order_time, 3), sep=',') 81 | 82 | if __name__ == '__main__': 83 | main() 84 | -------------------------------------------------------------------------------- /evaluation/inertialflowcutter_cut.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import io 3 | import pandas as pd 4 | 5 | def inertialflowcutter_pareto(console, graph_path, cutters): 6 | args = [console] 7 | 8 | args.append("load_routingkit_unweighted_graph") 9 | args.append(graph_path + "first_out") 10 | args.append(graph_path + "head") 11 | 12 | args.append("load_routingkit_longitude") 13 | args.append(graph_path + "longitude") 14 | args.append("load_routingkit_latitude") 15 | args.append(graph_path + "latitude") 16 | 17 | args.append("add_back_arcs") 18 | args.append("remove_multi_arcs") 19 | args.append("remove_loops") 20 | 21 | args.append("flow_cutter_set") 22 | args.append("random_seed") 23 | args.append("5489") 24 | args.append("reorder_nodes_at_random") 25 | args.append("reorder_nodes_in_preorder") 26 | args.append("sort_arcs") 27 | 28 | args.append("flow_cutter_set") 29 | args.append("geo_pos_ordering_cutter_count") 30 | args.append(str(cutters)) 31 | 32 | 
args.append("flow_cutter_set") 33 | args.append("ReportCuts") 34 | args.append("no") 35 | 36 | args.append("flow_cutter_accelerated_enum_cuts") 37 | args.append("-") 38 | 39 | 40 | 41 | output = subprocess.check_output(args, universal_newlines=True) 42 | rename = {' time' : 'time'} 43 | return pd.read_csv(io.StringIO(output)).rename(rename, axis='columns') 44 | -------------------------------------------------------------------------------- /evaluation/inertialflowcutter_order.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | import subprocess 4 | import sys 5 | import re 6 | 7 | experiments_folder = "" 8 | 9 | binary_path = "./../build/" 10 | console = binary_path + "console" 11 | 12 | def order_path(G,P): 13 | return experiments_folder + G + "." + P + ".order" 14 | 15 | def graph_path(G): 16 | return experiments_folder + G + "/" 17 | 18 | def log_path(G,P): 19 | return order_path(G,P) + ".log" 20 | 21 | def parse_order_log(G,P): 22 | log = open(log_path(G,P)) 23 | row_dict = dict() 24 | for l in log: 25 | m = re.match(r"^\s*([a-zA-Z_ ]+) : ([0-9.]+)[^0-9]*$", l) 26 | assert(m) 27 | name = m.group(1).replace(" ", "_") 28 | value = m.group(2) 29 | if '.' 
in value: 30 | value = float(value) 31 | else: 32 | value = int(value) 33 | if "running_time" in name: 34 | name = "order_running_time" 35 | value /= 1000000 #in seconds 36 | row_dict[name] = value 37 | log.close() 38 | return row_dict 39 | 40 | def save_inertialflowcutter_cch_order(cutters, G): 41 | P = "inertialflowcutter" + str(cutters) 42 | 43 | args = [console] 44 | args.append("load_routingkit_unweighted_graph") 45 | args.append(graph_path(G) + "first_out") 46 | args.append(graph_path(G) + "head") 47 | 48 | args.append("load_routingkit_longitude") 49 | args.append(graph_path(G) + "longitude") 50 | args.append("load_routingkit_latitude") 51 | args.append(graph_path(G) + "latitude") 52 | 53 | args.append("add_back_arcs") 54 | args.append("remove_multi_arcs") 55 | args.append("remove_loops") 56 | 57 | args.append("flow_cutter_set") 58 | args.append("random_seed") 59 | args.append("5489") 60 | args.append("reorder_nodes_at_random") 61 | args.append("reorder_nodes_in_preorder") 62 | args.append("sort_arcs") 63 | 64 | args.append("flow_cutter_set") 65 | args.append("geo_pos_ordering_cutter_count") 66 | args.append(str(cutters)) 67 | 68 | args.append("report_time") 69 | args.append("reorder_nodes_in_accelerated_flow_cutter_cch_order") 70 | args.append("do_not_report_time") 71 | args.append("examine_chordal_supergraph") 72 | 73 | args.append("save_routingkit_node_permutation_since_last_load") 74 | args.append(order_path(G,P)) 75 | with open(log_path(G,P), 'w') as f: 76 | subprocess.run(args, universal_newlines=True, stdout=f) 77 | 78 | 79 | def main(): 80 | G = sys.argv[1] 81 | cutters = int(sys.argv[2]) 82 | P = "inertialflowcutter" + str(cutters) 83 | save_inertialflowcutter_cch_order(cutters, G) 84 | order_time = parse_order_log(G,P)["order_running_time"] 85 | print(P, G, round(order_time, 3), sep=',') 86 | 87 | if __name__ == '__main__': 88 | main() 89 | -------------------------------------------------------------------------------- /evaluation/kahip_cut.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import subprocess 4 | import os, re 5 | 6 | def kahip_cut(console, graph_path, balance): 7 | args = [console] 8 | 9 | args.append("load_routingkit_unweighted_graph") 10 | args.append(graph_path + "first_out") 11 | args.append(graph_path + "head") 12 | 13 | args.append("load_routingkit_longitude") 14 | args.append(graph_path + "longitude") 15 | args.append("load_routingkit_latitude") 16 | args.append(graph_path + "latitude") 17 | 18 | args.append("add_back_arcs") 19 | args.append("remove_multi_arcs") 20 | args.append("remove_loops") 21 | 22 | args.append("flow_cutter_set") 23 | args.append("random_seed") 24 | args.append("5489") 25 | args.append("reorder_nodes_at_random") 26 | args.append("reorder_nodes_in_preorder") 27 | args.append("sort_arcs") 28 | 29 | args.append("report_time") 30 | args.append("kahip_cut") 31 | args.append(str(balance)) 32 | args.append("do_not_report_time") 33 | args.append("examine_node_color_cut") 34 | 35 | output = subprocess.check_output(args, universal_newlines=True) 36 | row_dict = {} 37 | for l in output.splitlines(): 38 | m = re.match(r"^\s*([a-zA-Z_ ]+) : ([0-9.]+)[^0-9]*$", l) 39 | assert(m) 40 | name = m.group(1).replace(" ", "_") 41 | value = m.group(2) 42 | if '.' 
in value: 43 | value = float(value) 44 | else: 45 | value = int(value) 46 | if "running_time" in name: 47 | value /= 1000000 #in seconds 48 | row_dict[name] = value 49 | return row_dict 50 | -------------------------------------------------------------------------------- /evaluation/kahip_order.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import re 3 | import sys 4 | 5 | experiments_folder = "" 6 | 7 | binary_path = "./../build/" 8 | console = binary_path + "console" 9 | 10 | def graph_path(G): 11 | return experiments_folder + G + "/" 12 | 13 | def order_path(G): 14 | return experiments_folder + G + ".kahip_v2_11.order" 15 | 16 | def log_path(G): 17 | return order_path(G) + ".log" 18 | 19 | def save_kahip_cch_order(console, graph_path, order_path, info_log): 20 | args = [console] 21 | 22 | args.append("load_routingkit_unweighted_graph") 23 | args.append(graph_path + "first_out") 24 | args.append(graph_path + "head") 25 | 26 | args.append("add_back_arcs") 27 | args.append("remove_multi_arcs") 28 | args.append("remove_loops") 29 | args.append("sort_arcs") 30 | 31 | args.append("flow_cutter_set") 32 | args.append("random_seed") 33 | args.append("5489") 34 | 35 | args.append("report_time") 36 | args.append("reorder_nodes_in_kahip_nested_dissection_order") 37 | args.append("0.2") #maximum imbalance 38 | args.append("do_not_report_time") 39 | args.append("examine_chordal_supergraph") 40 | 41 | args.append("save_routingkit_node_permutation_since_last_load") 42 | args.append(order_path) 43 | 44 | with open(info_log, 'w') as f: 45 | subprocess.run(args, universal_newlines=True, stdout=f) 46 | 47 | def parse_order_log(log_path): 48 | log = open(log_path) 49 | row_dict = dict() 50 | for l in log: 51 | m = re.match(r"^\s*([a-zA-Z_ ]+) : ([0-9.]+)[^0-9]*$", l) 52 | assert(m) 53 | name = m.group(1).replace(" ", "_") 54 | value = m.group(2) 55 | if '.' 
in value: 56 | value = float(value) 57 | else: 58 | value = int(value) 59 | if "running_time" in name: 60 | name = "order_running_time" 61 | value /= 1000000 #in seconds 62 | row_dict[name] = value 63 | log.close() 64 | return row_dict 65 | 66 | def main(): 67 | P="kahip_v2_11" 68 | G = sys.argv[1] 69 | save_kahip_cch_order(console, graph_path(G), order_path(G), log_path(G)) 70 | order_time = parse_order_log(log_path(G))["order_running_time"] 71 | print(P, G, round(order_time, 3), sep=',') 72 | 73 | if __name__ == '__main__': 74 | main() 75 | -------------------------------------------------------------------------------- /evaluation/metis_cut.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import re, os 3 | 4 | def graph_to_metis(console, graph_path, out_path): 5 | args = [console] 6 | 7 | args.append("load_routingkit_unweighted_graph") 8 | args.append(graph_path + "first_out") 9 | args.append(graph_path + "head") 10 | 11 | args.append("load_routingkit_longitude") 12 | args.append(graph_path + "longitude") 13 | args.append("load_routingkit_latitude") 14 | args.append(graph_path + "latitude") 15 | 16 | args.append("assign_constant_arc_weights") 17 | args.append("1") 18 | 19 | args.append("add_back_arcs") 20 | args.append("remove_multi_arcs") 21 | args.append("remove_loops") 22 | 23 | args.append("flow_cutter_set") 24 | args.append("random_seed") 25 | args.append("5489") 26 | args.append("reorder_nodes_at_random") 27 | args.append("reorder_nodes_in_preorder") 28 | args.append("sort_arcs") 29 | 30 | args.append("save_metis_graph") 31 | args.append(out_path) 32 | subprocess.run(args) 33 | 34 | def examine_cut(console, graph_path, partition): 35 | args = [console] 36 | args.append("load_metis_graph") 37 | args.append(graph_path) 38 | args.append("load_node_color_partition") 39 | args.append(partition) 40 | args.append("examine_node_color_cut") 41 | 42 | output = subprocess.check_output(args, 
universal_newlines=True) 43 | row_dict = {} 44 | for l in output.splitlines(): 45 | m = re.match(r"^\s*([a-zA-Z_ ]+) : ([0-9.]+)[^0-9]*$", l) 46 | assert(m) 47 | name = m.group(1).replace(" ", "_") 48 | value = m.group(2) 49 | if '.' in value: 50 | value = float(value) 51 | else: 52 | value = int(value) 53 | row_dict[name] = value 54 | return row_dict 55 | 56 | def metis_cut(metis, console, graph_path, epsilon): 57 | graph_to_metis(console, graph_path, "tmp.graph") 58 | args = [metis] 59 | args.append("tmp.graph") 60 | args.append("2") 61 | if epsilon == 0: 62 | epsilon = 0.001 63 | args.append(f"-ufactor={int(epsilon * 1000)}") 64 | output = subprocess.check_output(args, universal_newlines=True) 65 | row_dict = {} 66 | for l in output.splitlines(): 67 | if l.startswith(" Partitioning:"): 68 | print(l) 69 | m = re.search(r"([0-9.]+) sec", l) 70 | if m: 71 | row_dict["running_time"] = float(m.group(1)) 72 | metrics = examine_cut(console, "tmp.graph", "tmp.graph.part.2") 73 | row_dict.update(metrics) 74 | os.remove("tmp.graph.part.2") 75 | os.remove("tmp.graph") 76 | return row_dict 77 | -------------------------------------------------------------------------------- /evaluation/metis_order.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | G=$1 3 | echo -n metis,$G, 4 | ./../build/console load_routingkit_unweighted_graph $G/first_out $G/head add_back_arcs remove_loops remove_multi_arcs assign_constant_arc_weights 1 save_metis_graph tmp 5 | ./ndmetis tmp | grep "Ordering" | sed -E 's/^[ \t]*Ordering:[ \t]+([0-9.]+)[ \t]+sec[ \t]+\(METIS time\)[ \t]*$/\1/' 6 | ./../build/console load_routingkit_unweighted_graph $G/first_out $G/head permutate_nodes tmp.iperm save_routingkit_node_permutation_since_last_load $G.metis.order 7 | rm tmp tmp.iperm 8 | 9 | -------------------------------------------------------------------------------- /evaluation/order_experiments.py: 
-------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | import re 4 | import subprocess 5 | import os 6 | 7 | experiments_folder = "" 8 | 9 | graphs = ["col", "cal", "europe", "usa"] 10 | partitioners = ["metis", "kahip_v0_71", "kahip_v1_00_cut", "kahip_v2_11", "inertial_flow", "flowcutter3", "flowcutter20", "flowcutter100","inertialflowcutter4", "inertialflowcutter8", "inertialflowcutter12", "inertialflowcutter16"] 11 | 12 | binary_path = "./../build/" 13 | console = binary_path + "console" 14 | customization_binary = binary_path + "customize" 15 | query_binary = binary_path + "query" 16 | 17 | def config_contained(G, P, results): 18 | cpd = pd.DataFrame.from_dict({ 19 | 'graph' : [G], 20 | 'partitioner' : [P], 21 | }) 22 | return len(cpd.merge(results)) > 0 23 | 24 | def partitioner_id(P): 25 | return next(i for i,v in enumerate(partitioners) if v == P) 26 | 27 | def graph_id(G): 28 | return next(i for i,v in enumerate(graphs) if v == G) 29 | 30 | def order_path(G,P): 31 | return experiments_folder + G + "." + P + ".order" 32 | 33 | def graph_path(G): 34 | return experiments_folder + G + "/" 35 | 36 | def metric_file(G): 37 | return graph_path(G) + "travel_time" 38 | 39 | def query_file(G): 40 | return experiments_folder + G + ".q." 
41 | 42 | def parse_order_log(G,P): 43 | args = [console] 44 | args.append("load_routingkit_unweighted_graph") 45 | 46 | args.append(graph_path(G) + "first_out") 47 | args.append(graph_path(G) + "head") 48 | args.append("add_back_arcs") 49 | args.append("remove_multi_arcs") 50 | args.append("remove_loops") 51 | 52 | args.append("permutate_nodes_routingkit") 53 | args.append(order_path(G,P)) 54 | 55 | args.append("examine_chordal_supergraph") 56 | log = subprocess.check_output(args, universal_newlines=True) 57 | print(log) 58 | row_dict = dict() 59 | for l in log.splitlines(): 60 | if l == "": 61 | continue 62 | m = re.match(r"^\s*([a-zA-Z_ ]+) : ([0-9.]+)[^0-9]*$", l) 63 | assert(m) 64 | name = m.group(1).replace(" ", "_") 65 | value = m.group(2) 66 | if '.' in value: 67 | value = float(value) 68 | else: 69 | value = int(value) 70 | row_dict[name] = value 71 | return row_dict 72 | 73 | def run_customizations(G,P): 74 | q = query_file(G) 75 | g = graph_path(G) 76 | args = [customization_binary, g + "first_out", g + "head", order_path(G,P), metric_file(G), str(1)] 77 | for x in args: 78 | print(x, end=' ') 79 | print() 80 | runtimes = [] 81 | for i in range(9): 82 | t = subprocess.check_output(args, universal_newlines=True) 83 | runtimes.append(float(t) / 1000) #in ms 84 | print(t.strip()) 85 | return np.median(np.array(runtimes)) 86 | 87 | def run_queries(G,P): 88 | q = query_file(G) 89 | g = graph_path(G) 90 | args = [query_binary, g + "first_out", g + "head", order_path(G,P), metric_file(G), q + "s", q + "t"] 91 | for x in args: 92 | print(x, end=' ') 93 | print() 94 | t = subprocess.check_output(args, universal_newlines=True) 95 | print(t.strip()) 96 | return float(t) 97 | 98 | def main(): 99 | 100 | order_times = pd.read_csv(experiments_folder + "order_running_time.csv") 101 | 102 | if not os.path.isfile(experiments_folder + "order_experiments.csv"): #Create nonsensical file 103 | f = open(experiments_folder + "order_experiments.csv", 'w') 104 | 
f.write("graph,partitioner\n") #Could be anything in csv 105 | f.close() 106 | results = pd.read_csv(experiments_folder + "order_experiments.csv") 107 | 108 | for G in graphs: 109 | for P in partitioners: 110 | if not os.path.isfile(order_path(G,P)): 111 | print("Warning: order for partitioner", P, "on graph", G, "missing. Skip.") 112 | continue 113 | if config_contained(G, P, results): 114 | print("Skipping", P, G, "because this config was already run") 115 | continue 116 | print("Running", P, G) 117 | row_dict = dict() 118 | row_dict["graph"] = G 119 | row_dict["partitioner"] = P 120 | print("parsing order log") 121 | row_dict.update(parse_order_log(G,P)) 122 | row_dict["order_running_time"] = float(order_times[(order_times.partitioner==P) & (order_times.graph==G)].order_running_time_sec) #If this fails, the order exists, but the running time is not in order_running_time.csv 123 | print("running customization") 124 | row_dict["median_customization_time"] = run_customizations(G,P) 125 | print("running queries") 126 | row_dict["avg_query_time"] = run_queries(G,P) 127 | print(row_dict) 128 | results = results.append(pd.DataFrame([row_dict]), ignore_index=True) 129 | 130 | print("Order experiments done.") 131 | new_cols = list(results.columns) 132 | new_cols.remove("graph") 133 | new_cols.remove("partitioner") 134 | new_cols = ["graph", "partitioner"] + new_cols 135 | results = results[new_cols] 136 | results["graph_id"] = results["graph"].map(graph_id) 137 | results["partitioner_id"] = results["partitioner"].map(partitioner_id) 138 | results.sort_values(["graph_id", "partitioner_id"], ascending=[True,True], inplace=True) 139 | results.drop(columns=["graph_id", "partitioner_id"]) 140 | results.to_csv(experiments_folder + "order_experiments.csv", index=False) 141 | 142 | 143 | 144 | if __name__ == '__main__': 145 | main() 146 | -------------------------------------------------------------------------------- /evaluation/order_running_time.csv: 
-------------------------------------------------------------------------------- 1 | partitioner,graph,order_running_time_sec 2 | -------------------------------------------------------------------------------- /evaluation/parameterstudy.py: -------------------------------------------------------------------------------- 1 | import configurable_inertialflowcutter_order as ifc 2 | import pandas as pd 3 | import numpy as np 4 | import re 5 | import subprocess 6 | import os 7 | 8 | experiments_folder = "" 9 | graph = "col" #TODO replace again with europe 10 | graph_path = experiments_folder + graph + "/" 11 | metric_path = graph_path + "travel_time" 12 | query_sources = experiments_folder + graph + ".q.s" 13 | query_targets = experiments_folder + graph + ".q.t" 14 | 15 | binary_path = "./../build/" 16 | order_console = binary_path + "console" 17 | customization_binary = binary_path + "customize" 18 | query_binary = binary_path + "query" 19 | 20 | 21 | def config_contained(config, results): 22 | cpd = pd.DataFrame([config._asdict()]) 23 | return len(cpd.merge(results)) > 0 24 | 25 | def config_to_string(config): 26 | return '.'.join(map(str,config)) 27 | 28 | def order_path(config): 29 | return experiments_folder + "parameterstudy/" + graph + "." + config_to_string(config) + ".order" 30 | 31 | def log_path(config): 32 | return order_path(config) + ".log" 33 | 34 | def parse_order_log(config): 35 | log = open(log_path(config)) 36 | row_dict = dict() 37 | for l in log: 38 | m = re.match(r"^\s*([a-zA-Z_ ]+) : ([0-9.]+)[^0-9]*$", l) 39 | assert(m) 40 | name = m.group(1).replace(" ", "_") 41 | value = m.group(2) 42 | if '.' 
in value: 43 | value = float(value) 44 | else: 45 | value = int(value) 46 | if "running_time" in name: 47 | name = "order_running_time" 48 | value /= 1000000 #in seconds 49 | row_dict[name] = value 50 | return row_dict 51 | 52 | def run_customizations(config): 53 | args = [customization_binary, graph_path + "first_out", graph_path + "head", order_path(config), metric_path, str(1)] 54 | runtimes = [] 55 | for i in range(9): 56 | t = subprocess.check_output(args, universal_newlines=True) 57 | runtimes.append(float(t) / 1000) #in ms 58 | return np.median(np.array(runtimes)) 59 | 60 | def run_queries(config): 61 | args = [query_binary, graph_path + "first_out", graph_path + "head", order_path(config), metric_path, query_sources, query_targets] 62 | t = subprocess.check_output(args, universal_newlines=True) 63 | return float(t) 64 | 65 | def main(): 66 | configs = pd.read_csv(experiments_folder + "parameterstudy_configs.csv") 67 | if not os.path.isfile(experiments_folder + "parameterstudy.csv"): 68 | x = pd.DataFrame(columns=["geo_distance_cutters","hop_distance_cutters","initial_assimilated_fraction","bulk_step_fraction","bulk_assimilation_order_threshold","bulk_assimilation_threshold"]) 69 | x.to_csv(experiments_folder + "parameterstudy.csv", index=False) 70 | results = pd.read_csv(experiments_folder + "parameterstudy.csv") 71 | 72 | for config in configs.itertuples(index=False): 73 | if not config_contained(config, results): 74 | print("computing order with config", config) 75 | ifc.save_inertialflowcutter_cch_order(config, order_console, graph_path, order_path(config), log_path(config)) 76 | row_dict = config._asdict() 77 | row_dict.update(parse_order_log(config)) 78 | print("running customization") 79 | row_dict["median_customization_time"] = run_customizations(config) 80 | print("running queries") 81 | row_dict["avg_query_time"] = run_queries(config) 82 | print(row_dict) 83 | results = results.append(pd.DataFrame([row_dict]), ignore_index=True) 84 | 85 | 
results.sort_values([x for x in configs.columns], ascending=[True for i in configs.columns], inplace=True) 86 | results.to_csv(experiments_folder + "parameterstudy.csv", index=False) #careful 87 | if __name__ == '__main__': 88 | main() 89 | -------------------------------------------------------------------------------- /evaluation/parameterstudy_configs.csv: -------------------------------------------------------------------------------- 1 | geo_distance_cutters,hop_distance_cutters,initial_assimilated_fraction,bulk_step_fraction,bulk_assimilation_order_threshold,bulk_assimilation_threshold 2 | 8,0,0.05,0.05,0.25,0.3 3 | 8,0,0.05,0.05,0.3,0.35 4 | 8,0,0.05,0.05,0.35,0.4 5 | 8,0,0.05,0.05,0.2,0.3 6 | 8,0,0.05,0.05,0.25,0.35 7 | 8,0,0.05,0.05,0.3,0.4 8 | 8,0,0.05,0.05,0.1,0.3 9 | 8,0,0.05,0.05,0.15,0.35 10 | 8,0,0.05,0.05,0.2,0.4 11 | 8,0,0.05,0.1,0.25,0.3 12 | 8,0,0.05,0.1,0.3,0.35 13 | 8,0,0.05,0.1,0.35,0.4 14 | 8,0,0.05,0.1,0.2,0.3 15 | 8,0,0.05,0.1,0.25,0.35 16 | 8,0,0.05,0.1,0.3,0.4 17 | 8,0,0.05,0.1,0.1,0.3 18 | 8,0,0.05,0.1,0.15,0.35 19 | 8,0,0.05,0.1,0.2,0.4 20 | 8,0,0.05,0.15,0.25,0.3 21 | 8,0,0.05,0.15,0.3,0.35 22 | 8,0,0.05,0.15,0.35,0.4 23 | 8,0,0.05,0.15,0.2,0.3 24 | 8,0,0.05,0.15,0.25,0.35 25 | 8,0,0.05,0.15,0.3,0.4 26 | 8,0,0.05,0.15,0.1,0.3 27 | 8,0,0.05,0.15,0.15,0.35 28 | 8,0,0.05,0.15,0.2,0.4 29 | 8,0,0.05,0.05,0.15,0.3 30 | 8,0,0.05,0.05,0.2,0.35 31 | 8,0,0.05,0.05,0.25,0.4 32 | 8,0,0.05,0.1,0.15,0.3 33 | 8,0,0.05,0.1,0.2,0.35 34 | 8,0,0.05,0.1,0.25,0.4 35 | 8,0,0.05,0.15,0.15,0.3 36 | 8,0,0.05,0.15,0.2,0.35 37 | 8,0,0.05,0.15,0.25,0.4 38 | 39 | 8,0,0.075,0.05,0.25,0.4 40 | 8,0,0.1,0.05,0.25,0.4 41 | 8,0,0.125,0.05,0.25,0.4 42 | 8,0,0.15,0.05,0.25,0.4 43 | 8,0,0.01,0.05,0.25,0.4 44 | 8,0,0.025,0.05,0.25,0.4 45 | -------------------------------------------------------------------------------- /evaluation/toplevel_cut_experiments.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | 
import pandas as pd 4 | import metis_cut as metis 5 | import kahip_cut as kahip 6 | import inertialflow_cut as inertialflow 7 | import flowcutter_cut as flowcutter 8 | import inertialflowcutter_cut as ifc 9 | 10 | experiments_folder = "" 11 | graphs = ["col", "cal", "europe", "usa"] 12 | partitioners = ["metis", "kahip_v2_11", "inertial_flow", "flowcutter3", "flowcutter20", "inertialflowcutter4", "inertialflowcutter8", "inertialflowcutter12", "inertialflowcutter16"] 13 | imbalances = [0.0, 0.01, 0.03, 0.05, 0.1, 0.2, 0.3, 0.5, 0.7, 0.9] 14 | 15 | binary_path = "./../build/" 16 | console = binary_path + "console" 17 | metis_path = "./" + experiments_folder + "gpmetis" 18 | 19 | def graph_path(G): 20 | return experiments_folder + G + "/" 21 | 22 | def output_file(G, P): 23 | return experiments_folder + G + "." + P + ".cut" 24 | 25 | def compute_cuts(G, P): 26 | if P == "metis": 27 | return compute_metis_cuts(G) 28 | elif P == "kahip_v2_11": 29 | return compute_kahip_cuts(G, P, old=False) 30 | elif P.startswith("flowcutter"): 31 | cutters = int(re.match(r"flowcutter([0-9]+)", P).group(1)) 32 | return compute_flow_cutter_cuts(G, cutters) 33 | elif P == 'inertial_flow': 34 | return compute_inertial_flow_cuts(G) 35 | elif P.startswith("inertialflowcutter"): 36 | cutters = int(re.match(r"inertialflowcutter([0-9]+)", P).group(1)) 37 | return compute_inertial_flow_cutter_cuts(G, cutters) 38 | else: 39 | assert(false) 40 | 41 | def compute_metis_cuts(G): 42 | rows = [] 43 | for epsilon in imbalances: 44 | row_dict = {} 45 | metrics = metis.metis_cut(metis_path, console, graph_path(G), epsilon) 46 | row_dict["epsilon"] = epsilon 47 | row_dict["achieved_epsilon"] = metrics["epsilon"] 48 | row_dict["cut_size"] = metrics["cut_size"] 49 | row_dict["running_time"] = metrics["running_time"] 50 | row_dict["connected"] = metrics["left_components"] == 1 and metrics["right_components"] == 1 51 | rows.append(row_dict) 52 | results = pd.DataFrame(rows) 53 | return 
results.set_index("epsilon").sort_index() 54 | 55 | def compute_kahip_cuts(G, P, old): 56 | rows = [] 57 | for epsilon in imbalances: 58 | row_dict = {} 59 | metrics = kahip.kahip_cut(console, graph_path(G), epsilon) 60 | row_dict["epsilon"] = epsilon 61 | row_dict["achieved_epsilon"] = metrics["epsilon"] 62 | row_dict["cut_size"] = metrics["cut_size"] 63 | row_dict["running_time"] = metrics["running_time"] 64 | row_dict["connected"] = metrics["left_components"] == 1 and metrics["right_components"] == 1 65 | rows.append(row_dict) 66 | results = pd.DataFrame(rows) 67 | return results.set_index("epsilon").sort_index() 68 | 69 | def compute_inertial_flow_cuts(G): 70 | rows = [] 71 | for epsilon in imbalances: 72 | row_dict = {} 73 | metrics = inertialflow.inertialflow_cut(console, graph_path(G), (1 - epsilon) / 2) 74 | row_dict["epsilon"] = epsilon 75 | row_dict["achieved_epsilon"] = metrics["epsilon"] 76 | row_dict["cut_size"] = metrics["cut_size"] 77 | row_dict["running_time"] = metrics["running_time"] 78 | row_dict["connected"] = metrics["left_components"] == 1 and metrics["right_components"] == 1 79 | rows.append(row_dict) 80 | results = pd.DataFrame(rows) 81 | return results.set_index("epsilon").sort_index() 82 | 83 | def compute_flow_cutter_cuts(G, cutters): 84 | cuts = flowcutter.flowcutter_pareto(console, graph_path(G), cutters) 85 | node_count = cuts.iloc[-1]["small_side_size"] + cuts.iloc[-1]["large_side_size"] 86 | cuts['imbalance'] = cuts["large_side_size"] / ((node_count + 1) // 2) - 1 87 | rows = [] 88 | for epsilon in imbalances: 89 | row_dict = {} 90 | cut = cuts[cuts.imbalance <= epsilon].iloc[0] 91 | row_dict["epsilon"] = epsilon 92 | row_dict["achieved_epsilon"] = float(cut["imbalance"]) 93 | row_dict["cut_size"] = int(cut["cut_size"]) 94 | row_dict["running_time"] = cut["time"] * 1e-6 95 | row_dict["connected"] = True 96 | rows.append(row_dict) 97 | results = pd.DataFrame(rows) 98 | return results.set_index("epsilon").sort_index() 99 | 100 | def 
compute_inertial_flow_cutter_cuts(G, cutters): 101 | cuts = ifc.inertialflowcutter_pareto(console, graph_path(G), cutters) 102 | node_count = cuts.iloc[-1]["small_side_size"] + cuts.iloc[-1]["large_side_size"] 103 | cuts['imbalance'] = cuts["large_side_size"] / ((node_count + 1) // 2) - 1 104 | rows = [] 105 | for epsilon in imbalances: 106 | row_dict = {} 107 | cut = cuts[cuts.imbalance <= epsilon].iloc[0] 108 | row_dict["epsilon"] = epsilon 109 | row_dict["achieved_epsilon"] = float(cut["imbalance"]) 110 | row_dict["cut_size"] = int(cut["cut_size"]) 111 | row_dict["running_time"] = cut["time"] * 1e-6 112 | row_dict["connected"] = True 113 | rows.append(row_dict) 114 | results = pd.DataFrame(rows) 115 | return results.set_index("epsilon").sort_index() 116 | 117 | def main(): 118 | for G in graphs: 119 | for P in partitioners: 120 | if not os.path.exists(output_file(G, P)): 121 | print(P,G,"compute cuts") 122 | cuts = compute_cuts(G, P) 123 | cuts.to_csv(output_file(G, P)) 124 | else: 125 | print(P,G,"skip cuts") 126 | 127 | if __name__ == '__main__': 128 | main() 129 | -------------------------------------------------------------------------------- /inertialflowcutter_order.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import sys 3 | 4 | 5 | binary_path = "./build/" 6 | console = binary_path + "console" 7 | 8 | def save_inertialflowcutter_cch_order(G, order_path): 9 | args = [console] 10 | args.append("load_routingkit_unweighted_graph") 11 | args.append(G + "first_out") 12 | args.append(G + "head") 13 | 14 | args.append("load_routingkit_longitude") 15 | args.append(G + "longitude") 16 | args.append("load_routingkit_latitude") 17 | args.append(G + "latitude") 18 | 19 | args.append("add_back_arcs") 20 | args.append("remove_multi_arcs") 21 | args.append("remove_loops") 22 | 23 | args.append("flow_cutter_set") 24 | args.append("random_seed") 25 | args.append("5489") 26 | 
args.append("reorder_nodes_at_random") 27 | args.append("reorder_nodes_in_preorder") 28 | args.append("sort_arcs") 29 | 30 | args.append("flow_cutter_set") 31 | args.append("geo_pos_ordering_cutter_count") 32 | args.append("8") #should be multiple of 4. otherwise the four standard directions form the Inertial Flow paper are not chosen. 33 | 34 | args.append("flow_cutter_set") 35 | args.append("thread_count") 36 | args.append("1") #use more parallelism! 37 | 38 | args.append("report_time") 39 | args.append("reorder_nodes_in_accelerated_flow_cutter_cch_order") 40 | args.append("do_not_report_time") 41 | args.append("examine_chordal_supergraph") #print some statistics on the CCH 42 | 43 | args.append("save_routingkit_node_permutation_since_last_load") #use this for binary vector format as used by RoutingKit 44 | #args.append("save_permutation_of_nodes_since_last_file_load") #use this for order in text format 45 | 46 | args.append(order_path) 47 | subprocess.run(args, universal_newlines=True) 48 | 49 | if __name__ == '__main__': 50 | save_inertialflowcutter_cch_order(sys.argv[1], sys.argv[2]) 51 | -------------------------------------------------------------------------------- /query.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | int main(int argc, char **argv) { 8 | if (argc != 7) { 9 | std::cout << "Usage: " << argv[0] << " first_out head order metric query_sources query_tails" << std::endl; 10 | return 1; 11 | } 12 | 13 | std::string first_out_file = argv[1]; 14 | std::string head_file = argv[2]; 15 | std::string order_file = argv[3]; 16 | std::string weight_file = argv[4]; 17 | std::string query_sources_file = argv[5]; 18 | std::string query_targets_file = argv[6]; 19 | 20 | std::vector first_out = RoutingKit::load_vector(first_out_file); 21 | std::vector tail = RoutingKit::invert_inverse_vector(first_out); 22 | std::vector head = 
RoutingKit::load_vector(head_file); 23 | std::vector node_order = RoutingKit::load_vector(order_file); 24 | std::vector weight = RoutingKit::load_vector(weight_file); 25 | std::vector query_sources = RoutingKit::load_vector(query_sources_file); 26 | std::vector query_targets = RoutingKit::load_vector(query_targets_file); 27 | 28 | RoutingKit::CustomizableContractionHierarchy cch(node_order, tail, head); 29 | RoutingKit::CustomizableContractionHierarchyMetric metric(cch, weight); 30 | metric.customize(); 31 | RoutingKit::CustomizableContractionHierarchyQuery query(metric); 32 | double time = -RoutingKit::get_micro_time(); 33 | for (int i = 0; i < (int) query_sources.size(); ++i) { 34 | unsigned s = query_sources[i]; unsigned t = query_targets[i]; 35 | query.reset().add_source(s).add_target(t).run(); 36 | } 37 | time += RoutingKit::get_micro_time(); 38 | time /= query_sources.size(); 39 | std::cout << time << std::endl; 40 | 41 | return 0; 42 | 43 | } 44 | -------------------------------------------------------------------------------- /src/array_id_func.h: -------------------------------------------------------------------------------- 1 | #ifndef ARRAY_ID_FUNC_H 2 | #define ARRAY_ID_FUNC_H 3 | 4 | #include "id_func.h" 5 | #include 6 | #include 7 | #include 8 | 9 | template 10 | class ArrayIDFunc{ 11 | public: 12 | ArrayIDFunc()noexcept:preimage_count_(0), data_(nullptr){} 13 | 14 | explicit ArrayIDFunc(int preimage_count) 15 | :preimage_count_(preimage_count){ 16 | assert(preimage_count >= 0 && "ids may not be negative"); 17 | if(preimage_count == 0) 18 | data_ = nullptr; 19 | else 20 | data_ = new T[preimage_count_]; 21 | } 22 | 23 | template 24 | ArrayIDFunc(const IDFunc&o) 25 | :preimage_count_(o.preimage_count()){ 26 | if(preimage_count_ == 0) 27 | data_ = nullptr; 28 | else{ 29 | data_ = new T[preimage_count_]; 30 | try{ 31 | for(int id=0; id 71 | typename std::enable_if< 72 | is_id_func::value && 73 | std::is_convertible::type, T>::value, 74 | 
ArrayIDFunc&>::type operator=(const IDFunc&o){ 75 | ArrayIDFunc(o).swap(*this); 76 | return *this; 77 | } 78 | 79 | ArrayIDFunc&operator=(const ArrayIDFunc&o){ 80 | ArrayIDFunc(o).swap(*this); 81 | return *this; 82 | } 83 | 84 | ArrayIDFunc&operator=(ArrayIDFunc&&o)noexcept{ 85 | this->~ArrayIDFunc(); 86 | data_ = nullptr; 87 | preimage_count_ = 0; 88 | swap(o); 89 | return *this; 90 | } 91 | 92 | // IDFunc 93 | int preimage_count() const{return preimage_count_;} 94 | 95 | const T&operator()(int id) const{ 96 | assert(0 <= id && id < preimage_count_ && "id out of bounds"); 97 | return data_[id]; 98 | } 99 | 100 | // Mutable IDFunc 101 | void set(int id, T t){ 102 | assert(0 <= id && id < preimage_count_ && "id out of bounds"); 103 | data_[id] = std::move(t); 104 | } 105 | 106 | T move(int id){ 107 | assert(0 <= id && id < preimage_count_ && "id out of bounds"); 108 | return std::move(data_[id]); 109 | } 110 | 111 | void fill(const T&t){ 112 | std::fill(data_, data_+preimage_count_, t); 113 | } 114 | 115 | // Array only functionality 116 | T&operator[](int id){ 117 | assert(0 <= id && id < preimage_count_ && "id out of bounds"); 118 | return data_[id]; 119 | } 120 | 121 | const T&operator[](int id) const{ 122 | assert(0 <= id && id < preimage_count_ && "id out of bounds"); 123 | return data_[id]; 124 | } 125 | 126 | T*begin(){ return data_; } 127 | const T*begin() const{ return data_; } 128 | T*end(){ return data_ + preimage_count_; } 129 | const T*end()const{ return data_ + preimage_count_; } 130 | 131 | int preimage_count_; 132 | T*data_; 133 | }; 134 | 135 | struct ArrayIDIDFunc : public ArrayIDFunc{ 136 | 137 | ArrayIDIDFunc()noexcept :image_count_(0){} 138 | 139 | ArrayIDIDFunc(int preimage_count, int image_count) 140 | :ArrayIDFunc(preimage_count), image_count_(image_count){} 141 | 142 | ArrayIDIDFunc(const ArrayIDIDFunc&o) = default; 143 | ArrayIDIDFunc(ArrayIDIDFunc&&) = default; 144 | ArrayIDIDFunc&operator=(const ArrayIDIDFunc&) = default; 145 | 
ArrayIDIDFunc&operator=(ArrayIDIDFunc&&) = default; 146 | 147 | void swap(ArrayIDIDFunc&o){ 148 | std::swap(image_count_, o.image_count_); 149 | ArrayIDFunc::swap(static_cast&>(o)); 150 | } 151 | 152 | template 153 | ArrayIDIDFunc(const IDFunc&f, int image_count_) 154 | : ArrayIDFunc(f), image_count_(image_count_){} 155 | 156 | 157 | template 158 | ArrayIDIDFunc(const IDIDFunc&f/*, 159 | typename std::enable_if::value, void>::type*dummy=0*/) 160 | : ArrayIDFunc(f), image_count_(f.image_count()){} 161 | 162 | template 163 | typename std::enable_if< 164 | is_id_id_func::value, 165 | ArrayIDIDFunc& 166 | >::type operator=(const IDIDFunc&o){ 167 | ArrayIDIDFunc(o).swap(*this); 168 | return *this; 169 | } 170 | 171 | int image_count()const { return image_count_; } 172 | 173 | int operator()(int x) const{ 174 | assert(0 <= x && x < preimage_count_ && "preimage id out of bounds"); 175 | int y = data_[x]; 176 | assert(0 <= y && y < image_count_ && "image id out of bounds"); 177 | return y; 178 | } 179 | 180 | void set_image_count(int new_image_count){ 181 | image_count_ = new_image_count; 182 | } 183 | 184 | int image_count_; 185 | }; 186 | 187 | 188 | template 189 | typename std::enable_if::value, ArrayIDFunc::type>>::type 190 | add_preimage_at_end(const IDFunc&in, int n){ 191 | ArrayIDFunc::type> out(in.preimage_count() + n); 192 | std::copy(in.begin(), in.end(), out.begin()); 193 | return out; // NVRO 194 | } 195 | 196 | template 197 | ArrayIDFuncadd_preimage_at_end(const ArrayIDFunc&in, int n){ 198 | ArrayIDFunc out(in.preimage_count() + n); 199 | std::copy(in.begin(), in.end(), out.begin()); 200 | return out; // NVRO 201 | } 202 | 203 | template 204 | ArrayIDFuncadd_preimage_at_end(ArrayIDFunc&&in, int n){ 205 | ArrayIDFunc out(in.preimage_count() + n); 206 | std::move(in.begin(), in.end(), out.begin()); 207 | return out; // NVRO 208 | } 209 | 210 | template 211 | typename std::enable_if::value, ArrayIDIDFunc>::type 212 | add_preimage_at_end(const IDIDFunc&in, int n){ 
213 | ArrayIDIDFunc out(in.preimage_count() + n, in.image_count()); 214 | std::copy(in.begin(), in.end(), out.begin()); 215 | return out; // NVRO 216 | } 217 | 218 | template 219 | auto add_preimage_at_end(F&&f, int n, const T&t)->decltype(add_preimage_at_end(std::forward(f), n)){ 220 | int old_preimage_count = f.preimage_count(); 221 | auto x = add_preimage_at_end(std::forward(f), n); 222 | for(int i=old_preimage_count; i 8 | 9 | // Input graph must be symmetric 10 | template 11 | ArrayIDIDFunc compute_back_arc_permutation(const Tail&tail, const Head&head){ 12 | 13 | const int arc_count = head.preimage_count(); 14 | const int node_count = head.image_count(); 15 | 16 | struct D{ 17 | int tail, head, arc_id; 18 | }; 19 | 20 | ArrayIDFuncarc_list(arc_count), tmp(arc_count); 21 | for(int i=0; i arc_list[i].head) 25 | std::swap(arc_list[i].tail, arc_list[i].head); 26 | arc_list[i].arc_id = i; 27 | } 28 | 29 | stable_sort_copy_by_id( 30 | std::begin(arc_list), std::end(arc_list), 31 | std::begin(tmp), 32 | node_count, 33 | [](D d){return d.head;} 34 | ); 35 | stable_sort_copy_by_id( 36 | std::begin(tmp), std::end(tmp), 37 | std::begin(arc_list), 38 | node_count, 39 | [](D d){return d.tail;} 40 | ); 41 | 42 | ArrayIDIDFunc back_arc(head.preimage_count(), head.preimage_count()); 43 | 44 | for(int i=0; i= arc_count) 47 | throw std::runtime_error("Cannot compute back arc if number of edges is odd"); 48 | 49 | if(tail(arc_list(i).arc_id) != head(arc_list(i+1).arc_id) || head(arc_list(i).arc_id) != tail(arc_list(i+1).arc_id)) 50 | throw std::runtime_error("Cannot compute back arc if graph is not symmetric, arc with ID "+std::to_string(arc_list(i).arc_id)+" has no backarc"); 51 | 52 | back_arc[arc_list(i).arc_id] = arc_list(i+1).arc_id; 53 | back_arc[arc_list(i+1).arc_id] = arc_list(i).arc_id; 54 | } 55 | 56 | return back_arc; 57 | } 58 | 59 | #endif 60 | 61 | -------------------------------------------------------------------------------- /src/chain.h: 
-------------------------------------------------------------------------------- 1 | #ifndef CHAIN_H 2 | #define CHAIN_H 3 | 4 | #include 5 | #include "id_func.h" 6 | #include "array_id_func.h" 7 | 8 | // chain(IDIDFunc, IDFunc) 9 | template 10 | typename std::enable_if< 11 | is_id_id_func::value 12 | && is_id_func::value 13 | && !is_id_id_func::value, 14 | ArrayIDFunc::type> 15 | >::type 16 | chain(const L&l, const R&r){ 17 | ArrayIDFunc::type>result(l.preimage_count()); 18 | for(int i=0; i 25 | typename std::enable_if< 26 | is_mutable_id_id_func::value 27 | && is_id_id_func::value, 28 | L 29 | >::type 30 | chain(L l, const R&r){ 31 | assert(l.image_count() == r.preimage_count()); 32 | for(int i=0; i 39 | typename std::enable_if< 40 | is_id_id_func::value 41 | && !is_mutable_id_id_func::value 42 | && is_id_id_func::value, 43 | ArrayIDIDFunc 44 | >::type 45 | chain(const L&l, const R&r){ 46 | assert(l.image_count() == r.preimage_count()); 47 | ArrayIDIDFunc result(l.preimage_count(), r.image_count()); 48 | for(int i=0; i 12 | #include 13 | 14 | template 15 | ArrayIDIDFunc compute_connected_components(const Tail&tail, const Head&head){ 16 | const int node_count = tail.image_count(); 17 | const int arc_count = tail.preimage_count(); 18 | 19 | UnionFind uf(node_count); 20 | for(int i=0; i 34 | bool is_connected(const Tail&tail, const Head&head){ 35 | const int node_count = tail.image_count(); 36 | const int arc_count = tail.preimage_count(); 37 | 38 | UnionFind uf(node_count); 39 | for(int i=0; i 49 | ArrayIDIDFunc compute_strongly_connected_components( 50 | const Successor&succ 51 | ){ 52 | const int node_count = succ.preimage_count(); 53 | 54 | ArrayIDFunc dfs_stack(node_count); 55 | int dfs_stack_top = 0; 56 | 57 | ArrayIDFunc scc_stack(node_count); 58 | int scc_stack_top = 0; 59 | 60 | BitIDFunc in_scc_stack(node_count); 61 | in_scc_stack.fill(false); 62 | 63 | typedef typename std::decay::type Iter; 64 | 65 | ArrayIDFuncnext_out(node_count); 66 | for(int i=0; i 
dfs_pos(node_count); 70 | dfs_pos.fill(-1); 71 | int next_preorder_id = 0; 72 | 73 | ArrayIDFunc low_link(node_count); 74 | 75 | ArrayIDIDFunc component_of(node_count, 0); 76 | BitIDFunc in_component(node_count); 77 | in_component.fill(false); 78 | 79 | for(int r=0; r 131 | void symmetric_depth_first_search( 132 | const OutArc&out_arc, 133 | const Head&head, 134 | const OnRootFirstVisit&on_root_first_visit, 135 | const OnRootLastVisit&on_root_last_visit, 136 | const OnTreeUpArcVisit&on_tree_down_arc_visit, 137 | const OnTreeDownArcVisit&on_tree_up_arc_visit, 138 | const OnNonTreeArcVisit&on_non_tree_arc_visit 139 | ){ 140 | const int arc_count = head.preimage_count(); 141 | const int node_count = out_arc.preimage_count(); 142 | 143 | (void)arc_count; 144 | (void)node_count; 145 | 146 | ArrayIDFunc dfs_stack(node_count); 147 | int dfs_stack_end = 0; 148 | 149 | ArrayIDFunc parent_arc(node_count); 150 | parent_arc.fill(-1); 151 | 152 | ArrayIDFunc parent_node(node_count); 153 | parent_node.fill(-1); 154 | 155 | typedef typename std::decay::type Iter; 156 | ArrayIDFuncnext_out(node_count); 157 | for(int i=0; i 206 | ArrayIDIDFunc compute_biconnected_components( 207 | const OutArc&out_arc, const Head&head, const BackArc&back_arc 208 | ){ 209 | const int node_count = out_arc.preimage_count(); 210 | const int arc_count = head.preimage_count(); 211 | 212 | (void)arc_count; 213 | (void)node_count; 214 | 215 | ArrayIDFunc arc_stack(arc_count); 216 | int arc_stack_end = 0; 217 | 218 | ArrayIDIDFunc arc_component(arc_count, 0); 219 | arc_component.fill(-1); 220 | 221 | ArrayIDFunc depth(node_count); 222 | ArrayIDFunc min_succ_depth(node_count); 223 | 224 | auto min_to = [](int&x, int y){ 225 | if(y < x) 226 | x = y; 227 | }; 228 | 229 | auto on_first_root_visit = [&](int x){ 230 | depth[x] = 0; 231 | }; 232 | 233 | auto on_last_root_visit = [&](int){ 234 | 235 | }; 236 | 237 | auto on_tree_down_arc_visit = [&](int x, int xy, int y){ 238 | arc_stack[arc_stack_end++] = xy; 239 
| 240 | min_succ_depth[y] = std::numeric_limits::max(); 241 | depth[y] = depth[x]+1; 242 | }; 243 | 244 | auto on_tree_up_arc_visit = [&](int x, int xy, int y){ 245 | arc_stack[arc_stack_end++] = xy; 246 | 247 | min_to(min_succ_depth[y], min_succ_depth[x]); 248 | min_to(min_succ_depth[y], depth[x]); 249 | 250 | if(min_succ_depth[x] >= depth[y]){ 251 | const int new_component_id = arc_component.image_count(); 252 | arc_component.set_image_count(arc_component.image_count() + 1); 253 | 254 | while(arc_stack_end != 0){ 255 | int ab = arc_stack[--arc_stack_end]; 256 | int ba = back_arc(ab); 257 | if(arc_component[ba] == -1){ 258 | assert(arc_component[ab] == -1); 259 | arc_component[ab] = new_component_id; 260 | arc_component[ba] = new_component_id; 261 | } 262 | if(ba == xy) 263 | break; 264 | } 265 | } 266 | }; 267 | 268 | auto on_non_tree_arc_visit = [&](int x, int xy, int y){ 269 | arc_stack[arc_stack_end++] = xy; 270 | 271 | min_to(min_succ_depth[x], depth[y]); 272 | }; 273 | 274 | symmetric_depth_first_search( 275 | out_arc, head, 276 | on_first_root_visit, on_last_root_visit, 277 | on_tree_down_arc_visit, on_tree_up_arc_visit, 278 | on_non_tree_arc_visit 279 | ); 280 | 281 | #ifndef NDEBUG 282 | for(int i=0; i 290 | bool is_biconnected(const Tail&tail, const Head&head){ 291 | return compute_biconnected_components(invert_id_id_func(tail), head, compute_back_arc_permutation(tail, head)).image_count() <= 1; 292 | } 293 | 294 | #endif 295 | 296 | -------------------------------------------------------------------------------- /src/contraction_graph.h: -------------------------------------------------------------------------------- 1 | #ifndef CONTRACTION_GRAPH_H 2 | #define CONTRACTION_GRAPH_H 3 | 4 | #include "array_id_func.h" 5 | #include "tiny_id_func.h" 6 | #include "min_max.h" 7 | #include "multi_arc.h" 8 | #include 9 | #include 10 | 11 | class EdgeContractionGraph{ 12 | public: 13 | void rewire_arcs_from_second_to_first(int u, int v){ 14 | union_find_parent[v] 
= u; 15 | std::swap(next_adjacency_in_ring[u], next_adjacency_in_ring[v]); 16 | } 17 | 18 | template 19 | void forall_nodes_in_last_computed_neighborhood(const F&f){ 20 | for(int i=0; i 85 | EdgeContractionGraph(const Tail&tail, const Head&head): 86 | next_adjacency_in_ring(tail.image_count()), 87 | union_find_parent(tail.image_count()), 88 | out_arc_begin(tail.image_count()), 89 | out_arc_end(tail.image_count()), 90 | arc_head(tail.preimage_count()), 91 | in_neighborhood(tail.image_count()), 92 | neighborhood(tail.image_count()), 93 | neighborhood_size(0) 94 | { 95 | assert(is_symmetric(tail, head)); 96 | for(int i=0; i next_adjacency_in_ring; 128 | ArrayIDFunc union_find_parent; 129 | ArrayIDFunc out_arc_begin; 130 | ArrayIDFunc out_arc_end; 131 | ArrayIDFunc arc_head; 132 | BitIDFunc in_neighborhood; 133 | ArrayIDFunc neighborhood; 134 | int neighborhood_size; 135 | }; 136 | 137 | class NodeContractionGraph{ 138 | public: 139 | template 140 | NodeContractionGraph(const Tail&tail, const Head&head): 141 | g(tail, head), is_virtual(tail.image_count()){ 142 | assert(is_symmetric(tail, head)); 143 | is_virtual.fill(false); 144 | } 145 | 146 | template 147 | void forall_neighbors_then_contract_node(int v, const F&callback){ 148 | g.compute_neighborhood_of(v); 149 | g.forall_nodes_in_last_computed_neighborhood( 150 | [&](int u){ 151 | if(is_virtual(u)) 152 | g.rewire_arcs_from_second_to_first(v, u); 153 | } 154 | ); 155 | is_virtual.set(v, true); 156 | g.compute_neighborhood_of(v); 157 | g.forall_nodes_in_last_computed_neighborhood(callback); 158 | } 159 | 160 | private: 161 | EdgeContractionGraph g; 162 | BitIDFunc is_virtual; 163 | }; 164 | 165 | template 166 | int compute_chordal_supergraph(const Tail&tail, const Head&head, const OnNewArc&on_new_arc){ 167 | assert(is_symmetric(tail, head)); 168 | 169 | NodeContractionGraph g(tail, head); 170 | int max_upward_degree = 0; 171 | for(int x=0; x 6 | #include 7 | 8 | struct CountIterator{ 9 | typedef int value_type; 10 | 
typedef int difference_type; 11 | typedef const int* pointer; 12 | typedef const int& reference; 13 | typedef std::random_access_iterator_tag iterator_category; 14 | 15 | CountIterator&operator++(){ ++n_; return *this;} 16 | CountIterator operator++(int) {CountIterator tmp(*this); operator++(); return tmp;} 17 | CountIterator&operator--(){ --n_; return *this;} 18 | CountIterator operator--(int) {CountIterator tmp(*this); operator--(); return tmp;} // bug fix: post-decrement previously called operator++(), so it-- incremented 19 | int operator*() const {return n_;} 20 | 21 | const int*operator->() const {return &n_;} 22 | 23 | int operator[](int o)const{return n_ + o;} 24 | CountIterator&operator+=(CountIterator::difference_type o){n_ += o; return *this;} 25 | CountIterator&operator-=(CountIterator::difference_type o){n_ -= o; return *this;} 26 | 27 | int n_; 28 | }; 29 | 30 | inline bool operator==(CountIterator l, CountIterator r){return l.n_ == r.n_;} 31 | inline bool operator!=(CountIterator l, CountIterator r){return l.n_ != r.n_;} 32 | inline bool operator< (CountIterator l, CountIterator r){return l.n_ < r.n_;} 33 | inline bool operator> (CountIterator l, CountIterator r){return l.n_ > r.n_;} 34 | inline bool operator<=(CountIterator l, CountIterator r){return l.n_ <= r.n_;} 35 | inline bool operator>=(CountIterator l, CountIterator r){return l.n_ >= r.n_;} 36 | 37 | inline CountIterator::difference_type operator-(CountIterator l, CountIterator r){return l.n_ - r.n_;} 38 | inline CountIterator operator-(CountIterator l, CountIterator::difference_type r){return {l.n_ - r};} 39 | inline CountIterator operator+(CountIterator l, CountIterator::difference_type r){return {l.n_ + r};} 40 | inline CountIterator operator+(CountIterator::difference_type l, CountIterator r){return {l + r.n_};} 41 | 42 | typedef Range CountRange; 43 | 44 | inline CountRange count_range(int n){assert(n >= 0); return {CountIterator{0}, CountIterator{n}}; } 45 | inline CountRange count_range(int begin, int end){assert(begin <= end);return {CountIterator{begin}, 
CountIterator{end}};} 46 | 47 | #endif 48 | -------------------------------------------------------------------------------- /src/cut.h: -------------------------------------------------------------------------------- 1 | #ifndef CUT_H 2 | #define CUT_H 3 | 4 | #include "node_flow_cutter.h" 5 | #include "flow_cutter.h" 6 | #include "flow_cutter_config.h" 7 | 8 | #include 9 | 10 | #include 11 | 12 | namespace flow_cutter{ 13 | 14 | template 15 | class ComputeCut{ 16 | public: 17 | explicit ComputeCut(const GetGeoPos& geo_pos, Config config, bool reorder_arcs = true):geo_pos(geo_pos), config(config), reorder_arcs(reorder_arcs){} 18 | 19 | template 20 | std::deque operator()(const Tail&tail, const Head&head, const InputNodeID& input_node_id, const ArcWeight&arc_weight)const{ 21 | const int node_count = tail.image_count(); 22 | const int arc_count = tail.preimage_count(); 23 | 24 | auto out_arc = invert_sorted_id_id_func(tail); 25 | auto back_arc = compute_back_arc_permutation(tail, head); 26 | 27 | auto adapted_geo_pos = id_func(node_count, [&](int x){return geo_pos(input_node_id(x));}); 28 | 29 | std::deque best_cut; 30 | CutterFactory factory(config); 31 | 32 | switch(config.separator_selection){ 33 | case Config::SeparatorSelection::node_min_expansion: 34 | { 35 | auto graph = flow_cutter::make_graph( 36 | make_const_ref_id_id_func(tail), 37 | make_const_ref_id_id_func(head), 38 | make_const_ref_id_id_func(back_arc), 39 | make_const_ref_id_func(arc_weight), 40 | ConstIntIDFunc<1>(arc_count),//make_const_ref_id_func(arc_weight),// 41 | make_const_ref_id_func(out_arc) 42 | ); 43 | auto cutter = factory(graph); 44 | auto pairs = factory.select_source_target_pairs(node_count, adapted_geo_pos, config.cutter_count, config.random_seed); 45 | 46 | std::atomic best_score(std::numeric_limits::max()); 47 | int best_flow_intensity = std::numeric_limits::max(); 48 | int best_cutter_id = std::numeric_limits::max(); 49 | 50 | cutter.init(pairs, config.random_seed, 
adapted_geo_pos); 51 | tbb::spin_mutex current_cut_mutex; 52 | 53 | cutter.enum_cuts( 54 | /* shall_continue */ 55 | [&](const auto& cutter) { 56 | double cut_size = cutter.get_current_flow_intensity(); 57 | // If a cut is available, the next cut will be at least one larger 58 | if (cutter.cut_available()) { 59 | cut_size += 1; 60 | } 61 | double potential_best_next_score = cut_size/(double)(node_count/2); 62 | return potential_best_next_score < best_score.load(std::memory_order_acquire); 63 | }, 64 | /* report_cut */ 65 | [&](const auto& cutter, int cutter_id) { 66 | double cut_size = cutter.get_current_flow_intensity(); 67 | double small_side_size = cutter.get_current_smaller_cut_side_size(); 68 | 69 | double score = cut_size / small_side_size; 70 | 71 | 72 | if(cutter.get_current_smaller_cut_side_size() < config.max_imbalance * node_count) 73 | score += 1000000; 74 | 75 | if(score <= best_score.load(std::memory_order_acquire)) { 76 | tbb::spin_mutex::scoped_lock cut_lock(current_cut_mutex); 77 | 78 | double tmp_best_score = best_score.load(std::memory_order_acquire); 79 | if (std::tie(score, cut_size, cutter_id) < std::tie(tmp_best_score, best_flow_intensity, best_cutter_id)) { 80 | best_score.store(score, std::memory_order_release); 81 | best_flow_intensity = cut_size; 82 | best_cutter_id = cutter_id; 83 | /* order edges by direction */ 84 | 85 | //TODO are those x real edges? are those all edges? 
86 | std::deque cur_cut; 87 | auto cut = cutter.get_current_cut(); 88 | if (reorder_arcs) { 89 | for (auto x : cut) { 90 | if (cutter.is_on_smaller_side(head(x))) { 91 | cur_cut.push_back(x); 92 | cur_cut.push_front(back_arc(x)); 93 | } else { 94 | cur_cut.push_front(x); 95 | cur_cut.push_back(back_arc(x)); 96 | } 97 | } 98 | } else { 99 | for (auto x : cut) { 100 | cur_cut.push_back(x); 101 | cur_cut.push_back(back_arc(x)); 102 | } 103 | } 104 | best_cut = cur_cut; 105 | } 106 | } 107 | }, 108 | /* report_cuts_in_order */ 109 | false); 110 | } 111 | break; 112 | default: 113 | throw std::logic_error("Invalid cut selection config"); 114 | } 115 | return best_cut; 116 | } 117 | 118 | private: 119 | const GetGeoPos& geo_pos; 120 | Config config; 121 | const bool reorder_arcs; 122 | }; 123 | } 124 | 125 | #endif 126 | -------------------------------------------------------------------------------- /src/dijkstra.h: -------------------------------------------------------------------------------- 1 | #ifndef DIJKSTRA_H 2 | #define DIJKSTRA_H 3 | 4 | #include "heap.h" 5 | #include "tiny_id_func.h" 6 | #include "array_id_func.h" 7 | #include "timestamp_id_func.h" 8 | #include 9 | 10 | template 11 | class ForAllSuccessors{ 12 | public: 13 | ForAllSuccessors(OutArc out_arc, Head head, Weight weight): 14 | out_arc(std::move(out_arc)), head(std::move(head)), weight(std::move(weight)){} 15 | 16 | int preimage_count()const{ 17 | return out_arc.preimage_count(); 18 | } 19 | 20 | typedef typename id_func_image_type::type WeightType; 21 | 22 | template 23 | void operator()(int x, const F&f)const{ 24 | for(auto xy:out_arc(x)) 25 | f(head(xy), weight(xy)); 26 | } 27 | 28 | private: 29 | OutArc out_arc; 30 | Head head; 31 | Weight weight; 32 | 33 | }; 34 | 35 | template 36 | ForAllSuccessors make_forall_successors( 37 | OutArc out_arc, Head head, Weight weight 38 | ){ 39 | return {std::move(out_arc), std::move(head), std::move(weight)}; 40 | } 41 | 42 | template 43 | class Dijkstra{ 
44 | public: 45 | Dijkstra(){} 46 | 47 | explicit Dijkstra(int node_count): 48 | distance(node_count), queue(node_count), was_pushed(node_count){} 49 | 50 | void clear(){ 51 | was_pushed.fill(false); 52 | queue.clear(); 53 | } 54 | 55 | void add_source_node(int x, Dist d = 0){ 56 | distance.set(x, d); 57 | queue.push_or_decrease_key(x, d); 58 | was_pushed.set(x, true); 59 | } 60 | 61 | bool was_reached(int x)const{ 62 | return was_pushed(x); 63 | } 64 | 65 | // Returns an upper bound to the distance. This bound is tight if it is not larger than get_radius. This bound is numeric_limits::max if x was not visited yet. 66 | Dist extract_current_distance(int x)const{ 67 | if(was_pushed(x)) 68 | return distance(x); 69 | else 70 | return std::numeric_limits::max(); 71 | } 72 | 73 | bool is_finished()const{ 74 | return queue.empty(); 75 | } 76 | 77 | // Removes one node from the queue and relaxes its outgoing arc. The node ID is returned. 78 | // * on_push_or_decrease_distance(x, new_node_pushed, pred) is called for each node that is added to the queue or its key is decreased. 79 | // new_node_pushed is boolean that is true when a new node is pushed instead of just decreasing its current distance. 80 | // pred is the ID of the node from which x was reached with the new distance. 
81 | template 82 | int settle_next( 83 | const OutArc&out_arc, const Head&head, 84 | const Weight&weight, 85 | const OnPushOrDecreaseDistance&on_push_or_decrease_dist 86 | ){ 87 | return settle_next( 88 | make_forall_successors( 89 | make_const_ref_id_func(out_arc), 90 | make_const_ref_id_id_func(head), 91 | make_const_ref_id_func(weight) 92 | ), 93 | on_push_or_decrease_dist 94 | ); 95 | } 96 | 97 | template 98 | int settle_next( 99 | const ForAllSuccessors&forall_successors, 100 | const OnPushOrDecreaseDistance&on_push_or_decrease_dist 101 | ){ 102 | assert(!queue.empty()); 103 | auto x = queue.pop(); 104 | assert(was_pushed(x)); 105 | Dist x_dist = distance(x); 106 | forall_successors( 107 | x, 108 | [&](int y, Dist xy_weight){ 109 | #ifdef DIJKSTRA_RUNTIME_OVERFLOW_CHECK 110 | if(xy_weight < 0) 111 | throw std::runtime_error("arc weight is negative"); 112 | #endif 113 | 114 | assert(xy_weight >= 0); 115 | Dist y_dist; 116 | if(was_pushed(y)) 117 | y_dist = distance(y); 118 | else 119 | y_dist = std::numeric_limits::max(); 120 | 121 | if(x_dist < y_dist - xy_weight){ 122 | 123 | #ifdef DIJKSTRA_RUNTIME_OVERFLOW_CHECK 124 | if(x_dist >= std::numeric_limits::max() - xy_weight) 125 | throw std::runtime_error("path length exceeds 64 bits"); 126 | #endif 127 | 128 | y_dist = x_dist + xy_weight; 129 | distance.set(y, y_dist); 130 | bool is_decrease_key = queue.contains(y); 131 | queue.push_or_decrease_key(y, y_dist); 132 | was_pushed.set(y, true); 133 | on_push_or_decrease_dist(y, !is_decrease_key, x); 134 | } 135 | } 136 | ); 137 | return x; 138 | } 139 | 140 | Dist get_radius()const{ 141 | if(queue.empty()) 142 | return std::numeric_limits::max(); 143 | else 144 | return queue.peek_min_key(); 145 | } 146 | 147 | int get_front_size()const{ 148 | return queue.size(); 149 | } 150 | 151 | ArrayIDFuncmove_distance_array(){ 152 | return std::move(distance); 153 | } 154 | 155 | private: 156 | ArrayIDFuncdistance; 157 | min_id_heapqueue; 158 | BoolIDFunc was_pushed; 159 | 
}; 160 | 161 | template 162 | void compute_distances( 163 | const OutArc&out, const Head&head, const Weight&weight, 164 | int source_node, 165 | BitIDFunc&visited, ArrayIDFunc&dist, min_id_heap&q 166 | ){ 167 | q.clear(); 168 | visited.fill(false); 169 | q.push(source_node, 0); 170 | visited.set(source_node, true); 171 | dist[source_node] = 0; 172 | while(!q.empty()){ 173 | int x = q.pop(); 174 | 175 | for(auto xy:out(x)){ 176 | int y = head(xy); 177 | auto w = weight(xy); 178 | if(!visited(y) || dist[x] < dist[y] - w){ 179 | visited.set(y, true); 180 | dist[y] = dist[x] + w; 181 | q.push_or_decrease_key(y, dist[y]); 182 | } 183 | } 184 | } 185 | } 186 | 187 | template 188 | ArrayIDFunccompute_distances(const OutArc&out, const Head&head, const Weight&weight, int source_node){ 189 | const int node_count = head.image_count(); 190 | 191 | BitIDFunc visited(node_count); 192 | ArrayIDFuncdist(node_count); 193 | min_id_heapq(node_count); 194 | 195 | dist.fill(std::numeric_limits::max()); 196 | 197 | compute_distances(out, head, weight, source_node, visited, dist, q); 198 | 199 | return dist; // NVRO 200 | } 201 | 202 | template 203 | void depth_first_traverse_shortest_path_tree( 204 | const OutArc&out, const Head&head, const Weight&weight, 205 | const Dist&dist, 206 | int source_node, 207 | const OnFirst&on_first, const OnLast&on_last, 208 | BitIDFunc&pushed, ArrayIDFunc&stack 209 | ){ 210 | int stack_top = 1; 211 | stack[0] = source_node; 212 | 213 | pushed.fill(false); 214 | pushed.set(source_node, true); 215 | 216 | while(stack_top != 0){ 217 | int x = stack[--stack_top]; 218 | if(x < 0) 219 | on_last(~x); 220 | else{ 221 | stack[stack_top++] = ~x; 222 | 223 | on_first(x); 224 | 225 | for(auto xy:out(x)){ 226 | auto y = head(xy); 227 | if(!pushed(y) && dist(x) + weight(xy) == dist(y)){ 228 | stack[stack_top++] = y; 229 | pushed.set(y, true); 230 | } 231 | } 232 | } 233 | } 234 | } 235 | 236 | template 237 | void depth_first_traverse_shortest_path_tree( 238 | const 
OutArc&out, const Head&head, const Weight&weight, 239 | int source_node, 240 | const OnFirst&on_first, const OnLast&on_last 241 | ){ 242 | const int node_count = head.image_count(); 243 | 244 | BitIDFunc visited(node_count); 245 | ArrayIDFuncdist(node_count); 246 | 247 | { 248 | min_id_heapq(node_count); 249 | compute_distances(out, head, weight, source_node, visited, dist, q); 250 | } 251 | 252 | { 253 | ArrayIDFuncstack(node_count); 254 | depth_first_traverse_shortest_path_tree(out, head, weight, dist, source_node, on_first, on_last, visited, stack); 255 | } 256 | } 257 | 258 | #endif 259 | 260 | -------------------------------------------------------------------------------- /src/dinic.h: -------------------------------------------------------------------------------- 1 | #ifndef DINIC_H 2 | #define DINIC_H 3 | 4 | #include "tiny_id_func.h" 5 | #include "array_id_func.h" 6 | #include 7 | 8 | namespace max_flow{ 9 | 10 | template 11 | class UnitDinicAlgo{ 12 | public: 13 | UnitDinicAlgo( 14 | const Tail&, 15 | const InvTail&inv_tail, const Head&head, const BackArc&back_arc, 16 | const SourceList&source_list, const TargetList&target_list, 17 | const BitIDFunc& is_source, const SourceList& source_front 18 | ): 19 | node_count(head.image_count()), arc_count(head.preimage_count()), 20 | inv_tail(inv_tail), head(head), back_arc(back_arc), source_list(source_list), 21 | source_front(source_front), 22 | is_source(is_source), is_target(node_count), 23 | is_saturated(arc_count), is_blocked(arc_count), 24 | queue(node_count), was_pushed(node_count), 25 | is_on_same_level_or_lower(node_count), current_path_node(node_count), current_path_arc(node_count-1){ 26 | 27 | is_target.fill(false); 28 | for(int i=0; i(is_saturated(uv)); 134 | excess += static_cast(is_saturated(back_arc(uv))); 135 | } 136 | return excess; 137 | } 138 | 139 | public: 140 | void verify_flow_is_maximum() { 141 | #ifndef NDEBUG 142 | int old_num_reachable = num_reachable; 143 | BitIDFunc old_is_blocked = 
is_blocked; 144 | BitIDFunc old_was_pushed = was_pushed; 145 | BitIDFunc old_is_on_same_level_or_lower = is_on_same_level_or_lower; 146 | assert(!compute_blocking_flow()); //should technically not be done with compute_blocking_flow. but we know it works 147 | for (int u = 0; u < node_count; ++u) { 148 | assert(was_pushed(u) == old_was_pushed(u)); 149 | assert(is_on_same_level_or_lower(u) == old_is_on_same_level_or_lower(u)); 150 | } 151 | for (int e = 0; e < arc_count; ++e) { 152 | assert(is_blocked(e) == old_is_blocked(e)); 153 | } 154 | assert(num_reachable == old_num_reachable); 155 | verify_flow_conservation(); 156 | #endif 157 | } 158 | 159 | void verify_flow_conservation() { 160 | #ifndef NDEBUG 161 | int source_excess = 0; 162 | int target_excess = 0; 163 | for (int u = 0; u < node_count; ++u) { 164 | if (is_source(u)) source_excess += excess_at_node(u); 165 | else if (is_target(u)) target_excess += excess_at_node(u); 166 | else assert (excess_at_node(u) == 0); 167 | } 168 | assert(source_excess == - flow_intensity); 169 | assert(target_excess == flow_intensity); 170 | for (int e = 0; e < arc_count; ++e) 171 | assert(!(is_saturated(e) && is_saturated(back_arc(e)))); 172 | #endif 173 | } 174 | 175 | 176 | void advance(){ 177 | if(!is_finished_flag && compute_blocking_flow()){ 178 | augment_all_non_blocked_path(); 179 | is_finished_flag = false; 180 | }else{ 181 | is_finished_flag = true; 182 | } 183 | } 184 | 185 | int get_current_flow_intensity()const{ 186 | return flow_intensity; 187 | } 188 | 189 | int get_num_reachable_nodes()const{ 190 | return num_reachable; 191 | } 192 | 193 | const BitIDFunc&get_saturated_flags()const{ 194 | return is_saturated; 195 | } 196 | 197 | BitIDFunc move_saturated_flags()const{ 198 | return std::move(is_saturated); 199 | } 200 | 201 | BitIDFunc move_reachable_flags() { 202 | return std::move(is_on_same_level_or_lower); 203 | } 204 | 205 | bool is_reachable_from_source(const int u) const { 206 | return 
is_on_same_level_or_lower(u); 207 | } 208 | 209 | bool is_finished()const{ 210 | return is_finished_flag; 211 | } 212 | 213 | private: 214 | int node_count, arc_count; 215 | const InvTail&inv_tail; 216 | const Head&head; 217 | const BackArc&back_arc; 218 | const SourceList& source_list; 219 | const SourceList& source_front; 220 | 221 | const BitIDFunc& is_source; 222 | BitIDFunc is_target; 223 | BitIDFunc is_saturated; 224 | int flow_intensity; 225 | int num_reachable; 226 | BitIDFunc is_blocked; 227 | 228 | ArrayIDFunc queue; 229 | BitIDFunc was_pushed; 230 | BitIDFunc is_on_same_level_or_lower; 231 | 232 | ArrayIDFunccurrent_path_node; 233 | ArrayIDFunccurrent_path_arc; 234 | 235 | bool is_finished_flag; 236 | }; 237 | } 238 | 239 | #endif 240 | 241 | -------------------------------------------------------------------------------- /src/edmond_karp.h: -------------------------------------------------------------------------------- 1 | #ifndef EDMOND_KARP_H 2 | #define EDMOND_KARP_H 3 | 4 | #include "tiny_id_func.h" 5 | #include "array_id_func.h" 6 | 7 | namespace max_flow{ 8 | template 9 | BitIDFunc compute_maximum_unit_flow_using_edmond_karp( 10 | const InvTail&inv_tail, const Head&head, const BackArc&back_arc, 11 | const Source&source_list, const Target&target_list 12 | ){ 13 | int node_count = head.image_count(); 14 | int arc_count = head.preimage_count(); 15 | 16 | BitIDFunc is_target(node_count); 17 | is_target.fill(false); 18 | for(int i=0; i queue(node_count); 28 | int queue_begin = 0; 29 | int queue_end = 0; 30 | BitIDFunc was_pushed(node_count); 31 | 32 | auto advance_queue_pos = [&](int pos){ 33 | if(pos == node_count-1) 34 | return 0; 35 | else 36 | return pos+1; 37 | }; 38 | 39 | auto push = [&](int x){ 40 | queue[queue_end] = x; 41 | queue_end = advance_queue_pos(queue_end); 42 | was_pushed.set(x, true); 43 | }; 44 | 45 | auto pop = [&]{ 46 | auto x = queue[queue_begin]; 47 | queue_begin = advance_queue_pos(queue_begin); 48 | return x; 49 | }; 50 | 51 
| auto clear = [&]{ 52 | queue_begin = 0; 53 | queue_end = 0; 54 | was_pushed.fill(false); 55 | }; 56 | 57 | auto is_empty = [&]{ 58 | return queue_begin == queue_end; 59 | }; 60 | 61 | 62 | struct Pred{ 63 | int node; 64 | int arc; 65 | }; 66 | 67 | ArrayIDFunc pred(node_count); 68 | 69 | BitIDFunc flow(arc_count); 70 | flow.fill(false); 71 | 72 | auto find_augmenting_path = [&](int s){ 73 | clear(); 74 | push(s); 75 | while(!is_empty()){ 76 | auto x = pop(); 77 | for(auto xy:inv_tail(x)){ 78 | if(!flow(xy)){ 79 | int y = head(xy); 80 | if(!was_pushed(y) && !is_source(y)){ 81 | pred[y].node = x; 82 | pred[y].arc = xy; 83 | if(is_target(y)){ 84 | return y; 85 | }else{ 86 | push(y); 87 | } 88 | } 89 | } 90 | } 91 | } 92 | return -1; 93 | }; 94 | 95 | auto augment_flow_along_path = [&](int s, int y){ 96 | while(y != s){ 97 | auto x = pred(y).node; 98 | auto xy = pred(y).arc; 99 | auto yx = back_arc(xy); 100 | 101 | assert(!flow(xy)); 102 | if(flow(yx)) 103 | flow.set(yx, false); 104 | else 105 | flow.set(xy, true); 106 | y = x; 107 | } 108 | }; 109 | 110 | auto check_flow_invariants = [&]{ 111 | #ifndef NDEBUG 112 | for(int i=0; i= 0); 126 | else if(is_target(x)) 127 | assert(surplus <= 0); 128 | else 129 | assert(surplus == 0); 130 | } 131 | 132 | for(int x=0; x 6 | #include 7 | using namespace std; 8 | 9 | void set_autocomplete_command_list(std::vector){} 10 | bool get_command_line(std::string&line){ 11 | for(;;){ 12 | cout << " $ " << flush; 13 | if(getline(cin, line)){ 14 | if(!line.empty()) 15 | return true; 16 | else 17 | continue; 18 | }else 19 | return false; 20 | } 21 | } 22 | 23 | #else 24 | 25 | // link with -lreadline 26 | 27 | #include 28 | #include 29 | #include 30 | #include 31 | #include 32 | 33 | static std::vectorcmd; 34 | 35 | void set_autocomplete_command_list(std::vectorcmd_list_){ 36 | cmd = std::move(cmd_list_); 37 | } 38 | 39 | bool get_command_line(std::string&line){ 40 | const char*x; 41 | do{ 42 | x = readline(" $ "); 43 | if(!x) 44 | 
return false; 45 | }while(*x == '\0'); 46 | add_history(x); 47 | line = x; 48 | return true; 49 | } 50 | 51 | 52 | static char** my_completion(const char*, int ,int); 53 | static char* my_generator(const char*,int); 54 | static char * dupstr (const char*); 55 | static void *xmalloc (int); 56 | 57 | static char** my_completion( const char * text , int start, int end) 58 | { 59 | (void)end; 60 | if (start == 0) 61 | return rl_completion_matches ((char*)text, &my_generator); 62 | else{ 63 | return nullptr; 64 | } 65 | } 66 | 67 | static char* my_generator(const char* text, int state) 68 | { 69 | static int list_index, len; 70 | 71 | if (!state) { 72 | list_index = -1; 73 | len = strlen (text); 74 | } 75 | 76 | for(++list_index; list_index < (int)cmd.size(); ++list_index){ 77 | if (strncmp (cmd[list_index].c_str(), text, len) == 0){ 78 | return (dupstr(cmd[list_index].c_str())); 79 | } 80 | } 81 | return nullptr; 82 | } 83 | 84 | char* dupstr (const char* s) { 85 | char*r = (char*) xmalloc ((strlen (s) + 1)); 86 | strcpy (r, s); 87 | return r; 88 | } 89 | 90 | void* xmalloc (int size) 91 | { 92 | void *buf; 93 | buf = malloc (size); 94 | if (!buf) { 95 | fprintf (stderr, "Error: Out of memory. 
Exiting.'n"); 96 | exit (1); 97 | } 98 | return buf; 99 | } 100 | 101 | static 102 | void initialize_readline ()__attribute__((constructor)); 103 | 104 | static 105 | void initialize_readline () 106 | { 107 | rl_attempted_completion_function = my_completion; 108 | } 109 | 110 | #endif 111 | 112 | -------------------------------------------------------------------------------- /src/fancy_input.h: -------------------------------------------------------------------------------- 1 | #ifndef FANCY_INPUT_H 2 | #define FANCY_INPUT_H 3 | 4 | #include 5 | #include 6 | 7 | void set_autocomplete_command_list(std::vectorcmd_list); 8 | bool get_command_line(std::string&line); 9 | 10 | #endif 11 | 12 | -------------------------------------------------------------------------------- /src/file_utility.cpp: -------------------------------------------------------------------------------- 1 | #include "file_utility.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #ifndef _WIN32 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #else 14 | #include 15 | #endif 16 | 17 | std::time_t file_last_modified(const std::string&file_name){ 18 | if(file_name.empty()) 19 | throw std::runtime_error("The empty string is no filename."); 20 | #ifndef _WIN32 21 | struct stat st; 22 | if(stat(file_name.c_str(), &st)){ 23 | if(errno == ENOENT) 24 | throw std::runtime_error("File "+file_name+" not found"); 25 | throw std::runtime_error("Error while reading timestamp of "+file_name+" : "+strerror(errno)); 26 | } 27 | return st.st_mtime; // I have no idea what unit this is, the docs wont tell... 
28 | #else 29 | HANDLE file = CreateFile( 30 | file_name.c_str(), 31 | GENERIC_READ, 32 | FILE_SHARE_READ, 33 | NULL, 34 | OPEN_EXISTING, 35 | FILE_ATTRIBUTE_NORMAL, 36 | NULL 37 | ); 38 | if(file == INVALID_HANDLE_VALUE){ 39 | int error_code = GetLastError(); 40 | throw std::runtime_error("CreateFile failed with error code "+std::to_string(error_code)); 41 | }else{ 42 | FILETIME filetime; 43 | int ok = GetFileTime(file, NULL, NULL, &filetime); 44 | int error_code = GetLastError(); 45 | CloseHandle(file); 46 | if(!ok) 47 | throw std::runtime_error("GetFileTime failed with error code "+std::to_string(error_code)); 48 | ULARGE_INTEGER t; 49 | t.LowPart = filetime.dwLowDateTime; 50 | t.HighPart = filetime.dwHighDateTime; 51 | return t.QuadPart/10000; // convert to milliseconds 52 | } 53 | #endif 54 | } 55 | 56 | std::string concat_file_path_and_file_name(std::string path, const std::string&name){ 57 | if(path.empty() || path[path.length()-1] == 58 | #ifndef _WIN32 59 | '/' 60 | #else 61 | '\\' 62 | #endif 63 | ){ 64 | path += name; 65 | }else{ 66 | path += 67 | #ifndef _WIN32 68 | "/" 69 | #else 70 | "\\" 71 | #endif 72 | ; 73 | path += name; 74 | } 75 | return path; 76 | } 77 | 78 | std::string make_absolute_file_name(const std::string&file_name){ 79 | #ifndef _WIN32 80 | char path[PATH_MAX+1]; 81 | if(realpath(file_name.c_str(), path) == nullptr) 82 | throw std::runtime_error(std::string("realpath failed for file \""+file_name+"\" with the error message : ")+strerror(errno)); 83 | return path; 84 | #else 85 | char path[MAX_PATH]; 86 | GetFullPathName(file_name.c_str(), path, MAX_PATH, NULL); 87 | return path; 88 | #endif 89 | } 90 | 91 | std::string get_temp_directory_path(){ 92 | #ifndef _WIN32 93 | const char*cache_tmp_file = getenv ("CACHETMPDIR"); 94 | if(cache_tmp_file) 95 | return cache_tmp_file; 96 | const char*tmp_file = getenv ("TMPDIR"); 97 | if(tmp_file) 98 | return tmp_file; 99 | return "/tmp/"; 100 | #else 101 | char buf[MAX_PATH]; 102 | 
GetTempPath(buf, MAX_PATH); 103 | return buf; 104 | #endif 105 | } 106 | 107 | std::string uniquely_hash_file_name(const std::string&file_name){ 108 | std::ostringstream out; 109 | for(auto c:file_name) 110 | if(std::isalnum(c)) 111 | out << c; 112 | return out.str(); 113 | } 114 | 115 | bool file_exists(const std::string&file_name){ 116 | #ifndef _WIN32 117 | struct stat buffer; 118 | return stat(file_name.c_str(), &buffer) == 0; 119 | #else 120 | DWORD dwAttrib = GetFileAttributes(szPath); 121 | return (dwAttrib != INVALID_FILE_ATTRIBUTES && !(dwAttrib & FILE_ATTRIBUTE_DIRECTORY)); 122 | #endif 123 | } 124 | 125 | -------------------------------------------------------------------------------- /src/file_utility.h: -------------------------------------------------------------------------------- 1 | #ifndef FILE_UTILITY_H 2 | #define FILE_UTILITY_H 3 | 4 | #include 5 | #include 6 | 7 | std::string concat_file_path_and_file_name(std::string path, const std::string&name); 8 | std::string make_absolute_file_name(const std::string&file_name); 9 | std::string get_temp_directory_path(); 10 | std::string uniquely_hash_file_name(const std::string&file_name); 11 | bool file_exists(const std::string&file_name); 12 | std::time_t file_last_modified(const std::string&file_name); 13 | 14 | #endif 15 | -------------------------------------------------------------------------------- /src/filter.h: -------------------------------------------------------------------------------- 1 | #ifndef FILTER_H 2 | #define FILTER_H 3 | 4 | #include "tiny_id_func.h" 5 | 6 | template 7 | int count_true(const Pred&p){ 8 | int sum = 0; 9 | for(int i=0; i 16 | typename std::enable_if< 17 | is_only_id_func::value, 18 | ArrayIDFunc::type> 19 | >::type keep_if(const Pred&p, int new_preimage_count, const IDFunc&f){ 20 | assert(p.preimage_count() == f.preimage_count()); 21 | assert(new_preimage_count == count_true(p)); 22 | 23 | ArrayIDFunc::type>result(new_preimage_count); 24 | 25 | int out = 0; 26 | 
for(int in=0; in 34 | typename std::enable_if< 35 | is_id_id_func::value, 36 | ArrayIDIDFunc 37 | >::type keep_if(const Pred&p, int new_preimage_count, const IDFunc&f){ 38 | assert(p.preimage_count() == f.preimage_count()); 39 | assert(new_preimage_count == count_true(p)); 40 | 41 | ArrayIDIDFunc result(new_preimage_count, f.image_count()); 42 | 43 | int out = 0; 44 | for(int in=0; in 52 | ArrayIDIDFunc compute_keep_function(const Pred&pred, int new_image_count){ 53 | ArrayIDIDFunc f(pred.preimage_count(), new_image_count); 54 | int next_id = 0; 55 | for(int i=0; i 64 | ArrayIDIDFunc compute_inverse_keep_function(const Pred&pred, int new_image_count){ 65 | ArrayIDIDFunc f(new_image_count, pred.preimage_count()); 66 | int next_id = 0; 67 | for(int i=0; i 29 | #include 30 | #include 31 | #include 32 | 33 | namespace flow_cutter{ 34 | struct Config{ 35 | """, 36 | 37 | for x in var_names: 38 | print "\t\t"+var_type[x]+" "+x+";" 39 | 40 | print "\n", 41 | 42 | for x in enum_members.keys(): 43 | print "\t\tenum class "+x+"{\n\t\t\t" + ",\n\t\t\t".join(enum_members[x])+"\n\t\t};" 44 | print "\t\t"+x+" "+enum_var_name[x]+";\n" 45 | 46 | 47 | print("\t\tConfig():\n\t\t\t" + 48 | ",\n\t\t\t".join([x + "("+str(var_default_value[x])+")" for x in var_names]) + ",\n\t\t\t" + 49 | ",\n\t\t\t".join([enum_var_name[x]+"("+x+"::"+enum_members[x][0]+")" for x in enum_members.keys()]) + "{}\n") 50 | 51 | print "\t\tvoid set(const std::string&var, const std::string&val){\n\t\t\t", 52 | 53 | #print "int val_id; try{val_id = std::stoi(val);}catch(...){val_id = -1;};\n\t\t\t", 54 | print "int val_id = -1;\n\t\t\t", 55 | 56 | for x in enum_members.keys(): 57 | print "if(var == \""+x+"\" || var == \""+enum_var_name[x]+"\"){\n\t\t\t\t", 58 | for y in enum_members[x]: 59 | print "if(val == \""+y+"\" || val_id == static_cast("+x+"::"+y+")) \n\t\t\t\t\t" + enum_var_name[x] + " = "+x+"::"+y+";\n\t\t\t\telse", 60 | print "throw std::runtime_error(\"Unknown config value \"+val+\" for variable 
"+x+"; valid are "+", ".join(enum_members[x])+"\");\n", 61 | 62 | print "\t\t\t}else", 63 | 64 | for x in var_names: 65 | print "if(var == \""+x+"\"){\n\t\t\t\t", 66 | if var_requirement[x] != "true": 67 | if var_type[x] == "int": 68 | print "int x = std::stoi(val);\n\t\t\t\t", 69 | elif var_type[x] == "float": 70 | print "float x = std::stof(val);\n\t\t\t\t", 71 | elif var_type[x] == "double": 72 | print "double x = std::stof(val);\n\t\t\t\t", 73 | else: 74 | raise Exception("unsupported type "+var_type[x]) 75 | print "if(!("+var_requirement[x]+"))\n\t\t\t\t\tthrow std::runtime_error(\"Value for \\\""+x+"\\\" must fullfill \\\""+var_requirement[x]+"\\\"\");\n\t\t\t\t", 76 | print x+" = x;", 77 | else: 78 | if var_type[x] == "int": 79 | print x+" = std::stoi(val);", 80 | if var_type[x] == "float": 81 | print x+" = std::stof(val);", 82 | print "\n\t\t\t}else", 83 | 84 | print "throw std::runtime_error(\"Unknown config variable \"+var+\"; valid are "+", ".join(enum_members.keys())+", "+", ".join(var_names)+"\");\n", 85 | print "\t\t}\n", 86 | 87 | print "\t\tstd::string get(const std::string&var)const{\n\t\t\t", 88 | for x in enum_members.keys(): 89 | print "if(var == \""+x+"\" || var == \""+enum_var_name[x]+"\"){\n\t\t\t\t", 90 | 91 | for y in enum_members[x]: 92 | print "if("+enum_var_name[x]+" == "+x+"::"+y+") return \""+y+"\";\n\t\t\t\telse", 93 | print "{assert(false); return \"\";}\n", 94 | 95 | print "\t\t\t}else", 96 | 97 | for x in var_names: 98 | print "if(var == \""+x+"\"){\n\t\t\t\t", 99 | print "return std::to_string("+x+");\n", 100 | print "\t\t\t}else", 101 | 102 | print "throw std::runtime_error(\"Unknown config variable \"+var+\"; valid are "+",".join(enum_members.keys())+", "+", ".join(var_names)+"\");\n", 103 | print "\t\t}\n", 104 | 105 | print("\t\tstd::string get_config()const{\n\t\t\tstd::ostringstream out;\n\t\t\tout\n\t\t\t\t" + 106 | "\n\t\t\t\t".join(["<< std::setw(30) << \""+x+"\" << \" : \" << get(\""+x+"\") << '\\n'" for x in 
enum_members.keys()]) + "\n\t\t\t\t" + 107 | "\n\t\t\t\t".join(["<< std::setw(30) << \""+x+"\" << \" : \" << get(\""+x+"\") << '\\n'" for x in var_names]) + 108 | ";\n\t\t\treturn out.str();\n\t\t}" 109 | ) 110 | 111 | print """ 112 | }; 113 | } 114 | #endif 115 | """, 116 | -------------------------------------------------------------------------------- /src/flow_cutter_config.h.template: -------------------------------------------------------------------------------- 1 | PierceRating pierce_rating max_target_minus_source_hop_dist min_source_hop_dist max_target_hop_dist max_target_minus_source_weight_dist min_source_weight_dist max_target_weight_dist random oldest max_arc_weight min_arc_weight circular_hop circular_weight max_target_minus_source_hop_dist_with_source_dist_tie_break max_target_minus_source_hop_dist_with_closer_dist_tie_break 2 | AvoidAugmentingPath avoid_augmenting_path avoid_and_pick_best do_not_avoid avoid_and_pick_oldest avoid_and_pick_random 3 | GraphSearchAlgorithm graph_search_algorithm pseudo_depth_first_search breadth_first_search depth_first_search 4 | SkipNonMaximumSides skip_non_maximum_sides skip no_skip 5 | DumpState dump_state no yes 6 | ReportCuts report_cuts yes no 7 | BulkDistance bulk_distance no yes 8 | SeparatorSelection separator_selection node_min_expansion edge_min_expansion node_first edge_first 9 | var int cutter_count x>0 3 10 | var int random_seed true 5489 11 | var int source x>=-1 -1 12 | var int target x>=-1 -1 13 | var int thread_count x>=1 1 14 | var int max_cut_size x>=1 1000 15 | var float max_imbalance 0.5>=x&&x>=0.0 0.2 16 | var int branch_factor x>=1 5 17 | var double chunk_size 0.5>=x&&x>=0.0 0.1 18 | var double bulk_distance_factor 0.5>x&&x>0.0 0.05 19 | var double bulk_assimilation_threshold 0.5>x&&x>0.0 0.4 20 | var double bulk_assimilation_order_threshold 0.5>x&&x>0.0 0.25 21 | var double initial_assimilated_fraction 0.5>x&&x>0.0 0.05 22 | var double bulk_step_fraction 0.5>x&&x>0.0 0.05 23 | var int 
geo_pos_ordering_cutter_count x>0 8 24 | var int distance_ordering_cutter_count x>=0 0 25 | -------------------------------------------------------------------------------- /src/flow_cutter_dinic.h: -------------------------------------------------------------------------------- 1 | #ifndef FLOW_CUTTER_DINIC_H 2 | #define FLOW_CUTTER_DINIC_H 3 | 4 | #include "tiny_id_func.h" 5 | #include "array_id_func.h" 6 | 7 | namespace flow_cutter_accelerated{ 8 | 9 | class UnitDinicAlgo{ 10 | public: 11 | template 12 | explicit UnitDinicAlgo( 13 | const Graph& graph 14 | ): 15 | node_count(graph.node_count()), arc_count(graph.arc_count()), 16 | is_blocked(arc_count), 17 | queue(node_count), 18 | is_on_same_level_or_lower(node_count), current_path_node(node_count), current_path_arc(node_count-1){ 19 | 20 | flow_intensity = 0; 21 | 22 | is_finished_flag = false; 23 | } 24 | 25 | private: 26 | template 27 | bool compute_blocking_flow( 28 | const Graph& graph, 29 | const SourceList& source_list, 30 | const SourceSet& source_set, 31 | const TargetSet& target_set, 32 | SourceReachable& source_reachable, 33 | const IsSaturated& is_saturated){ 34 | assert(!source_list.empty()); 35 | auto is_target = [&](int x) { 36 | return target_set.is_inside(x); 37 | }; 38 | bool target_reachable = false; 39 | is_blocked.fill(false); 40 | is_on_same_level_or_lower.fill(false); 41 | source_reachable.reset(source_set, false); 42 | int queue_begin = 0; 43 | int queue_end = source_list.size(); 44 | for(size_t i=0; i 86 | int find_first_non_block_out_arc(const Graph& graph, int x)const{ 87 | for(int xy : graph.out_arc(x)) 88 | if(!is_blocked(xy)) 89 | return xy; 90 | return -1; 91 | } 92 | 93 | template 94 | int augment_all_non_blocked_path(const Graph& graph, const SourceList& source_list, const SourceSet& source_set, const TargetSet& target_set, IsSaturated is_saturated, IncreaseFlow& increase_flow){ 95 | (void)source_set; 96 | (void)is_saturated; 97 | auto is_target = [&](int x) { 98 | return 
target_set.is_inside(x); 99 | }; 100 | int augmented_intensity = 0; 101 | for(size_t i=0; i 144 | int advance( 145 | const Graph& graph, 146 | const SourceList& source_list, 147 | const SourceSet& source_set, 148 | const TargetSet& target_set, 149 | SourceReachable& source_reachable, 150 | const IsSaturated& is_saturated, 151 | const IncreaseFlow& increase_flow 152 | ){ 153 | if(!is_finished_flag && compute_blocking_flow(graph, source_list, source_set, target_set, source_reachable, is_saturated)){ 154 | int augmented_intensity = augment_all_non_blocked_path(graph, source_list, source_set, target_set, is_saturated, increase_flow); 155 | is_finished_flag = false; 156 | return augmented_intensity; 157 | }else{ 158 | is_finished_flag = true; 159 | return 0; 160 | } 161 | } 162 | 163 | int get_current_flow_intensity()const{ 164 | return flow_intensity; 165 | } 166 | 167 | bool is_finished()const{ 168 | return is_finished_flag; 169 | } 170 | 171 | template 172 | int run(const Graph& graph, 173 | const SourceSet& source_set, 174 | const TargetSet& target_set, 175 | SourceReachable& source_reachable, 176 | const IsSaturated& is_saturated, 177 | const IncreaseFlow& increase_flow) { 178 | assert(!source_set.get_extra_nodes().empty()); 179 | flow_intensity = 0; 180 | is_finished_flag = false; 181 | do { 182 | advance(graph, source_set.get_extra_nodes(), source_set, target_set, source_reachable, is_saturated, increase_flow); 183 | } while (!is_finished()); 184 | return get_current_flow_intensity(); 185 | } 186 | 187 | private: 188 | int node_count, arc_count; 189 | 190 | int flow_intensity; 191 | BitIDFunc is_blocked; 192 | 193 | ArrayIDFunc queue; 194 | BitIDFunc is_on_same_level_or_lower; 195 | 196 | ArrayIDFunccurrent_path_node; 197 | ArrayIDFunccurrent_path_arc; 198 | 199 | bool is_finished_flag; 200 | }; 201 | 202 | } 203 | 204 | #endif 205 | 206 | -------------------------------------------------------------------------------- /src/ford_fulkerson.h: 
-------------------------------------------------------------------------------- 1 | #ifndef FORD_FULKERSON_H 2 | #define FORD_FULKERSON_H 3 | 4 | #include "tiny_id_func.h" 5 | #include "array_id_func.h" 6 | #include 7 | 8 | namespace max_flow{ 9 | 10 | template 11 | class FordFulkersonAlgo{ 12 | public: 13 | FordFulkersonAlgo( 14 | const Tail&tail, 15 | const InvTail&inv_tail, const Head&head, const BackArc&back_arc, 16 | const SourceList&source_list, const TargetList&target_list, 17 | const BitIDFunc& is_source, const SourceList& source_front 18 | ): 19 | node_count(head.image_count()), arc_count(head.preimage_count()), 20 | tail(tail), inv_tail(inv_tail), head(head), back_arc(back_arc), 21 | source_list(source_list), source_front(source_front), 22 | is_source(is_source), is_target(node_count), 23 | is_saturated(arc_count), 24 | stack(node_count), predecessor(node_count), is_reachable(node_count) 25 | { 26 | 27 | is_target.fill(false); 28 | for(int i=0; i(is_saturated(uv)); 86 | excess += static_cast(is_saturated(back_arc(uv))); 87 | } 88 | return excess; 89 | } 90 | public: 91 | 92 | void verify_flow_is_maximum() { 93 | #ifndef NDEBUG 94 | BitIDFunc assert_reachable(node_count); 95 | assert_reachable = is_source; 96 | int old_num_reachable = num_reachable; 97 | num_reachable = source_list.preimage_count(); 98 | for (int u : source_list) { 99 | int target_hit = search_from(u, assert_reachable); //technically not fine, but search_from looks correct 100 | assert(target_hit == -1); 101 | } 102 | for (int u = 0; u < node_count; ++u) 103 | assert(assert_reachable(u) == is_reachable(u)); 104 | assert(num_reachable == old_num_reachable); 105 | verify_flow_conservation(); 106 | #endif 107 | } 108 | 109 | void verify_flow_conservation() { 110 | #ifndef NDEBUG 111 | int source_excess = 0; 112 | int target_excess = 0; 113 | for (int u = 0; u < node_count; ++u) { 114 | if (is_source(u)) source_excess += excess_at_node(u); 115 | else if (is_target(u)) target_excess += 
excess_at_node(u); 116 | else assert (excess_at_node(u) == 0); 117 | } 118 | assert(source_excess == - flow_intensity); 119 | assert(target_excess == flow_intensity); 120 | for (int e = 0; e < arc_count; ++e) 121 | assert(!(is_saturated(e) && is_saturated(back_arc(e)))); 122 | #endif 123 | } 124 | 125 | void advance(){ 126 | is_reachable = is_source; 127 | num_reachable = source_list.preimage_count(); 128 | 129 | for (int i = last_source; i < source_front.preimage_count(); ++i) { 130 | if (augment_flow_from(source_front(i))) { 131 | last_source = i; 132 | return; 133 | } 134 | } 135 | 136 | for (int i = 0; i < last_source; ++i) { 137 | if (augment_flow_from(source_front(i))) { 138 | assert(false); 139 | last_source = i; 140 | return; 141 | } 142 | } 143 | 144 | is_finished_flag = true; 145 | } 146 | 147 | int get_current_flow_intensity()const{ 148 | return flow_intensity; 149 | } 150 | 151 | int get_num_reachable_nodes() const { 152 | return num_reachable; 153 | } 154 | 155 | const BitIDFunc&get_saturated_flags()const{ 156 | return is_saturated; 157 | } 158 | 159 | BitIDFunc move_saturated_flags()const{ 160 | return std::move(is_saturated); 161 | } 162 | 163 | BitIDFunc move_reachable_flags() { 164 | return std::move(is_reachable); 165 | } 166 | 167 | bool is_reachable_from_source(const int u) const { 168 | return is_reachable(u); 169 | } 170 | 171 | bool is_finished()const{ 172 | return is_finished_flag; 173 | } 174 | 175 | private: 176 | int node_count, arc_count; 177 | const Tail&tail; 178 | const InvTail&inv_tail; 179 | const Head&head; 180 | const BackArc&back_arc; 181 | const SourceList& source_list; 182 | const SourceList& source_front; 183 | 184 | const BitIDFunc& is_source; 185 | BitIDFunc is_target; 186 | BitIDFunc is_saturated; 187 | ArrayIDFunc stack; 188 | ArrayIDFunc predecessor; 189 | BitIDFunc is_reachable; 190 | int last_source; 191 | 192 | int flow_intensity; 193 | int num_reachable; 194 | bool is_finished_flag; 195 | }; 196 | } 197 | 198 | #endif 
199 | -------------------------------------------------------------------------------- /src/geo_pos.cpp: -------------------------------------------------------------------------------- 1 | #include "geo_pos.h" 2 | #include "io_helper.h" 3 | #include "tiny_id_func.h" 4 | #include "union_find.h" 5 | #include 6 | #include 7 | #include 8 | 9 | 10 | 11 | std::vector order_geo_positions_along_line(const std::vector&pos){ 12 | int point_count = pos.size(); 13 | 14 | if(point_count <= 1) 15 | return pos; 16 | 17 | struct P{ 18 | int p, q; 19 | double dist; 20 | }; 21 | 22 | std::vector

candidate_segments; 23 | for(int i=0; ineighbors(point_count); 39 | UnionFind are_connected(point_count); 40 | 41 | int segment_count = 0; 42 | for(auto s:candidate_segments){ 43 | if(!neighbors[s.p].is_full() && !neighbors[s.q].is_full() && !are_connected.in_same(s.p, s.q)){ 44 | neighbors[s.p].add(s.q); 45 | neighbors[s.q].add(s.p); 46 | are_connected.unite(s.q, s.p); 47 | ++segment_count; 48 | if(segment_count == point_count - 1) 49 | break; 50 | } 51 | } 52 | 53 | 54 | int x = -1; 55 | for(int i=0; inew_pos(point_count); 64 | 65 | int new_id = 0; 66 | 67 | for(;;){ 68 | new_pos[new_id++] = pos[x]; 69 | if(y == -1) 70 | break; 71 | int z = neighbors[y].other(x); 72 | x = y; 73 | y = z; 74 | } 75 | 76 | return new_pos; // NVRO 77 | } 78 | 79 | 80 | 81 | 82 | constexpr int dimacs_scale = 1000000; 83 | 84 | static void save_binary_geo_pos_impl(std::ostream&out, const ArrayIDFunc&geo_pos){ 85 | int s = geo_pos.preimage_count(); 86 | 87 | out 88 | .write((const char*)&s, sizeof(s)) 89 | .write((const char*)geo_pos.begin(), s*sizeof(geo_pos[0])); 90 | } 91 | 92 | static void save_dimacs_geo_pos_impl(std::ostream&out, const ArrayIDFunc&geo_pos){ 93 | out << "p aux sp co "<< geo_pos.preimage_count() << '\n'; 94 | for(int i=0; i load_binary_geo_pos_impl(std::istream&in, long long size){ 99 | int s; 100 | if(!in.read((char*)&s, sizeof(s))) 101 | throw std::runtime_error("Could not read number of geo coordinates"); 102 | if(size != static_cast(sizeof(GeoPos))*s + static_cast(sizeof(s))) 103 | throw std::runtime_error("Binary GeoPos file has an invalid size"); 104 | 105 | ArrayIDFuncf(s); 106 | if(!in.read((char*)f.begin(), sizeof(GeoPos)*s)) 107 | throw std::runtime_error("Could not read GeoPos data"); 108 | return f; // NVRO 109 | } 110 | 111 | static ArrayIDFunc load_dimacs_geo_pos_impl(std::istream&in){ 112 | 113 | ArrayIDFuncgeo_pos; 114 | BitIDFunc seen; 115 | bool seen_header = false; 116 | int seen_count = 0; 117 | 118 | std::string line; 119 | int line_num = 0; 120 
| while(std::getline(in, line)){ 121 | ++line_num; 122 | if(line.empty() || line[0] == 'c') 123 | continue; 124 | if(!seen_header){ 125 | std::istringstream l(line); 126 | std::string b; 127 | if(!(l>>b) || b != "p" || !(l>>b) || b != "aux" || !(l>>b) || b != "sp" || !(l>>b) || b != "co") 128 | throw std::runtime_error("Header broken in line "+std::to_string(line_num)); 129 | 130 | int s; 131 | if(!(l>>s)) 132 | throw std::runtime_error("Header missing size"); 133 | 134 | if(l >> b) 135 | throw std::runtime_error("The header in line "+std::to_string(line_num)+" contains extra charachters at the end of the line"); 136 | 137 | geo_pos = ArrayIDFunc(s); 138 | seen = BitIDFunc(s); 139 | seen.fill(false); 140 | seen_header = true; 141 | }else{ 142 | std::istringstream l(line); 143 | std::string v; 144 | if(!(l>>v) || v != "v") 145 | throw std::runtime_error("Line "+std::to_string(line_num)+" is broken"); 146 | 147 | long long node, lon, lat; 148 | if(!(l >> node >> lon >> lat)) 149 | throw std::runtime_error("Could not read the data in line "+std::to_string(line_num)); 150 | 151 | if(l >> v) 152 | throw std::runtime_error("Line "+std::to_string(line_num)+" contains extra charachters at the end of the line"); 153 | --node; 154 | if(seen(node)) 155 | throw std::runtime_error("Node "+std::to_string(node)+" has a second pair of coordinates in line "+std::to_string(line_num)); 156 | seen.set(node, true); 157 | ++seen_count; 158 | geo_pos[node].lon = static_cast(lon) / dimacs_scale; 159 | geo_pos[node].lat = static_cast(lat) / dimacs_scale; 160 | } 161 | } 162 | 163 | if(!seen_header) 164 | throw std::runtime_error("File is missing header"); 165 | if(seen_count != geo_pos.preimage_count()) 166 | throw std::runtime_error("Not every node has a coordinate"); 167 | return geo_pos; // NVRO 168 | } 169 | 170 | 171 | ArrayIDFuncload_binary_geo_pos(const std::string&file_name){ 172 | return load_binary_file(file_name, load_binary_geo_pos_impl); 173 | } 174 | 175 | void 
save_binary_geo_pos(const std::string&file_name, const ArrayIDFunc&geo_pos){ 176 | save_binary_file(file_name, save_binary_geo_pos_impl, geo_pos); 177 | } 178 | 179 | void save_dimacs_geo_pos(const std::string&file_name, const ArrayIDFunc&geo_pos){ 180 | save_text_file(file_name, save_dimacs_geo_pos_impl, geo_pos); 181 | } 182 | 183 | ArrayIDFuncuncached_load_dimacs_geo_pos(const std::string&file_name){ 184 | return load_uncached_text_file(file_name, load_dimacs_geo_pos_impl); 185 | } 186 | 187 | ArrayIDFuncload_dimacs_geo_pos(const std::string&file_name){ 188 | return load_cached_text_file(file_name, "dimacs_coord", load_dimacs_geo_pos_impl, load_binary_geo_pos_impl, save_binary_geo_pos_impl); 189 | } 190 | 191 | -------------------------------------------------------------------------------- /src/geo_pos.h: -------------------------------------------------------------------------------- 1 | #ifndef GEO_POS_H 2 | #define GEO_POS_H 3 | 4 | #include 5 | #include 6 | 7 | struct GeoPos{ 8 | double lat, lon; 9 | }; 10 | 11 | //! Returns the distance between two latlon in meters. 12 | //! 13 | //! 
Code adapted from http://www.movable-type.co.uk/scripts/latlong.html 14 | inline 15 | double geo_dist(GeoPos a, GeoPos b){ 16 | 17 | const double pi = 3.14159265359; 18 | const double R = 6371000.0; 19 | 20 | a.lat /= 180; 21 | a.lat *= pi; 22 | b.lat /= 180; 23 | b.lat *= pi; 24 | a.lon /= 180; 25 | a.lon *= pi; 26 | b.lon /= 180; 27 | b.lon *= pi; 28 | 29 | double dlat = b.lat - a.lat; 30 | double dlon = b.lon - a.lon; 31 | 32 | 33 | double a_ = sin(dlat/2.0) * sin(dlat/2.0) + sin(dlon/2.0) * sin(dlon/2.0) * cos(a.lat) * cos(b.lat); 34 | double c = 2 * atan2(sqrt(a_), sqrt(1-a_)); 35 | return R * c; 36 | 37 | /*var R = 6371; // km 38 | var dLat = (b.lat-a.lat).toRad(); 39 | var dLon = (b.lon-a.lon).toRad(); 40 | var a.lat = a.lat.toRad(); 41 | var b.lat = b.lat.toRad(); 42 | 43 | var a = Math.sin(dLat/2) * Math.sin(dLat/2) + 44 | Math.sin(dLon/2) * Math.sin(dLon/2) * Math.cos(a.lat) * Math.cos(b.lat); 45 | var c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1-a)); 46 | var d = R * c;*/ 47 | 48 | } 49 | 50 | inline GeoPos mid_geo_pos(GeoPos a, GeoPos b){ 51 | return GeoPos {(a.lat+b.lat)/2, (a.lon+b.lon)/2}; 52 | } 53 | 54 | std::vector order_geo_positions_along_line(const std::vector&pos); 55 | 56 | 57 | #include "array_id_func.h" 58 | #include 59 | 60 | ArrayIDFuncload_binary_geo_pos(const std::string&file_name); 61 | void save_binary_geo_pos(const std::string&file_name, const ArrayIDFunc&geo_pos); 62 | void save_dimacs_geo_pos(const std::string&file_name, const ArrayIDFunc&geo_pos); 63 | ArrayIDFuncuncached_load_dimacs_geo_pos(const std::string&file_name); 64 | ArrayIDFuncload_dimacs_geo_pos(const std::string&file_name); 65 | 66 | #endif 67 | -------------------------------------------------------------------------------- /src/histogram.h: -------------------------------------------------------------------------------- 1 | #ifndef HISTOGRAM_H 2 | #define HISTOGRAM_H 3 | 4 | #include "id_func_traits.h" 5 | #include "array_id_func.h" 6 | #include 7 | 8 | template 9 | 
typename std::enable_if< 10 | is_id_id_func::value, 11 | ArrayIDFunc 12 | >::type compute_histogram(const IDIDFunc&f){ 13 | ArrayIDFunch(f.image_count()); 14 | h.fill(0); 15 | for(int i=0; i 21 | typename std::enable_if< 22 | is_id_func::value, 23 | int 24 | >::type max_histogram_id(const IDFunc&h){ 25 | assert(h.preimage_count() != 0); 26 | 27 | typename id_func_image_type::type max_element = h(0); 28 | int max_id = 0; 29 | for(int i=1; i 44 | typename std::enable_if< 45 | is_id_func::value, 46 | int 47 | >::type min_histogram_id(const IDFunc&h){ 48 | assert(h.preimage_count() != 0); 49 | 50 | typename id_func_image_type::type min_element = h(0); 51 | int min_id = 0; 52 | for(int i=1; i element){ 55 | min_element = element; 56 | min_id = i; 57 | } 58 | } 59 | #ifndef NDEBUG 60 | for(int i=0; i= h(min_id)); 62 | #endif 63 | return min_id; 64 | } 65 | 66 | #endif 67 | -------------------------------------------------------------------------------- /src/id_func.h: -------------------------------------------------------------------------------- 1 | #ifndef ID_FUNC_H 2 | #define ID_FUNC_H 3 | 4 | #include 5 | #include 6 | #include "id_func_traits.h" 7 | #include 8 | 9 | template 10 | struct LambdaIDFunc{ 11 | int preimage_count()const{return preimage_count_;} 12 | 13 | typename id_func_image_type::type operator()(int id)const{ 14 | assert(0 <= id && id <= preimage_count_ && "id out of bounds"); 15 | return func_(id); 16 | } 17 | 18 | int preimage_count_; 19 | Func func_; 20 | }; 21 | 22 | template 23 | typename std::enable_if< 24 | has_int_call_operator::value, 25 | LambdaIDFunc 26 | >::type id_func(int preimage_count, Func func){ 27 | return {preimage_count, std::move(func)}; 28 | } 29 | 30 | template 31 | struct LambdaIDIDFunc{ 32 | static_assert(std::is_same::type>::value, "IDIDFunc must return int"); 33 | 34 | int preimage_count()const{return id_func_.preimage_count();} 35 | int image_count()const{return image_count_;} 36 | 37 | int operator()(int preimage)const{ 
38 | assert(0 <= preimage && preimage <= preimage_count() && "preimage out of bounds"); 39 | int image = id_func_(preimage); 40 | assert(0 <= image && image <= image_count() && "image out of bounds"); 41 | return image; 42 | } 43 | 44 | int image_count_; 45 | IDFunc id_func_; 46 | 47 | }; 48 | 49 | template 50 | typename std::enable_if< 51 | is_id_func::value, 52 | LambdaIDIDFunc 53 | >::type id_id_func(int image_count, IDFunc func){ 54 | return {image_count, std::move(func)}; 55 | } 56 | 57 | template 58 | typename std::enable_if< 59 | has_int_call_operator::value, 60 | LambdaIDIDFunc> 61 | >::type id_id_func(int preimage_count, int image_count, Func func){ 62 | return {image_count, id_func(preimage_count, std::move(func))}; 63 | } 64 | 65 | struct IdentityIDIDFunc{ 66 | IdentityIDIDFunc(int image_count) : image_count_(image_count) {} 67 | 68 | int preimage_count() const {return image_count_;} 69 | int image_count() const {return image_count_;} 70 | int operator()(int preimage) const { 71 | assert(0 <= preimage && preimage <= preimage_count() && "preimage out of bounds"); 72 | return preimage; 73 | } 74 | int image_count_; 75 | }; 76 | 77 | template 78 | struct ConstIntIDFunc{ 79 | explicit ConstIntIDFunc(int preimage_count):preimage_count_(preimage_count){} 80 | 81 | int preimage_count()const{ 82 | return preimage_count_; 83 | } 84 | 85 | int operator()(int)const{ 86 | return value; 87 | } 88 | 89 | int preimage_count_; 90 | }; 91 | 92 | template 93 | class ConstRefIDIDFunc{ 94 | public: 95 | ConstRefIDIDFunc():ptr(0){} 96 | ConstRefIDIDFunc(const IDIDFunc&x):ptr(&x){} 97 | 98 | int preimage_count()const{ return ptr->preimage_count(); } 99 | int image_count()const{return ptr->image_count(); } 100 | int operator()(int x)const{return (*ptr)(x);} 101 | 102 | private: 103 | const IDIDFunc*ptr; 104 | }; 105 | 106 | template 107 | ConstRefIDIDFuncmake_const_ref_id_id_func(const IDIDFunc&f){ 108 | return {f}; 109 | } 110 | 111 | template 112 | class ConstRefIDFunc{ 113 
| public: 114 | ConstRefIDFunc():ptr(0){} 115 | ConstRefIDFunc(const IDFunc&x):ptr(&x){} 116 | 117 | int preimage_count()const{ return ptr->preimage_count(); } 118 | decltype(std::declval()(0)) operator()(int x)const{return (*ptr)(x);} 119 | 120 | private: 121 | const IDFunc*ptr; 122 | }; 123 | 124 | template 125 | ConstRefIDFuncmake_const_ref_id_func(const IDFunc&f){ 126 | return {f}; 127 | } 128 | 129 | 130 | #endif 131 | 132 | -------------------------------------------------------------------------------- /src/id_func_traits.h: -------------------------------------------------------------------------------- 1 | #ifndef ID_FUNC_TRAITS_H 2 | #define ID_FUNC_TRAITS_H 3 | 4 | #include 5 | 6 | template 7 | struct id_func_image_type{ 8 | typedef typename std::decay()(0))>::type type; 9 | }; 10 | 11 | #define MAKE_TYPED_HAS_TRAIT(HAS, TYPE, EXPR)\ 12 | templatestd::false_type HAS##_impl2(...);\ 13 | templatestd::true_type HAS##_impl2(typename std::decay::type*);\ 14 | templateauto HAS##_impl1()->decltype(HAS##_impl2(static_cast(nullptr)));\ 15 | templatestruct HAS : std::enable_if())>::type{}; 16 | 17 | #define MAKE_UNTYPED_HAS_TRAIT(HAS, EXPR)\ 18 | templatestd::false_type HAS##_impl2(...);\ 19 | templatestd::true_type HAS##_impl2(typename std::decay::type*);\ 20 | templateauto HAS##_impl1()->decltype(HAS##_impl2(nullptr));\ 21 | templatestruct HAS : std::enable_if())>::type{}; 22 | 23 | #define F std::declval() 24 | #define IT typename id_func_image_type::type 25 | #define I std::declval() 26 | 27 | // F is an instance of the function, FT is the function type, I is an instance of the function's image type and IT is the image type 28 | 29 | MAKE_TYPED_HAS_TRAIT (has_preimage_count, int, F.preimage_count() ) 30 | MAKE_TYPED_HAS_TRAIT (has_image_count, int, F.image_count() ) 31 | MAKE_UNTYPED_HAS_TRAIT(has_int_call_operator, F(0) ) 32 | MAKE_TYPED_HAS_TRAIT (has_int_int_call_operator, int, F(0) ) 33 | MAKE_TYPED_HAS_TRAIT (has_set, void, F.set(0, I) ) 34 | 
MAKE_TYPED_HAS_TRAIT (has_fill, void, F.fill(I) ) 35 | MAKE_TYPED_HAS_TRAIT (has_move, IT, F.move(0) ) 36 | MAKE_TYPED_HAS_TRAIT (has_set_image_count, void, F.set_image_count(0) ) 37 | 38 | #define MAKE_BOOL_TRAIT(NAME, EXPR)\ 39 | template struct NAME : std::integral_constant{}; 40 | 41 | MAKE_BOOL_TRAIT(is_id_func, 42 | has_preimage_count::value 43 | && has_int_call_operator::value 44 | ) 45 | MAKE_BOOL_TRAIT(is_id_id_func, 46 | has_preimage_count::value 47 | && has_image_count::value && 48 | has_int_int_call_operator::value 49 | ) 50 | MAKE_BOOL_TRAIT(is_only_id_func, 51 | !is_id_id_func::value 52 | && is_id_func::value 53 | ) 54 | MAKE_BOOL_TRAIT(is_mutable_id_func, 55 | is_id_func::value 56 | && has_set::value 57 | && has_fill::value 58 | && has_move::value 59 | ) 60 | MAKE_BOOL_TRAIT(is_mutable_id_id_func, 61 | is_id_id_func::value 62 | && has_set::value 63 | && has_fill::value 64 | && has_move::value 65 | && has_set_image_count::value 66 | ) 67 | 68 | #undef F 69 | #undef IT 70 | #undef I 71 | #undef MAKE_TYPED_HAS_TRAIT 72 | #undef MAKE_UNTYPED_HAS_TRAIT 73 | #undef MAKE_BOOL_TRAIT 74 | 75 | #define REQUIRES(...) 
\ 76 | typename std::enable_if< __VA_ARGS__ :: value, int >::type = 0 77 | 78 | #endif 79 | -------------------------------------------------------------------------------- /src/id_multi_func.h: -------------------------------------------------------------------------------- 1 | #ifndef ID_MULTI_FUNC_H 2 | #define ID_MULTI_FUNC_H 3 | 4 | #include "array_id_func.h" 5 | #include "count_range.h" 6 | #include "range.h" 7 | #include "chain.h" 8 | #include 9 | 10 | struct RangeIDIDMultiFunc{ 11 | int preimage_count()const{ return range_begin.preimage_count()-1; } 12 | int image_count()const{ return range_begin(preimage_count()); } 13 | 14 | CountRange operator()(int id)const{ 15 | assert(0 <= id && id < preimage_count() && "id out of bounds"); 16 | return count_range(range_begin(id), range_begin(id+1)); 17 | } 18 | 19 | ArrayIDFuncrange_begin; 20 | }; 21 | 22 | 23 | template 24 | struct ArrayIDMultiFunc{ 25 | int preimage_count()const{ return preimage_to_intermediate.preimage_count(); } 26 | 27 | Rangeoperator()(int id){ 28 | assert(0 <= id && id < preimage_count() && "id out of bounds"); 29 | return { 30 | intermediate_to_image.begin() + *std::begin(preimage_to_intermediate(id)), 31 | intermediate_to_image.begin() + *std::end(preimage_to_intermediate(id)) 32 | }; 33 | } 34 | 35 | Rangeoperator()(int id)const{ 36 | assert(0 <= id && id < preimage_count() && "id out of bounds"); 37 | return { 38 | intermediate_to_image.begin() + *std::begin(preimage_to_intermediate(id)), 39 | intermediate_to_image.begin() + *std::end(preimage_to_intermediate(id)) 40 | }; 41 | } 42 | 43 | RangeIDIDMultiFunc preimage_to_intermediate; 44 | ArrayIDFuncintermediate_to_image; 45 | }; 46 | 47 | 48 | struct ArrayIDIDMultiFunc{ 49 | int image_count()const{ return intermediate_to_image.image_count(); } 50 | int preimage_count()const{ return preimage_to_intermediate.preimage_count(); } 51 | 52 | Rangeoperator()(int id){ 53 | assert(0 <= id && id < preimage_count() && "id out of bounds"); 54 | return 
{ 55 | intermediate_to_image.begin() + *std::begin(preimage_to_intermediate(id)), 56 | intermediate_to_image.begin() + *std::end(preimage_to_intermediate(id)) 57 | }; 58 | } 59 | 60 | Rangeoperator()(int id)const{ 61 | assert(0 <= id && id < preimage_count() && "id out of bounds"); 62 | return { 63 | intermediate_to_image.begin() + *std::begin(preimage_to_intermediate(id)), 64 | intermediate_to_image.begin() + *std::end(preimage_to_intermediate(id)) 65 | }; 66 | } 67 | 68 | RangeIDIDMultiFunc preimage_to_intermediate; 69 | ArrayIDIDFunc intermediate_to_image; 70 | }; 71 | 72 | //! Inverts an id-id function f. In this context we have two ID types: preimage IDs 73 | //! and image IDs. f maps preimage IDs onto image IDs. This function computes a 74 | //! id-id multi function g that maps image IDs onto preimage ID ranges. 75 | //! g(x) is a range of all y such that f(y) = x ordered increasing by y. 76 | template 77 | ArrayIDIDMultiFunc invert_id_id_func(const IDIDFunc&f){ 78 | ArrayIDIDMultiFunc g = { 79 | RangeIDIDMultiFunc{ 80 | ArrayIDFunc{f.image_count()+1} 81 | }, 82 | ArrayIDIDFunc{f.preimage_count(), f.preimage_count()} 83 | }; 84 | 85 | auto&begin = g.preimage_to_intermediate.range_begin; 86 | begin.fill(0); 87 | 88 | for(int i=0; i 115 | RangeIDIDMultiFunc invert_sorted_id_id_func(const IDIDFunc&f){ 116 | assert(std::is_sorted(f.begin(), f.end()) && "f is not sorted"); 117 | 118 | RangeIDIDMultiFunc h = {ArrayIDFunc{f.image_count()+1}}; 119 | 120 | auto&begin = h.range_begin; 121 | begin.fill(0); 122 | 123 | for(int i=0; i 139 | ArrayIDIDMultiFunc compute_successor_function(const Tail&tail, const Head&head){ 140 | auto x = invert_id_id_func(tail); 141 | x.intermediate_to_image = chain(x.intermediate_to_image, head); 142 | return x; // NRVO 143 | } 144 | 145 | #endif 146 | 147 | -------------------------------------------------------------------------------- /src/id_sort.h: -------------------------------------------------------------------------------- 1 | 
#ifndef ID_SORT_H 2 | #define ID_SORT_H 3 | 4 | #include "array_id_func.h" 5 | #include 6 | 7 | template 8 | void stable_sort_copy_by_id( 9 | InIter in_begin, InIter in_end, 10 | OutIter out_iter, 11 | int id_count, const GetID&get_id 12 | ){ 13 | ArrayIDFuncpos(id_count); 14 | pos.fill(0); 15 | for(InIter i=in_begin; i!=in_end; ++i) 16 | ++pos[get_id(*i)]; 17 | 18 | int sum = 0; 19 | for(int i=0; i 30 | void stable_sort_copy_by_id( 31 | InIter in_begin, InIter in_end, 32 | OutIter out_iter, 33 | const GetID&get_id 34 | ){ 35 | stable_sort_copy_by_id(in_begin, in_end, out_iter, get_id.image_count(), get_id); 36 | } 37 | 38 | #endif 39 | 40 | -------------------------------------------------------------------------------- /src/id_string.h: -------------------------------------------------------------------------------- 1 | #ifndef ID_STRING_H 2 | #define ID_STRING_H 3 | 4 | #include 5 | #include 6 | #include "id_func_traits.h" 7 | 8 | template 9 | void forall_in_id_string(const std::string&str, const F&f){ 10 | auto str_begin = str.begin(), str_end = str.end(), str_pos = str_begin; 11 | 12 | int id_begin = -1; 13 | int current_id = 0; 14 | bool seen_a_digit = false; 15 | for(;;){ 16 | if(str_pos != str_end && (*str_pos == ':' || *str_pos == '-')){ 17 | if(!seen_a_digit) 18 | throw std::runtime_error("integer missing at offset "+std::to_string(str_pos - str_begin + 1)); 19 | } 20 | if(str_pos == str_end || *str_pos == ':'){ 21 | if(id_begin == -1) 22 | f(current_id); 23 | else{ 24 | if(id_begin > current_id) 25 | throw std::runtime_error("id range "+std::to_string(id_begin)+"-"+std::to_string(current_id)+" is invalid"); 26 | for(int i=id_begin; i<=current_id; ++i) 27 | f(i); 28 | } 29 | if(str_pos == str_end) 30 | break; 31 | current_id = 0; 32 | id_begin = -1; 33 | seen_a_digit = false; 34 | }else if(*str_pos == '-'){ 35 | id_begin = current_id; 36 | current_id = 0; 37 | seen_a_digit = false; 38 | }else if('0' <= *str_pos && *str_pos <= '9'){ // stricly speaking 
undefined behaviour; digits do not have to be in a concecutive range... they just always are. 39 | current_id *= 10; 40 | current_id += *str_pos - '0'; 41 | seen_a_digit = true; 42 | }else 43 | throw std::runtime_error("Illegal character "+std::string(str_pos, str_pos+1)+" in id list at offset "+std::to_string(str_pos - str_begin + 1)); 44 | ++str_pos; 45 | } 46 | } 47 | 48 | template 49 | void forall_in_id_string(const std::string&str, int id_count, const F&f){ 50 | forall_in_id_string(str, 51 | [&](int x){ 52 | if(x < 0 || x >= id_count) 53 | throw std::runtime_error("id " +std::to_string(x) + " is out of bounds [0,"+std::to_string(id_count)+")"); 54 | f(x); 55 | } 56 | ); 57 | } 58 | 59 | template)> 60 | std::string make_id_string(const F&f){ 61 | std::string r; 62 | 63 | int begin = -1; 64 | bool first = true; 65 | 66 | for(int i=0; i 96 | std::string make_id_string_from_list_with_back_arcs(const L&l, const BackArc&back_arc){ 97 | std::string s; 98 | 99 | bool first = true; 100 | for(auto x:l){ 101 | if(!first) 102 | s += ":"; 103 | else 104 | first = false; 105 | s += std::to_string(x); 106 | s += ":"; 107 | s += std::to_string(back_arc(x)); 108 | } 109 | 110 | return s; // NVRO 111 | } 112 | 113 | template 114 | std::string make_id_string_from_list(const L&l){ 115 | std::string s; 116 | 117 | bool first = true; 118 | for(auto x:l){ 119 | if(!first) 120 | s += ":"; 121 | else 122 | first = false; 123 | s += std::to_string(x); 124 | } 125 | 126 | return s; // NVRO 127 | } 128 | 129 | #endif 130 | -------------------------------------------------------------------------------- /src/inverse_vector.h: -------------------------------------------------------------------------------- 1 | #ifndef INVERSE_VECTOR_H 2 | #define INVERSE_VECTOR_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | // 9 | // The inverse vector p of a vector v is a vector such that the elements 10 | // v[p[i]], v[p[i]+1], v[p[i]+2], ..., v[p[i+1]-1] are exactly the elements 11 | // with value 
i in v. If i does not occur in v, then p[i] == p[i+1]. v must be 12 | // a sorted vector of unsigned integers. 13 | // 14 | 15 | inline 16 | std::vectorinvert_vector(const std::vector&v, unsigned element_count){ 17 | std::vectorindex(element_count+1); 18 | if(v.empty()){ 19 | std::fill(index.begin(), index.end(), 0); 20 | }else{ 21 | index[0] = 0; 22 | 23 | unsigned pos = 0; 24 | for(unsigned i=0; iinvert_inverse_vector(const std::vector&sorted_index){ 36 | std::vectorv(sorted_index.back()); 37 | 38 | for(unsigned i=0; i 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include "file_utility.h" 10 | 11 | template 12 | void save_binary_file(const std::string&file_name, const SaveFunc&save, Args&&...args){ 13 | std::ofstream out(file_name, std::ios::binary); 14 | if(!out) 15 | throw std::runtime_error("Could not open "+file_name+" for binary writing"); 16 | save(out, std::forward(args)...); 17 | } 18 | 19 | template 20 | void save_text_file(const std::string&file_name, const SaveFunc&save, Args&&...args){ 21 | if(file_name == "-"){ 22 | save(std::cout, std::forward(args)...); 23 | std::cout << std::flush; 24 | } else if(file_name == "-null"){ 25 | } else { 26 | std::ofstream out(file_name); 27 | if(!out) 28 | throw std::runtime_error("Could not open "+file_name+" for text writing"); 29 | save(out, std::forward(args)...); 30 | } 31 | } 32 | 33 | template 34 | auto load_binary_file(const std::string&file_name, const LoadFunc&load)->decltype(load(std::cin, 0)){ 35 | std::ifstream in(file_name, std::ios::binary); 36 | if(!in) 37 | throw std::runtime_error("Could not load "+file_name+" for binary reading"); 38 | 39 | in.seekg (0, in.end); 40 | long long size = in.tellg(); 41 | in.seekg (0, in.beg); 42 | 43 | return load(in, size); 44 | } 45 | 46 | template 47 | auto load_uncached_text_file(const std::string&file_name, const LoadFunc&load)->decltype(load(std::cin)){ 48 | if(file_name == "-"){ 49 | return load(std::cin); 50 | } else { 51 | std::ifstream 
in(file_name); 52 | if(!in) 53 | throw std::runtime_error("Could not load "+file_name+" for text reading"); 54 | return load(in); 55 | } 56 | } 57 | 58 | template 59 | auto load_cached_text_file( 60 | const std::string&file_name, 61 | const std::string&format_name, 62 | const UncachedLoadFunc&uncached_load, 63 | const CachedLoadFunc&cached_load, 64 | const CacheSaveFunc&cache_save 65 | )->decltype(uncached_load(std::cin)){ 66 | if(file_name == "-"){ 67 | return uncached_load(std::cin); 68 | } else { 69 | std::string cache_file_name = concat_file_path_and_file_name( 70 | get_temp_directory_path(), 71 | "flow_cutter_cached_"+format_name+ "_" + uniquely_hash_file_name(make_absolute_file_name(file_name)) 72 | ); 73 | 74 | if(file_exists(cache_file_name)) 75 | if(file_last_modified(file_name) < file_last_modified(cache_file_name)){ 76 | std::ifstream in(cache_file_name, std::ios::binary); 77 | if(!in) 78 | throw std::runtime_error("Could not open binary cache file "+cache_file_name+" of "+file_name+" for reading"); 79 | in.seekg (0, in.end); 80 | long long size = in.tellg(); 81 | in.seekg (0, in.beg); 82 | return cached_load(in, size); 83 | } 84 | 85 | std::ifstream in(file_name); 86 | if(!in) 87 | throw std::runtime_error("Could not open text file "+file_name+" for reading"); 88 | 89 | auto data = uncached_load(in); 90 | in.close(); 91 | 92 | std::ofstream out(cache_file_name, std::ios::binary); 93 | 94 | if(out) 95 | cache_save(out, data); 96 | 97 | return data; 98 | } 99 | } 100 | 101 | #endif 102 | -------------------------------------------------------------------------------- /src/kaHIP_interface.h: -------------------------------------------------------------------------------- 1 | /****************************************************************************** 2 | * kaffpa_interface.h 3 | * 4 | * Source of KaHIP -- Karlsruhe High Quality Partitioning. 
5 | * 6 | ****************************************************************************** 7 | * Copyright (C) 2013-2015 Christian Schulz 8 | * 9 | * This program is free software: you can redistribute it and/or modify it 10 | * under the terms of the GNU General Public License as published by the Free 11 | * Software Foundation, either version 2 of the License, or (at your option) 12 | * any later version. 13 | * 14 | * This program is distributed in the hope that it will be useful, but WITHOUT 15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 17 | * more details. 18 | * 19 | * You should have received a copy of the GNU General Public License along with 20 | * this program. If not, see . 21 | *****************************************************************************/ 22 | 23 | 24 | #ifndef KAFFPA_INTERFACE_RYEEZ6WJ 25 | #define KAFFPA_INTERFACE_RYEEZ6WJ 26 | 27 | #ifdef __cplusplus 28 | 29 | extern "C" 30 | { 31 | #endif 32 | 33 | const int FAST = 0; 34 | const int ECO = 1; 35 | const int STRONG = 2; 36 | const int FASTSOCIAL = 3; 37 | const int ECOSOCIAL = 4; 38 | const int STRONGSOCIAL = 5; 39 | 40 | // same data structures as in metis 41 | // edgecut and part are output parameters 42 | // part has to be an array of n ints 43 | void kaffpa(int* n, int* vwgt, int* xadj, 44 | int* adjcwgt, int* adjncy, int* nparts, 45 | double* imbalance, bool suppress_output, int seed, int mode, 46 | int* edgecut, int* part); 47 | 48 | // balance constraint on nodes and edges 49 | void kaffpa_balance_NE(int* n, int* vwgt, int* xadj, 50 | int* adjcwgt, int* adjncy, int* nparts, 51 | double* imbalance, bool suppress_output, int seed, int mode, 52 | int* edgecut, int* part); 53 | 54 | void node_separator(int* n, int* vwgt, int* xadj, 55 | int* adjcwgt, int* adjncy, int* nparts, 56 | double* imbalance, bool suppress_output, int seed, int mode, 57 | int* num_separator_vertices, 
int** separator); 58 | 59 | #ifdef __cplusplus 60 | } 61 | #endif 62 | 63 | #endif /* end of include guard: KAFFPA_INTERFACE_RYEEZ6WJ */ 64 | -------------------------------------------------------------------------------- /src/list_graph.h: -------------------------------------------------------------------------------- 1 | #ifndef LIST_GRAPH_H 2 | #define LIST_GRAPH_H 3 | 4 | #include "array_id_func.h" 5 | 6 | #include 7 | #include 8 | 9 | struct ListGraph{ 10 | ListGraph()=default; 11 | ListGraph(int node_count, int arc_count) 12 | :head(arc_count, node_count), tail(arc_count, node_count), arc_weight(arc_count), node_weight(node_count){} 13 | 14 | int node_count()const{ return head.image_count(); } 15 | int arc_count()const{ return head.preimage_count(); } 16 | 17 | ArrayIDIDFunc head, tail; 18 | ArrayIDFuncarc_weight, node_weight; 19 | }; 20 | 21 | void save_binary_graph(const std::string&file_name, const ArrayIDIDFunc&tail, const ArrayIDIDFunc&head, const ArrayIDFunc&node_weight, const ArrayIDFunc&arc_weight); 22 | ListGraph load_binary_graph(const std::string&file_name); 23 | 24 | void save_csv_graph(const std::string&file_name, const ArrayIDIDFunc&tail, const ArrayIDIDFunc&head, const ArrayIDFunc&arc_weight); 25 | 26 | void save_dimacs_graph(const std::string&file_name, const ArrayIDIDFunc&tail, const ArrayIDIDFunc&head, const ArrayIDFunc&arc_weight); 27 | ListGraph load_dimacs_graph(const std::string&file_name); 28 | ListGraph uncached_load_dimacs_graph(const std::string&file_name); 29 | 30 | void save_ddsg_graph(const std::string&file_name, const ArrayIDIDFunc&tail, const ArrayIDIDFunc&head, const ArrayIDFunc&arc_weight); 31 | 32 | void save_metis_graph(const std::string&file_name, const ArrayIDIDFunc&tail, const ArrayIDIDFunc&head, const ArrayIDFunc&arc_weight); 33 | ListGraph load_metis_graph(const std::string&file_name); 34 | ListGraph uncached_load_metis_graph(const std::string&file_name); 35 | 36 | ListGraph load_color_dimacs_graph(const 
std::string&file_name); 37 | ListGraph uncached_load_dimacs_graph(const std::string&file_name); 38 | 39 | ListGraph load_pace_graph(const std::string&file_name); 40 | ListGraph uncached_load_pace_graph(const std::string&file_name); 41 | void save_pace_graph(const std::string&file_name, const ArrayIDIDFunc&tail, const ArrayIDIDFunc&head); 42 | 43 | #endif 44 | -------------------------------------------------------------------------------- /src/min_max.h: -------------------------------------------------------------------------------- 1 | #ifndef MIN_MAX_H 2 | #define MIN_MAX_H 3 | 4 | #include 5 | #include 6 | #include "id_func_traits.h" 7 | 8 | template 9 | void min_to(T&x, T y){ 10 | if(y < x) 11 | x = std::move(y); 12 | } 13 | 14 | template 15 | void max_to(T&x, T y){ 16 | if(y > x) 17 | x = std::move(y); 18 | } 19 | 20 | template 21 | void sort_ref_args(T&x, T&y){ 22 | if(y < x) 23 | std::swap(x,y); 24 | } 25 | 26 | template 27 | typename id_func_image_type::type min_over_id_func(const F&f){ 28 | assert(f.preimage_count() != 0); 29 | typename id_func_image_type::type result = f(0); 30 | for(int i=1; i 36 | typename id_func_image_type::type max_over_id_func(const F&f){ 37 | assert(f.preimage_count() != 0); 38 | typename id_func_image_type::type result = f(0); 39 | for(int i=1; i 45 | int min_preimage_over_id_func(const F&f){ 46 | assert(f.preimage_count() != 0); 47 | int preimage = 0; 48 | typename id_func_image_type::type m = f(0); 49 | for(int i=1; i 60 | int max_preimage_over_id_func(const F&f){ 61 | assert(f.preimage_count() != 0); 62 | int preimage = 0; 63 | typename id_func_image_type::type m = f(0); 64 | for(int i=1; i 10 | BitIDFunc identify_non_multi_arcs(const Tail&tail, const Head&head){ 11 | const int arc_count = tail.preimage_count(); 12 | auto arc_list = sort_arcs_first_by_tail_second_by_head(tail, head); 13 | BitIDFunc is_non_multi_arc(arc_count); 14 | if(arc_count > 0){ 15 | is_non_multi_arc.set(arc_list[0], true); 16 | for(int i=1; i 27 | bool 
is_symmetric(const Tail&tail, const Head&head){ 28 | const int arc_count = tail.preimage_count(); 29 | auto forward_arc_list = sort_arcs_first_by_tail_second_by_head(tail, head); 30 | auto backward_arc_list = sort_arcs_first_by_tail_second_by_head(head, tail); 31 | 32 | for(int i=0; i 40 | class SymmetricHead{ 41 | public: 42 | SymmetricHead(Tail tail, Head head): 43 | tail(std::move(tail)), head(std::move(head)){} 44 | 45 | int preimage_count()const{ return 2*tail.preimage_count(); } 46 | int image_count()const {return tail.image_count(); } 47 | 48 | int operator()(int x)const{ 49 | if(x < tail.preimage_count()) 50 | return head(x); 51 | else 52 | return tail(x - tail.preimage_count()); 53 | } 54 | private: 55 | Tail tail; 56 | Head head; 57 | }; 58 | 59 | template 60 | SymmetricHeadmake_symmetric_head(Tail tail, Head head){ 61 | return {std::move(tail), std::move(head)}; 62 | } 63 | 64 | template 65 | class SymmetricTail{ 66 | public: 67 | SymmetricTail(Tail tail, Head head): 68 | tail(std::move(tail)), head(std::move(head)){} 69 | 70 | int preimage_count()const{ return 2*tail.preimage_count(); } 71 | int image_count()const {return tail.image_count(); } 72 | 73 | int operator()(int x)const{ 74 | if(x < tail.preimage_count()) 75 | return tail(x); 76 | else 77 | return head(x - tail.preimage_count()); 78 | } 79 | private: 80 | Tail tail; 81 | Head head; 82 | }; 83 | 84 | template 85 | SymmetricTailmake_symmetric_tail(Tail tail, Head head){ 86 | return {std::move(tail), std::move(head)}; 87 | } 88 | 89 | template 90 | bool has_multi_arcs(const Tail&tail, const Head&head){ 91 | return count_true(identify_non_multi_arcs(tail, head)) != tail.preimage_count(); 92 | } 93 | 94 | template 95 | bool is_loop_free(const Tail&tail, const Head&head){ 96 | for(int i=0; i 12 | #include 13 | 14 | #include 15 | 16 | 17 | 18 | namespace my_kahip{ 19 | 20 | inline 21 | ::inertial_flow::Cut compute_my_kahip_cut( 22 | const RangeIDIDMultiFunc&inv_tail, const ArrayIDIDFunc&head, 23 | 
double min_balance 24 | ){ 25 | 26 | int node_count = head.image_count(); 27 | //int arc_count = head.preimage_count(); 28 | 29 | //std::vectorone_node_weights(node_count, 1); 30 | //std::vectorone_arc_weights(arc_count, 1); 31 | std::vectorpart_of_node(node_count); 32 | 33 | int part_count = 2; 34 | int cut_size; 35 | 36 | int* n = &node_count; 37 | int* vwgt = /*&one_node_weights[0]*/nullptr; 38 | int* xadj = (int*)&inv_tail.range_begin[0]; 39 | int* adjcwgt = /*&one_arc_weights[0]*/nullptr; 40 | int* adjncy = head.data_; 41 | int* nparts = &part_count; 42 | double* imbalance = &min_balance; 43 | bool suppress_output = true; 44 | int seed = 42; 45 | int mode = STRONG; 46 | 47 | int* edgecut = &cut_size; 48 | int* part = &part_of_node[0]; 49 | 50 | kaffpa(n, vwgt, xadj, adjcwgt, adjncy,nparts, imbalance, suppress_output, seed, mode, edgecut, part); 51 | 52 | BitIDFunc reachable = id_func(node_count, [&](unsigned x){return part[x] == 1;}); 53 | 54 | int reachable_count = 0; 55 | for(int x=0; x compute_my_kahip_separator(const ArrayIDIDFunc&tail, const ArrayIDIDFunc&head, double min_balance){ 84 | const int arc_count = head.preimage_count(); 85 | const int node_count = head.image_count(); 86 | 87 | std::vectorsep; 88 | if(node_count == 1){ 89 | sep = {0}; 90 | } else { 91 | ::inertial_flow::Cut c = compute_my_kahip_cut(tail, head, min_balance); 92 | 93 | for(int i=0; i 105 | std::vectoroperator()(const ArrayIDIDFunc&tail, const ArrayIDIDFunc&head, const InputNodeID& /*input_node_id*/, const ArcWeight&/*arc_weight*/)const{ 106 | return compute_my_kahip_separator( 107 | tail, head, 108 | min_balance 109 | ); 110 | } 111 | 112 | double min_balance; 113 | }; 114 | 115 | inline 116 | MyKahipSeparator 117 | ComputeSeparator(double min_balance){ 118 | return {min_balance}; 119 | } 120 | 121 | 122 | 123 | inline 124 | std::vector compute_my_kahip2_separator(const ArrayIDIDFunc&tail, const ArrayIDIDFunc&head, double min_balance){ 125 | RangeIDIDMultiFunc inv_tail = 
invert_sorted_id_id_func(tail); 126 | 127 | int node_count = head.image_count(); 128 | //int arc_count = head.preimage_count(); 129 | 130 | //std::vectorone_node_weights(node_count, 1); 131 | //std::vectorone_arc_weights(arc_count, 1); 132 | 133 | int part_count = 2; 134 | int separator_size; 135 | int*separator_pointer; 136 | 137 | int* n = &node_count; 138 | int* vwgt = /*&one_node_weights[0]*/nullptr; 139 | int* xadj = (int*)&inv_tail.range_begin[0]; 140 | int* adjcwgt = /*&one_arc_weights[0]*/nullptr; 141 | int* adjncy = head.data_; 142 | int* nparts = &part_count; 143 | double* imbalance = &min_balance; 144 | bool suppress_output = true; 145 | int seed = 42; 146 | int mode = STRONG; 147 | 148 | node_separator(n, vwgt, xadj, adjcwgt, adjncy, nparts, imbalance, suppress_output, seed, mode, &separator_size, &separator_pointer); 149 | 150 | std::vectorseparator_list(separator_size); 151 | std::copy(separator_pointer, separator_pointer + separator_size, separator_list.begin()); 152 | delete[]separator_pointer; 153 | return separator_list; 154 | } 155 | 156 | struct MyKahip2Separator{ 157 | MyKahip2Separator(double min_balance): 158 | min_balance(min_balance){} 159 | 160 | template 161 | std::vectoroperator()(const ArrayIDIDFunc&tail, const ArrayIDIDFunc&head, const InputNodeID& /*input_node_id*/, const ArcWeight&/*arc_weight*/)const{ 162 | return compute_my_kahip2_separator( 163 | tail, head, 164 | min_balance 165 | ); 166 | } 167 | 168 | double min_balance; 169 | }; 170 | 171 | inline 172 | MyKahip2Separator 173 | ComputeSeparator2(double min_balance){ 174 | return {min_balance}; 175 | } 176 | } 177 | 178 | #endif 179 | 180 | -------------------------------------------------------------------------------- /src/permutation.cpp: -------------------------------------------------------------------------------- 1 | #include "permutation.h" 2 | #include "io_helper.h" 3 | #include "tiny_id_func.h" 4 | #include 5 | #include 6 | 7 | static 8 | ArrayIDIDFunc 
load_permutation_impl(std::istream&in){ 9 | 10 | std::vectorbuffer; 11 | int x; 12 | while(in >> x) 13 | buffer.push_back(x); 14 | 15 | const int n = buffer.size(); 16 | 17 | BitIDFunc seen(n); 18 | seen.fill(false); 19 | 20 | for(auto x:buffer){ 21 | if(x < 0) 22 | throw std::runtime_error("an order position can not be negative"); 23 | else if(x > n) 24 | throw std::runtime_error("an order position can not be larger than the number of elements"); 25 | else if(seen(x)) 26 | throw std::runtime_error("an order position can only appear once"); 27 | seen.set(x, true); 28 | } 29 | 30 | ArrayIDIDFunc order(n, n); 31 | 32 | for(int i=0; i 6 | 7 | template 8 | bool is_permutation(const IDIDFunc&f){ 9 | if(f.preimage_count() != f.image_count()) 10 | return false; 11 | 12 | int id_count = f.preimage_count(); 13 | 14 | BitIDFunc already_seen(id_count); 15 | already_seen.fill(false); 16 | for(int i=0; i= id_count) 19 | return false; 20 | if(already_seen(x)) 21 | return false; 22 | already_seen.set(x, true); 23 | } 24 | return true; 25 | } 26 | 27 | 28 | 29 | template 30 | ArrayIDIDFunc inverse_permutation(const IDIDFunc&f){ 31 | assert(is_permutation(f)); 32 | 33 | int id_count = f.preimage_count(); 34 | 35 | ArrayIDIDFunc inv_f(id_count, id_count); 36 | for(int i=0; i 57 | typename std::enable_if< 58 | is_only_id_func::value, 59 | ArrayIDFunc::type> 60 | >::type apply_permutation(const Permutation&p, const IDFunc&f){ 61 | assert(is_permutation(p)); 62 | assert(p.image_count() == f.preimage_count()); 63 | 64 | ArrayIDFunc::type> result(p.preimage_count()); 65 | 66 | for(int i=0; i 73 | typename std::enable_if< 74 | is_id_id_func::value, 75 | ArrayIDIDFunc 76 | >::type apply_permutation(const Permutation&p, const IDIDFunc&f){ 77 | assert(is_permutation(p)); 78 | assert(p.image_count() == f.preimage_count()); 79 | 80 | ArrayIDIDFunc result(p.preimage_count(), f.image_count()); 81 | 82 | for(int i=0; i 7 | 8 | template 9 | std::pair compute_preorder(const Out&out){ 10 | const int 
node_count = out.preimage_count(); 11 | 12 | ArrayIDIDFunc p(node_count, node_count); 13 | 14 | BitIDFunc seen(node_count); 15 | seen.fill(false); 16 | 17 | typedef typename std::decay::type Iter; 18 | ArrayIDFuncnext_out(node_count); 19 | for(int i=0; i stack(node_count); 23 | int stack_end = 0; 24 | 25 | int id = 0; 26 | size_t num_components = 0; 27 | 28 | for(int r=0; r 5 | struct Range{ 6 | Iter begin_, end_; 7 | Iter begin()const{return begin_;} 8 | Iter end()const{return end_;} 9 | }; 10 | 11 | template 12 | Range make_range(Iter begin, Iter end){ 13 | return {begin, end}; 14 | } 15 | 16 | #endif 17 | 18 | -------------------------------------------------------------------------------- /src/sort_arc.h: -------------------------------------------------------------------------------- 1 | #ifndef SORT_ARC_H 2 | #define SORT_ARC_H 3 | 4 | #include "id_sort.h" 5 | #include "array_id_func.h" 6 | #include "permutation.h" 7 | #include "count_range.h" 8 | #include 9 | 10 | template 11 | ArrayIDIDFunc sort_arcs_first_by_tail_second_by_head(const Tail&tail, const Head&head){ 12 | assert(tail.preimage_count() == head.preimage_count()); 13 | assert(tail.image_count() == head.image_count()); 14 | 15 | const int arc_count = tail.preimage_count(); 16 | 17 | ArrayIDIDFunc 18 | x(arc_count, arc_count), 19 | y(arc_count, arc_count); 20 | 21 | stable_sort_copy_by_id( 22 | CountIterator{0}, CountIterator{arc_count}, 23 | y.begin(), 24 | head.image_count(), 25 | head 26 | ); 27 | stable_sort_copy_by_id( 28 | y.begin(), y.end(), 29 | x.begin(), 30 | tail.image_count(), 31 | tail 32 | ); 33 | 34 | return x; //NVRO 35 | } 36 | 37 | #endif 38 | 39 | -------------------------------------------------------------------------------- /src/timer.h: -------------------------------------------------------------------------------- 1 | #ifndef TIMER_H 2 | #define TIMER_H 3 | 4 | #include 5 | 6 | inline 7 | long long get_micro_time(){ 8 | timeval t; 9 | gettimeofday(&t, 0); 10 | return 
t.tv_sec*1000000ll+t.tv_usec; 11 | } 12 | 13 | #endif 14 | -------------------------------------------------------------------------------- /src/timestamp_id_func.h: -------------------------------------------------------------------------------- 1 | #ifndef TIMESTAMP_ID_FUNC_H 2 | #define TIMESTAMP_ID_FUNC_H 3 | 4 | #include "array_id_func.h" 5 | 6 | //! A bool id func with a constant time fill(false). Note that fill(true) is expensive 7 | class TimestampIDFunc{ 8 | public: 9 | TimestampIDFunc(){} 10 | explicit TimestampIDFunc(int node_count): 11 | timestamp(node_count){ 12 | timestamp.fill(0); 13 | current_timestamp = 1; 14 | } 15 | 16 | bool operator()(int x)const{ 17 | return timestamp(x) == current_timestamp; 18 | } 19 | 20 | void fill(bool f){ 21 | if(!f){ 22 | ++current_timestamp; 23 | if(current_timestamp == 0){ 24 | timestamp.fill(0); 25 | current_timestamp = 1; 26 | } 27 | }else{ 28 | timestamp.fill(0); 29 | current_timestamp = 0; 30 | } 31 | } 32 | 33 | void set(int x, bool f){ 34 | timestamp.set(x, current_timestamp-!f); 35 | } 36 | 37 | private: 38 | ArrayIDFunctimestamp; 39 | unsigned short current_timestamp; 40 | }; 41 | 42 | #endif 43 | 44 | -------------------------------------------------------------------------------- /src/tiny_id_func.h: -------------------------------------------------------------------------------- 1 | #ifndef TINY_INT_ID_FUNC_H 2 | #define TINY_INT_ID_FUNC_H 3 | 4 | #include 5 | #include 6 | #include "id_func.h" 7 | #include "array_id_func.h" 8 | 9 | template 10 | struct TinyIntIDFunc{ 11 | private: 12 | static constexpr int entry_count_per_uint64 = 64/bit_count; 13 | static constexpr std::uint64_t entry_mask = (std::uint64_t(1)< 28 | TinyIntIDFunc(const IDFunc&other, typename std::enable_if::value, void>::type*dummy=0) 29 | :preimage_(other.preimage_count()), data_((other.preimage_count() + entry_count_per_uint64 - 1) / entry_count_per_uint64){ 30 | (void)dummy; 31 | for(int i=0; i> offset) & entry_mask; 41 | } 42 | 43 | 
void set(int id, std::uint64_t value){ 44 | assert(0 <= id && id < preimage_ && "id out of bounds"); 45 | assert(0 <= value && value <= entry_mask && "value out of bounds"); 46 | 47 | int index = id / entry_count_per_uint64; 48 | int offset = (id % entry_count_per_uint64)*bit_count; 49 | 50 | data_[index] ^= ((((data_[index] >> offset) & entry_mask) ^ value) << offset); 51 | } 52 | 53 | void fill(std::uint64_t value){ 54 | assert(0 <= value && value <= entry_mask && "value out of bounds"); 55 | 56 | if(bit_count == 1){ 57 | if(value == false) 58 | data_.fill(0); 59 | else 60 | data_.fill((std::uint64_t)-1); 61 | }else if(value == 0){ 62 | data_.fill(0); 63 | }else{ 64 | std::uint64_t pattern = value; 65 | int shift = bit_count; 66 | while(shift < 64){ 67 | pattern |= pattern << shift; 68 | shift <<= 1; 69 | } 70 | data_.fill(pattern); 71 | } 72 | } 73 | 74 | std::uint64_t move(int id){ 75 | return operator()(id); 76 | } 77 | 78 | void swap(TinyIntIDFunc&other)noexcept{ 79 | std::swap(preimage_, other.preimage_); 80 | data_.swap(other.data_); 81 | } 82 | 83 | template 84 | TinyIntIDFunc operator=(const typename std::enable_if::value, IDFunc>::type & other){ 85 | TinyIntIDFunc(other).swap(*this); 86 | return *this; 87 | } 88 | 89 | 90 | int preimage_; 91 | ArrayIDFunc data_; 92 | }; 93 | 94 | 95 | 96 | typedef TinyIntIDFunc<1> BitIDFunc; 97 | 98 | inline BitIDFunc operator~(BitIDFunc f){ 99 | for(int i=0; i 7 | 8 | //! input graph must be a symmetric graph 9 | //! The result is guaranteed to be optimal for trees. For non-trees there are no guarantees. 
10 | template 11 | ArrayIDIDFunc compute_tree_node_ranking(const Neighbors&neighbors){ 12 | const int node_count = neighbors.preimage_count(); 13 | 14 | ArrayIDFunc 15 | parent(node_count), 16 | child_first_order(node_count); 17 | 18 | // Root tree at node 0 19 | { 20 | ArrayIDFuncstack(node_count); 21 | int stack_end = 0; 22 | int next_order_pos = node_count; 23 | 24 | stack[stack_end++] = 0; 25 | parent.fill(-1); 26 | parent[0] = -2; 27 | 28 | 29 | while(stack_end != 0){ 30 | auto x = stack[--stack_end]; 31 | child_first_order[--next_order_pos] = x; 32 | for(auto y:neighbors(x)){ 33 | if(parent(y) == -1){ 34 | assert(y != 0); 35 | stack[stack_end++] = y; 36 | parent[y] = x; 37 | } 38 | } 39 | } 40 | parent[0] = -1; 41 | assert(next_order_pos == 0); 42 | } 43 | 44 | 45 | ArrayIDIDFunc level(node_count, 1); 46 | 47 | // Compute node levels 48 | { 49 | struct SubTreeInfo{ 50 | std::vectorcritical_list; 51 | int size; 52 | }; 53 | ArrayIDFunc>node_children_info(node_count); 54 | 55 | BitIDFunc crit(node_count); 56 | crit.fill(false); 57 | 58 | for(int i=0; i max) 128 | max = t; 129 | }else{ 130 | if(t > p) 131 | p = t; 132 | } 133 | 134 | auto&&first_child_critical_list = children_info[0].critical_list; 135 | 136 | for(int i = first_child_critical_list.size()-1; i>=0; --i){ 137 | int t = first_child_critical_list[i]; 138 | 139 | if(t > max) 140 | break; 141 | if(p >= t) 142 | continue; 143 | 144 | if(!crit(t)){ 145 | crit.set(t, true); 146 | }else{ 147 | p = t; 148 | } 149 | } 150 | 151 | for(int i=0; i<=p; ++i) 152 | crit.set(i, false); 153 | for(int i=p+1; i<=max; ++i){ 154 | if(!crit(i)){ 155 | if(q == 0){ 156 | q = i; 157 | tree_info.critical_list = {q}; 158 | } 159 | }else{ 160 | crit.set(i, false); 161 | if(q != 0) 162 | tree_info.critical_list.push_back(i); 163 | } 164 | } 165 | 166 | if(q == 0){ 167 | 168 | while(!first_child_critical_list.empty() && first_child_critical_list.back() <= max) 169 | first_child_critical_list.pop_back(); 170 | q = max+1; 171 | 
while(!first_child_critical_list.empty() && first_child_critical_list.back() == q){ 172 | first_child_critical_list.pop_back(); 173 | ++q; 174 | } 175 | 176 | tree_info.critical_list = std::move(first_child_critical_list); 177 | tree_info.critical_list.push_back(q); 178 | }else{ 179 | while(!first_child_critical_list.empty() && first_child_critical_list.back() <= max) 180 | first_child_critical_list.pop_back(); 181 | assert(std::is_sorted(tree_info.critical_list.begin(), tree_info.critical_list.end())); 182 | for(int i=tree_info.critical_list.size()-1; i>=0; --i) 183 | first_child_critical_list.push_back(tree_info.critical_list[i]); 184 | tree_info.critical_list = std::move(first_child_critical_list); 185 | } 186 | 187 | assert(q > 0); 188 | 189 | if(level.image_count() < q) 190 | level.set_image_count(q); 191 | level[x] = q-1; 192 | } 193 | 194 | assert(std::is_sorted(tree_info.critical_list.begin(), tree_info.critical_list.end(), std::greater())); 195 | 196 | if(parent(x) != -1) 197 | node_children_info[parent(x)].push_back(std::move(tree_info)); 198 | } 199 | } 200 | 201 | return level; // NVRO 202 | } 203 | 204 | #endif 205 | 206 | -------------------------------------------------------------------------------- /src/triangle_count.h: -------------------------------------------------------------------------------- 1 | #ifndef TRIANGLE_COUNT_H 2 | #define TRIANGLE_COUNT_H 3 | #include "multi_arc.h" 4 | #include "tiny_id_func.h" 5 | #include "id_multi_func.h" 6 | #include "back_arc.h" 7 | #include "id_sort.h" 8 | #include 9 | 10 | template 11 | ArrayIDFunc count_arc_triangles(const Tail&tail, const Head&head){ 12 | assert(is_symmetric(tail, head)); 13 | 14 | int node_count = tail.image_count(); 15 | int arc_count = tail.preimage_count(); 16 | 17 | auto deg = compute_histogram(tail); 18 | ArrayIDFuncnodes_decreasing_by_deg(node_count); 19 | stable_sort_copy_by_id( 20 | CountIterator{0}, CountIterator{node_count}, 21 | std::begin(nodes_decreasing_by_deg), 22 | 
id_id_func(node_count, node_count, [&](int x){ return node_count - deg(x) - 1;}) 23 | ); 24 | 25 | auto out_arc = invert_id_id_func(tail); 26 | 27 | BitIDFunc is_finished(node_count), is_neighbor(node_count); 28 | is_finished.fill(false); 29 | is_neighbor.fill(false); 30 | 31 | ArrayIDFunctriangle_count(arc_count); 32 | triangle_count.fill(0); 33 | 34 | for(auto x:nodes_decreasing_by_deg){ 35 | for(auto xy:out_arc(x)){ 36 | auto y = head(xy); 37 | if(!is_finished(y)) 38 | is_neighbor.set(y, true); 39 | } 40 | 41 | for(auto xy:out_arc(x)){ 42 | auto y = head(xy); 43 | if(is_neighbor(y)){ 44 | for(auto yz:out_arc(y)){ 45 | auto z = head(yz); 46 | if(is_neighbor(z)){ 47 | ++triangle_count[xy]; 48 | if(y < z) 49 | ++triangle_count[yz]; 50 | } 51 | } 52 | } 53 | } 54 | 55 | for(auto xy:out_arc(x)){ 56 | is_neighbor.set(head(xy), false); 57 | } 58 | 59 | is_finished.set(x, true); 60 | } 61 | 62 | auto back_arc = compute_back_arc_permutation(tail, head); 63 | for(int xy = 0; xy < arc_count; ++xy){ 64 | auto yx = back_arc(xy); 65 | if(xy < yx){ 66 | int s = triangle_count(xy) + triangle_count(yx); 67 | triangle_count[xy] = s; 68 | triangle_count[yx] = s; 69 | } 70 | } 71 | 72 | return triangle_count; 73 | } 74 | 75 | 76 | #endif 77 | -------------------------------------------------------------------------------- /src/union_find.h: -------------------------------------------------------------------------------- 1 | #ifndef UNION_FIND_H 2 | #define UNION_FIND_H 3 | 4 | #include "array_id_func.h" 5 | #include 6 | 7 | //! 
An id-id-function that maps a node onto its components representative 8 | struct UnionFind{ 9 | public: 10 | UnionFind():node_count_(0){} 11 | 12 | explicit UnionFind(int node_count):parent_(node_count), node_count_(node_count), component_count_(node_count){ 13 | parent_.fill(-1); 14 | } 15 | 16 | void reset(){ 17 | parent_.fill(-1); 18 | component_count_ = node_count_; 19 | } 20 | 21 | int preimage_count()const{return node_count_;} 22 | int image_count()const{return node_count_;} 23 | 24 | void unite(int l, int r){ 25 | assert(0 <= l && l < node_count_); 26 | assert(0 <= r && r < node_count_); 27 | 28 | l = operator()(l); 29 | r = operator()(r); 30 | if(l != r){ 31 | --component_count_; 32 | if(-parent_[l] < -parent_[r]){ 33 | parent_[r] += parent_[l]; 34 | parent_[l] = r; 35 | }else{ 36 | parent_[l] += parent_[r]; 37 | parent_[r] = l; 38 | } 39 | } 40 | } 41 | 42 | int operator()(int x)const{ 43 | assert(0 <= x && x < node_count_); 44 | 45 | if(is_representative(x)) 46 | return x; 47 | 48 | int y = x; 49 | while(!is_representative(y)) 50 | y = parent_[y]; 51 | 52 | int z = x; 53 | while(!is_representative(z)){ 54 | int tmp = parent_[z]; 55 | parent_[z] = y; 56 | z = tmp; 57 | } 58 | 59 | return y; 60 | } 61 | 62 | bool in_same(int x, int y)const{ 63 | return (*this)(x) == (*this)(y); 64 | } 65 | 66 | bool is_representative(int v)const{ 67 | assert(0 <= v && v < node_count_); 68 | return parent_(v) < 0; 69 | } 70 | 71 | int component_size(int v)const{ 72 | assert(0 <= v && v < node_count_); 73 | if(is_representative(v)) 74 | return -parent_(v); 75 | else 76 | return 0; 77 | } 78 | 79 | int component_count()const{ 80 | return component_count_; 81 | } 82 | 83 | private: 84 | mutable ArrayIDFuncparent_; 85 | int node_count_; 86 | int component_count_; 87 | }; 88 | 89 | #endif 90 | -------------------------------------------------------------------------------- /src/vector_io.h: -------------------------------------------------------------------------------- 1 | 
#ifndef VECTOR_IO_H 2 | #define VECTOR_IO_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | template 10 | void save_vector(const std::string&file_name, const std::vector&vec){ 11 | std::ofstream out(file_name, std::ios::binary); 12 | if(!out) 13 | throw std::runtime_error("Can not open \""+file_name+"\" for writing."); 14 | out.write(reinterpret_cast(&vec[0]), vec.size()*sizeof(T)); 15 | } 16 | 17 | template 18 | std::vectorload_vector(const std::string&file_name){ 19 | std::ifstream in(file_name, std::ios::binary); 20 | if(!in) 21 | throw std::runtime_error("Can not open \""+file_name+"\" for reading."); 22 | in.seekg(0, std::ios::end); 23 | unsigned long long file_size = in.tellg(); 24 | if(file_size % sizeof(T) != 0) 25 | throw std::runtime_error("File \""+file_name+"\" can not be a vector of the requested type because it's size is no multiple of the element type's size."); 26 | in.seekg(0, std::ios::beg); 27 | std::vectorvec(file_size / sizeof(T)); 28 | in.read(reinterpret_cast(&vec[0]), file_size); 29 | return vec; // NVRO 30 | } 31 | 32 | template<> 33 | void save_vector(const std::string&file_name, const std::vector&vec){ 34 | std::ofstream out(file_name, std::ios::binary); 35 | for(unsigned i=0; i 42 | std::vectorload_vector(const std::string&file_name){ 43 | std::vectordata = load_vector(file_name); 44 | std::vectorret; 45 | std::vector::const_iterator 46 | str_begin = data.begin(), 47 | str_end = data.begin(), 48 | data_end = data.end(); 49 | 50 | while(str_end != data_end){ 51 | if(*str_end == '\0'){ 52 | ret.push_back(std::string(str_begin, str_end)); 53 | ++str_end; 54 | str_begin = str_end; 55 | }else{ 56 | ++str_end; 57 | } 58 | } 59 | 60 | ret.shrink_to_fit(); 61 | return ret; // NVRO 62 | } 63 | 64 | template 65 | void save_value(const std::string&file_name, const T&val){ 66 | save_vector(file_name, std::vector{val}); 67 | } 68 | 69 | template 70 | T load_value(const std::string&file_name){ 71 | auto v = load_vector(file_name); 72 | 
if(v.empty()) 73 | throw std::runtime_error(file_name+" is empty"); 74 | if(v.size() > 1) 75 | throw std::runtime_error(file_name+" contains more than one element"); 76 | return v.front(); 77 | } 78 | 79 | #endif 80 | --------------------------------------------------------------------------------