├── .gitignore ├── Dockerfile ├── LICENSE ├── install.sh ├── readme.md ├── setup.py └── swmm_mpc ├── __init__.py ├── evaluate.py ├── plot_passive_vs_active.py ├── plot_results.py ├── rpt_ele.py ├── run_baeopt.py ├── run_ea.py ├── swmm_mpc.py ├── tests ├── ' ├── ctl_results.csv ├── ctl_results_err.csv ├── example.rpt ├── example_rules_orifices.txt ├── example_rules_pumps.txt ├── test_evaluate.py ├── test_rpt_ele.py ├── test_run_swmm_mpc.py └── test_update_process_model_input_file.py └── update_process_model_input_file.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.hsf 2 | *.out 3 | *.swp 4 | *.pyc 5 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2.7-slim-stretch 2 | 3 | RUN apt update 4 | RUN apt install -y git gcc make wget unzip tk 5 | RUN pip install numpy 6 | RUN pip install git+https://github.com/UVAdMIST/swmm_mpc 7 | 8 | 9 | RUN wget https://www.epa.gov/sites/production/files/2017-03/swmm51012_engine_2.zip 10 | RUN mkdir swmm5 11 | RUN unzip swmm51012_engine_2.zip -d swmm5 12 | WORKDIR swmm5/ 13 | RUN mkdir src 14 | RUN unzip source5_1_012.zip -d src 15 | RUN mkdir mk 16 | RUN unzip makefiles.zip -d mk 17 | WORKDIR mk/ 18 | RUN mkdir gnu 19 | RUN unzip GNU-CLE.zip -d gnu 20 | RUN cp gnu/Makefile ../src/ 21 | WORKDIR ../src 22 | 23 | RUN sed -i -e 's/#define DLL/\/\/#define DLL/g' swmm5.c 24 | RUN sed -i -e 's/\/\/#define CLE/#define CLE/g' swmm5.c 25 | RUN make 26 | ENV PATH="/swmm5/src:${PATH}" 27 | WORKDIR / 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Jeffrey M. Sadler 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | if ! [ -x "$(command -v unzip)" ]; then 2 | sudo apt install unzip 3 | fi 4 | 5 | source activate swmm-mpc-py 6 | 7 | #git clone https://github.com/UVAdMIST/swmm_mpc.git 8 | pip install . 
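# note: swmm_mpc depends on the save_hotstart capability, which (at the time
# of writing) is only available from the pyswmm fork/branch installed below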
9 | 10 | pip install git+git://github.com/uva-hydroinformatics/pyswmm.git@feature_save_hotstart 11 | 12 | cd ~ 13 | wget https://www.epa.gov/sites/production/files/2017-03/swmm51012_engine_2.zip 14 | unzip swmm51012_engine_2.zip -d swmm5 15 | cd swmm5/ 16 | unzip source5_1_012.zip -d src 17 | unzip makefiles.zip -d mk 18 | cd mk 19 | unzip GNU-CLE.zip -d gnu 20 | cp gnu/Makefile ../src/ 21 | cd ../src 22 | 23 | sed -i -e 's/#define DLL/\/\/#define DLL/g' swmm5.c 24 | sed -i -e 's/\/\/#define CLE/#define CLE/g' swmm5.c 25 | 26 | if ! [ -x "$(command -v make)" ]; then 27 | sudo apt install build-essential 28 | fi 29 | 30 | make 31 | 32 | export PATH="$HOME/swmm5/src:$PATH" 33 | echo 'export PATH="$HOME/swmm5/src:$PATH"' >> ~/.bashrc 34 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # swmm_mpc 2 | `swmm_mpc` is a python package that can be used to perform model predictive control (MPC) for EPASWMM5 ([Environmental Protection Agency Stormwater Management Model](https://www.epa.gov/water-research/storm-water-management-model-swmm)). `swmm_mpc` relies on the [pyswmm package](https://github.com/OpenWaterAnalytics/pyswmm), which enables the step-by-step running of SWMM5 models through Python. 3 | 4 | `swmm_mpc` has only been tested with **Python 2.7** on **Ubuntu 16.10** and **RedHat**. The modified version of OWA SWMM5 included in the `pyswmm` library was compiled on Ubuntu 16.10 and will therefore not work in a Windows environment. 5 | 6 | # Installation 7 | ## 1. Install swmm_mpc 8 | **NOTE**: You must have `numpy` installed already or this will not work 9 | ``` 10 | pip install git+https://github.com/UVAdMIST/swmm_mpc 11 | 12 | ``` 13 | ## 2. Install EPASWMM5 14 | You will also need to have a working version of EPASWMM5 on your machine and have it added to the path. You can download the source code from the [EPA Website](https://www.epa.gov/water-research/storm-water-management-model-swmm). In Linux you can do this as follows: 15 | ``` 16 | wget https://www.epa.gov/sites/production/files/2017-03/swmm51012_engine_2.zip 17 | unzip swmm51012_engine_2.zip -d swmm5 18 | cd swmm5/ 19 | unzip source5_1_012.zip -d src 20 | unzip makefiles.zip -d mk 21 | cd mk 22 | unzip GNU-CLE.zip -d gnu 23 | cp gnu/Makefile ../src/ 24 | cd ../src 25 | ``` 26 | Then, following the instructions, edit the relevant lines in swmm5.c to read: 27 | ``` 28 | #define CLE 29 | //#define SOL 30 | //#define DLL 31 | ``` 32 | Then do 33 | ``` 34 | make 35 | ``` 36 | To add to the path, add this line to your .bashrc 37 | ``` 38 | export PATH="/path/to/swmm5/src:$PATH" 39 | ```
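Once built and on your path, you can sanity-check the executable by invoking it the same way `swmm_mpc` does internally (file names here are placeholders):
```
swmm5 my_swmm_model.inp my_swmm_model.rpt
```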
40 | 41 | # Usage 42 | The `run_swmm_mpc` function is the main (and likely the only) function you will use. It takes a single argument: the path to your configuration file (see the example below). 43 | 44 | ## configuration file 45 | The configuration file is a json file that specifies all of the parameters of your swmm_mpc run. Some parameters are required; others are optional and have default values. 46 | 47 | ### Required parameters in configuration file 48 | 1. `inp_file_path`: [string] path to .inp file relative to config file 49 | 2. `ctl_horizon`: [number] ctl horizon in hours 50 | 3. `ctl_time_step`: [number] control time step in seconds 51 | 4. `ctl_str_ids`: [list of strings] ids of control structures for which control policies will be found. Each should start with one of the keywords ORIFICE, PUMP, or WEIR, e.g., [ORIFICE R1, ORIFICE R2] 52 | 5. `work_dir`: [string] directory relative to config file where the temporary files will be created. **note**: this must be an existing directory 53 | 6. `results_dir`: [string] directory relative to config file where the results will be written. **note**: this must be an existing directory 54 | 7. `opt_method`: [string] optimization method. Currently supported methods are 'genetic_algorithm' and 'bayesian_opt' 55 | 8. `optimization_params`: [dict] dictionary with key-value pairs that will be passed to the optimization function. For the GA this includes 56 | * `ngen`: [int] number of generations for GA 57 | * `nindividuals`: [int] number of individuals for initial generation in GA 58 | 9. `run_suffix`: [string] will be added to the results filename 59 | 60 | ### Optional parameters in configuration file 61 | 1. `flood_weight`: [number] overall weight for the sum of all flooding relative to the overall weight for the sum of the absolute deviations from target depths (`dev_weight`). Default: 1 62 | 2. `dev_weight`: [number] overall weight for the sum of the absolute deviations from target depths. This weight is relative to the `flood_weight`. Default: 0 63 | 3. `target_depth_dict`: [dict] dictionary where the keys are the node ids and the values are a dictionary. The inner dictionary has two keys, 'target' and 'weight'. These values specify the target depth for the node id and the weight given to that node in the cost function, e.g., {'St1': {'target': 1, 'weight': 0.1}}. Default: None (flooding from all nodes is weighted equally) 64 | 4. `node_flood_weight_dict`: [dict] dictionary where the keys are the node ids and the values are the relative weights for weighting the amount of flooding for a given node, e.g., {'st1': 10, 'J3': 1}. Default: None 65 | 66 | ## Example 67 | ### Example configuration file 68 | 69 | ``` 70 | { 71 | "inp_file_path": "my_swmm_model.inp", 72 | "ctl_horizon": 1, 73 | "ctl_time_step": 900, 74 | "ctl_str_ids": ["ORIFICE R1", "ORIFICE R2"], 75 | "work_dir": "work/", 76 | "results_dir": "results/", 77 | "dev_weight":0.5, 78 | "flood_weight":1000, 79 | "run_suffix": "my_mpc_run", 80 | "opt_method": "genetic_algorithm", 81 | "optimization_params": 82 | { 83 | "num_cores":20, 84 | "ngen":8, 85 | "nindividuals":120 86 | }, 87 | "target_depth_dict": 88 | { 89 | "Node St1": 90 | { 91 | "target":1.69, 92 | "weight":1 93 | }, 94 | "Node St2": 95 | { 96 | "target":1.69, 97 | "weight":1 98 | } 99 | } 100 | } 101 | ``` 102 | ### Example python file 103 | ```python 104 | 105 | from swmm_mpc.swmm_mpc import run_swmm_mpc 106 | run_swmm_mpc('my_config_file.json') 107 | ``` 108 | 109 | Then to run it, you simply call the script with python: 110 | ``` 111 | python my_swmm_mpc.py 112 | ``` 113 | # Dockerized code 114 | A Docker image with swmm_mpc and all of its dependencies can be found at [https://hub.docker.com/r/jsadler2/swmm_mpc/](https://hub.docker.com/r/jsadler2/swmm_mpc/). You would run it like so (**this assumes your results\_dir, your workdir, your .inp file, and your config file (\*.json) are all in the same directory**): 115 | 116 | ``` 117 | docker run -v /path/to/run_dir/:/run_dir/ -w /run_dir/ jsadler2/swmm_mpc:latest python run_script.py 118 | ```
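For reference, a `run_dir` that satisfies those assumptions might be laid out like this (file names are illustrative and match the example configuration above):
```
run_dir/
├── my_config_file.json
├── my_swmm_model.inp
├── run_script.py
├── work/
└── results/
```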
119 | # Example model 120 | An example use-case model can be found on HydroShare: [https://www.hydroshare.org/resource/73b38d6417ac4352b9dae38a78a47d81/](https://www.hydroshare.org/resource/73b38d6417ac4352b9dae38a78a47d81/). 121 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | setup(name='swmm_mpc', 4 | version = '0.1', 5 | description = 'model predictive control for swmm5 models', 6 | url = 'https://github.com/uva-hydroinformatics-group/swmm_mpc', 7 | author = 'Jeffrey Sadler', 8 | author_email = 'jms3fb@virginia.edu', 9 | license = 'MIT', 10 | packages = find_packages(), 11 | install_requires=[ 12 | 'pandas', 13 | 'matplotlib', 14 | 'numpy', 15 | 'deap', 16 | 'GPyOpt', 17 | 'pyswmm @ https://github.com/UVAdMIST/pyswmm/archive/feature_save_hotstart.zip', 18 | ], 19 | ) 20 | 21 | -------------------------------------------------------------------------------- /swmm_mpc/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UVAdMIST/swmm_mpc/0ed12d1f585c3473d98bf0ab6c88f6601e7a53f0/swmm_mpc/__init__.py -------------------------------------------------------------------------------- /swmm_mpc/evaluate.py: -------------------------------------------------------------------------------- 1 | import string 2 | import numpy as np 3 | import sys 4 | import random 5 | import os 6 | from shutil import copyfile 7 | import subprocess 8 | from rpt_ele import rpt_ele 9 | import update_process_model_input_file as up 10 | import swmm_mpc as sm 11 | 12 | 13 | def get_flood_cost_from_dict(rpt, node_flood_weight_dict): 14 | node_flood_costs = [] 15 | for nodeid, weight in node_flood_weight_dict.iteritems(): 16 | # if user put "Node J3" for nodeid instead of just "J3" make \ 17 | # nodeid "J3" 18 | if len(nodeid.split()) > 1: 19 | nodeid = nodeid.split()[-1] 20 | # try/except used here in case there is no flooding for one or \ 21 | # more of the nodes 22 | if nodeid not in rpt.node_ids: 23 | print("warning node {} is not in model".format(nodeid)) 24 | try: 25 | # flood volume is in column 5 26 | node_flood_volume = float(rpt.flooding_df.loc[nodeid, 5]) 27 | node_flood_cost = (weight*node_flood_volume) 28 | node_flood_costs.append(node_flood_cost) 29 | except (KeyError, ValueError): 30 | pass 31 | return sum(node_flood_costs) 32 | 33 | 34 | def get_flood_cost(rpt, node_flood_weight_dict): 35 | if rpt.total_flooding > 0 and node_flood_weight_dict: 36 | return get_flood_cost_from_dict(rpt, node_flood_weight_dict) 37 | else: 38 | return rpt.total_flooding 39 | 40 | 41 | def get_deviation_cost(rpt, target_depth_dict): 42 | node_deviation_costs = [] 43 | if target_depth_dict: 44 | for nodeid, data in target_depth_dict.iteritems(): 45 | depth = rpt.get_ele_df(nodeid)['Depth'] 46 | depth_dev = abs(depth - data['target']) 47 | avg_dev = depth_dev.sum()/len(depth_dev) 48 | weighted_deviation = avg_dev*data['weight'] 49 | node_deviation_costs.append(weighted_deviation) 50 | 51 | return sum(node_deviation_costs) 52 | 53 | 54 | def get_cost(rpt_file, node_flood_weight_dict, flood_weight, target_depth_dict, 55 | 
dev_weight): 56 | # read the output file 57 | rpt = rpt_ele('{}'.format(rpt_file)) 58 | 59 | # get flooding costs 60 | node_fld_cost = get_flood_cost(rpt, node_flood_weight_dict) 61 | 62 | # get deviation costs 63 | deviation_cost = get_deviation_cost(rpt, target_depth_dict) 64 | 65 | # convert the contents of the output file into a cost 66 | cost = flood_weight*node_fld_cost + dev_weight*deviation_cost 67 | return cost 68 | 69 | 70 | def bits_to_decimal(bits): 71 | bits_as_string = "".join(str(i) for i in bits) 72 | return float(int(bits_as_string, 2)) 73 | 74 | 75 | def bits_max_val(bit_len): 76 | bit_ones = [1 for i in range(bit_len)] 77 | return bits_to_decimal(bit_ones) 78 | 79 | 80 | def bits_to_perc(bits): 81 | bit_dec = bits_to_decimal(bits) 82 | max_bits = bits_max_val(len(bits)) 83 | return round(bit_dec/max_bits, 3) 84 | 85 | 86 | def bit_to_on_off(bit): 87 | """ 88 | convert single bit to "ON" or "OFF" 89 | bit: [int] or [list] 90 | """ 91 | if type(bit) == list: 92 | if len(bit) > 1: 93 | raise ValueError('you passed more than one bit to this fxn') 94 | else: 95 | bit = bit[0] 96 | if bit == 1: 97 | return "ON" 98 | elif bit == 0: 99 | return "OFF" 100 | else: 101 | raise ValueError('was expecting 1 or 0 and got {}'.format(bit)) 102 | 103 | 104 | def split_gene_by_ctl_ts(gene, control_str_ids, n_steps): 105 | """ 106 | split a list of bits representing a gene into the bits that correspond with 107 | each control id according to the control type for each time step 108 | ASSUMPTION: 3 bits for ORIFICE or WEIR, 1 for PUMP 109 | gene: [list] bits for a gene (e.g., [1, 0, 1, 1, 1, 0, 0, 1]) 110 | control_str_ids: [list] control ids (e.g., ['ORIFICE r1', 'PUMP p1']) 111 | n_steps: [int] number of control steps (e.g., 2) 112 | returns: [list of lists] [[[1, 0, 1], [1, 1, 0]], [[0], [1]]] 113 | """ 114 | split_gene = [] 115 | for control_id in control_str_ids: 116 | # get the control type (i.e. PUMP, WEIR, ORIFICE) 117 | control_type = control_id.split()[0] 118 | if control_type == 'ORIFICE' or control_type == 'WEIR': 119 | bits_per_type = 3 120 | # get the number of control elements that are for the current ctl 121 | elif control_type == 'PUMP': 122 | bits_per_type = 1 123 | # the number of bits per control structure 124 | n_bits = bits_per_type*n_steps 125 | # get the segment for the control 126 | gene_seg = gene[:n_bits] 127 | # split to get the different time steps 128 | gene_seg_per_ts = split_list(gene_seg, n_steps) 129 | # add the gene segment to the overall list 130 | split_gene.append(gene_seg_per_ts) 131 | # move the beginning of the gene to the end of the current ctl segment 132 | gene = gene[n_bits:] 133 | return split_gene 134 | 135 | 136 | def split_list(a_list, n): 137 | """ 138 | split one list into n lists of equal size. 
In this case, we are splitting 139 | the list that represents the policy of a single each control structure 140 | so that each time step is separate 141 | """ 142 | portions = len(a_list)/n 143 | split_lists = [] 144 | for i in range(n): 145 | split_lists.append(a_list[i*portions: (i+1)*portions]) 146 | return split_lists 147 | 148 | 149 | def gene_to_policy_dict(gene, control_str_ids, n_control_steps): 150 | """ 151 | converts a gene to a policy dictionary that with the format specified in 152 | up.update_controls_and_hotstart 153 | format a policy given the control_str_ids and splitted_gene 154 | control_str_ids: [list] control ids (e.g., ['ORIFICE r1', 'PUMP p1']) 155 | splitted_gene: [list of lists] [[[1, 0, 1], [1, 1, 0]], [[0], [1]]] 156 | returns: [dict] (e.g., {'ORIFICE r1'} 157 | """ 158 | fmted_policies = dict() 159 | splitted_gene = split_gene_by_ctl_ts(gene, control_str_ids, 160 | n_control_steps) 161 | for i, control_id in enumerate(control_str_ids): 162 | control_type = control_id.split()[0] 163 | seg = splitted_gene[i] 164 | if control_type == 'ORIFICE' or control_type == 'WEIR': 165 | # change the lists of bits into percent openings 166 | fmtd_seg = [bits_to_perc(setting) for setting in seg] 167 | elif control_type == 'PUMP': 168 | # change the lists of bits into on/off 169 | fmtd_seg = [bit_to_on_off(bit[0]) for bit in seg] 170 | fmted_policies[control_id] = fmtd_seg 171 | return fmted_policies 172 | 173 | 174 | def list_to_policy(policy, control_str_ids, n_control_steps): 175 | """ 176 | ASSUMPTION: round decimal number to BOOLEAN 177 | """ 178 | split_policies = split_list(policy, len(control_str_ids)) 179 | fmted_policies = dict() 180 | for i, control_id in enumerate(control_str_ids): 181 | control_type = control_id.split()[0] 182 | if control_type == 'ORIFICE' or control_type == 'WEIR': 183 | fmted_policies[control_id] = split_policies[i] 184 | elif control_type == 'PUMP': 185 | on_off = [bit_to_on_off(round(p)) for p in split_policies[i]] 186 | fmted_policies[control_id] = on_off 187 | return fmted_policies 188 | 189 | 190 | def format_policies(policy, control_str_ids, n_control_steps, opt_method): 191 | if opt_method == 'genetic_algorithm': 192 | return gene_to_policy_dict(policy, control_str_ids, n_control_steps) 193 | elif opt_method == 'bayesian_opt': 194 | return list_to_policy(policy, control_str_ids, n_control_steps) 195 | 196 | 197 | def prep_tmp_files(proc_inp, work_dir): 198 | # make process model tmp file 199 | rand_string = ''.join(random.choice( 200 | string.ascii_lowercase + string.digits) for _ in range(9)) 201 | 202 | # make a copy of the process model input file 203 | tmp_proc_base = proc_inp.replace('.inp', 204 | '_tmp_{}'.format(rand_string)) 205 | tmp_proc_inp = tmp_proc_base + '.inp' 206 | tmp_proc_rpt = tmp_proc_base + '.rpt' 207 | copyfile(proc_inp, tmp_proc_inp) 208 | 209 | # make copy of hs file 210 | hs_file_path = up.read_hs_filename(proc_inp) 211 | hs_file_name = os.path.split(hs_file_path)[-1] 212 | tmp_hs_file_name = hs_file_name.replace('.hsf', 213 | '_{}.hsf'.format(rand_string)) 214 | tmp_hs_file_path = os.path.join(sm.run.work_dir, tmp_hs_file_name) 215 | copyfile(hs_file_path, tmp_hs_file_path) 216 | return tmp_proc_inp, tmp_proc_rpt, tmp_hs_file_path 217 | 218 | 219 | def evaluate(*individual): 220 | """ 221 | evaluate the performance of an individual given the inp file of the process 222 | model, the individual, the control params (ctl_str_ids, horizon, step), 223 | and the cost function params (dev_weight/dict, flood weight/dict) 
224 | """ 225 | FNULL = open(os.devnull, 'w') 226 | # prep files 227 | tmp_inp, tmp_rpt, tmp_hs = prep_tmp_files(sm.run.inp_process_file_path, 228 | sm.run.work_dir) 229 | 230 | # format policies 231 | if sm.run.opt_method == 'genetic_algorithm': 232 | individual = individual[0] 233 | elif sm.run.opt_method == 'bayesian_opt': 234 | individual = np.squeeze(individual) 235 | 236 | fmted_policies = format_policies(individual, sm.run.ctl_str_ids, 237 | sm.run.n_ctl_steps, sm.run.opt_method) 238 | 239 | # update controls 240 | up.update_controls_and_hotstart(tmp_inp, 241 | sm.run.ctl_time_step, 242 | fmted_policies, 243 | tmp_hs) 244 | 245 | # run the swmm model 246 | if os.name == 'nt': 247 | swmm_exe_cmd = 'swmm5.exe' 248 | elif sys.platform.startswith('linux'): 249 | swmm_exe_cmd = 'swmm5' 250 | cmd = '{} {} {}'.format(swmm_exe_cmd, tmp_inp, 251 | tmp_rpt) 252 | subprocess.call(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT) 253 | 254 | # get cost 255 | cost = get_cost(tmp_rpt, 256 | sm.run.node_flood_weight_dict, 257 | sm.run.flood_weight, 258 | sm.run.target_depth_dict, 259 | sm.run.dev_weight) 260 | 261 | os.remove(tmp_inp) 262 | os.remove(tmp_rpt) 263 | os.remove(tmp_hs) 264 | return cost 265 | -------------------------------------------------------------------------------- /swmm_mpc/plot_passive_vs_active.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import subprocess 3 | import matplotlib.pyplot as plt 4 | from rpt_ele import rpt_ele 5 | from update_process_model_input_file import update_controls_with_policy,\ 6 | remove_control_section 7 | 8 | def plot(inp_file, policy_file, control_time_step, ele, variable, save_dir, 9 | save_sfx='', show=True, figsize=(3.5, 2.8), fontsize=10): 10 | # run swmm active 11 | policy_id = policy_file.split("/")[-1].split("_")[-1] 12 | 13 | update_controls_with_policy(inp_file, control_time_step, policy_file) 14 | 15 | rpt_file_act = inp_file.replace(".inp", "_acive.rpt") 16 | cmd = "swmm5 {} {}".format(inp_file, rpt_file_act) 17 | subprocess.call(cmd, shell=True, stderr=subprocess.STDOUT) 18 | rpt_obj_act = rpt_ele(rpt_file_act) 19 | 20 | # run swmm passive 21 | remove_control_section(inp_file) 22 | 23 | rpt_file_pass = inp_file.replace(".inp", "_acive.rpt") 24 | cmd = "swmm5 {} {}".format(inp_file, rpt_file_pass) 25 | subprocess.call(cmd, shell=True, stderr=subprocess.STDOUT) 26 | rpt_obj_pass = rpt_ele(rpt_file_pass) 27 | 28 | # plot 29 | ax = rpt_obj_act.get_ele_df(ele)[variable].plot(label='MPC', 30 | figsize=figsize, 31 | fontsize=fontsize 32 | ) 33 | rpt_obj_pass.get_ele_df(ele)[variable].plot(label='passive', ax=ax) 34 | 35 | # formatting 36 | if variable == 'Depth': 37 | ylabel = 'Depth (ft)' 38 | elif variable == 'Flooding': 39 | ylabel = 'Flooding (million gallons)' 40 | 41 | ax.set_title("{} at {}".format(variable, ele), fontsize=fontsize) 42 | ax.set_ylabel(ylabel) 43 | ax.legend() 44 | plt.tight_layout() 45 | plt.savefig("{}/{}_{}{}".format(save_dir, variable, ele.replace(" ", "-"), 46 | save_sfx), 47 | dpi=300) 48 | if show: 49 | plt.show() 50 | -------------------------------------------------------------------------------- /swmm_mpc/plot_results.py: -------------------------------------------------------------------------------- 1 | import math 2 | import string 3 | import matplotlib.pyplot as plt 4 | import pandas as pd 5 | import matplotlib.dates as mdates 6 | from rpt_ele import rpt_ele 7 | 8 | 9 | 10 | def get_df(rpts, ele, variable, column_names=None): 11 | """ 12 | 
-------------------------------------------------------------------------------- /swmm_mpc/plot_results.py: -------------------------------------------------------------------------------- 1 | import math 2 | import string 3 | import matplotlib.pyplot as plt 4 | import pandas as pd 5 | import matplotlib.dates as mdates 6 | from rpt_ele import rpt_ele 7 | 8 | 9 | 10 | def get_df(rpts, ele, variable, column_names=None): 11 | """ 12 | get pandas dataframe of different versions of results for one element for 13 | one variable 14 | rpts: list of rpt_ele objects - rpt_objects to be combined 15 | ele: string - swmm model element to extract from rpt_ele objects 16 | (e.g., "Node J3") 17 | variable: string - variable to extract data (e.g., "Depth") 18 | column_names: list of strings - column labels for different versions 19 | (e.g., ["Passive", "Rules", "MPC"]) 20 | """ 21 | ser_list = [] 22 | for i, rpt in enumerate(rpts): 23 | if variable == "Total Flooding": 24 | ser_list.append(pd.Series([rpt.total_flooding])) 25 | else: 26 | ser_list.append(rpt.get_ele_df(ele)[variable]) 27 | comb = pd.concat(ser_list, 1) 28 | if column_names: 29 | comb.columns = column_names 30 | return comb 31 | 32 | 33 | def plot_versions_single(df, variable, ylabel, fontsize, lw, title=None, 34 | colors=None, ax=None, sublabel=None, 35 | linestyles=["--", "-.", "-", ":"]): 36 | """ 37 | make a plot of multiple versions of rpt_elements at one node for one 38 | variable 39 | df: pandas dataframe - dataframe with values of one variable at one node 40 | for different versions. The different versions are the columns 41 | ylabel: string - label for y-axis; variable: string - variable plotted 42 | title: string - title for plot; fontsize/lw - font size and line width 43 | colors: list of strings - matplotlib colors corresponding to the different 44 | versions of the results 45 | ax: matplotlib axes object - axes where the plot will be made 46 | sublabel: string - corner label; linestyles: list of matplotlib styles 47 | """ 48 | plt.rc('font', weight='bold', size=fontsize) 49 | if not colors: 50 | colors = ["#999999", "#000d29", "#118c8b"] 51 | 52 | if variable == "Total Flooding": 53 | df = df*1000 54 | ax = df.plot.bar(ax=ax, color=colors, legend=False) 55 | # plt.tick_params( 56 | # axis='x', # changes apply to the x-axis 57 | # which='both', # major and minor ticks are affected 58 | # bottom=False, # ticks along the bottom edge are off 59 | # top=False, # ticks along the top edge are off 60 | # labelbottom=False # label is off 61 | # ) 62 | ax.set_ylim((0, df.max().max()*1.15)) 63 | xs = [] 64 | for p in ax.patches: 65 | val = str(round(p.get_height(), 1)) 66 | x = p.get_x()+ (p.get_width()/2) 67 | y = p.get_height() * 1.005 68 | ax.annotate(val, (x, y), ha='center', weight='normal') 69 | xs.append(x) 70 | ax.set_xticks(xs) 71 | ax.set_xticklabels([i + 1 for i in range(len(df.columns))], 72 | rotation=0) 73 | ax.set_xlabel('Scenario', fontsize=fontsize, weight='normal') 74 | else: 75 | for col in df.columns: 76 | ax.plot(df.index, df[col], label=col, lw=lw) 77 | lines = ax.lines 78 | 79 | for i in range(len(lines)): 80 | lines[i].set_color(colors[i]) 81 | lines[i].set_linestyle(linestyles[i]) 82 | 83 | ax.set_xlabel('Time elapsed (hr)', fontsize=fontsize, weight='normal') 84 | ax.xaxis.set_major_locator(mdates.HourLocator(interval=3)) 85 | ax.xaxis.set_major_formatter(mdates.DateFormatter('%H')) 86 | ax.set_xlim((df.index.min(), df.index.max())) 87 | ax.grid(which='both', color='#F0F0F0') 88 | 89 | if sublabel: 90 | ax.text(0.98, 0.02, sublabel, horizontalalignment='left', 91 | verticalalignment='top', transform=ax.transAxes, 92 | fontsize=fontsize) 93 | 94 | ax.set_ylabel(ylabel, fontsize=fontsize, weight='normal') 95 | if title: 96 | ax.set_title(title, fontsize=fontsize, weight='bold') 97 | ax.xaxis.set_tick_params(labelsize=fontsize) 98 | ax.yaxis.set_tick_params(labelsize=fontsize) 99 | return ax 100 | 101 | 102 | def get_unit_label(units, variable): 103 | variable = variable.lower() 104 | if units == 'english': 105 | if variable == 'depth': 
return "ft" 107 | elif variable == 'flooding': 108 | return "cfs" 109 | elif variable == 'total flooding': 110 | return '10^3 cubic feet' 111 | else: 112 | return "unknown" 113 | elif units == 'metric': 114 | if variable == 'depth': 115 | return 'm' 116 | elif variable == 'flooding': 117 | return 'cms' 118 | elif variable == 'total flooding': 119 | return '10^3 cubic meters' 120 | else: 121 | return 'unknown' 122 | else: 123 | return 'unknown' 124 | 125 | 126 | def make_values_metric(df, variable): 127 | variable = variable.lower() 128 | if variable == "depth": 129 | factor = 0.3048 # meters/foot 130 | elif variable == 'flooding' or variable == 'total flooding': 131 | factor = 0.028316847000000252 # cubic meters/cubic foot 132 | return df*factor 133 | 134 | 135 | def plot_versions_together(node_id_vars, rpt_files, rpt_labels, fig_dir, sfx, 136 | node_maxes={}, target_depths={}, 137 | units="english", fontsize=12, figsize=(6, 4), lw=2): 138 | """ 139 | plot variable results at different nodes in one figure 140 | node_id_vars: list of tuples - tuple has node_id as first element and 141 | variable to plot as second element 142 | (e.g., [("Node St1", "Depth"), ("Node J3", "Flooding")]) 143 | rpts_files: list of strings - file paths of different versions that will be 144 | in each sublot 145 | rpt_labels: list of strings - labels for the different rpt versions 146 | (e.g., ["Passive", "Rules", "MPC"]) 147 | fig_dir: string - directory where file should be saved 148 | sfx: string - suffix to be put on the end of the file name 149 | node_maxes: dict - key is node id, value is maximum value of the variable 150 | for the node in same units as 'units' parameter. If present 151 | these will be plotted as horizontal lines for reference 152 | target_depths: dict - key is node id, value is target depth for the node in 153 | same units as 'units' parameter. If present these will be 154 | plotted as horizontal lines for reference 155 | units: string - "english" or "metric". 
If "metric" conversions from english 156 | units will be performed 157 | """ 158 | plt.rc('font', weight='bold', size=fontsize) 159 | rpts = [rpt_ele(r) for r in rpt_files] 160 | 161 | nplots = len(node_id_vars) 162 | nrows = int(round(nplots**0.5)) 163 | ncols = int(math.ceil(float(nplots)/float(nrows))) 164 | fig, axs = plt.subplots(nrows=nrows, ncols=ncols, sharex=False, 165 | figsize=figsize) 166 | 167 | if nplots > 1: 168 | axs_list = axs.ravel() 169 | else: 170 | axs_list = [axs] 171 | 172 | counter = 0 173 | node_max_line = None 174 | target_depth_line = None 175 | for node_id, variable in node_id_vars: 176 | var_df = get_df(rpts, node_id, variable, rpt_labels) 177 | 178 | # correct for units 179 | if units == 'metric': 180 | var_df = make_values_metric(var_df, variable) 181 | elif units == 'english': 182 | pass 183 | else: 184 | raise ValueError('units variable should be "english" or "metric".\ 185 | you entered {}'.format(units)) 186 | 187 | unit_label = get_unit_label(units, variable) 188 | 189 | if node_id != '': 190 | plot_title = "{} at {}".format(variable, node_id) 191 | else: 192 | plot_title = "{}".format(variable) 193 | 194 | ax = plot_versions_single(var_df, variable, unit_label, fontsize, lw, 195 | title=plot_title, ax=axs_list[counter]) 196 | node_max = node_maxes.get(node_id) 197 | if node_max: 198 | node_max_line = ax.axhline(node_max, c='darkred', alpha=0.5, 199 | label='Max depth') 200 | 201 | target_depth = target_depths.get(node_id) 202 | if target_depth: 203 | target_depth_line = ax.axhline(target_depth, c='magenta', 204 | alpha=0.5, label='Target depth') 205 | 206 | counter += 1 207 | 208 | handles, labels = ax.get_legend_handles_labels() 209 | if target_depth_line: 210 | handles.insert(1, target_depth_line) 211 | labels.insert(1, target_depth_line.get_label()) 212 | if node_max_line: 213 | if target_depth_line: 214 | position = 3 215 | else: 216 | position = 1 217 | handles.insert(position, node_max_line) 218 | labels.insert(position, node_max_line.get_label()) 219 | 220 | fig.legend(handles, labels, loc='lower center', ncol=3, 221 | bbox_to_anchor=(0.5, 0)) 222 | fig.tight_layout() 223 | fig.subplots_adjust(bottom=0.2, hspace=0.7) 224 | fig.savefig("{}/{}_{}".format(fig_dir, "combined", sfx), dpi=300) 225 | plt.show() 226 | return fig 227 | -------------------------------------------------------------------------------- /swmm_mpc/rpt_ele.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | 3 | class rpt_ele(): 4 | def __init__(self, rpt_file): 5 | """ 6 | rpt_file (str): the name of the .rpt file you wante to read 7 | """ 8 | self.rpt = rpt_file 9 | self.file_contents = self.get_file_contents() 10 | self.total_flooding = self.get_total_flooding() 11 | if self.total_flooding > 0: 12 | self.flooding_df = self.get_summary_df("Node Flooding Summary") 13 | else: 14 | self.flooding_df = pd.DataFrame() 15 | self.depth_df = self.get_summary_df("Node Depth Summary") 16 | self.node_ids = self.depth_df.index.tolist() 17 | 18 | def get_file_contents(self): 19 | with open(self.rpt, 'r') as f: 20 | lines = f.readlines() 21 | return lines 22 | 23 | def get_ele_df(self, ele): 24 | start_line_no, end_line_no = self.get_ele_lines(ele) 25 | col_titles = self.file_contents[start_line_no+3].strip().split()[:2] 26 | col_titles.extend(self.file_contents[start_line_no+2].strip().split()) 27 | content_start = start_line_no + 5 28 | content_end = end_line_no - 1 29 | content_list = [] 30 | for i in range(content_start, 
content_end): 31 | content_list.append(self.file_contents[i].split()) 32 | df = pd.DataFrame(content_list, columns=col_titles) 33 | df["datetime"] = pd.to_datetime(df["Date"] + " " + df["Time"]) 34 | df["datetime"] = df["datetime"].dt.round('min') 35 | df.set_index("datetime", inplace=True) 36 | for c in df.columns: 37 | try: 38 | df[c] = pd.to_numeric(df[c]) 39 | except (ValueError, TypeError): 40 | pass 41 | return df 42 | 43 | def get_start_line(self, start_string, start=0): 44 | for i in range(len(self.file_contents[start:])): 45 | line_no = i + start 46 | line_lower = self.file_contents[line_no].strip().lower() 47 | start_string_lower = start_string.lower().strip() 48 | 49 | if line_lower.startswith(start_string_lower): 50 | return line_no 51 | 52 | # raise error if start line of section not found 53 | raise KeyError('Start line for string {} not found'.format(start_string)) 54 | 55 | def get_end_line(self, start_line): 56 | for i in range(len(self.file_contents[start_line:])): 57 | line_no = start_line + i 58 | if self.file_contents[line_no].strip() == "" and \ 59 | self.file_contents[line_no + 1].strip() == "": 60 | return line_no 61 | # raise error if end line of section not found 62 | raise KeyError('Did not find end of section starting on line {}'.format(start_line)) 63 | 64 | def get_ele_lines(self, ele): 65 | start_line = self.get_start_line("<<< {} >>>".format(ele.lower())) 66 | end_line = self.get_end_line(start_line) 67 | return start_line, end_line 68 | 69 | def get_total_flooding(self): 70 | fl_start_line = self.get_start_line("Flooding Loss") 71 | return float(self.file_contents[fl_start_line].split()[-1]) 72 | 73 | def get_summary_df(self, heading): 74 | """ 75 | heading: heading of summary table (e.g., "Node Flooding Summary") 76 | returns: a dataframe of the tabular data under the heading specified 77 | """ 78 | summary_start = self.get_start_line(heading) 79 | summary_end = self.get_end_line(summary_start) 80 | lines = self.file_contents[summary_start:summary_end] 81 | # reverse the list of strings so data is on top. makes it easier to handle (less skipping) 82 | lines.reverse() 83 | first_row = True 84 | for i, l in enumerate(lines): 85 | if not l.strip().startswith('---'): 86 | # add as row to dataframe 87 | line = l.strip().split() 88 | if first_row: 89 | df = pd.DataFrame(columns = range(len(line))) 90 | first_row = False 91 | df.loc[i] = line 92 | else: 93 | df.set_index(0, inplace=True) 94 | return df 95 | 96 | 
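# example usage (the file name is illustrative):
#   rpt = rpt_ele('example.rpt')
#   rpt.total_flooding                    # total flooding volume from the .rpt
#   rpt.get_ele_df('Node St1')['Depth']   # depth time series at node St1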
-------------------------------------------------------------------------------- /swmm_mpc/run_baeopt.py: -------------------------------------------------------------------------------- 1 | import GPyOpt 2 | import swmm_mpc as sm 3 | import evaluate as ev 4 | import numpy as np 5 | from GPyOpt.methods import BayesianOptimization as BayOpt 6 | 7 | 8 | def get_bounds(ctl_str_ids, nsteps): 9 | bounds = [] 10 | for ctl in ctl_str_ids: 11 | var_num = 0 12 | for j in range(nsteps): 13 | ctl_type = ctl.split()[0] 14 | ctl_bounds = {} 15 | 16 | if ctl_type == 'WEIR' or ctl_type == 'ORIFICE': 17 | var_type = 'continuous' 18 | elif ctl_type == 'PUMP': 19 | var_type = 'discrete' 20 | 21 | ctl_bounds['name'] = 'var_{}'.format(var_num) 22 | ctl_bounds['type'] = var_type 23 | ctl_bounds['domain'] = (0, 1) 24 | var_num += 1 25 | bounds.append(ctl_bounds) 26 | return bounds 27 | 28 | 29 | def run_baeopt(opt_params): 30 | # set up opt params 31 | bounds = get_bounds(sm.run.ctl_str_ids, sm.run.n_ctl_steps) 32 | max_iter = opt_params.get('max_iter', 15) 33 | max_time = opt_params.get('max_time', 120) 34 | initial_guess = opt_params.get('initial_guess', []) 35 | if len(initial_guess) == 0: 36 | initial_guess = None 37 | else: 38 | initial_guess = np.array([initial_guess]) 39 | 40 | eps = opt_params.get('eps', 0.01) 41 | model_type = opt_params.get('model_type', 'GP') 42 | acquisition_type = opt_params.get('acquisition_type', 'EI') 43 | 44 | # instantiate object 45 | bae_opt = BayOpt(ev.evaluate, 46 | domain=bounds, 47 | model_type=model_type, 48 | acquisition_type=acquisition_type, 49 | X=initial_guess, 50 | evaluator_type='local_penalization', 51 | num_cores=opt_params['num_cores'], 52 | batch_size=opt_params['num_cores'], 53 | ) 54 | bae_opt.run_optimization(max_iter, max_time, eps) 55 | return bae_opt.x_opt, bae_opt.fx_opt 56 | 57 | -------------------------------------------------------------------------------- /swmm_mpc/run_ea.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import random 4 | import multiprocessing 5 | from deap import base, creator, tools, algorithms 6 | import numpy as np 7 | import evaluate as ev 8 | import swmm_mpc as sm 9 | 10 | 11 | def evaluate_ea(individual): 12 | cost = ev.evaluate(individual) 13 | return cost, 14 | 15 | 16 | def run_ea(work_dir, config_file, ga_params): 17 | creator.create('FitnessMin', base.Fitness, weights=(-1.0,)) 18 | creator.create('Individual', list, fitness=creator.FitnessMin) 19 | 20 | pool = multiprocessing.Pool(ga_params['num_cores']) 21 | toolbox = base.Toolbox() 22 | toolbox.register('map', pool.map) 23 | toolbox.register('attr_binary', random.randint, 0, 1) 24 | toolbox.register('mate', tools.cxTwoPoint) 25 | toolbox.register('mutate', tools.mutFlipBit, indpb=0.20) 26 | toolbox.register('select', tools.selTournament, tournsize=6) 27 | toolbox.register('evaluate', evaluate_ea) 28 | 29 | policy_len = get_policy_length(sm.run.ctl_str_ids, 30 | sm.run.n_ctl_steps) 31 | toolbox.register('individual', tools.initRepeat, creator.Individual, 32 | toolbox.attr_binary, policy_len) 33 | 34 | # read 
from the json file to initialize population if exists 35 | # (not first time) 36 | pop_file = os.path.join(work_dir, "population.json") 37 | if os.path.isfile(pop_file): 38 | toolbox.register("pop_guess", init_population, creator.Individual, 39 | pop_file) 40 | pop = toolbox.pop_guess() 41 | else: 42 | toolbox.register('population', tools.initRepeat, list, 43 | toolbox.individual) 44 | pop = toolbox.population(n=ga_params.get('nindividuals', 25)) 45 | 46 | hof = tools.HallOfFame(1) 47 | stats = tools.Statistics(lambda ind: ind.fitness.values) 48 | stats.register('avg', np.mean) 49 | stats.register('min', np.min) 50 | stats.register('max', np.max) 51 | pop, logbook = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.2, 52 | ngen=ga_params.get('ngen', 7), 53 | stats=stats, 54 | halloffame=hof, verbose=True) 55 | seed_next_population(hof[0], ga_params.get('nindividuals', 25), 56 | sm.run.ctl_str_ids, pop_file, sm.run.n_ctl_steps) 57 | min_cost = min(logbook.select("min")) 58 | pool.close() 59 | pool.join() 60 | return hof[0], min_cost 61 | 62 | 63 | def write_pop_to_file(population, pop_file): 64 | """ 65 | write a population of individuals to json file 66 | """ 67 | with open(pop_file, 'w') as myfile: 68 | json.dump(population, myfile) 69 | 70 | 71 | def mutate_pop(best_policy, nindividuals, control_str_ids, n_steps): 72 | """ 73 | mutate the best policy of the current time step 74 | """ 75 | list_of_inds = [] 76 | for i in range(nindividuals): 77 | # split because there may be more than one control 78 | split_lists = ev.split_gene_by_ctl_ts(best_policy, control_str_ids, 79 | n_steps) 80 | mutated_ind = [] 81 | for seg_by_ctl in split_lists: 82 | setting_length = len(seg_by_ctl[0]) 83 | # disregard the first control step since we need future policy 84 | seg_by_ctl = seg_by_ctl[1:] 85 | # set setting length to one in case there is only one setting 86 | # mutate the remaining settings 87 | mutated_ctl_segment = [] 88 | for seg_by_ts in seg_by_ctl: 89 | tools.mutFlipBit(seg_by_ts, 0.2) 90 | mutated_ctl_segment.extend(seg_by_ts) 91 | # add a random setting for the last time step in the future policy 92 | rand_sttng = [random.randint(0, 1) for i in range(setting_length)] 93 | mutated_ctl_segment.extend(rand_sttng) 94 | # add the new policy for the control structure to the overall pol 95 | mutated_ind.extend(mutated_ctl_segment) 96 | # don't add the new indivi to the pop if identical indivi already there 97 | if mutated_ind not in list_of_inds: 98 | list_of_inds.append(mutated_ind) 99 | return list_of_inds 100 | 101 | 102 | def seed_next_population(best_policy, nindividuals, control_str_ids, pop_file, 103 | n_steps): 104 | """ 105 | seed the population for the next time step using the best policy from the 106 | current time step as the basis. 
107 | best_policy: [list] binary string representing best policy of current ts 108 | nindividuals: [int] number of individuals per generation in GA 109 | control_str_ids:[list] list of control ids (e.g., ['ORIFICE r1', ...]) 110 | pop_file: [string] name of file where the next seed pop will be saved 111 | n_steps: [int] number of control time steps 112 | return: [list] mutated population 113 | 114 | """ 115 | mutated_pop = mutate_pop(best_policy, nindividuals, control_str_ids, 116 | n_steps) 117 | 118 | # in case there were duplicates after mutating, 119 | # fill the rest of the population with random individuals 120 | while len(mutated_pop) < nindividuals: 121 | rand_ind = [] 122 | for i in range(len(best_policy)): 123 | rand_ind.append(random.randint(0, 1)) 124 | if rand_ind not in mutated_pop: 125 | mutated_pop.append(rand_ind) 126 | write_pop_to_file(mutated_pop, pop_file) 127 | 128 | return mutated_pop 129 | 130 | 131 | def init_population(ind_init, filename): 132 | """ 133 | create initial population from json file 134 | ind_init: [class] class that and individual will be assigned to 135 | filename: [string] string of filename from which pop will be read 136 | returns: [list] list of Individual objects 137 | """ 138 | with open(filename, "r") as pop_file: 139 | contents = json.load(pop_file) 140 | return list(ind_init(c) for c in contents) 141 | 142 | 143 | def get_policy_length(control_str_ids, n_control_steps): 144 | """ 145 | get the length of the policy. ASSUMPTION - PUMP controls are binary 1 BIT, 146 | ORIFICE and WEIR are 3 BITS 147 | returns: [int] the number of total control decisions in the policy 148 | """ 149 | pol_len = 0 150 | for ctl_id in control_str_ids: 151 | ctl_type = ctl_id.split()[0] 152 | if ctl_type == 'ORIFICE' or ctl_type == 'WEIR': 153 | pol_len += 3*n_control_steps 154 | elif ctl_type == 'PUMP': 155 | pol_len += n_control_steps 156 | return pol_len 157 | -------------------------------------------------------------------------------- /swmm_mpc/swmm_mpc.py: -------------------------------------------------------------------------------- 1 | import os 2 | import datetime 3 | import random 4 | from shutil import copyfile 5 | import shutil 6 | import pandas as pd 7 | import pyswmm 8 | from pyswmm import Simulation, Links 9 | import update_process_model_input_file as up 10 | import evaluate as ev 11 | import run_ea as ra 12 | import json 13 | import run_baeopt as bo 14 | 15 | run = None 16 | 17 | def get_global_run(config_file): 18 | global run 19 | run = swmm_mpc_run(config_file) 20 | 21 | 22 | class swmm_mpc_run(object): 23 | def __init__(self, config_file): 24 | with open(config_file, 'r') as f: 25 | config_dict = json.load(f) 26 | self.inp_file_path = os.path.abspath(config_dict['inp_file_path']) 27 | self.ctl_horizon = config_dict['ctl_horizon'] 28 | self.ctl_time_step = config_dict['ctl_time_step'] 29 | self.ctl_str_ids = config_dict['ctl_str_ids'] 30 | self.work_dir = os.path.abspath(config_dict['work_dir']) 31 | self.results_dir = os.path.abspath(config_dict['results_dir']) 32 | self.opt_method = config_dict['opt_method'] 33 | self.optimization_params = config_dict.get('optimization_params', {}) 34 | if 'num_cores' in self.optimization_params: 35 | if type(self.optimization_params['num_cores']) != int: 36 | self.optimization_params['num_cores'] = 1 37 | else: 38 | self.optimization_params['num_cores'] = 1 39 | self.run_suffix = config_dict['run_suffix'] 40 | self.target_depth_dict = config_dict.get('target_depth_dict', None) 41 | 
self.node_flood_weight_dict = config_dict.get('node_flood_weight_dict', 42 | None) 43 | self.flood_weight = config_dict.get('flood_weight', 1) 44 | if self.target_depth_dict: 45 | self.dev_weight = config_dict.get('dev_weight', 1) 46 | else: 47 | self.dev_weight = config_dict.get('dev_weight', 0) 48 | self.log_file = os.path.join(self.results_dir, 49 | 'log_{}'.format(self.run_suffix)) 50 | 51 | # check ctl_str_ids 52 | validate_ctl_str_ids(self.ctl_str_ids) 53 | 54 | # the input directory and the file name 55 | self.inp_file_dir, inp_file_name = os.path.split(self.inp_file_path) 56 | # the process file name with no extension 57 | inp_process_file_base = inp_file_name.replace('.inp', '_process') 58 | # the process .inp file name 59 | inp_process_file_inp = inp_process_file_base + '.inp' 60 | self.inp_process_file_path = os.path.join(self.work_dir, 61 | inp_process_file_inp) 62 | # copy input file to process file name 63 | copyfile(self.inp_file_path, self.inp_process_file_path) 64 | 65 | self.n_ctl_steps = int(self.ctl_horizon*3600/self.ctl_time_step) 66 | 67 | 68 | def run_swmm_mpc(config_file): 69 | ''' 70 | config_file: [string] path to config file. config file is a JSON file that 71 | contains the following key value pairs: 72 | inp_file_path: [string] path to .inp file relative to config file 73 | ctl_horizon: [number] ctl horizon in hours 74 | ctl_time_step: [number] control time step in seconds 75 | ctl_str_ids: [list of strings] ids of control structures for which 76 | controls policies will be found. Each should start with 77 | one of the key words ORIFICE, PUMP, or WEIR 78 | e.g., [ORIFICE R1, ORIFICE R2] 79 | work_dir: [string] directory relative to config file where the temporary 80 | files will be created 81 | results_dir: [string] directory relative to config file where the results 82 | will be written 83 | opt_method: [string] optimization method. Currently supported methods are 84 | 'genetic_algorithm', and 'bayesian_opt' 85 | optimization_params: [dict] dictionary with key values that will be passed 86 | to the optimization function 87 | for GA this includes 88 | * ngen: [int] number of generations for GA 89 | * nindividuals: [int] number of individuals for 90 | initial generation in GA 91 | run_suffix: [string] will be added to the results filename 92 | flood_weight: [number] overall weight for the sum of all flooding relative 93 | to the overall weight for the sum of the absolute deviations 94 | from target depths (dev_weight). Default: 1 95 | dev_weight: [number] overall weight for the sum of the absolute deviations 96 | from target depths. This weight is relative to the flood_weight 97 | Default: 0 98 | target_depth_dict: [dict] dictionary where the keys are the nodeids and 99 | the values are a dictionary. The inner dictionary has 100 | two keys, 'target', and 'weight'. These values specify 101 | the target depth for the nodeid and the weight given 102 | to that in the cost function. Default: None 103 | e.g., {'St1': {'target': 1, 'weight': 0.1}} 104 | node_flood_weight_dict: [dict] dictionary where the keys are the node ids 105 | and the values are the relative weights for 106 | weighting the amount of flooding for a given node. 107 | e.g., {'st1': 10, 'J3': 1}. 
Default: None 108 | 109 | ''' 110 | # save params to file 111 | get_global_run(config_file) 112 | print(vars(run)) 113 | 114 | with open(run.log_file, 'w') as f: 115 | f.write(str(vars(run))) 116 | f.write('\n') 117 | 118 | pyswmm.lib.use('libswmm5_hs.so') 119 | 120 | # record when simulation begins 121 | beg_time = datetime.datetime.now() 122 | run_beg_time_str = beg_time.strftime('%Y.%m.%d.%H.%M') 123 | print("Simulation start: {}".format(run_beg_time_str)) 124 | best_policy_ts = [] 125 | 126 | # make sure there are no control rules in the inp file 127 | up.remove_control_section(run.inp_file_path) 128 | 129 | # run simulation 130 | with Simulation(run.inp_file_path) as sim: 131 | sim.step_advance(run.ctl_time_step) 132 | sim_start_time = sim.start_time 133 | for step in sim: 134 | # get most current system states 135 | current_dt = sim.current_time 136 | 137 | dt_hs_file = 'tmp_hsf.hsf' 138 | print(current_dt) 139 | dt_hs_path = os.path.join(os.getcwd(), dt_hs_file) 140 | sim.save_hotstart(dt_hs_path) 141 | 142 | link_obj = Links(sim) 143 | 144 | # update the process model with the current states 145 | up.update_process_model_file(run.inp_process_file_path, 146 | current_dt, dt_hs_path) 147 | 148 | if run.opt_method == 'genetic_algorithm': 149 | best_policy, cost = ra.run_ea(run.work_dir, config_file, 150 | run.optimization_params) 151 | elif run.opt_method == 'bayesian_opt': 152 | best_policy, cost = bo.run_baeopt(run.optimization_params) 153 | initial_guess = get_initial_guess(best_policy, run.ctl_str_ids) 154 | run.optimization_params['initial_guess'] = initial_guess 155 | else: 156 | raise ValueError( 157 | '{} not valid opt method'.format(run.opt_method) 158 | ) 159 | print('{} {}'.format(best_policy, cost)) 160 | 161 | best_policy_fmt = ev.format_policies(best_policy, 162 | run.ctl_str_ids, 163 | run.n_ctl_steps, 164 | run.opt_method) 165 | best_policy_ts = update_policy_ts_list(best_policy_fmt, 166 | current_dt, 167 | run.ctl_time_step, 168 | best_policy_ts, 169 | cost) 170 | 171 | results_file = save_results_file(best_policy_ts, run.ctl_str_ids, 172 | run.results_dir, sim_start_time, 173 | run_beg_time_str, run.run_suffix) 174 | 175 | implement_control_policy(link_obj, best_policy_fmt) 176 | 177 | # if we are getting a policy with no cost then it's perfect 178 | if cost == 0: 179 | break 180 | 181 | end_time = datetime.datetime.now() 182 | print('simulation end: {}'.format(end_time.strftime('%Y.%m.%d.%H.%M'))) 183 | elapsed_time = end_time - beg_time 184 | elapsed_time_str = 'elapsed time: {}'.format(elapsed_time.seconds) 185 | print(elapsed_time_str) 186 | 187 | # write the elapsed time to the end of the log file 188 | with open(run.log_file, 'a') as f: 189 | f.write(elapsed_time_str) 190 | 191 | # update original inp file with found control policy 192 | up.update_controls_with_policy(run.inp_file_path, results_file) 193 | 194 | # remove all files in 'work' 195 | delete_files_in_dir(run.work_dir) 196 | 197 | 198 | 199 | def update_policy_ts_list(fmtd_policy, current_dt, ctl_time_step, 200 | best_policy_ts, cost): 201 | # record the rest of the control policy 202 | for ctl_id, policy in fmtd_policy.iteritems(): 203 | # first setting has already been recorded, so disregard 204 | for i, setting in enumerate(policy): 205 | # increase time step 206 | inc_seconds = i * ctl_time_step 207 | inc_time = datetime.timedelta(seconds=inc_seconds) 208 | setting_dt = current_dt + inc_time 209 | # append to list 210 | best_policy_ts.append({'setting_{}'.format(ctl_id): 211 | setting, 212 | 'datetime': setting_dt}) 
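# worked example of the timestamping above (illustrative values):
# with current_dt = 01:00 and ctl_time_step = 900 s, a policy of
# [0.714, 0.857] is stamped 01:00 and 01:15 respectively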
213 | # if cost is not zero only do the first one 214 | # this should be the case for all but the last case 215 | if cost != 0: 216 | break 217 | return best_policy_ts 218 | 219 | 220 | def implement_control_policy(link_obj, best_policy_fmt): 221 | for ctl_id, policy in best_policy_fmt.iteritems(): 222 | next_setting = policy[0] 223 | 224 | # from for example "ORIFICE R1" to "R1" 225 | ctl_id_short = ctl_id.split()[-1] 226 | # implement best policy 227 | if next_setting == 'ON': 228 | next_setting = 1 229 | elif next_setting == 'OFF': 230 | next_setting = 0 231 | 232 | link_obj[ctl_id_short].target_setting = next_setting 233 | 234 | 235 | def save_results_file(best_policy_ts, ctl_str_ids, results_dir, 236 | sim_start_time, run_beg_time_str, run_suffix): 237 | """ 238 | Convert policy time series to dataframe and save to csv 239 | 240 | Parameters 241 | ---------- 242 | best_policy_ts : list of dicts 243 | list of dicts where the key/values are "setting_{ctl id}"/{setting} 244 | and "datetime"/{datetime} 245 | ctl_str_ids : list of str 246 | see documentation in "run_swmm_mpc" 247 | results_dir : str 248 | the directory where the csv will be saved 249 | sim_start_time : datetime object 250 | the datetime of the start time in the simulation 251 | run_beg_time_str : str 252 | the real time when the swmm_mpc run started 253 | run_suffix : str 254 | the run suffix that will be appended to the csv file name 255 | """ 256 | # consolidate ctl settings and save to csv file 257 | ctl_settings_df = pd.DataFrame(best_policy_ts) 258 | ctl_settings_df = ctl_settings_df.groupby('datetime').first() 259 | ctl_settings_df.index = pd.DatetimeIndex(ctl_settings_df.index) 260 | # add a row at the beginning of the policy since controls start open 261 | sim_start_dt = pd.to_datetime(sim_start_time) 262 | initial_states = get_initial_states(ctl_str_ids) 263 | ctl_settings_df.loc[sim_start_dt] = initial_states 264 | ctl_settings_df.sort_index(inplace=True) 265 | results_file = 'ctl_results_{}{}.csv'.format(run_beg_time_str, run_suffix) 266 | results_path = os.path.join(results_dir, results_file) 267 | ctl_settings_df.to_csv(results_path) 268 | return results_path 269 | 270 | 271 | def get_initial_states(ctl_str_ids): 272 | """ 273 | Get list of initial states. ASSUME initial states for ORIFICE/WEIR is 1 274 | (open) and for PUMPS is "OFF" 275 | """ 276 | initial_states = [] 277 | for ctl in ctl_str_ids: 278 | ctl_type = ctl.split()[0] 279 | if ctl_type == 'ORIFICE' or ctl_type == 'WEIR': 280 | initial_states.append(1) 281 | elif ctl_type == 'PUMP': 282 | initial_states.append('OFF') 283 | return initial_states 284 | 285 | 286 | def validate_ctl_str_ids(ctl_str_ids): 287 | """ 288 | make sure the ids are ORIFICE, PUMP, or WEIR 289 | """ 290 | valid_structure_types = ['ORIFICE', 'PUMP', 'WEIR'] 291 | for ctl_id in ctl_str_ids: 292 | ctl_type = ctl_id.split()[0] 293 | if ctl_type not in valid_structure_types: 294 | raise ValueError( 295 | '{} not valid ctl type. 
should be one of {}'.format( 296 | ctl_id, valid_structure_types)) 297 | 298 | 299 | def get_initial_guess(best_pol, ctl_str_ids): 300 | best_pol = best_pol.tolist() 301 | split_by_ctl = ev.split_list(best_pol, len(ctl_str_ids)) 302 | new_guess = [] 303 | for pol in split_by_ctl: 304 | if len(pol) == 1: 305 | return best_pol 306 | else: 307 | # take out first setting 308 | new_pol = pol[1:] 309 | # add random setting at end 310 | new_pol.append(random.random()) 311 | new_guess.extend(new_pol) 312 | return new_guess 313 | 314 | def delete_files_in_dir(folder): 315 | for the_file in os.listdir(folder): 316 | file_path = os.path.join(folder, the_file) 317 | try: 318 | if os.path.isfile(file_path): 319 | os.unlink(file_path) 320 | elif os.path.isdir(file_path): shutil.rmtree(file_path) 321 | except Exception as e: 322 | print(e) 323 | -------------------------------------------------------------------------------- /swmm_mpc/tests/': -------------------------------------------------------------------------------- 1 | import unittest 2 | import evaluate 3 | from swmm_mpc.rpt_ele import rpt_ele 4 | 5 | 6 | class test_evaluate(unittest.TestCase): 7 | rpt_file = "example.rpt" 8 | rpt = rpt_ele(rpt_file) 9 | 10 | def test_get_flood_cost_no_dict(self): 11 | node_fld_wgt_dict = None 12 | cost = evaluate.get_flood_cost(self.rpt, node_fld_wgt_dict) 13 | self.assertEqual(cost, 0.320) 14 | 15 | def test_get_flood_cost_dict(self): 16 | node_fld_wgt_dict = {"J3":1, "St1":1, "St2":1} 17 | cost = evaluate.get_flood_cost(self.rpt, node_fld_wgt_dict) 18 | self.assertEqual(cost, 0.320) 19 | 20 | -------------------------------------------------------------------------------- /swmm_mpc/tests/ctl_results.csv: -------------------------------------------------------------------------------- 1 | datetime,setting_ORIFICE R1,setting_ORIFICE R2 2 | 2/6/2018 0:00,1,1 3 | 2/6/2018 0:15,0.714,0.714 4 | 2/6/2018 0:30,0.143,0 5 | 2/6/2018 0:45,0.571,0.286 6 | 2/6/2018 1:00,0.286,0.571 7 | 2/6/2018 1:15,0.429,0.286 8 | 2/6/2018 1:30,0.143,0.429 9 | 2/6/2018 1:45,0.143,0.571 10 | 2/6/2018 2:00,0.286,0.286 11 | 2/6/2018 2:15,0.143,0.429 12 | 2/6/2018 2:30,0.143,0.571 13 | 2/6/2018 2:45,0.429,0.143 14 | 2/6/2018 3:00,0.286,0.286 15 | 2/6/2018 3:15,0.286,0.286 16 | 2/6/2018 3:30,0.143,0.286 17 | 2/6/2018 3:45,0.429,0.143 18 | 2/6/2018 4:00,0.143,0.429 19 | 2/6/2018 4:15,0.714,0.429 20 | 2/6/2018 4:30,0.286,0.429 21 | 2/6/2018 4:45,0.714,1 22 | 2/6/2018 5:00,0.286,0.714 23 | 2/6/2018 5:15,0.286,0.429 24 | 2/6/2018 5:30,0.286,0.429 25 | 2/6/2018 5:45,0.429,0.714 26 | 2/6/2018 6:00,0.286,0.429 27 | 2/6/2018 6:15,0.286,0.429 28 | 2/6/2018 6:30,0.286,0.429 29 | 2/6/2018 6:45,0.857,0.571 30 | 2/6/2018 7:00,0.571,0.286 31 | 2/6/2018 7:15,0.429,0.714 32 | 2/6/2018 7:30,0.286,0.429 33 | 2/6/2018 7:45,0.571,0.286 34 | 2/6/2018 8:00,0.571,0.857 35 | 2/6/2018 8:15,0.286,0.286 36 | 2/6/2018 8:30,0.286,0.286 37 | 2/6/2018 8:45,0.286,0.429 38 | 2/6/2018 9:00,0.143,0.571 39 | 2/6/2018 9:15,0.429,0.429 40 | 2/6/2018 9:30,0.571,0.286 41 | 2/6/2018 9:45,0.429,0.286 42 | 2/6/2018 10:00,0.286,0.286 43 | 2/6/2018 10:15,0.286,0.286 44 | 2/6/2018 10:30,0.286,0.429 45 | 2/6/2018 10:45,0.429,0.286 46 | 2/6/2018 11:00,0.429,0.286 47 | 2/6/2018 11:15,0.286,0.429 48 | 2/6/2018 11:30,1,0.571 49 | 2/6/2018 11:45,0.429,0.571 50 | 2/6/2018 12:00,0.571,0.429 51 | 2/6/2018 12:15,0.429,0.571 52 | 2/6/2018 12:30,0.429,0.429 53 | 2/6/2018 12:45,0.571,0.571 54 | 2/6/2018 13:00,0.571,0.714 55 | 2/6/2018 13:15,0.429,0.429 56 | 2/6/2018 13:30,0.571,0.571 57 | 2/6/2018 13:45,0.714,0.571 58 
| 2/6/2018 14:00,1,0.714 59 | 2/6/2018 14:15,1,0.714 60 | 2/6/2018 14:30,0.571,0.714 61 | 2/6/2018 14:45,0.429,0.571 62 | 2/6/2018 15:00,0.571,0.571 63 | 2/6/2018 15:15,0.571,0.714 64 | 2/6/2018 15:30,0.857,1 65 | 2/6/2018 15:45,0.857,0.714 66 | 2/6/2018 16:00,0.286,0.714 67 | 2/6/2018 16:15,1,0.571 68 | 2/6/2018 16:30,1,0.571 69 | 2/6/2018 16:45,0.286,1 70 | 2/6/2018 17:00,0.143,1 71 | 2/6/2018 17:15,0.857,0.714 72 | 2/6/2018 17:30,0.429,0.714 73 | 2/6/2018 17:45,0.714,0.857 74 | 2/6/2018 18:00,0.857,0.714 75 | 2/6/2018 18:15,0.429,0.857 76 | 2/6/2018 18:30,0.714,0 77 | 2/6/2018 18:45,0.571,0.143 78 | 2/6/2018 19:00,0.857,0 79 | 2/6/2018 19:15,0,0.857 80 | 2/6/2018 19:30,0.143,0.571 81 | 2/6/2018 19:45,0.286,0.571 82 | 2/6/2018 20:00,0,0.714 83 | 2/6/2018 20:15,0.571,0 84 | 2/6/2018 20:30,0.571,0.286 85 | 2/6/2018 20:45,0.571,0.286 86 | 2/6/2018 21:00,0.714,0.143 87 | 2/6/2018 21:15,0.143,0.571 88 | 2/6/2018 21:30,0.429,0.429 89 | 2/6/2018 21:45,0.571,0 90 | 2/6/2018 22:00,0.429,0.429 91 | 2/6/2018 22:15,0.714,0 92 | 2/6/2018 22:30,0.429,0.571 93 | 2/6/2018 22:45,0.714,0 94 | 2/6/2018 23:00,0.286,0.571 95 | 2/6/2018 23:15,0.571,0 96 | 2/6/2018 23:30,0.857,0 97 | 2/6/2018 23:45,0.286,1 98 | 2/7/2018 0:00,0.714,1 99 | 2/7/2018 0:15,0.857,1 100 | 2/7/2018 0:30,0.429,0.286 101 | -------------------------------------------------------------------------------- /swmm_mpc/tests/ctl_results_err.csv: -------------------------------------------------------------------------------- 1 | datetime,setting_ORIFICE R1,setting_ORIFICE R2 2 | 2/6/2018 0:00,1,1 3 | 2/6/2018 0:15,0.714,0.714 4 | 2/6/2018 0:30,0.143,0 5 | 2/6/2018 0:45,0.571,0.286 6 | 2/6/2018 1:00,0.286,0.571 7 | 2/6/2018 1:15,0.429,0.286 8 | 2/6/2018 1:30,0.143,0.429 9 | 2/6/2018 1:45,0.143,0.571 10 | 2/6/2018 2:00,0.286,0.286 11 | 2/6/2018 2:15,0.143,0.429 12 | 2/6/2018 2:30,0.143,0.571 13 | 2/6/2018 2:45,0.429,0.143 14 | 2/6/2018 3:00,0.286,0.286 15 | 2/6/2018 3:15,0.286,0.286 16 | 2/6/2018 3:30,0.143,0.286 17 | 2/6/2018 3:45,0.429,0.143 18 | 2/6/2018 4:00,0.143,0.429 19 | 2/6/2018 4:15,0.143,0.286 20 | 2/6/2018 4:15,0.714,0.429 21 | 2/6/2018 4:30,0.286,0.429 22 | 2/6/2018 4:30,0.286,0.429 23 | 2/6/2018 4:45,0.714,0.286 24 | 2/6/2018 4:45,0.714,1 25 | 2/6/2018 5:00,0.286,0.714 26 | 2/6/2018 5:15,0.286,0.429 27 | 2/6/2018 5:30,0.286,0.429 28 | 2/6/2018 5:45,0.429,0.714 29 | 2/6/2018 6:00,0.286,0.429 30 | 2/6/2018 6:15,0.286,0.429 31 | 2/6/2018 6:15,1,0.857 32 | 2/6/2018 6:30,0.429,0.286 33 | 2/6/2018 6:30,0.286,0.429 34 | 2/6/2018 6:45,0.429,0.143 35 | 2/6/2018 6:45,0.857,0.571 36 | 2/6/2018 7:00,0.571,0.286 37 | 2/6/2018 7:15,0.429,0.714 38 | 2/6/2018 7:30,0.286,0.429 39 | 2/6/2018 7:45,0.571,0.286 40 | 2/6/2018 8:00,0.571,0.857 41 | 2/6/2018 8:15,0.286,0.286 42 | 2/6/2018 8:30,0.286,0.286 43 | 2/6/2018 8:45,0.286,0.429 44 | 2/6/2018 9:00,0.143,0.571 45 | 2/6/2018 9:15,0.429,0.429 46 | 2/6/2018 9:30,0.571,0.286 47 | 2/6/2018 9:45,0.429,0.286 48 | 2/6/2018 10:00,0.286,0.286 49 | 2/6/2018 10:15,0.286,0.286 50 | 2/6/2018 10:30,0.286,0.429 51 | 2/6/2018 10:45,0.429,0.286 52 | 2/6/2018 11:00,0.429,0.286 53 | 2/6/2018 11:15,0.286,0.429 54 | 2/6/2018 11:30,1,0.571 55 | 2/6/2018 11:45,0.429,0.571 56 | 2/6/2018 12:00,0.571,0.429 57 | 2/6/2018 12:15,0.429,0.571 58 | 2/6/2018 12:30,0.429,0.429 59 | 2/6/2018 12:45,0.571,0.571 60 | 2/6/2018 13:00,0.571,0.714 61 | 2/6/2018 13:15,0.429,0.429 62 | 2/6/2018 13:30,0.571,0.571 63 | 2/6/2018 13:45,0.714,0.571 64 | 2/6/2018 14:00,1,0.714 65 | 2/6/2018 14:15,1,0.714 66 | 2/6/2018 14:30,0.571,0.714 67 | 2/6/2018 
14:45,0.429,0.571 68 | 2/6/2018 15:00,0.571,0.571 69 | 2/6/2018 15:15,0.571,0.714 70 | 2/6/2018 15:30,0.857,1 71 | 2/6/2018 15:45,0.857,0.714 72 | 2/6/2018 16:00,0.286,0.714 73 | 2/6/2018 16:15,1,0.571 74 | 2/6/2018 16:30,1,0.571 75 | 2/6/2018 16:45,0.286,1 76 | 2/6/2018 17:00,0.143,1 77 | 2/6/2018 17:15,0.857,0.714 78 | 2/6/2018 17:30,0.429,0.714 79 | 2/6/2018 17:45,0.714,0.857 80 | 2/6/2018 18:00,0.857,0.714 81 | 2/6/2018 18:15,0.429,0.857 82 | 2/6/2018 18:30,0.714,0 83 | 2/6/2018 18:45,0.571,0.143 84 | 2/6/2018 19:00,0.857,0 85 | 2/6/2018 19:15,0,0.857 86 | 2/6/2018 19:30,0.143,0.571 87 | 2/6/2018 19:45,0.286,0.571 88 | 2/6/2018 20:00,0,0.714 89 | 2/6/2018 20:15,0.571,0 90 | 2/6/2018 20:30,0.571,0.286 91 | 2/6/2018 20:45,0.571,0.286 92 | 2/6/2018 21:00,0.714,0.143 93 | 2/6/2018 21:15,0.143,0.571 94 | 2/6/2018 21:30,0.429,0.429 95 | 2/6/2018 21:45,0.571,0 96 | 2/6/2018 22:00,0.429,0.429 97 | 2/6/2018 22:15,0.714,0 98 | 2/6/2018 22:30,0.429,0.571 99 | 2/6/2018 22:45,0.714,0 100 | 2/6/2018 23:00,0.286,0.571 101 | 2/6/2018 23:15,0.571,0 102 | 2/6/2018 23:30,0.857,0 103 | 2/6/2018 23:45,0.286,1 104 | 2/7/2018 0:00,0.714,1 105 | 2/7/2018 0:15,0.857,1 106 | 2/7/2018 0:30,0.429,0.286 107 | -------------------------------------------------------------------------------- /swmm_mpc/tests/example_rules_orifices.txt: -------------------------------------------------------------------------------- 1 | [CONTROLS] 2 | RULE R0 3 | IF SIMULATION TIME < 0.250 4 | THEN ORIFICE r1 SETTING = 0.714 5 | 6 | RULE R1 7 | IF SIMULATION TIME < 0.500 8 | THEN ORIFICE r1 SETTING = 0.857 9 | 10 | RULE R2 11 | IF SIMULATION TIME < 0.750 12 | THEN ORIFICE r1 SETTING = 0.523 13 | 14 | RULE R3 15 | IF SIMULATION TIME < 1.000 16 | THEN ORIFICE r1 SETTING = 0.451 17 | 18 | RULE R4 19 | IF SIMULATION TIME < 0.250 20 | THEN ORIFICE r2 SETTING = 0.124 21 | 22 | RULE R5 23 | IF SIMULATION TIME < 0.500 24 | THEN ORIFICE r2 SETTING = 0.512 25 | 26 | RULE R6 27 | IF SIMULATION TIME < 0.750 28 | THEN ORIFICE r2 SETTING = 0.857 29 | 30 | RULE R7 31 | IF SIMULATION TIME < 1.000 32 | THEN ORIFICE r2 SETTING = 0.543 33 | 34 | -------------------------------------------------------------------------------- /swmm_mpc/tests/example_rules_pumps.txt: -------------------------------------------------------------------------------- 1 | [CONTROLS] 2 | RULE R0 3 | IF SIMULATION TIME < 0.250 4 | THEN ORIFICE r1 SETTING = 0.714 5 | 6 | RULE R1 7 | IF SIMULATION TIME < 0.500 8 | THEN ORIFICE r1 SETTING = 0.857 9 | 10 | RULE R2 11 | IF SIMULATION TIME < 0.250 12 | THEN PUMP p1 STATUS = OFF 13 | 14 | RULE R3 15 | IF SIMULATION TIME < 0.500 16 | THEN PUMP p1 STATUS = ON 17 | 18 | -------------------------------------------------------------------------------- /swmm_mpc/tests/test_evaluate.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from swmm_mpc import evaluate 3 | from swmm_mpc.rpt_ele import rpt_ele 4 | 5 | 6 | class test_evaluate(unittest.TestCase): 7 | rpt_file = "example.rpt" 8 | rpt = rpt_ele(rpt_file) 9 | ctl_str_ids = ["ORIFICE r1", "PUMP p1"] 10 | 11 | def test_get_flood_cost_no_dict(self): 12 | node_fld_wgt_dict = None 13 | cost = evaluate.get_flood_cost(self.rpt, node_fld_wgt_dict) 14 | self.assertEqual(cost, 0.320) 15 | 16 | def test_get_flood_cost_dict(self): 17 | node_fld_wgt_dict = {"J3": 1, "St1": 1, "St2": 1} 18 | cost = evaluate.get_flood_cost(self.rpt, node_fld_wgt_dict) 19 | self.assertEqual(cost, 0.640) 20 | 21 | def test_gene_to_policy_dict(self): 22 | gene = [1, 
0, 1, 1, 1, 0, 0, 1] 23 | n_ctl_steps = 2 24 | policy = evaluate.gene_to_policy_dict(gene, self.ctl_str_ids, 25 | n_ctl_steps) 26 | self.assertEqual(policy, {'ORIFICE r1': [0.714, 0.857], 27 | 'PUMP p1': ['OFF', 'ON']}) 28 | 29 | def test_bits_to_perc(self): 30 | bits = [1, 1, 0, 1] 31 | perc = evaluate.bits_to_perc(bits) 32 | self.assertEqual(perc, 0.867) 33 | 34 | def test_bits_to_decimal(self): 35 | bits = [1, 0, 1, 1] 36 | dec = evaluate.bits_to_decimal(bits) 37 | self.assertEqual(dec, 11) 38 | 39 | def test_bits_max_val(self): 40 | bit_len = 8 41 | max_val = evaluate.bits_max_val(bit_len) 42 | self.assertEqual(max_val, 255) 43 | 44 | def test_list_to_policy(self): 45 | gene = [0.4, 0.2, 0.1, 0.6, 0.2, 0] 46 | n_ctl_steps = 3 47 | policy = evaluate.list_to_policy(gene, self.ctl_str_ids, n_ctl_steps) 48 | self.assertEqual(policy, {'ORIFICE r1': [0.4, 0.2, 0.1], 49 | 'PUMP p1': ['ON', 'OFF', 'OFF']}) 50 | 51 | def test_split_gene_by_ctl_ts(self): 52 | ctl_str_ids = ["ORIFICE r1", "ORIFICE p1"] 53 | gene = [1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1] 54 | n_steps = 2 55 | split = evaluate.split_gene_by_ctl_ts(gene, ctl_str_ids, n_steps) 56 | expected = [[[1, 0, 1], [0, 0, 1]], [[1, 0, 0], [1, 1, 1]]] 57 | self.assertEqual(expected, split) 58 | 59 | 60 | def test_split_list(self): 61 | l = [1, 2, 3, 4, 5, 6] 62 | n = 2 63 | split = evaluate.split_list(l, n) 64 | expected = [[1, 2, 3], [4, 5, 6]] 65 | self.assertEqual(split, expected) 66 | 67 | n = 3 68 | split = evaluate.split_list(l, n) 69 | expected = [[1, 2], [3, 4], [5, 6]] 70 | self.assertEqual(split, expected) 71 | 72 | 73 | 74 | if __name__ == '__main__': 75 | unittest.main() 76 | -------------------------------------------------------------------------------- /swmm_mpc/tests/test_rpt_ele.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from swmm_mpc.rpt_ele import rpt_ele 3 | 4 | 5 | class test_rpt_ele(unittest.TestCase): 6 | test_rpt_file = "example.rpt" 7 | rpt = rpt_ele(test_rpt_file) 8 | 9 | def test_total_flood(self): 10 | true_flood_vol = 0.320 11 | self.assertEqual(true_flood_vol, self.rpt.total_flooding) 12 | 13 | def test_get_start_line(self): 14 | start_text = 'Infiltration Method' 15 | start_line = self.rpt.get_start_line(start_text) 16 | self.assertEqual(start_line, 23) 17 | 18 | start_text = 'Node Surcharge Summary' 19 | start_line = self.rpt.get_start_line(start_text) 20 | self.assertEqual(start_line, 138) 21 | 22 | def test_get_end_line(self): 23 | start_text = 'Node Depth Summary' 24 | start_line = self.rpt.get_start_line(start_text) 25 | end_line = self.rpt.get_end_line(start_line) 26 | self.assertEqual(end_line, 118) 27 | 28 | 29 | 30 | if __name__ == '__main__': 31 | unittest.main() 32 | -------------------------------------------------------------------------------- /swmm_mpc/tests/test_run_swmm_mpc.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from swmm_mpc import swmm_mpc as sm 3 | import datetime 4 | import random 5 | 6 | 7 | class test_swmm_mpc(unittest.TestCase): 8 | 9 | def test_validate_control_str_ids(self): 10 | control_str_ids_valid = ['ORIFICE ja', 'WEIR blah', 'PUMP p'] 11 | sm.validate_ctl_str_ids(control_str_ids_valid) 12 | control_str_ids_invalid = ['ORFICE ja', 'WEIR blah', 'PUMP p'] 13 | with self.assertRaises(ValueError): 14 | sm.validate_ctl_str_ids(control_str_ids_invalid) 15 | 16 | 17 | def test_save_results_file(self): 18 | pass 19 | 20 | 21 | def 
test_update_policy_ts_list(self): 22 | fmtd_policy = {'ORIFICE R1': [0.1, 0.2, 0.1], 23 | 'WEIR W1': [0.3, 0.4, 0.3] 24 | } 25 | dt = datetime.datetime.strptime("10/08/2018 12:15", "%m/%d/%Y %H:%M") 26 | ctl_time_step = 900 27 | best_policy_ts = [] 28 | cost = 1 29 | updated_ts = sm.update_policy_ts_list(fmtd_policy, dt, ctl_time_step, 30 | best_policy_ts, cost) 31 | self.assertEqual(len(updated_ts), 2) 32 | expected_ts = [{'datetime': dt, 'setting_ORIFICE R1': 0.1}, 33 | {'datetime': dt, 'setting_WEIR W1': 0.3}] 34 | self.assertItemsEqual(updated_ts, expected_ts) 35 | 36 | cost = 0 37 | best_policy_ts = [] 38 | ts_zero = sm.update_policy_ts_list(fmtd_policy, dt, ctl_time_step, 39 | best_policy_ts, cost) 40 | self.assertEqual(len(ts_zero), 6) 41 | 42 | dt1 = datetime.datetime.strptime("10/08/2018 12:30", "%m/%d/%Y %H:%M") 43 | dt2 = datetime.datetime.strptime("10/08/2018 12:45", "%m/%d/%Y %H:%M") 44 | expected_ts = [{'datetime': dt, 'setting_ORIFICE R1': 0.1}, 45 | {'datetime': dt1, 'setting_ORIFICE R1': 0.2}, 46 | {'datetime': dt2, 'setting_ORIFICE R1': 0.1}, 47 | {'datetime': dt, 'setting_WEIR W1': 0.3}, 48 | {'datetime': dt1, 'setting_WEIR W1': 0.4}, 49 | {'datetime': dt2, 'setting_WEIR W1': 0.3}] 50 | 51 | self.assertItemsEqual(ts_zero, expected_ts) 52 | 53 | 54 | def test_get_initial_guess(self): 55 | best_pol = [0.24, 0.3, 0.22, 0.1, 0.04, 0.6] 56 | ctl_str_ids = ['ORIFICE r1', 'WEIR w1'] 57 | new_guess = sm.get_initial_guess(best_pol, ctl_str_ids) 58 | print new_guess 59 | self.assertEqual(len(best_pol), len(new_guess)) 60 | self.assertEqual(best_pol[1], new_guess[0]) 61 | self.assertEqual(best_pol[4], new_guess[3]) 62 | 63 | 64 | if __name__ == '__main__': 65 | unittest.main() 66 | -------------------------------------------------------------------------------- /swmm_mpc/tests/test_update_process_model_input_file.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import pandas as pd 3 | from swmm_mpc import update_process_model_input_file as up 4 | 5 | 6 | class test_update_process_model_input_file(unittest.TestCase): 7 | 8 | 9 | def test_get_control_rule_string_pump(self): 10 | policy = {'PUMP p1': ['OFF', 'ON'], 'ORIFICE r1': [0.714, 0.857]} 11 | control_time_step = 900 12 | ctl_rule_str = up.get_control_rule_string(control_time_step, policy) 13 | with file('example_rules_pumps.txt', 'r') as rules_file: 14 | expected_str = rules_file.readlines() 15 | self.assertEqual(expected_str, ctl_rule_str) 16 | 17 | 18 | def test_get_control_rule_string_just_orifice(self): 19 | policy = {'ORIFICE r1': [0.714, 0.857, 0.523, 0.451], 20 | 'ORIFICE r2': [0.124, 0.512, 0.857, 0.543]} 21 | control_time_step = 900 22 | ctl_rule_str = up.get_control_rule_string(control_time_step, policy) 23 | with file('example_rules_orifices.txt', 'r') as rules_file: 24 | expected_str = rules_file.readlines() 25 | self.assertEqual(expected_str, ctl_rule_str) 26 | 27 | 28 | def test_get_ctl_time_step(self): 29 | policy_file = 'ctl_results.csv' 30 | pol_df = pd.read_csv(policy_file) 31 | time_step = up.get_control_time_step(pol_df) 32 | self.assertEqual(time_step, 900) 33 | 34 | 35 | def test_get_ctl_time_step_err(self): 36 | policy_file = 'ctl_results_err.csv' 37 | pol_df = pd.read_csv(policy_file) 38 | with self.assertRaises(Exception): 39 | time_step = up.get_control_time_step(pol_df) 40 | 41 | 42 | if __name__ == '__main__': 43 | unittest.main() 44 | -------------------------------------------------------------------------------- 
/swmm_mpc/update_process_model_input_file.py: -------------------------------------------------------------------------------- 1 | import re 2 | import pandas as pd 3 | 4 | 5 | def update_simulation_date_time(lines, start_line, new_datetime): 6 | """ 7 | replace both the analysis and reporting start date and times 8 | """ 9 | new_date = new_datetime.strftime("%m/%d/%Y") 10 | new_time = new_datetime.strftime("%H:%M:%S") 11 | lines[start_line] = re.sub(r'\d{2}/\d{2}/\d{4}', new_date, 12 | lines[start_line]) 13 | lines[start_line+1] = re.sub(r'\d{2}:\d{2}:\d{2}', new_time, 14 | lines[start_line+1]) 15 | lines[start_line+2] = re.sub(r'\d{2}/\d{2}/\d{4}', new_date, 16 | lines[start_line+2]) 17 | lines[start_line+3] = re.sub(r'\d{2}:\d{2}:\d{2}', new_time, 18 | lines[start_line+3]) 19 | return lines 20 | 21 | 22 | def update_process_model_file(inp_file, new_date_time, hs_file): 23 | with open(inp_file, 'r') as tmp_file: 24 | lines = tmp_file.readlines() 25 | 26 | # update date and times 27 | date_section_start, date_section_end = find_section(lines, "START_DATE") 28 | update_simulation_date_time(lines, date_section_start, new_date_time) 29 | 30 | # update to use hotstart file 31 | file_section_start, file_section_end = find_section(lines, "[FILES]") 32 | new_hotstart_string = get_file_section_string(hs_file) 33 | lines = update_section(lines, new_hotstart_string, file_section_start, 34 | file_section_end) 35 | 36 | with open(inp_file, 'w') as tmp_file: 37 | tmp_file.writelines(lines) 38 | 39 | 40 | def find_section(lines, section_name): 41 | start_line = None 42 | end_line = None 43 | for i, l in enumerate(lines): 44 | if l.startswith(section_name): 45 | start_line = i 46 | for j, ll in enumerate(lines[i+1:]): 47 | if ll.startswith("["): 48 | end_line = j + i 49 | break 50 | if not end_line: 51 | end_line = len(lines) 52 | return start_line, end_line 53 | 54 | 55 | def update_section(lines, new_lines, old_section_start=None, 56 | old_section_end=None): 57 | """ 58 | lines: list of strings; text of .inp file read into list of strings 59 | new_lines: list of strings; list of strings for replacing old section 60 | old_section_start: int; position of line where replacing should start 61 | (if both start and end are 'None', the new lines are 62 | appended to the end of the file) 63 | old_section_end: int; position of line where replacing should end 64 | 65 | """ 66 | if old_section_start and old_section_end: 67 | del lines[old_section_start: old_section_end] 68 | else: 69 | old_section_start = len(lines) 70 | 71 | lines[old_section_start: old_section_start] = new_lines 72 | return lines 73 | 74 | 75 | def get_file_section_string(hs_filename): 76 | new_lines = ["[FILES] \n"] 77 | new_lines.append('USE HOTSTART "{}"\n \n'.format(hs_filename)) 78 | return new_lines 79 | 80 | 81 | def get_control_rule_string(control_time_step, policies): 82 | """ 83 | Write control rules from the policies. 84 | """ 85 | new_lines = ["[CONTROLS]\n"] 86 | rule_number = 0 87 | # control_time_step is in seconds. convert to hours 88 | control_time_step_hours = control_time_step/3600.
89 | for structure_id in policies: 90 | structure_type = structure_id.split()[0] 91 | for i, policy_step in enumerate(policies[structure_id]): 92 | l1 = "RULE R{}\n".format(rule_number) 93 | l2 = "IF SIMULATION TIME < {:.3f}\n".format( 94 | (i+1) * control_time_step_hours) 95 | # check the structure type to write 'SETTING' or 'STATUS' 96 | if structure_type == 'ORIFICE' or structure_type == 'WEIR': 97 | sttg_or_status = 'SETTING' 98 | elif structure_type == 'PUMP': 99 | sttg_or_status = 'STATUS' 100 | l3 = "THEN {} {} = {}\n".format(structure_id, sttg_or_status, 101 | policy_step) 102 | l4 = "\n" 103 | new_lines.extend([l1, l2, l3, l4]) 104 | rule_number += 1 105 | return new_lines 106 | 107 | 108 | def update_controls_and_hotstart(inp_file, control_time_step, policies, 109 | hs_file=None): 110 | """ 111 | control_time_step: number; in seconds 112 | policies: dict; structure id (e.g., ORIFICE R1) as key, list of settings 113 | as value 114 | 115 | """ 116 | with open(inp_file, 'r') as inpfile: 117 | lines = inpfile.readlines() 118 | 119 | control_line, end_control_line = find_section(lines, "[CONTROLS]") 120 | 121 | control_rule_string = get_control_rule_string(control_time_step, policies) 122 | updated_lines = update_section(lines, control_rule_string, control_line, 123 | end_control_line) 124 | 125 | if hs_file: 126 | file_section_start, file_section_end = find_section(updated_lines, 127 | "[FILES]") 128 | hs_lines = get_file_section_string(hs_file) 129 | updated_lines = update_section(updated_lines, hs_lines, 130 | file_section_start, 131 | file_section_end) 132 | 133 | with open(inp_file, 'w') as inpfile: 134 | inpfile.writelines(updated_lines) 135 | 136 | 137 | def update_controls_with_policy(inp_file, policy_file): 138 | policy_df = pd.read_csv(policy_file) 139 | control_time_step = get_control_time_step(policy_df) 140 | policy_columns = [col for col in policy_df.columns if "setting" in col] 141 | policy_dict = {} 142 | for policy_col in policy_columns: 143 | structure_id = policy_col.split("_")[-1] 144 | policy_dict[structure_id] = policy_df[policy_col].tolist() 145 | 146 | update_controls_and_hotstart(inp_file, control_time_step, policy_dict) 147 | 148 | 149 | def remove_control_section(inp_file): 150 | with open(inp_file, 'r') as inpfile: 151 | lines = inpfile.readlines() 152 | 153 | control_line, end_control_line = find_section(lines, "[CONTROLS]") 154 | if control_line and end_control_line: 155 | del lines[control_line: end_control_line] 156 | 157 | with open(inp_file, 'w') as inpfile: 158 | inpfile.writelines(lines) 159 | 160 | 161 | def read_hs_filename(inp_file): 162 | with open(inp_file, 'r') as f: 163 | for line in f: 164 | if line.startswith("USE HOTSTART"): 165 | hs_filename = line.split()[-1].replace('"', '') 166 | return hs_filename 167 | 168 | 169 | def get_control_time_step(df, dt_col="datetime"): 170 | times = pd.to_datetime(df[dt_col]) 171 | delta_times = times.diff() 172 | time_step = delta_times.mean().seconds 173 | if not time_step % 60: 174 | return time_step 175 | # if it's within a second or two of a whole minute, round to that minute 176 | elif time_step % 60 < 3 or time_step % 60 > 57: 177 | time_step = int(round(time_step / 60.0) * 60) 178 | return time_step 179 | else: 180 | raise Exception("The time step in your file is not a whole number of minutes") 181 | --------------------------------------------------------------------------------
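
Usage sketch (illustrative only, not shipped with the repository): `update_process_model_input_file.py` is easiest to follow end to end. A policy dict maps each controlled structure to one setting per control step; `get_control_rule_string` turns that into timed `[CONTROLS]` rules, and `update_controls_and_hotstart` splices the rules (plus an optional hotstart `[FILES]` block) into the `.inp` file. The file names below (`example.inp`, `hotstart1.hsf`, `ctl_results.csv`) are placeholders.

```python
from swmm_mpc import update_process_model_input_file as up

# ORIFICE/WEIR settings are fractions open; PUMP settings are ON/OFF.
# With a 900 s (0.25 h) step, rule i holds while SIMULATION TIME < (i+1)*0.25.
policy = {"ORIFICE r1": [0.714, 0.857],
          "PUMP p1": ["OFF", "ON"]}
up.update_controls_and_hotstart("example.inp", 900, policy,
                                hs_file="hotstart1.hsf")

# Alternatively, replay a saved MPC run. The CSV needs a "datetime" column
# plus one "setting_<STRUCTURE ID>" column per structure (see
# tests/ctl_results.csv); the control time step is inferred from the
# datetime column by get_control_time_step.
up.update_controls_with_policy("example.inp", "ctl_results.csv")
```

Similarly, `get_initial_guess` in `swmm_mpc.py` warm-starts each optimization: the previous best policy is split per structure, shifted left by one control step, and padded with a random setting, so the search over the next horizon starts near the last optimum. A minimal sketch, assuming a flat, structure-major policy list:

```python
from swmm_mpc import swmm_mpc as sm

# Two structures, three control steps each.
best_pol = [0.24, 0.3, 0.22,   # ORIFICE r1 steps
            0.1, 0.04, 0.6]    # WEIR w1 steps
guess = sm.get_initial_guess(best_pol, ['ORIFICE r1', 'WEIR w1'])
# guess == [0.3, 0.22, <random>, 0.04, 0.6, <random>]
```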