├── data ├── case_49 │ ├── raw_data_for_generating_supply_and_flow.txt │ ├── nodes_data.csv │ └── arcs_data.csv ├── case_6_node │ ├── node_data.csv │ └── arc_data.csv ├── node_position.csv ├── nodes_data.csv └── arcs_data.csv ├── ICOSSAR_2021_manuscript.pdf ├── plots ├── repair_schedule_bc.pdf ├── repair_schedule_cc.pdf ├── repair_schedule_dc.pdf ├── resilience_0.2_flow.pdf ├── resilience_0.2_topo.pdf ├── resilience_0.6_flow.pdf ├── resilience_0.6_topo.pdf ├── compare_attack_types.pdf └── repair_schedule_randomness.pdf ├── README.md ├── data.py ├── src └── simulate initial supply and flow │ ├── def.jl │ ├── readin.jl │ ├── gen_initial_supply_and_flow.jl │ └── plot_networks_and_check_cycles.py ├── utils.py ├── infrasnetwork.py └── main.py /data/case_49/raw_data_for_generating_supply_and_flow.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ICOSSAR_2021_manuscript.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jinzhuyu/ICOSSAR2021-NetworkResilienceAssessment/HEAD/ICOSSAR_2021_manuscript.pdf -------------------------------------------------------------------------------- /plots/repair_schedule_bc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jinzhuyu/ICOSSAR2021-NetworkResilienceAssessment/HEAD/plots/repair_schedule_bc.pdf -------------------------------------------------------------------------------- /plots/repair_schedule_cc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jinzhuyu/ICOSSAR2021-NetworkResilienceAssessment/HEAD/plots/repair_schedule_cc.pdf -------------------------------------------------------------------------------- /plots/repair_schedule_dc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jinzhuyu/ICOSSAR2021-NetworkResilienceAssessment/HEAD/plots/repair_schedule_dc.pdf -------------------------------------------------------------------------------- /plots/resilience_0.2_flow.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jinzhuyu/ICOSSAR2021-NetworkResilienceAssessment/HEAD/plots/resilience_0.2_flow.pdf -------------------------------------------------------------------------------- /plots/resilience_0.2_topo.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jinzhuyu/ICOSSAR2021-NetworkResilienceAssessment/HEAD/plots/resilience_0.2_topo.pdf -------------------------------------------------------------------------------- /plots/resilience_0.6_flow.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jinzhuyu/ICOSSAR2021-NetworkResilienceAssessment/HEAD/plots/resilience_0.6_flow.pdf -------------------------------------------------------------------------------- /plots/resilience_0.6_topo.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jinzhuyu/ICOSSAR2021-NetworkResilienceAssessment/HEAD/plots/resilience_0.6_topo.pdf -------------------------------------------------------------------------------- /plots/compare_attack_types.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jinzhuyu/ICOSSAR2021-NetworkResilienceAssessment/HEAD/plots/compare_attack_types.pdf -------------------------------------------------------------------------------- /plots/repair_schedule_randomness.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jinzhuyu/ICOSSAR2021-NetworkResilienceAssessment/HEAD/plots/repair_schedule_randomness.pdf -------------------------------------------------------------------------------- /data/case_6_node/node_data.csv: -------------------------------------------------------------------------------- 1 | net_id,node_id,demand,supply,supply_cap,y_node_init,redun_rate 2 | 1,1,0,2,2.4,0,0.2 3 | 1,2,0,0,0,0,0.2 4 | 1,3,1,0,0,0,0.2 5 | 1,4,1,0,0,0,0.2 6 | 2,5,0,0.5,0.6,0,0.2 7 | 2,6,0,0,0,0,0.2 8 | -------------------------------------------------------------------------------- /data/case_6_node/arc_data.csv: -------------------------------------------------------------------------------- 1 | net_id,start_node,end_node,flow,flow_cap,redun_rate,y_arc_init,conv_rate 2 | 1,1,2,2,2.4,0.2,0,1 3 | 1,2,3,1,1.2,0.2,0,1 4 | 1,2,4,1,1.2,0.2,0,1 5 | 2,5,6,0.5,0.6,0.2,0,1 6 | 2,6,1,0.5,0.6,0.2,0,10 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | This repo contains the code for the ICOSSAR 2021 paper titled *Comparing Topology-based and Flow-based Resilience Assessment of Interdependent Infrastructure Networks*. 2 | 3 | We kindly ask you to cite our paper if you find our code helpful. 4 | 5 | ``` 6 | @inproceedings{yu2021comparing, 7 | title={Comparing topology-based and flow-based resilience assessment of interdependent infrastructure networks}, 8 | author={Yu, Jin-Zhu and Wang, Yu and Baroud, Hiba}, 9 | booktitle={Proceedings of the 13th International Conference on Structural Safety and Reliability}, 10 | year={2021} 11 | } 12 | ``` 13 | 14 | 15 | To get the results in the paper, simply run the "main.py" file. Feel free to reach out if you have questions about the code.
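For example, assuming Python 3 and the prerequisites listed below are installed:

```
python main.py
```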
16 | 17 | ## Prerequisites 18 | 19 | Julia/JuMP 20 | 21 | Gurobi 22 | 23 | Python MIP (Mixed-Integer Linear Programming) Tools 24 | -------------------------------------------------------------------------------- /data/node_position.csv: -------------------------------------------------------------------------------- 1 | net_id,node_id,pos_x,pos_y 2 | 1,K1,-1.5,-0.5 3 | 1,K2,-1.5,0.9 4 | 1,K3,-2.7,-1.4 5 | 1,K4,-2.3,-0.5 6 | 1,K5,-2.1,0.7 7 | 1,K6,-2.7,2 8 | 1,K7,-1.5,2 9 | 1,K8,-2.06,2 10 | 1,K9,-2.7,0.5 11 | 1,K10,-2.7,1.2 12 | 1,K11,-3.4,0.7 13 | 1,K12,-3.3,1.2 14 | 1,K13,-4,1.8 15 | 1,K14,-4,0 16 | 1,K15,-4,-1.1 17 | 1,K16,-4.75,-1.5 18 | 1,K17,-5.4,-2 19 | 1,K18,-6.2,-1 20 | 1,K19,-4.75,0.3 21 | 1,K20,-4.75,0.95 22 | 1,K21,-6.2,-0.1 23 | 1,K22,-6.2,1 24 | 1,K23,-4.75,1.8 25 | 1,K24,-3.3,-2 26 | 2,J1,3.2,-2 27 | 2,J2,3.1,-0.5 28 | 2,J3,2,-0.5 29 | 2,J4,1.5,-1.5 30 | 2,J5,1,-1.5 31 | 2,J6,0,-1.5 32 | 2,J7,0.6,-0.5 33 | 2,J8,0,-0.5 34 | 2,J9,3.1,0.4 35 | 2,J10,3.7,0.4 36 | 2,J11,4.25,0.4 37 | 2,J12,5,0.4 38 | 2,J13,5,-1.1 39 | 2,J14,3.1,1 40 | 2,J15,2.65,1.5 41 | 2,J16,3.7,1.5 42 | 2,J17,4.25,1 43 | 2,J18,5,1 44 | 2,J19,5,2 45 | 2,J20,2,1.5 46 | 2,J21,1.5,1 47 | 2,J22,1,1 48 | 2,J23,0.6,1.5 49 | 2,J24,0,1.5 50 | 2,J25,0,1 51 | -------------------------------------------------------------------------------- /data/nodes_data.csv: -------------------------------------------------------------------------------- 1 | net_id,node_id,demand,supply_cap,supply 2 | 1,K1,86.4,192,45 3 | 1,K2,77.6,192,0 4 | 1,K3,144,0,0 5 | 1,K4,59.2,0,0 6 | 1,K5,56.8,0,0 7 | 1,K6,108.8,0,0 8 | 1,K7,100,300,0 9 | 1,K8,136.8,0,0 10 | 1,K9,140,0,0 11 | 1,K10,156,0,0 12 | 1,K11,0,0,0 13 | 1,K12,0,0,0 14 | 1,K13,212,591,0 15 | 1,K14,155.2,0,0 16 | 1,K15,253.6,215,172 17 | 1,K16,80,155,7.074579464 18 | 1,K17,0,0,0 19 | 1,K18,266.4,400,228.8745795 20 | 1,K19,144.8,0,0 21 | 1,K20,102.4,0,0 22 | 1,K21,0,400,0 23 | 1,K22,0,300,0 24 | 1,K23,0,660,102.4 25 | 1,K24,0,0,0 26 | 2,J1,0,5.673821802,2.296815801 27 | 2,J2,0,0,0 28 | 2,J3,0,0,0 29 | 2,J4,0,0,0 30 | 2,J5,0,0,0 31 | 2,J6,0.692244656,0,0 32 | 2,J7,0,0,0 33 | 2,J8,0,0,0 34 | 2,J9,0,0,0 35 | 2,J10,0,0,0 36 | 2,J11,0,0,0 37 | 2,J12,0.467021902,0,0 38 | 2,J13,0,0,0 39 | 2,J14,0,0,0 40 | 2,J15,0,0,0 41 | 2,J16,0,0,0 42 | 2,J17,0,0,0 43 | 2,J18,0.484208308,0,0 44 | 2,J19,0,0,0 45 | 2,J20,0,0,0 46 | 2,J21,0,0,0 47 | 2,J22,0,0,0 48 | 2,J23,0,0,0 49 | 2,J24,0,0,0 50 | 2,J25,0.653340935,0,0 51 | -------------------------------------------------------------------------------- /data.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ This script contains functions for preprocessing the data 3 | """ 4 | import pandas as pd 5 | #import numpy as np 6 | #import networkx as nx 7 | import os 8 | 9 | dir_path = os.path.dirname(os.path.realpath(__file__)) # path of the directory where the current script is located 10 | 11 | nodedata = pd.read_csv('./data/nodes_data.csv') 12 | arcdata = pd.read_csv('./data/arcs_data.csv') 13 | 14 | p_nodenum = 24 15 | g_nodenum = 25 16 | p_nodedata = nodedata.iloc[0:p_nodenum, :] 17 | g_nodedata = nodedata.iloc[p_nodenum:(p_nodenum + g_nodenum), :] 18 | 19 | p_arcnum = 34 20 | g_arcnum = 24 21 | p2g_arcnum = 4 22 | #g2p_arcnum = 1 23 | p_arcdata = arcdata.iloc[0:p_arcnum, :] 24 | g_arcdata = arcdata.iloc[p_arcnum:(p_arcnum + g_arcnum), :] 25 | g2p_arcdata = arcdata.iloc[(p_arcnum + g_arcnum):(p_arcnum + g_arcnum + p2g_arcnum), :] 26 | #p2g_arcdata = arcdata.iloc[(p_arcnum + g_arcnum
+ p2g_arcnum):(p_arcnum + g_arcnum + p2g_arcnum + g2p_arcnum), :] 27 | 28 | # power to gas link is removed for now because the current flow redistribution cannot handle cycles with flow 29 | 30 | 31 | -------------------------------------------------------------------------------- /src/simulate initial supply and flow/def.jl: -------------------------------------------------------------------------------- 1 | # define the struct types 2 | struct netData 3 | # in network 4 | IDList :: Array{Any,1} # [1, 2, 3, ...] 5 | inbrList :: Array{Any,1} # [(1,2), (1,3), ...] 6 | inbr1 :: Dict{Any,Any} # [1]: [2, 3, ...],[2]: 7 | inbr2 :: Dict{Any,Any} # [1]: [2, 3, ...],[2]: 8 | 9 | # restoration time 10 | rtn :: Dict{Any,Any} 11 | rta :: Dict{Any,Any} 12 | 13 | # costs 14 | chn :: Dict{Any,Any} 15 | cha :: Dict{Any,Any} 16 | csn :: Dict{Any,Any} 17 | crn :: Dict{Any,Any} 18 | cra :: Dict{Any,Any} 19 | 20 | # demand, supply capacity, flow capacity 21 | b :: Dict{Any,Any} 22 | sc :: Dict{Any,Any} 23 | u :: Dict{Any,Any} 24 | csc :: Dict{Any,Any} 25 | end 26 | 27 | # data structure for the inter network arcs 28 | struct interData 29 | startNet :: Int64 30 | endNet :: Int64 31 | startNode :: Any 32 | endNode :: Any 33 | convRate :: Float64 34 | u :: Float64 35 | end 36 | 37 | # data structure for the scenarios 38 | struct scenarioData 39 | dNodes :: Array{Any,1} 40 | dArcs :: Array{Any,1} 41 | end 42 | -------------------------------------------------------------------------------- /data/case_49/nodes_data.csv: -------------------------------------------------------------------------------- 1 | net_id,node_id,b,sc,csc 2 | 1,K1,86.4,192,27.78 3 | 1,K2,77.6,192,27.78 4 | 1,K3,144,0,27.78 5 | 1,K4,59.2,0,27.78 6 | 1,K5,56.8,0,27.78 7 | 1,K6,108.8,0,27.78 8 | 1,K7,100,300,27.78 9 | 1,K8,136.8,0,27.78 10 | 1,K9,140,0,27.78 11 | 1,K10,156,0,27.78 12 | 1,K11,0,0,27.78 13 | 1,K12,0,0,27.78 14 | 1,K13,212,591,27.78 15 | 1,K14,155.2,0,27.78 16 | 1,K15,253.6,215,27.78 17 | 1,K16,80,155,27.78 18 | 1,K17,0,0,27.78 19 | 1,K18,266.4,400,27.78 20 | 1,K19,144.8,0,27.78 21 | 1,K20,102.4,0,27.78 22 | 1,K21,0,400,27.78 23 | 1,K22,0,300,27.78 24 | 1,K23,0,660,27.78 25 | 1,K24,0,0,27.78 26 | 2,J1,0,5.673821802,3944.46 27 | 2,J2,0,0,3944.46 28 | 2,J3,0,0,3944.46 29 | 2,J4,0,0,3944.46 30 | 2,J5,0,0,3944.46 31 | 2,J6,0.692244656,0,3944.46 32 | 2,J7,0,0,3944.46 33 | 2,J8,0,0,3944.46 34 | 2,J9,0,0,3944.46 35 | 2,J10,0,0,3944.46 36 | 2,J11,0,0,3944.46 37 | 2,J12,0.467021902,0,3944.46 38 | 2,J13,0,0,3944.46 39 | 2,J14,0,0,3944.46 40 | 2,J15,0,0,3944.46 41 | 2,J16,0,0,3944.46 42 | 2,J17,0,0,3944.46 43 | 2,J18,0.484208308,0,3944.46 44 | 2,J19,0,0,3944.46 45 | 2,J20,0,0,3944.46 46 | 2,J21,0,0,3944.46 47 | 2,J22,0,0,3944.46 48 | 2,J23,0,0,3944.46 49 | 2,J24,0,0,3944.46 50 | 2,J25,0.653340935,0,3944.46 51 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # set default plot parameters 3 | def set_default_plot_param(plt): 4 | # %matplotlib inline 5 | # from matplotlib import pyplot as plt 6 | 7 | plt.style.use('classic') 8 | 9 | plt.rcParams["font.family"] = "Helvetica" 10 | plt.rcParams['font.weight']= 'normal' 11 | plt.rcParams['figure.figsize'] = [6, 6*3/4] 12 | 13 | plt.rcParams['figure.facecolor'] = 'white' 14 | plt.rcParams['axes.facecolor'] = 'white' 15 | plt.rcParams['axes.axisbelow'] = True 16 | 17 | plt.rc('axes', titlesize=16, labelsize=15, linewidth=0.9) # fontsize of the 
axes title, the x and y labels 18 | 19 | plt.rc('lines', linewidth=1.8, markersize=6, markeredgecolor='none') 20 | 21 | plt.rc('xtick', labelsize=13) 22 | plt.rc('ytick', labelsize=13) 23 | 24 | plt.rcParams['axes.formatter.useoffset'] = False # turn off offset 25 | # To turn off scientific notation, use: ax.ticklabel_format(style='plain') or 26 | # plt.ticklabel_format(style='plain') 27 | 28 | 29 | plt.rcParams['legend.fontsize'] = 13 30 | plt.rcParams["legend.fancybox"] = True 31 | plt.rcParams["legend.loc"] = "best" 32 | plt.rcParams["legend.framealpha"] = 0.5 33 | 34 | 35 | plt.rcParams['savefig.bbox'] = 'tight' 36 | plt.rcParams['savefig.dpi'] = 800 37 | 38 | # plt.rc('text', usetex=False) 39 | -------------------------------------------------------------------------------- /data/arcs_data.csv: -------------------------------------------------------------------------------- 1 | start_node,end_node,flow_cap,conv_rate,flow 2 | K1,K2,148.75,1,77.6 3 | K3,K1,148.75,1,119 4 | K1,K5,148.75,1,0 5 | K2,K4,148.75,1,0 6 | K2,K6,148.75,1,0 7 | K3,K9,148.75,1,0 8 | K24,K3,340,1,263 9 | K9,K4,148.75,1,59.2 10 | K10,K5,148.75,1,56.8 11 | K10,K6,148.75,1,108.8 12 | K7,K8,148.75,1,119 13 | K9,K8,148.75,1,17.8 14 | K10,K8,148.75,1,0 15 | K11,K9,340,1,68 16 | K12,K9,340,1,149 17 | K11,K10,340,1,272 18 | K12,K10,340,1,49.6 19 | K13,K11,425,1,340 20 | K14,K11,425,1,0 21 | K13,K12,425,1,198.6 22 | K23,K12,425,1,0 23 | K23,K13,425,1,0 24 | K16,K14,425,1,155.2 25 | K15,K16,425,1,32.92542054 26 | K21,K15,425,1,0 27 | K15,K24,425,1,263 28 | K17,K16,425,1,340 29 | K16,K19,425,1,144.8 30 | K18,K17,425,1,0 31 | K22,K17,425,1,340 32 | K21,K18,425,1,37.52542054 33 | K20,K19,425,1,0 34 | K23,K20,425,1,102.4 35 | K22,K21,425,1,37.52542054 36 | J1,J2,5.6738,1,2.296815801 37 | J2,J3,2.7362,1,0.692244656 38 | J3,J4,2.7362,1,0.692244656 39 | J4,J5,2.7362,1,0.692244656 40 | J5,J6,2.7362,1,0.692244656 41 | J5,J7,2.7362,1,0 42 | J7,J8,2.7362,1,0 43 | J2,J9,5.6738,1,1.604571145 44 | J9,J10,5.6738,1,1.604571145 45 | J10,J11,2.7362,1,0.467021902 46 | J11,J12,2.7362,1,0.467021902 47 | J11,J13,2.7362,1,0 48 | J10,J14,5.6738,1,1.137549243 49 | J14,J15,5.6738,1,1.137549243 50 | J15,J16,5.6738,1,0.484208308 51 | J16,J17,2.7362,1,0.484208308 52 | J17,J18,2.7362,1,0.484208308 53 | J16,J19,2.7362,1,0 54 | J15,J20,5.6738,1,0.653340935 55 | J20,J21,5.6738,1,0.653340935 56 | J21,J22,5.6738,1,0.653340935 57 | J22,J23,5.6738,1,0 58 | J23,J24,5.6738,1,0 59 | J22,J25,2.7362,1,0.653340935 60 | J8,K22,2.7362,172.4668,2.188974461 61 | J13,K15,2.7362,172.4668,2.188974461 62 | J24,K13,5.6738,172.4668,4.352141977 63 | J19,K7,2.7362,172.4668,1.26980961 64 | -------------------------------------------------------------------------------- /data/case_49/arcs_data.csv: -------------------------------------------------------------------------------- 1 | start_node,end_node,u,ra,cha,cra,conv_rate 2 | K1,K2,148.75,6,3.6,1.2,1 3 | K3,K1,148.75,6,3.6,1.2,1 4 | K1,K5,148.75,6,3.6,1.2,1 5 | K2,K4,148.75,6,3.6,1.2,1 6 | K2,K6,148.75,6,3.6,1.2,1 7 | K3,K9,148.75,6,3.6,1.2,1 8 | K24,K3,340,6,3.6,1.2,1 9 | K9,K4,148.75,6,3.6,1.2,1 10 | K10,K5,148.75,6,3.6,1.2,1 11 | K10,K6,148.75,6,3.6,1.2,1 12 | K7,K8,148.75,6,3.6,1.2,1 13 | K9,K8,148.75,6,3.6,1.2,1 14 | K10,K8,148.75,6,3.6,1.2,1 15 | K11,K9,340,6,3.6,1.2,1 16 | K12,K9,340,6,3.6,1.2,1 17 | K11,K10,340,6,3.6,1.2,1 18 | K12,K10,340,6,3.6,1.2,1 19 | K13,K11,425,6,3.6,1.2,1 20 | K14,K11,425,6,3.6,1.2,1 21 | K13,K12,425,6,3.6,1.2,1 22 | K23,K12,425,6,3.6,1.2,1 23 | K23,K13,425,6,3.6,1.2,1 24 | 
K16,K14,425,6,3.6,1.2,1 25 | K15,K16,425,6,3.6,1.2,1 26 | K21,K15,425,6,3.6,1.2,1 27 | K15,K24,425,6,3.6,1.2,1 28 | K17,K16,425,6,3.6,1.2,1 29 | K16,K19,425,6,3.6,1.2,1 30 | K18,K17,425,6,3.6,1.2,1 31 | K22,K17,425,6,3.6,1.2,1 32 | K21,K18,425,6,3.6,1.2,1 33 | K20,K19,425,6,3.6,1.2,1 34 | K23,K20,425,6,3.6,1.2,1 35 | K22,K21,425,6,3.6,1.2,1 36 | J1,J2,5.673821802,6,3.6,1.2,1 37 | J2,J3,2.736218076,6,3.6,1.2,1 38 | J3,J4,2.736218076,6,3.6,1.2,1 39 | J4,J5,2.736218076,6,3.6,1.2,1 40 | J5,J6,2.736218076,6,3.6,1.2,1 41 | J5,J7,2.736218076,6,3.6,1.2,1 42 | J7,J8,2.736218076,6,3.6,1.2,1 43 | J2,J9,5.673821802,6,3.6,1.2,1 44 | J9,J10,5.673821802,6,3.6,1.2,1 45 | J10,J11,2.736218076,6,3.6,1.2,1 46 | J11,J12,2.736218076,6,3.6,1.2,1 47 | J11,J13,2.736218076,6,3.6,1.2,1 48 | J10,J14,5.673821802,6,3.6,1.2,1 49 | J14,J15,5.673821802,6,3.6,1.2,1 50 | J15,J16,5.673821802,6,3.6,1.2,1 51 | J16,J17,2.736218076,6,3.6,1.2,1 52 | J17,J18,2.736218076,6,3.6,1.2,1 53 | J16,J19,2.736218076,6,3.6,1.2,1 54 | J15,J20,5.673821802,6,3.6,1.2,1 55 | J20,J21,5.673821802,6,3.6,1.2,1 56 | J21,J22,5.673821802,6,3.6,1.2,1 57 | J22,J23,5.673821802,6,3.6,1.2,1 58 | J23,J24,5.673821802,6,3.6,1.2,1 59 | J22,J25,2.736218076,6,3.6,1.2,1 60 | J8,K22,2.736218076,6,3.6,1.2,172.4668 61 | J13,K15,2.736218076,6,3.6,1.2,172.4668 62 | J24,K13,5.673821802,6,3.6,1.2,172.4668 63 | J19,K7,2.736218076,6,3.6,1.2,172.4668 64 | -------------------------------------------------------------------------------- /infrasnetwork.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import numpy as np 3 | import pandas as pd 4 | import networkx as nx 5 | class network(object): 6 | """ Define the network class for modeling the power and gas networks 7 | """ 8 | def __init__(self, node_data, arc_data): 9 | """ 10 | Input: 11 | node_data: pandas.dataframe, the data of the nodes 12 | arc_data: pandas.dataframe, the data of the arcs 13 | """ 14 | #read the node data 15 | self.nodenum = len(node_data) 16 | self.demand = node_data['demand'] 17 | self.supply_cap = node_data['supply_cap'] 18 | self.supply = node_data['supply'] 19 | self.nodeid = node_data['node_id'] 20 | 21 | 22 | #read the arc data 23 | self.arcnum = len(arc_data) 24 | self.start_node_id = arc_data['start_node'] 25 | self.end_node_id = arc_data['end_node'] 26 | self.flow_cap = arc_data['flow_cap'] 27 | self.conv_rate = arc_data['conv_rate'] 28 | self.flow = arc_data['flow'] 29 | 30 | self.nodeid2num() 31 | self.adj_matrix() 32 | self.adj_list() 33 | self.networkx_graph() 34 | self.topo_sort() 35 | self.flow_matrix() 36 | # self.flow_check() 37 | self.centrality() 38 | def nodeid2num(self): 39 | """ Mapping the ID of the node to the number of the node in the network 40 | """ 41 | self.ID2num = {} 42 | for i in range(self.nodenum): 43 | self.ID2num[self.nodeid.iloc[i]] = i 44 | 45 | def adj_matrix(self): 46 | """ Create the adjacency matrix of the network, directed graph 47 | """ 48 | self.adjmatrix = np.zeros((self.nodenum, self.nodenum), dtype = int) 49 | 50 | for i in range(self.arcnum): 51 | self.adjmatrix[self.ID2num[self.start_node_id.iloc[i]], self.ID2num[self.end_node_id.iloc[i]]] = 1 52 | 53 | def flow_matrix(self): 54 | """ Flow matrix of the network, directed graph 55 | """ 56 | self.flowmatrix = np.zeros((self.nodenum, self.nodenum), dtype = float) 57 | 58 | for i in range(self.arcnum): 59 | self.flowmatrix[self.ID2num[self.start_node_id.iloc[i]], self.ID2num[self.end_node_id.iloc[i]]] = self.flow.iloc[i] 60 | 61 | def
flow_check(self): 62 | """ Check whether the initial flow is balanced 63 | """ 64 | for i in range(self.nodenum): 65 | print(i + 1, np.sum(self.flowmatrix[:, i]) - np.sum(self.flowmatrix[i, :]) + self.supply.iloc[i] - self.demand.iloc[i]) 66 | 67 | def adj_list(self): 68 | """ Create the adjacency list of the network, directed graph 69 | """ 70 | self.adjlist = {} 71 | for i in range(self.arcnum): 72 | if(self.ID2num[self.start_node_id.iloc[i]] not in self.adjlist.keys()): 73 | self.adjlist[self.ID2num[self.start_node_id.iloc[i]]] = [] 74 | 75 | self.adjlist[self.ID2num[self.start_node_id.iloc[i]]].append(self.ID2num[self.end_node_id.iloc[i]]) 76 | 77 | def networkx_graph(self): 78 | """ Create the networkx object of the network 79 | """ 80 | self.graph = nx.convert_matrix.from_numpy_matrix(self.adjmatrix, create_using=nx.DiGraph) 81 | 82 | def topo_sort(self): 83 | """ Perform the topological sort of the network, directed graph 84 | """ 85 | if(nx.algorithms.dag.is_directed_acyclic_graph(self.graph)): 86 | self.topo_order = list(nx.topological_sort(self.graph)) 87 | else: 88 | print('The current network is not a DAG') 89 | 90 | def centrality(self): 91 | """ Calculate the centrality of the graph 92 | Choice: degree, katz, closeness, betweenness 93 | """ 94 | self.dc = list(nx.algorithms.centrality.degree_centrality(self.graph).values()) 95 | self.kc = list(nx.algorithms.centrality.katz_centrality(self.graph).values()) 96 | self.cc = list(nx.algorithms.centrality.closeness_centrality(self.graph).values()) 97 | self.bc = list(nx.algorithms.centrality.betweenness_centrality(self.graph).values()) 98 | -------------------------------------------------------------------------------- /src/simulate initial supply and flow/readin.jl: -------------------------------------------------------------------------------- 1 | # read in the network data from the csv files 2 | using CSV,DataFrames,JLD,HDF5; 3 | 4 | # define the struct types 5 | struct netData 6 | # in network 7 | IDList :: Array{Any,1} # [1, 2, 3, ...] 8 | inbrList :: Array{Any,1} # [(1,2), (1,3), ...]
9 | inbr1 :: Dict{Any,Any} # [1]: [2, 3, ...],[2]: 10 | inbr2 :: Dict{Any,Any} # [1]: [2, 3, ...],[2]: 11 | 12 | # demand, supply capacity, flow capacity 13 | b :: Dict{Any,Any} 14 | sc :: Dict{Any,Any} 15 | u :: Dict{Any,Any} 16 | csc :: Dict{Any,Any} # unit supply cost, used by the minCost model in gen_initial_supply_and_flow.jl 17 | end 18 | 19 | # data structure for the inter network arcs 20 | struct interData 21 | startNet :: Int64 22 | endNet :: Int64 23 | startNode :: Any 24 | endNode :: Any 25 | convRate :: Float64 26 | u :: Float64 27 | end 28 | 29 | 30 | # define functions 31 | function readNetwork(nodeAdd,arcAdd) 32 | nodes_data = CSV.read(nodeAdd); 33 | arcs_data = CSV.read(arcAdd); 34 | nnodes,mnodes = size(nodes_data); 35 | narcs,marcs = size(arcs_data); 36 | 37 | # initiate the data structure 38 | # the keys to the dictionaries are the network number 39 | netList = unique(nodes_data.net_id); 40 | nodeNet = Dict(); 41 | arcNet = Dict(); 42 | # node information 43 | nodeList = Dict(); 44 | 45 | bList = Dict(); 46 | # arc information 47 | brList = Dict(); 48 | 49 | # cfaList = Dict(); 50 | uList = Dict(); 51 | # rtaList = Dict(); 52 | br1List = Dict(); 53 | br2List = Dict(); 54 | scList = Dict(); 55 | cscList = Dict(); 56 | for i in netList 57 | nodeList[i] = []; 58 | bList[i] = Dict(); 59 | scList[i] = Dict(); 60 | cscList[i] = Dict(); 61 | 62 | brList[i] = []; 63 | # cfaList[i] = Dict(); 64 | uList[i] = Dict(); 65 | br1List[i] = Dict(); 66 | br2List[i] = Dict(); 67 | end 68 | 69 | # read in the node information 70 | for i in 1:nnodes 71 | ID = nodes_data.node_id[i]; 72 | netBelong = nodes_data.net_id[i]; 73 | push!(nodeList[netBelong],ID); 74 | nodeNet[ID] = netBelong; 75 | bList[netBelong][ID] = nodes_data.b[i]; 76 | scList[netBelong][ID] = nodes_data.sc[i]; 77 | cscList[netBelong][ID] = nodes_data.csc[i]; 78 | end 79 | 80 | # read in the arc information 81 | interList = []; 82 | for a in 1:narcs 83 | fromNode = arcs_data.start_node[a]; 84 | toNode = arcs_data.end_node[a]; 85 | arcID = (fromNode,toNode); 86 | convRate = arcs_data.conv_rate[a]; 87 | if nodeNet[fromNode] == nodeNet[toNode] 88 | # if it is within some network 89 | netBelong = nodeNet[fromNode]; 90 | push!(brList[netBelong],arcID); 91 | arcNet[arcID] = netBelong; 92 | uList[netBelong][arcID] = arcs_data.u[a]; 93 | if fromNode in keys(br1List[netBelong]) 94 | push!(br1List[netBelong][fromNode],arcID); 95 | else 96 | br1List[netBelong][fromNode] = [arcID]; 97 | end 98 | if toNode in keys(br2List[netBelong]) 99 | push!(br2List[netBelong][toNode],arcID); 100 | else 101 | br2List[netBelong][toNode] = [arcID]; 102 | end 103 | else 104 | # if it is inter network 105 | arcInfo = interData(nodeNet[fromNode],nodeNet[toNode],fromNode,toNode,convRate,arcs_data.u[a]); 106 | push!(interList,arcInfo); 107 | end 108 | end 109 | 110 | # create networkData: a list of networks 111 | networkData = Dict(); 112 | for i in netList 113 | networkData[i] = netData(nodeList[i],brList[i],br1List[i],br2List[i],bList[i],scList[i],uList[i],cscList[i]); 114 | end 115 | 116 | return netList,networkData,interList; 117 | end 118 | 119 | function arcTrans(arcStr) 120 | arcStr = strip(arcStr,'('); 121 | arcStr = strip(arcStr,')'); 122 | fromNodeStr,toNodeStr = split(arcStr,','); 123 | return (fromNodeStr,toNodeStr); 124 | end 125 | 126 | # load the data file given the folder address 127 | function loadData(dataAdd) 128 | nodeAdd = joinpath(dataAdd,"nodes_data.csv"); 129 | arcAdd = joinpath(dataAdd,"arcs_data.csv"); 130 | netList,networkData,interList = readNetwork(nodeAdd,arcAdd); 131 | return
netList,networkData,interList; 132 | end 133 | -------------------------------------------------------------------------------- /src/simulate initial supply and flow/gen_initial_supply_and_flow.jl: -------------------------------------------------------------------------------- 1 | ############################################### 2 | # check if the node demand and supply capacity values are valid when there is no damage. 3 | # Validity criteria: 1) When there is no damage, no slack is incurred. 4 | # 2) The flow rate of all links (actual flow over link capacity) is no greater than 0.85. 5 | 6 | # Nodal demand, supply capacity, and link capacity of power network are obtained from the IEEE test case (mpc object in MATLAB). 7 | # link capacity of gas network is calculated according to the diameter of gas pipelines. Node demand and capacity are set to a sufficiently large 8 | # value and calculated from the solution for link flow according to the minCost model. 9 | ############################################### 10 | 11 | 12 | ############################################### 13 | # import packages and functions 14 | using JuMP, Gurobi 15 | using Statistics 16 | using CSV, DataFrames, JLD, HDF5 17 | const GUROBI_ENV = Gurobi.Env() 18 | 19 | include("./readin.jl") 20 | 21 | 22 | ############################################### 23 | # function 24 | # construct the min cost model to check if a valid initial feasible flow with no slack and capped flow rate exists. 25 | function minCost(netList, networkData, interList, flowRateUB=0.85, supRate=1) 26 | mp = Model(optimizer_with_attributes(() -> Gurobi.Optimizer(GUROBI_ENV), "OutputFlag" => 1)) 27 | 28 | # decision variables 29 | @variable(mp, f[n in netList, k in networkData[n].inbrList]>=0) 30 | @variable(mp, h[interI in 1:length(interList)]>=0) 31 | @variable(mp, 0 <= sup[n in netList, i in networkData[n].IDList] <= networkData[n].sc[i]*supRate) 32 | 33 | # constraints 34 | # flow balance: outflow - inflow + transformed flow from other network = supply - demand 35 | @constraint(mp, flowbalance[n in netList, i in networkData[n].IDList], 36 | sum(f[n,k] for k in networkData[n].inbrList if i == k[1]) - sum(f[n,k] for k in networkData[n].inbrList if i == k[2]) + 37 | - sum(h[interI]*interList[interI].convRate for interI in 1:length(interList) if (interList[interI].endNet == n)&(interList[interI].endNode == i)) 38 | == sup[n,i] - networkData[n].b[i]) 39 | 40 | # flow capacity 41 | @constraint(mp, flowCap1[n in netList, k in networkData[n].inbrList], 42 | f[n,k] <= networkData[n].u[k]*flowRateUB) # -networkData[n].u[k]*flowRateUB <= is removed.
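# the cap above, f <= u*flowRateUB (0.85 by default), enforces validity criterion 2 in the file header;
# setting flowRateUB = 1.0 would recover the plain capacity constraint f <= u.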
43 | 44 | # interconnected nodes 45 | @constraint(mp, interConnect[interI in 1:length(interList)], 46 | h[interI] <= interList[interI].u) 47 | interDict = Dict() 48 | for interI in 1:length(interList) 49 | startN = interList[interI].startNode 50 | endN = interList[interI].endNode 51 | startNet = interList[interI].startNet 52 | endNet = interList[interI].endNet 53 | if !((endNet,endN) in keys(interDict)) 54 | interDict[(endNet,endN)] = [interI] 55 | else 56 | push!(interDict[(endNet,endN)], interI) 57 | end 58 | end 59 | @constraint(mp, interCapacity[n in netList, i in networkData[n].IDList; (n,i) in keys(interDict)], 60 | sup[n,i] == sum(h[interI]*interList[interI].convRate for interI in 1:length(interList) if (interList[interI].endNet == n)&(interList[interI].endNode == i))) 61 | 62 | # objective function 63 | @expression(mp, supplyCost, sum(sum(networkData[n].csc[i]*sup[n,i] for i in networkData[n].IDList) for n in netList)) 64 | @objective(mp, Min, supplyCost) 65 | 66 | return mp 67 | end 68 | 69 | 70 | function solve_and_print(netList,networkData,interList, flowRateUB=0.85, supRate=1) 71 | # build and solve the model 72 | modelMinCost = minCost(netList,networkData,interList, flowRateUB, supRate) 73 | optimize!(modelMinCost) 74 | 75 | # get results 76 | obj = objective_value(modelMinCost) 77 | f = value.(modelMinCost[:f]) 78 | h = value.(modelMinCost[:h]) 79 | sup = value.(modelMinCost[:sup]) 80 | 81 | # print results 82 | println("\n-------------------") 83 | println("\nSupply at nodes\n", ) 84 | for n in netList 85 | for i in networkData[n].IDList 86 | println(sup[n,i]) 87 | # println(n," ",i," ",sup[n, i]) 88 | end 89 | end 90 | 91 | println("\nFlow on arcs of each network\n") 92 | for n in netList 93 | for k in networkData[n].inbrList 94 | flowRate = f[n, k]/networkData[n].u[k] 95 | # println(n," ",k," ", f[n,k], " ", networkData[n].u[k], " ", flowRate) 96 | println(f[n, k]) 97 | end 98 | end 99 | 100 | println("\nFlow on interdependent arcs\n") 101 | for interI in 1:length(interList) 102 | flowRate = h[interI]/interList[interI].u 103 | # println(interList[interI], " ", h[interI], " ", interList[interI].u, " ", flowRate) 104 | println(h[interI]) 105 | end 106 | end 107 | 108 | function run_main() 109 | # load data from case_49 folder 110 | netList, networkData, interList = loadData("./data/case_49/") 111 | solve_and_print(netList, networkData, interList, 0.80, 1) # optional args are positional: flowRateUB=0.80, supRate=1 112 | end 113 | 114 | run_main() 115 | -------------------------------------------------------------------------------- /src/simulate initial supply and flow/plot_networks_and_check_cycles.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import numpy as np 3 | from matplotlib import pyplot as plt 4 | import pandas as pd 5 | 6 | import os 7 | dir_path = os.path.dirname(os.path.abspath(__file__)) # directory of the current script 8 | 9 | import networkx as nx 10 | from matplotlib import patches 11 | 12 | # set default plot parameters 13 | def set_default_plot_param(): 14 | 15 | plt.style.use('classic') 16 | 17 | plt.rcParams["font.family"] = "Helvetica" 18 | plt.rcParams['font.weight']= 'normal' 19 | plt.rcParams['figure.figsize'] = [6, 6*3/4] 20 | 21 | plt.rcParams['figure.facecolor'] = 'white' 22 | plt.rcParams['axes.facecolor'] = 'white' 23 | 24 | plt.rc('axes', titlesize=14, labelsize=12, linewidth=0.75) # fontsize of the axes title, the x and y labels 25 | 26 | plt.rc('lines', linewidth=1.5, markersize=4) 27 | 28 |
plt.rc('xtick', labelsize=10) 29 | plt.rc('ytick', labelsize=10) 30 | 31 | plt.rcParams['axes.formatter.useoffset'] = False # turn off offset 32 | # To turn off scientific notation, use: ax.ticklabel_format(style='plain') or 33 | # plt.ticklabel_format(style='plain') 34 | 35 | 36 | plt.rcParams['legend.fontsize'] = 10 37 | plt.rcParams["legend.fancybox"] = True 38 | plt.rcParams["legend.loc"] = "best" 39 | plt.rcParams["legend.framealpha"] = 0.5 40 | 41 | plt.rcParams['savefig.bbox'] = 'tight' 42 | plt.rcParams['savefig.dpi'] = 800 43 | 44 | # plt.rc('text', usetex=False) 45 | 46 | set_default_plot_param() 47 | 48 | 49 | #%% 50 | '''plot networks in which the weights show the restorative importance, e.g., flow. 51 | ref.: https://qxf2.com/blog/drawing-weighted-graphs-with-networkx/ 52 | ''' 53 | 54 | def min_pos_scale(data_df, scale_max=1, scale_min=-1): 55 | '''scale data into the desired range 56 | ''' 57 | for i in np.arange(data_df.shape[1]): 58 | # for y position, the range should be -0.4, 0.4 59 | if i==1: 60 | scale_max, scale_min = 0.4, -0.4 61 | data_max = data_df.iloc[:,i].max() 62 | data_min = data_df.iloc[:,i].min() 63 | slope = (scale_max-scale_min)/(data_max-data_min) 64 | intercept = scale_min - slope*data_min 65 | data_df.iloc[:,i] = intercept + data_df.iloc[:,i]*slope 66 | 67 | return data_df 68 | 69 | def plot_weighted_graph(node_df, arc_df, node_pos, fig_size = (16, 16*4/11)): 70 | '''draw a weighted graph according to the defined node positions 71 | ''' 72 | 73 | # 0.0 extract nodes label, net_id, etc. 74 | node_list = node_df['node_id'].tolist() 75 | net_id = node_df['net_id'].tolist() 76 | n_node_power = net_id.count(1) 77 | n_node_total = node_df.shape[0] 78 | 79 | # 0.1 extract arcs label 80 | start_node_id = arc_df['start_node'] 81 | end_node_id = arc_df['end_node'] 82 | n_arc_total = arc_df.shape[0] 83 | 84 | # 1.0 draw basic graph and nodes 85 | G = nx.DiGraph(directed=True) 86 | for i in np.arange(n_node_total): 87 | G.add_node(i, pos=(node_pos.iloc[i,0], node_pos.iloc[i,1])) 88 | 89 | # 1.1 draw nodes 90 | pos=nx.get_node_attributes(G,'pos') 91 | plt.figure(figsize=fig_size) 92 | nx.draw_networkx_nodes(G, pos, nodelist=range(0, n_node_power), 93 | node_color='royalblue', node_shape='o', node_size=500) 94 | nx.draw_networkx_nodes(G, pos, nodelist=range(n_node_power, n_node_total), 95 | node_color='tab:red', node_shape='o', node_size=500) 96 | 97 | # 1.2 remove edge of nodes 98 | ax = plt.gca() # to get the current axis 99 | for i in np.arange(max(net_id)): 100 | ax.collections[i].set_edgecolor('none') 101 | 102 | # 1.3. add labels to the nodes 103 | labels = {} 104 | for j in np.arange(n_node_total): 105 | labels[j] = node_list[j] 106 | nx.draw_networkx_labels(G, pos, labels, font_size=9) #, font_family='serif') 107 | 108 | # 2.0 add edges 109 | for k in np.arange(n_arc_total): 110 | start_node_num = list(labels.values()).index(start_node_id[k]) 111 | end_node_num = list(labels.values()).index(end_node_id[k]) 112 | G.add_edge(start_node_num, end_node_num, weight=1.5, color='k') 113 | 114 | all_weights = [] 115 | #4.0 Iterate through the graph edges to gather all the weights 116 | for (node1,node2,data) in G.edges(data=True): 117 | all_weights.append(data['weight']) #we'll use this when determining edge thickness 118 | 119 | #4.1 Get unique weights 120 | unique_weights = list(set(all_weights)) 121 | 122 | #4.2 Plot the edges - one by one!
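# grouping by unique weight lets each weight class get its own line width; since step 2.0 adds
# every edge with weight=1.5, the loop below runs exactly once for this network.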
123 | # testArrow = patches.ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.1) 124 | for weight in unique_weights: 125 | #4 d. Form a filtered list with just the weight you want to draw 126 | weighted_edges = [(n_1,n_2) for (n_1,n_2,edge_attr) in G.edges(data=True) if edge_attr['weight']==weight] 127 | width = 0.75 128 | nx.draw_networkx_edges(G, pos, edgelist=weighted_edges, width=width, 129 | arrowsize=15) #, alpha=0.75) 130 | 131 | #Plot the graph 132 | plt.axis('off') 133 | plt.show() 134 | 135 | return G 136 | 137 | 138 | def main(): 139 | 140 | # import network data 141 | node_df = pd.read_csv('./data/nodes_data.csv') 142 | arc_df = pd.read_csv('./data/arcs_data.csv') 143 | 144 | # import node position data 145 | node_pos_df = pd.read_csv('./data/node_position.csv') 146 | 147 | # plot 148 | G = plot_weighted_graph(node_df, arc_df, node_pos=node_pos_df[['pos_x', 'pos_y']]*2) 149 | 150 | # check cycles; nx.find_cycle raises NetworkXNoCycle when the graph is acyclic 151 | cycles = nx.find_cycle(G) if not nx.is_directed_acyclic_graph(G) else [] 152 | print(cycles) 153 | 154 | 155 | main() 156 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import numpy as np 3 | import math 4 | import random 5 | from math import ceil 6 | import copy 7 | import pandas as pd 8 | from matplotlib import pyplot as plt 9 | from mip import Model, minimize, xsum, BINARY, OptimizationStatus 10 | import os 11 | # dir_path = os.path.dirname(os.path.realpath(__file__)) # go to the directory of the current script 12 | import networkx as nx 13 | 14 | from utils import set_default_plot_param 15 | set_default_plot_param(plt) 16 | import data as dt 17 | from infrasnetwork import network 18 | #import failsimulation as fs 19 | 20 | 21 | class System(object): 22 | """ Couple the power and gas networks 23 | """ 24 | def __init__(self, power, gas, g2p_arcdata): ##Ignore the arc from the power network to gas network 25 | self.power = power 26 | self.gas = gas 27 | self.nodenum = self.power.nodenum + self.gas.nodenum 28 | self.arcnum = self.power.arcnum + self.gas.arcnum + len(g2p_arcdata) 29 | 30 | self.g2p_arcdata = g2p_arcdata 31 | 32 | self.adjflow_matrix() 33 | self.nodeid2num() 34 | self.networkx_graph() 35 | self.topo_sort() 36 | self.centrality() 37 | self.propertymapping() 38 | 39 | 40 | def adjflow_matrix(self): #first gas then power 41 | """ Create the adjacency matrix of the whole system by plugging the adjacency matrix of the power and gas networks 42 | with the addition of the interdependent links 43 | """ 44 | 45 | self.adjmatrix = np.zeros((self.nodenum, self.nodenum), dtype = int) 46 | self.flowmatrix = np.zeros((self.nodenum, self.nodenum), dtype = float) 47 | 48 | #add the power and gas arcs 49 | self.adjmatrix[0:self.gas.nodenum, 0:self.gas.nodenum] = copy.copy(self.gas.adjmatrix) 50 | self.flowmatrix[0:self.gas.nodenum, 0:self.gas.nodenum] = copy.copy(self.gas.flowmatrix) 51 | self.adjmatrix[self.gas.nodenum:(self.gas.nodenum + self.power.nodenum), self.gas.nodenum:(self.gas.nodenum + self.power.nodenum)] = copy.copy(self.power.adjmatrix) 52 | self.flowmatrix[self.gas.nodenum:(self.gas.nodenum + self.power.nodenum), self.gas.nodenum:(self.gas.nodenum + self.power.nodenum)] = copy.copy(self.power.flowmatrix) 53 | 54 | #add the gas2power arcs 55 | for i in range(len(self.g2p_arcdata)): 56 | self.adjmatrix[self.gas.ID2num[self.g2p_arcdata['start_node'].iloc[i]], self.power.ID2num[self.g2p_arcdata['end_node'].iloc[i]] + self.gas.nodenum] = 1 57 |
self.flowmatrix[self.gas.ID2num[self.g2p_arcdata['start_node'].iloc[i]], self.power.ID2num[self.g2p_arcdata['end_node'].iloc[i]] + self.gas.nodenum] = self.g2p_arcdata['flow'].iloc[i] 58 | 59 | def nodeid2num(self): 60 | """ Mapping the ID of the node to the number of the node in the system 61 | """ 62 | ##update the power ID 63 | update_powerID = {x: self.power.ID2num[x]+self.gas.nodenum for x in self.power.ID2num} 64 | self.ID2num = {**self.gas.ID2num, **update_powerID} 65 | self.num2ID = dict(map(reversed, self.ID2num.items())) 66 | 67 | def networkx_graph(self): 68 | """ Create the networkx object of the network 69 | """ 70 | self.graph = nx.convert_matrix.from_numpy_matrix(self.adjmatrix, create_using = nx.DiGraph) 71 | 72 | def topo_sort(self): 73 | """ Perform the topological sort of the network, directed graph 74 | """ 75 | if (nx.algorithms.dag.is_directed_acyclic_graph(self.graph)): 76 | self.topo_order = list(nx.topological_sort(self.graph)) 77 | else: 78 | print('The current network is not a DAG') 79 | 80 | def centrality(self): 81 | """ Calculate the centrality of the graph 82 | Choice: degree, katz, closeness, betweenness 83 | """ 84 | self.dc = list(nx.algorithms.centrality.degree_centrality(self.graph).values()) 85 | self.kc = list(nx.algorithms.centrality.katz_centrality(self.graph).values()) 86 | self.cc = list(nx.algorithms.centrality.closeness_centrality(self.graph).values()) 87 | self.bc = list(nx.algorithms.centrality.betweenness_centrality(self.graph).values()) 88 | 89 | 90 | # def get_arc_id_list(self): 91 | # start_node_id_list = self.gas.start_node_id.to_list() + self.power.start_node_id.to_list() 92 | # end_node_id_list = self.gas.end_node_id.to_list() + self.power.end_node_id.to_list() 93 | # self.arc_id_list = [(start_node_id_list[i], end_node_id_list[i]) for i in range(len(start_node_id_list))] 94 | 95 | def propertymapping(self): 96 | """ Mapping all properties in the network to the system 97 | """ 98 | 99 | self.demand = np.concatenate((np.array(self.gas.demand), np.array(self.power.demand))) 100 | self.supply_cap = np.concatenate((np.array(self.gas.supply_cap), np.array(self.power.supply_cap))) 101 | self.supply = np.concatenate((np.array(self.gas.supply), np.array(self.power.supply))) 102 | 103 | # array of list, each element is a list that will contain the start node number and end node number of an arc 104 | self.arclist = np.empty((self.power.arcnum + self.gas.arcnum + len(self.g2p_arcdata), 2), dtype = int) 105 | 106 | self.conv_rate = np.concatenate((np.array(self.gas.conv_rate), np.array(self.power.conv_rate), np.array(self.g2p_arcdata.conv_rate))) 107 | self.flow_cap = np.concatenate((np.array(self.gas.flow_cap), np.array(self.power.flow_cap), np.array(self.g2p_arcdata.flow_cap))) 108 | 109 | # get start node id and end node id 110 | self.node_id_list = self.gas.nodeid.to_list() + self.power.nodeid.to_list() 111 | start_node_id_list = self.gas.start_node_id.to_list() + self.power.start_node_id.to_list() + self.g2p_arcdata.start_node.to_list() 112 | end_node_id_list = self.gas.end_node_id.to_list() + self.power.end_node_id.to_list() + self.g2p_arcdata.end_node.to_list() 113 | self.arc_id_list = [(start_node_id_list[i], end_node_id_list[i]) for i in range(len(start_node_id_list))] 114 | # self.arc_id_list = self.get_arc_id_list 115 | # 116 | # print(self.arc_id_list) 117 | 118 | self.flowcapmatrix = np.zeros((self.nodenum, self.nodenum), dtype = float) 119 | self.convratematrix = np.zeros((self.nodenum, self.nodenum), dtype = float) 120 | temp = 0
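# the three loops below fill flowcapmatrix, convratematrix, and arclist in the same order as
# arc_id_list (gas arcs, then power arcs, then gas-to-power arcs); temp is the running arc index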
121 | for i in range(self.gas.arcnum): 122 | self.flowcapmatrix[self.ID2num[self.gas.start_node_id.iloc[i]], self.ID2num[self.gas.end_node_id.iloc[i]]] = self.gas.flow_cap.iloc[i] 123 | self.convratematrix[self.ID2num[self.gas.start_node_id.iloc[i]], self.ID2num[self.gas.end_node_id.iloc[i]]] = self.gas.conv_rate.iloc[i] 124 | self.arclist[temp, 0], self.arclist[temp, 1] = self.ID2num[self.gas.start_node_id.iloc[i]], self.ID2num[self.gas.end_node_id.iloc[i]] 125 | temp += 1 126 | 127 | for i in range(self.power.arcnum): 128 | self.flowcapmatrix[self.ID2num[self.power.start_node_id.iloc[i]], self.ID2num[self.power.end_node_id.iloc[i]]] = self.power.flow_cap.iloc[i] 129 | self.convratematrix[self.ID2num[self.power.start_node_id.iloc[i]], self.ID2num[self.power.end_node_id.iloc[i]]] = self.power.conv_rate.iloc[i] 130 | self.arclist[temp, 0], self.arclist[temp, 1] = self.ID2num[self.power.start_node_id.iloc[i]], self.ID2num[self.power.end_node_id.iloc[i]] 131 | temp += 1 132 | 133 | for i in range(len(self.g2p_arcdata)): 134 | self.flowcapmatrix[self.ID2num[self.g2p_arcdata.start_node.iloc[i]], self.ID2num[self.g2p_arcdata.end_node.iloc[i]]] = self.g2p_arcdata.flow_cap.iloc[i] 135 | self.convratematrix[self.ID2num[self.g2p_arcdata.start_node.iloc[i]], self.ID2num[self.g2p_arcdata.end_node.iloc[i]]] = self.g2p_arcdata.conv_rate.iloc[i] 136 | self.arclist[temp, 0], self.arclist[temp, 1] = self.ID2num[self.g2p_arcdata.start_node.iloc[i]], self.ID2num[self.g2p_arcdata.end_node.iloc[i]] 137 | temp += 1 138 | 139 | def initial_failure(self, Type, ratio): 140 | """ Simulate the initial failure sequence 141 | Input: 142 | Type - the type of the initial failure sequence, choice: 143 | 'randomness', 'dc' - degree centrality, 'bc' - betweenness centrality, 'kc' - katz centrality, 'cc': closeness centrality 144 | ratio - the percentage of nodes that fail 145 | Output: 146 | the initial failure sequence 147 | """ 148 | fail_num = math.floor(ratio*self.nodenum) 149 | self.initial_fail_seq_onehotcode = np.zeros(self.nodenum, dtype = int) #1 - failure, 0 - survive 150 | 151 | if (Type == 'randomness'): 152 | self.initial_fail_seq = random.sample(range(self.nodenum), fail_num) 153 | else: # case fail_num > self.nodenum needs not be handled because [-fail_num:]= ['all elements'] 154 | # sort in descending order (negate the original) and return the indices of the top fail_num nodes 155 | self.initial_fail_seq = np.argsort(-np.array(getattr(self, Type)))[:fail_num] # getattr avoids exec; Type names the centrality attribute 156 | 157 | if (len(self.initial_fail_seq) != 0): # works for both the list (random) and array (centrality) cases 158 | self.initial_fail_seq_onehotcode[np.array(self.initial_fail_seq)] = 1 159 | self.initial_fail_link_onehotcode = np.zeros(self.arcnum, dtype = int) #There are no initial failed links 160 | 161 | # print(self.initial_fail_seq) 162 | 163 | return self.initial_fail_seq 164 | 165 | def update_matrix_node_fail(self, adjmatrix, node_fail_seq): 166 | """ Update the adjacency matrix caused by node failure 167 | Input: 168 | adjmatrix - the current adjacency matrix 169 | node_fail_seq - the failed node number at current time step, numpy1Darray 170 | """ 171 | adjmatrix[node_fail_seq == 1, :] = 0 172 | adjmatrix[:, node_fail_seq == 1] = 0 173 | 174 | return adjmatrix 175 | 176 | def update_matrix_link_fail(self, adjmatrix, link_fail_seq): 177 | """ Update the adjacency matrix caused by link failure 178 | Input: 179 | adjmatrix - the current adjacency matrix 180 | link_fail_seq - the failed link number at current time step, numpy1Darray 181 | """ 182 | for i in
range(len(link_fail_seq)): 183 | if (link_fail_seq[i] == 1): 184 | adjmatrix[self.arclist[i, 0], self.arclist[i, 1]] = 0 185 | 186 | return adjmatrix 187 | 188 | def cascading_failure(self, redun_rate=0.2): 189 | """ Simulate the cascading failure 190 | Input: 191 | redun_rate: redundancy 192 | """ 193 | #Update the initial failure scenario: update the adjmatrix and flowmatrix 194 | self.adjmatrix_init = copy.copy(self.adjmatrix) 195 | self.flowmatrix_init = copy.copy(self.flowmatrix) 196 | 197 | #Update the adjmatrix caused by failed nodes 198 | self.adjmatrix_init = self.update_matrix_node_fail(copy.copy(self.adjmatrix_init), self.initial_fail_seq_onehotcode) 199 | 200 | #Update the adjmatrix caused by failed links 201 | self.adjmatrix_init = self.update_matrix_link_fail(copy.copy(self.adjmatrix_init), self.initial_fail_link_onehotcode) 202 | 203 | #Update the flow matrix 204 | self.flowmatrix_init = self.flowmatrix*self.adjmatrix_init 205 | 206 | self.adjmatrix_evol, self.flowmatrix_evol = [self.adjmatrix, self.adjmatrix_init], [self.flowmatrix, self.flowmatrix_init] 207 | self.node_fail_evol, self.link_fail_evol = [self.initial_fail_seq_onehotcode], [self.initial_fail_link_onehotcode] 208 | self.satisfy_node_evol = [self.demand] 209 | self.performance = [1] 210 | self.node_fail_evol_track, self.link_fail_evol_track = [self.initial_fail_seq_onehotcode], [self.initial_fail_link_onehotcode] 211 | time = 0 212 | while(1): #perform the flow redistribution until stable 213 | satisfynode = copy.copy(self.satisfy_node_evol[-1]) 214 | flowmatrix = copy.copy(self.flowmatrix_evol[-1]) 215 | adjmatrix = copy.copy(self.adjmatrix_evol[-1]) 216 | 217 | for node in self.topo_order: 218 | #calculate the total flow going into the node 219 | flowin = np.sum(flowmatrix[:, node]*self.convratematrix[:, node]) + self.supply[node]*(1 - self.node_fail_evol_track[-1][node]) 220 | #Some flow serves the node demand; the remaining part goes to the following distribution process 221 | if np.round(flowin, 3) >= np.round(self.demand[node], 3): 222 | satisfynode[node] = self.demand[node] 223 | flowin -= self.demand[node] 224 | else: 225 | satisfynode[node] = flowin/2 #if the inflow cannot cover the node demand, supply half of the inflow 226 | flowin = flowin/2 227 | # print('satisfied power demand', satisfynode[-self.power.nodenum:]/self.demand[-self.power.nodenum:]) 228 | 229 | if len(self.initial_fail_seq) != 0: 230 | performance_gas = np.sum(satisfynode[:self.gas.nodenum])/np.sum(self.demand[:self.gas.nodenum]) 231 | performance_power = np.sum(satisfynode[-self.power.nodenum:])/np.sum(self.demand[-self.power.nodenum:]) 232 | else: 233 | performance_gas = 1 234 | performance_power = 1 235 | performance_temp = np.mean([performance_gas, performance_power]) 236 | self.performance.append(performance_temp) #Track down the performance 237 | 238 | #Redistribute the flow, here we can introduce some randomness to account for the uncertainty 239 | if (np.sum(adjmatrix[node, :]) == 0 or np.sum(self.flowmatrix_evol[-2][node, :]) == 0): 240 | flowout = np.zeros(self.nodenum, dtype = float) 241 | else: 242 | if (np.sum(adjmatrix[node, :]) != np.sum(self.adjmatrix_evol[-2][node, :])): # Some links fail, redistribute evenly with some random noise 243 | flowout = 1/np.sum(adjmatrix[node, :])*flowin*adjmatrix[node, :] 244 | 245 | if (flowin != 0): #The uncertainty only happens when there are multiple out-links and inflow !=0 246 | index = np.random.choice(np.argwhere(flowout != 0).reshape(-1)) #flow - beta where beta~U(0, flow/2)
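# the perturbation below conserves total outflow: every nonzero out-link (including the chosen
# index) first gains unit_flow, then the chosen link gives back unit_flow + noise_flow, so its
# net loss of noise_flow equals the (n-1)*unit_flow gained by the other out-links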
247 | if (len(np.argwhere(flowout != 0).reshape(-1)) != 1): 248 | noise_flow = np.random.rand()*flowout[index]/2 249 | unit_flow = noise_flow/(len(np.argwhere(flowout != 0)) - 1) 250 | 251 | flowout[flowout != 0] = flowout[flowout != 0] + unit_flow 252 | flowout[index] = flowout[index] - unit_flow - noise_flow 253 | 254 | else: #No links fail, redistribute according to the ratio of flow at last time step 255 | flowout = self.flowmatrix_evol[-2][node, :]/np.sum(self.flowmatrix_evol[-2][node, :])*flowin 256 | 257 | flowmatrix[node, :] = flowout 258 | 259 | node_seq_track = np.zeros(self.nodenum, dtype = int) #1 - failure, 0 - survive at current time step 260 | link_seq_track = np.zeros(self.arcnum, dtype = int) #1 - failure, 0 - survive at current time step 261 | 262 | for i in range(self.arcnum): 263 | node1, node2 = self.arclist[i, 0], self.arclist[i, 1] 264 | if (np.abs(flowmatrix[node1, node2]) > (1 + redun_rate)*self.flowcapmatrix[node1, node2]): 265 | # print(node1, node2, flowmatrix[node1, node2], self.flowcapmatrix[node1, node2]) 266 | link_seq_track[i] = 1 267 | 268 | #node failure caused by flow overload 269 | for i in range(self.nodenum): 270 | # print(i, np.sum(flowmatrix[:, i]*self.convratematrix[:, i]), np.sum(self.flowmatrix_evol[0][:, i]*self.convratematrix[:, i])) 271 | if ((np.abs(np.sum(flowmatrix[:, i]*self.convratematrix[:, i]))) > \ 272 | (1 + redun_rate)*np.abs(np.sum(self.flowmatrix_evol[0][:, i]*self.convratematrix[:, i]))): 273 | # print(time, 'node', i, np.sum(flowmatrix[:, i]*self.convratematrix[:, i]), np.sum(self.flowmatrix_evol[0][:, i]*self.convratematrix[:, i])) 274 | node_seq_track[i] = 1 275 | 276 | self.node_fail_evol_track.append(node_seq_track) 277 | self.link_fail_evol_track.append(link_seq_track) 278 | time += 1 279 | 280 | self.satisfy_node_evol.append(satisfynode) 281 | 282 | node_seq = np.zeros(self.nodenum, dtype = int) #1 - failure, 0 - survive at current time step 283 | link_seq = np.zeros(self.arcnum, dtype = int) #1 - failure, 0 - survive at current time step 284 | #adjacent matrix update 285 | #link failure caused by flow overload 286 | for i in range(self.arcnum): 287 | node1, node2 = self.arclist[i, 0], self.arclist[i, 1] 288 | if (np.abs(flowmatrix[node1, node2]) > (1 + redun_rate)*self.flowcapmatrix[node1, node2]): 289 | link_seq[i] = 1 290 | 291 | #node failure caused by flow overload 292 | for i in range(self.nodenum): 293 | if ((np.abs(np.sum(flowmatrix[:, i]*self.convratematrix[:, i]))) > \ 294 | (1 + redun_rate)*np.abs(np.sum(self.flowmatrix_evol[0][:, i]*self.convratematrix[:, i]))): 295 | node_seq[i] = 1 296 | 297 | self.node_fail_evol.append(node_seq) 298 | self.link_fail_evol.append(link_seq) 299 | 300 | #Update the adjmatrix caused by failed nodes 301 | adjmatrix = self.update_matrix_node_fail(copy.copy(adjmatrix), node_seq) 302 | 303 | #Update the adjmatrix caused by failed links 304 | adjmatrix = self.update_matrix_link_fail(copy.copy(adjmatrix), link_seq) 305 | 306 | #Update the flow matrix 307 | flowmatrix = adjmatrix*flowmatrix 308 | 309 | self.adjmatrix_evol.append(adjmatrix) 310 | self.flowmatrix_evol.append(flowmatrix) 311 | 312 | #Check the stability: no newly failed nodes and links 313 | if (np.sum(link_seq) == 0 and np.sum(node_seq) == 0): 314 | break 315 | 316 | 317 | ########################## 318 | # to do: plot the networks 319 | def plot_inter_networks(self, link_df, is_save=True): 320 | import pygraphviz # local import: only needed for this unfinished plotting helper 321 | G = pygraphviz.AGraph(strict=False, directed=True) 322 | 323 | G.add_nodes_from(self.node_id_list) 324 | 325 | for i in
np.arange(len(link_df)): 326 | G.add_edge(link_df.iloc[i,0], link_df.iloc[i,1], label=link_df.iloc[i,3]) # pygraphviz uses add_edge; .iloc for positional indexing 327 | 328 | G.layout() 329 | if is_save: 330 | G.draw('inter_networks.pdf') 331 | else: 332 | G.draw() 333 | 334 | #%% 335 | def compare_attack_types(self, attack_types = ['randomness', 'dc', 'bc', 'kc', 'cc'], 336 | attack_portions=np.round(np.arange(0,1.001,0.05),2), 337 | redun_rate = 0.5, n_repeat_random=50): 338 | ''' 339 | obtain network performance given different failure types and failure rates 340 | 341 | inputs: 342 | attack_types, attack_portions, redun_rate - see initial_failure and cascading_failure 343 | n_repeat_random - int: number of repetitions for random attacks to obtain the average performance 344 | 345 | returns: 346 | performance_mean_df - df: mean performance after each attack type (columns) and 347 | attack portion (rows), averaged over n_repeat_random runs 348 | ''' 349 | performance = np.zeros([len(attack_portions), len(attack_types), n_repeat_random]) 350 | performance_mean = np.zeros([len(attack_portions), len(attack_types)]) 351 | 352 | for i in np.arange(len(attack_portions)): 353 | for j in np.arange(len(attack_types)): 354 | # repeat multiple times to obtain the average performance 355 | for k in np.arange(n_repeat_random): 356 | # print(k, "-th repetition for random attack") 357 | self.initial_failure(attack_types[j], attack_portions[i]) 358 | self.cascading_failure(redun_rate) 359 | performance_final_temp = self.performance[-1] 360 | performance[i,j,k] = performance_final_temp 361 | 362 | performance_mean[i,j] = np.mean(performance[i,j,:]) 363 | 364 | # convert array into pandas df 365 | performance_mean_df = pd.DataFrame(data=performance_mean, index=attack_portions.tolist(), columns=attack_types) 366 | print('Cascading failure ends.') 367 | 368 | return performance_mean_df 369 | 370 | 371 | def plot_performance_different_attack(self, attack_types = ['randomness', 'dc', 'bc', 'kc', 'cc'], 372 | attack_portions=np.round(np.arange(0,1.001,0.05),2), 373 | redun_rate = 0.2, n_repeat_random=50, is_save=True): 374 | # plot the performance of different attack types under different attack portions 375 | # attack both nodes and links? But links do not have a degree.
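# rows of performance_df are attack portions and columns are attack types; each entry is
# averaged over n_repeat_random runs in compare_attack_types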
376 | 377 | performance_df = self.compare_attack_types(attack_types=attack_types, attack_portions=attack_portions, 378 | redun_rate=redun_rate, n_repeat_random=n_repeat_random) 379 | 380 | plt.figure(figsize=(4, 3)) 381 | 382 | styles = ['k-','b--','r-.X','c--o','m--s'][:performance_df.shape[1]] 383 | performance_df.plot(style=styles) 384 | 385 | plt.xlabel('Percentage of attacked nodes') 386 | plt.ylabel('Performance of networks') 387 | 388 | plt.ylim(bottom=-0.01) 389 | 390 | plt.grid(axis='both') 391 | 392 | legend_labels = ('Random', 'Degree-based', 'Betweenness-based', 'Katz-based', 'Closeness-based')[:performance_df.shape[1]] # order matches attack_types ['randomness', 'dc', 'bc', 'kc', 'cc'] 393 | plt.legend(legend_labels) 394 | 395 | if is_save: 396 | plt.savefig('compare_attack_types.pdf') 397 | plt.show() 398 | 399 | 400 | def get_comp_damage_state_temp(self, type='node'): 401 | 402 | # get index of failed components 403 | if type=='node': 404 | comp_fail_evol = copy.copy(self.node_fail_evol) 405 | else: 406 | comp_fail_evol = copy.copy(self.link_fail_evol) 407 | fail_comp_idx = [] 408 | for i in np.arange(len(comp_fail_evol)-1): 409 | fail_comp_idx += np.where(comp_fail_evol[i]==1)[0].tolist() 410 | 411 | 412 | # set the respective y_init to 0 if the component fails at either stage 413 | y_comp_init_temp = [1]*len(comp_fail_evol[0]) 414 | y_comp_init = [0 if i in fail_comp_idx else item for i,item in enumerate(y_comp_init_temp)] 415 | 416 | return y_comp_init 417 | 418 | def get_damage_state(self, attack_types='randomness', 419 | attack_portions=0.2, 420 | redun_rate=0.2): 421 | ''' 422 | get final damage state of components from the results of cascading failure at each time step 423 | feed into the optimization problem as y_init, the initial damage state of components before the restoration is initiated 424 | ''' 425 | 426 | # simulate initial attack 427 | self.initial_failure(attack_types, attack_portions) 428 | 429 | # simulate cascading failure 430 | self.cascading_failure(redun_rate) 431 | 432 | # index of components that fail during failure propagation 433 | # at t=0, the components that fail due to the direct attack 434 | y_node_init, y_link_init = self.get_comp_damage_state_temp(type='node'), self.get_comp_damage_state_temp(type='link') 435 | 436 | return y_node_init, y_link_init 437 | 438 | # optimize the repair schedule 439 | def optimize_restore(self, attack_types = 'randomness', 440 | attack_portions=0.2, 441 | redun_rate=0.2, 442 | model_type='flow'): 443 | 444 | '''optimize the restoration of damaged components 445 | input: 446 | y_node_init and y_arc_init - list: =1 if not damaged, and 0 otherwise 447 | arcs damaged - dictionary: xxx 448 | 449 | output: 450 | resil_over_time 451 | components to restore at each time step, i.e. the optimal schedule 452 | 453 | notes: 454 | devise a minimal working example/as small as possible 455 | build up damage scenario to test the program 456 | scenario 1: no components are damaged 457 | scenario 2: one node or link is damaged 458 | scenario 3: all nodes are damaged 459 | scenario 4: all components are damaged 460 | make sure the code matches the model 461 | bugs: 462 | wrong index or indentation 463 | differentiate between idx and the real value, e.g. i != node[i] 464 | refs.: 465 | https://www.python-mip.com/ 466 | https://pysal.org/spaghetti/notebooks/transportation-problem.html 467 | ''' 468 | # x[i,t] - binary: whether or not to restore a node at time t, 0 otherwise. 469 | # y[i,t] - binary: whether or not a link functions at time t, 0 otherwise.
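# the model below linearizes products of binaries with auxiliary variables: for
# z = y1*y2*y3, add z <= y1, z <= y2, z <= y3, and z >= y1 + y2 + y3 - 2 (see 4.4.1.1)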
        # 0 get the initial damage state
        y_node_init, y_arc_init = self.get_damage_state(attack_types=attack_types,
                                                        attack_portions=attack_portions,
                                                        redun_rate=redun_rate)

        # 1 declare and initialize the model
        model = Model()

        # 2 add decision variables
        # 2.1 schedule of repairing components
        num_node = copy.copy(self.nodenum)
        num_arc = copy.copy(self.arcnum)
        # maximum number of components that can be restored per time period
        num_restore_max = 2
        # planning horizon: enough time periods to repair all damaged components
        num_damage_comp = y_arc_init.count(0) + y_node_init.count(0)
        time_list = list(range(ceil(num_damage_comp/num_restore_max) + 1))
        # use distinct name prefixes so that node and arc variables do not collide
        x_node = [[model.add_var(name="x_node({},{})".format(i, t), var_type=BINARY) for t in time_list] for i in np.arange(num_node)]
        x_arc = [[model.add_var(name="x_arc({},{})".format(k, t), var_type=BINARY) for t in time_list] for k in np.arange(num_arc)]

        # 2.2 functional state of nodes and arcs
        y_node = [[model.add_var(name="y_node({},{})".format(i, t), var_type=BINARY) for t in time_list] for i in np.arange(num_node)]
        y_arc = [[model.add_var(name="y_arc({},{})".format(k, t), var_type=BINARY) for t in time_list] for k in np.arange(num_arc)]

        # 2.3 flow, supply, demand, and slack
        flow = [[model.add_var(name="flow({},{})".format(k, t), lb=0) for t in time_list] for k in np.arange(num_arc)]
        supply = [[model.add_var(name="supply({},{})".format(i, t), lb=0) for t in time_list] for i in np.arange(num_node)]
        slack = [[model.add_var(name="slack({},{})".format(i, t), lb=0) for t in time_list] for i in np.arange(num_node)]

        # auxiliary variable: resilience at each time step
        resil = [model.add_var(name="resilience({})".format(t), lb=0) for t in time_list]

        # 3 objective function: minimize -1 * the sum of resilience over time,
        # i.e. maximize the total resilience
        model.objective = minimize(xsum(-1*resil[t] for t in time_list))


        # 4 add constraints
        # 4.1 each damaged component is restored in exactly one time period
        # These two sets of constraints might not be necessary
        for i in np.arange(num_node):
            if y_node_init[i] == 0:
                model.add_constr(xsum(x_node[i][t] for t in time_list) == 1)
        for k in np.arange(num_arc):
            if y_arc_init[k] == 0:
                model.add_constr(xsum(x_arc[k][t] for t in time_list) == 1)

        # 4.2 the number of components restored in a time period is capped
        for t in time_list:
            model.add_constr(xsum(x_node[i][t] for i in np.arange(num_node)) +
                             xsum(x_arc[k][t] for k in np.arange(num_arc)) <= num_restore_max)


        # 4.3 flow conservation
        # outflow - inflow = supply + slack - demand
        # node in each network: exclude inflow from interdependent links onto that node
        for i in np.arange(num_node):
            for t in time_list:
                model.add_constr(xsum(flow[k][t] for k in np.arange(num_arc) if self.node_id_list[i]==self.arc_id_list[k][0]) -
                                 xsum(flow[k][t] for k in np.arange(num_arc) if
                                      (self.node_id_list[i]==self.arc_id_list[k][1] and self.conv_rate[k]==1))
                                 == supply[i][t] + slack[i][t] - self.demand[i])

        # end node of interdependent links: supply <= converted supply/inflow to that node
        for i in np.arange(num_node):
            for k in np.arange(num_arc):
                if self.arc_id_list[k][1]==self.node_id_list[i] and self.conv_rate[k]!=1:
                    for t in time_list:
                        model.add_constr(supply[i][t] <= self.conv_rate[k]*flow[k][t])

        # 4.4 upper bounds on flow, supply, and slack
        # 4.4.1.1 add auxiliary variables to linearize the product of binary variables
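        # for binary a, b, c, the product z = a*b*c is linearized as
        #   z <= a,  z <= b,  z <= c,  z >= a + b + c - 2,
        # so that z = 1 exactly when all three binaries equal 1; below,
        # a = y_node[start], b = y_node[end], c = y_arc[k]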
        aux_z_arc = [[model.add_var(name="aux_z_arc({},{})".format(k, t), var_type=BINARY)
                      for t in time_list] for k in np.arange(num_arc)]
        for t in time_list:
            for k in np.arange(num_arc):
                # use auxiliary variables to linearize the product of binary variables
                start_node_idx = self.node_id_list.index(self.arc_id_list[k][0])
                end_node_idx = self.node_id_list.index(self.arc_id_list[k][1])
                n_binary_var = 3
                # aux_z <= each binary variable
                model.add_constr(aux_z_arc[k][t] <= y_node[start_node_idx][t])
                model.add_constr(aux_z_arc[k][t] <= y_node[end_node_idx][t])
                model.add_constr(aux_z_arc[k][t] <= y_arc[k][t])
                # aux_z >= sum of the binary variables - number of binary variables + 1;
                # active when all binary variables == 1
                model.add_constr(aux_z_arc[k][t] >= y_node[start_node_idx][t] + y_node[end_node_idx][t] +
                                 y_arc[k][t] - (n_binary_var-1))

                # 4.4.1.2 flow cap
                # flow will be zero unless the start node, the end node, and the arc itself are all functional
                if model_type=='flow':
                    model.add_constr(flow[k][t] <= aux_z_arc[k][t]*self.flow_cap[k])
                else:
                    model.add_constr(flow[k][t] <= aux_z_arc[k][t]*1e5)


            # 4.4.2 slack and supply caps
            for i in np.arange(num_node):
                model.add_constr(slack[i][t] <= self.demand[i])
                if model_type=='flow':
                    model.add_constr(supply[i][t] <= y_node[i][t]*self.supply_cap[i])
                else:
                    model.add_constr(supply[i][t] <= y_node[i][t]*1e5)


        # 4.5 evolution of component states over time
        for t in time_list:
            # 4.5.1 non-deteriorating state of components
            if t==0:
                # fix the state at t=0 to the initial damage state
                for i in np.arange(num_node):
                    model.add_constr(y_node_init[i] <= y_node[i][t])
                    model.add_constr(y_node_init[i] >= y_node[i][t])
                for k in np.arange(num_arc):
                    model.add_constr(y_arc_init[k] <= y_arc[k][t])
                    model.add_constr(y_arc_init[k] >= y_arc[k][t])
            else:
                for i in np.arange(num_node):
                    model.add_constr(y_node[i][t-1] <= y_node[i][t])
                for k in np.arange(num_arc):
                    model.add_constr(y_arc[k][t-1] <= y_arc[k][t])
                # 4.5.2 components will be functional once repaired
                for i in np.arange(num_node):
                    model.add_constr(y_node[i][t] <= y_node[i][t-1] + x_node[i][t-1])
                for k in np.arange(num_arc):
                    model.add_constr(y_arc[k][t] <= y_arc[k][t-1] + x_arc[k][t-1])


        # 4.6 calculate flow-based resilience or topology-based resilience
        node_demand_idx = [idx for idx, demand_val in enumerate(self.demand) if demand_val > 0]

        if model_type == 'flow':
            for t in time_list:
                # proportion of satisfied demand
                demand_satisfy_rate = xsum(1 - slack[i][t]/self.demand[i] for i in node_demand_idx)/ \
                                      len(node_demand_idx)
                model.add_constr(resil[t] == demand_satisfy_rate)
        else:
            # add an auxiliary variable at each node, indicating whether or not the node
            # can receive supply from supply nodes
            aux_z_node = [[model.add_var(name="aux_z_node({},{})".format(i, t), var_type=BINARY)
                           for t in time_list] for i in np.arange(num_node)]
            M_small = 1e-5
            for t in time_list:
                for i in node_demand_idx:
                    # proportion of demand nodes whose slack is lower than demand, i.e. the
                    # demand node can receive some supply, irrespective of the amount:
                    # if slack = demand, aux_z_node = 0; else aux_z_node = 1
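                    # a worked example, assuming demand = 2:
                    #   slack = 2 (no supply):     2/2 = 1, and 1 <= 1 - M_small*aux forces aux = 0
                    #   slack = 1.5 (some supply): 0.75 >= 1 - aux forces aux = 1, while
                    #                              0.75 <= 1 - M_small still holds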
                    model.add_constr(slack[i][t]/self.demand[i] <= 1 - M_small*aux_z_node[i][t])
                    model.add_constr(slack[i][t]/self.demand[i] >= 1 - aux_z_node[i][t])

                # resilience at each time point = proportion of demand nodes that can receive supply
                model.add_constr(resil[t] == xsum(aux_z_node[i][t] for i in node_demand_idx)/len(node_demand_idx))

        # 5.0 solve the model and check the status
        model.max_gap = 1e-5
        status = model.optimize(max_seconds=60*5)

        # 6.0 query the optimization results
        # 6.1 check the solution status; also accept a feasible (not proven optimal)
        # solution found within the time limit
        if status in (OptimizationStatus.OPTIMAL, OptimizationStatus.FEASIBLE):
            # print('optimal solution: {}'.format(model.objective_value))

            # 6.2 get the objective value
            obj_value = model.objective_value

            # # 6.3 print the number of variables and constraints
            # print('model has {} vars, {} constraints and {} nzs'.format(model.num_cols,
            #       model.num_rows, model.num_nz))

            return obj_value, x_node, x_arc, y_node, y_arc, resil, time_list, y_node_init, y_arc_init

        else:
            print('Infeasible or unbounded problem')

    # 2 get the solution
    # 2.1 convert the solution results
    def convert_solu_list_to_arr(self, var, dim=2):
        '''convert a list of python-mip variables (holding the solution) to a numpy array
        input:
            var - list of mip variables: e.g. var=y_arc, the solution to the functional state of arcs
        '''
        if dim==2:
            var_arr = np.zeros([len(var), len(var[0])])
            for j in np.arange(len(var)):
                for t in np.arange(len(var[0])):
                    var_arr[j,t] = var[j][t].x
        else:
            var_arr = np.ones([len(var), 1])
            for t in np.arange(len(var)):
                var_arr[t,0] = var[t].x
        var_arr[np.isnan(var_arr)] = 1.0
        return var_arr

    # 2.2 solve the model and extract the solutions
    def get_solution(self, attack_types='randomness',
                     attack_portions=0.2,
                     redun_rate=0.2,
                     model_type='flow'):

        # 1 solve the model
        obj_value, x_node, x_arc, y_node, y_arc, resil, time_list, y_node_init, y_arc_init = \
            self.optimize_restore(attack_types=attack_types, attack_portions=attack_portions,
                                  redun_rate=redun_rate, model_type=model_type)

        # 2 extract the results
        # extract x and y
        x_node_arr = self.convert_solu_list_to_arr(x_node)
        x_arc_arr = self.convert_solu_list_to_arr(x_arc)

        #y_arc_arr = convert_solu_list_to_arr(y_arc, time_list)
        #y_node_arr = convert_solu_list_to_arr(y_node, time_list)

        # extract resilience
        resil_arr = self.convert_solu_list_to_arr(resil, dim=1)
        #supply_arr = convert_solu_list_to_arr(supply, time_list)

        return x_node_arr, x_arc_arr, resil_arr, time_list, y_node_init, y_arc_init


    # 3 visualize the results
    # 3.1.1 prepare the schedule data
    def get_schedule_df(self, attack_types='randomness',
                        attack_portions=0.2,
                        redun_rate=0.2,
                        model_type='flow'):
        # store the scheduling results in a df
        # df: index: damaged component; columns: start time, duration, finish time
        # get the damaged component ids

        # pass model_type through so that the topology-based model is scheduled correctly as well
        x_node_arr, x_arc_arr, resil_arr, time_list, y_node_init, y_arc_init = \
            self.get_solution(attack_types=attack_types, attack_portions=attack_portions,
                              redun_rate=redun_rate, model_type=model_type)

        comp_list = self.node_id_list + self.arc_id_list
        # due to the randomness in cascading failure, get_damage_state can only be called once,
        # inside the function that solves the problem
        # y_node_init, y_arc_init = self.get_damage_state(attack_types=attack_types, attack_portions=attack_portions, redun_rate=redun_rate)
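        # comp_list mixes node ids (scalars) and arc ids (tuples); e.g., with
        # comp_list = [1, 2, (1, 2)] and init_state_list = [0, 1, 0] (illustrative only),
        # damaged_comp_list below would be [1, (1, 2)]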
        init_state_list = y_node_init + y_arc_init
        damaged_comp_list = [comp_list[i] for i, item in enumerate(init_state_list) if item==0]

        # get the restoration start time points
        x_comp = np.concatenate((x_node_arr, x_arc_arr), axis=0)
        # select the rows of damaged components, i.e. those scheduled for restoration
        x_comp_damage = x_comp[np.amax(x_comp, axis=1)==1]
        # restoration start time of each damaged component
        restore_start_time = np.argmax(x_comp_damage, axis=1)

        # create the df and sort by restoration start time
        schedule_df = pd.DataFrame({'restore_start_time': restore_start_time}, index=damaged_comp_list)
        schedule_df = schedule_df.sort_values(by='restore_start_time')
        schedule_df['duration'] = 1
        schedule_df['restore_end_time'] = schedule_df['restore_start_time'] + schedule_df['duration']

        return schedule_df

    # 3.1.2 plot the schedule
    def plot_repair_schedule(self, attack_types='randomness',
                             attack_portions=0.2,
                             redun_rate=0.2, is_save=True,
                             model_type='flow'):
        # plot the restoration schedule
        # refs.: https://towardsdatascience.com/from-the-bridge-to-tasks-planning-build-gannt-chart-in-python-r-and-tableau-7256fb7615f8
        #        https://plotly.com/python/gantt/

        # get the schedule df
        schedule_df = self.get_schedule_df(attack_types=attack_types, attack_portions=attack_portions,
                                           redun_rate=redun_rate, model_type=model_type)

        # plot parameters
        max_time = schedule_df['restore_end_time'].max()
        bar_ht = 0.75
        off_ht = 0.5

        fig, ax = plt.subplots(figsize=(1+max_time/1.25, schedule_df.shape[0]/3))
        for i in np.arange(schedule_df.shape[0]):
            ax.broken_barh([(schedule_df['restore_start_time'].iloc[i], schedule_df['duration'].iloc[i])],
                           yrange=(i+off_ht/4, bar_ht),
                           facecolors='tab:blue' if isinstance(schedule_df.index[i], tuple) else 'tab:red',
                           edgecolor="none")

        ax.set_title('Repair schedule after {} attack'.format(attack_types))

        ax.set_ylabel('Component')
        ax.set_xlabel('Time period')

        ax.set_yticks([i + off_ht for i in np.arange(schedule_df.shape[0])])
        y_tick_labels = ["%s->%s" % item if isinstance(item, tuple) else item for item in schedule_df.index]
        ax.set_yticklabels(y_tick_labels)
        ax.set_ylim(bottom=-off_ht/2, top=schedule_df.shape[0]+off_ht/2)

        ax.set_xticks(np.arange(0, max_time+1, 1.0))
        ax.set_xticklabels(np.arange(1, max_time+2, 1))
        ax.set_xlim(left=-off_ht/2, right=max_time+off_ht/2)

        ax.grid(True)

        # draw the legend manually: links are indexed by (start, end) tuples, nodes by scalars
        colors = {'Node': 'tab:red', 'Link': 'tab:blue'}
        labels = list(colors.keys())
        handles = [plt.Rectangle((0, 0), 0.1, 0.1, color=colors[label]) for label in labels]
        plt.legend(handles, labels, loc='lower right')

        if is_save:
            plt.savefig('repair_schedule_{}_{}.pdf'.format(attack_types, model_type))

        plt.show()


    def get_resil_df(self, attack_types=['randomness'],
                     attack_portions=0.2,
                     redun_rate=0.2,
                     n_repeat_random=50,
                     model_type='flow'):
        '''get the df of resilience over time under each attack type

        output:
            resil_df - df: mean resilience at each time period, one column per attack type
        '''

        # normalize attack_types to a list so that len() and indexing below also work
        # when a single attack type is passed as a string
        if not isinstance(attack_types, list):
            attack_types = [attack_types]
        n_attack_types = len(attack_types)
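        # resil_arr_3d is laid out as [time period, attack type, repetition] and is
        # initialized with ones, so periods after full restoration keep resilience = 1;
        # nodenum + arcnum is an upper bound on the number of time periods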
        time_max = 1
        resil_arr_3d = np.ones([self.nodenum+self.arcnum, n_attack_types, n_repeat_random])  # the time dimension is sufficiently large
        for i in np.arange(n_attack_types):
            for j in np.arange(n_repeat_random):
                # deterministic (centrality-based) attacks yield the same damage scenario in
                # every repetition, so solve the model once (j == 0) and copy the result;
                # random attacks are re-solved in every repetition
                if attack_types[i] == 'randomness' or j == 0:
                    x_node_arr, x_arc_arr, resil_arr, time_list, y_node_init, y_arc_init = \
                        self.get_solution(attack_types=attack_types[i],
                                          attack_portions=attack_portions,
                                          redun_rate=redun_rate,
                                          model_type=model_type)
                    # resilience at each time point
                    resil_temp = resil_arr
                    # print('resil: ', resil_temp)
                    # print('time list: ', time_list)
                    # time to full restoration
                    time_max_temp = [idx for idx, val in enumerate(time_list) if resil_temp[idx, 0] >= 1][0] + 1
                    resil_arr_3d[:time_max_temp, i, j] = resil_temp[:time_max_temp].ravel()
                    # update the number of time periods
                    if time_max_temp > time_max:
                        time_max = time_max_temp
                else:
                    resil_arr_3d[:time_max_temp, i, j] = resil_arr_3d[:time_max_temp, i, j-1]
        # print('time max: ', time_max)
        # get the mean over the n_repeat_random repetitions
        resil_arr_mean = np.mean(resil_arr_3d, axis=2)
        # create the df, with the time periods as the index
        idx = list(range(1, time_max+1))
        resil_df = pd.DataFrame(data=resil_arr_mean[:time_max, :], columns=attack_types, index=idx)

        return resil_df


    def plot_resil(self, attack_types=['randomness', 'dc'],
                   attack_portions=0.2,
                   redun_rate=0.2,
                   n_repeat_random=50,
                   is_save=True,
                   model_type='flow'):

        resil_df = self.get_resil_df(attack_types=attack_types, attack_portions=attack_portions,
                                     redun_rate=redun_rate, n_repeat_random=n_repeat_random,
                                     model_type=model_type)

        plt.figure(figsize=(4, 3))

        styles = ['k-', 'b--', 'r-.X', 'c--o', 'm--s'][:resil_df.shape[1]]
        resil_df.plot(style=styles, ax=plt.gca())  # draw on the figure created above

        # show integers only in the xticks
        plt.xticks(resil_df.index.to_list())

        plt.xlabel('Time period')
        plt.ylabel('Resilience')

        plt.ylim(top=1.02)
        if model_type != 'flow':
            plt.ylim(bottom=0)

        plt.grid(axis='both')

        # map the attack type codes to legend labels so that the order always matches the columns
        label_map = {'randomness': 'Random', 'dc': 'Degree-based', 'bc': 'Betweenness-based',
                     'kc': 'Katz-based', 'cc': 'Closeness-based'}
        legend_labels = [label_map[item] for item in resil_df.columns]
        plt.legend(legend_labels, loc='lower right')

        if is_save:
            plt.savefig('resilience_{}_{}.pdf'.format(attack_portions, model_type))
        plt.show()


    # 3.2 restoration rate over time


def main():
    # create the networks
    power = network(dt.p_nodedata, dt.p_arcdata)  # instantiate the power network
    gas = network(dt.g_nodedata, dt.g_arcdata)  # instantiate the gas network
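    # System (presumably defined in infrasnetwork.py) couples the two networks
    # through the gas-to-power interdependent arcs in dt.g2p_arcdata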
    s = System(power, gas, dt.g2p_arcdata)  # instantiate the system class

    ## compare the performance under different types of attacks
    #attack_portions = np.round(np.arange(0,1.02,0.05), 2)
    attack_types = ['randomness', 'dc', 'bc', 'cc']

    # plot
    # performance under different types of attacks
    REDUN_RATE = 0.4
    N_REPEAT = 50
    is_save = True
    # s.plot_performance_different_attack(attack_types=attack_types,
    #                                     attack_portions=np.round(np.arange(0,0.5,0.1),2),
    #                                     redun_rate=REDUN_RATE, n_repeat_random=N_REPEAT,
    #                                     is_save=is_save)

    # restoration schedule after cascading failures under different types of attacks
    attack_types = 'cc'
    model_type = 'topo'
    attack_portions = 0.6
    # s.plot_repair_schedule(attack_types=attack_types, attack_portions=attack_portions,
    #                        redun_rate=REDUN_RATE, is_save=is_save,
    #                        model_type=model_type)

    attack_types = ['randomness', 'dc', 'bc', 'cc']
    s.plot_resil(attack_types=attack_types, attack_portions=attack_portions,
                 redun_rate=REDUN_RATE, n_repeat_random=N_REPEAT,
                 is_save=is_save, model_type=model_type)

if __name__ == '__main__':
    main()

## test the optimization model
# 1 import data
# 1.1 prepare the arc data
# def get_arc_id_list(start_node, end_node):
#     '''get the arc list from the lists of start nodes and end nodes
#
#     returns:
#         a list of tuple elements, e.g. (start node, end node)
#     '''
#     arc_id_list = []
#     for i in np.arange(len(start_node)):
#         arc_id_list.append((start_node[i], end_node[i]))
#
#     return arc_id_list

# # 1.2 import data
# def import_data():
#
#     # 1 import data
#     node_data = pd.read_csv('./data/case_6_node/node_data.csv')
#     arc_data = pd.read_csv('./data/case_6_node/arc_data.csv')
#
#     # 2 extract data
#     # 2.1 node
#     demand, supply_cap = node_data.demand.astype(float).tolist(),\
#         node_data.supply_cap.tolist()  # demand should be in float format
#     node_id_list = node_data.node_id.tolist()
#
#     # 2.2 arc
#     # get the list of arcs
#     start_node = arc_data.start_node
#     end_node = arc_data.end_node
#     arc_id_list = get_arc_id_list(start_node, end_node)
#
#     # flow
#     flow_cap = arc_data.flow_cap.astype(float).tolist()
#     conv_rate = arc_data.conv_rate.astype(float).tolist()
#
#     # initial state of nodes and arcs
#     y_node_init, y_arc_init = node_data.y_node_init.astype(float).tolist(),\
#         arc_data.y_arc_init.astype(float).tolist()
#
#     return node_id_list, arc_id_list, y_node_init, y_arc_init, demand, flow_cap, supply_cap, conv_rate
--------------------------------------------------------------------------------