├── IMAGES ├── 10.1287opre.47.1.38.svg ├── j.apenergy.2019.04.084.svg ├── test └── wfi.png ├── LICENSE ├── MARS.py ├── README.md ├── WindFarmGeneticToolbox.py └── main.py /IMAGES/10.1287opre.47.1.38.svg: -------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 6 | 7 | 8 | 10 | 11 | 12 | 13 | 16 | 17 | 18 | 20 | 22 | DOI 23 | 24 | 25 | DOI 26 | 27 | 29 | 10.1287/opre.47.1.38 30 | 31 | 32 | 10.1287/opre.47.1.38 33 | 34 | 35 | -------------------------------------------------------------------------------- /IMAGES/j.apenergy.2019.04.084.svg: -------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 6 | 7 | 8 | 10 | 11 | 12 | 13 | 16 | 17 | 18 | 20 | 22 | DOI 23 | 24 | 25 | DOI 26 | 27 | 29 | 10.1016/j.apenergy.2019.04.084 30 | 31 | 32 | 10.1016/j.apenergy.2019.04.084 33 | 34 | 35 | -------------------------------------------------------------------------------- /IMAGES/test: -------------------------------------------------------------------------------- 1 | test 2 | -------------------------------------------------------------------------------- /IMAGES/wfi.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JuXinglong/WFLOP_Python/b1fc0d602928ee3f1fed5f8dc0de0a4a37a06bff/IMAGES/wfi.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 JuXinglong 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MARS.py: -------------------------------------------------------------------------------- 1 | # if you would like to use the code, please cite 2 | # Xinglong Ju, and Victoria C. P. Chen. "A MARS Python version using truncated linear function.", 2019. 3 | # The DOI can be found here at https://github.com/JuXinglong/WFLOP_Python 4 | # from statistics import mean 5 | import numpy as np 6 | import math 7 | import matplotlib.pyplot as plt 8 | from mpl_toolkits.mplot3d import Axes3D 9 | import pickle 10 | from sklearn import preprocessing 11 | import matplotlib.cm as cm 12 | 13 | 14 | class MARS: 15 | SST = None 16 | # SST_D_n_1 = None # SST / (n-1) to calculate r_square_adjust 17 | SSE = None 18 | LOF = None 19 | r_square = 0 20 | r_square_adjust = 0 21 | static_knots = {} # {0:[possible values for variable 1],1:[-1.61,-1.5,0,1.2]} 22 | knot_index_step = [] 23 | n_basis_fn = 0 # number of basis functions 24 | basis_fns = [] 25 | coefficients = None 26 | auto_stop = False 27 | y_bar = None 28 | x_middles = None 29 | x_half_ranges = None 30 | y_mean = None 31 | y_scale = None 32 | 33 | x_original = None 34 | y_original = None 35 | 36 | def __init__(self, n_variables=None, n_points=None, x=None, y=None, n_candidate_knots=[], n_max_basis_functions=0, 37 | n_max_interactions=2, 38 | difference=0.000002): 39 | self.n_variables = n_variables 40 | self.n_points = n_points 41 | if x is not None: 42 | self.x_original = x 43 | self.y_original = y 44 | 45 | x_scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1)).fit(x) 46 | self.x_middles = (x_scaler.data_max_ + x_scaler.data_min_) * 0.5 47 | self.x_half_ranges = x_scaler.data_range_ * 0.5 48 | 49 | self.x = x_scaler.transform(x) 50 | 51 | if y is not None: 52 | self.y = y 53 | self.y_mean = [0] 54 | self.y_scale = [1] 55 | 56 | 57 | self.n_candidate_knots = n_candidate_knots 58 | self.n_max_basis_functions = n_max_basis_functions 59 | if self.n_max_basis_functions == 0: 60 | self.auto_stop = True 61 | self.n_max_interactions = n_max_interactions 62 | self.difference = difference 63 | 64 | 65 | 66 | def X_inverse_scale(self, x_scaled): 67 | n = len(x_scaled) 68 | 69 | x_scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1)) 70 | 71 | x_scaler.scale_ = np.zeros((self.n_variables), np.float64) 72 | for i in range(self.n_variables): 73 | x_scaler.scale_[i] = 1.0 / self.x_half_ranges[i] 74 | x_scaler.min_ = np.zeros((self.n_variables), np.float64) 75 | for i in range(self.n_variables): 76 | x_scaler.min_[i] = -self.x_middles[i] / self.x_half_ranges[i] 77 | return x_scaler.inverse_transform(x_scaled) 78 | 79 | def predict(self, x_new): 80 | n = len(x_new) 81 | # x_scaler = preprocessing.StandardScaler() 82 | x_scaler = preprocessing.MinMaxScaler(feature_range=(-1, 1)) 83 | # x_scaler.mean_ = self.x_means 84 | # x_scaler.scale_ = self.x_scales 85 | x_scaler.scale_ = np.zeros((self.n_variables), np.float64) 86 | for i in range(self.n_variables): 87 | x_scaler.scale_[i] = 1.0 / self.x_half_ranges[i] 88 | x_scaler.min_ = np.zeros((self.n_variables), np.float64) 89 | for i in range(self.n_variables): 90 | x_scaler.min_[i] = -self.x_middles[i] / self.x_half_ranges[i] 91 | x_new = x_scaler.transform(x_new) 92 | x_matrix = np.zeros((n, 1 + self.n_basis_fn), np.float64) 93 | for ind_bf in range(1 + self.n_basis_fn): 94 | for ind_x in range(n): 95 | x_matrix[ind_x, ind_bf] = self.basis_fns[ind_bf].cal_bf_value(x_new[ind_x]) 96 | 97 | y = np.matmul(x_matrix, self.coefficients) 98 | 99 | return y 100 | 101 | def save_mars_model_to_file(self): 102 | 103 | self.SST = None 104 | self.SSE = None 105 | self.LOF = None 106 | self.r_square = None 107 | self.r_square_adjust = None 108 | self.static_knots = None 109 | self.knot_index_step = None 110 | self.auto_stop = None 111 | self.y_bar = None 112 | self.x_original = None 113 | self.y_original = None 114 | return 115 | 116 | 117 | 118 | def MARS_regress(self): 119 | self.y_bar = np.mean(self.y[:, 0]) 120 | self.cal_SST() 121 | self.init_LOF_SSE_R_square() 122 | self.get_static_knots() 123 | self.cal_knot_index_step() 124 | 125 | bf = BasisFunction() 126 | self.basis_fns.append(bf) 127 | 128 | m_size = 100 # matrix size, columns of X matrix 129 | 130 | x_matrix = np.ones((self.n_points, m_size), np.float64) 131 | xTy_matrix = np.zeros((m_size, 1), np.float64) 132 | xTy_matrix[0, 0] = self.n_points * self.y_bar 133 | 134 | LTb_matrix = np.zeros((m_size, 1), np.float64) 135 | LTb_matrix[0, 0] = xTy_matrix[0, 0] / math.sqrt(self.n_points) 136 | 137 | b_matrix = np.zeros((m_size, 1), np.float64) 138 | b_matrix[0, 0] = self.y_bar 139 | 140 | xTx_matrix = np.zeros((m_size, m_size), np.float64) 141 | xTx_matrix[0, 0] = self.n_points 142 | L_matrix = np.zeros((m_size, m_size), np.float64) 143 | L_matrix[0, 0] = math.sqrt(xTx_matrix[0, 0]) 144 | 145 | addon_size = 100 146 | while self.n_basis_fn < self.n_max_basis_functions or self.auto_stop: 147 | if m_size - self.n_basis_fn - 1 < 2: 148 | x_m_addon = np.array([[1] * addon_size] * self.n_points, np.float64) 149 | x_matrix = np.append(x_matrix, x_m_addon, axis=1) 150 | 151 | xTy_matrix_addon = np.array([[0]] * addon_size, np.float64) 152 | xTy_matrix = np.append(xTy_matrix, xTy_matrix_addon, axis=0) 153 | 154 | LTb_matrix_addon = np.array([[0]] * addon_size, np.float64) 155 | LTb_matrix = np.append(LTb_matrix, LTb_matrix_addon, axis=0) 156 | 157 | b_matrix_addon = np.array([[0]] * addon_size, np.float64) 158 | b_matrix = np.append(b_matrix, b_matrix_addon, axis=0) 159 | 160 | xTx_matrix_addon_column = np.array([[0] * addon_size] * m_size, np.float64) 161 | xTx_matrix = np.append(xTx_matrix, xTx_matrix_addon_column, axis=1) 162 | xTx_matrix_addon_row = np.array([[0] * (addon_size + m_size)] * addon_size, np.float64) 163 | xTx_matrix = np.append(xTx_matrix, xTx_matrix_addon_row, axis=0) 164 | 165 | L_matrix_addon_column = np.array([[0] * addon_size] * m_size, np.float64) 166 | L_matrix = np.append(L_matrix, L_matrix_addon_column, axis=1) 167 | L_matrix_addon_row = np.array([[0] * (addon_size + m_size)] * addon_size, np.float64) 168 | L_matrix = np.append(L_matrix, L_matrix_addon_row, axis=0) 169 | 170 | m_size += addon_size 171 | 172 | is_nbf_valid = False # is new basis function valid (nbf new basis function) 173 | bs_split_bf_index = -1 # best selected split function index (parent function index) (bs best selected) 174 | bs_variable_index = -1 # best selected variable index 175 | bs_knot_value = 0 # best selected knot value 176 | bs_knot_index = -1 # best selected knot index 177 | bs_positive_valid = False 178 | bs_negative_valid = False 179 | bs_LOF = self.LOF # best selected lack of fit 180 | new_LOF = self.LOF # 181 | for ind_bf in range(self.n_basis_fn + 1): 182 | if self.basis_fns[ind_bf].order >= self.n_max_interactions: 183 | continue 184 | selected_x_column = np.zeros((self.n_points, 1), np.float64) 185 | selected_x_column[:, 0] = x_matrix[:, ind_bf] 186 | for ind_variable in range(self.n_variables): 187 | ind_max = int((len(self.static_knots[ind_variable]) - 1) / self.knot_index_step[ind_variable]) + 1 188 | for ind in range(ind_max): 189 | ind_knot = int(ind * self.knot_index_step[ind_variable]) 190 | if not self.is_knot_valid(self.basis_fns[ind_bf], ind_variable, 191 | self.static_knots[ind_variable][ind_knot]): 192 | continue 193 | positive_knot_item = KnotItem(self.static_knots[ind_variable][ind_knot], ind_variable, +1) 194 | negative_knot_item = KnotItem(self.static_knots[ind_variable][ind_knot], ind_variable, -1) 195 | positive_x_column = np.zeros((self.n_points, 1), np.float64) 196 | negative_x_column = np.zeros((self.n_points, 1), np.float64) 197 | self.cal_new_x_column_val(selected_x_column, positive_x_column, positive_knot_item) 198 | self.cal_new_x_column_val(selected_x_column, negative_x_column, negative_knot_item) 199 | is_positive_x_column_valid = self.is_new_x_col_valid(positive_x_column) 200 | is_negative_x_column_valid = self.is_new_x_col_valid(negative_x_column) 201 | # print(self.static_knots[ind_variable][ind_knot]) 202 | if is_positive_x_column_valid and is_negative_x_column_valid: 203 | x_matrix[:, self.n_basis_fn + 1] = positive_x_column[:, 0] 204 | x_matrix[:, self.n_basis_fn + 2] = negative_x_column[:, 0] 205 | self.update_xTx_matrix(x_matrix, xTx_matrix, 2) 206 | self.update_xTy_matrix(x_matrix, xTy_matrix, 2) 207 | if not self.update_L_matrix(xTx_matrix, L_matrix, 2): 208 | continue 209 | new_LOF = self.cal_new_LOF(L_matrix, LTb_matrix, xTy_matrix, b_matrix, x_matrix, 2) 210 | if new_LOF < bs_LOF: 211 | is_nbf_valid = True # is new basis function valid (nbf new basis function) 212 | bs_split_bf_index = ind_bf # best selected split function index (parent function index) (bs best selected) 213 | bs_variable_index = ind_variable # best selected variable index 214 | bs_knot_value = self.static_knots[ind_variable][ind_knot] # best selected knot value 215 | bs_knot_index = ind_knot # best selected knot index 216 | bs_positive_valid = True 217 | bs_negative_valid = True 218 | bs_LOF = new_LOF # best selected lack of fit 219 | elif is_positive_x_column_valid: 220 | x_matrix[:, self.n_basis_fn + 1] = positive_x_column[:, 0] 221 | self.update_xTx_matrix(x_matrix, xTx_matrix, 1) 222 | self.update_xTy_matrix(x_matrix, xTy_matrix, 1) 223 | if not self.update_L_matrix(xTx_matrix, L_matrix, 1): 224 | continue 225 | new_LOF = self.cal_new_LOF(L_matrix, LTb_matrix, xTy_matrix, b_matrix, x_matrix, 1) 226 | if new_LOF < bs_LOF: 227 | is_nbf_valid = True # is new basis function valid (nbf new basis function) 228 | bs_split_bf_index = ind_bf # best selected split function index (parent function index) (bs best selected) 229 | bs_variable_index = ind_variable # best selected variable index 230 | bs_knot_value = self.static_knots[ind_variable][ind_knot] # best selected knot value 231 | bs_knot_index = ind_knot # best selected knot index 232 | bs_positive_valid = True 233 | bs_negative_valid = False 234 | bs_LOF = new_LOF # best selected lack of fit 235 | # print(xTy_matrix) 236 | # print(L_matrix) 237 | # print(xTx_matrix) 238 | # print(x_matrix) 239 | elif is_negative_x_column_valid: 240 | x_matrix[:, self.n_basis_fn + 1] = negative_x_column[:, 0] 241 | self.update_xTx_matrix(x_matrix, xTx_matrix, 1) 242 | self.update_xTy_matrix(x_matrix, xTy_matrix, 1) 243 | if not self.update_L_matrix(xTx_matrix, L_matrix, 1): 244 | continue 245 | new_LOF = self.cal_new_LOF(L_matrix, LTb_matrix, xTy_matrix, b_matrix, x_matrix, 1) 246 | if new_LOF < bs_LOF: 247 | is_nbf_valid = True # is new basis function valid (nbf new basis function) 248 | bs_split_bf_index = ind_bf # best selected split function index (parent function index) (bs best selected) 249 | bs_variable_index = ind_variable # best selected variable index 250 | bs_knot_value = self.static_knots[ind_variable][ind_knot] # best selected knot value 251 | bs_knot_index = ind_knot # best selected knot index 252 | bs_positive_valid = False 253 | bs_negative_valid = True 254 | bs_LOF = new_LOF # best selected lack of fit 255 | # print(xTy_matrix) 256 | # print(L_matrix) 257 | else: 258 | pass 259 | if is_nbf_valid: 260 | 261 | if bs_positive_valid and bs_negative_valid: 262 | new_r_square = 1 - bs_LOF / self.SST 263 | new_r_square_adjust = 1 - (bs_LOF / self.SST) * ( 264 | (self.n_points - 1) / (self.n_points - self.n_basis_fn - 2 - 1)) 265 | if new_r_square - self.r_square < self.difference or new_r_square_adjust - self.r_square_adjust < 0: 266 | print("AUTO STOP") 267 | break 268 | 269 | self.LOF = bs_LOF 270 | self.SSE = bs_LOF 271 | self.r_square = new_r_square 272 | self.r_square_adjust = new_r_square_adjust 273 | 274 | positive_knot_item = KnotItem(bs_knot_value, bs_variable_index, +1) 275 | negative_knot_item = KnotItem(bs_knot_value, bs_variable_index, -1) 276 | self.update_x_matrix(x_matrix, bs_split_bf_index, self.n_basis_fn + 1, positive_knot_item) 277 | self.update_x_matrix(x_matrix, bs_split_bf_index, self.n_basis_fn + 2, negative_knot_item) 278 | self.update_xTx_matrix(x_matrix, xTx_matrix, 2) 279 | self.update_xTy_matrix(x_matrix, xTy_matrix, 2) 280 | self.update_L_matrix(xTx_matrix, L_matrix, 2) 281 | self.update_LTb_matrix(L_matrix, LTb_matrix, xTy_matrix, 2) 282 | self.n_basis_fn += 2 283 | new_p_bf = BasisFunction() 284 | new_p_bf.copy_basis_function(self.basis_fns[bs_split_bf_index]) 285 | new_p_bf.add_knot_item(positive_knot_item) 286 | self.basis_fns.append(new_p_bf) 287 | new_n_bf = BasisFunction() 288 | new_n_bf.copy_basis_function(self.basis_fns[bs_split_bf_index]) 289 | new_n_bf.add_knot_item(negative_knot_item) 290 | self.basis_fns.append(new_n_bf) 291 | 292 | print("PB:", bs_split_bf_index, "V:", bs_variable_index, "K:", bs_knot_value, "R2:", self.r_square, 293 | "AR2:", self.r_square_adjust, "NB:", self.n_basis_fn, "Pair") 294 | 295 | elif bs_positive_valid: 296 | new_r_square = 1 - bs_LOF / self.SST 297 | new_r_square_adjust = 1 - (bs_LOF / self.SST) * ( 298 | (self.n_points - 1) / (self.n_points - self.n_basis_fn - 1 - 1)) 299 | if new_r_square - self.r_square < self.difference or new_r_square_adjust - self.r_square_adjust < 0: 300 | print("AUTO STOP") 301 | break 302 | 303 | self.LOF = bs_LOF 304 | self.SSE = bs_LOF 305 | self.r_square = new_r_square 306 | self.r_square_adjust = new_r_square_adjust 307 | 308 | positive_knot_item = KnotItem(bs_knot_value, bs_variable_index, +1) 309 | self.update_x_matrix(x_matrix, bs_split_bf_index, self.n_basis_fn + 1, positive_knot_item) 310 | self.update_xTx_matrix(x_matrix, xTx_matrix, 1) 311 | self.update_xTy_matrix(x_matrix, xTy_matrix, 1) 312 | self.update_L_matrix(xTx_matrix, L_matrix, 1) 313 | self.update_LTb_matrix(L_matrix, LTb_matrix, xTy_matrix, 1) 314 | self.n_basis_fn += 1 315 | new_p_bf = BasisFunction() 316 | new_p_bf.copy_basis_function(self.basis_fns[bs_split_bf_index]) 317 | new_p_bf.add_knot_item(positive_knot_item) 318 | self.basis_fns.append(new_p_bf) 319 | 320 | print("PB:", bs_split_bf_index, "V:", bs_variable_index, "K:", bs_knot_value, "R2:", self.r_square, 321 | "AR2:", self.r_square_adjust, "NB:", self.n_basis_fn, "Single") 322 | 323 | elif bs_negative_valid: 324 | new_r_square = 1 - bs_LOF / self.SST 325 | new_r_square_adjust = 1 - (bs_LOF / self.SST) * ( 326 | (self.n_points - 1) / (self.n_points - self.n_basis_fn - 1 - 1)) 327 | if new_r_square - self.r_square < self.difference or new_r_square_adjust - self.r_square_adjust < 0: 328 | print("AUTO STOP") 329 | break 330 | 331 | self.LOF = bs_LOF 332 | self.SSE = bs_LOF 333 | self.r_square = new_r_square 334 | self.r_square_adjust = new_r_square_adjust 335 | 336 | negative_knot_item = KnotItem(bs_knot_value, bs_variable_index, -1) 337 | self.update_x_matrix(x_matrix, bs_split_bf_index, self.n_basis_fn + 1, negative_knot_item) 338 | self.update_xTx_matrix(x_matrix, xTx_matrix, 1) 339 | self.update_xTy_matrix(x_matrix, xTy_matrix, 1) 340 | self.update_L_matrix(xTx_matrix, L_matrix, 1) 341 | self.update_LTb_matrix(L_matrix, LTb_matrix, xTy_matrix, 1) 342 | self.n_basis_fn += 1 343 | new_n_bf = BasisFunction() 344 | new_n_bf.copy_basis_function(self.basis_fns[bs_split_bf_index]) 345 | new_n_bf.add_knot_item(negative_knot_item) 346 | self.basis_fns.append(new_n_bf) 347 | 348 | print("PB:", bs_split_bf_index, "V:", bs_variable_index, "K:", bs_knot_value, "R2:", self.r_square, 349 | "AR2:", self.r_square_adjust, "NB:", self.n_basis_fn, "Single") 350 | 351 | else: 352 | pass 353 | 354 | else: 355 | print("No new basis function found with lower lack of fit. Stop.") 356 | break 357 | # print(xTx_matrix[0:7, 0:7]) 358 | self.cal_coefficients(L_matrix, LTb_matrix) 359 | # print(self.coefficients) 360 | return 361 | 362 | def cal_new_LOF(self, L_m, LTb_m, xTy_m, b_m, x_m, n_new_cols): 363 | self.update_LTb_matrix(L_m, LTb_m, xTy_m, n_new_cols) 364 | self.update_b_matrix(L_m, LTb_m, n_new_cols, b_m) 365 | return (np.sum((np.matmul(x_m[:, :1 + self.n_basis_fn + n_new_cols], 366 | b_m[:1 + self.n_basis_fn + n_new_cols, :]) - self.y) ** 2)) 367 | # print(LTb_m) 368 | # print(b_m) 369 | return 370 | 371 | def cal_coefficients(self, L_m, LTb_m): 372 | self.coefficients = np.zeros((1 + self.n_basis_fn, 1), np.float64) 373 | for r_ind in range(1 + self.n_basis_fn): 374 | i = self.n_basis_fn - r_ind 375 | result = LTb_m[i, 0] 376 | for c_ind in range(r_ind): 377 | j = self.n_basis_fn - c_ind 378 | result -= self.coefficients[j, 0] * L_m[j, i] # LT_matrix[i,j]=L_matrix[j,i] transpose 379 | self.coefficients[i, 0] = result / L_m[i, i] # LT_matrix[i,i]=L_matrix[i,i] transpose 380 | return 381 | 382 | def update_b_matrix(self, L_m, LTb_m, n_new_cols, b_m): 383 | for r_ind in range(1 + self.n_basis_fn + n_new_cols): 384 | i = self.n_basis_fn + n_new_cols - r_ind 385 | result = LTb_m[i, 0] 386 | for c_ind in range(r_ind): 387 | j = self.n_basis_fn + n_new_cols - c_ind 388 | result -= b_m[j, 0] * L_m[j, i] # LT_matrix[i,j]=L_matrix[j,i] transpose 389 | b_m[i, 0] = result / L_m[i, i] # LT_matrix[i,i]=L_matrix[i,i] transpose 390 | return 391 | 392 | def update_LTb_matrix(self, L_m, LTb_m, xTy_m, n_new_cols): 393 | for i in range(self.n_basis_fn + 1, self.n_basis_fn + 1 + n_new_cols): 394 | result = xTy_m[i, 0] 395 | for j in range(i): 396 | result -= L_m[i, j] * LTb_m[j, 0] 397 | LTb_m[i, 0] = result / L_m[i, i] 398 | return 399 | 400 | def update_L_matrix(self, xTx_m, L_m, n_new_cols): 401 | for j in range(self.n_basis_fn + 1, self.n_basis_fn + n_new_cols + 1): 402 | L_m[j, 0] = xTx_m[j, 0] / L_m[0, 0] 403 | for i in range(1, j): 404 | result = xTx_m[j, i] 405 | for p in range(i): 406 | result -= L_m[i, p] * L_m[j, p] 407 | L_m[j, i] = result / L_m[i, i] 408 | result = xTx_m[j, j] 409 | for p in range(j): 410 | result -= L_m[j, p] ** 2 411 | if result <= 1.0e-10: 412 | return False 413 | L_m[j, j] = math.sqrt(result) 414 | return True 415 | 416 | def update_xTy_matrix(self, x_m, xTy_m, n_new_cols): 417 | for i_new_col in range(n_new_cols): 418 | result = 0 419 | for ind in range(self.n_points): 420 | result += x_m[ind, self.n_basis_fn + i_new_col + 1] * self.y[ind, 0] 421 | xTy_m[self.n_basis_fn + i_new_col + 1, 0] = result 422 | return 423 | 424 | def update_xTx_matrix(self, x_m, xTx_m, n_new_cols): 425 | # n_new_cols 1 or 2 426 | for i_new_col in range(n_new_cols): 427 | for i_col in range(self.n_basis_fn + 1 + i_new_col + 1): 428 | if i_new_col == 1 and i_col == self.n_basis_fn + 1: 429 | result = 0 430 | else: 431 | result = 0 432 | for i_row in range(self.n_points): 433 | result += x_m[i_row, self.n_basis_fn + i_new_col + 1] * x_m[i_row, i_col] 434 | xTx_m[self.n_basis_fn + i_new_col + 1, i_col] = result 435 | xTx_m[i_col, self.n_basis_fn + i_new_col + 1] = result 436 | return 437 | 438 | def is_new_x_col_valid(self, new_x_col): 439 | for i in range(self.n_points): 440 | if new_x_col[i, 0] > 1.0e-10: 441 | return True 442 | return False 443 | 444 | def cal_new_x_column_val(self, p_bf_x_col, new_x_col, knot_item): 445 | for ind in range(self.n_points): 446 | new_x_col[ind, 0] = p_bf_x_col[ind, 0] * knot_item.cal_knot_item_value(self.x[ind]) 447 | 448 | def update_x_matrix(self, x_m, p_bf_ind, new_x_col_ind, knot_item): 449 | for ind in range(self.n_points): 450 | x_m[ind, new_x_col_ind] = x_m[ind, p_bf_ind] * knot_item.cal_knot_item_value(self.x[ind]) 451 | # print(new_x_col) 452 | 453 | def is_knot_valid(self, p_bf, variable_index, knot_value): 454 | # return True 455 | # p_bf parent basis function 456 | for bf in self.basis_fns: 457 | if bf.order == p_bf.order + 1: 458 | is_same = True 459 | for ki in p_bf.knot_items: 460 | if not bf.is_knot_item_in(ki): 461 | is_same = False 462 | break 463 | if is_same: 464 | if not bf.is_knot_in(knot_value, variable_index): 465 | is_same = False 466 | if is_same: 467 | return False 468 | return True 469 | 470 | def cal_knot_index_step(self): 471 | for i in range(self.n_variables): 472 | n = len(self.static_knots[i]) 473 | if self.n_candidate_knots[i] == 0: 474 | step = 1 475 | else: 476 | step = (n - 1) / (self.n_candidate_knots[i] - 1) 477 | if step == 0: 478 | step = 1 479 | self.knot_index_step.append(step) 480 | # print(self.n_candidate_knots) 481 | 482 | def init_LOF_SSE_R_square(self): 483 | self.LOF = self.SST 484 | self.SSE = self.SST 485 | self.r_square = 0.0 486 | self.r_square_adjust = 0.0 487 | return 488 | 489 | def cal_SST(self): 490 | self.SST = 0.0 491 | for i in range(self.n_points): 492 | self.SST += (self.y[i, 0] - self.y_bar) ** 2 493 | # self.SST_D_n_1 = self.SST / (self.n_points - 1) 494 | return 495 | 496 | # get all possible knots from the training data and sort from smallest to largest 497 | def get_static_knots(self): 498 | for i in range(self.n_variables): 499 | self.static_knots[i] = sorted(set(self.x[:, i])) 500 | # print(len(self.static_knots[i])) 501 | # print(self.static_knots[i]) 502 | # self.get_static_knots_for_variable_i(self.x[:, i], self.static_knots[i]) 503 | return 504 | 505 | 506 | 507 | def get_static_knots_for_variable_i(self, knot_values, sorted_knot_values): 508 | n_values = len(knot_values) 509 | for i in range(n_values): 510 | new_index = self.get_new_knot_index(sorted_knot_values, knot_values[i]) 511 | if new_index == -1: 512 | pass 513 | elif new_index == len(sorted_knot_values): 514 | sorted_knot_values.append(knot_values[i]) 515 | else: 516 | sorted_knot_values.insert(new_index, knot_values[i]) 517 | return 518 | 519 | def get_new_knot_index(self, sorted_knot_values, new_knot_value): 520 | n_sorted = len(sorted_knot_values) 521 | start_index = 0 522 | end_index = n_sorted - 1 523 | middle_index = int((start_index + end_index) / 2) 524 | while start_index <= end_index: 525 | if new_knot_value - sorted_knot_values[middle_index] < 0: 526 | end_index = middle_index - 1 527 | elif new_knot_value - sorted_knot_values[middle_index] > 0: 528 | start_index = middle_index + 1 529 | else: 530 | return -1 531 | middle_index = int((start_index + end_index) / 2) 532 | return start_index 533 | 534 | 535 | class KnotItem: 536 | def __init__(self, knot_value, index_of_variable, sign): 537 | self.knot_value = knot_value 538 | self.index_of_variable = index_of_variable 539 | self.sign = sign 540 | 541 | def cal_knot_item_value(self, x): 542 | result = 0 543 | if self.sign == 1: 544 | result = x[self.index_of_variable] - self.knot_value 545 | else: 546 | result = self.knot_value - x[self.index_of_variable] 547 | 548 | if result > 0: 549 | return result 550 | else: 551 | return 0 552 | 553 | 554 | class BasisFunction: 555 | def __init__(self): 556 | self.order = 0 557 | self.knot_items = [] 558 | 559 | def is_knot_item_in(self, t_ki): 560 | # ki knot item t_ki test knot item 561 | for ki in self.knot_items: 562 | if ki.knot_value == t_ki.knot_value and ki.index_of_variable == t_ki.index_of_variable: 563 | return True 564 | return False 565 | 566 | def is_knot_in(self, k_value, var_ind): 567 | # 568 | for ki in self.knot_items: 569 | if ki.knot_value == k_value and ki.index_of_variable == var_ind: 570 | return True 571 | return False 572 | 573 | def add_knot_item(self, new_knot_item): 574 | self.knot_items.append(new_knot_item) 575 | self.order += 1 576 | 577 | def copy_basis_function(self, bf): 578 | for i in range(bf.order): 579 | new_knot_item = KnotItem(bf.knot_items[i].knot_value, bf.knot_items[i].index_of_variable, 580 | bf.knot_items[i].sign) 581 | self.add_knot_item(new_knot_item) 582 | 583 | def cal_bf_value(self, x): 584 | result = 1.0 585 | for i in range(self.order): 586 | result *= self.knot_items[i].cal_knot_item_value(x) 587 | return result 588 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Wind farm layout optimization problem (WFLOP) Python toolbox 2 | ## Reference 3 | [1] Xinglong Ju, and Victoria C. P. Chen. "A MARS Python version using truncated linear function." 2019. [![DOI](https://zenodo.org/badge/226974692.svg)](https://zenodo.org/badge/latestdoi/226974692)
4 | [2] Victoria C.P. Chen, David Ruppert, and Christine A. Shoemaker. "Applying experimental design and regression splines to high-dimensional continuous-state stochastic dynamic programming." *Operations Research* 47.1 (1999): 38-53. [![DOI](/IMAGES/10.1287opre.47.1.38.svg)](https://doi.org/10.1287/opre.47.1.38)
5 | [3] Xinglong Ju, and Feng Liu. "Wind farm layout optimization using self-informed genetic algorithm with information guided exploitation." *Applied Energy* 248 (2019): 429-445. [![DOI](/IMAGES/j.apenergy.2019.04.084.svg)](https://doi.org/10.1016/j.apenergy.2019.04.084)
6 | [4] Xinglong Ju, Victoria C. P. Chen, Jay M. Rosenberger, and Feng Liu. "Knot Optimization for Multivariate Adaptive Regression Splines." In *IISE Annual Conference*. Proceedings, Institute of Industrial and Systems Engineers (IISE), 2019. 7 | 8 | If you have any questions, please email Feng Liu (*fliu0@mgh.harvard.edu*), or Xinglong Ju (*xinglong.ju@mavs.uta.edu*). 9 | 10 |

11 | Wind farm land cells illustration
12 | Wind farm land cells illustration. 13 |

14 | 15 | ## WFLOP Python toolbox guide 16 | ### Import necessary libraries 17 | ```python 18 | import numpy as np 19 | import pandas as pd 20 | import MARS # MARS (Multivariate Adaptive Regression Splines) regression class 21 | import WindFarmGeneticToolbox # wind farm layout optimization using genetic algorithms classes 22 | from datetime import datetime 23 | import os 24 | ``` 25 | 26 | ### Wind farm settings and algorithm settings 27 | ```python 28 | # parameters for the genetic algorithm 29 | elite_rate = 0.2 30 | cross_rate = 0.6 31 | random_rate = 0.5 32 | mutate_rate = 0.1 33 | 34 | # wind farm size, cells 35 | rows = 21 36 | cols = 21 37 | cell_width = 77.0 * 2 # unit : m 38 | 39 | # 40 | N = 60 # number of wind turbines 41 | pop_size = 100 # population size, number of inidividuals in a population 42 | iteration = 1 # number of genetic algorithm iterations 43 | ``` 44 | 45 | ### Create a WindFarmGenetic object 46 | ```python 47 | # all data will be save in data folder 48 | data_folder = "data" 49 | if not os.path.exists(data_folder): 50 | os.makedirs(data_folder) 51 | 52 | # create an object of WindFarmGenetic 53 | wfg = WindFarmGeneticToolbox.WindFarmGenetic(rows=rows, cols=cols, N=N, pop_size=pop_size, 54 | iteration=iteration, cell_width=cell_width, elite_rate=elite_rate, 55 | cross_rate=cross_rate, random_rate=random_rate, mutate_rate=mutate_rate) 56 | # set wind distribution 57 | # wind distribution is discrete (number of wind speeds) by (number of wind directions) 58 | # wfg.init_4_direction_1_speed_12() 59 | wfg.init_1_direction_1_N_speed_12() 60 | ``` 61 | 62 | ### Generate initial populations 63 | ```python 64 | init_pops_data_folder = "data/init_pops" 65 | if not os.path.exists(init_pops_data_folder): 66 | os.makedirs(init_pops_data_folder) 67 | # n_init_pops : number of initial populations 68 | n_init_pops = 60 69 | for i in range(n_init_pops): 70 | wfg.gen_init_pop() 71 | wfg.save_init_pop("{}/init_{}.dat".format(init_pops_data_folder,i)) 72 | ``` 73 | 74 | ### Create results folder 75 | ```python 76 | # results folder 77 | # adaptive_best_layouts_N60_9_20190422213718.dat : best layout for AGA of run index 9 78 | # result_CGA_20190422213715.dat : run time and best eta for CGA method 79 | results_data_folder = "data/results" 80 | if not os.path.exists(results_data_folder): 81 | os.makedirs(results_data_folder) 82 | 83 | n_run_times = 1 # number of run times 84 | # result_arr stores the best conversion efficiency of each run 85 | result_arr = np.zeros((n_run_times, 2), dtype=np.float32) 86 | ``` 87 | 88 | ### Run conventional genetic algorithm (CGA) 89 | ```python 90 | # CGA method 91 | CGA_results_data_folder = "{}/CGA".format(results_data_folder) 92 | if not os.path.exists(CGA_results_data_folder): 93 | os.makedirs(CGA_results_data_folder) 94 | for i in range(0, n_run_times): # run times 95 | print("run times {} ...".format(i)) 96 | wfg.load_init_pop("{}/init_{}.dat".format(init_pops_data_folder, i)) 97 | run_time, eta = wfg.conventional_genetic_alg(ind_time=i, result_folder=CGA_results_data_folder) 98 | result_arr[i, 0] = run_time 99 | result_arr[i, 1] = eta 100 | time_stamp = datetime.now().strftime("%Y%m%d%H%M%S") 101 | filename = "{}/result_CGA_{}.dat".format(CGA_results_data_folder, time_stamp) 102 | np.savetxt(filename, result_arr, fmt='%f', delimiter=" ") 103 | ``` 104 | 105 | ### Run adaptive genetic algorithm (AGA) 106 | ```python 107 | # AGA method 108 | AGA_results_data_folder = "{}/AGA".format(results_data_folder) 109 | if not os.path.exists(AGA_results_data_folder): 110 | os.makedirs(AGA_results_data_folder) 111 | for i in range(0, n_run_times): # run times 112 | print("run times {} ...".format(i)) 113 | wfg.load_init_pop("{}/init_{}.dat".format(init_pops_data_folder, i)) 114 | run_time, eta = wfg.adaptive_genetic_alg(ind_time=i, result_folder=AGA_results_data_folder) 115 | result_arr[i, 0] = run_time 116 | result_arr[i, 1] = eta 117 | time_stamp = datetime.now().strftime("%Y%m%d%H%M%S") 118 | filename = "{}/result_AGA_{}.dat".format(AGA_results_data_folder, time_stamp) 119 | np.savetxt(filename, result_arr, fmt='%f', delimiter=" ") 120 | ``` 121 | 122 | ### Run self-informed genetic algorithm (SIGA) 123 | #### Generate wind distribution surface 124 | ```python 125 | wds_data_folder = "data/wds" 126 | if not os.path.exists(wds_data_folder): 127 | os.makedirs(wds_data_folder) 128 | # mc : monte-carlo 129 | n_mc_samples = 10000 130 | 131 | # each layout is binary list and the length of the list is (rows*cols) 132 | # 1 indicates there is a wind turbine in that cell 133 | # 0 indicates there is no wind turbine in the cell 134 | # in "mc_layout.dat", there are 'n_mc_samples' line and each line is a layout. 135 | 136 | # generate 'n_mc_samples' layouts and save it in 'mc_layout.data' file 137 | WindFarmGeneticToolbox.LayoutGridMCGenerator.gen_mc_grid(rows=rows, cols=cols, n=n_mc_samples, N=N, 138 | lofname="{}/{}".format(wds_data_folder, "mc_layout.dat")) 139 | # read layouts from 'mc_layout.dat' file 140 | layouts = np.genfromtxt("{}/{}".format(wds_data_folder,"mc_layout.dat"), delimiter=" ", dtype=np.int32) 141 | 142 | # generate dataset to build wind farm distribution surface 143 | wfg.mc_gen_xy(rows=rows, cols=cols, layouts=layouts, n=n_mc_samples, N=N, xfname="{}/{}".format(wds_data_folder, "x.dat"), 144 | yfname="{}/{}".format(wds_data_folder, "y.dat")) 145 | 146 | # parameters for MARS regression method 147 | n_variables = 2 148 | n_points = rows * cols 149 | n_candidate_knots = [rows, cols] 150 | n_max_basis_functions = 100 151 | n_max_interactions = 4 152 | difference = 1.0e-3 153 | 154 | x_original = pd.read_csv("{}/{}".format(wds_data_folder,"x.dat"), header=None, nrows=n_points, delim_whitespace=True) 155 | x_original = x_original.values 156 | 157 | y_original = pd.read_csv("{}/{}".format(wds_data_folder,"y.dat"), header=None, nrows=n_points, delim_whitespace=True) 158 | y_original = y_original.values 159 | 160 | mars = MARS.MARS(n_variables=n_variables, n_points=n_points, x=x_original, y=y_original, 161 | n_candidate_knots=n_candidate_knots, n_max_basis_functions=n_max_basis_functions, 162 | n_max_interactions=n_max_interactions, difference=difference) 163 | mars.MARS_regress() 164 | # save wind distribution model to 'wds.mars' 165 | mars.save_mars_model_to_file() 166 | with open("{}/{}".format(wds_data_folder,"wds.mars"), "wb") as mars_file: 167 | pickle.dump(mars, mars_file) 168 | ``` 169 | #### SIGA method 170 | ```python 171 | SIGA_results_data_folder = "{}/SIGA".format(results_data_folder) 172 | if not os.path.exists(SIGA_results_data_folder): 173 | os.makedirs(SIGA_results_data_folder) 174 | # wds_mars_file : wind distribution surface MARS model file 175 | wds_mars_file = "{}/{}".format(wds_data_folder, "wds.mars") 176 | for i in range(0, n_run_times): # run times 177 | print("run times {} ...".format(i)) 178 | wfg.load_init_pop("{}/init_{}.dat".format(init_pops_data_folder, i)) 179 | run_time, eta = wfg.self_informed_genetic_alg(ind_time=i, result_folder=SIGA_results_data_folder, 180 | wds_file=wds_mars_file) 181 | result_arr[i, 0] = run_time 182 | result_arr[i, 1] = eta 183 | time_stamp = datetime.now().strftime("%Y%m%d%H%M%S") 184 | filename = "{}/result_self_informed_{}.dat".format(SIGA_results_data_folder, time_stamp) 185 | np.savetxt(filename, result_arr, fmt='%f', delimiter=" ") 186 | ``` 187 | -------------------------------------------------------------------------------- /WindFarmGeneticToolbox.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | import matplotlib.patches as patches 5 | import time 6 | import MARS 7 | from datetime import datetime 8 | import pickle 9 | 10 | __version__ = "1.0.0" 11 | 12 | 13 | # np.random.seed(seed=int(time.time())) 14 | class WindFarmGenetic: 15 | # parameters for genetic algorithms 16 | elite_rate = 0.2 17 | cross_rate = 0.6 18 | random_rate = 0.5 19 | mutate_rate = 0.1 20 | 21 | turbine = None 22 | 23 | pop_size = 0 # number of individuals in a population 24 | N = 0 # number of wind turbines 25 | rows = 0 # number of rows : cells 26 | cols = 0 # number of columns : cells 27 | cell_width = 0 # cell width 28 | cell_width_half = 0 # half cell width 29 | iteration = 0 # number of iterations of genetic algorithm 30 | 31 | # constructor of class WindFarmGenetic 32 | def __init__(self, rows=0, cols=0, N=0, pop_size=0, 33 | iteration=0, cell_width=0, elite_rate=0.2, 34 | cross_rate=0.6, random_rate=0.5, mutate_rate=0.1): 35 | self.turbine = GE_1_5_sleTurbine() 36 | self.rows = rows 37 | self.cols = cols 38 | self.N = N 39 | self.pop_size = pop_size 40 | self.iteration = iteration 41 | self.cell_width = cell_width 42 | self.cell_width_half = cell_width * 0.5 43 | 44 | self.elite_rate = elite_rate 45 | self.cross_rate = cross_rate 46 | self.random_rate = random_rate 47 | self.mutate_rate = mutate_rate 48 | 49 | self.init_pop = None 50 | self.init_pop_nonezero_indices = None 51 | 52 | return 53 | 54 | def init_1_direction_1_speed_8(self): 55 | self.theta = np.array([0], dtype=np.float32) 56 | self.velocity = np.array([8.0], dtype=np.float32) 57 | self.f_theta_v = np.array([[1.0]], dtype=np.float32) 58 | return 59 | 60 | def init_1_direction_1_N_speed_12(self): 61 | self.theta = np.array([0], dtype=np.float32) 62 | self.velocity = np.array([12.0], dtype=np.float32) 63 | self.f_theta_v = np.array([[1.0]], dtype=np.float32) 64 | return 65 | 66 | def init_1_direction_1_NE_speed_12(self): 67 | self.theta = np.array([np.pi / 4.0], dtype=np.float32) 68 | self.velocity = np.array([12.0], dtype=np.float32) 69 | self.f_theta_v = np.array([[1.0]], dtype=np.float32) 70 | return 71 | 72 | def init_6_direction_1_speed_12(self): 73 | self.theta = np.array([0, np.pi / 3.0, 2 * np.pi / 3.0, 3 * np.pi / 3.0, 4 * np.pi / 3.0, 5 * np.pi / 3.0], 74 | dtype=np.float32) # 0.2, 0,3 0.2 0. 1 0.1 0.1 75 | self.velocity = np.array([12.0], dtype=np.float32) # 1 76 | self.f_theta_v = np.array([[0.2], [0.3], [0.2], [0.1], [0.1], [0.1]], dtype=np.float32) 77 | return 78 | 79 | def init_3_direction_3_speed(self): 80 | self.theta = np.array([0, np.pi / 6.0, 11 * np.pi / 6.0], dtype=np.float32) # 0.8 0.1 0.1 81 | self.velocity = np.array([12.0, 10.0, 8.0], dtype=np.float32) # 0.7 0.2 0.1 82 | self.f_theta_v = np.array([[0.56, 0.16, 0.08], [0.07, 0.02, 0.01], [0.07, 0.02, 0.01]], dtype=np.float32) 83 | return 84 | 85 | def init_12_direction_3_speed(self): 86 | self.theta = np.array( 87 | [0, np.pi / 6.0, 2 * np.pi / 6.0, 3 * np.pi / 6.0, 4 * np.pi / 6.0, 5 * np.pi / 6.0, 6 * np.pi / 6.0, 88 | 7 * np.pi / 6.0, 8 * np.pi / 6.0, 9 * np.pi / 6.0, 10 * np.pi / 6.0, 11 * np.pi / 6.0], 89 | dtype=np.float32) # 1.0/12 90 | self.velocity = np.array([12.0, 10.0, 8.0], dtype=np.float32) # 0.7 0.2 0.1 91 | self.f_theta_v = np.array( 92 | [[0.058333, 0.016667, 0.008333], [0.058333, 0.016667, 0.008333], [0.058333, 0.016667, 0.008333], 93 | [0.058333, 0.016667, 0.008333], [0.058333, 0.016667, 0.008333], [0.058333, 0.016667, 0.008333], 94 | [0.058333, 0.016667, 0.008333], [0.058333, 0.016667, 0.008333], [0.058333, 0.016667, 0.008333], 95 | [0.058333, 0.016667, 0.008333], [0.058333, 0.016667, 0.008333], [0.058333, 0.016667, 0.008333]], 96 | dtype=np.float32) 97 | return 98 | 99 | def init_4_direction_1_speed_12(self): 100 | self.theta = np.array( 101 | [0, 3 * np.pi / 6.0, 6 * np.pi / 6.0, 9 * np.pi / 6.0], dtype=np.float32) # 1.0/4 102 | self.velocity = np.array([12.0], dtype=np.float32) # 1 103 | self.f_theta_v = np.array([[0.25], [0.25], [0.25], [0.25]], dtype=np.float32) 104 | return 105 | 106 | def cost(self, N): 107 | return 1.0 * N * (2.0 / 3.0 + 1.0 / 3.0 * math.exp(-0.00174 * N ** 2)) 108 | 109 | def gen_init_pop(self): 110 | self.init_pop = LayoutGridMCGenerator.gen_pop(rows=self.rows, cols=self.cols, n=self.pop_size, N=self.N) 111 | self.init_pop_nonezero_indices = np.zeros((self.pop_size, self.N), dtype=np.int32) 112 | for ind_init_pop in range(self.pop_size): 113 | ind_indices = 0 114 | for ind in range(self.rows * self.cols): 115 | if self.init_pop[ind_init_pop, ind] == 1: 116 | self.init_pop_nonezero_indices[ind_init_pop, ind_indices] = ind 117 | ind_indices += 1 118 | return 119 | 120 | def save_init_pop(self, fname): 121 | np.savetxt(fname, self.init_pop, fmt='%d', delimiter=" ") 122 | return 123 | 124 | def load_init_pop(self, fname): 125 | self.init_pop = np.genfromtxt(fname, delimiter=" ", dtype=np.int32) 126 | self.init_pop_nonezero_indices = np.zeros((self.pop_size, self.N), dtype=np.int32) 127 | for ind_init_pop in range(self.pop_size): 128 | ind_indices = 0 129 | for ind in range(self.rows * self.cols): 130 | if self.init_pop[ind_init_pop, ind] == 1: 131 | self.init_pop_nonezero_indices[ind_init_pop, ind_indices] = ind 132 | ind_indices += 1 133 | return 134 | 135 | def cal_P_rate_total(self): 136 | f_p = 0.0 137 | for ind_t in range(len(self.theta)): 138 | for ind_v in range(len(self.velocity)): 139 | f_p += self.f_theta_v[ind_t, ind_v] * self.turbine.P_i_X(self.velocity[ind_v]) 140 | return self.N * f_p 141 | 142 | def layout_power(self, velocity, N): 143 | power = np.zeros(N, dtype=np.float32) 144 | for i in range(N): 145 | power[i] = self.turbine.P_i_X(velocity[i]) 146 | return power 147 | 148 | # generate dataset to build the wind distribution surface 149 | def mc_gen_xy(self, rows, cols, layouts, n, N, xfname, yfname): 150 | layouts_cr = np.zeros((rows * cols, 2), dtype=np.int32) # layouts column row index 151 | n_copies = np.sum(layouts, axis=0) 152 | layouts_power = np.zeros((n, rows * cols), dtype=np.float32) 153 | self.mc_fitness(pop=layouts, rows=rows, cols=cols, pop_size=n, N=N, lp=layouts_power) 154 | sum_layout_power = np.sum(layouts_power, axis=0) 155 | mean_power = np.zeros(rows * cols, dtype=np.float32) 156 | for i in range(rows * cols): 157 | mean_power[i] = sum_layout_power[i] / n_copies[i] 158 | # print(n_copies) 159 | # print(sum_layout_power) 160 | # print(mean_power) 161 | # print(n_copies) 162 | for ind in range(rows * cols): 163 | r_i = np.floor(ind / cols) 164 | c_i = np.floor(ind - r_i * cols) 165 | layouts_cr[ind, 0] = c_i 166 | layouts_cr[ind, 1] = r_i 167 | np.savetxt(xfname, layouts_cr, fmt='%d', delimiter=" ") 168 | np.savetxt(yfname, mean_power, fmt='%f', delimiter=" ") 169 | return 170 | 171 | def mc_fitness(self, pop, rows, cols, pop_size, N, lp): 172 | for i in range(pop_size): 173 | print("layout {}...".format(i)) 174 | xy_position = np.zeros((2, N), dtype=np.float32) # x y position 175 | cr_position = np.zeros((2, N), dtype=np.int32) # column row position 176 | ind_position = np.zeros(N, dtype=np.int32) 177 | ind_pos = 0 178 | for ind in range(rows * cols): 179 | if pop[i, ind] == 1: 180 | r_i = np.floor(ind / cols) 181 | c_i = np.floor(ind - r_i * cols) 182 | cr_position[0, ind_pos] = c_i 183 | cr_position[1, ind_pos] = r_i 184 | xy_position[0, ind_pos] = c_i * self.cell_width + self.cell_width_half 185 | xy_position[1, ind_pos] = r_i * self.cell_width + self.cell_width_half 186 | ind_position[ind_pos] = ind 187 | ind_pos += 1 188 | lp_power_accum = np.zeros(N, dtype=np.float32) # a specific layout power accumulate 189 | for ind_t in range(len(self.theta)): 190 | for ind_v in range(len(self.velocity)): 191 | trans_matrix = np.array( 192 | [[np.cos(self.theta[ind_t]), -np.sin(self.theta[ind_t])], 193 | [np.sin(self.theta[ind_t]), np.cos(self.theta[ind_t])]], 194 | np.float32) 195 | trans_xy_position = np.matmul(trans_matrix, xy_position) 196 | speed_deficiency = self.wake_calculate(trans_xy_position, N) 197 | 198 | actual_velocity = (1 - speed_deficiency) * self.velocity[ind_v] 199 | lp_power = self.layout_power(actual_velocity, 200 | N) # total power of a specific layout specific wind speed specific theta 201 | lp_power = lp_power * self.f_theta_v[ind_t, ind_v] 202 | lp_power_accum += lp_power 203 | 204 | lp[i, ind_position] = lp_power_accum 205 | 206 | return 207 | 208 | # conventional genetic algorithm for WFLOP (wind farm layout optimization problem) 209 | def MC_gen_alg(self): 210 | mars = MARS.MARS() 211 | mars.load_mars_model_from_file("mc_single_direction_single_speed.mars") 212 | print("Monte Carlo genetic algorithm starts....") 213 | fitness_generations = np.zeros(self.iteration, dtype=np.float32) 214 | best_layout_generations = np.zeros((self.iteration, self.rows * self.cols), dtype=np.int32) 215 | power_order = np.zeros((self.pop_size, self.N), 216 | dtype=np.int32) # in each layout, order turbine power from least to largest 217 | pop = np.copy(self.init_pop) 218 | eN = int(np.floor(self.pop_size * self.elite_rate)) # elite number 219 | rN = int(int(np.floor(self.pop_size * self.mutate_rate)) / eN) * eN # reproduce number 220 | mN = rN # mutation number 221 | cN = self.pop_size - eN - mN # crossover number 222 | 223 | for gen in range(self.iteration): 224 | print("generation {}...".format(gen)) 225 | fitness_value = self.AGA_fitness(pop=pop, rows=self.rows, cols=self.cols, pop_size=self.pop_size, 226 | N=self.N, 227 | po=power_order) 228 | sorted_index = np.argsort(-fitness_value) # fitness value descending from largest to least 229 | fitness_generations[gen] = fitness_value[sorted_index[0]] 230 | pop = pop[sorted_index, :] 231 | power_order = power_order[sorted_index, :] 232 | best_layout_generations[gen, :] = pop[0, :] 233 | self.MC_reproduce(pop=pop, eN=eN, rN=mN) 234 | self.MC_crossover(pop=pop, rows=self.rows, cols=self.cols, pop_size=self.pop_size, N=self.N, cN=cN) 235 | self.MC_mutation(pop=pop, rows=self.rows, cols=self.cols, pop_size=self.pop_size, N=self.N, eN=eN, 236 | mN=mN, po=power_order, mars=mars) 237 | filename = "MC_fitness_N{}.dat".format(self.N) 238 | np.savetxt(filename, fitness_generations, fmt='%f', delimiter=" ") 239 | filename = "MC_best_layouts_N{}.dat".format(self.N) 240 | np.savetxt(filename, best_layout_generations, fmt='%d', delimiter=" ") 241 | print("Monte Carlo genetic algorithm ends.") 242 | return 243 | 244 | # rN : reproduce number 245 | def MC_reproduce(self, pop, eN, rN): 246 | copies = int(rN / eN) 247 | for i in range(eN): 248 | pop[eN + copies * i:eN + copies * (i + 1), :] = pop[i, :] 249 | return 250 | 251 | # crossover from start index to end index (start index included, end index excluded) 252 | def MC_crossover(self, pop, rows, cols, pop_size, N, cN): 253 | pop[pop_size - cN:pop_size, :] = LayoutGridMCGenerator.gen_pop(rows=rows, cols=cols, n=cN, N=N) 254 | return 255 | 256 | def MC_mutation(self, pop, rows, cols, pop_size, N, eN, mN, po, mars): 257 | np.random.seed(seed=int(time.time())) 258 | copies = int(mN / eN) 259 | ind = eN 260 | 261 | n_candiate = 5 262 | pos_candidate = np.zeros((n_candiate, 2), dtype=np.int32) 263 | ind_pos_candidate = np.zeros(n_candiate, dtype=np.int32) 264 | for i in range(eN): 265 | turbine_pos = po[i, 0] 266 | for j in range(copies): 267 | ind_can = 0 268 | while True: 269 | null_turbine_pos = np.random.randint(0, cols * rows) 270 | if pop[i, null_turbine_pos] == 0: 271 | pos_candidate[ind_can, 1] = int(np.floor(null_turbine_pos / cols)) 272 | pos_candidate[ind_can, 0] = int(np.floor(null_turbine_pos - pos_candidate[ind_can, 1] * cols)) 273 | ind_pos_candidate[ind_can] = null_turbine_pos 274 | ind_can += 1 275 | if ind_can == n_candiate: 276 | break 277 | mars_val = mars.predict(pos_candidate) 278 | mars_val = mars_val[:, 0] 279 | sorted_index = np.argsort(mars_val) # fitness value descending from least to largest 280 | null_turbine_pos = ind_pos_candidate[sorted_index[0]] 281 | pop[ind, turbine_pos] = 0 282 | pop[ind, null_turbine_pos] = 1 283 | ind += 1 284 | return 285 | 286 | def MC_fitness(self, pop, rows, cols, pop_size, N, po): 287 | fitness_val = np.zeros(pop_size, dtype=np.float32) 288 | for i in range(pop_size): 289 | 290 | # layout = np.reshape(pop[i, :], newshape=(rows, cols)) 291 | xy_position = np.zeros((2, N), dtype=np.float32) # x y position 292 | cr_position = np.zeros((2, N), dtype=np.int32) # column row position 293 | ind_position = np.zeros(N, dtype=np.int32) 294 | ind_pos = 0 295 | for ind in range(rows * cols): 296 | if pop[i, ind] == 1: 297 | r_i = np.floor(ind / cols) 298 | c_i = np.floor(ind - r_i * cols) 299 | cr_position[0, ind_pos] = c_i 300 | cr_position[1, ind_pos] = r_i 301 | xy_position[0, ind_pos] = c_i * self.cell_width + self.cell_width_half 302 | xy_position[1, ind_pos] = r_i * self.cell_width + self.cell_width_half 303 | ind_position[ind_pos] = ind 304 | ind_pos += 1 305 | lp_power_accum = np.zeros(N, dtype=np.float32) # a specific layout power accumulate 306 | for ind_t in range(len(self.theta)): 307 | for ind_v in range(len(self.velocity)): 308 | # print(theta[ind_t]) 309 | # print(np.cos(theta[ind_t])) 310 | trans_matrix = np.array( 311 | [[np.cos(self.theta[ind_t]), -np.sin(self.theta[ind_t])], 312 | [np.sin(self.theta[ind_t]), np.cos(self.theta[ind_t])]], 313 | np.float32) 314 | 315 | trans_xy_position = np.matmul(trans_matrix, xy_position) 316 | 317 | speed_deficiency = self.wake_calculate(trans_xy_position, N) 318 | 319 | actual_velocity = (1 - speed_deficiency) * self.velocity[ind_v] 320 | lp_power = self.layout_power(actual_velocity, 321 | N) # total power of a specific layout specific wind speed specific theta 322 | lp_power = lp_power * self.f_theta_v[ind_t, ind_v] 323 | lp_power_accum += lp_power 324 | 325 | sorted_index = np.argsort(lp_power_accum) # power from least to largest 326 | po[i, :] = ind_position[sorted_index] 327 | 328 | fitness_val[i] = np.sum(lp_power_accum) 329 | return fitness_val 330 | 331 | def MC_layout_power(self, velocity, N): 332 | power = np.zeros(N, dtype=np.float32) 333 | for i in range(N): 334 | power[i] = self.turbine.P_i_X(velocity[i]) 335 | return power 336 | 337 | def wake_calculate(self, trans_xy_position, N): 338 | # print(-trans_xy_position) 339 | sorted_index = np.argsort(-trans_xy_position[1, :]) # y value descending 340 | wake_deficiency = np.zeros(N, dtype=np.float32) 341 | # print(1-wake_deficiency) 342 | wake_deficiency[sorted_index[0]] = 0 343 | for i in range(1, N): 344 | for j in range(i): 345 | xdis = np.absolute(trans_xy_position[0, sorted_index[i]] - trans_xy_position[0, sorted_index[j]]) 346 | ydis = np.absolute(trans_xy_position[1, sorted_index[i]] - trans_xy_position[1, sorted_index[j]]) 347 | d = self.cal_deficiency(dx=xdis, dy=ydis, r=self.turbine.rator_radius, 348 | ec=self.turbine.entrainment_const) 349 | wake_deficiency[sorted_index[i]] += d ** 2 350 | 351 | wake_deficiency[sorted_index[i]] = np.sqrt(wake_deficiency[sorted_index[i]]) 352 | # print(trans_xy_position[0, sorted_index[i]]) 353 | # print(trans_xy_position[0, sorted_index[j]]) 354 | # print(xdis) 355 | # print(trans_xy_position) 356 | # print(v) 357 | return wake_deficiency 358 | 359 | # ec : entrainment_const 360 | def cal_deficiency(self, dx, dy, r, ec): 361 | if dy == 0: 362 | return 0 363 | R = r + ec * dy 364 | inter_area = self.cal_interaction_area(dx=dx, dy=dy, r=r, R=R) 365 | d = 2.0 / 3.0 * (r ** 2) / (R ** 2) * inter_area / (np.pi * r ** 2) 366 | return d 367 | 368 | def cal_interaction_area(self, dx, dy, r, R): 369 | if dx >= r + R: 370 | return 0 371 | elif dx >= np.sqrt(R ** 2 - r ** 2): 372 | alpha = np.arccos((R ** 2 + dx ** 2 - r ** 2) / (2 * R * dx)) 373 | beta = np.arccos((r ** 2 + dx ** 2 - R ** 2) / (2 * r * dx)) 374 | A1 = alpha * R ** 2 375 | A2 = beta * r ** 2 376 | A3 = R * dx * np.sin(alpha) 377 | return A1 + A2 - A3 378 | elif dx >= R - r: 379 | alpha = np.arccos((R ** 2 + dx ** 2 - r ** 2) / (2 * R * dx)) 380 | beta = np.pi - np.arccos((r ** 2 + dx ** 2 - R ** 2) / (2 * r * dx)) 381 | A1 = alpha * R ** 2 382 | A2 = beta * r ** 2 383 | A3 = R * dx * np.sin(alpha) 384 | return np.pi * r ** 2 - (A2 + A3 - A1) 385 | else: 386 | return np.pi * r ** 2 387 | 388 | def conventional_genetic_alg(self, ind_time, result_folder): # conventional genetic algorithm 389 | P_rate_total = self.cal_P_rate_total() 390 | start_time = datetime.now() 391 | print("conventional genetic algorithm starts....") 392 | fitness_generations = np.zeros(self.iteration, dtype=np.float32) # best fitness value in each generation 393 | best_layout_generations = np.zeros((self.iteration, self.rows * self.cols), 394 | dtype=np.int32) # best layout in each generation 395 | power_order = np.zeros((self.pop_size, self.N), 396 | dtype=np.int32) # each row is a layout cell indices. in each layout, order turbine power from least to largest 397 | pop = np.copy(self.init_pop) 398 | pop_indices = np.copy(self.init_pop_nonezero_indices) # each row is a layout cell indices. 399 | 400 | eN = int(np.floor(self.pop_size * self.elite_rate)) # elite number 401 | rN = int(int(np.floor(self.pop_size * self.mutate_rate)) / eN) * eN # reproduce number 402 | mN = rN # mutation number 403 | cN = self.pop_size - eN - mN # crossover number 404 | 405 | for gen in range(self.iteration): 406 | print("generation {}...".format(gen)) 407 | fitness_value = self.conventional_fitness(pop=pop, rows=self.rows, cols=self.cols, pop_size=self.pop_size, 408 | N=self.N, 409 | po=power_order) 410 | sorted_index = np.argsort(-fitness_value) # fitness value descending from largest to least 411 | 412 | pop = pop[sorted_index, :] 413 | power_order = power_order[sorted_index, :] 414 | pop_indices = pop_indices[sorted_index, :] 415 | if gen == 0: 416 | fitness_generations[gen] = fitness_value[sorted_index[0]] 417 | best_layout_generations[gen, :] = pop[0, :] 418 | else: 419 | if fitness_value[sorted_index[0]] > fitness_generations[gen - 1]: 420 | fitness_generations[gen] = fitness_value[sorted_index[0]] 421 | best_layout_generations[gen, :] = pop[0, :] 422 | else: 423 | fitness_generations[gen] = fitness_generations[gen - 1] 424 | best_layout_generations[gen, :] = best_layout_generations[gen - 1, :] 425 | n_parents, parent_layouts, parent_pop_indices = self.conventional_select(pop=pop, pop_indices=pop_indices, 426 | pop_size=self.pop_size, 427 | elite_rate=self.elite_rate, 428 | random_rate=self.random_rate) 429 | self.conventional_crossover(N=self.N, pop=pop, pop_indices=pop_indices, pop_size=self.pop_size, 430 | n_parents=n_parents, 431 | parent_layouts=parent_layouts, parent_pop_indices=parent_pop_indices) 432 | self.conventional_mutation(rows=self.rows, cols=self.cols, N=self.N, pop=pop, pop_indices=pop_indices, 433 | pop_size=self.pop_size, 434 | mutation_rate=self.mutate_rate) 435 | end_time = datetime.now() 436 | run_time = (end_time - start_time).total_seconds() 437 | eta_generations = np.copy(fitness_generations) 438 | eta_generations = eta_generations * (1.0 / P_rate_total) 439 | time_stamp = datetime.now().strftime("%Y%m%d%H%M%S") 440 | filename = "{}/conventional_eta_N{}_{}_{}.dat".format(result_folder, self.N, ind_time, time_stamp) 441 | np.savetxt(filename, eta_generations, fmt='%f', delimiter=" ") 442 | filename = "{}/conventional_best_layouts_N{}_{}_{}.dat".format(result_folder, self.N, ind_time, time_stamp) 443 | np.savetxt(filename, best_layout_generations, fmt='%d', delimiter=" ") 444 | print("conventional genetic algorithm ends.") 445 | filename = "{}/conventional_runtime.txt".format(result_folder) # time used to run the method in seconds 446 | f = open(filename, "a+") 447 | f.write("{}\n".format(run_time)) 448 | f.close() 449 | 450 | filename = "{}/conventional_eta.txt".format(result_folder) # all best etas 451 | f = open(filename, "a+") 452 | f.write("{}\n".format(eta_generations[self.iteration - 1])) 453 | f.close() 454 | return run_time, eta_generations[self.iteration - 1] 455 | 456 | def conventional_mutation(self, rows, cols, N, pop, pop_indices, pop_size, mutation_rate): 457 | np.random.seed(seed=int(time.time())) 458 | for i in range(pop_size): 459 | if np.random.randn() > mutation_rate: 460 | continue 461 | while True: 462 | turbine_pos = np.random.randint(0, cols * rows) 463 | if pop[i, turbine_pos] == 1: 464 | break 465 | while True: 466 | null_turbine_pos = np.random.randint(0, cols * rows) 467 | if pop[i, null_turbine_pos] == 0: 468 | break 469 | pop[i, turbine_pos] = 0 470 | pop[i, null_turbine_pos] = 1 471 | for j in range(N): 472 | if pop_indices[i, j] == turbine_pos: 473 | pop_indices[i, j] = null_turbine_pos 474 | break 475 | pop_indices[i, :] = np.sort(pop_indices[i, :]) 476 | return 477 | 478 | def conventional_crossover(self, N, pop, pop_indices, pop_size, n_parents, 479 | parent_layouts, parent_pop_indices): 480 | n_counter = 0 481 | np.random.seed(seed=int(time.time())) # init random seed 482 | while n_counter < pop_size: 483 | male = np.random.randint(0, n_parents) 484 | female = np.random.randint(0, n_parents) 485 | if male != female: 486 | cross_point = np.random.randint(1, N) 487 | if parent_pop_indices[male, cross_point - 1] < parent_pop_indices[female, cross_point]: 488 | pop[n_counter, :] = 0 489 | pop[n_counter, :parent_pop_indices[male, cross_point - 1] + 1] = parent_layouts[male, 490 | :parent_pop_indices[ 491 | male, cross_point - 1] + 1] 492 | pop[n_counter, parent_pop_indices[female, cross_point]:] = parent_layouts[female, 493 | parent_pop_indices[female, cross_point]:] 494 | pop_indices[n_counter, :cross_point] = parent_pop_indices[male, :cross_point] 495 | pop_indices[n_counter, cross_point:] = parent_pop_indices[female, cross_point:] 496 | n_counter += 1 497 | return 498 | 499 | def conventional_select(self, pop, pop_indices, pop_size, elite_rate, random_rate): 500 | n_elite = int(pop_size * elite_rate) 501 | parents_ind = [i for i in range(n_elite)] 502 | np.random.seed(seed=int(time.time())) # init random seed 503 | for i in range(n_elite, pop_size): 504 | if np.random.randn() < random_rate: 505 | parents_ind.append(i) 506 | parent_layouts = pop[parents_ind, :] 507 | parent_pop_indices = pop_indices[parents_ind, :] 508 | return len(parent_pop_indices), parent_layouts, parent_pop_indices 509 | 510 | def conventional_fitness(self, pop, rows, cols, pop_size, N, po): 511 | fitness_val = np.zeros(pop_size, dtype=np.float32) 512 | for i in range(pop_size): 513 | 514 | # layout = np.reshape(pop[i, :], newshape=(rows, cols)) 515 | xy_position = np.zeros((2, N), dtype=np.float32) # x y position 516 | cr_position = np.zeros((2, N), dtype=np.int32) # column row position 517 | ind_position = np.zeros(N, dtype=np.int32) 518 | ind_pos = 0 519 | for ind in range(rows * cols): 520 | if pop[i, ind] == 1: 521 | r_i = np.floor(ind / cols) 522 | c_i = np.floor(ind - r_i * cols) 523 | cr_position[0, ind_pos] = c_i 524 | cr_position[1, ind_pos] = r_i 525 | xy_position[0, ind_pos] = c_i * self.cell_width + self.cell_width_half 526 | xy_position[1, ind_pos] = r_i * self.cell_width + self.cell_width_half 527 | ind_position[ind_pos] = ind 528 | ind_pos += 1 529 | lp_power_accum = np.zeros(N, dtype=np.float32) # a specific layout power accumulate 530 | for ind_t in range(len(self.theta)): 531 | for ind_v in range(len(self.velocity)): 532 | # print(theta[ind_t]) 533 | # print(np.cos(theta[ind_t])) 534 | trans_matrix = np.array( 535 | [[np.cos(self.theta[ind_t]), -np.sin(self.theta[ind_t])], 536 | [np.sin(self.theta[ind_t]), np.cos(self.theta[ind_t])]], 537 | np.float32) 538 | 539 | trans_xy_position = np.matmul(trans_matrix, xy_position) 540 | 541 | speed_deficiency = self.wake_calculate(trans_xy_position, N) 542 | 543 | actual_velocity = (1 - speed_deficiency) * self.velocity[ind_v] 544 | lp_power = self.layout_power(actual_velocity, 545 | N) # total power of a specific layout specific wind speed specific theta 546 | lp_power = lp_power * self.f_theta_v[ind_t, ind_v] 547 | lp_power_accum += lp_power 548 | 549 | sorted_index = np.argsort(lp_power_accum) # power from least to largest 550 | po[i, :] = ind_position[sorted_index] 551 | 552 | fitness_val[i] = np.sum(lp_power_accum) 553 | return fitness_val 554 | 555 | def adaptive_genetic_alg(self, ind_time, result_folder): # adaptive genetic algorithm 556 | P_rate_total = self.cal_P_rate_total() 557 | start_time = datetime.now() 558 | print("adaptive genetic algorithm starts....") 559 | fitness_generations = np.zeros(self.iteration, dtype=np.float32) # best fitness value in each generation 560 | best_layout_generations = np.zeros((self.iteration, self.rows * self.cols), 561 | dtype=np.int32) # best layout in each generation 562 | power_order = np.zeros((self.pop_size, self.N), 563 | dtype=np.int32) # each row is a layout cell indices. in each layout, order turbine power from least to largest 564 | pop = np.copy(self.init_pop) 565 | pop_indices = np.copy(self.init_pop_nonezero_indices) # each row is a layout cell indices. 566 | 567 | eN = int(np.floor(self.pop_size * self.elite_rate)) # elite number 568 | rN = int(int(np.floor(self.pop_size * self.mutate_rate)) / eN) * eN # reproduce number 569 | mN = rN # mutation number 570 | cN = self.pop_size - eN - mN # crossover number 571 | 572 | for gen in range(self.iteration): 573 | print("generation {}...".format(gen)) 574 | fitness_value = self.adaptive_fitness(pop=pop, rows=self.rows, cols=self.cols, pop_size=self.pop_size, 575 | N=self.N, 576 | po=power_order) 577 | sorted_index = np.argsort(-fitness_value) # fitness value descending from largest to least 578 | 579 | pop = pop[sorted_index, :] 580 | power_order = power_order[sorted_index, :] 581 | pop_indices = pop_indices[sorted_index, :] 582 | if gen == 0: 583 | fitness_generations[gen] = fitness_value[sorted_index[0]] 584 | best_layout_generations[gen, :] = pop[0, :] 585 | else: 586 | if fitness_value[sorted_index[0]] > fitness_generations[gen - 1]: 587 | fitness_generations[gen] = fitness_value[sorted_index[0]] 588 | best_layout_generations[gen, :] = pop[0, :] 589 | else: 590 | fitness_generations[gen] = fitness_generations[gen - 1] 591 | best_layout_generations[gen, :] = best_layout_generations[gen - 1, :] 592 | self.adaptive_move_worst(rows=self.rows, cols=self.cols, pop=pop, pop_indices=pop_indices, 593 | pop_size=self.pop_size, power_order=power_order) 594 | n_parents, parent_layouts, parent_pop_indices = self.adaptive_select(pop=pop, pop_indices=pop_indices, 595 | pop_size=self.pop_size, 596 | elite_rate=self.elite_rate, 597 | random_rate=self.random_rate) 598 | self.adaptive_crossover(N=self.N, pop=pop, pop_indices=pop_indices, pop_size=self.pop_size, 599 | n_parents=n_parents, 600 | parent_layouts=parent_layouts, parent_pop_indices=parent_pop_indices) 601 | self.adaptive_mutation(rows=self.rows, cols=self.cols, N=self.N, pop=pop, pop_indices=pop_indices, 602 | pop_size=self.pop_size, 603 | mutation_rate=self.mutate_rate) 604 | end_time = datetime.now() 605 | run_time = (end_time - start_time).total_seconds() 606 | eta_generations = np.copy(fitness_generations) 607 | eta_generations = eta_generations * (1.0 / P_rate_total) 608 | time_stamp = datetime.now().strftime("%Y%m%d%H%M%S") 609 | 610 | filename = "{}/adaptive_eta_N{}_{}_{}.dat".format(result_folder, self.N, ind_time, time_stamp) 611 | np.savetxt(filename, eta_generations, fmt='%f', delimiter=" ") 612 | filename = "{}/adaptive_best_layouts_N{}_{}_{}.dat".format(result_folder, self.N, ind_time, time_stamp) 613 | np.savetxt(filename, best_layout_generations, fmt='%d', delimiter=" ") 614 | print("adaptive genetic algorithm ends.") 615 | filename = "{}/adaptive_runtime.txt".format(result_folder) 616 | f = open(filename, "a+") 617 | f.write("{}\n".format(run_time)) 618 | f.close() 619 | 620 | filename = "{}/adaptive_eta.txt".format(result_folder) 621 | f = open(filename, "a+") 622 | f.write("{}\n".format(eta_generations[self.iteration - 1])) 623 | f.close() 624 | 625 | return run_time, eta_generations[self.iteration - 1] 626 | 627 | def adaptive_move_worst(self, rows, cols, pop, pop_indices, pop_size, power_order): 628 | np.random.seed(seed=int(time.time())) 629 | for i in range(pop_size): 630 | turbine_pos = power_order[i, 0] 631 | while True: 632 | null_turbine_pos = np.random.randint(0, cols * rows) 633 | if pop[i, null_turbine_pos] == 0: 634 | break 635 | pop[i, turbine_pos] = 0 636 | pop[i, null_turbine_pos] = 1 637 | power_order[i, 0] = null_turbine_pos 638 | pop_indices[i, :] = np.sort(power_order[i, :]) 639 | return 640 | 641 | def adaptive_mutation(self, rows, cols, N, pop, pop_indices, pop_size, mutation_rate): 642 | np.random.seed(seed=int(time.time())) 643 | for i in range(pop_size): 644 | if np.random.randn() > mutation_rate: 645 | continue 646 | while True: 647 | turbine_pos = np.random.randint(0, cols * rows) 648 | if pop[i, turbine_pos] == 1: 649 | break 650 | while True: 651 | null_turbine_pos = np.random.randint(0, cols * rows) 652 | if pop[i, null_turbine_pos] == 0: 653 | break 654 | pop[i, turbine_pos] = 0 655 | pop[i, null_turbine_pos] = 1 656 | for j in range(N): 657 | if pop_indices[i, j] == turbine_pos: 658 | pop_indices[i, j] = null_turbine_pos 659 | break 660 | pop_indices[i, :] = np.sort(pop_indices[i, :]) 661 | return 662 | 663 | def adaptive_crossover(self, N, pop, pop_indices, pop_size, n_parents, 664 | parent_layouts, parent_pop_indices): 665 | n_counter = 0 666 | np.random.seed(seed=int(time.time())) # init random seed 667 | while n_counter < pop_size: 668 | male = np.random.randint(0, n_parents) 669 | female = np.random.randint(0, n_parents) 670 | if male != female: 671 | cross_point = np.random.randint(1, N) 672 | if parent_pop_indices[male, cross_point - 1] < parent_pop_indices[female, cross_point]: 673 | pop[n_counter, :] = 0 674 | pop[n_counter, :parent_pop_indices[male, cross_point - 1] + 1] = parent_layouts[male, 675 | :parent_pop_indices[ 676 | male, cross_point - 1] + 1] 677 | pop[n_counter, parent_pop_indices[female, cross_point]:] = parent_layouts[female, 678 | parent_pop_indices[female, cross_point]:] 679 | pop_indices[n_counter, :cross_point] = parent_pop_indices[male, :cross_point] 680 | pop_indices[n_counter, cross_point:] = parent_pop_indices[female, cross_point:] 681 | n_counter += 1 682 | return 683 | 684 | def adaptive_select(self, pop, pop_indices, pop_size, elite_rate, random_rate): 685 | n_elite = int(pop_size * elite_rate) 686 | parents_ind = [i for i in range(n_elite)] 687 | np.random.seed(seed=int(time.time())) # init random seed 688 | for i in range(n_elite, pop_size): 689 | if np.random.randn() < random_rate: 690 | parents_ind.append(i) 691 | parent_layouts = pop[parents_ind, :] 692 | parent_pop_indices = pop_indices[parents_ind, :] 693 | return len(parent_pop_indices), parent_layouts, parent_pop_indices 694 | 695 | def adaptive_fitness(self, pop, rows, cols, pop_size, N, po): 696 | fitness_val = np.zeros(pop_size, dtype=np.float32) 697 | for i in range(pop_size): 698 | 699 | # layout = np.reshape(pop[i, :], newshape=(rows, cols)) 700 | xy_position = np.zeros((2, N), dtype=np.float32) # x y position 701 | cr_position = np.zeros((2, N), dtype=np.int32) # column row position 702 | ind_position = np.zeros(N, dtype=np.int32) 703 | ind_pos = 0 704 | for ind in range(rows * cols): 705 | if pop[i, ind] == 1: 706 | r_i = np.floor(ind / cols) 707 | c_i = np.floor(ind - r_i * cols) 708 | cr_position[0, ind_pos] = c_i 709 | cr_position[1, ind_pos] = r_i 710 | xy_position[0, ind_pos] = c_i * self.cell_width + self.cell_width_half 711 | xy_position[1, ind_pos] = r_i * self.cell_width + self.cell_width_half 712 | ind_position[ind_pos] = ind 713 | ind_pos += 1 714 | lp_power_accum = np.zeros(N, dtype=np.float32) # a specific layout power accumulate 715 | for ind_t in range(len(self.theta)): 716 | for ind_v in range(len(self.velocity)): 717 | # print(theta[ind_t]) 718 | # print(np.cos(theta[ind_t])) 719 | trans_matrix = np.array( 720 | [[np.cos(self.theta[ind_t]), -np.sin(self.theta[ind_t])], 721 | [np.sin(self.theta[ind_t]), np.cos(self.theta[ind_t])]], 722 | np.float32) 723 | 724 | trans_xy_position = np.matmul(trans_matrix, xy_position) 725 | speed_deficiency = self.wake_calculate(trans_xy_position, N) 726 | 727 | actual_velocity = (1 - speed_deficiency) * self.velocity[ind_v] 728 | lp_power = self.layout_power(actual_velocity, 729 | N) # total power of a specific layout specific wind speed specific theta 730 | lp_power = lp_power * self.f_theta_v[ind_t, ind_v] 731 | lp_power_accum += lp_power 732 | 733 | sorted_index = np.argsort(lp_power_accum) # power from least to largest 734 | po[i, :] = ind_position[sorted_index] 735 | 736 | fitness_val[i] = np.sum(lp_power_accum) 737 | return fitness_val 738 | 739 | # wds_file : wind distribution surface model file (A MARS model) 740 | def self_informed_genetic_alg(self, ind_time, result_folder, wds_file): # adaptive genetic algorithm 741 | mars_file = open(wds_file, 'rb') 742 | mars = pickle.load(mars_file) 743 | 744 | P_rate_total = self.cal_P_rate_total() 745 | start_time = datetime.now() 746 | print("self informed genetic algorithm starts....") 747 | fitness_generations = np.zeros(self.iteration, dtype=np.float32) # best fitness value in each generation 748 | best_layout_generations = np.zeros((self.iteration, self.rows * self.cols), 749 | dtype=np.int32) # best layout in each generation 750 | power_order = np.zeros((self.pop_size, self.N), 751 | dtype=np.int32) # each row is a layout cell indices. in each layout, order turbine power from least to largest 752 | pop = np.copy(self.init_pop) 753 | pop_indices = np.copy(self.init_pop_nonezero_indices) # each row is a layout cell indices. 754 | 755 | eN = int(np.floor(self.pop_size * self.elite_rate)) # elite number 756 | rN = int(int(np.floor(self.pop_size * self.mutate_rate)) / eN) * eN # reproduce number 757 | mN = rN # mutation number 758 | cN = self.pop_size - eN - mN # crossover number 759 | 760 | for gen in range(self.iteration): 761 | print("generation {}...".format(gen)) 762 | fitness_value = self.self_informed_fitness(pop=pop, rows=self.rows, cols=self.cols, pop_size=self.pop_size, 763 | N=self.N, 764 | po=power_order) 765 | sorted_index = np.argsort(-fitness_value) # fitness value descending from largest to least 766 | 767 | pop = pop[sorted_index, :] 768 | power_order = power_order[sorted_index, :] 769 | pop_indices = pop_indices[sorted_index, :] 770 | if gen == 0: 771 | fitness_generations[gen] = fitness_value[sorted_index[0]] 772 | best_layout_generations[gen, :] = pop[0, :] 773 | else: 774 | if fitness_value[sorted_index[0]] > fitness_generations[gen - 1]: 775 | fitness_generations[gen] = fitness_value[sorted_index[0]] 776 | best_layout_generations[gen, :] = pop[0, :] 777 | else: 778 | fitness_generations[gen] = fitness_generations[gen - 1] 779 | best_layout_generations[gen, :] = best_layout_generations[gen - 1, :] 780 | self.self_informed_move_worst(rows=self.rows, cols=self.cols, pop=pop, pop_indices=pop_indices, 781 | pop_size=self.pop_size, power_order=power_order, mars=mars) 782 | n_parents, parent_layouts, parent_pop_indices = self.self_informed_select(pop=pop, pop_indices=pop_indices, 783 | pop_size=self.pop_size, 784 | elite_rate=self.elite_rate, 785 | random_rate=self.random_rate) 786 | self.self_informed_crossover(N=self.N, pop=pop, pop_indices=pop_indices, pop_size=self.pop_size, 787 | n_parents=n_parents, 788 | parent_layouts=parent_layouts, parent_pop_indices=parent_pop_indices) 789 | self.self_informed_mutation(rows=self.rows, cols=self.cols, N=self.N, pop=pop, pop_indices=pop_indices, 790 | pop_size=self.pop_size, 791 | mutation_rate=self.mutate_rate) 792 | 793 | end_time = datetime.now() 794 | run_time = (end_time - start_time).total_seconds() 795 | eta_generations = np.copy(fitness_generations) 796 | eta_generations = eta_generations * (1.0 / P_rate_total) 797 | time_stamp = datetime.now().strftime("%Y%m%d%H%M%S") 798 | 799 | filename = "{}/self_informed_eta_N{}_{}_{}.dat".format(result_folder, self.N, ind_time, time_stamp) 800 | np.savetxt(filename, eta_generations, fmt='%f', delimiter=" ") 801 | filename = "{}/self_informed_best_layouts_N{}_{}_{}.dat".format(result_folder, self.N, ind_time, time_stamp) 802 | np.savetxt(filename, best_layout_generations, fmt='%d', delimiter=" ") 803 | print("self informed genetic algorithm ends.") 804 | filename = "{}/self_informed_runtime.txt".format(result_folder) 805 | f = open(filename, "a+") 806 | f.write("{}\n".format(run_time)) 807 | f.close() 808 | filename = "{}/self_informed_eta.txt".format(result_folder) 809 | f = open(filename, "a+") 810 | f.write("{}\n".format(eta_generations[self.iteration - 1])) 811 | f.close() 812 | return run_time, eta_generations[self.iteration - 1] 813 | 814 | def self_informed_move_worst(self, rows, cols, pop, pop_indices, pop_size, power_order, mars): 815 | np.random.seed(seed=int(time.time())) 816 | for i in range(pop_size): 817 | r = np.random.randn() 818 | if r < 0.5: 819 | self.self_informed_move_worst_case_random(i=i, rows=rows, cols=cols, pop=pop, pop_indices=pop_indices, 820 | pop_size=pop_size, power_order=power_order) 821 | else: 822 | self.self_informed_move_worst_case_best(i=i, rows=rows, cols=cols, pop=pop, pop_indices=pop_indices, 823 | pop_size=pop_size, power_order=power_order, mars=mars) 824 | 825 | return 826 | 827 | def self_informed_move_worst_case_random(self, i, rows, cols, pop, pop_indices, pop_size, power_order): 828 | np.random.seed(seed=int(time.time())) 829 | turbine_pos = power_order[i, 0] 830 | while True: 831 | null_turbine_pos = np.random.randint(0, cols * rows) 832 | if pop[i, null_turbine_pos] == 0: 833 | break 834 | pop[i, turbine_pos] = 0 835 | pop[i, null_turbine_pos] = 1 836 | power_order[i, 0] = null_turbine_pos 837 | pop_indices[i, :] = np.sort(power_order[i, :]) 838 | return 839 | 840 | def self_informed_move_worst_case_best(self, i, rows, cols, pop, pop_indices, pop_size, power_order, mars): 841 | np.random.seed(seed=int(time.time())) 842 | n_candiate = 5 843 | pos_candidate = np.zeros((n_candiate, 2), dtype=np.int32) 844 | ind_pos_candidate = np.zeros(n_candiate, dtype=np.int32) 845 | turbine_pos = power_order[i, 0] 846 | ind_can = 0 847 | while True: 848 | null_turbine_pos = np.random.randint(0, cols * rows) 849 | if pop[i, null_turbine_pos] == 0: 850 | pos_candidate[ind_can, 1] = int(np.floor(null_turbine_pos / cols)) 851 | pos_candidate[ind_can, 0] = int(np.floor(null_turbine_pos - pos_candidate[ind_can, 1] * cols)) 852 | ind_pos_candidate[ind_can] = null_turbine_pos 853 | ind_can += 1 854 | if ind_can == n_candiate: 855 | break 856 | mars_val = mars.predict(pos_candidate) 857 | mars_val = mars_val[:, 0] 858 | sorted_index = np.argsort(-mars_val) # fitness value descending from largest to least 859 | null_turbine_pos = ind_pos_candidate[sorted_index[0]] 860 | pop[i, turbine_pos] = 0 861 | pop[i, null_turbine_pos] = 1 862 | power_order[i, 0] = null_turbine_pos 863 | pop_indices[i, :] = np.sort(power_order[i, :]) 864 | return 865 | 866 | def self_informed_move_worst_case_worst(self, i, rows, cols, pop, pop_indices, pop_size, power_order, mars): 867 | np.random.seed(seed=int(time.time())) 868 | n_candiate = 11 869 | pos_candidate = np.zeros((n_candiate, 2), dtype=np.int32) 870 | ind_pos_candidate = np.zeros(n_candiate, dtype=np.int32) 871 | turbine_pos = power_order[i, 0] 872 | ind_can = 0 873 | while True: 874 | null_turbine_pos = np.random.randint(0, cols * rows) 875 | if pop[i, null_turbine_pos] == 0: 876 | pos_candidate[ind_can, 1] = int(np.floor(null_turbine_pos / cols)) 877 | pos_candidate[ind_can, 0] = int(np.floor(null_turbine_pos - pos_candidate[ind_can, 1] * cols)) 878 | ind_pos_candidate[ind_can] = null_turbine_pos 879 | ind_can += 1 880 | if ind_can == n_candiate: 881 | break 882 | mars_val = mars.predict(pos_candidate) 883 | mars_val = mars_val[:, 0] 884 | sorted_index = np.argsort(mars_val) # fitness value descending from least to largest 885 | null_turbine_pos = ind_pos_candidate[sorted_index[0]] 886 | pop[i, turbine_pos] = 0 887 | pop[i, null_turbine_pos] = 1 888 | power_order[i, 0] = null_turbine_pos 889 | pop_indices[i, :] = np.sort(power_order[i, :]) 890 | return 891 | 892 | def self_informed_move_worst_case_middle(self, i, rows, cols, pop, pop_indices, pop_size, power_order, mars): 893 | np.random.seed(seed=int(time.time())) 894 | n_candiate = 11 895 | pos_candidate = np.zeros((n_candiate, 2), dtype=np.int32) 896 | ind_pos_candidate = np.zeros(n_candiate, dtype=np.int32) 897 | turbine_pos = power_order[i, 0] 898 | ind_can = 0 899 | while True: 900 | null_turbine_pos = np.random.randint(0, cols * rows) 901 | if pop[i, null_turbine_pos] == 0: 902 | pos_candidate[ind_can, 1] = int(np.floor(null_turbine_pos / cols)) 903 | pos_candidate[ind_can, 0] = int(np.floor(null_turbine_pos - pos_candidate[ind_can, 1] * cols)) 904 | ind_pos_candidate[ind_can] = null_turbine_pos 905 | ind_can += 1 906 | if ind_can == n_candiate: 907 | break 908 | mars_val = mars.predict(pos_candidate) 909 | mars_val = mars_val[:, 0] 910 | sorted_index = np.argsort(-mars_val) # fitness value descending from largest to least 911 | null_turbine_pos = ind_pos_candidate[sorted_index[5]] 912 | pop[i, turbine_pos] = 0 913 | pop[i, null_turbine_pos] = 1 914 | power_order[i, 0] = null_turbine_pos 915 | pop_indices[i, :] = np.sort(power_order[i, :]) 916 | return 917 | 918 | def self_informed_mutation(self, rows, cols, N, pop, pop_indices, pop_size, mutation_rate): 919 | np.random.seed(seed=int(time.time())) 920 | for i in range(pop_size): 921 | if np.random.randn() > mutation_rate: 922 | continue 923 | while True: 924 | turbine_pos = np.random.randint(0, cols * rows) 925 | if pop[i, turbine_pos] == 1: 926 | break 927 | while True: 928 | null_turbine_pos = np.random.randint(0, cols * rows) 929 | if pop[i, null_turbine_pos] == 0: 930 | break 931 | pop[i, turbine_pos] = 0 932 | pop[i, null_turbine_pos] = 1 933 | for j in range(N): 934 | if pop_indices[i, j] == turbine_pos: 935 | pop_indices[i, j] = null_turbine_pos 936 | break 937 | pop_indices[i, :] = np.sort(pop_indices[i, :]) 938 | return 939 | 940 | def self_informed_crossover(self, N, pop, pop_indices, pop_size, n_parents, 941 | parent_layouts, parent_pop_indices): 942 | n_counter = 0 943 | np.random.seed(seed=int(time.time())) # init random seed 944 | while n_counter < pop_size: 945 | male = np.random.randint(0, n_parents) 946 | female = np.random.randint(0, n_parents) 947 | if male != female: 948 | cross_point = np.random.randint(1, N) 949 | if parent_pop_indices[male, cross_point - 1] < parent_pop_indices[female, cross_point]: 950 | pop[n_counter, :] = 0 951 | pop[n_counter, :parent_pop_indices[male, cross_point - 1] + 1] = parent_layouts[male, 952 | :parent_pop_indices[ 953 | male, cross_point - 1] + 1] 954 | pop[n_counter, parent_pop_indices[female, cross_point]:] = parent_layouts[female, 955 | parent_pop_indices[female, cross_point]:] 956 | pop_indices[n_counter, :cross_point] = parent_pop_indices[male, :cross_point] 957 | pop_indices[n_counter, cross_point:] = parent_pop_indices[female, cross_point:] 958 | n_counter += 1 959 | return 960 | 961 | def self_informed_select(self, pop, pop_indices, pop_size, elite_rate, random_rate): 962 | n_elite = int(pop_size * elite_rate) 963 | parents_ind = [i for i in range(n_elite)] 964 | np.random.seed(seed=int(time.time())) # init random seed 965 | for i in range(n_elite, pop_size): 966 | if np.random.randn() < random_rate: 967 | parents_ind.append(i) 968 | parent_layouts = pop[parents_ind, :] 969 | parent_pop_indices = pop_indices[parents_ind, :] 970 | return len(parent_pop_indices), parent_layouts, parent_pop_indices 971 | 972 | def self_informed_fitness(self, pop, rows, cols, pop_size, N, po): 973 | fitness_val = np.zeros(pop_size, dtype=np.float32) 974 | for i in range(pop_size): 975 | 976 | # layout = np.reshape(pop[i, :], newshape=(rows, cols)) 977 | xy_position = np.zeros((2, N), dtype=np.float32) # x y position 978 | cr_position = np.zeros((2, N), dtype=np.int32) # column row position 979 | ind_position = np.zeros(N, dtype=np.int32) 980 | ind_pos = 0 981 | for ind in range(rows * cols): 982 | if pop[i, ind] == 1: 983 | r_i = np.floor(ind / cols) 984 | c_i = np.floor(ind - r_i * cols) 985 | cr_position[0, ind_pos] = c_i 986 | cr_position[1, ind_pos] = r_i 987 | xy_position[0, ind_pos] = c_i * self.cell_width + self.cell_width_half 988 | xy_position[1, ind_pos] = r_i * self.cell_width + self.cell_width_half 989 | ind_position[ind_pos] = ind 990 | ind_pos += 1 991 | lp_power_accum = np.zeros(N, dtype=np.float32) # a specific layout power accumulate 992 | for ind_t in range(len(self.theta)): 993 | for ind_v in range(len(self.velocity)): 994 | # print(theta[ind_t]) 995 | # print(np.cos(theta[ind_t])) 996 | trans_matrix = np.array( 997 | [[np.cos(self.theta[ind_t]), -np.sin(self.theta[ind_t])], 998 | [np.sin(self.theta[ind_t]), np.cos(self.theta[ind_t])]], 999 | np.float32) 1000 | 1001 | trans_xy_position = np.matmul(trans_matrix, xy_position) 1002 | speed_deficiency = self.wake_calculate(trans_xy_position, N) 1003 | 1004 | actual_velocity = (1 - speed_deficiency) * self.velocity[ind_v] 1005 | lp_power = self.layout_power(actual_velocity, 1006 | N) # total power of a specific layout specific wind speed specific theta 1007 | lp_power = lp_power * self.f_theta_v[ind_t, ind_v] 1008 | lp_power_accum += lp_power 1009 | 1010 | sorted_index = np.argsort(lp_power_accum) # power from least to largest 1011 | po[i, :] = ind_position[sorted_index] 1012 | fitness_val[i] = np.sum(lp_power_accum) 1013 | return fitness_val 1014 | 1015 | def cal_slope(self, n, yi, xi): 1016 | sumx = 0.0 1017 | sumy = 0.0 1018 | sumxy = 0.0 1019 | for i in range(n): 1020 | sumx += xi[i] 1021 | sumy += yi[i] 1022 | sumxy += xi[i] * yi[i] 1023 | b = n * sumxy - sumx * sumy 1024 | return b 1025 | 1026 | 1027 | class GE_1_5_sleTurbine: 1028 | hub_height = 80.0 # unit (m) 1029 | rator_diameter = 77.0 # unit m 1030 | surface_roughness = 0.25 * 0.001 # unit mm surface roughness 1031 | # surface_roughness = 0.25 # unit mm surface roughness 1032 | rator_radius = 0 1033 | 1034 | entrainment_const = 0 1035 | 1036 | def __init__(self): 1037 | self.rator_radius = self.rator_diameter / 2 1038 | 1039 | self.entrainment_const = 0.5 / np.log(self.hub_height / self.surface_roughness) 1040 | return 1041 | 1042 | # power curve 1043 | def P_i_X(self, v): 1044 | if v < 2.0: 1045 | return 0 1046 | elif v < 12.8: 1047 | return 0.3 * v ** 3 1048 | elif v < 18: 1049 | return 629.1 1050 | else: 1051 | return 0 1052 | 1053 | 1054 | class LayoutGridMCGenerator: 1055 | def __init__(self): 1056 | return 1057 | 1058 | # rows : number of rows in wind farm 1059 | # cols : number of columns in wind farm 1060 | # n : number of layouts 1061 | # N : number of turbines 1062 | # lofname : layouts file name 1063 | def gen_mc_grid(rows, cols, n, N, lofname): # generate monte carlo wind farm layout grids 1064 | np.random.seed(seed=int(time.time())) # init random seed 1065 | layouts = np.zeros((n, rows * cols), dtype=np.int32) # one row is a layout 1066 | # layouts_cr = np.zeros((n*, 2), dtype=np.float32) # layouts column row index 1067 | positionX = np.random.randint(0, cols, size=(N * n * 2)) 1068 | positionY = np.random.randint(0, rows, size=(N * n * 2)) 1069 | ind_rows = 0 # index of layouts from 0 to n-1 1070 | ind_pos = 0 # index of positionX, positionY from 0 to N*n*2-1 1071 | # ind_crs = 0 1072 | while ind_rows < n: 1073 | layouts[ind_rows, positionX[ind_pos] + positionY[ind_pos] * cols] = 1 1074 | if np.sum(layouts[ind_rows, :]) == N: 1075 | # for ind in range(rows * cols): 1076 | # if layouts[ind_rows, ind] == 1: 1077 | # r_i = np.floor(ind / cols) 1078 | # c_i = np.floor(ind - r_i * cols) 1079 | # layouts_cr[ind_crs, 0] = c_i 1080 | # layouts_cr[ind_crs, 1] = r_i 1081 | # ind_crs += 1 1082 | ind_rows += 1 1083 | ind_pos += 1 1084 | if ind_pos >= N * n * 2: 1085 | print("Not enough positions") 1086 | break 1087 | # filename = "positions{}by{}by{}N{}.dat".format(rows, cols, n, N) 1088 | np.savetxt(lofname, layouts, fmt='%d', delimiter=" ") 1089 | # np.savetxt(xfname, layouts_cr, fmt='%d', delimiter=" ") 1090 | return layouts 1091 | 1092 | # generate population 1093 | def gen_pop(rows, cols, n, 1094 | N): # generate population very similar to gen_mc_grid, just without saving layouts to a file 1095 | np.random.seed(seed=int(time.time())) 1096 | layouts = np.zeros((n, rows * cols), dtype=np.int32) 1097 | positionX = np.random.randint(0, cols, size=(N * n * 2)) 1098 | positionY = np.random.randint(0, rows, size=(N * n * 2)) 1099 | ind_rows = 0 1100 | ind_pos = 0 1101 | 1102 | while ind_rows < n: 1103 | layouts[ind_rows, positionX[ind_pos] + positionY[ind_pos] * cols] = 1 1104 | if np.sum(layouts[ind_rows, :]) == N: 1105 | ind_rows += 1 1106 | ind_pos += 1 1107 | if ind_pos >= N * n * 2: 1108 | print("Not enough positions") 1109 | break 1110 | return layouts 1111 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import MARS # MARS (Multivariate Adaptive Regression Splines) regression class 4 | import WindFarmGeneticToolbox # wind farm layout optimization using genetic algorithms classes 5 | from datetime import datetime 6 | import os 7 | import pickle 8 | 9 | # parameters for the genetic algorithm 10 | elite_rate = 0.2 11 | cross_rate = 0.6 12 | random_rate = 0.5 13 | mutate_rate = 0.1 14 | 15 | # wind farm size, cells 16 | rows = 21 17 | cols = 21 18 | cell_width = 77.0 * 2 # unit : m 19 | 20 | # 21 | N = 60 # number of wind turbines 22 | pop_size = 100 # population size, number of inidividuals in a population 23 | iteration = 3 # number of genetic algorithm iterations 24 | 25 | # all data will be save in data folder 26 | data_folder = "data" 27 | if not os.path.exists(data_folder): 28 | os.makedirs(data_folder) 29 | 30 | # create an object of WindFarmGenetic 31 | wfg = WindFarmGeneticToolbox.WindFarmGenetic(rows=rows, cols=cols, N=N, pop_size=pop_size, 32 | iteration=iteration, cell_width=cell_width, elite_rate=elite_rate, 33 | cross_rate=cross_rate, random_rate=random_rate, mutate_rate=mutate_rate) 34 | # set wind distribution 35 | # wind distribution is discrete (number of wind speeds) by (number of wind directions) 36 | # wfg.init_4_direction_1_speed_12() 37 | wfg.init_1_direction_1_N_speed_12() 38 | 39 | ################################################ 40 | # generate initial populations 41 | ################################################ 42 | 43 | init_pops_data_folder = "data/init_pops" 44 | if not os.path.exists(init_pops_data_folder): 45 | os.makedirs(init_pops_data_folder) 46 | # n_init_pops : number of initial populations 47 | n_init_pops = 60 48 | for i in range(n_init_pops): 49 | wfg.gen_init_pop() 50 | wfg.save_init_pop("{}/init_{}.dat".format(init_pops_data_folder,i)) 51 | 52 | 53 | ############################################# 54 | # generate wind distribution surface 55 | ############################################# 56 | wds_data_folder = "data/wds" 57 | if not os.path.exists(wds_data_folder): 58 | os.makedirs(wds_data_folder) 59 | # mc : monte-carlo 60 | n_mc_samples = 10000 61 | 62 | # each layout is binary list and the length of the list is (rows*cols) 63 | # 1 indicates there is a wind turbine in that cell 64 | # 0 indicates there is no wind turbine in the cell 65 | # in "mc_layout.dat", there are 'n_mc_samples' line and each line is a layout. 66 | 67 | # generate 'n_mc_samples' layouts and save it in 'mc_layout.data' file 68 | WindFarmGeneticToolbox.LayoutGridMCGenerator.gen_mc_grid(rows=rows, cols=cols, n=n_mc_samples, N=N, 69 | lofname="{}/{}".format(wds_data_folder, "mc_layout.dat")) 70 | # read layouts from 'mc_layout.dat' file 71 | layouts = np.genfromtxt("{}/{}".format(wds_data_folder,"mc_layout.dat"), delimiter=" ", dtype=np.int32) 72 | 73 | # generate dataset to build wind farm distribution surface 74 | wfg.mc_gen_xy(rows=rows, cols=cols, layouts=layouts, n=n_mc_samples, N=N, xfname="{}/{}".format(wds_data_folder, "x.dat"), 75 | yfname="{}/{}".format(wds_data_folder, "y.dat")) 76 | 77 | # parameters for MARS regression method 78 | n_variables = 2 79 | n_points = rows * cols 80 | n_candidate_knots = [rows, cols] 81 | n_max_basis_functions = 100 82 | n_max_interactions = 4 83 | difference = 1.0e-3 84 | 85 | x_original = pd.read_csv("{}/{}".format(wds_data_folder,"x.dat"), header=None, nrows=n_points, delim_whitespace=True) 86 | x_original = x_original.values 87 | 88 | y_original = pd.read_csv("{}/{}".format(wds_data_folder,"y.dat"), header=None, nrows=n_points, delim_whitespace=True) 89 | y_original = y_original.values 90 | 91 | mars = MARS.MARS(n_variables=n_variables, n_points=n_points, x=x_original, y=y_original, 92 | n_candidate_knots=n_candidate_knots, n_max_basis_functions=n_max_basis_functions, 93 | n_max_interactions=n_max_interactions, difference=difference) 94 | mars.MARS_regress() 95 | # save wind distribution model to 'wds.mars' 96 | mars.save_mars_model_to_file() 97 | with open("{}/{}".format(wds_data_folder,"wds.mars"), "wb") as mars_file: 98 | pickle.dump(mars, mars_file) 99 | 100 | 101 | 102 | # results folder 103 | # adaptive_best_layouts_N60_9_20190422213718.dat : best layout for AGA of run index 9 104 | # result_CGA_20190422213715.dat : run time and best eta for CGA method 105 | results_data_folder = "data/results" 106 | if not os.path.exists(results_data_folder): 107 | os.makedirs(results_data_folder) 108 | 109 | n_run_times = 3 # number of run times 110 | # result_arr stores the best conversion efficiency of each run 111 | result_arr = np.zeros((n_run_times, 2), dtype=np.float32) 112 | 113 | # CGA method 114 | CGA_results_data_folder = "{}/CGA".format(results_data_folder) 115 | if not os.path.exists(CGA_results_data_folder): 116 | os.makedirs(CGA_results_data_folder) 117 | for i in range(0, n_run_times): # run times 118 | print("run times {} ...".format(i)) 119 | wfg.load_init_pop("{}/init_{}.dat".format(init_pops_data_folder, i)) 120 | run_time, eta = wfg.conventional_genetic_alg(ind_time=i, result_folder=CGA_results_data_folder) 121 | result_arr[i, 0] = run_time 122 | result_arr[i, 1] = eta 123 | time_stamp = datetime.now().strftime("%Y%m%d%H%M%S") 124 | filename = "{}/result_CGA_{}.dat".format(CGA_results_data_folder, time_stamp) 125 | np.savetxt(filename, result_arr, fmt='%f', delimiter=" ") 126 | 127 | # AGA method 128 | AGA_results_data_folder = "{}/AGA".format(results_data_folder) 129 | if not os.path.exists(AGA_results_data_folder): 130 | os.makedirs(AGA_results_data_folder) 131 | for i in range(0, n_run_times): # run times 132 | print("run times {} ...".format(i)) 133 | wfg.load_init_pop("{}/init_{}.dat".format(init_pops_data_folder, i)) 134 | run_time, eta = wfg.adaptive_genetic_alg(ind_time=i, result_folder=AGA_results_data_folder) 135 | result_arr[i, 0] = run_time 136 | result_arr[i, 1] = eta 137 | time_stamp = datetime.now().strftime("%Y%m%d%H%M%S") 138 | filename = "{}/result_AGA_{}.dat".format(AGA_results_data_folder, time_stamp) 139 | np.savetxt(filename, result_arr, fmt='%f', delimiter=" ") 140 | 141 | # SIGA method 142 | SIGA_results_data_folder = "{}/SIGA".format(results_data_folder) 143 | if not os.path.exists(SIGA_results_data_folder): 144 | os.makedirs(SIGA_results_data_folder) 145 | # wds_mars_file : wind distribution surface MARS model file 146 | wds_mars_file = "{}/{}".format(wds_data_folder, "wds.mars") 147 | for i in range(0, n_run_times): # run times 148 | print("run times {} ...".format(i)) 149 | wfg.load_init_pop("{}/init_{}.dat".format(init_pops_data_folder, i)) 150 | run_time, eta = wfg.self_informed_genetic_alg(ind_time=i, result_folder=SIGA_results_data_folder, 151 | wds_file=wds_mars_file) 152 | result_arr[i, 0] = run_time 153 | result_arr[i, 1] = eta 154 | time_stamp = datetime.now().strftime("%Y%m%d%H%M%S") 155 | filename = "{}/result_self_informed_{}.dat".format(SIGA_results_data_folder, time_stamp) 156 | np.savetxt(filename, result_arr, fmt='%f', delimiter=" ") 157 | --------------------------------------------------------------------------------