├── .spyproject └── config │ ├── backups │ ├── codestyle.ini.bak │ ├── encoding.ini.bak │ ├── vcs.ini.bak │ └── workspace.ini.bak │ ├── codestyle.ini │ ├── defaults │ ├── defaults-codestyle-0.2.0.ini │ ├── defaults-encoding-0.2.0.ini │ ├── defaults-vcs-0.2.0.ini │ └── defaults-workspace-0.2.0.ini │ ├── encoding.ini │ ├── vcs.ini │ └── workspace.ini ├── GA_NSGA.py ├── GA_main_final.py ├── HP_resultSGD_alldata_200eps.pkl ├── README.md ├── cGAN_3D.py ├── data_gen.m ├── data_preprocessing.py ├── image_gen_3D.py ├── image_vis_3D.py ├── min_max_final_all_data_20221026.npy ├── model_builder_3DCNN.py ├── ms_gen_25D_HPC.m ├── ori_gen.m ├── std_scalerSGD_alldata_200eps.bin ├── train_indexSGD_alldata_200eps.npy ├── train_index_final_all_data_20221026.npy ├── training_checkpoints ├── checkpoint ├── ckpt-1.data-00000-of-00001 ├── ckpt-1.index ├── ckpt-2.data-00000-of-00001 ├── ckpt-2.index ├── ckpt-3.data-00000-of-00001 ├── ckpt-3.index ├── ckpt-4.data-00000-of-00001 ├── ckpt-4.index ├── ckpt-5.data-00000-of-00001 └── ckpt-5.index └── yield_cal.py /.spyproject/config/backups/codestyle.ini.bak: -------------------------------------------------------------------------------- 1 | [codestyle] 2 | indentation = True 3 | edge_line = True 4 | edge_line_columns = 79 5 | 6 | [main] 7 | version = 0.2.0 8 | 9 | -------------------------------------------------------------------------------- /.spyproject/config/backups/encoding.ini.bak: -------------------------------------------------------------------------------- 1 | [encoding] 2 | text_encoding = utf-8 3 | 4 | [main] 5 | version = 0.2.0 6 | 7 | -------------------------------------------------------------------------------- /.spyproject/config/backups/vcs.ini.bak: -------------------------------------------------------------------------------- 1 | [vcs] 2 | use_version_control = False 3 | version_control_system = 4 | 5 | [main] 6 | version = 0.2.0 7 | 8 | -------------------------------------------------------------------------------- /.spyproject/config/backups/workspace.ini.bak: -------------------------------------------------------------------------------- 1 | [workspace] 2 | restore_data_on_startup = True 3 | save_data_on_exit = True 4 | save_history = True 5 | save_non_project_files = False 6 | project_type = 'empty-project-type' 7 | recent_files = ['../../../.config/spyder-py3/temp.py', 'GA_main_final.py', 'cGAN_3D.py'] 8 | 9 | [main] 10 | version = 0.2.0 11 | recent_files = [] 12 | 13 | -------------------------------------------------------------------------------- /.spyproject/config/codestyle.ini: -------------------------------------------------------------------------------- 1 | [codestyle] 2 | indentation = True 3 | edge_line = True 4 | edge_line_columns = 79 5 | 6 | [main] 7 | version = 0.2.0 8 | 9 | -------------------------------------------------------------------------------- /.spyproject/config/defaults/defaults-codestyle-0.2.0.ini: -------------------------------------------------------------------------------- 1 | [codestyle] 2 | indentation = True 3 | edge_line = True 4 | edge_line_columns = 79 5 | 6 | -------------------------------------------------------------------------------- /.spyproject/config/defaults/defaults-encoding-0.2.0.ini: -------------------------------------------------------------------------------- 1 | [encoding] 2 | text_encoding = utf-8 3 | 4 | -------------------------------------------------------------------------------- /.spyproject/config/defaults/defaults-vcs-0.2.0.ini: -------------------------------------------------------------------------------- 1 | [vcs] 2 | use_version_control = False 3 | version_control_system = 4 | 5 | -------------------------------------------------------------------------------- /.spyproject/config/defaults/defaults-workspace-0.2.0.ini: -------------------------------------------------------------------------------- 1 | [workspace] 2 | restore_data_on_startup = True 3 | save_data_on_exit = True 4 | save_history = True 5 | save_non_project_files = False 6 | 7 | -------------------------------------------------------------------------------- /.spyproject/config/encoding.ini: -------------------------------------------------------------------------------- 1 | [encoding] 2 | text_encoding = utf-8 3 | 4 | [main] 5 | version = 0.2.0 6 | 7 | -------------------------------------------------------------------------------- /.spyproject/config/vcs.ini: -------------------------------------------------------------------------------- 1 | [vcs] 2 | use_version_control = False 3 | version_control_system = 4 | 5 | [main] 6 | version = 0.2.0 7 | 8 | -------------------------------------------------------------------------------- /.spyproject/config/workspace.ini: -------------------------------------------------------------------------------- 1 | [workspace] 2 | restore_data_on_startup = True 3 | save_data_on_exit = True 4 | save_history = True 5 | save_non_project_files = False 6 | project_type = 'empty-project-type' 7 | recent_files = ['../../../.config/spyder-py3/temp.py', 'GA_main_final.py', 'cGAN_3D.py'] 8 | 9 | [main] 10 | version = 0.2.0 11 | recent_files = [] 12 | 13 | -------------------------------------------------------------------------------- /GA_NSGA.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Thu Aug 3 21:11:50 2023 5 | 6 | @author: xiao 7 | 8 | NSGA-iii code for optimization 9 | 10 | """ 11 | 12 | import signal 13 | import subprocess 14 | from subprocess import TimeoutExpired 15 | import matlab.engine 16 | import os 17 | import stat 18 | import tensorflow as tf 19 | #import sklearn 20 | import numpy as np 21 | import glob 22 | 23 | import model_builder_3DCNN 24 | import matplotlib.pyplot as plt 25 | from data_preprocessing import DeNormalizeData 26 | from scipy.stats import norm 27 | from image_vis_3D import plot_3D_res 28 | 29 | from joblib import load 30 | from image_gen_3D import image_gen_3D_BO 31 | from image_vis_3D import plot_3D 32 | from yield_cal import yield_strength_cal 33 | from cGAN_3D import Generator, Discriminator 34 | 35 | from pymoo.problems.functional import FunctionalProblem 36 | from pymoo.algorithms.moo.nsga3 import NSGA3 37 | from pymoo.algorithms.moo.nsga2 import NSGA2 38 | from pymoo.util.ref_dirs import get_reference_directions 39 | from pymoo.optimize import minimize 40 | from pymoo.termination.default import DefaultMultiObjectiveTermination 41 | from pymoo.core.callback import Callback 42 | from pymoo.util.display.column import Column 43 | from pymoo.util.display.output import Output 44 | from pymoo.indicators.hv import Hypervolume 45 | from pymoo.operators.crossover.pntx import SinglePointCrossover 46 | 47 | 48 | 49 | #initialize gen to 0 50 | gen=0 51 | # define the customized function, i.e., the trained CNN and cGAN models to be optimized 52 | def stress_estimator(solution,type_obj): 53 | 54 | n_priorBeta = float(int(n_priorBeta_glb)) 55 | ms_id = float(int(ms_id_glb)) 56 | ori_id = float(int(ori_id_glb)) 57 | lamwidth_beta = float(lamwidth_beta_glb) 58 | if alpha ==1: 59 | lam_ratio = float(solution[-1]) 60 | else: 61 | lam_ratio = float(lam_ratio_glb) 62 | 63 | # Create seed list from solution input 64 | ori_list = [] 65 | seed_list = [] 66 | for i in range(int(n_priorBeta)): 67 | ori_list.append([solution[i],solution[i+int(n_priorBeta)],solution[i+int(n_priorBeta*2)]]) 68 | seed_list.append([solution[i+int(n_priorBeta*3)]*0.03125,solution[i+int(n_priorBeta*4)]*0.03125,solution[i+int(n_priorBeta*5)]*0.03125]) 69 | 70 | oriBeta_inp = matlab.double(ori_list) 71 | # seed_list_new = seed_list 72 | # for i,seed in enumerate(seed_list): 73 | # for j,coord in enumerate(seed): 74 | # if coord%0.03125!=0: 75 | # seed_list_new[i][j] = round(coord)*0.03125 76 | global sh_dir 77 | sh_dir = './opt_run/NSGA/'+run_name+'/GA_Gen_{0}_Sol_{1}_{2}/'.format(gen,counter,type_obj) 78 | 79 | if (os.path.exists(sh_dir))!=1: 80 | os.makedirs(sh_dir) 81 | 82 | # =========== Call MATLAB function to generate grain orientations ==============================# 83 | # input numbers can't be integer. Should be float. e.g. 10.0 instead of 10 84 | # nargout=0 signifies no output is being returned 85 | eng.data_gen(n_priorBeta,oriBeta_inp,n_colonies_max_glb,lamwidth_beta,lam_ratio,ms_id,run_name,ori_id,sh_dir,nargout=0) 86 | 87 | # ========== Call Neper to generate microstructures ============================================# 88 | # counter = counter+1 89 | with open(sh_dir+'seeds','w') as seeds_file: 90 | for seed in seed_list: 91 | seeds_file.write(' '.join(map(str,seed))+'\n') 92 | sh_name = sh_dir+'generate_tess.sh' 93 | # make generate_tess file excutable 94 | st = os.stat(sh_name) 95 | 96 | os.chmod(sh_name, st.st_mode | stat.S_IEXEC) 97 | # subprocess.run([sh_name], stderr = subprocess.DEVNULL, cwd=sh_dir) 98 | 99 | # ================================= Error handling ================================================# 100 | 101 | try: 102 | # no bash output 103 | proc = subprocess.Popen([sh_name], stderr = subprocess.DEVNULL,stdout = subprocess.DEVNULL, cwd='./', shell=True, preexec_fn = os.setsid) 104 | # # with bash output 105 | # proc = subprocess.Popen([sh_name], cwd='./', shell=True, preexec_fn = os.setsid) 106 | proc.communicate(timeout=600) 107 | except TimeoutExpired: 108 | # proc.kill() 109 | os.killpg(os.getpgid(proc.pid), signal.SIGTERM) 110 | print('Tessellation timeout for {0}'.format(sh_dir)) 111 | return 0,0 112 | 113 | # print('tessellating {0}\n'.format(ms_id)) 114 | try: 115 | images = image_gen_3D_BO(ms_id,sh_dir) 116 | except IndexError: 117 | print('Tessellation failed for {0}'.format(sh_dir)) 118 | return 0,0 119 | # if alpha phase ratio exceed limits 120 | stgroup = glob.glob(sh_dir+'*.stgroup') 121 | with open(stgroup[0]) as f_stgroup: 122 | lines = f_stgroup.readlines() 123 | 124 | # ============ if the phase ratio is with range for Ti64 ========================================# 125 | if float(lines[0])<0.28: 126 | print('Alpha phase ratio out of lower limit!') 127 | return 0,0 128 | elif float(lines[0])>0.80: 129 | print('Alpha phase ratio out of upper limit!') 130 | return 0,0 131 | 132 | # ============ Calculate global stress ========================================================# 133 | images = image_gen_3D_BO(ms_id,sh_dir) 134 | tf.get_logger().setLevel('ERROR') 135 | stress = model.predict(images) # predict global stress response 136 | 137 | # comment out if no std 138 | sc = load(result_dir+'std_scaler'+save_name+'.bin') 139 | stress = sc.inverse_transform(stress) 140 | # calculate the modulus and yield strength of this microstructure 141 | E,yield_strength = yield_strength_cal(stress) 142 | 143 | return stress,images 144 | 145 | def E_cal(solution): 146 | global stress_sol 147 | global images_sol 148 | stress_sol,images_sol = stress_estimator(solution,'E') 149 | if type(stress_sol) != int: 150 | E,yield_strength = yield_strength_cal(stress_sol) 151 | f = open(sh_dir+"E_sigma.txt", "a") 152 | f.write("E = {0}\nSigmay = {1}\n".format(E,yield_strength)) 153 | f.close() 154 | else: 155 | return 10 # return a value that is for sure greater than current E 156 | # if obj_select==1: 157 | # E=-E[0] 158 | 159 | return E[0]/100000 160 | def sigmay_cal(solution): 161 | # stress,images = stress_estimator(solution,'Sy') 162 | if type(stress_sol) != int: 163 | E,yield_strength = yield_strength_cal(stress_sol) 164 | else: 165 | return 10 166 | yield_strength[0]=1/(yield_strength[0]/1000) 167 | return yield_strength[0] 168 | def kt_cal(solution): 169 | # stress,images = stress_estimator(solution,'Kt') 170 | global counter 171 | counter = counter+1 172 | global gen 173 | global pop_size 174 | gen = int(counter/pop_size) 175 | if type(stress_sol) == int: 176 | return 10 177 | else: 178 | #============ Calculate stress concentration factor kt =========================================# 179 | global generator 180 | pred = generator(images_sol[0].reshape(-1,32,32,32,4), training=True) 181 | pred = pred.numpy() 182 | pred = DeNormalizeData(pred,data_min,data_max) 183 | idx_max = np.unravel_index(np.argmax(pred), pred.shape) 184 | stress_max = pred[idx_max] 185 | stress_maxslice = pred[:,idx_max[1],:,:,:] 186 | stress_nom = np.average(stress_maxslice.flatten()) 187 | kt = stress_max/stress_nom 188 | f = open(sh_dir+"Kt.txt", "a") 189 | f.write("Kt = {0}".format(kt)) 190 | f.close() 191 | return kt 192 | # #============ Output optimization results into a text file =============================# 193 | # with open(sh_dir+'result_values.txt','w') as f: 194 | # f.write('E is ' + str(E)+'\n') 195 | # f.write('Yield strength is '+str(yield_strength)+'\n') 196 | # f.write('Optimization objective is '+str(opt_obj)+'\n') 197 | # f.write('stress vector is '+str(stress)+'\n') 198 | # f.write('best kt is {0}\n'.format(kt)) 199 | # plot_3D_res(pred/np.average(pred.reshape(-1,1)),stress_min=0,stress_max=1.400,image_format='svg',save_dir = sh_dir) 200 | # np.save(sh_dir+'pred_best',pred) 201 | # return E,yield_strength,opt_obj,stress,pred,kt 202 | 203 | #============ global values relevant to GA function =============================# 204 | # ============================= selectors =====================================# 205 | # object selector. 1--max sigmay and E; 2 --max sigmay but min E. Kt to be minimized in both cases 206 | obj_select = 2 207 | # if alpha phase ratio is fixed in GA 208 | alpha = 1 209 | # randomseed to initialize GA 210 | random_seed = 4 211 | # run_name to be saved 212 | global run_name 213 | run_name = 'NSGAiii_obj{0}_alpha{1}_rand{2}_kt'.format(obj_select,alpha,random_seed) 214 | 215 | # ============================= constant values =====================================# 216 | 217 | if (obj_select == 2): 218 | null_val = 1000 # set this value to one greater than objective function values. for obj 2 and 5, this is -10; for rest this is 0. 219 | else: 220 | null_val=0 221 | counter = 0 # to keep track of the iterations 222 | 223 | ms_id_glb=1 224 | ori_id_glb=1 225 | n_priorBeta_glb=25.0 226 | n_colonies_max_glb=1.0 227 | lamwidth_beta_glb=0.15 228 | lam_ratio_glb = 1.0 229 | # ------------------------name and dir of saved 3DCNN ------------------------------------# 230 | save_name = 'SGD_alldata_200eps' 231 | result_dir = './' 232 | model = load(result_dir+'HP_result'+save_name+'.pkl').best_estimator_ 233 | 234 | # ----------------------Load the trained 3D-cGAN model ------------------------------------# 235 | data_types = ['seq','strain','strain-eq','strain-pl','strain-pl-eq'] 236 | typeid=0 237 | 238 | cgan_dir = './' 239 | save_name_cgan = 'final_all_data_20221026'.format(data_types[typeid]) 240 | BATCH_SIZE = 1 241 | min_max = np.load(cgan_dir+'min_max_'+save_name_cgan+'.npy') 242 | data_min = min_max[0] 243 | data_max = min_max[1] 244 | 245 | generator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5) 246 | discriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5) 247 | 248 | generator = Generator() 249 | discriminator = Discriminator() 250 | 251 | checkpoint_dir = cgan_dir+'training_checkpoints' 252 | checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") 253 | checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer, 254 | discriminator_optimizer=discriminator_optimizer, 255 | generator=generator, 256 | discriminator=discriminator) 257 | # restore any checkpoint 258 | checkpoint_id = 3 259 | checkpoint.restore(checkpoint_dir+'/ckpt-{0}'.format(checkpoint_id)) 260 | 261 | # ----------------------start matlab engine ------------------------------------# 262 | eng = matlab.engine.start_matlab() 263 | 264 | # ---------------------- Set up the optimziation problem ------------------------------------# 265 | objs = [ 266 | lambda x: E_cal(x), 267 | lambda x: sigmay_cal(x), 268 | lambda x: kt_cal(x) 269 | ] 270 | 271 | num_genes = int(n_priorBeta_glb)*6+alpha 272 | n_var = num_genes 273 | # assign different gene types 274 | gene_type = [] 275 | xl_list = [] 276 | xu_list = [] 277 | for gene in range(num_genes): 278 | if gene=n_priorBeta_glb: 281 | xl_list.append(0) 282 | xu_list.append(180) 283 | else: 284 | xl_list.append(0) 285 | xu_list.append(360) 286 | elif alpha==1: 287 | if gene!=num_genes-1: 288 | gene_type.append(int) 289 | xl_list.append(1) 290 | xu_list.append(29) 291 | else: 292 | gene_type.append(float) 293 | xl_list.append(0.1) 294 | xu_list.append(4) 295 | else: 296 | gene_type.append(int) 297 | xl_list.append(1) 298 | xu_list.append(29) 299 | # convert xl_list and xu_list to np arrays 300 | xl = np.array(xl_list) 301 | xu = np.array(xu_list) 302 | 303 | problem = FunctionalProblem(n_var, 304 | objs, 305 | xl=xl, 306 | xu=xu 307 | ) 308 | # # ---------------------- For testing if the problem is well formulated ------------------ # 309 | # test_data_dir = '/home/xiao/projects/inverse_mat_des/BO/20230803_rv1_25_obj4_alpha1_rand1_kt/Best_sol/' 310 | # # first get all the angles 311 | # a1 = [] 312 | # a2 = [] 313 | # a3 = [] 314 | # s1 = [] 315 | # s2 = [] 316 | # s3 = [] 317 | # for i in range(25): 318 | # f_angles=open(test_data_dir+'1_cell{0}'.format(i+1)) 319 | # angles = f_angles.read() 320 | # angles_list = angles.split(' ') 321 | # a1.append(float(angles_list[0])) 322 | # a2.append(float(angles_list[1])) 323 | # a3.append(float(angles_list[2])) 324 | # # then get all the seeds 325 | # f_seeds=open(test_data_dir+'seeds') 326 | # seeds = f_seeds.read() 327 | # seeds = seeds.split('\n') 328 | # for idx,seed in enumerate(seeds): 329 | # if idx !=25: 330 | # seed_list = seed.split(' ') 331 | # s1.append(float(seed_list[0])/0.03125) 332 | # s2.append(float(seed_list[1])/0.03125) 333 | # s3.append(float(seed_list[2])/0.03125) 334 | # # lastly specify the alpha ratio 335 | # a_ratio = [2.45921] 336 | 337 | # # put them together 338 | # test_inp = np.array(a1+a2+a3+s1+s2+s3+a_ratio) 339 | # F = problem.evaluate(test_inp) 340 | # # -------------------------------------------------------------------------------------- # 341 | 342 | 343 | # ----------------------------- For running the optimization ------------------------- # 344 | #size of each population 345 | pop_size = 36 346 | NSGA = 3 347 | if NSGA==3: 348 | # NSGA iii 349 | ref_dirs = get_reference_directions("das-dennis", 3, n_partitions=6) 350 | crossover = SinglePointCrossover() 351 | algorithm = NSGA3(pop_size=pop_size, 352 | ref_dirs=ref_dirs, 353 | crossover=crossover, 354 | ) 355 | else: 356 | algorithm = NSGA2(pop_size=pop_size) 357 | 358 | class MyCallback(Callback): 359 | 360 | def __init__(self) -> None: 361 | super().__init__() 362 | self.n_evals = [] 363 | self.opt_0 = [] 364 | 365 | 366 | def notify(self, algorithm): 367 | self.n_evals.append(algorithm.evaluator.n_eval) 368 | self.opt_0.append(algorithm.opt[0].F) 369 | 370 | class MyOutput(Output): 371 | 372 | def __init__(self): 373 | super().__init__() 374 | self.sol0 = Column("sol0", width=40) 375 | self.sol1 = Column("sol1", width=40) 376 | self.sol2 = Column("sol2", width=40) 377 | self.sol3 = Column("sol3", width=40) 378 | self.sol4 = Column("sol4", width=40) 379 | self.sol5 = Column("sol5", width=40) 380 | self.sol6 = Column("sol6", width=40) 381 | self.sol7 = Column("sol7", width=40) 382 | self.columns += [self.sol0,self.sol1,self.sol2,self.sol3,self.sol4,self.sol5,self.sol6,self.sol7] 383 | 384 | def update(self, algorithm): 385 | super().update(algorithm) 386 | self.sol0.set(algorithm.pop.get("F")[0]) 387 | self.sol1.set(algorithm.pop.get("F")[1]) 388 | self.sol2.set(algorithm.pop.get("F")[2]) 389 | self.sol3.set(algorithm.pop.get("F")[3]) 390 | self.sol4.set(algorithm.pop.get("F")[4]) 391 | self.sol5.set(algorithm.pop.get("F")[5]) 392 | self.sol6.set(algorithm.pop.get("F")[6]) 393 | self.sol7.set(algorithm.pop.get("F")[7]) 394 | 395 | 396 | 397 | termination = DefaultMultiObjectiveTermination( 398 | n_max_gen=60, 399 | period=15 400 | ) 401 | 402 | res = minimize(problem, 403 | algorithm, 404 | termination, 405 | seed=random_seed, 406 | callback=MyCallback(), 407 | output=MyOutput(), 408 | verbose=True, 409 | save_history=True) 410 | 411 | X, F = res.opt.get("X", "F") 412 | hist = res.history 413 | 414 | n_evals = [] # corresponding number of function evaluations\ 415 | hist_F = [] # the objective space values in each generation 416 | hist_cv = [] # constraint violation in each generation 417 | hist_cv_avg = [] # average constraint violation in the whole population 418 | 419 | for algo in hist: 420 | 421 | # store the number of function evaluations 422 | n_evals.append(algo.evaluator.n_eval) 423 | 424 | # retrieve the optimum from the algorithm 425 | opt = algo.opt 426 | 427 | # store the least contraint violation and the average in each population 428 | hist_cv.append(opt.get("CV").min()) 429 | hist_cv_avg.append(algo.pop.get("CV").mean()) 430 | 431 | # filter out only the feasible and append and objective space values 432 | feas = np.where(opt.get("feasible"))[0] 433 | hist_F.append(opt.get("F")[feas]) 434 | 435 | approx_ideal = F.min(axis=0) 436 | approx_nadir = F.max(axis=0) 437 | 438 | metric = Hypervolume(ref_point= np.array([1.25, 1.25, 1.6]), 439 | norm_ref_point=False, 440 | zero_to_one=True, 441 | ideal=approx_ideal, 442 | nadir=approx_nadir) 443 | 444 | save_dir = './opt_run/NSGA/{0}/'.format(run_name) 445 | hv = [metric.do(_F) for _F in hist_F] 446 | 447 | plt.figure(figsize=(7, 5)) 448 | plt.plot(n_evals, hv, color='black', lw=0.7, label="Avg. CV of Pop") 449 | plt.scatter(n_evals, hv, facecolor="none", edgecolor='black', marker="p") 450 | plt.title("Convergence") 451 | plt.xlabel("Function Evaluations") 452 | plt.ylabel("Hypervolume") 453 | plt.savefig(save_dir+'hv_plot.svg') 454 | plt.show() 455 | 456 | 457 | 458 | np.save(save_dir+'results_x',res.X) 459 | np.save(save_dir+'results_y',res.F) 460 | np.save(save_dir+'n_evals',n_evals) 461 | np.save(save_dir+'hv',hv) 462 | np.save(save_dir+'hist_F',hist_F) 463 | -------------------------------------------------------------------------------- /GA_main_final.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Jul 11 17:54:43 2022 5 | 6 | @author: Xiao 7 | 8 | Bayesian opt test 9 | 10 | """ 11 | import subprocess 12 | from subprocess import TimeoutExpired 13 | import matlab.engine 14 | import os 15 | import stat 16 | import tensorflow as tf 17 | #import sklearn 18 | import numpy as np 19 | import glob 20 | 21 | import model_builder_3DCNN 22 | #import matplotlib.pyplot as plt 23 | from data_preprocessing import DeNormalizeData 24 | #from scipy.stats import norm 25 | from image_vis_3D import plot_3D_res 26 | 27 | from joblib import load 28 | from image_gen_3D import image_gen_3D_BO 29 | #from image_vis_3D import plot_3D 30 | from yield_cal import yield_strength_cal 31 | from cGAN_3D import Generator, Discriminator 32 | 33 | import pygad 34 | 35 | # define the customized function, i.e., the trained CNN model to be optimized 36 | def stress_estimator(solution,solution_idx): 37 | 38 | global counter 39 | global ga_instance 40 | only_obj = only_obj_glb 41 | 42 | gen = ga_instance.generations_completed 43 | 44 | n_priorBeta = float(int(n_priorBeta_glb)) 45 | ms_id = float(int(ms_id_glb)) 46 | ori_id = float(int(ori_id_glb)) 47 | lamwidth_beta = float(lamwidth_beta_glb) 48 | if alpha ==1: 49 | lam_ratio = float(solution[-1]) 50 | else: 51 | lam_ratio = float(lam_ratio_glb) 52 | 53 | # Create seed list from solution input 54 | ori_list = [] 55 | seed_list = [] 56 | for i in range(int(n_priorBeta)): 57 | ori_list.append([solution[i],solution[i+int(n_priorBeta)],solution[i+int(n_priorBeta*2)]]) 58 | seed_list.append([solution[i+int(n_priorBeta*3)]*0.03125,solution[i+int(n_priorBeta*4)]*0.03125,solution[i+int(n_priorBeta*5)]*0.03125]) 59 | 60 | oriBeta_inp = matlab.double(ori_list) 61 | # seed_list_new = seed_list 62 | # for i,seed in enumerate(seed_list): 63 | # for j,coord in enumerate(seed): 64 | # if coord%0.03125!=0: 65 | # seed_list_new[i][j] = round(coord)*0.03125 66 | 67 | # ========= Decide whether to run the optimization or just visulizing the best result ==========# 68 | if only_obj: 69 | sh_dir = './opt_run/GA_Gen_{0}_Sol_{1}/'.format(gen,counter) 70 | else: 71 | sh_dir = './opt_run/Best_sol/' 72 | if (os.path.exists(sh_dir))!=1: 73 | os.makedirs(sh_dir) 74 | 75 | # =========== Call MATLAB function to generate grain orientations ==============================# 76 | # input numbers can't be integer. Should be float. e.g. 10.0 instead of 10 77 | # nargout=0 signifies no output is being returned 78 | eng.data_gen(n_priorBeta,oriBeta_inp,n_colonies_max_glb,lamwidth_beta,lam_ratio,ms_id,run_name,ori_id,sh_dir,nargout=0) 79 | 80 | # ========== Call Neper to generate microstructures ============================================# 81 | counter = counter+1 82 | with open(sh_dir+'seeds','w') as seeds_file: 83 | for seed in seed_list: 84 | seeds_file.write(' '.join(map(str,seed))+'\n') 85 | sh_name = sh_dir+'generate_tess.sh' 86 | # make generate_tess file excutable 87 | st = os.stat(sh_name) 88 | 89 | os.chmod(sh_name, st.st_mode | stat.S_IEXEC) 90 | # subprocess.run([sh_name], stderr = subprocess.DEVNULL, cwd=sh_dir) 91 | 92 | # ================================= Error handling ================================================# 93 | global lower_bd 94 | 95 | try: 96 | # subprocess.run([sh_name], stderr = subprocess.DEVNULL, cwd=sh_dir,timeout=600) 97 | proc = subprocess.Popen([sh_name], stdout=subprocess.DEVNULL, stderr = subprocess.STDOUT, cwd='./') 98 | proc.communicate(timeout=600) 99 | except TimeoutExpired: 100 | proc.kill() 101 | print('Tessellation timeout for {0}'.format(sh_dir)) 102 | return lower_bd 103 | 104 | # print('tessellating {0}\n'.format(ms_id)) 105 | try: 106 | images = image_gen_3D_BO(ms_id,sh_dir) 107 | except IndexError: 108 | print('Tessellation failed for {0}'.format(sh_dir)) 109 | return lower_bd 110 | # if alpha phase ratio exceed limits 111 | stgroup = glob.glob(sh_dir+'*.stgroup') 112 | with open(stgroup[0]) as f_stgroup: 113 | lines = f_stgroup.readlines() 114 | 115 | # ============ if the phase ratio is with range for Ti64 ========================================# 116 | if float(lines[0])<0.15: 117 | print('Alpha phase ratio out of lower limit!') 118 | return lower_bd 119 | elif float(lines[0])>0.95: 120 | print('Alpha phase ratio out of upper limit!') 121 | return lower_bd 122 | 123 | # ============ Calculate global stress ========================================================# 124 | images = image_gen_3D_BO(ms_id,sh_dir) 125 | tf.get_logger().setLevel('ERROR') 126 | stress = model.predict(images) # predict global stress response 127 | 128 | # comment out if no std 129 | sc = load(result_dir+'std_scaler'+save_name+'.bin') 130 | stress = sc.inverse_transform(stress) 131 | # calculate the modulus and yield strength of this microstructure 132 | E,yield_strength = yield_strength_cal(stress) 133 | 134 | #============ Calculate stress concentration factor kt =========================================# 135 | global generator 136 | pred = generator(images[0].reshape(-1,32,32,32,4), training=True) 137 | pred = pred.numpy() 138 | pred = DeNormalizeData(pred,data_min,data_max) 139 | idx_max = np.unravel_index(np.argmax(pred), pred.shape) 140 | stress_max = pred[idx_max] 141 | stress_maxslice = pred[:,idx_max[1],:,:,:] 142 | stress_nom = np.average(stress_maxslice.flatten()) 143 | kt = stress_max/stress_nom 144 | #mu, std = norm.fit(pred.reshape(-1,1)) # fit the predictions into a normal distribution 145 | 146 | #===================== calculate the optimization objects ======================================# 147 | global obj_select 148 | global tar_E 149 | global tar_sigmay 150 | 151 | # maximize E and yield_strength 152 | if obj_select==0: 153 | opt_obj = E[0]/100000+yield_strength[0]/1000 154 | 155 | # minimize E and maximize yield_strength 156 | elif obj_select==1: 157 | opt_obj = -E[0]/100000+yield_strength[0]/1000 158 | 159 | # minimize E and fix yield_strength 160 | elif obj_select==2: 161 | if tar_sigmay>yield_strength[0]: # i.e., 9645000: 169 | print('E out of fixed range!') 170 | return lower_bd 171 | else: 172 | opt_obj = yield_strength[0]/1000-kt 173 | 174 | # maximize E, yield, and minimize kt 175 | elif obj_select==4: 176 | opt_obj = E[0]/100000+yield_strength[0]/1000-kt 177 | 178 | # maximize yield strength, minimize E and kt 179 | elif obj_select==5: 180 | opt_obj = -E[0]/100000+yield_strength[0]/1000-kt 181 | 182 | print('Target value is {0}\n'.format(opt_obj)) 183 | 184 | #============ Output optimization results into a text file =============================# 185 | # if only_obj==1, it's for GA optimization. If not, it's for calculating the best solution result only 186 | if only_obj: 187 | return opt_obj 188 | else: 189 | with open(sh_dir+'result_values.txt','w') as f: 190 | f.write('E is ' + str(E)+'\n') 191 | f.write('Yield strength is '+str(yield_strength)+'\n') 192 | f.write('Optimization objective is '+str(opt_obj)+'\n') 193 | f.write('stress vector is '+str(stress)+'\n') 194 | f.write('best kt is {0}\n'.format(kt)) 195 | plot_3D_res(pred/np.average(pred.reshape(-1,1)),stress_min=0,stress_max=1.400,image_format='svg',save_dir = sh_dir) 196 | np.save(sh_dir+'pred_best',pred) 197 | return E,yield_strength,opt_obj,stress,pred,kt 198 | 199 | def callback_gen(ga_instance): 200 | print("Generation : ", ga_instance.generations_completed) 201 | print("Fitness of the best solution :", ga_instance.best_solution()[1]) 202 | 203 | # ====================== Set optimization objectives ==================================== # 204 | obj_select = 4 # 0 is max sigma y maxE; 1 max sigma y min E; 2 fix sigma; 3 fix E; 4 and 5 added kt 205 | 206 | 207 | random_seed = 42 208 | tar_sigmay = 964 209 | tar_E = 105000 210 | if (obj_select == 2) or (obj_select == 5): 211 | lower_bd = -2 # set this value to one smaller than objective function values. for obj 2 and 5, this is -10; for rest this is 0. 212 | elif obj_select==3: 213 | lower_bd = -1 214 | else: 215 | lower_bd=0 216 | 217 | counter = 0 # to keep track of the iterations 218 | 219 | ms_id_glb=1 220 | ori_id_glb=1 221 | n_priorBeta_glb=25.0 222 | n_colonies_max_glb=1.0 223 | lamwidth_beta_glb=0.15 224 | lam_ratio_glb = 1.0 225 | 226 | 227 | # ------------------------name and dir of saved 3DCNN ------------------------------------# 228 | save_name = 'SGD_alldata_200eps' 229 | result_dir = './' 230 | 231 | # ----------------------Load the trained 3D-cGAN model ------------------------------------# 232 | data_types = ['seq','strain','strain-eq','strain-pl','strain-pl-eq'] 233 | typeid=0 234 | 235 | cgan_dir = './' 236 | save_name_cgan = 'final_all_data_20221026' 237 | BATCH_SIZE = 1 238 | min_max = np.load(cgan_dir+'min_max_'+save_name_cgan+'.npy') 239 | data_min = min_max[0] 240 | data_max = min_max[1] 241 | 242 | # --------------------------- Testing section ------------------------------ # 243 | generator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5) 244 | discriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5) 245 | 246 | generator = Generator() 247 | discriminator = Discriminator() 248 | 249 | checkpoint_dir = cgan_dir+'training_checkpoints' 250 | checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") 251 | checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer, 252 | discriminator_optimizer=discriminator_optimizer, 253 | generator=generator, 254 | discriminator=discriminator) 255 | # restore any checkpoint 256 | checkpoint_id = 3 257 | checkpoint.restore(checkpoint_dir+'/ckpt-{0}'.format(checkpoint_id)) 258 | 259 | 260 | # ----------------------name of the optimization run---------------------------------------# 261 | alpha = 1 # 0 or 1. use phase ratio as optimization parameter or not 262 | run_name = '{0}_obj{1}_alpha{2}_rand{3}_kt'.format(int(n_priorBeta_glb),obj_select,alpha,random_seed) 263 | save_name_opt = run_name 264 | model = load(result_dir+'HP_result'+save_name+'.pkl').best_estimator_ 265 | 266 | fitness_function = stress_estimator 267 | 268 | num_generations = 60 269 | num_parents_mating = 4 270 | 271 | sol_per_pop = 8 272 | num_genes = int(n_priorBeta_glb)*6+alpha 273 | 274 | # assign different gene types 275 | gene_type = [] 276 | gene_space = [] 277 | for gene in range(num_genes): 278 | if gene=n_priorBeta_glb: 281 | gene_space.append({'low':0,'high':180}) 282 | else: 283 | gene_space.append({'low':0,'high':360}) 284 | elif alpha==1: 285 | if gene!=num_genes-1: 286 | gene_type.append(int) 287 | gene_space.append({'low':1,'high':29}) 288 | else: 289 | gene_type.append(float) 290 | gene_space.append({'low':0.1,'high':4}) 291 | else: 292 | gene_type.append(int) 293 | gene_space.append({'low':1,'high':29}) 294 | 295 | #init_range_low = 0 296 | #init_range_high = 4 297 | 298 | parent_selection_type = "sss" 299 | keep_parents = 1 300 | 301 | crossover_type = "single_point" 302 | 303 | mutation_type = "random" 304 | mutation_percent_genes = 10 305 | 306 | # -------------------- Optimization loop -------------------------------------# 307 | eng = matlab.engine.start_matlab() 308 | 309 | ga_instance = pygad.GA(num_generations=num_generations, 310 | num_parents_mating=num_parents_mating, 311 | fitness_func=fitness_function, 312 | sol_per_pop=sol_per_pop, 313 | num_genes=num_genes, 314 | # init_range_low=init_range_low,random_seed=2 315 | # init_range_high=init_range_high, 316 | gene_space = gene_space, 317 | gene_type = gene_type, 318 | parent_selection_type=parent_selection_type, 319 | keep_parents=keep_parents, 320 | crossover_type=crossover_type, 321 | mutation_type=mutation_type, 322 | mutation_percent_genes=mutation_percent_genes, 323 | on_generation=callback_gen, 324 | save_best_solutions=True, 325 | save_solutions=True, 326 | random_seed=random_seed, 327 | stop_criteria=["saturate_15"] # stop after the solution is not changing for 15 generations 328 | ) 329 | # Run GA 330 | only_obj_glb = 1 # this value needs to be 1 in the GA loops 331 | ga_instance.run() 332 | # 333 | # Plot fitness 334 | ga_instance.plot_fitness( 335 | title = 'Generation VS Fitness {0}'.format(run_name), 336 | xlabel = 'Generation',ylabel = 'Objective function fitness', 337 | linewidth = 3, 338 | font_size = 14, 339 | plot_type = 'plot', 340 | color = "#3870FF", 341 | save_dir = './fitness_plot.svg' 342 | ) 343 | 344 | 345 | solution, solution_fitness, solution_idx = ga_instance.best_solution() 346 | print("Parameters of the best solution : {solution}".format(solution=solution)) 347 | print("Fitness value of the best solution = {solution_fitness}".format(solution_fitness=solution_fitness)) 348 | print("Index of the best solution : {solution_idx}".format(solution_idx=solution_idx)) 349 | 350 | only_obj_glb = 0 # this value sets to be 0 when only calculating the results 351 | E_best,yield_strength_best,opt_obj_best,stress_best,pred_best,kt_best = stress_estimator(solution,solution_idx) 352 | 353 | eng.quit() 354 | 355 | if ga_instance.best_solution_generation != -1: 356 | print("Best fitness value reached after {best_solution_generation} generations.".format(best_solution_generation=ga_instance.best_solution_generation)) 357 | 358 | file_name = './result' 359 | # save results 360 | ga_instance.save(filename=file_name) 361 | 362 | ## load results 363 | #ga_instance = pygad.load(filename=file_name) 364 | #print(ga_instance.best_solution()) -------------------------------------------------------------------------------- /HP_resultSGD_alldata_200eps.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/HP_resultSGD_alldata_200eps.pkl -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Tailoring the mechanical properties of 3D microstructures: a deep learning and genetic algorithm inverse optimization framework 2 | 3 | Xiao Shang 4 | 5 | email: xiao.shang@mail.utoronto.ca 6 | 7 | ![abstract](https://github.com/xshang93/MsInverseDesign/assets/86078353/bd31082f-2224-4bc5-aa96-3c22b51bf7b7) 8 | 9 | We report a framework that provides an end-to-end solution to achieve application-specific mechanical properties by microstructure optimization. In this study, we select the widely used Ti-6Al-4V to demonstrate the effectiveness of this framework by tailoring its microstructure and achieving various yield strength and elastic modulus across a large design space, while minimizing the stress concentration factor. Compared with conventional methods, our framework is efficient, versatile, and readily transferrable to other materials and properties. 10 | 11 | ### Core dependencies and librarys 12 | 13 | - MATLAB python engine R2023a 14 | - Neper 4.4.2 - 33 (https://neper.info/) A free / open source software package for polycrystal generation and meshing. 15 | - Numpy 1.23.5 16 | - Tensorflow 2.10.1 17 | - Pandas 1.5.3 18 | - PyGAD 2.18.1 (https://pygad.readthedocs.io/en/latest/) An open-source Python library for building the genetic algorithm. 19 | - Pymoo 0.6.0.1 (https://pymoo.org/) An open-source framework for state of the art single- and multi-objective optimization algorithms. 20 | - python 3.9.16 21 | - Scikeras 0.9.0 22 | - Sklearn 1.0.2 23 | 24 | ### How to use 25 | 1. Make sure your have all required dependencies at the correct versions. 26 | 2. Clone this repo to your local directory uisng ```git clone https://github.com/xshang93/MsInverseDesign/``` 27 | 3. In the ```GA_main_final.py``` file, line 204, select the ojb_select to be either 4 or 5. 4 is for maximizing both strength and elastic modulus, and 5 is for maximizing stength but minimizing modulus. Both minimizes the stress concentration factor. 28 | 4. Wait for the optimization loop to converge. The results and history will be stored in a folder named ```opt_run```. 29 | -------------------------------------------------------------------------------- /cGAN_3D.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Tue Mar 29 02:18:54 2022 5 | 6 | @author: Xiao 7 | pix2pix model 8 | """ 9 | import sys 10 | # if local 11 | #sys.path.insert(1,'/home/xiao/Inverse_MS_Design/Python/Common_lib/') 12 | # if HPC 13 | sys.path.insert(1,'/home/xshang93/projects/def-yuzou/xshang93/Inverse_MS_Design/Python/Common_lib/') 14 | import tensorflow as tf 15 | 16 | import os 17 | import time 18 | import datetime 19 | import numpy as np 20 | 21 | from image_vis_3D import plot_3D_res 22 | from joblib import load 23 | from data_preprocessing import train_dataset_gen_cGAN, DeNormalizeData 24 | 25 | 26 | work_dir = './' 27 | data_dirs = [ 28 | "/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/10_cGAN/preprocessed/", 29 | "/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/15_cGAN/preprocessed/", 30 | "/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/20_cGAN/preprocessed/", 31 | "/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/25_cGAN/preprocessed/", 32 | "/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/30_cGAN/preprocessed/", 33 | "/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/35_cGAN/preprocessed/", 34 | "/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/40_cGAN/preprocessed/", 35 | "/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/45_cGAN/preprocessed/", 36 | "/home/xshang93/projects/def-yuzou/xshang93/ML/dataset/50_cGAN/preprocessed/", 37 | ] 38 | OUTPUT_CHANNELS = 1 #stress 39 | save_name = 'no_norm_20220819' 40 | train_ratio = 0.8 41 | pre_trained_model = load(work_dir+'HP_resultSGD_alldata_200eps.pkl').best_estimator_.model_ 42 | 43 | 44 | # Functions to build the generator 45 | 46 | def downsample(filters, transfer_layer='', transfer=True, apply_batchnorm=True): 47 | initializer = tf.random_normal_initializer(0., 0.02) 48 | 49 | result = tf.keras.Sequential() 50 | 51 | result.add( 52 | tf.keras.layers.Conv3D(filters, kernel_size=(3, 3, 3), strides=1, padding='valid', 53 | kernel_initializer=initializer, use_bias=False)) 54 | # if transfer learning is activated 55 | if transfer: 56 | result.trainable = False 57 | result.set_weights =pre_trained_model.layers[transfer_layer].get_weights() 58 | if apply_batchnorm: 59 | result.add(tf.keras.layers.BatchNormalization()) 60 | if transfer: 61 | result.trainable = False 62 | result.set_weights =pre_trained_model.layers[transfer_layer+1].get_weights() 63 | 64 | result.add(tf.keras.layers.LeakyReLU()) 65 | 66 | return result 67 | 68 | def upsample(filters, apply_dropout=False): 69 | initializer = tf.random_normal_initializer(0., 0.02) 70 | 71 | result = tf.keras.Sequential() 72 | result.add( 73 | tf.keras.layers.Conv3DTranspose(filters, kernel_size=(3, 3, 3), strides=1, 74 | padding='valid', 75 | kernel_initializer=initializer, 76 | use_bias=False)) 77 | 78 | result.add(tf.keras.layers.BatchNormalization()) 79 | 80 | if apply_dropout: 81 | result.add(tf.keras.layers.Dropout(0.5)) 82 | 83 | result.add(tf.keras.layers.ReLU()) 84 | 85 | return result 86 | 87 | def Generator(): 88 | inputs = tf.keras.layers.Input(shape=[32, 32, 32, 4])#instantiate a Keras Tensor 89 | 90 | down_stack = [ 91 | downsample(32, transfer_layer = 1, transfer = True), # (batch_size, 30, 30, 30, 32) 92 | downsample(32, transfer_layer = 3,transfer = True), # (batch_size, 28, 28, 28, 32) 93 | downsample(64, transfer_layer = 5,transfer = True), # (batch_size, 26, 26, 26, 64) 94 | downsample(128, transfer_layer = 7,transfer = True), # (batch_size, 24, 24, 24, 128) 95 | ]#put everything in a list and then glue them together 96 | 97 | up_stack = [ 98 | # upsample(512, 4, apply_dropout=True), # (batch_size, 2, 2, 1024) 99 | # upsample(512, 4, apply_dropout=True), # (batch_size, 4, 4, 1024) 100 | # upsample(512, 4, apply_dropout=True), # (batch_size, 8, 8, 1024) 101 | upsample(128), # (batch_size, 26, 26, 26, 128) 102 | upsample(64), # (batch_size, 28, 28, 28, 64) 103 | upsample(32), # (batch_size, 30, 30, 30, 32) 104 | upsample(32), # (batch_size, 32, 32, 32, 32) 105 | ] 106 | 107 | initializer = tf.random_normal_initializer(0., 0.02) 108 | last = tf.keras.layers.Conv3DTranspose(OUTPUT_CHANNELS, kernel_size=(3, 3, 3), 109 | strides=1, 110 | padding='valid', 111 | kernel_initializer=initializer, 112 | activation='tanh') # (batch_size, 32, 32, 32, 4) 113 | 114 | x = inputs 115 | 116 | # Downsampling through the model 117 | skips = [] 118 | for down in down_stack: 119 | x = down(x) 120 | skips.append(x) 121 | 122 | skips = reversed(skips[:-1]) 123 | 124 | # Upsampling and establishing the skip connections 125 | for up, skip in zip(up_stack, skips): 126 | x = up(x) 127 | x = tf.keras.layers.Concatenate()([x, skip]) 128 | 129 | x = last(x) 130 | 131 | return tf.keras.Model(inputs=inputs, outputs=x) 132 | 133 | # Define the generator loss 134 | 135 | LAMBDA = 100 136 | 137 | loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True) 138 | 139 | def generator_loss(disc_generated_output, gen_output, target): 140 | gan_loss = loss_object(tf.ones_like(disc_generated_output), disc_generated_output) 141 | 142 | # Mean absolute error 143 | l1_loss = tf.reduce_mean(tf.abs(target - gen_output)) 144 | 145 | total_gen_loss = gan_loss + (LAMBDA * l1_loss) 146 | 147 | return total_gen_loss, gan_loss, l1_loss 148 | 149 | # Functions to build the discriminator 150 | def Discriminator(): 151 | initializer = tf.random_normal_initializer(0., 0.02) 152 | 153 | inp = tf.keras.layers.Input(shape=[32, 32, 32, 4], name='input_image') 154 | tar = tf.keras.layers.Input(shape=[32, 32, 32, 1], name='target_image') 155 | 156 | x = tf.keras.layers.concatenate([inp, tar]) # (batch_size, 256, 256, channels*2) 157 | 158 | down1 = downsample(32, transfer = False, apply_batchnorm = False)(x) # (batch_size, 32, 32, 32, 32) 159 | down2 = downsample(32,transfer = False)(down1) # (batch_size, 30, 30, 30, 32) 160 | down3 = downsample(64,transfer = False)(down2) # (batch_size, 28, 28, 28, 64) 161 | 162 | zero_pad1 = tf.keras.layers.ZeroPadding3D()(down3) # (batch_size, 26, 26, 26, 128) 163 | conv = tf.keras.layers.Conv3D(128, kernel_size = (3, 3, 3), strides=1, 164 | kernel_initializer=initializer, 165 | use_bias=False)(zero_pad1) # (batch_size, 31, 31, 512) 166 | 167 | batchnorm1 = tf.keras.layers.BatchNormalization()(conv) 168 | 169 | leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1) 170 | 171 | zero_pad2 = tf.keras.layers.ZeroPadding3D()(leaky_relu) # (batch_size, 33, 33, 512) 172 | 173 | last = tf.keras.layers.Conv3D(1, kernel_size = (3, 3, 3), strides=1, 174 | kernel_initializer=initializer)(zero_pad2) # (batch_size, 30, 30, 1) 175 | 176 | return tf.keras.Model(inputs=[inp, tar], outputs=last) 177 | 178 | # Define the discriminator loss 179 | def discriminator_loss(disc_real_output, disc_generated_output): 180 | real_loss = loss_object(tf.ones_like(disc_real_output), disc_real_output) 181 | 182 | generated_loss = loss_object(tf.zeros_like(disc_generated_output), disc_generated_output) 183 | 184 | total_disc_loss = real_loss + generated_loss 185 | 186 | return total_disc_loss 187 | 188 | # Generate images for checking 189 | def generate_images(model, test_input, tar): 190 | prediction = model(test_input, training=True) 191 | prediction = DeNormalizeData(prediction,data_min,data_max) 192 | plot_3D_res(prediction,0,save_dir = './pred') 193 | plot_3D_res(tar,0,save_dir = './true') 194 | # 195 | # 196 | # Functions for training 197 | 198 | log_dir="logs/" 199 | 200 | summary_writer = tf.summary.create_file_writer( 201 | log_dir + "fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")) 202 | 203 | @tf.function 204 | def train_step(input_image, target, step): 205 | with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape: 206 | gen_output = generator(input_image, training=True) 207 | 208 | disc_real_output = discriminator([input_image, target], training=True) 209 | disc_generated_output = discriminator([input_image, gen_output], training=True) 210 | 211 | gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(disc_generated_output, gen_output, target) 212 | disc_loss = discriminator_loss(disc_real_output, disc_generated_output) 213 | 214 | generator_gradients = gen_tape.gradient(gen_total_loss, 215 | generator.trainable_variables) 216 | discriminator_gradients = disc_tape.gradient(disc_loss, 217 | discriminator.trainable_variables) 218 | 219 | generator_optimizer.apply_gradients(zip(generator_gradients, 220 | generator.trainable_variables)) 221 | discriminator_optimizer.apply_gradients(zip(discriminator_gradients, 222 | discriminator.trainable_variables)) 223 | 224 | with summary_writer.as_default(): 225 | tf.summary.scalar('gen_total_loss', gen_total_loss, step=step//1000) 226 | tf.summary.scalar('gen_gan_loss', gen_gan_loss, step=step//1000) 227 | tf.summary.scalar('gen_l1_loss', gen_l1_loss, step=step//1000) 228 | tf.summary.scalar('disc_loss', disc_loss, step=step//1000) 229 | 230 | def fit(train_ds, steps): 231 | # example_input, example_target = next(iter(test_ds.take(1))) 232 | start = time.time() 233 | 234 | for step, (input_image, target) in train_ds.repeat().take(steps).enumerate(): 235 | if (step) % 1000 == 0: 236 | 237 | if step != 0: 238 | print(f'Time taken for 1000 steps: {time.time()-start:.2f} sec\n') 239 | 240 | start = time.time() 241 | 242 | # generate_images(generator, example_input, example_target) 243 | print(f"Step: {step//1000}k") 244 | 245 | train_step(input_image, target, step) 246 | 247 | # Training step 248 | if (step+1) % 50 == 0: 249 | print('.', end='', flush=True) 250 | 251 | # Save (checkpoint) the model every 20000 steps (u.e. about 4 epochs) 252 | if (step + 1) % 20000 == 0: 253 | checkpoint.save(file_prefix=checkpoint_prefix) 254 | 255 | # ---------------------------- Body -------------------------------------- # 256 | if __name__ == "__main__": 257 | # The batch size of 1 produced better results for the U-Net in the original pix2pix experiment 258 | BUFFER_SIZE = 100000;BATCH_SIZE = 1 259 | 260 | # Build input pipelines 261 | train_input,train_target,test_input,test_target,data_min,data_max = train_dataset_gen_cGAN(save_name,data_dirs,train_ratio,1,5) 262 | np.save('min_max_'+save_name,[data_min,data_max]) 263 | 264 | train_dataset = tf.data.Dataset.from_tensor_slices((train_input,train_target)) 265 | train_dataset = train_dataset.shuffle(BUFFER_SIZE)#buffer size should >= dataset size 266 | train_dataset = train_dataset.batch(BATCH_SIZE) 267 | 268 | test_dataset = tf.data.Dataset.from_tensor_slices((test_input,test_target)) 269 | test_dataset = test_dataset.batch(BATCH_SIZE) 270 | 271 | # Build the generator 272 | generator = Generator() 273 | # tf.keras.utils.plot_model(generator, show_shapes=True, dpi=64) 274 | # 275 | ## Test the generator 276 | #gen_output = generator(tf.reshape(test_input[0],shape=[-1,32,32,32,4]), training=False)#training is false set it into inference mode, where dropout is disabled 277 | #plot_3D_res(gen_output,0) 278 | ## 279 | # Build the discriminator 280 | discriminator = Discriminator() 281 | 282 | ## Test the discriminator 283 | #disc_out = discriminator([tf.reshape(test_input[0],shape=[-1,32,32,32,4]), gen_output], training=False)#tf.newaxis used to add an aixs to match gen_output 284 | ## 285 | # Define the optimizers and checkpoint savers 286 | generator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5) 287 | discriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5) 288 | 289 | checkpoint_dir = './training_checkpoints' 290 | checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt") 291 | checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer, 292 | discriminator_optimizer=discriminator_optimizer, 293 | generator=generator, 294 | discriminator=discriminator) 295 | 296 | # Train the model 297 | fit(train_dataset, steps=100000) 298 | 299 | # save the model 300 | generator.save(save_name+'_Generator.h5') 301 | #tf.data.experimental.save(test_dataset, './') 302 | -------------------------------------------------------------------------------- /data_gen.m: -------------------------------------------------------------------------------- 1 | % This script is used to loop and generate multiple numbers of 2 | % microstrutures with different grain sizes and lam_width for Ti64 for FEA 3 | % and ML training 4 | % This is the version used for python library generation. 5 | 6 | % Need to have MTEX installed 7 | 8 | % Xiao Shang @ UofT, 20220721 9 | % list of inputs: 10 | % - n_priorBeta: number of prior Beta grains 11 | % - n_colonies_max: maximum number of alpha grains per prior Beta grain 12 | % - lamwidth_beta: with of beta lamella 13 | % - lam_ratio: ratio of beta lamella 14 | % - ms_ID: random seed for tessellation. i.e., -id in neper -T 15 | % ----------------------------------------------------------------------- % 16 | 17 | % addpath '/Users/Xiao/mtex-5.8.1'; % Point to whatever path that has MTEX 18 | % 19 | % startup_mtex; 20 | 21 | function data_gen(n_priorBeta,oriBeta_inp,n_colonies_max,lamwidth_beta,lam_ratio,ms_ID,run_name,ori_id,file_dir) 22 | % Where the files are generated locally and on hpc 23 | % data_dir_local = './'+string(run_name)+'/BO_ms_'+string(round(n_priorBeta))+'_'+string(round(n_colonies_max))+'_'+string(round(lamwidth_beta*100000))+'_'+string(round(lam_ratio*100000))+'_'+string(ms_ID)+'_'+string(ori_id)+'/'; 24 | % if ~exist(data_dir_local, 'dir') 25 | % mkdir(data_dir_local) 26 | % end 27 | % rng control the randomness of the orientation generation 28 | rng('default'); 29 | %rng(ori_id); % ori_id is to control the generation of grain orientations 30 | rng(ms_ID); % set ori_id to ms_id to simplified the problem 31 | texture_strength = 0; 32 | n_colonies_min = n_colonies_max; %min number of colonies in each prior beta grain 33 | 34 | %file_dir = data_dir_local; 35 | 36 | ms_gen_25D_HPC(n_priorBeta,oriBeta_inp,n_colonies_min,n_colonies_max,lamwidth_beta,file_dir,ms_ID,lam_ratio,texture_strength); 37 | end -------------------------------------------------------------------------------- /data_preprocessing.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Sun Jul 10 15:43:49 2022 5 | 6 | @author: Xiao 7 | 8 | Data preprocessing module 9 | 10 | """ 11 | from sklearn.preprocessing import StandardScaler 12 | from joblib import dump 13 | import pandas as pd 14 | import numpy as np 15 | import random 16 | import shutil 17 | import glob 18 | 19 | def train_dataset_gen(save_name,data_dirs,train_ratio,val_ratio,ch_start,ch_end,std = True): 20 | 21 | input_batches = [] 22 | target_batches = [] 23 | 24 | for i in range(0,len(data_dirs)): 25 | if data_dirs[i]: 26 | input_batch = np.load(data_dirs[i] + 'input.npy') 27 | input_batches.append(input_batch) 28 | 29 | target_batch = pd.read_csv(data_dirs[i]+'stress.csv',index_col=0) 30 | target_batches.append(target_batch) 31 | 32 | all_input_ori = np.concatenate(input_batches,axis=0) 33 | all_target_ori = pd.concat(target_batches) 34 | 35 | all_input = all_input_ori[:,:,:,:,ch_start:ch_end] 36 | all_target = all_target_ori.iloc[:,1:7]/1000000 # convert units to MPa 37 | 38 | 39 | # ----------------------- loading data finishes -------------------------------------# 40 | 41 | trainval_size = int(len(all_input)*train_ratio) 42 | train_size = int(trainval_size*(1-val_ratio)) 43 | val_size = trainval_size-train_size 44 | # test_size = len(all_input)-trainval_size 45 | 46 | # ------------ sepearte data randomly to train, val and test ----------------# 47 | 48 | trainval_idx = random.sample(range(0,len(all_target)),trainval_size) 49 | trainval_input = all_input[trainval_idx] 50 | trainval_target = all_target.iloc[trainval_idx] 51 | 52 | val_idx = random.sample(range(0,len(trainval_target)),val_size) 53 | val_input = trainval_input[val_idx] 54 | val_target = trainval_target.iloc[val_idx] 55 | 56 | train_input = np.delete(trainval_input,val_idx,0) 57 | train_target = trainval_target.drop(index=val_target.index) 58 | 59 | if std: 60 | # ------------ Standardize output data with training samples ----------------# 61 | 62 | sc = StandardScaler() 63 | sc.fit(train_target) 64 | 65 | train_target = sc.transform(train_target) 66 | val_target = sc.transform(val_target) 67 | #test_target = sc.transform(test_target) 68 | 69 | # save the standard scaler for model testing 70 | dump(sc,'std_scaler'+save_name+'.bin', compress=True) 71 | 72 | # save the index for testing 73 | np.save('train_index_'+save_name,trainval_idx) 74 | 75 | return train_input,train_target,val_input,val_target 76 | 77 | 78 | def test_dataset_gen(save_name,data_dirs,result_dir,ch_start,ch_end): 79 | 80 | input_batches = [] 81 | target_batches = [] 82 | # use when dataset are a single bunch 83 | if len(data_dirs)==1: 84 | all_input_ori = np.load(data_dirs[0] + 'input.npy') 85 | all_target_ori = pd.read_csv(data_dirs[0]+'stress.csv',index_col=0) 86 | else: 87 | for i in range(0,len(data_dirs)): 88 | if data_dirs[i]: 89 | input_batch = np.load(data_dirs[i] + 'input.npy') 90 | input_batches.append(input_batch) 91 | 92 | target_batch = pd.read_csv(data_dirs[i]+'stress.csv',index_col=0) 93 | target_batches.append(target_batch) 94 | 95 | all_input_ori = np.concatenate(input_batches,axis=0) 96 | all_target_ori = pd.concat(target_batches) 97 | 98 | all_input = all_input_ori[:,:,:,:,ch_start:ch_end] 99 | all_target = all_target_ori.iloc[:,1:7]/1000000 # convert units to MPa 100 | 101 | trainval_idx = np.load(result_dir+'train_index_'+save_name+'.npy') 102 | 103 | #trainval_input = all_input[trainval_idx] 104 | trainval_target = all_target.iloc[trainval_idx] 105 | 106 | test_input = np.delete(all_input,trainval_idx,0) 107 | test_target = all_target.drop(index=trainval_target.index) 108 | 109 | return test_input,test_target 110 | 111 | def test_dataset_gen_15_45(save_name,data_dirs,result_dir,ch_start,ch_end): 112 | 113 | input_batches = [] 114 | target_batches = [] 115 | # use when dataset are a single bunch 116 | if len(data_dirs)==1: 117 | all_input_ori = np.load(data_dirs[0] + 'input.npy') 118 | all_target_ori = pd.read_csv(data_dirs[0]+'stress.csv',index_col=0) 119 | else: 120 | for i in range(0,len(data_dirs)): 121 | if data_dirs[i]: 122 | input_batch = np.load(data_dirs[i] + 'input.npy') 123 | input_batches.append(input_batch) 124 | 125 | target_batch = pd.read_csv(data_dirs[i]+'stress.csv',index_col=0) 126 | target_batches.append(target_batch) 127 | 128 | all_input_ori = np.concatenate(input_batches,axis=0) 129 | all_target_ori = pd.concat(target_batches) 130 | 131 | all_input = all_input_ori[:,:,:,:,ch_start:ch_end] 132 | all_target = all_target_ori.iloc[:,0:6] # convert units to MPa 133 | 134 | trainval_idx = np.load(result_dir+'train_index_'+save_name+'.npy') 135 | 136 | #trainval_input = all_input[trainval_idx] 137 | trainval_target = all_target.iloc[trainval_idx] 138 | 139 | test_input = np.delete(all_input,trainval_idx,0) 140 | test_target = all_target.drop(index=trainval_target.index) 141 | 142 | return test_input,test_target 143 | 144 | def new_test_dataset_gen(data_dirs,ch_start,ch_end): 145 | 146 | input_batches = [] 147 | target_batches = [] 148 | 149 | for i in range(0,len(data_dirs)): 150 | if data_dirs[i]: 151 | input_batch = np.load(data_dirs[i] + 'input.npy') 152 | input_batches.append(input_batch) 153 | 154 | target_batch = pd.read_csv(data_dirs[i]+'stress.csv',index_col=0) 155 | target_batches.append(target_batch) 156 | 157 | test_input_ori = np.concatenate(input_batches,axis=0) 158 | test_target_ori = pd.concat(target_batches) 159 | 160 | test_input = test_input_ori[:,:,:,:,ch_start:ch_end] 161 | test_target = test_target_ori.iloc[:,1:7]/1000000 # convert units to MPa 162 | test_idx = test_target_ori.iloc[:,0] 163 | 164 | return test_idx,test_input,test_target 165 | 166 | def NormalizeData(data): 167 | data_min = np.nanmin(data) 168 | data_max = np.nanmax(data) 169 | data = (data - np.nanmin(data)) / (np.nanmax(data) - np.nanmin(data)) 170 | return data,data_min,data_max 171 | 172 | def DeNormalizeData(data,data_min,data_max): 173 | data = data*(data_max-data_min)+data_min 174 | return data 175 | 176 | def train_dataset_gen_cGAN(save_name,data_dirs,train_ratio,ch_start,ch_end,norm=True): 177 | 178 | input_batches = [] 179 | target_batches = [] 180 | 181 | for i in range(0,len(data_dirs)): 182 | if data_dirs[i]: 183 | input_batch = np.load(data_dirs[i] + 'input.npy') 184 | input_batches.append(input_batch) 185 | 186 | target_batch = np.load(data_dirs[i] + 'target.npy') 187 | target_batches.append(target_batch) 188 | 189 | all_input = np.concatenate(input_batches,axis=0) 190 | all_target = np.concatenate(target_batches,axis=0) 191 | 192 | # convert dtype from float64 to float32 to match the generator 193 | all_input = np.float32(all_input[:,:,:,:,ch_start:ch_end]) 194 | 195 | # normalize target if normalization is used. Otherwise assign 0 to data_min 196 | # and data_max as they will not be used. 197 | if norm: 198 | all_target,data_min,data_max = NormalizeData(all_target) 199 | else: 200 | data_min=0 201 | data_max=0 202 | all_target = np.float32(all_target) 203 | 204 | 205 | # ----------------------- loading data finishes -------------------------------------# 206 | 207 | trainval_size = int(len(all_input)*train_ratio) 208 | train_size = int(trainval_size*1) 209 | val_size = trainval_size-train_size 210 | #test_size = len(all_input)-trainval_size 211 | 212 | # ------------ sepearte data randomly to train, val and test ----------------# 213 | 214 | trainval_idx = random.sample(range(0,len(all_target)),trainval_size) 215 | trainval_input = all_input[trainval_idx] 216 | trainval_target = all_target[trainval_idx] 217 | 218 | test_input = np.delete(all_input,trainval_idx,0) 219 | test_target = np.delete(all_target,trainval_idx,0) 220 | 221 | val_idx = random.sample(range(0,len(trainval_target)),val_size) 222 | # val_input = trainval_input[val_idx] 223 | # val_target = trainval_target[val_idx] 224 | 225 | train_input = np.delete(trainval_input,val_idx,0) 226 | train_target = np.delete(trainval_target,val_idx,0) 227 | 228 | # save the index for testing 229 | np.save('train_index_'+save_name,trainval_idx) 230 | 231 | return train_input,train_target,test_input,test_target,data_min,data_max 232 | 233 | def test_dataset_gen_cGAN(save_name,data_dirs,result_dir): 234 | 235 | input_batches = [] 236 | target_batches = [] 237 | 238 | for i in range(0,len(data_dirs)): 239 | if data_dirs[i]: 240 | input_batch = np.load(data_dirs[i] + 'input.npy') 241 | input_batches.append(input_batch) 242 | 243 | target_batch = np.load(data_dirs[i] + 'target.npy') 244 | target_batches.append(target_batch) 245 | 246 | all_input_ori = np.concatenate(input_batches,axis=0) 247 | all_target_ori = np.concatenate(target_batches,axis=0) 248 | 249 | # convert dtype from float64 to float32 to match the generator 250 | all_input = np.float32(all_input_ori[:,:,:,:,1:5]) 251 | all_target,data_min,data_max = NormalizeData(all_target_ori) 252 | all_target = np.float32(all_target) 253 | 254 | trainval_idx = np.load(result_dir+'train_index_'+save_name+'.npy') 255 | 256 | test_input = np.delete(all_input,trainval_idx,0) 257 | test_target = np.delete(all_target,trainval_idx,0) 258 | 259 | return test_input,test_target 260 | 261 | ## Added because .removesuffix only works for python 3.9+ 262 | def remove_suffix(input_string, suffix): 263 | if suffix and input_string.endswith(suffix): 264 | return input_string[:-len(suffix)] 265 | return input_string 266 | 267 | def remove_prefix(input_string, prefix): 268 | if prefix and input_string.startswith(prefix): 269 | return input_string[len(prefix):] 270 | return input_string 271 | 272 | # Move data from simulation results to dataset folder 273 | def move_data(): 274 | data_types = ['strain','strain-eq','strain-pl','strain-pl-eq'] 275 | typeid = 1 276 | for grain in [10,15,20,25,30,35,40,45,50]: 277 | dir_strain_cal = '/home/xiao/projects/inverse_mat_des/Simulation/strain_recal/' 278 | dir_target = '/home/xiao/projects/inverse_mat_des/ML/dataset/{0}_cGAN/{1}/'.format(grain,data_types[typeid]) 279 | files_strain_1 = glob.glob(dir_strain_cal + "{0}/**/*{1}.step2".format(grain,data_types[typeid]), recursive = True) 280 | files_strain_2 = glob.glob(dir_strain_cal + "{0}_2/**/*{1}.step2".format(grain,data_types[typeid]), recursive = True) 281 | for file_strain_1 in files_strain_1: 282 | idx = remove_suffix(file_strain_1[(len(dir_strain_cal)+3):],'_sim.sim/results/elts/{0}/{0}.step2'.format(data_types[typeid])).replace('/','_') 283 | shutil.copy(file_strain_1,dir_target+idx+'_{0}.step2'.format(data_types[typeid])) 284 | for file_strain_2 in files_strain_2: 285 | idx = remove_suffix(file_strain_2[(len(dir_strain_cal)+5):],'_sim.sim/results/elts/{0}/{0}.step2'.format(data_types[typeid])).replace('/','_') 286 | shutil.copy(file_strain_2,dir_target+idx+'_{0}.step2'.format(data_types[typeid])) -------------------------------------------------------------------------------- /image_gen_3D.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon May 30 16:11:20 2022 5 | 6 | @author: Xiao 7 | 8 | Used to generate 3D images (arrays) with multiple input channels for BO 9 | 10 | """ 11 | 12 | import numpy as np 13 | import glob 14 | import shutil 15 | import os 16 | import pandas as pd 17 | import matplotlib.pyplot as plt 18 | from scipy import interpolate 19 | 20 | # generate input for BO 21 | def image_gen_3D_BO(ms_id,sh_dir): 22 | 23 | # # ------------------------ Move raw results files end ------------------------------# 24 | 25 | raster_size = 32 26 | n_channels = 5 27 | 28 | images = [] 29 | 30 | try: 31 | # ------------------------- gather 3D image data ---------------------------# 32 | f_tess = open(glob.glob(sh_dir.replace('generate_tess.sh','')+'*.tess')[0]) 33 | lines_tess = f_tess.readlines() 34 | f_tesr = open(glob.glob(sh_dir.replace('generate_tess.sh','')+'*.tesr')[0]) 35 | lines_tesr = f_tesr.readlines() 36 | 37 | cellids_list = [] 38 | ori_list = [] 39 | 40 | for line_number,line in enumerate(lines_tess): 41 | # First find how many cells are in the microstructure 42 | if '**cell' in line: 43 | num_cells = int(lines_tess[line_number+1]) 44 | 45 | # Then find and store lam info 46 | if '*lam' in line: 47 | lam_line_start = line_number+1 48 | if '*group' in line: 49 | lam_line_end = line_number 50 | lam_list=[] 51 | for i in range(lam_line_start,lam_line_end): 52 | lams_str = lines_tess[i] 53 | for lam_str in lams_str.split(): 54 | lam_int = int(lam_str)-1 #convert 1,2 to binary (0,1) 55 | lam_list.append(lam_int) 56 | # Then extract the orientations and store them in a list 57 | if '*ori' in line: 58 | for ori in range(line_number+2,line_number+2+num_cells): 59 | r1 = float(lines_tess[ori][2:17]) 60 | r2 = float(lines_tess[ori][20:35]) 61 | r3 = float(lines_tess[ori][38:53]) 62 | r = [r1,r2,r3] 63 | ori_list.append(r) 64 | 65 | # Finally extract the cell ids for each voxel from the .tesr file 66 | for line_number,line in enumerate(lines_tesr): 67 | if 'ascii\n' in line: 68 | for i in range(line_number+1,len(lines_tesr)-1): 69 | cellids_str = lines_tesr[i] 70 | for cellid_str in cellids_str.split(): 71 | cellid_int = int(cellid_str) 72 | cellids_list.append(cellid_int) 73 | 74 | image = np.zeros((raster_size,raster_size,raster_size,n_channels)) 75 | counter=0 76 | while counter < len(cellids_list): 77 | for z in range(0,raster_size): 78 | for y in range(0,raster_size): 79 | for x in range(0,raster_size): 80 | # Channel 0 is cell id 81 | image[x,y,z,0] = cellids_list[counter] 82 | # Channel 0 is lam id 83 | image[x,y,z,1] = lam_list[cellids_list[counter]-1] 84 | # Channels 2-3 are orientations 85 | image[x,y,z,2:n_channels] = ori_list[cellids_list[counter]-1] 86 | counter = counter+1 87 | 88 | # ------------------------- gather 3D image data to files ---------------------------# 89 | images.append(image[:,:,:,1:5]) 90 | except ValueError: 91 | print('ValueError:failed!') 92 | 93 | return images 94 | 95 | # generate input files for 3DCNN 96 | def image_gen_3D_CNN(dir_original,dir_raw_input,dir_raw_target,dir_preprocessed,move,gen): 97 | 98 | # dir_original = '/media/xiao/One_Touch/Xiao/Simulation/hpc_20220621_25grains/texture2/' 99 | # dir_raw_input = '/home/xiao/projects/inverse_mat_des/ML/dataset/25_t2/raw/input/' 100 | # dir_raw_target = '/home/xiao/projects/inverse_mat_des/ML/dataset/25_t2/raw/target/' 101 | # dir_preprocessed = '/home/xiao/projects/inverse_mat_des/ML/dataset/25_t2/preprocessed/' 102 | # 103 | # if raw data needs to be moved, set move=1 104 | # ------------------------ Move raw results files ------------------------------# 105 | files_tesr = glob.glob(dir_original + "**/*.tesr", recursive = True) 106 | files_tess = glob.glob(dir_original + "**/*.tess", recursive = True) 107 | files_force = glob.glob(dir_original + "**/*.x1", recursive = True) 108 | if move: 109 | 110 | 111 | for file_tesr in files_tesr: 112 | file_new = shutil.copy(file_tesr,dir_raw_input) 113 | file_new_name = dir_raw_input+file_tesr[len(dir_original):].replace('/','_') 114 | os.rename(file_new,file_new_name) 115 | 116 | for file_tess in files_tess: 117 | file_new = shutil.copy(file_tess,dir_raw_input) 118 | file_new_name = dir_raw_input+file_tess[len(dir_original):].replace('/','_') 119 | os.rename(file_new,file_new_name) 120 | 121 | for file_force in files_force: 122 | file_new = shutil.copy(file_force,dir_raw_target) 123 | file_new_name = dir_raw_target+file_force[len(dir_original):].replace('/','_') 124 | os.rename(file_new,file_new_name) 125 | 126 | # if results is to be generated, set gen=1 127 | if gen: 128 | # ------------------------ Move raw results files end ------------------------------# 129 | 130 | files_force_new = glob.glob(dir_raw_target + "**/*.x1", recursive = True) 131 | 132 | raster_size = 32 133 | n_channels = 5 134 | col_names = ['step', 'INCR', 'Fx', 'Fy', 'Fz', 'area A', 'TIME'] 135 | area = 0.000001 136 | strain = np.linspace(0,0.015,7) 137 | interp_factor = 10 # density of interpolation. larger the finer 138 | strain_new = np.linspace(0,0.015,7*interp_factor) 139 | data_E = []; data_yield = [] 140 | 141 | images = [] 142 | idx_list = [] 143 | data_stress = [] 144 | 145 | counter_file = 0 146 | for file_force_new in files_force_new: 147 | try: 148 | # ------------------------- gather stress/E data ---------------------------# 149 | idx = remove_suffix(file_force_new.replace(dir_raw_target,''),'_post.force.x1') 150 | # load the data and pick Fx 151 | data = pd.read_csv(file_force_new,skiprows = [0,1], delimiter=' ', names=col_names, engine='python') 152 | fx = data['Fx'].tolist() 153 | stress = [f/area for f in fx] # calculate engineering stress from reaction force 154 | E = (stress[1]/0.0025+stress[2]/0.005)/2 # calculate Young's modulus, average 155 | intercept = -E*0.002 # calculate the intercept of the 0.2% offsetline 156 | model_interp = interpolate.interp1d(strain,stress,'linear') # fit the current interpolation model 157 | stress_new = model_interp(strain_new) # interpolate the stress 158 | offsetline = E*strain_new+intercept # assemble the offset line 159 | 160 | idx_yield = np.argwhere(np.diff(np.sign(stress_new - offsetline))).flatten() # find the index of the yield stress 161 | yield_strength = stress_new[idx_yield][0] # '0' is used to convert array to float number 162 | 163 | # Plot data every 200 samples 164 | if counter_file%200 == 0: 165 | plt.plot(strain_new,stress_new,'-b') 166 | plt.plot(strain_new,offsetline,'-g') 167 | plt.plot(strain_new[idx_yield], stress_new[idx_yield], 'ro') 168 | 169 | counter_file+=1 170 | 171 | # ------------------------- gather 3D image data ---------------------------# 172 | f_tess = open(glob.glob(dir_raw_input+idx+'*.tess')[0]) 173 | lines_tess = f_tess.readlines() 174 | f_tesr = open(glob.glob(dir_raw_input+idx+'*.tesr')[0]) 175 | lines_tesr = f_tesr.readlines() 176 | 177 | cellids_list = [] 178 | ori_list = [] 179 | 180 | for line_number,line in enumerate(lines_tess): 181 | # First find how many cells are in the microstructure 182 | if '**cell' in line: 183 | num_cells = int(lines_tess[line_number+1]) 184 | 185 | # Then find and store lam info 186 | if '*lam' in line: 187 | lam_line_start = line_number+1 188 | if '*group' in line: 189 | lam_line_end = line_number 190 | lam_list=[] 191 | for i in range(lam_line_start,lam_line_end): 192 | lams_str = lines_tess[i] 193 | for lam_str in lams_str.split(): 194 | lam_int = int(lam_str)-1 #convert 1,2 to binary (0,1) 195 | lam_list.append(lam_int) 196 | # Then extract the orientations and store them in a list 197 | if '*ori' in line: 198 | for ori in range(line_number+2,line_number+2+num_cells): 199 | r1 = float(lines_tess[ori][2:17]) 200 | r2 = float(lines_tess[ori][20:35]) 201 | r3 = float(lines_tess[ori][38:53]) 202 | r = [r1,r2,r3] 203 | ori_list.append(r) 204 | 205 | # Finally extract the cell ids for each voxel from the .tesr file 206 | for line_number,line in enumerate(lines_tesr): 207 | if 'ascii\n' in line: 208 | for i in range(line_number+1,len(lines_tesr)-1): 209 | cellids_str = lines_tesr[i] 210 | for cellid_str in cellids_str.split(): 211 | cellid_int = int(cellid_str) 212 | cellids_list.append(cellid_int) 213 | 214 | image = np.zeros((raster_size,raster_size,raster_size,n_channels)) 215 | counter=0 216 | while counter < len(cellids_list): 217 | for z in range(0,raster_size): 218 | for y in range(0,raster_size): 219 | for x in range(0,raster_size): 220 | # Channel 0 is cell id 221 | image[x,y,z,0] = cellids_list[counter] 222 | # Channel 1 is lam id 223 | image[x,y,z,1] = lam_list[cellids_list[counter]-1] 224 | # Channels 2-3 are orientations 225 | image[x,y,z,2:5] = ori_list[cellids_list[counter]-1] 226 | counter = counter+1 227 | 228 | # ------------------------- gather 3D image data to files ---------------------------# 229 | 230 | data_yield.append(yield_strength) 231 | data_E.append(E) # add the modulus for this sample into the dataset 232 | idx_list.append(idx) # put the index into the index vector 233 | data_stress.append(stress) # put the stress vector for this sample into the dataset 234 | 235 | images.append(image) 236 | except ValueError: 237 | print(file_force_new+'failed!') 238 | #except IndexError: 239 | # print(file_force_new+'failed!') 240 | 241 | stress_df = pd.DataFrame(data_stress, columns = ['0.00','0.25','0.50','0.75','1.00','1.25','1.50']) # pd dataframe storing the stress values 242 | stress_df['E'] = data_E 243 | stress_df['Yield'] = data_yield 244 | 245 | stress_df.index = idx_list 246 | 247 | stress_df.to_csv(dir_preprocessed + 'stress.csv') 248 | 249 | np.save(dir_preprocessed + 'input',images)# save all 3D images into a .npy file 250 | 251 | return 252 | 253 | # Generate the position file for extracting mesh elements 254 | def position_gen(): 255 | x_coo = np.linspace(0.015,0.985,32) 256 | y_coo = np.linspace(0.015,0.985,32) 257 | z_coo = np.linspace(0.015,0.985,32) 258 | raster_size = 32 259 | # write position file, only use once 260 | f = open('./positions', 'w') 261 | for x in range(0,raster_size): 262 | for y in range(0,raster_size): 263 | for z in range(0,raster_size): 264 | f.write('{0} {1} {2}\n'.format(x_coo[x],y_coo[y],z_coo[z])) 265 | 266 | f.close() 267 | ## Added because .removesuffix only works for python 3.9+ 268 | def remove_suffix(input_string, suffix): 269 | if suffix and input_string.endswith(suffix): 270 | return input_string[:-len(suffix)] 271 | return input_string 272 | 273 | ## generate output stress/strain 3D images for cGAN 274 | def image_gen_3D_cGAN(typeid=4): 275 | # For selecting target data type to deal with 276 | data_types = ['seq','strain','strain-eq','strain-pl','strain-pl-eq'] 277 | 278 | for grain in [10,15,20,25,30,35,40,45,50]: 279 | # target property data directory, selected by data_types 280 | dir_raw = '/home/xiao/projects/inverse_mat_des/ML/dataset/'+str(grain)+'_cGAN/{0}/'.format(data_types[typeid]) 281 | # stpoint file directory 282 | dir_stpoint = '/home/xiao/projects/inverse_mat_des/ML/dataset/'+str(grain)+'_cGAN/stpoint/' 283 | # .tesr geometry files directory 284 | dir_raw_input = '/home/xiao/projects/inverse_mat_des/ML/dataset/all_input/' 285 | # output directory, create if not exists already 286 | dir_preprocessed = '/home/xiao/projects/inverse_mat_des/ML/dataset/'+str(grain)+'_cGAN/preprocessed_{0}/'.format(data_types[typeid]) 287 | isExists = os.path.exists(dir_preprocessed) 288 | if isExists==0: 289 | os.makedirs(dir_preprocessed) 290 | 291 | images_tar = [] 292 | images_inp = [] 293 | files_tar_new = glob.glob(dir_raw + "*{0}.step2".format(data_types[typeid]), recursive = True) 294 | idx_list = [] 295 | for file_tar_new in files_tar_new: 296 | try: 297 | idx = remove_suffix(file_tar_new.replace(dir_raw,''),'_{0}.step2'.format(data_types[typeid])) 298 | # stpoint file name sample '/home/xiao/projects/inverse_mat_des/ML/dataset/10_cGAN/preprocessed/20_2_20_1_19_12_0_ori1_27.stpoint' 299 | stpoint = np.loadtxt(dir_stpoint+idx+'.stpoint') 300 | tar = np.loadtxt(file_tar_new) 301 | raster_size = 32 302 | 303 | image_tar = np.zeros((32,32,32,1)) 304 | 305 | counter=0 306 | for x in range(0,raster_size): 307 | for y in range(0,raster_size): 308 | for z in range(0,raster_size): 309 | image_tar[x][y][z] = tar[int(stpoint[counter])-1] 310 | counter=counter+1 311 | 312 | f_tess = open(glob.glob(dir_raw_input+idx+'*.tess')[0]) 313 | lines_tess = f_tess.readlines() 314 | f_tesr = open(glob.glob(dir_raw_input+idx+'*.tesr')[0]) 315 | lines_tesr = f_tesr.readlines() 316 | 317 | cellids_list = [] 318 | ori_list = [] 319 | 320 | for line_number,line in enumerate(lines_tess): 321 | # First find how many cells are in the microstructure 322 | if '**cell' in line: 323 | num_cells = int(lines_tess[line_number+1]) 324 | 325 | # Then find and store lam info 326 | if '*lam' in line: 327 | lam_line_start = line_number+1 328 | if '*group' in line: 329 | lam_line_end = line_number 330 | lam_list=[] 331 | for i in range(lam_line_start,lam_line_end): 332 | lams_str = lines_tess[i] 333 | for lam_str in lams_str.split(): 334 | lam_int = int(lam_str)-1 #convert 1,2 to binary (0,1) 335 | lam_list.append(lam_int) 336 | # Then extract the orientations and store them in a list 337 | if '*ori' in line: 338 | for ori in range(line_number+2,line_number+2+num_cells): 339 | r1 = float(lines_tess[ori][2:17]) 340 | r2 = float(lines_tess[ori][20:35]) 341 | r3 = float(lines_tess[ori][38:53]) 342 | r = [r1,r2,r3] 343 | ori_list.append(r) 344 | 345 | # Finally extract the cell ids for each voxel from the .tesr file 346 | for line_number,line in enumerate(lines_tesr): 347 | if 'ascii\n' in line: 348 | for i in range(line_number+1,len(lines_tesr)-1): 349 | cellids_str = lines_tesr[i] 350 | for cellid_str in cellids_str.split(): 351 | cellid_int = int(cellid_str) 352 | cellids_list.append(cellid_int) 353 | n_channels = 5 354 | image_inp = np.zeros((raster_size,raster_size,raster_size,n_channels)) 355 | counter=0 356 | while counter < len(cellids_list): 357 | for z in range(0,raster_size): 358 | for y in range(0,raster_size): 359 | for x in range(0,raster_size): 360 | # Channel 0 is cell id 361 | image_inp[x,y,z,0] = cellids_list[counter] 362 | # Channel 1 is lam id 363 | image_inp[x,y,z,1] = lam_list[cellids_list[counter]-1] 364 | # Channels 2-3 are orientations 365 | image_inp[x,y,z,2:5] = ori_list[cellids_list[counter]-1] 366 | counter = counter+1 367 | 368 | images_tar.append(image_tar) 369 | images_inp.append(image_inp) 370 | idx_list.append(idx) 371 | except IndexError: 372 | print(idx+' failed!') 373 | except FileNotFoundError: 374 | print(idx+' failed!') 375 | 376 | np.save(dir_preprocessed + 'target',images_tar)# save all 3D images into a .npy file 377 | np.save(dir_preprocessed + 'input',images_inp)# save all 3D images into a .npy file 378 | np.save(dir_preprocessed + 'idx',idx_list)# save all indexes into a .npy file 379 | 380 | 381 | 382 | 383 | -------------------------------------------------------------------------------- /image_vis_3D.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Fri Jul 1 18:00:09 2022 5 | 6 | @author: Xiao 7 | 8 | To visualize 3D images 9 | 10 | """ 11 | import sys 12 | sys.path.insert(1,'/home/xiao/Inverse_MS_Design/Python/Common_lib/') 13 | 14 | import matplotlib.pyplot as plt 15 | import matplotlib 16 | import numpy as np 17 | 18 | def to_rgb(rod): 19 | rgb = (rod+np.sqrt(2)-1)/(2*(np.sqrt(2)-1)) 20 | return rgb 21 | 22 | def plot_3D(images,image_num=0,image_format='svg',save_dir=''): 23 | #data_file = '/Users/Xiao/cGAN_Project/ML/CNN/dataset/20220706/preprocessed/25/input.npy' 24 | tesrsize = 32 25 | #image_num = 0 # which image to visualize 26 | # Controll Tranperency 27 | alpha = 1.0 28 | 29 | # Change the Size of Graph using 30 | # Figsize 31 | #fig = plt.figure(figsize=(10, 10)) 32 | 33 | # Generating a 3D sine wave 34 | ax = plt.axes(projection='3d') 35 | 36 | image = np.ones([tesrsize,tesrsize,tesrsize,4]) 37 | image_bi = np.ones([tesrsize,tesrsize,tesrsize,4]) 38 | 39 | for z in range(0,tesrsize): 40 | for y in range(0,tesrsize): 41 | for x in range(0,tesrsize): 42 | #rgb = to_rgb(images[image_num][x][y][z][2:5]) 43 | rgb = to_rgb(images[0][x][y][z][1:4]) 44 | image[x][y][z] = np.append(rgb,alpha) 45 | if images[0][x][y][z][1]==0: 46 | image_bi[x][y][z][0] = 0 47 | else: 48 | image_bi[x][y][z][1] = 0 49 | 50 | image[image>1]=1 51 | image[image<0]=0 52 | 53 | data = np.ones([tesrsize,tesrsize,tesrsize]) 54 | 55 | # trun off/on axis 56 | ax.axis('off') 57 | # ax.axis('on') 58 | ax.set_xlabel('x') 59 | ax.set_ylabel('y') 60 | ax.set_zlabel('z') 61 | 62 | # Voxels is used to customizations of 63 | # the sizes, positions and colors. 64 | 65 | # match stress/strain 66 | image = np.flip(image,(0,1)) #matches neper vis 67 | # lightsource = matplotlib.colors.LightSource(0,240) 68 | # ax.view_init(elev =30., azim= 120.) 69 | 70 | #match neper 71 | ax.view_init(elev = 30, azim= 120) #matches neper vis 72 | lightsource = matplotlib.colors.LightSource(0,150) 73 | ax.voxels(data, facecolors = image, edgecolors = image, lightsource=lightsource) 74 | 75 | if save_dir != '': 76 | plt.savefig(save_dir+"_{0}.{1}".format(image_num,image_format)) 77 | return 0 78 | 79 | # plot results 3D image in RGB color 80 | def plot_3D_res(images,image_num=0,cb='False',image_format='png',stress_min='',stress_max='',save_dir='',axis='off'): 81 | image = images[0] 82 | if type(image)!=np.ndarray: 83 | image=image.numpy() 84 | #data_file = '/Users/Xiao/cGAN_Project/ML/CNN/dataset/20220706/preprocessed/25/input.npy' 85 | tesrsize = 32 86 | ax = plt.axes(projection='3d') 87 | image = np.flip(image,(0,1)) #matches neper vis 88 | image = image.reshape((32,32,32)) 89 | cmap = plt.get_cmap("jet") 90 | # set the range of the colormap 91 | if stress_max=='': 92 | if stress_min=='': 93 | norm= plt.Normalize(image.min(), image.max()) 94 | else: 95 | norm= plt.Normalize(stress_min, image.max()) 96 | else: 97 | if stress_min=='': 98 | norm= plt.Normalize(image.min(), stress_max()) 99 | else: 100 | norm= plt.Normalize(stress_min, stress_max) 101 | 102 | data = np.ones([tesrsize,tesrsize,tesrsize]) 103 | 104 | # trun off/on axis 105 | ax.axis(axis) 106 | ax.set_xlabel('x') 107 | ax.set_ylabel('y') 108 | ax.set_zlabel('z') 109 | 110 | # Voxels is used to customizations of 111 | # the sizes, positions and colors. 112 | # ax.view_init(elev =30., azim= 120.) 113 | # lightsource = matplotlib.colors.LightSource(0,240) 114 | 115 | #match neper 116 | ax.view_init(elev = 30, azim= 120) #matches neper vis 117 | lightsource = matplotlib.colors.LightSource(0,60) 118 | 119 | ax.voxels(data, facecolors = cmap(norm(image)), edgecolors = cmap(norm(image)), lightsource=lightsource) 120 | 121 | 122 | m = matplotlib.cm.ScalarMappable(cmap=plt.cm.jet, norm=norm) 123 | m.set_array([]) 124 | if cb == True: 125 | plt.colorbar(m,fraction=0.0275, pad=0.125) 126 | if save_dir != '': 127 | plt.savefig(save_dir+"_{0}.{1}".format(image_num,image_format)) 128 | 129 | plt.figure().clear() 130 | return image 131 | -------------------------------------------------------------------------------- /min_max_final_all_data_20221026.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/min_max_final_all_data_20221026.npy -------------------------------------------------------------------------------- /model_builder_3DCNN.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Sat Jul 9 10:08:58 2022 5 | 6 | @author: Xiao 7 | 8 | Model builder for 3D CNN 9 | 10 | """ 11 | 12 | # import tensorflow as tf 13 | # from tensorflow import keras 14 | # from tensorflow.keras.layers import Input, Dense, BatchNormalization, Conv2D, MaxPool2D, GlobalMaxPool2D, GlobalAveragePooling2D, Dropout 15 | # from tensorflow.keras.optimizers import SGD 16 | from tensorflow.keras.models import Model 17 | # from tensorflow.keras.callbacks import ModelCheckpoint 18 | # from matplotlib import pyplot as plt 19 | # from keras.utils.vis_utils import plot_model 20 | from tensorflow.keras.layers import Input, Dense, BatchNormalization, Conv3D, MaxPool3D, GlobalMaxPool3D, GlobalAveragePooling3D, Dropout 21 | from tensorflow.keras.optimizers import SGD,Adam 22 | 23 | # import pandas as pd 24 | # import glob 25 | # import numpy as np 26 | # from sklearn.metrics import r2_score 27 | # from joblib import dump, load 28 | 29 | # define a function to create the convolutional sections of the model 30 | def conv3D_block(inp, filters=64, bn=True, pool=True, dropout_loc=0.4,act_fun = 'LeakyReLU'): 31 | _ = Conv3D(filters, kernel_size=(3, 3, 3), activation=act_fun, kernel_initializer='he_uniform')(inp) 32 | if bn: 33 | _ = BatchNormalization()(_) 34 | if pool: 35 | _ = MaxPool3D()(_) 36 | if dropout_loc > 0: 37 | _ = Dropout(dropout_loc)(_) 38 | return _ 39 | 40 | # function to build model 41 | def model_build(inp_ch=4,nfilters=[32,32,64,128],bn=[True,True,True,True], 42 | pool=[False,False,False,False],dropout_loc=[0,0,0,0],dropout_glob=0.2, 43 | act_fun='LeakyReLU',loss='mse',learning_rate=0.001,epsilon =1e-7, 44 | amsgrad=False,momentum=0.0, opt='Adam'): 45 | # specify input image sizes 46 | img_height = 32; img_width = 32; img_depth = 32; 47 | 48 | input_layer = Input(shape=(img_height, img_width, img_depth, inp_ch)) 49 | _ = conv3D_block(input_layer, filters=nfilters[0], bn=bn[0], pool=pool[0], 50 | dropout_loc=dropout_loc[0],act_fun = act_fun) 51 | _ = conv3D_block(_, filters=nfilters[1],bn=bn[1],pool=pool[1], 52 | dropout_loc=dropout_loc[1],act_fun = act_fun) 53 | _ = conv3D_block(_, filters=nfilters[2],bn=bn[2],pool=pool[2], 54 | dropout_loc=dropout_loc[2],act_fun = act_fun) 55 | _ = conv3D_block(_, filters=nfilters[3],bn=bn[3],pool=pool[3], 56 | dropout_loc=dropout_loc[3],act_fun = act_fun) 57 | _ = GlobalAveragePooling3D()(_) 58 | 59 | #_ = Dense(units=1024, activation='LeakyReLU')(_) 60 | _ = Dense(units=64, activation=act_fun)(_) 61 | if dropout_glob>0: 62 | _ = Dropout(dropout_glob)(_) 63 | output = Dense(units=6)(_) 64 | 65 | if opt == 'Adam': 66 | optimizer = Adam(learning_rate=learning_rate,epsilon=epsilon,amsgrad=amsgrad) 67 | else: 68 | optimizer = SGD(learning_rate = learning_rate,momentum=momentum) 69 | 70 | model = Model(inputs=input_layer, outputs=output) 71 | model.compile(optimizer=optimizer, 72 | loss=loss, 73 | metrics=['mae']) 74 | return model -------------------------------------------------------------------------------- /ms_gen_25D_HPC.m: -------------------------------------------------------------------------------- 1 | % This script is used for generate multiple numbers of microstrutures for 2 | % Ti64 for FEA and ML training 3 | 4 | % Need to have MTEX installed 5 | 6 | % Xiao Shang @ UofT, 20211124 7 | % This is the version used for python library generation. 8 | % ----------------------------------------------------------------------- % 9 | 10 | %addpath '/Users/Xiao/mtex-5.7.0'; % Point to whatever path that has MTEX 11 | 12 | %startup_mtex; 13 | 14 | function ms_gen_25D_HPC(n_priorBeta,oriBeta_inp,n_colonies_min,n_colonies_max,lamwidth_beta,file_dir,ms_ID,lam_ratio,texture_strength) 15 | 16 | %domain l/w/t 17 | l = 1; 18 | w = 1; 19 | t = 1; 20 | 21 | diameq_nu = 1; %grain equivalent diameter, mean 22 | diameq_sigma = 0.2; %grain equivalent diameter, sigma 23 | sph_nu = 0.855; %sphericity, mean 24 | sph_sigma = 0.03; %sphericity, sigma 25 | % morpho_t1 = 'diameq:lognormal({0},{1})'.format(diameq_nu,diameq_sigma) % Specify morphological properties of the cells page 14 of neper doc lognormal(mean,sigma) 26 | % morpho_t2 = '1-sphericity:normal({0},{1})'.format(sph_nu, sph_sigma) % Specify morphological properties of the cells page 14 of neper doc 27 | morpho_t1 = sprintf('diameq:lognormal(%0.4f,%0.4f),sphericity:normal(%0.4f,%0.4f)',diameq_nu,diameq_sigma,sph_nu,sph_sigma); 28 | morpho_t2 = sprintf('diameq:lognormal(%0.4f,%0.4f),sphericity:normal(%0.4f,%0.4f)',diameq_nu,diameq_sigma,sph_nu,sph_sigma); 29 | 30 | lamwidth_alpha = lamwidth_beta * lam_ratio; % alpha lamellar width, alpha:beta = 1:lam_ratio 31 | lamwidth_single = l*10; % width when alpha colony is a single grain, i.e., w/o lameller 32 | r_lameller = 0.2/0.2; % ratio of alpha colonies with lamellar 33 | 34 | % Gnerate .sh script for Neper tessellation 35 | script_tess = fopen(string(file_dir)+'generate_tess.sh', 'w'); 36 | 37 | fprintf(script_tess,'#!/bin/bash\n'); 38 | 39 | fprintf(script_tess,'# Shell script for geometry tessellation\n'); 40 | fprintf(script_tess,'# - This bash script requires a full installation of Neper. Tested on v4.4.2-33\n'); 41 | fprintf(script_tess,'cd '+string(file_dir)+'\n'); 42 | 43 | ori_gen.ori_files(ms_ID,n_priorBeta,oriBeta_inp,n_colonies_min,n_colonies_max,file_dir,lamwidth_beta,lamwidth_alpha,lamwidth_single,r_lameller,texture_strength);% Generate orientation files 44 | 45 | % 3 scale tessellation (beta lamella) 46 | fprintf(script_tess,'# Generate beta lamella\n'); 47 | n_grains = string(n_priorBeta)+'::'+'file('+string(ms_ID)+'_colonies)'+'::'+'from_morpho'; 48 | morpho_t3 = string('lamellar(w=file('+string(ms_ID)+'_lamwidth),v=crysdir(-1,1,0),pos=optimal)'); 49 | morpho = string(morpho_t1)+'::'+string(morpho_t2)+'::'+string(morpho_t3); 50 | if oriBeta_inp==0 % When ms id is used as input 51 | fprintf(script_tess,"neper -T -n '%s' -id %s -dim 3 -domain 'cube(%s,%s,%s)' -ori 'random::file(%s_scale2_ori)::random' -morpho '%s' -group 'lam==1?1:2' -statcell ""scaleid(1),lam"" -statgroup vol -reg 1 -sel %s -format tess -o %s_%s_%s_%s_%s\n",... 52 | [n_grains,string(ms_ID),string(l),string(w),string(t),string(ms_ID),morpho,string(lamwidth_beta*0.1), ... 53 | string(round(n_priorBeta)),string(round(n_colonies_max)),string(round(lamwidth_beta*100000)), ... 54 | string(round(lam_ratio*100000)),string(ms_ID)]); 55 | else 56 | fprintf(script_tess,"neper -T -n '%s' -id %s -morphooptiini 'coo:file(seeds)' -dim 3 -domain 'cube(%s,%s,%s)' -ori 'random::file(%s_scale2_ori)::random' -morpho '%s' -group 'lam==1?1:2' -statcell ""scaleid(1),lam"" -statgroup vol -reg 1 -sel %s -format tess -o %s_%s_%s_%s_%s\n",... 57 | [n_grains,string(ms_ID),string(l),string(w),string(t),string(ms_ID),morpho,string(lamwidth_beta*0.1), ... 58 | string(round(n_priorBeta)),string(round(n_colonies_max)),string(round(lamwidth_beta*100000)), ... 59 | string(round(lam_ratio*100000)),string(ms_ID)]); 60 | end 61 | 62 | fprintf(script_tess,"rm -f %s_scale3_ori\n",[string(ms_ID)]); 63 | for grain_ID_priorBeta = 1:1:n_priorBeta 64 | for grain_ID_colony = 1:1:n_colonies_max 65 | fprintf(script_tess,"awk '{if ($1==%s) print $2}' %s_%s_%s_%s_%s.stcell > %s_ori-grain%s\n", ... 66 | [string(grain_ID_priorBeta),string(round(n_priorBeta)),string(round(n_colonies_max)), ... 67 | string(round(lamwidth_beta*100000)),string(round(lam_ratio*100000)),string(ms_ID), ... 68 | string(ms_ID),string(grain_ID_priorBeta)]); 69 | fprintf(script_tess,"ORI_A=$(awk 'NR==1 {print $1,$2,$3}' %s_cell%s_%s)\n", ... 70 | [string(ms_ID),string(grain_ID_priorBeta),string(grain_ID_colony)]); 71 | fprintf(script_tess,"ORI_B=$(awk 'NR==2 {print $1,$2,$3}' %s_cell%s_%s)\n", ... 72 | [string(ms_ID),string(grain_ID_priorBeta),string(grain_ID_colony)]); 73 | fprintf(script_tess,"sed -i ""s/^\\<1\\>$/$ORI_A/"" %s_ori-grain%s\n", ... 74 | [string(ms_ID),string(grain_ID_priorBeta)]); 75 | fprintf(script_tess,"sed -i ""s/^\\<2\\>$/$ORI_B/"" %s_ori-grain%s\n", ... 76 | [string(ms_ID),string(grain_ID_priorBeta)]); 77 | fprintf(script_tess,"echo ""%s::%s file(%s_ori-grain%s,des=euler-bunge)"" >> %s_scale3_ori\n", ... 78 | [string(grain_ID_priorBeta),string(grain_ID_colony),string(ms_ID),string(grain_ID_priorBeta),string(ms_ID)]); 79 | end 80 | end 81 | if oriBeta_inp==0 % When ms id is used as input 82 | fprintf(script_tess,"neper -T -n '%s' -id %s -dim 3 -domain 'cube(%s,%s,%s)' -ori 'random::file(%s_scale2_ori)::file(%s_scale3_ori)' -morpho '%s' -group 'lam==1?1:2' -statgroup vol -reg 1 -sel %s -format tess -o %s_%s_%s_%s_%s\n",... 83 | [n_grains,string(ms_ID),string(l),string(w),string(t),string(ms_ID),string(ms_ID),morpho,string(lamwidth_beta*0.1), ... 84 | string(round(n_priorBeta)),string(round(n_colonies_max)),string(round(lamwidth_beta*100000)), ... 85 | string(round(lam_ratio*100000)),string(ms_ID)]); 86 | fprintf(script_tess,"neper -T -n '%s' -id %s -dim 3 -domain 'cube(%s,%s,%s)' -ori 'random::file(%s_scale2_ori)::file(%s_scale3_ori)' -morpho '%s' -reg 1 -sel %s -format tesr -tesrformat 'ascii' -tesrsize 32 -o %s_%s_%s_%s_%s\n",... 87 | [n_grains,string(ms_ID),string(l),string(w),string(t),string(ms_ID),string(ms_ID),morpho,string(lamwidth_beta*0.1), ... 88 | string(round(n_priorBeta)),string(round(n_colonies_max)),string(round(lamwidth_beta*100000)), ... 89 | string(round(lam_ratio*100000)),string(ms_ID)]); 90 | else 91 | fprintf(script_tess,"neper -T -n '%s' -id %s -morphooptiini 'coo:file(seeds)' -dim 3 -domain 'cube(%s,%s,%s)' -ori 'random::file(%s_scale2_ori)::file(%s_scale3_ori)' -morpho '%s' -group 'lam==1?1:2' -statgroup vol -reg 1 -sel %s -format tess -o %s_%s_%s_%s_%s\n",... 92 | [n_grains,string(ms_ID),string(l),string(w),string(t),string(ms_ID),string(ms_ID),morpho,string(lamwidth_beta*0.1), ... 93 | string(round(n_priorBeta)),string(round(n_colonies_max)),string(round(lamwidth_beta*100000)), ... 94 | string(round(lam_ratio*100000)),string(ms_ID)]); 95 | fprintf(script_tess,"neper -T -n '%s' -id %s -morphooptiini 'coo:file(seeds)' -dim 3 -domain 'cube(%s,%s,%s)' -ori 'random::file(%s_scale2_ori)::file(%s_scale3_ori)' -morpho '%s' -reg 1 -sel %s -format tesr -tesrformat 'ascii' -tesrsize 32 -o %s_%s_%s_%s_%s\n",... 96 | [n_grains,string(ms_ID),string(l),string(w),string(t),string(ms_ID),string(ms_ID),morpho,string(lamwidth_beta*0.1), ... 97 | string(round(n_priorBeta)),string(round(n_colonies_max)),string(round(lamwidth_beta*100000)), ... 98 | string(round(lam_ratio*100000)),string(ms_ID)]); 99 | end 100 | fprintf(script_tess,'#Generate geometry image \n'); 101 | fprintf(script_tess,"neper -V %s_%s_%s_%s_%s.tess -datacellcol 'ori' -datacellcolscheme 'ipf(z)' -print %s_%s_%s_%s_%s_geom \n",... 102 | [string(round(n_priorBeta)),string(round(n_colonies_max)),string(round(lamwidth_beta*100000)), ... 103 | string(round(lam_ratio*100000)),string(ms_ID),string(round(n_priorBeta)),string(round(n_colonies_max)), ... 104 | string(round(lamwidth_beta*100000)),string(round(lam_ratio*100000)),string(ms_ID)]); 105 | fprintf(script_tess,"neper -V %s_%s_%s_%s_%s.tesr -datacellcol 'ori' -datacellcolscheme 'ipf(z)' -print %s_%s_%s_%s_%s_geom_raster \n\n",... 106 | [string(round(n_priorBeta)),string(round(n_colonies_max)),string(round(lamwidth_beta*100000)), ... 107 | string(round(lam_ratio*100000)),string(ms_ID),string(round(n_priorBeta)),string(round(n_colonies_max)), ... 108 | string(round(lamwidth_beta*100000)),string(round(lam_ratio*100000)),string(ms_ID)]); 109 | 110 | fprintf(script_tess,'exit 0'); 111 | fclose(script_tess); 112 | end 113 | 114 | -------------------------------------------------------------------------------- /ori_gen.m: -------------------------------------------------------------------------------- 1 | % This script is used for randomly generarte prior beta orientations, and 2 | % resulting alpha colonies' orientations obeying the Burgers OR. 3 | % Reference https://mtex-toolbox.github.io/ParentChildVariants.html 4 | % This is the version used for python library generation. 5 | 6 | % Need to have MTEX installed 7 | 8 | % Xiao Shang @ UofT, 20220721 9 | % This is the version used for python library generation. 10 | % ----------------------------------------------------------------------- % 11 | % Changed on 20221116. Take ori and seeds as input for tessellation. 12 | 13 | classdef ori_gen 14 | methods (Static) 15 | function [] = ori_files(ms_ID, n_priorBeta,oriBeta_inp,n_colonies_min,n_colonies_max,path,lamwidth_beta,lamwidth_alpha,lamwidth_single,r_lameller,texture_strength) 16 | 17 | n_colonies_tofile = zeros(n_priorBeta,1); % to write to the colonie file, telling Neper the number of colonies in each prior beta grain 18 | oriBeta_tofile = zeros(n_priorBeta, 3); 19 | oriAlpha_tofile = zeros(n_priorBeta, n_colonies_max, 3); 20 | 21 | % specify the crystalSymmtry (CS) for parent (beta prior phase). The size 22 | % of Ti BCC is 3.3 by 3.3 by 3.3 23 | csBeta = crystalSymmetry('432',[3.32 3.32 3.32],'mineral','Ti64 (beta)'); 24 | % specimen symmetry (which is no symmetry, or 'triclinic') 25 | ss = specimenSymmetry('1'); 26 | % Similarly, specify the CS for child (alpha colonies) 27 | csAlpha = crystalSymmetry('622',[2.95 2.95 4.68],'mineral','Ti64 (alpha)'); 28 | 29 | % generate orientations from input euler angles, the format being accepted by Neper 30 | if texture_strength == 0 % no texture 31 | if oriBeta_inp==0 32 | oriBeta = project2FundamentalRegion(orientation.rand(n_priorBeta, csBeta)); 33 | else 34 | oriBeta = project2FundamentalRegion(orientation.byEuler(oriBeta_inp*degree, csBeta)); 35 | end 36 | else % after 'else' are not being used 37 | % create a mod orientation for cube texture (001)[100] 38 | mod=orientation.byEuler(0,0,0,csBeta,ss); 39 | % create the corresponding ODF 40 | odf = unimodalODF(mod,'halfwidth',texture_strength*degree); 41 | oriBeta = discreteSample(odf,n_priorBeta); 42 | end 43 | % plot(oriBeta,'MarkerColor','b','MarkerSize',5); 44 | for beta = 1:n_priorBeta 45 | oriBeta_tofile(beta,:) = [oriBeta(beta).phi1/degree, oriBeta(beta).Phi/degree, oriBeta(beta).phi2/degree]; 46 | end 47 | % Find the rotation relations (misorientations) to translate beta to alpha, according to the 48 | % Burgers OR 49 | beta2alpha = orientation.Burgers(csBeta,csAlpha); 50 | 51 | % Find all possible variants resulting from the Burgers OR. This is an 52 | % intermedia step, the actul beta orientation is just the onr indicated 53 | % by oriBeta. 54 | oriBetaSym = oriBeta.symmetrise; 55 | % Discard the last 12 variants becasue they are identical to the first 12. 56 | % In reality only 12 variants exists for Ti beta->alpha 57 | oriBetaSym = oriBetaSym(1:12,:); 58 | 59 | for beta = 1:n_priorBeta 60 | n_colonies_tofile(beta) = randi([n_colonies_min,n_colonies_max]); 61 | for colony = 1:n_colonies_tofile(beta) 62 | %num_sym = randi(12); % randomly pick one variant 63 | num_sym = 1; % pick the first variant 64 | oriColonyBeta = oriBetaSym(num_sym, beta); 65 | oriColonyAlpha_nonfund = oriColonyBeta * inv(beta2alpha); % calculate the orientation for this colony 66 | oriColonyAlpha = project2FundamentalRegion(oriColonyAlpha_nonfund); 67 | %organize the values to be wrtten in file. Covert radian to 68 | %degrees, becasue Neper uses degrees as Euler-Bunge angles 69 | %oriBeta_tofile(beta, colony,:) = [oriBeta_Bunge.phi1/degree, 70 | %oriBeta_Bunge.Phi/degree, oriBeta_Bunge.phi2/degree]; %discarded. the 12 beta ori generated here are not necessary 71 | oriAlpha_tofile(beta, colony,:) = [oriColonyAlpha.phi1/degree, oriColonyAlpha.Phi/degree, oriColonyAlpha.phi2/degree]; 72 | % plot(oriColonyAlpha,'MarkerColor','c','MarkerSize',5); 73 | end 74 | end 75 | 76 | % Write the results to files, angles in degree 77 | 78 | % Write number of colonies to file 'colonies' 79 | colonies_ID = fopen(string(path)+string(ms_ID)+'_colonies','w'); 80 | 81 | priorBeta_ID = linspace(1,n_priorBeta,n_priorBeta); 82 | fprintf(colonies_ID,'%i %i\n',[priorBeta_ID;n_colonies_tofile']); 83 | 84 | fclose(colonies_ID); 85 | 86 | % Write file 'ori' and 'lamwidth' 87 | scale2_ori_ID = fopen(string(path)+string(ms_ID)+'_scale2_ori','w'); 88 | %scale3_ori_ID = fopen(string(path)+string(ms_ID)+'_scale3_ori_pre','w'); 89 | lamwidth_ID = fopen(string(path)+string(ms_ID)+'_lamwidth','w'); 90 | 91 | for beta = 1:n_priorBeta 92 | %wirte one line in 'scale2_ori' 93 | fprintf(scale2_ori_ID,'%i file(%i_cell%i,des=euler-bunge)\n',[beta,ms_ID,beta]); 94 | % write cell file 95 | scale2_cell_ID = fopen(string(path)+string(ms_ID)+'_cell'+string(beta),'w'); 96 | for i = 1:n_colonies_max 97 | % print cell euler angles in each alphams_ID colony (cell). 98 | fprintf(scale2_cell_ID,'%f ',oriAlpha_tofile(beta,:)); 99 | fprintf(scale2_cell_ID,'\n'); 100 | end 101 | fclose(scale2_cell_ID); 102 | for colony = 1:n_colonies_tofile(beta) 103 | %wirte one line in 'scale3_ori' and 'lamwidth' 104 | %fprintf(scale3_ori_ID,'%i::%i file(%i_cell%i_%i,des=euler-bunge)\n',[beta,colony,ms_ID,beta,colony]); 105 | % only r_lameller portion of alpha colonies have 106 | % lamellar 107 | islam = rand(); 108 | if islam <= r_lameller 109 | fprintf(lamwidth_ID,'%i::%i %.2f:%.2f\n',[beta,colony,lamwidth_alpha,lamwidth_beta]); 110 | else 111 | fprintf(lamwidth_ID,'%i::%i %.2f:%.2f\n',[beta,colony,lamwidth_single,lamwidth_single]); 112 | end 113 | 114 | %wirte cell file 115 | scale3_cell_ID = fopen(string(path)+string(ms_ID)+'_cell'+string(beta)+'_'+string(colony),'w'); 116 | % Write the aplha and beta orientation into a file for 117 | % each grain 118 | for i = 1:1 119 | % print cell euler angles in each alpha colony (cell). 120 | fprintf(scale3_cell_ID,'%f ',oriAlpha_tofile(beta, colony,:)); 121 | fprintf(scale3_cell_ID,'\n'); 122 | fprintf(scale3_cell_ID,'%f ',oriBeta_tofile(beta,:)); 123 | fprintf(scale3_cell_ID,'\n'); 124 | end 125 | fclose(scale3_cell_ID); 126 | end 127 | end 128 | fclose(scale2_ori_ID); 129 | %fclose(scale3_ori_ID); 130 | fclose(lamwidth_ID); 131 | end 132 | end 133 | end -------------------------------------------------------------------------------- /std_scalerSGD_alldata_200eps.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/std_scalerSGD_alldata_200eps.bin -------------------------------------------------------------------------------- /train_indexSGD_alldata_200eps.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/train_indexSGD_alldata_200eps.npy -------------------------------------------------------------------------------- /train_index_final_all_data_20221026.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/train_index_final_all_data_20221026.npy -------------------------------------------------------------------------------- /training_checkpoints/checkpoint: -------------------------------------------------------------------------------- 1 | model_checkpoint_path: "ckpt-5" 2 | all_model_checkpoint_paths: "ckpt-5" 3 | -------------------------------------------------------------------------------- /training_checkpoints/ckpt-1.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/training_checkpoints/ckpt-1.data-00000-of-00001 -------------------------------------------------------------------------------- /training_checkpoints/ckpt-1.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/training_checkpoints/ckpt-1.index -------------------------------------------------------------------------------- /training_checkpoints/ckpt-2.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/training_checkpoints/ckpt-2.data-00000-of-00001 -------------------------------------------------------------------------------- /training_checkpoints/ckpt-2.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/training_checkpoints/ckpt-2.index -------------------------------------------------------------------------------- /training_checkpoints/ckpt-3.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/training_checkpoints/ckpt-3.data-00000-of-00001 -------------------------------------------------------------------------------- /training_checkpoints/ckpt-3.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/training_checkpoints/ckpt-3.index -------------------------------------------------------------------------------- /training_checkpoints/ckpt-4.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/training_checkpoints/ckpt-4.data-00000-of-00001 -------------------------------------------------------------------------------- /training_checkpoints/ckpt-4.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/training_checkpoints/ckpt-4.index -------------------------------------------------------------------------------- /training_checkpoints/ckpt-5.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/training_checkpoints/ckpt-5.data-00000-of-00001 -------------------------------------------------------------------------------- /training_checkpoints/ckpt-5.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xshang93/MsInverseDesign/ae96413660bf82237ec0792f61e7cfb07c447c56/training_checkpoints/ckpt-5.index -------------------------------------------------------------------------------- /yield_cal.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Wed Nov 2 17:31:37 2022 5 | 6 | @author: xiao 7 | 8 | This script calculates the yield strength from 7 points on a s-s curve 9 | input needs to be a list of stresses list at [0.25, ..., 1.5] 10 | returns a list of moduli and yield strengths 11 | """ 12 | import numpy as np 13 | from scipy import interpolate 14 | 15 | def yield_strength_cal(target): 16 | 17 | strain = np.linspace(0.0025,0.015,6) 18 | interp_factor = 10 # density of interpolation. larger the finer 19 | strain_new = np.linspace(0.0025,0.015,6*interp_factor) 20 | 21 | yield_strengths = [] 22 | moduli = [] 23 | 24 | for i in range(0,len(target)): 25 | stress = target[i] 26 | E = stress[0]/0.0025 # calculate Young's modulus 27 | intercept = -E*0.002 # calculate the intercept of the 0.2% offsetline 28 | model_interp = interpolate.interp1d(strain,stress,'linear') # fit the current interpolation model 29 | stress_new = model_interp(strain_new) # interpolate the stress 30 | offsetline = E*strain_new+intercept # assemble the offset line 31 | 32 | idx_yield = np.argwhere(np.diff(np.sign(stress_new - offsetline))).flatten() # find the index of the yield stress 33 | yield_strength = stress_new[idx_yield][0] # '0' is used to convert array to float number 34 | 35 | yield_strengths.append(yield_strength) 36 | moduli.append(E) 37 | 38 | return moduli,yield_strengths --------------------------------------------------------------------------------