├── GAN
│   ├── example_run_output.py
│   ├── example_run_training.py
│   ├── multipassGAN-4x.py
│   ├── multipassGAN-8x.py
│   └── multipassGAN-out.py
├── LICENSE
├── README.md
├── datagen
│   ├── gen_mul_data.py
│   └── gen_sim_grow_slices_data.py
├── resources
│   ├── in175b.jpg
│   ├── out_2_175b.jpg
│   └── teaser5.png
└── tools_wscale
    ├── GAN.py
    ├── fluiddataloader.py
    ├── flushmem.py
    ├── old
    │   ├── fluiddataloader.py
    │   └── tilecreator_t.py
    ├── paramhelpers.py
    ├── tilecreator2_test2.py
    ├── tilecreator_t.py
    └── uniio.py

/GAN/example_run_output.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | # 4x multi pass
4 | if 0:
5 |     ## first nn: test 4
6 |     os.system('python multipassGAN-4x.py randSeed 174213111 upRes 4 startIndex 0 out 1 pretrain 0 pretrainDisc 0 tileSize 64 trainingIterations 60000 lambda 5.0 lambda2 0.00001 discRuns 2 genRuns 2 alwaysSave 1 fromSim 1005 toSim 1005 outputInterval 200 genTestImg 1 dropout 0.5 dataDim 2 batchSize 16 useVelocities 1 useVorticities 0 useK_Eps_Turb 0 useFlags 0 gif 0 genModel gen_resnet discModel disc_binclass basePath ../2ddata_gan_sliced/ packedSimPath ../data/ lambda_t 1.0 lambda_t_l2 0.0 frame_min 110 frame_max 111 data_fraction 0.01 adv_flag 1 dataAugmentation 1 premadeTiles 0 rot 1 load_model_test 4 load_model_no 1199 sliceMode 1 genUni 1 interpMode 1 upsamplingMode 2 upsampledData 0 velScale 1.0')
7 |     ## second nn: test 48
8 |     os.system('python multipassGAN-4x.py randSeed 174213111 upRes 4 startIndex 0 out 1 pretrain 0 pretrainDisc 0 tileSize 64 trainingIterations 60000 lambda 5.0 lambda2 0.00001 discRuns 2 genRuns 2 alwaysSave 1 fromSim 1005 toSim 1005 outputInterval 200 genTestImg 1 dropout 0.5 dataDim 2 batchSize 16 useVelocities 1 useVorticities 0 useK_Eps_Turb 0 useFlags 0 gif 0 genModel gen_resnet discModel disc_binclass basePath ../2ddata_gan_sliced/ packedSimPath ../data/ lambda_t 1.0 lambda_t_l2 0.0 frame_min 110 frame_max 111 data_fraction 0.01 adv_flag 1 dataAugmentation 1 premadeTiles 0 rot 1 load_model_test 48 load_model_no 799 sliceMode 1 genUni 1 interpMode 1 upsamplingMode 1 upsampledData 1 velScale 1.0')
9 | 
10 | # 8x multi pass
11 | if 1:
12 |     ## first nn: test 59
13 |     ## second nn: test 409
14 |     sim_no = 3006
15 |     min_frame = 100
16 |     max_frame = 120
17 | 
18 |     net1_config = {
19 |         "firstNNArch": "1",
20 |         "load_model_test_1": "0",
21 |         "load_model_no_1": "299",
22 |         "use_res_net1": "1",
23 |         "add_adj_idcs1": "1",
24 |         "startFms1": "256",
25 |         "maxFms1": "256",
26 |         "filterSize1": "3"
27 |     }
28 | 
29 |     net2_config = {
30 |         "load_model_test_2": "4",
31 |         "load_model_no_2": "585",
32 |         "use_res_net2": "1",
33 |         "add_adj_idcs2": "0",
34 |         "startFms2": "192",
35 |         "maxFms2": "192",
36 |         "filterSize2": "5"
37 |     }
38 | 
39 |     net3_config = {
40 |         "load_model_test_3": "-1",
41 |         "load_model_no_3": "-1",
42 |         "use_res_net3": "0",
43 |         "add_adj_idcs3": "0",
44 |         "startFms3": "192",
45 |         "maxFms3": "96",
46 |         "filterSize3": "5"
47 |     }
48 |     python_string = 'python multipassGAN-out.py randSeed 174213111 upRes 8 pixelNorm 1 batchNorm 0 out 1 tileSize 64 simSize 64 fromSim %04d useVelocities 1 useVorticities 0 useK_Eps_Turb 0 useFlags 0 genModel gen_resnet discModel disc_binclass basePath ../ packedSimPath ../../ frame_max %04d frame_min %04d velScale 1.0 genUni 1 upsampleMode 1 usePixelShuffle 0 loadEmas 0 addBicubicUpsample 1 gpu 0 transposeAxis 0' % (sim_no,max_frame,min_frame)
49 | 
50 |     for key in (net1_config.keys()):
51 |         python_string += ' ' + str(key) + ' ' + net1_config[key]
52 | 
53 |     for key in (net2_config.keys()):
54 | 
python_string += ' ' + str(key) + ' ' + net2_config[key] 55 | 56 | for key in (net3_config.keys()): 57 | python_string += ' ' + str(key) + ' ' + net3_config[key] 58 | 59 | print(python_string) 60 | # either this command: 61 | os.system(python_string) 62 | 63 | 64 | # or these (also stores intermediate results) 65 | #for i in range(20): 66 | # os.system('python multipassGAN-8x.py randSeed 174213111 upRes 8 use_res_net 1 firstNNArch 1 add_adj_idcs 1 pixelNorm 1 batchNorm 0 out 1 pretrain 0 pretrainDisc 0 tileSize 64 simSize 64 trainingIterations 60000 lambda 5.0 lambda2 0.00001 discRuns 2 genRuns 2 alwaysSave 1 fromSim %04d toSim %04d outputInterval 200 genTestImg 1 dropout 0.5 dataDim 2 batchSize 16 useVelocities 1 useVorticities 0 useK_Eps_Turb 0 useFlags 0 gif 0 genModel gen_resnet discModel disc_binclass basePath ../ packedSimPath V:/data3d_sliced_growing2/ lambda_t 1.0 lambda_t_l2 0.0 frame_max %04d frame_min %04d data_fraction 0.05 adv_flag 1 dataAugmentation 1 premadeTiles 0 rot 1 load_model_test 0 load_model_no 299 velScale 1.0 genUni 1 upsampledData 0 upsamplingMode 2 maxFms 256 startFms 256 filterSize 3 usePixelShuffle 0 loadEmas 0 upsampleMode 1 addBicubicUpsample 1 gpu 0 transposeAxis 0' % (1000+i, 1000+i,max_frame,min_frame)) 67 | 68 | #os.system('python multipassGAN-8x.py randSeed 174213111 upRes 8 use_res_net 1 outNNTestNo 59 pixelNorm 1 batchNorm 0 out 1 pretrain 0 pretrainDisc 0 tileSize 64 simSize 64 trainingIterations 60000 lambda 5.0 lambda2 0.00001 discRuns 2 genRuns 2 alwaysSave 1 fromSim %04d toSim %04d outputInterval 200 genTestImg 1 dropout 0.5 dataDim 2 batchSize 16 useVelocities 1 useVorticities 0 useK_Eps_Turb 0 useFlags 0 gif 0 genModel gen_resnet discModel disc_binclass basePath ../ packedSimPath ../../ lambda_t 1.0 lambda_t_l2 0.0 frame_max %04d frame_min %04d data_fraction 0.05 adv_flag 1 dataAugmentation 1 premadeTiles 0 rot 1 load_model_test 409 load_model_no 499 velScale 1.0 genUni 1 upsampledData 1 upsampleMode 1 upsamplingMode 1 maxFms 256 startFms 192 filterSize 5 usePixelShuffle 0 loadEmas 0 addBicubicUpsample 1 gpu 0 transposeAxis 2' % (sim_no, sim_no,max_frame,min_frame)) 69 | 70 | #os.system('python multipassGAN-8x.py randSeed 174213111 upRes 8 use_res_net 0 outNNTestNo 409 pixelNorm 1 batchNorm 0 out 1 pretrain 0 pretrainDisc 0 tileSize 64 simSize 64 trainingIterations 60000 lambda 5.0 lambda2 0.00001 discRuns 2 genRuns 2 alwaysSave 1 fromSim %04d toSim %04d outputInterval 200 genTestImg 1 dropout 0.5 dataDim 2 batchSize 16 useVelocities 1 useVorticities 0 useK_Eps_Turb 0 useFlags 0 gif 0 genModel gen_resnet discModel disc_binclass basePath ../ packedSimPath ../../ lambda_t 1.0 lambda_t_l2 0.0 frame_max %04d frame_min %04d data_fraction 0.05 adv_flag 1 dataAugmentation 1 premadeTiles 0 rot 1 load_model_test 411 load_model_no 749 velScale 1.0 genUni 1 upsampledData 1 upsamplingMode 3 maxFms 256 startFms 96 filterSize 5 usePixelShuffle 0 loadEmas 0 addBicubicUpsample 1 gpu 0 transposeAxis 1' % (sim_no, sim_no,max_frame,min_frame)) 71 | 72 | -------------------------------------------------------------------------------- /GAN/example_run_training.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | # train the first network 4 | os.system('python multipassGAN-8x.py randSeed 16131119 upRes 8 use_res_net 1 batchNorm 0 pixelNorm 1 out 0 pretrain 0 pretrainDisc 0 tileSize 16 simSize 64 use_LSGAN 0 use_wgan_gp 1 lambda 1.0 lambda2 0.0 discRuns 1 genRuns 1 alwaysSave 1 fromSim 1000 toSim 1016 
outputInterval 200 genTestImg 1 dropout 0.5 dataDim 2 batchSize 16 useVelocities 1 useVorticities 0 useK_Eps_Turb 0 useFlags 0 gif 0 genModel gen_resnet discModel disc_binclass basePath ../2ddata/ packedSimPath ../data3d_growing/ lambda_t 1.0 lambda_t_l2 0.0 frame_max 120 frame_min 0 data_fraction 0.08 adv_flag 1 adv_mode 0 dataAugmentation 1 premadeTiles 0 rot 1 minScale 0.85 maxScale 1.15 flip 1 decayLR 1 adam_beta1 0.0 adam_beta2 0.99 learningRate 0.0001 lossScaling 1 stageIter 40000 decayIter 60000 maxFms 256 startFms 256 filterSize 3 upsamplingMode 2 upsampledData 0 discRuns 1 load_model_test -1 load_model_no -1 firstNNArch 1 add_adj_idcs 1 usePixelShuffle 0 addBicubicUpsample 1 startingIter 0 useVelInTDisc 0 upsampleMode 1 gpu 0')
5 | 
6 | # train the second network; note that the input data for the 2nd network has to be generated first...
7 | os.system('python multipassGAN-8x.py randSeed 9631119 upRes 8 use_res_net 1 batchNorm 0 pixelNorm 1 out 0 pretrain 0 pretrainDisc 0 tileSize 8 simSize 64 use_LSGAN 0 use_wgan_gp 1 lambda 1.0 lambda2 0.0 discRuns 1 genRuns 1 alwaysSave 1 fromSim 1000 toSim 1016 outputInterval 100 genTestImg 1 dropout 0.5 dataDim 2 batchSize 16 useVelocities 1 useVorticities 0 useK_Eps_Turb 0 useFlags 0 gif 0 genModel gen_resnet discModel disc_binclass basePath ../2ddata/ packedSimPath ../data3d_growing/ lambda_t 1.0 lambda_t_l2 0.0 frame_max 120 frame_min 0 data_fraction 0.09 adv_flag 1 adv_mode 0 dataAugmentation 1 premadeTiles 0 rot 1 minScale 0.85 maxScale 1.15 flip 1 decayLR 1 adam_beta1 0.0 adam_beta2 0.99 learningRate 0.0001 lossScaling 1 stageIter 1 decayIter 250000 maxFms 192 startFms 192 filterSize 5 outNNTestNo 0 upsamplingMode 1 upsampledData 1 upsampleMode 1 discRuns 1 usePixelShuffle 0 addBicubicUpsample 1 startingIter 0 useVelInTDisc 0 gpu 0 load_model_test -1 load_model_no -1')
--------------------------------------------------------------------------------
/GAN/multipassGAN-out.py:
--------------------------------------------------------------------------------
1 | #******************************************************************************
2 | #
3 | # tempoGAN: A Temporally Coherent, Volumetric GAN for Super-resolution Fluid Flow
4 | # Copyright 2018 You Xie, Erik Franz, Mengyu Chu, Nils Thuerey, Maximilian Werhahn
5 | #
6 | #******************************************************************************
7 | 
8 | import time
9 | import shutil
10 | import sys
11 | import math
12 | import gc
13 | import scipy
14 | import numpy as np
15 | import os
16 | import faulthandler
17 | faulthandler.enable()
18 | import tensorflow as tf
19 | # load manta tools
20 | sys.path.append("../tools_wscale")
21 | import tilecreator_t as tc
22 | import uniio
23 | import fluiddataloader as FDL
24 | import paramhelpers as ph
25 | from GAN import GAN, lrelu
26 | 
27 | # initialize parameters / command line params
28 | outputOnly = int(ph.getParam( "out", False ))>0 # output/generation mode, main mode switch
29 | 
30 | basePath = ph.getParam( "basePath", '../2ddata_gan/' )
31 | randSeed = int(ph.getParam( "randSeed", 1 )) # seed for np and tf initialization
32 | load_model_test_1 = int(ph.getParam( "load_model_test_1", -1 )) # the number of the test to load a model from; can be used in training and output mode. -1 to not load a model
33 | load_model_test_2 = int(ph.getParam( "load_model_test_2", -1 )) # the number of the test to load a model from; can be used in training and output mode. -1 to not load a model
34 | load_model_test_3 = int(ph.getParam( "load_model_test_3", -1 )) # the number of the test to load a model from; can be used in training and output mode. -1 to not load a model
35 | load_model_no_1 = int(ph.getParam( "load_model_no_1", -1 )) # number of the model to load
36 | load_model_no_2 = int(ph.getParam( "load_model_no_2", -1 )) # number of the model to load
37 | load_model_no_3 = int(ph.getParam( "load_model_no_3", -1 )) # number of the model to load
38 | 
39 | simSizeLow = int(ph.getParam( "simSize", 64 )) # size of the low-res sim
40 | tileSizeLow = int(ph.getParam( "tileSize", 16 )) # size of low res tiles
41 | upRes = int(ph.getParam( "upRes", 4 )) # scaling factor
42 | 
43 | #Data and Output
44 | packedSimPath = ph.getParam( "packedSimPath", '/data/share/GANdata/2ddata_sim/' ) # path to training data
45 | fromSim = int(ph.getParam( "fromSim", 1000 )) # range of sim data to use, start index
46 | frame_min = int(ph.getParam( "frame_min", 0 ))
47 | genModel = ph.getParam( "genModel", 'gen_test' ) # generator model to use
48 | discModel = ph.getParam( "discModel", 'disc_test' ) # discriminator model to use
49 | #Training
50 | batch_norm = int(ph.getParam( "batchNorm", False ))>0 # apply batch normalization to conv and deconv layers
51 | pixel_norm = int(ph.getParam( "pixelNorm", True ))>0 # apply pixel normalization to conv and deconv layers
52 | 
53 | useVelocities = int(ph.getParam( "useVelocities", 0 )) # use velocities or not
54 | useVorticities = int(ph.getParam( "useVorticities", 0 )) # use vorticities or not
55 | useFlags = int(ph.getParam( "useFlags", 0 )) # use flags or not
56 | useK_Eps_Turb = int(ph.getParam( "useK_Eps_Turb", 0 ))
57 | 
58 | transposeAxis = int(ph.getParam( "transposeAxis", 0 )) # which axis permutation to use when slicing the input volume (0-3)
59 | 
60 | #Test and Save
61 | testPathStartNo = int(ph.getParam( "testPathStartNo", 0 ))
62 | frame_max = int(ph.getParam( "frame_max", 200 ))
63 | change_velocity = int(ph.getParam( "change_velocity", False ))
64 | 
65 | upsampling_mode = int(ph.getParam( "upsamplingMode", 2 ))
66 | upsampled_data = int(ph.getParam ( "upsampledData", False))
67 | generateUni = int(ph.getParam("genUni", False))
68 | usePixelShuffle = int(ph.getParam("usePixelShuffle", False))
69 | addBicubicUpsample = int(ph.getParam("addBicubicUpsample", False))
70 | add_adj_idcs1 = int(ph.getParam("add_adj_idcs1", False))
71 | add_adj_idcs2 = int(ph.getParam("add_adj_idcs2", False))
72 | add_adj_idcs3 = int(ph.getParam("add_adj_idcs3", False))
73 | 
74 | load_emas = int(ph.getParam("loadEmas", False))
75 | firstNNArch = int(ph.getParam("firstNNArch", True))
76 | upsampleMode = int(ph.getParam("upsampleMode", 1))
77 | 
78 | # parameters for growing approach
79 | use_res_net1 = int(ph.getParam( "use_res_net1", False ))
80 | use_res_net2 = int(ph.getParam( "use_res_net2", False ))
81 | use_res_net3 = int(ph.getParam( "use_res_net3", False ))
82 | use_mb_stddev = int(ph.getParam( "use_mb_stddev", False ))
83 | start_fms_1 = int(ph.getParam("startFms1", 512))
84 | max_fms_1 = int(ph.getParam("maxFms1", 256))
85 | filterSize_1 = int(ph.getParam("filterSize1", 3))
86 | start_fms_2 = int(ph.getParam("startFms2", 512))
87 | max_fms_2 = int(ph.getParam("maxFms2", 256))
88 | filterSize_2 = int(ph.getParam("filterSize2", 3))
89 | start_fms_3 = int(ph.getParam("startFms3", 512))
90 | max_fms_3 = int(ph.getParam("maxFms3", 256))
91 | filterSize_3 = int(ph.getParam("filterSize3", 3))
92 | 
93 | velScale = float(ph.getParam("velScale", 1.0))
94 | gpu_touse = int(ph.getParam("gpu", 0))
95 | 
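All of the parameters above follow the same flat "key value" command-line convention that the example_run_*.py scripts assemble into one long string. The parser itself lives in ../tools_wscale/paramhelpers.py and is not part of this dump; as a rough sketch of the convention (an assumption about its behavior, not the actual implementation):

import sys

def getParam(name, default):
    # scan the argument list as flat "key value" pairs; the caller casts the result
    args = sys.argv[1:]
    for i in range(0, len(args) - 1, 2):
        if args[i] == name:
            return args[i + 1]
    return default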
96 | os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
97 | os.environ["CUDA_VISIBLE_DEVICES"]= str(gpu_touse)
98 | 
99 | ph.checkUnusedParams()
100 | 
101 | # initialize
102 | simSizeHigh = simSizeLow * upRes
103 | tileSizeHigh = tileSizeLow * upRes
104 | 
105 | 
106 | 
107 | channelLayout_low = 'd'
108 | channelLayout_high = 'd'
109 | 
110 | lowfilename = "density_low_%04d.uni"
111 | 
112 | toSim = fromSim
113 | dirIDs = np.linspace(fromSim, toSim, (toSim-fromSim+1),dtype='int16')
114 | 
115 | highfilename = "density_high_%04d.uni"
116 | mfl = ["density"]
117 | mfh = ["density"]
118 | 
119 | # the output of the first network is loaded as the high-res data of the tile creator -> separated again when fetching the input
120 | 
121 | if useVelocities:
122 |     channelLayout_low += ',vx,vy,vz'
123 |     mfl = np.append(mfl, "velocity")
124 | 
125 | data_fraction = 1.0
126 | kt = 0.0
127 | kt_l = 0.0
128 | useTempoD = False
129 | useTempoL2 = False
130 | useDataAugmentation = 0
131 | 
132 | # load data
133 | floader = FDL.FluidDataLoader( print_info=3, base_path=packedSimPath, base_path_y = packedSimPath, numpy_seed = randSeed ,filename=lowfilename, filename_index_min = frame_min, oldNamingScheme=False, filename_y = None, filename_index_max=frame_max, indices=dirIDs, data_fraction=data_fraction, multi_file_list=mfl, multi_file_list_y=mfh)
134 | 
135 | x, y, _ = floader.get()
136 | 
137 | x_3d = x
138 | x_3d[:,:,:,:,1:4] = velScale * x_3d[:,:,:,:,1:4] # scale velocity channels
139 | 
140 | # 2D: tileSize x tileSize tiles; 3D: tileSize x tileSize x tileSize chunks
141 | n_input = tileSizeLow ** 2
142 | n_output = tileSizeHigh ** 2
143 | 
144 | n_inputChannels = 1
145 | 
146 | if useVelocities:
147 |     n_inputChannels += 3
148 | if useVorticities:
149 |     n_inputChannels += 3
150 | 
151 | n_input *= n_inputChannels
152 | 
153 | if not load_model_test_1 == -1:
154 |     if not os.path.exists(basePath + 'test_%04d/' % load_model_test_1):
155 |         print('ERROR: Test to load does not exist.')
156 |     if not load_emas:
157 |         load_path_1 = basePath + 'test_%04d/model_%04d.ckpt' % (load_model_test_1, load_model_no_1)
158 |         load_path_ema_1 = basePath + 'test_%04d/model_ema_%04d.ckpt' % (load_model_test_1, load_model_no_1)
159 |     else:
160 |         load_path_1 = basePath + 'test_%04d/model_ema_%04d.ckpt' % (load_model_test_1, load_model_no_1)
161 |     if outputOnly:
162 |         out_path_prefix = 'out_%04d-%04d' % (load_model_test_1,load_model_no_1)
163 |         test_path,_ = ph.getNextGenericPath(out_path_prefix, 0, basePath + 'test_%04d/' % load_model_test_1)
164 |     else:
165 |         test_path,_ = ph.getNextTestPath(testPathStartNo, basePath)
166 | 
167 | if not load_model_test_2 == -1:
168 |     if not os.path.exists(basePath + 'test_%04d/' % load_model_test_2):
169 |         print('ERROR: Test to load does not exist.')
170 |     if not load_emas:
171 |         load_path_2 = basePath + 'test_%04d/model_%04d.ckpt' % (load_model_test_2, load_model_no_2)
172 |         load_path_ema_2 = basePath + 'test_%04d/model_ema_%04d.ckpt' % (load_model_test_2, load_model_no_2)
173 |     else:
174 |         load_path_2 = basePath + 'test_%04d/model_ema_%04d.ckpt' % (load_model_test_2, load_model_no_2)
175 |     if outputOnly:
176 |         out_path_prefix = 'out_%04d-%04d' % (load_model_test_2,load_model_no_2)
177 |         test_path,_ = ph.getNextGenericPath(out_path_prefix, 0, basePath + 'test_%04d/' % load_model_test_2)
178 |     else:
179 |         test_path,_ = ph.getNextTestPath(testPathStartNo, basePath)
180 | 
181 | if not load_model_test_3 == -1:
182 |     if not os.path.exists(basePath + 'test_%04d/' % load_model_test_3):
183 |         print('ERROR: Test to load does not exist.')
184 |         print('Using two networks')
185 |     else:
186 |         print('Using three networks')
187 |     if not load_emas:
188 |         load_path_3 = basePath + 'test_%04d/model_%04d.ckpt' % (load_model_test_3, load_model_no_3)
189 |         load_path_ema_3 = basePath + 'test_%04d/model_ema_%04d.ckpt' % (load_model_test_3, load_model_no_3)
190 |     else:
191 |         load_path_3 = basePath + 'test_%04d/model_ema_%04d.ckpt' % (load_model_test_3, load_model_no_3)
192 |     if outputOnly:
193 |         out_path_prefix = 'out_%04d-%04d' % (load_model_test_3,load_model_no_3)
194 |         test_path,_ = ph.getNextGenericPath(out_path_prefix, 0, basePath + 'test_%04d/' % load_model_test_3)
195 |     else:
196 |         test_path,_ = ph.getNextTestPath(testPathStartNo, basePath)
197 | 
198 | 
199 | # create session and saver
200 | config = tf.ConfigProto(allow_soft_placement=True)
201 | #config.gpu_options.per_process_gpu_memory_fraction = 0.8
202 | sess = tf.InteractiveSession(config = config)
203 | 
204 | def save_img(out_path, img):
205 |     img = np.clip(img * 255.0, 0, 255).astype(np.uint8)
206 |     scipy.misc.imsave(out_path, img)
207 | 
208 | def save_img_3d(out_path, img): # y ↓ x →, z ↓ x →, z ↓ y →, 3 in a column
209 |     data = np.concatenate([np.sum(img, axis=0), np.sum(img, axis=1), np.sum(img, axis=2)], axis=0)
210 |     save_img(out_path, data)
211 | 
212 | def lerp(x, y, t):
213 |     return tf.add(x, (y - x) * tf.clip_by_value(t,0.0,1.0))
214 | 
215 | def gaussian_noise_layer(input_layer, strength):
216 |     noise = tf.random_normal(shape=tf.shape(input_layer), mean=0.0, stddev=1.0, dtype=tf.float32)
217 |     return input_layer + noise * (strength * tf.sqrt(tf.cast(input_layer.get_shape().as_list()[3], tf.float32)))
218 | 
219 | # set up GAN structure
220 | def resBlock(gan, inp, s1, s2, reuse, use_batch_norm, name, filter_size=3):
221 |     # note - leaky relu (lrelu) not too useful here
222 | 
223 |     # convolutions of resnet block
224 |     filter = [filter_size,filter_size]
225 |     filter1 = [1,1]
226 | 
227 |     gc1,_ = gan.convolutional_layer( s1, filter, tf.nn.relu, stride=[1], name="g_cA_"+name, in_layer=inp, reuse=reuse, batch_norm=use_batch_norm, train=train) #->16,64
228 |     if pixel_norm:
229 |         gc1 = gan.pixel_norm(gc1)
230 |     gc2,_ = gan.convolutional_layer( s2, filter, None, stride=[1], name="g_cB_"+name, reuse=reuse, batch_norm=use_batch_norm, train=train) #->8,128
231 |     # shortcut connection
232 |     gs1,_ = gan.convolutional_layer( s2, filter1 , None , stride=[1], name="g_s_"+name, in_layer=inp, reuse=reuse, batch_norm=use_batch_norm, train=train) #->16,64
233 |     resUnit1 = tf.nn.relu( tf.add( gc2, gs1 ) )
234 |     if pixel_norm:
235 |         resUnit1 = gan.pixel_norm(resUnit1)
236 | 
237 |     return resUnit1
238 | 
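resBlock above relies on gan.pixel_norm, whose implementation (in ../tools_wscale/GAN.py) is not shown in this dump. Pixelwise feature normalization in the ProGAN sense divides each spatial position by the root mean square over its channels; a minimal numpy sketch of that formula (assuming NHWC layout and a small epsilon):

import numpy as np

def pixel_norm_np(x, eps=1e-8):
    # normalize every spatial position by the RMS over its channels
    return x / np.sqrt(np.mean(np.square(x), axis=-1, keepdims=True) + eps)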
239 | def growBlockGen(gan, inp, upres, fms, use_batch_norm, train, reuse, output = False, firstGen = True, filterSize = 3, first_nn_arch = False, use_res_net = True):
240 |     with tf.variable_scope("genBlock%d"%(upres), reuse=reuse) as scope:
241 |         if firstGen:
242 |             if not usePixelShuffle:
243 |                 inDepool = gan.avg_depool(mode = upsampleMode)
244 |             else:
245 |                 inDepool = gan.pixel_shuffle(inp, upres = 2, stage = "%d"%(upres))
246 |         else:
247 |             inDepool = inp
248 | 
249 |         filter = [filterSize,filterSize]
250 |         if first_nn_arch:
251 |             # deeper network in lower levels for higher low-res receptive field - only for the first network
252 |             if upres == 2:
253 |                 outp = resBlock(gan, inDepool, fms, fms, reuse, use_batch_norm, "first" , filter_size = filter[0]) #%(upres)
254 |                 outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "second", filter_size = filter[0]) #%(upres,upres)
255 |                 outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "third" , filter_size = filter[0]) #%(upres)
256 |                 outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "fourth", filter_size = filter[0]) #%(upres,upres)
257 |                 outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "fifth", filter_size = filter[0]) #%(upres,upres)
258 |             elif upres == 4:
259 |                 outp = resBlock(gan, inDepool, fms*2, fms, reuse, use_batch_norm, "first" , filter_size = filter[0]) #%(upres)
260 |                 outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "second" , filter_size = filter[0]) #%(upres)
261 |                 outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "third", filter_size = filter[0]) #%(upres,upres)
262 |             if upres == 8:
263 |                 outp = resBlock(gan, inDepool, fms*2, fms, reuse, use_batch_norm, "first" , filter_size = filter[0]) #%(upres)
264 |                 outp = resBlock(gan, outp, fms, fms, reuse, use_batch_norm, "second", filter_size = filter[0]) #%(upres,upres)
265 |         else:
266 |             if use_res_net:
267 |                 # two res blocks per growing block
268 |                 outp = resBlock(gan, inDepool, fms, fms, reuse, use_batch_norm, "first" , filter_size = filter[0]) #%(upres)
269 |                 outp = resBlock(gan, outp, fms//2, fms//2, reuse, use_batch_norm, "second", filter_size = filter[0]) #%(upres,upres)
270 |             else:
271 |                 # "recursive" output
272 |                 inp,_ = gan.convolutional_layer( fms, filter, lrelu, stride=[1], name="g_cA%d"%(upres), in_layer=inDepool, reuse=reuse, batch_norm=use_batch_norm, train=train) #->16,64
273 | 
274 |                 if pixel_norm:
275 |                     inp = gan.pixel_norm(inp)
276 |                 outp,_ = gan.convolutional_layer( fms, filter, lrelu, stride=[1], name="g_cB%d"%(upres), in_layer=inp, reuse=reuse, batch_norm=use_batch_norm, train=train) #->8,128
277 | 
278 |                 if pixel_norm:
279 |                     outp = gan.pixel_norm(outp)
280 |         # density output for blending
281 |         if not output:
282 |             outpDens, _ = GAN(outp, bn_decay=0.0).convolutional_layer( 1, [1,1], None, stride=[1], name="g_cdensOut%d"%(upres), in_layer=outp, reuse=reuse, batch_norm=False, train=train, gain = 1)
283 |             return outp, outpDens
284 |         return outp
285 | 
286 | def growing_gen(_in, percentage, reuse=False, use_batch_norm=False, train=None, currentUpres = 2, output = False, firstGen = True, filterSize = 3, startFms = 256, maxFms = 256, add_adj_idcs = False, first_nn_arch = False, use_res_net = True):
287 |     global rbId
288 |     print("\n\tGenerator (growing-sliced-resnett3-deep)")
289 | 
290 |     with tf.variable_scope("generator", reuse=reuse) as scope:
291 |         n_channels = n_inputChannels
292 |         if add_adj_idcs:
293 |             n_channels += 2
294 |         if firstGen:
295 |             _in = tf.reshape(_in, shape=[-1, tileSizeLow, tileSizeLow, n_channels]) #NHWC
296 |         else:
297 |             _in = tf.reshape(_in, shape=[-1, tileSizeHigh, tileSizeHigh, n_channels+1]) #NHWC
298 | 
299 |         gan = GAN(_in, bn_decay=0.0)
300 | 
301 |         # initial conv layers
302 |         filter = [filterSize,filterSize]
303 | 
304 |         if first_nn_arch:
305 |             x_g = _in
306 |         else:
307 |             if use_res_net:
308 |                 x_g = resBlock(gan, _in, 16, min(maxFms, startFms//2)//8, reuse, False, "1", filter_size = filter[0])
309 |                 x_g = resBlock(gan, x_g, min(maxFms, startFms//2)//4, min(maxFms, startFms//2)//2, reuse, False, "2", filter_size = filter[0])
310 |             else:
311 |                 x_g,_ = gan.convolutional_layer( 32, filter, lrelu, stride=[1], name="g_cA%d"%(1), in_layer=_in, reuse=reuse, batch_norm=use_batch_norm, train=train) #->16,64
312 |                 if pixel_norm:
313 |                     x_g = gan.pixel_norm(x_g)
314 |                 x_g,_ = gan.convolutional_layer( min(startFms//2, maxFms), filter, lrelu, stride=[1], name="g_cB%d"%(1), in_layer=x_g, reuse=reuse, batch_norm=use_batch_norm, train=train) #->8,128
315 |                 if pixel_norm:
316 |                     x_g = gan.pixel_norm(x_g)
317 |         # density output for blending
318 | 
319 |         for j in range(1,currentUpres+1):
320 |             num_fms = min(int(startFms / (2**j)),maxFms)
321 |             if not output or j == currentUpres:
322 |                 x_g, _dens = growBlockGen(gan, x_g, int(2**(j)), num_fms, use_batch_norm, train, reuse, False, firstGen, filterSize, first_nn_arch, use_res_net)
323 |             else:
324 |                 x_g = growBlockGen(gan, x_g, int(2**(j)), num_fms, use_batch_norm, train, reuse, output, firstGen, filterSize, first_nn_arch, use_res_net)
325 | 
326 |             # residual learning
327 |             if addBicubicUpsample:
328 |                 if j == currentUpres:
329 |                     if firstGen:
330 |                         _dens = _dens + GAN(tf.slice(_in, [0,0,0,0], [-1,tileSizeLow, tileSizeLow, 1])).avg_depool(mode = 2, scale = [int(2**(j))])
331 |                     else:
332 |                         _dens = _dens + tf.slice(_in, [0,0,0,0], [-1,tileSizeHigh, tileSizeHigh, 1])
333 | 
334 |         print("\tDOFs: %d , %f m " % ( gan.getDOFs() , gan.getDOFs()/1000000.) )
335 | 
336 |         resF = tf.reshape( _dens, shape=[-1, n_output] ) # + GAN(_in).avg_depool(mode = upsampleMode)
337 |         print("\tDOFs: %d , %f m " % ( gan.getDOFs() , gan.getDOFs()/1000000.) )
338 |         return resF
339 | 
340 | gen_model = growing_gen
341 | 
342 | x = tf.placeholder(tf.float32,[None,n_input], name = "x")
343 | y = tf.placeholder(tf.float32,[None,None], name = "y")
344 | 
345 | train = tf.placeholder(tf.bool)
346 | # output percentage for full 8x model...
347 | percentage = tf.placeholder(tf.float32)
348 | 
349 | # first generator
350 | x_in = x
351 | if not load_model_test_1 == -1:
352 |     with tf.variable_scope("gen_1", reuse=True) as scope:
353 |         sampler = gen_model(x_in, use_batch_norm=batch_norm, reuse = tf.AUTO_REUSE, currentUpres = int(round(math.log(upRes, 2))), train=False, percentage = percentage, output = True, firstGen = True, filterSize = filterSize_1, startFms = start_fms_1, maxFms = max_fms_1, add_adj_idcs = add_adj_idcs1, first_nn_arch = firstNNArch, use_res_net=use_res_net1)
354 | 
355 | # second generator
356 | if not load_model_test_2 == -1:
357 |     x_in_2 = tf.concat((tf.reshape(y, shape = [-1, tileSizeHigh, tileSizeHigh, 1]), tf.image.resize_images(tf.reshape(x, shape = [-1, tileSizeLow, tileSizeLow, n_inputChannels]), tf.constant([tileSizeHigh, tileSizeHigh], dtype= tf.int32), method=1)), axis = 3)
358 |     with tf.variable_scope("gen_2", reuse=True) as scope:
359 |         sampler_2 = gen_model(x_in_2, use_batch_norm=batch_norm, reuse = tf.AUTO_REUSE, currentUpres = int(round(math.log(upRes, 2))), train=False, percentage = percentage, output = True, firstGen = False, filterSize = filterSize_2, startFms = start_fms_2, maxFms = max_fms_2, add_adj_idcs = add_adj_idcs2, first_nn_arch = False, use_res_net=use_res_net2)
360 | 
361 | # third generator
362 | if not load_model_test_3 == -1:
363 |     x_in_3 = tf.concat((tf.reshape(y, shape = [-1, tileSizeHigh, tileSizeHigh, 1]), tf.image.resize_images(tf.reshape(x, shape = [-1, tileSizeLow, tileSizeLow, n_inputChannels]), tf.constant([tileSizeHigh, tileSizeHigh], dtype= tf.int32), method=1)), axis = 3)
364 |     with tf.variable_scope("gen_3", reuse=True) as scope:
365 |         sampler_3 = gen_model(x_in_3, use_batch_norm=batch_norm, reuse = tf.AUTO_REUSE, currentUpres = int(round(math.log(upRes, 2))), train=False, percentage = percentage, output = True, firstGen = False, filterSize = filterSize_3, startFms = start_fms_3, maxFms = max_fms_3, add_adj_idcs = add_adj_idcs3, first_nn_arch = False, use_res_net=use_res_net3)
366 | 
367 | if not load_model_test_1 == -1:
368 |     gen1vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="gen_1")
369 |     gen1dict = dict((var.name[6:len(var.name)-2],var) for var in gen1vars)
370 |     saver = tf.train.Saver(var_list = gen1dict)
371 |     saver.restore(sess, load_path_1)
372 |     print("Model 1 restored from %s." % load_path_1)
373 | 
374 | if not load_model_test_2 == -1:
375 |     gen2vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="gen_2")
376 |     gen2dict = dict((var.name[6:len(var.name)-2],var) for var in gen2vars)
377 |     saver = tf.train.Saver(var_list = gen2dict)
378 |     saver.restore(sess, load_path_2)
379 |     print("Model 2 restored from %s." % load_path_2)
380 | 
381 | if not load_model_test_3 == -1:
382 |     gen3vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="gen_3")
383 |     gen3dict = dict((var.name[6:len(var.name)-2],var) for var in gen3vars)
384 |     saver = tf.train.Saver(var_list = gen3dict)
385 |     saver.restore(sess, load_path_3)
386 |     print("Model 3 restored from %s." % load_path_3)
387 | 
388 | 
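The function below runs the volume through up to three 2D generators: the volume is cut into slices along one axis, each slice is upsampled by the 2D network, and the result is transposed so that the next pass refines an axis that is still low-resolution. A compact numpy sketch of that multi-pass idea (upsample_2d stands in for a trained generator and is hypothetical):

import numpy as np

def apply_slicewise(volume, upsample_2d, axis=0):
    # run a 2D operator on every slice along `axis`, then restore the axis order
    vol = np.moveaxis(volume, axis, 0)
    out = np.stack([upsample_2d(s) for s in vol], axis=0)
    return np.moveaxis(out, 0, axis)

# e.g. two passes: refine x-y slices along z first, then x-z slices along y
# vol = apply_slicewise(vol, net1, axis=0)
# vol = apply_slicewise(vol, net2, axis=1)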
389 | # for two different networks: the first upsamples two dimensions, the last one upsamples the remaining dimension
390 | def generate3DUniForNewNetwork(imageindex = 0, outPath = '../', inputPer = 3.0, head = None):
391 |     start = time.time()
392 |     dim_output = []
393 |     intermed_res1 = []
394 | 
395 |     batch_xs_tile = x_3d[imageindex]
396 | 
397 |     if not load_model_test_1 == -1:
398 |         # z y x -> 2d conv on y - x (or a different combination of axes, depending on transposeAxis)
399 |         # and switch velocity channels depending on orientation
400 |         if transposeAxis == 1:
401 |             batch_xs_in = np.reshape(scipy.ndimage.zoom(batch_xs_tile,[1,upRes,1,1] , order = 1, mode = 'constant', cval = 0.0), [-1, simSizeHigh, simSizeLow, n_inputChannels])
402 |             batch_xs_in = np.reshape(batch_xs_in.transpose(1,0,2,3),(-1, simSizeLow, simSizeLow, n_inputChannels))
403 |             temp_vel = np.copy(batch_xs_in[:,:,:,3:4])
404 |             batch_xs_in[:,:,:,3:4] = np.copy(batch_xs_in[:,:,:,2:3])
405 |             batch_xs_in[:,:,:,2:3] = np.copy(temp_vel)
406 |         elif transposeAxis == 2:
407 |             batch_xs_in = np.reshape(scipy.ndimage.zoom(batch_xs_tile,[1,1,upRes,1] , order = 1, mode = 'constant', cval = 0.0), [-1, simSizeLow, simSizeHigh, n_inputChannels])
408 |             batch_xs_in = np.reshape(batch_xs_in.transpose(2,1,0,3),(-1, simSizeLow, simSizeLow, n_inputChannels))
409 |             temp_vel = np.copy(batch_xs_in[:,:,:,3:4])
410 |             batch_xs_in[:,:,:,3:4] = np.copy(batch_xs_in[:,:,:,1:2])
411 |             batch_xs_in[:,:,:,1:2] = np.copy(temp_vel)
412 |         elif transposeAxis == 3:
413 |             batch_xs_in = np.reshape(scipy.ndimage.zoom(batch_xs_tile,[1,1,upRes,1] , order = 1, mode = 'constant', cval = 0.0), [-1, simSizeLow, simSizeHigh, n_inputChannels])
414 |             batch_xs_in = np.reshape(batch_xs_in.transpose(2,0,1,3),(-1, simSizeLow, simSizeLow, n_inputChannels))
415 |             temp_vel = np.copy(batch_xs_in[:,:,:,3:4])
416 |             temp_vel2 = np.copy(batch_xs_in[:,:,:,2:3])
417 |             batch_xs_in[:,:,:,3:4] = np.copy(batch_xs_in[:,:,:,1:2])
418 |             batch_xs_in[:,:,:,2:3] = np.copy(temp_vel)
419 |             batch_xs_in[:,:,:,1:2] = np.copy(temp_vel2)
420 |         else:
421 |             batch_xs_in = np.reshape(scipy.ndimage.zoom(batch_xs_tile,[upRes,1,1,1] , order = 1, mode = 'constant', cval = 0.0), [-1, simSizeLow, simSizeLow, n_inputChannels])
422 | 
423 |         if add_adj_idcs1:
424 |             batch_xs_in = np.concatenate((batch_xs_in, np.zeros_like(batch_xs_in[:,:,:,0:1])), axis= 3)
425 |             batch_xs_in = np.concatenate((batch_xs_in, np.zeros_like(batch_xs_in[:,:,:,0:1])), axis= 3)
426 | 
427 |             for i in range(batch_xs_in.shape[0]):
428 |                 if i == 0:
429 |                     batch_xs_in[i:i+1,:,:,n_inputChannels:n_inputChannels+1] = np.zeros_like(batch_xs_in[i:i+1,:,:,0:1])
430 |                     batch_xs_in[i:i+1,:,:,n_inputChannels+1:n_inputChannels+2] = batch_xs_in[i+1:i+2,:,:,0:1]
431 |                 elif i == batch_xs_in.shape[0]-1:
432 |                     batch_xs_in[i:i+1,:,:,n_inputChannels:n_inputChannels+1] = batch_xs_in[i-1:i,:,:,0:1]
433 |                     batch_xs_in[i:i+1,:,:,n_inputChannels+1:n_inputChannels+2]= np.zeros_like(batch_xs_in[i-1:i,:,:,0:1])
434 |                 else:
435 |                     batch_xs_in[i:i+1,:,:,n_inputChannels:n_inputChannels+1] = batch_xs_in[i-1:i,:,:,0:1]
436 |                     batch_xs_in[i:i+1,:,:,n_inputChannels+1:n_inputChannels+2] = batch_xs_in[i+1:i+2,:,:,0:1]
437 | 
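The loop above fills the two extra channels with the previous and the next density slice, zero-padded at the volume boundaries. The same thing can be written more compactly; a numpy sketch that mirrors the loop, reusing the arrays defined above (not taken from the repository):

d = batch_xs_in[:, :, :, 0:1]                                  # density channel, shape (slices, h, w, 1)
prev = np.concatenate([np.zeros_like(d[:1]), d[:-1]], axis=0)  # slice i-1, zeros for the first slice
nxt = np.concatenate([d[1:], np.zeros_like(d[:1])], axis=0)    # slice i+1, zeros for the last slice
# equivalent to the loop: batch_xs_in[..., n:n+1] = prev; batch_xs_in[..., n+1:n+2] = nxt, with n = n_inputChannels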
438 |         # start generating output of first network
439 |         batch_sz_out = 8
440 |         run_metadata = tf.RunMetadata()
441 | 
442 |         start = time.time()
443 |         for j in range(0,batch_xs_in.shape[0]//batch_sz_out):
444 |             # x in shape (z,y,x,c)
445 |             # -> 512 x 512 x 512
446 |             results = sess.run(sampler, feed_dict={x: batch_xs_in[j*batch_sz_out:(j+1)*batch_sz_out].reshape(-1, n_input), percentage : inputPer, train: False})
447 |             intermed_res1.extend(results)
448 | 
449 |             # exact timing of network performance...
450 |             if 0: # note: enabling this requires `from tensorflow.python.client import timeline`
451 |                 fetched_timeline = timeline.Timeline(run_metadata.step_stats)
452 |                 chrome_trace = fetched_timeline.generate_chrome_trace_format()
453 |                 with open('timeline_8x_%04d.json'%(j), 'w') as f:
454 |                     f.write(chrome_trace)
455 |         end = time.time()
456 | 
457 |         print("time for first network: {0:.6f}".format(end-start))
458 | 
459 |         dim_output = np.copy(np.array(intermed_res1).reshape(simSizeHigh, simSizeHigh, simSizeHigh)).transpose(2,1,0)
460 | 
461 |         save_img_3d( outPath + 'source_1st_{:04d}.png'.format(imageindex+frame_min), dim_output/80)
462 | 
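Whenever the branches in this function permute the spatial axes of the (z, y, x, c) input, they also swap the velocity channels: spatial axis a corresponds to velocity channel 3 - a, so the components must be reordered with the same permutation. The hard-coded swaps above and below all follow this one rule; a generic sketch (hypothetical helper, not part of the repo):

import numpy as np

def transpose_with_channels(vol, perm):
    # vol: (z, y, x, 4) with channels (density, vx, vy, vz); perm permutes (z, y, x)
    out = np.transpose(vol, perm + (3,)).copy()
    # output spatial axis k comes from input axis perm[k]; axis a maps to channel 3 - a
    src = [3 - perm[2 - i] for i in range(3)]  # source channels for (vx, vy, vz)
    out[..., 1:4] = out[..., src]
    return out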
np.zeros_like(batch_xs_in[:,:,:,0:2])), axis= 3) # two zero channels for the adjacent slices
489 | 
490 |             for i in range(batch_xs_in.shape[0]):
491 |                 if i == 0:
492 |                     batch_xs_in[i:i+1,:,:,n_inputChannels:n_inputChannels+1] = np.zeros_like(batch_xs_in[i:i+1,:,:,0:1])
493 |                     batch_xs_in[i:i+1,:,:,n_inputChannels+1:n_inputChannels+2] = batch_xs_in[i+1:i+2,:,:,0:1]
494 |                 elif i == batch_xs_in.shape[0]-1:
495 |                     batch_xs_in[i:i+1,:,:,n_inputChannels:n_inputChannels+1] = batch_xs_in[i-1:i,:,:,0:1]
496 |                     batch_xs_in[i:i+1,:,:,n_inputChannels+1:n_inputChannels+2]= np.zeros_like(batch_xs_in[i-1:i,:,:,0:1])
497 |                 else:
498 |                     batch_xs_in[i:i+1,:,:,n_inputChannels:n_inputChannels+1] = batch_xs_in[i-1:i,:,:,0:1]
499 |                     batch_xs_in[i:i+1,:,:,n_inputChannels+1:n_inputChannels+2] = batch_xs_in[i+1:i+2,:,:,0:1]
500 | 
501 |         intermed_res1 = []
502 |         batch_sz_out = 2
503 | 
504 |         start = time.time()
505 |         for j in range(0,batch_xs_in.shape[0]//batch_sz_out):
506 |             # x in shape (z,y,x,c)
507 |             # -> 64 x 256 x 256
508 |             results = sess.run(sampler_2, feed_dict={x: batch_xs_in[j*batch_sz_out:(j+1)*batch_sz_out].reshape(-1, n_input), y: dim_output[j*batch_sz_out:(j+1)*batch_sz_out].reshape(-1, n_output) ,percentage : inputPer, train: False})
509 |             intermed_res1.extend(results)
510 | 
511 |             # exact timing of network performance...
512 |             if 0:
513 |                 fetched_timeline = timeline.Timeline(run_metadata.step_stats)
514 |                 chrome_trace = fetched_timeline.generate_chrome_trace_format()
515 |                 with open('timeline_8x_%04d.json'%(j), 'w') as f:
516 |                     f.write(chrome_trace)
517 |         end = time.time()
518 | 
519 |         print("time for second network: {0:.6f}".format(end-start))
520 | 
521 |         dim_output = np.array(intermed_res1).reshape(simSizeHigh, simSizeHigh, simSizeHigh).transpose(1,2,0)
522 | 
523 |         save_img_3d( outPath + 'source_2nd_{:04d}.png'.format(imageindex+frame_min), dim_output/80)
524 | 
525 |     if not load_model_test_3 == -1:
526 |         if transposeAxis == 0:
527 |             batch_xs_in = np.reshape(scipy.ndimage.zoom(batch_xs_tile,[1,upRes,1,1] , order = 1, mode = 'constant', cval = 0.0), [-1, simSizeHigh, simSizeLow, n_inputChannels])
528 |             batch_xs_in = np.reshape(batch_xs_in.transpose(1,0,2,3),(-1, simSizeLow, simSizeLow, n_inputChannels))
529 |             temp_vel = np.copy(batch_xs_in[:,:,:,3:4])
530 |             batch_xs_in[:,:,:,3:4] = np.copy(batch_xs_in[:,:,:,2:3])
531 |             batch_xs_in[:,:,:,2:3] = np.copy(temp_vel)
532 |         elif transposeAxis == 3:
533 |             batch_xs_in = np.reshape(scipy.ndimage.zoom(batch_xs_tile,[1,1,upRes,1] , order = 1, mode = 'constant', cval = 0.0), [-1, simSizeLow, simSizeHigh, n_inputChannels])
534 |             batch_xs_in = np.reshape(batch_xs_in.transpose(0,2,1,3),(-1, simSizeLow, simSizeLow, n_inputChannels))
535 |             temp_vel = np.copy(batch_xs_in[:,:,:,2:3])
536 |             batch_xs_in[:,:,:,2:3] = np.copy(batch_xs_in[:,:,:,1:2])
537 |             batch_xs_in[:,:,:,1:2] = np.copy(temp_vel)
538 |         elif transposeAxis == 2:
539 |             batch_xs_in = np.reshape(scipy.ndimage.zoom(batch_xs_tile,[1,1,upRes,1] , order = 1, mode = 'constant', cval = 0.0), [-1, simSizeLow, simSizeHigh, n_inputChannels])
540 |             batch_xs_in = np.reshape(batch_xs_in.transpose(1,2,0,3),(-1, simSizeLow, simSizeLow, n_inputChannels))
541 |             temp_vel = np.copy(batch_xs_in[:,:,:,3:4])
542 |             temp_vel2 = np.copy(batch_xs_in[:,:,:,1:2])
543 |             batch_xs_in[:,:,:,3:4] = np.copy(batch_xs_in[:,:,:,2:3])
544 |             batch_xs_in[:,:,:,2:3] = np.copy(batch_xs_in[:,:,:,1:2])
545 |             batch_xs_in[:,:,:,1:2] = np.copy(temp_vel)
546 |         else:
547 |             batch_xs_in = np.reshape(scipy.ndimage.zoom(batch_xs_tile,[upRes,1,1,1] , order = 1, mode = 'constant', cval = 0.0), [-1, simSizeLow, simSizeLow, n_inputChannels])
548 | 
549 |         if add_adj_idcs3:
550 |             batch_xs_in = np.concatenate((batch_xs_in, np.zeros_like(batch_xs_in[:,:,:,0:2])), axis= 3) # two zero channels for the adjacent slices
551 | 
552 |             for i in range(batch_xs_in.shape[0]):
553 |                 if i == 0:
554 |                     batch_xs_in[i:i+1,:,:,n_inputChannels:n_inputChannels+1] = np.zeros_like(batch_xs_in[i:i+1,:,:,0:1])
555 |                     batch_xs_in[i:i+1,:,:,n_inputChannels+1:n_inputChannels+2] = batch_xs_in[i+1:i+2,:,:,0:1]
556 |                 elif i == batch_xs_in.shape[0]-1:
557 |                     batch_xs_in[i:i+1,:,:,n_inputChannels:n_inputChannels+1] = batch_xs_in[i-1:i,:,:,0:1]
558 |                     batch_xs_in[i:i+1,:,:,n_inputChannels+1:n_inputChannels+2]= np.zeros_like(batch_xs_in[i-1:i,:,:,0:1])
559 |                 else:
560 |                     batch_xs_in[i:i+1,:,:,n_inputChannels:n_inputChannels+1] = batch_xs_in[i-1:i,:,:,0:1]
561 |                     batch_xs_in[i:i+1,:,:,n_inputChannels+1:n_inputChannels+2] = batch_xs_in[i+1:i+2,:,:,0:1]
562 | 
563 |         intermed_res1 = []
564 |         batch_sz_out = 2
565 | 
566 |         start = time.time()
567 |         for j in range(0,batch_xs_in.shape[0]//batch_sz_out):
568 |             # x in shape (z,y,x,c)
569 |             # -> 64 x 256 x 256
570 |             results = sess.run(sampler_3, feed_dict={x: batch_xs_in[j*batch_sz_out:(j+1)*batch_sz_out].reshape(-1, n_input), y: dim_output[j*batch_sz_out:(j+1)*batch_sz_out].reshape(-1, n_output) ,percentage : inputPer, train: False})
571 |             intermed_res1.extend(results)
572 | 
573 |             # exact timing of network performance...
574 |             if 0:
575 |                 fetched_timeline = timeline.Timeline(run_metadata.step_stats)
576 |                 chrome_trace = fetched_timeline.generate_chrome_trace_format()
577 |                 with open('timeline_8x_%04d.json'%(j), 'w') as f:
578 |                     f.write(chrome_trace)
579 |         end = time.time()
580 | 
581 |         print("time for third network: {0:.6f}".format(end-start))
582 | 
583 |         dim_output = np.array(intermed_res1).reshape(simSizeHigh, simSizeHigh, simSizeHigh)
584 | 
585 |         save_img_3d( outPath + 'source_3rd_{:04d}.png'.format(imageindex+frame_min), dim_output/80)
586 | 
587 |     if not load_model_no_2 == -1:
588 |         dim_output = dim_output.transpose(2,0,1)
589 |     if not load_model_no_1 == -1:
590 |         dim_output = dim_output.transpose(2,1,0)
591 | 
592 |     # output for images of slices (along every dimension)
593 |     if 1:
594 |         for i in range(simSizeHigh // 2 - 1, simSizeHigh // 2 + 1):
595 |             if np.average(dim_output[i]) > 0.0001:
596 |                 save_img(outPath + 'slice_xy_{:04d}_{:04d}.png'.format(i,(imageindex+frame_min)), dim_output[i]) #.transpose(2,1,0)
597 |                 save_img(outPath + 'slice_yz_{:04d}_{:04d}.png'.format(i,(imageindex+frame_min)), dim_output.transpose(2,1,0)[i])
598 |                 save_img(outPath + 'slice_xz_{:04d}_{:04d}.png'.format(i,(imageindex+frame_min)), dim_output.transpose(1,0,2)[i])
599 |         if (imageindex + frame_min) == 110:
600 |             for i in range(0, tileSizeHigh):
601 |                 if np.average(dim_output[i]) > 0.0001:
602 |                     save_img(outPath + 'slice_xy_{:04d}_{:04d}.png'.format((imageindex+frame_min),i), dim_output[i]) #.transpose(2,1,0)
603 |                     save_img(outPath + 'slice_yz_{:04d}_{:04d}.png'.format((imageindex+frame_min),i), dim_output.transpose(2,1,0)[i])
604 |                     save_img(outPath + 'slice_xz_{:04d}_{:04d}.png'.format((imageindex+frame_min),i), dim_output.transpose(1,0,2)[i])
605 | 
606 |     if head is None:
607 |         head, _ = uniio.readUni(packedSimPath + "sim_%04d/density_low_%04d.uni"%(fromSim, 0))
608 |     head['dimX'] = simSizeHigh
609 |     head['dimY'] = simSizeHigh
610 |     head['dimZ'] = simSizeHigh
611 | 
612 |     if generateUni:
613 |         # set low density to zero to save storage space...
614 |         cond_out = dim_output < 0.0005
615 |         dim_output[cond_out] = 0
616 |         uniio.writeUni(packedSimPath + '/sim_%04d/source_%04d.uni'%(fromSim, imageindex+frame_min), head, dim_output)
617 |         print('stored .uni file')
618 |     return
619 | 
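generate3DUniForNewNetwork writes its result with the uniio helpers from ../tools_wscale/uniio.py, reusing the low-res header with bumped dimensions. The round-trip pattern, extracted from the calls above (the file names are illustrative):

head, content = uniio.readUni(packedSimPath + 'sim_1000/density_low_0000.uni')  # header dict + numpy array
head['dimX'] = head['dimY'] = head['dimZ'] = simSizeHigh                        # adjust to the upscaled size
uniio.writeUni(packedSimPath + 'sim_1000/source_0000.uni', head, dim_output)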
620 | print('*****OUTPUT ONLY*****')
621 | #print("{} tiles, {} tiles per image".format(100, 1))
622 | #print("Generating images (batch size: {}, batches: {})".format(1, 100))
623 | 
624 | if load_model_test_1 == -1 and load_model_test_2 == -1 and load_model_test_3 == -1:
625 |     print("At least one network has to be loaded.")
626 |     exit(1)
627 | 
628 | 
629 | head_0, _ = uniio.readUni(packedSimPath + "sim_%04d/density_low_%04d.uni"%(fromSim, 0))
630 | for layerno in range(frame_min,frame_max):
631 |     print(layerno)
632 |     generate3DUniForNewNetwork(imageindex = layerno - frame_min, outPath = test_path, head = head_0)
633 | 
634 | print('Test finished, %d pngs written to %s.' % (frame_max - frame_min, test_path) )
635 | 
636 | 
637 | 
638 | 
639 | 
640 | 
641 | 
642 | 
643 | 
644 | 
645 | 
646 | 
647 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 | 
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 | 1. Definitions.
8 | 
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 | 
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 | 
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 | 
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 | 
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 | 
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Multi-pass-GAN 2 | 3 | Public source code for the SCA paper "A Multi-Pass GAN for Fluid Flow Super-Resolution". Authors: Maximilian Werhahn, You Xie, MengYu Chu, Nils Thuerey. Technical University of Munich. 
4 | 
5 | Paper: https://arxiv.org/pdf/1906.01689.pdf,
6 | Video: https://www.youtube.com/watch?v=__WE22dB6AA
7 | 
8 | ![An example of our 8x model (low-res left, ours right)](resources/teaser5.png)
9 | 
10 | ## Requirements
11 | 
12 | tensorflow >= 1.10
13 | mantaflow for datagen
14 | 
15 | ## Directories
16 | `../datagen/`: data generation via mantaflow
17 | `../GAN/`: output + training + network files
18 | `../tools_wscale/`: helper functions, data loader, etc.
19 | 
20 | ## Compilation
21 | First, compile mantaflow with numpy support (as usual); follow
22 | http://mantaflow.com/install.html.
23 | The one difference is that numpy should be enabled in the CMake settings:
24 | "cmake .. -DGUI=ON -DOPENMP=ON -DNUMPY=ON".
25 | Note that if mantaflow is installed on a remote server, the GUI is not supported, i.e.:
26 | "cmake .. -DGUI=OFF -DOPENMP=ON -DNUMPY=ON".
27 | 
28 | ## Data Generation
29 | Either use the file `../datagen/gen_mul_data.py` or similar commands for the file `../datagen/gen_sim_grow_slices_data.py` to generate a dataset. It will be stored in `../data3d_growing/sim_%04d`.
30 | 
31 | ## Training
32 | Call `../GAN/example_run_training.py`
33 | 
34 | ## Applying models
35 | Call `../GAN/example_run_output.py`; pretrained models can be found in the branch "models"
--------------------------------------------------------------------------------
/datagen/gen_mul_data.py:
--------------------------------------------------------------------------------
1 | import os
2 | #import numpy as np
3 | 
4 | os.environ["CUDA_VISIBLE_DEVICES"]="0"
5 | 
6 | # for i in range(0,1):
7 | #     os.system('manta gen_sim_data_obstacle.py steps 200 saveuni 1 reset 1 obstacles 1 spheres 0 warmup 150')
8 | #     os.system('manta gen_sim_data_obstacle.py steps 200 saveuni 1 reset 1 obstacles 1 spheres 0 warmup 100')
9 | # for i in range(0,1):
10 | #     os.system('manta gen_sim_data_obstacle.py steps 200 saveuni 1 reset 1 obstacles 1 spheres 0 warmup 20')
11 | 
12 | #for i in range(0,1):
13 | #    os.system('manta gen_sim_data.py saveuni 1 reset 1 steps 200 gui 0 fac 8')
14 | 
15 | #for i in range(0,40):
16 | #    os.system('manta gen_data_wind_tunnel.py saveuni 1 gui 0')
17 | #for i in range(0,10):
18 | #    os.system('manta gen_sim_data.py saveuni 1 reset 1 steps 200 gui 0')
19 | #seeds = []
20 | #for j in range(40):
21 | #    seeds.append((np.random.randint(10000000)))
22 | #    for i in range(0,40):
23 | #        os.system('manta manta_genSimData_growing_obs.py maxUpRes 16 minUpRes 2 resetN 1 npSeed ' + str(int(seeds[i])))
24 | for i in range(0,4):
25 |     os.system('manta gen_sim_grow_slices_data.py maxUpRes 8 minUpRes 2 reset 1 saveuni 1 obstacles 1')
26 | 
27 | #for i in range(0,3):
28 | #    os.system('manta manta_genSimData3.py maxUpRes 8 minUpRes 2 resetN 1 npSeed ' + str(int(seeds[i+8])))
29 | 
--------------------------------------------------------------------------------
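The commented-out blocks in gen_mul_data.py show the intended pattern: draw a fresh seed per run and pass it on the command line. A small sketch of that pattern for the active command (the `seed` parameter is read by gen_sim_grow_slices_data.py below; the value range mirrors the script's own randint call):

import os
import numpy as np

for s in np.random.randint(0, 2147483647, size=4):
    os.system('manta gen_sim_grow_slices_data.py maxUpRes 8 minUpRes 2 reset 1 saveuni 1 obstacles 1 seed %d' % s)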
#****************************************************************************** 17 | 18 | from manta import * 19 | import os, shutil, math, sys, time 20 | from datetime import datetime 21 | import numpy as np 22 | sys.path.append("../tools") 23 | import paramhelpers as ph 24 | 25 | # Main params ----------------------------------------------------------------------# 26 | steps = 120 27 | simNo = 1000 # start ID 28 | showGui = 0 29 | basePath = '../data3d_growing/' 30 | npSeedstr = "-1" 31 | dim = 3 32 | 33 | # Solver params 34 | res = 64 35 | resetN = 1 36 | saveEveryK = 2 37 | targettimestep = 0.5 38 | wup = 10 + np.random.randint(6) 39 | 40 | # cmd line 41 | basePath = ph.getParam( "basepath", basePath ) 42 | npSeedstr = ph.getParam( "seed" , npSeedstr ) 43 | npSeed = int(npSeedstr) 44 | resetN = int(ph.getParam( "reset" , resetN)) 45 | dim = int(ph.getParam( "dim" , dim)) 46 | simMode = int(ph.getParam( "mode" , 1 )) # 1 = double sim, 2 = wlt 47 | savenpz = int(ph.getParam( "savenpz", False))>0 48 | saveuni = int(ph.getParam( "saveuni", True))>0 49 | saveppm = int(ph.getParam( "saveppm" , False))>0 50 | showGui = int(ph.getParam( "gui" , showGui)) 51 | res = int(ph.getParam( "res" , res)) 52 | steps = int(ph.getParam( "steps" , steps)) 53 | timeOffset = int(ph.getParam( "warmup" , wup)) # skip certain no of steps at beginning 54 | scaleFactorMin = int(ph.getParam( "minUpRes" , 2)) 55 | scaleFactorMax = int(ph.getParam( "maxUpRes" , 8)) 56 | useObstacles = int(ph.getParam( "obstacles" , False))>0 57 | ph.checkUnusedParams() 58 | doRecenter = False # re-center densities , disabled for now 59 | 60 | setDebugLevel(1) 61 | if not basePath.endswith("/"): basePath = basePath+"/" 62 | 63 | # Init solvers -------------------------------------------------------------------# 64 | sm_gs = vec3(res,res,res) 65 | xl_gs = sm_gs * float(scaleFactorMax) 66 | if (dim==2): xl_gs.z = sm_gs.z = 1 # 2D 67 | 68 | # solvers 69 | sms = [] 70 | for i in range(int(math.log(scaleFactorMax,2))): 71 | gs = sm_gs * (2**i) 72 | if dim == 2: gs.z = 1 73 | sms.append(Solver(name='smaller'+ str(i), gridSize = gs, dim=dim)) 74 | sms[i].timestep = targettimestep / saveEveryK 75 | # wlt Turbulence output fluid 76 | xl = Solver(name='larger', gridSize = xl_gs, dim=dim) 77 | xl.timestep = targettimestep / saveEveryK 78 | timings = Timings() 79 | 80 | fff=0.5 81 | buoyFac = vec3(-1.5 + np.random.rand() * 3.0, 2.5 + np.random.rand() * 3.5, -1.5 + np.random.rand() * 3.0) * 2.0 82 | if buoyFac.y < 2.5: 83 | buoyFac = vec3(0,0,0) # make sure we have some sims without buoyancy 84 | #buoyFac = 0.125 85 | buoy = vec3(0.0,-0.0005,0.0) * buoyFac / saveEveryK 86 | xl_buoys = [] 87 | for i in range(int(math.log(scaleFactorMax,2))): 88 | xl_buoys.append(buoy) # * vec3(1./scaleFactorMax * (2**i))) 89 | print("Buoyancy: " + format(xl_buoys[i]) +", factor " + str(buoyFac)) 90 | print("Buoyancy: " + format(buoy) +", factor " + str(buoyFac)) 91 | # xl_buoy = buoy * vec3(1./scaleFactor) 92 | 93 | if savenpz or saveuni or saveppm: 94 | folderNo = simNo 95 | simPath,simNo = ph.getNextSimPath(simNo, basePath) 96 | 97 | # add some more info for json file 98 | ph.paramDict["simNo"] = simNo 99 | ph.paramDict["type"] = "smoke" 100 | ph.paramDict["dt"] = 0.5 101 | ph.paramDict["buoyX"] = buoy.x 102 | ph.paramDict["buoyY"] = buoy.y 103 | ph.paramDict["buoyZ"] = buoy.z 104 | ph.paramDict["seed"] = npSeed 105 | ph.paramDict["name"] = "gen6combined" 106 | ph.paramDict["version"] = printBuildInfo() 107 | ph.paramDict["creation_date"] = 
datetime.now().strftime("%Y-%m-%d %H:%M:%S") 108 | ph.writeParams(simPath + "description.json") # export sim parameters 109 | 110 | sys.stdout = ph.Logger(simPath) 111 | print("Called on machine '"+ sys.platform[1] +"' with: " + str(" ".join(sys.argv) ) ) 112 | print("Saving to "+simPath+", "+str(simNo)) 113 | # optional , backupFile(__file__, simPath) 114 | 115 | if(npSeed<0): 116 | npSeed = np.random.randint(0, 2147483647 ) 117 | print("Random seed %d" % npSeed) 118 | np.random.seed(npSeed) 119 | 120 | # Simulation Grids -------------------------------------------------------------------# 121 | xl_flags = xl.create(FlagGrid) 122 | xl_vel = xl.create(MACGrid) 123 | xl_density = xl.create(RealGrid) 124 | xl_velTmp = xl.create(MACGrid) 125 | xl_tmp = xl.create(RealGrid) 126 | xl_phiObs = xl.create(LevelsetGrid) 127 | 128 | # for domain centering 129 | xl_velRecenter = xl.create(MACGrid) 130 | phiObs = sms[0].create(LevelsetGrid) 131 | velRecenters = [] 132 | flags = [] 133 | vel = [] 134 | velTmp = [] 135 | density = [] 136 | tmp = [] 137 | obstacles = [] 138 | xl_obstacles = [] 139 | 140 | bWidth=0 141 | 142 | for i in range(int(math.log(scaleFactorMax,2))): 143 | flags.append(sms[i].create(FlagGrid)) 144 | flags[i].initDomain(boundaryWidth=bWidth * 2**i) 145 | 146 | if useObstacles: 147 | obstacles = [] 148 | xl_obstacles = [] 149 | # init obstacles 150 | min_p = res//8 151 | max_p = res//8*7 152 | min_v = 2 153 | max_v = res//32 * 6 154 | num = np.random.randint(7,15) 155 | for i in range(num): 156 | rect = np.random.randint(0,2) 157 | rand_p = vec3(np.random.randint(min_p,max_p), np.random.randint(min_p,max_p) + res//16*3, np.random.randint(min_p,max_p)) 158 | rand_s = vec3(np.random.randint(min_v,max_v), np.random.randint(min_v,max_v), np.random.randint(min_v,max_v)) 159 | 160 | if rect: 161 | obstacles.append(Box( parent=sms[0],p0=rand_p - rand_s / 2, p1 = rand_p + rand_s / 2)) 162 | xl_obstacles.append(Box( parent=xl,p0=scaleFactorMax * (rand_p - rand_s / 2), p1 = scaleFactorMax * (rand_p + rand_s / 2))) 163 | else: 164 | rand_s = np.random.randint(min_v,max_v-1) 165 | obstacles.append(Sphere(parent=sms[0], center=rand_p, radius=rand_s)) 166 | xl_obstacles.append(Sphere(parent=xl, center=scaleFactorMax*rand_p, radius=scaleFactorMax * rand_s)) 167 | 168 | for i in range (0, len(obstacles)): 169 | phiObs.join(obstacles[i].computeLevelset()) 170 | 171 | for i in range (0, len(xl_obstacles)): 172 | xl_phiObs.join(xl_obstacles[i].computeLevelset()) 173 | 174 | setObstacleFlags(flags=flags[0], phiObs=phiObs) 175 | setObstacleFlags(flags=xl_flags, phiObs=xl_phiObs) 176 | 177 | for i in range(int(math.log(scaleFactorMax,2))): 178 | density.append(sms[i].create(RealGrid)) 179 | velRecenters.append(sms[i].create(MACGrid)) 180 | vel.append(sms[i].create(MACGrid)) 181 | tmp.append(sms[i].create(RealGrid)) 182 | flags[i].fillGrid() 183 | 184 | xl_flags = xl.create(FlagGrid) 185 | xl_vel = xl.create(MACGrid) 186 | xl_velTmp = xl.create(MACGrid) 187 | xl_density = xl.create(RealGrid) 188 | xl_flags.initDomain(boundaryWidth=bWidth*scaleFactorMax) 189 | xl_flags.fillGrid() 190 | 191 | boundaries = np.random.randint(4) 192 | print(boundaries) 193 | for i in range(int(math.log(scaleFactorMax,2))): 194 | ## if boundaries == 1: 195 | # setOpenBound(flags[i], bWidth * 2**i,'xy',FlagOutflow|FlagEmpty) 196 | # elif boundaries == 2: 197 | # setOpenBound(flags[i], bWidth* 2**i,'xX',FlagOutflow|FlagEmpty) 198 | # else: 199 | setOpenBound(flags[i], bWidth * 2**i,'xXyYzZ',FlagOutflow|FlagEmpty) 200 | 201 | #if 
boundaries == 1: 202 | # setOpenBound(xl_flags, bWidth*scaleFactorMax,'xy',FlagOutflow|FlagEmpty) 203 | #elif boundaries == 2: 204 | # setOpenBound(xl_flags, bWidth*scaleFactorMax,'xX',FlagOutflow|FlagEmpty) 205 | #else: 206 | setOpenBound(xl_flags, bWidth*scaleFactorMax,'xXyYzZ',FlagOutflow|FlagEmpty) 207 | 208 | # wavelet turbulence octaves 209 | 210 | wltnoise = NoiseField( parent=xl, loadFromFile=True) 211 | # scale according to lowres sim , smaller numbers mean larger vortices 212 | wltnoise.posScale = vec3( int(1.0*sm_gs.x) ) * 0.5 213 | wltnoise.timeAnim = 0.05 214 | 215 | wltnoise2 = NoiseField( parent=xl, loadFromFile=True) 216 | wltnoise2.posScale = wltnoise.posScale * 2.0 217 | wltnoise2.timeAnim = 0.02 218 | 219 | wltnoise3 = NoiseField( parent=xl, loadFromFile=True) 220 | wltnoise3.posScale = wltnoise2.posScale * 2.0 221 | wltnoise3.timeAnim = 0.03 222 | 223 | # inflow sources ----------------------------------------------------------------------# 224 | 225 | # init random density 226 | sources = [] 227 | noise = [] # xl 228 | sourSm = [] 229 | noiSm = [] # sm 230 | inflowSrc = [] # list of IDs to use as continuous density inflows 231 | 232 | noiseN = 18 233 | #noiseN = 1 234 | nseeds = np.random.randint(10000,size=noiseN) 235 | 236 | cpos = vec3(0.5,0.25,0.5) 237 | 238 | randoms = np.random.rand(noiseN, 10) 239 | random_scales = np.random.rand(noiseN, 1) 240 | for nI in range(noiseN): 241 | if random_scales[nI] > 0.15: 242 | random_scales[nI] = 1.0 + np.random.rand()*0.25 243 | else: 244 | random_scales[nI] = np.random.rand()*0.9 + 0.35 245 | 246 | rand_val = 0.5 + np.random.rand()*0.75 247 | noise.append( xl.create(NoiseField, fixedSeed= int(nseeds[nI]), loadFromFile=True) ) 248 | #noise[nI].posScale = vec3( res * (0.02 + 0.055 * np.random.rand()) * (randoms[nI][7] + 1) ) * (float(scaleFactorMax)) 249 | noise[nI].posScale = vec3( res * 0.1 * (randoms[nI][7] + 1) ) * ( float(scaleFactorMax)) 250 | noise[nI].clamp = True 251 | noise[nI].clampNeg = -np.random.rand() * 0.1 252 | noise[nI].clampPos = rand_val + (1.25-rand_val) * np.random.rand() 253 | noise[nI].valScale = 1.0 254 | #noise[nI].valOffset = 0.5 * randoms[nI][9] * (1.0 + np.random.rand()) 255 | noise[nI].valOffset = 0.5 * randoms[nI][9] 256 | noise[nI].timeAnim = 0.25 + np.random.rand() * 0.1 257 | noise[nI].posOffset = vec3(1.5) 258 | 259 | for i in range(int(math.log(scaleFactorMax,2))): 260 | noiSm.append([]) 261 | noiSm[i].append( sms[i].create(NoiseField, fixedSeed= int(nseeds[nI]), loadFromFile=True) ) 262 | noiSm[i][nI].timeAnim = noise[nI].timeAnim / ( float(scaleFactorMax) * 2**i) 263 | noiSm[i][nI].posOffset = vec3(1.5) 264 | noiSm[i][nI].posScale = noise[nI].posScale #vec3( res * 0.1 * (randoms[nI][7] + 1) ) * ( float(2**i)) # noise[nI].posScale 265 | noiSm[i][nI].clamp = noise[nI].clamp 266 | noiSm[i][nI].clampNeg = noise[nI].clampNeg 267 | noiSm[i][nI].clampPos = noise[nI].clampPos 268 | noiSm[i][nI].valScale = noise[nI].valScale 269 | noiSm[i][nI].valOffset= noise[nI].valOffset 270 | 271 | # random offsets 272 | coff = vec3(0.45,0.3,0.45) * (vec3( randoms[nI][0], randoms[nI][1], randoms[nI][2] ) - vec3(0.5)) 273 | radius_rand = 0.05 + 0.05 * randoms[nI][3] 274 | if radius_rand > 0.14: 275 | radius_rand *= (1.0 + np.random.rand() * 0.325) 276 | 277 | upz = vec3(0.95)+ vec3(0.1) * vec3( randoms[nI][4], randoms[nI][5], randoms[nI][6] ) 278 | 279 | if 1 and randoms[nI][8] > 0.5: # turn into inflow? 
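# sources selected here get their vertical offset reduced and are registered
# in inflowSrc; the main simulation loop below re-seeds their density every
# step, turning them into continuous inflows rather than one-time initial seeds.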
280 | if coff.y > -0.2: 281 | coff.y += -0.1 282 | coff.y *= 0.5 283 | inflowSrc.append(nI) 284 | 285 | if(dim == 2): 286 | coff.z = 0.0 287 | upz.z = 1.0 288 | if np.random.randint(2): 289 | sources.append(xl.create(Sphere, center=xl_gs*(cpos+coff), radius=xl_gs.x*radius_rand, scale=upz)) 290 | else: 291 | sources.append(xl.create(Cylinder, center=xl_gs*(cpos+coff), radius=xl_gs.x*radius_rand, z=gs*vec3(0.05 - 0.1 * np.random.rand(), 0.05 - 0.1 * np.random.rand(), 0.05 - 0.1 * np.random.rand()))) 292 | 293 | for i in range(int(math.log(scaleFactorMax,2))): 294 | sourSm.append([]) 295 | sourSm[i].append( sms[i].create(Sphere, center=sm_gs*(cpos+coff)*2**i, radius=sm_gs.x*radius_rand*2**i, scale=upz)) 296 | 297 | print (nI, "centre", xl_gs*(cpos+coff), "radius", xl_gs.x*radius_rand, "other", upz ) 298 | densityInflow( flags=xl_flags, density=xl_density, noise=noise[nI], shape=sources[nI], scale=1.0, sigma=1.0 ) 299 | for i in range(int(math.log(scaleFactorMax,2))): 300 | densityInflow( flags=flags[i], density=density[i], noise=noiSm[i][nI], shape=sourSm[i][nI], scale=1.0, sigma=1.0 ) 301 | 302 | # init random velocities 303 | 304 | inivel_sources = [] 305 | inivel_vels = [] 306 | inivel_sourcesSm = [] 307 | inivel_velsSm = [] 308 | if 1: # from fluidnet 309 | c = 3 + np.random.randint(3) # "sub" mode 310 | xgs = xl_gs 311 | if 1: 312 | # 3..5 - ini vel sources 313 | if c==3: numFac = 2; sizeFac = 0.7; 314 | if c==4: numFac = 3; sizeFac = 0.5; 315 | if c==5: numFac = 5; sizeFac = 0.3; 316 | numNs = int( numFac * float(dim) ) 317 | for ns in range(numNs): 318 | p = [0.5,0.5,0.5] 319 | Vrand = np.random.rand(10) 320 | for i in range(3): 321 | p[i] += (Vrand[0+i]-0.5) * 0.6 322 | p = Vec3(p[0],p[1],p[2]) 323 | size = ( 0.06 + 0.14*Vrand[3] ) * sizeFac 324 | 325 | v = [0.,0.,0.] 326 | for i in range(3): 327 | v[i] -= (Vrand[0+i]-0.5) * 0.6 * 2. # invert pos offset , towards middle 328 | v[i] += (Vrand[4+i]-0.5) * 0.3 # randomize a bit, parametrized for 64 base 329 | v = Vec3(v[0],v[1],v[2]) 330 | v = v*0.9*0.325 # tweaking 331 | v = v*(1. + 0.5*Vrand[7] ) # increase by up to 50% 332 | v *= float(scaleFactorMax) 333 | 334 | print( "IniVel Pos " + format(p) + ", size " + format(size) + ", vel " + format(v) ) 335 | sourceV = xl.create(Sphere, center=xgs*p, radius=xgs.x*size, scale=vec3(1)) 336 | inivel_sources.append(sourceV) 337 | inivel_vels.append(v) 338 | for i in range(int(math.log(scaleFactorMax,2))): 339 | sourceVsm = sms[i].create(Sphere, center=sm_gs*p*2**i, radius=sm_gs.x*size*2**i, scale=vec3(1)) 340 | inivel_sourcesSm.append([]) 341 | inivel_sourcesSm[i].append(sourceVsm) 342 | inivel_velsSm.append([]) 343 | inivel_velsSm[i].append(v)# * (1./scaleFactorMax) * 2**i) 344 | 345 | blurSigs = [] 346 | for i in range(int(math.log(scaleFactorMax,2))): 347 | blurSigs.append( float(scaleFactorMax) / (2**i) / 3.544908) # 3.544908 = 2 * sqrt( PI ) 348 | #xl_blurden.copyFrom( xl_density ) 349 | #blurRealGrid( xl_density, xl_blurden, blurSigs[i]) 350 | #interpolateGrid( target=density[i], source=xl_blurden ) 351 | 352 | xl_velTmp.copyFrom( xl_vel ) 353 | blurMacGrid( xl_vel, xl_velTmp, blurSigs[i]) 354 | interpolateMACGrid( target=vel[i], source=xl_velTmp ) 355 | vel[i].multConst( vec3(1./scaleFactorMax*(2**i)) * saveEveryK ) 356 | 357 | # wlt params ---------------------------------------------------------------------# 358 | 359 | if simMode==2: 360 | wltStrength = 0.8 361 | if resetN==1: 362 | print("Warning!!!!!!!!!!!!!! 
Using resetN=1 for WLT doesnt make much sense, resetting to never") 363 | resetN = 99999 364 | 365 | def calcCOM(dens): 366 | velOffsets = [] 367 | if doRecenter: 368 | newCentre = calcCenterOfMass(xl_density) 369 | #mantaMsg( "Current moff "+str(newCentre) ) 370 | xl_velOffset = xl_gs*float(0.5) - newCentre 371 | xl_velOffset = xl_velOffset * (1./ xl.timestep) 372 | 373 | for i in range(int(math.log(scaleFactorMax,2))): 374 | velOffsets.append(xl_velOffset * (1./ float(scaleFactorMax) * 2 ** i)) 375 | if(dim == 2): 376 | xl_velOffset.z = velOffsets[i].z = 0.0 377 | else: 378 | for i in range(int(math.log(scaleFactorMax,2))): 379 | velOffsets.append(vec3(0.0)) 380 | xl_velOffset = vec3(0.0) 381 | 382 | return velOffsets, xl_velOffset 383 | 384 | # Setup UI ---------------------------------------------------------------------# 385 | if (showGui and GUI): 386 | gui=Gui() 387 | gui.show() 388 | #gui.pause() 389 | 390 | t = 0 391 | doPrinttime = False 392 | 393 | # main loop --------------------------------------------------------------------# 394 | while t < steps*saveEveryK+timeOffset*saveEveryK: 395 | curt = t * xl.timestep 396 | sys.stdout.write( "Current sim time t: " + str(curt) +" \n" ) 397 | #density.setConst(0.); xl_density.setConst(0.); # debug reset 398 | 399 | if doPrinttime: 400 | starttime = time.time() 401 | print("starttime: %2f" % starttime) 402 | 403 | # --------------------------------------------------------------------# 404 | if simMode==1: 405 | velOffsets, xl_velOffset = calcCOM(xl_density) 406 | 407 | if 1 and len(inflowSrc)>0: 408 | # note - the density inflows currently move with the offsets! 409 | for nI in inflowSrc: 410 | #for i in range(int(math.log(scaleFactorMax,2))): 411 | # densityInflow( flags=flags[i], density=density[i], noise=noiSm[i][nI], shape=sourSm[i][nI], scale=1.0, sigma=1.0 ) 412 | densityInflow( flags=xl_flags, density=xl_density, noise=noise[nI], shape=sources[nI], scale=random_scales[nI][0], sigma=1.0 ) 413 | if t < timeOffset*saveEveryK: 414 | sources[i].applyToGrid( grid=xl_vel , value=inivel_vels[i]*1.5) 415 | 416 | #xl_flags.applyToGrid(xl_density,TypeObstacle, value =0.0) 417 | # high res fluid 418 | advectSemiLagrange(flags=xl_flags, vel=xl_vel, grid=xl_vel, order=2, clampMode=2, openBounds=True, boundaryWidth=bWidth) 419 | setWallBcs(flags=xl_flags, vel=xl_vel, phiObs=xl_phiObs) 420 | addBuoyancy(density=xl_density, vel=xl_vel, gravity=buoy , flags=xl_flags) 421 | if 1: 422 | for i in range(len(inivel_sources)): 423 | inivel_sources[i].applyToGrid( grid=xl_vel , value=inivel_vels[i] ) 424 | if 1 and ( t< (timeOffset+5) ): 425 | vorticityConfinement( vel=xl_vel, flags=xl_flags, strength=0.035 ) 426 | 427 | solvePressure(flags=xl_flags, vel=xl_vel, pressure=xl_tmp , preconditioner=PcMGStatic ) 428 | setWallBcs(flags=xl_flags, vel=xl_vel, phiObs=xl_phiObs) 429 | xl_velRecenter.copyFrom( xl_vel ) 430 | #xl_velRecenter.addConst( xl_velOffset ) 431 | if( dim == 2 ): 432 | xl_vel.multConst( vec3(1.0,1.0,0.0) ) 433 | xl_velRecenter.multConst( vec3(1.0,1.0,0.0) ) 434 | advectSemiLagrange(flags=xl_flags, vel=xl_vel, grid=xl_density, order=2, clampMode=2, openBounds=True, boundaryWidth=bWidth) 435 | 436 | # low res fluid, velocity 437 | if( t % resetN == 0) : 438 | for i in range(int(math.log(scaleFactorMax,2))): 439 | if i == 0: 440 | xl_velTmp.copyFrom( xl_vel ) 441 | blurMacGrid( xl_vel, xl_velTmp, blurSigs[i]) 442 | interpolateMACGrid( target=vel[i], source=xl_velTmp ) 443 | vel[i].multConst( vec3(1./scaleFactorMax) * 2**i * saveEveryK) 444 | 
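# the (1./scaleFactorMax * 2**i) factor converts the blurred hi-res velocity
# into the coarser level's cell units; saveEveryK compensates for the reduced
# solver timestep (targettimestep / saveEveryK) set at initialization.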
#flags[i].applyToGrid(vel[i], TypeObstacle, value =vec3(0.0)) 445 | else: 446 | for i in range(int(math.log(scaleFactorMax,2))): 447 | advectSemiLagrange(flags=flags[i], vel=vel[i], grid=vel[i], order=2, clampMode=2, openBounds=True, boundaryWidth=bWidth) 448 | setWallBcs(flags=flags[i], vel=vel[i]) 449 | addBuoyancy(density=density[i], vel=vel[i], gravity=xl_buoys[i] , flags=flags[i]) 450 | if 1: 451 | for j in range(len(inivel_sourcesSm[0])): 452 | inivel_sourcesSm[i][j].applyToGrid( grid=vel[i] , value=inivel_velsSm[i][j] ) 453 | if 0 and ( t< timeOffset-10 ): 454 | vorticityConfinement( vel=vel[i], flags=flags[i], strength=0.05/scaleFactorMax * 2**i ) 455 | solvePressure(flags=flags[i], vel=vel[i], pressure=tmp[i], preconditioner=PcMGStatic ) 456 | setWallBcs(flags=flags[i], vel=vel[i]) 457 | 458 | #for i in range(int(math.log(scaleFactorMax,2))): 459 | #velRecenters[i].copyFrom(vel[i]) 460 | #velRecenters[i].addConst( velOffsets[i] ) 461 | 462 | #KEpsilonBcs(flags=flags,k=k,eps=eps,intensity=intensity, nu = nu,fillArea=False) 463 | #advectSemiLagrange(flags=flags, vel=vel, grid=k, order=1) 464 | #advectSemiLagrange(flags=flags, vel=vel, grid=eps, order=1) 465 | #KEpsilonBcs(flags=flags,k=k,eps=eps,intensity=intensity, nu = nu,fillArea=False) 466 | #KEpsilonComputeProduction(vel=vel, k=k, eps=eps, prod=prod, nuT=nuT, strain=strain, pscale=prodMult) 467 | #KEpsilonSources(k=k, eps=eps, prod=prod) 468 | #KEpsilonGradientDiffusion(k=k, eps=eps, vel=vel, nuT=nuT, sigmaU=10.0); 469 | # low res fluid, density 470 | if( t % resetN == 0) : 471 | for i in range(int(math.log(scaleFactorMax,2))-1,-1,-1): 472 | xl_tmp.copyFrom( xl_density ) 473 | blurRealGrid( xl_density, xl_tmp, blurSigs[i]) 474 | interpolateGrid( target=density[i], source=xl_tmp ) 475 | #flags[].applyToGrid(density[i], TypeObstacle, value = 0.0) 476 | else: 477 | for i in range(int(math.log(scaleFactorMax,2))): 478 | advectSemiLagrange(flags=flags[i], vel=vel[i], grid=density[i], order=2, clampMode=2, openBounds=True, boundaryWidth=bWidth) 479 | 480 | # --------------------------------------------------------------------# 481 | elif simMode==2: 482 | # low res fluid, density 483 | if( t % resetN == 0) : 484 | xl_tmp.copyFrom( xl_density ) 485 | blurRealGrid( xl_density, xl_tmp, blurSig) 486 | interpolateGrid( target=density, source=xl_tmp ) 487 | 488 | advectSemiLagrange(flags=flags, vel=velRecenter, grid=density, order=2, clampMode=2) 489 | if t<=1: velRecenter.copyFrom(vel); # special , center only density once, leave vel untouched 490 | advectSemiLagrange(flags=flags, vel=velRecenter, grid=vel, order=2, clampMode=2, openBounds=True, boundaryWidth=bWidth ) 491 | 492 | if 1 and len(inflowSrc)>0: 493 | # note - the density inflows currently move with the offsets! 
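# NOTE: this wavelet-turbulence branch still uses the single-grid names of the
# original tempoGAN script (density, flags, vel, tmp, velRecenter, blurSig,
# scaleFactor); with the multi-scale lists defined above, simMode=2 would need
# these references updated before it can run.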
494 | for nI in inflowSrc: 495 | densityInflow( flags=xl_flags, density=xl_density, noise=noise[nI], shape=sources[nI], scale=1.0, sigma=1.0 ) 496 | densityInflow( flags=flags, density=density, noise=noiSm[nI], shape=sourSm[nI], scale=1.0, sigma=1.0 ) 497 | 498 | setWallBcs(flags=flags, vel=vel) 499 | addBuoyancy(density=density, vel=vel, gravity=buoy , flags=flags) 500 | if 1: 501 | for i in range(len(inivel_sourcesSm)): 502 | inivel_sourcesSm[i].applyToGrid( grid=vel , value=inivel_velsSm[i] ) 503 | 504 | vorticityConfinement( vel=vel, flags=flags, strength=0.1 ) 505 | 506 | solvePressure(flags=flags, vel=vel, pressure=tmp , cgMaxIterFac=5.0, cgAccuracy=0.001, preconditioner=PcMGDynamic ) 507 | setWallBcs(flags=flags, vel=vel) 508 | 509 | computeEnergy(flags=flags, vel=vel, energy=tmp) 510 | computeWaveletCoeffs(tmp) 511 | 512 | # xl solver, update up-res'ed grids ... 513 | 514 | # new centre of mass , from XL density 515 | velOffset , xl_velOffset = calcCOM(xl_density) 516 | xl_velOffset = velOffset # note - hires advection does "scaleFac" substeps below! -> same offset 517 | 518 | if 1 and len(inflowSrc)>0: 519 | velOffset *= 0.5; xl_velOffset *= 0.5; # re-centering reduced 520 | 521 | # high res sim 522 | 523 | interpolateGrid( target=xl_tmp, source=tmp ) 524 | interpolateMACGrid( source=vel, target=xl_vel ) 525 | 526 | applyNoiseVec3( flags=xl_flags, target=xl_vel, noise=wltnoise, scale=wltStrength*1.0 , weight=xl_tmp) 527 | # manually weight and apply further octaves 528 | applyNoiseVec3( flags=xl_flags, target=xl_vel, noise=wltnoise2, scale=wltStrength*0.8 , weight=xl_tmp) 529 | applyNoiseVec3( flags=xl_flags, target=xl_vel, noise=wltnoise3, scale=wltStrength*0.8*0.8 , weight=xl_tmp) 530 | 531 | xl_velRecenter.copyFrom( xl_vel ) 532 | xl_velRecenter.addConst( xl_velOffset ) 533 | if( dim == 2 ): 534 | xl_velRecenter.multConst( vec3(1.0,1.0,0.0) ) 535 | 536 | for substep in range(scaleFactor): 537 | advectSemiLagrange(flags=xl_flags, vel=xl_velRecenter, grid=xl_density, order=2, clampMode=2) 538 | 539 | velRecenter.copyFrom(vel) 540 | velRecenter.addConst( velOffset ) 541 | if( dim == 2 ): 542 | velRecenter.multConst( vec3(1.0,1.0,0.0) ) 543 | else: 544 | print("Unknown sim mode!"); exit(1) 545 | 546 | if doPrinttime: 547 | endtime = time.time() 548 | print("endtime: %2f" % endtime) 549 | print("runtime: %2f" % (endtime-starttime)) 550 | 551 | # --------------------------------------------------------------------# 552 | 553 | # save low and high res 554 | # save all frames 555 | if t>=timeOffset*saveEveryK and t%saveEveryK == 0: 556 | tf = (t/saveEveryK-timeOffset) 557 | if savenpz: 558 | print("Writing NPZs for frame %d"%tf) 559 | copyGridToArrayReal( target=sm_arR, source=density ) 560 | np.savez_compressed( simPath + 'density_low_%04d.npz' % (tf), sm_arR ) 561 | copyGridToArrayVec3( target=sm_arV, source=vel ) 562 | np.savez_compressed( simPath + 'velocity_low_%04d.npz' % (tf), sm_arV ) 563 | copyGridToArrayReal( target=xl_arR, source=xl_density ) 564 | np.savez_compressed( simPath + 'density_high_%04d.npz' % (tf), xl_arR ) 565 | copyGridToArrayVec3( target=xl_arV, source=xl_vel ) 566 | np.savez_compressed( simPath + 'velocity_high_%04d.npz' % (tf), xl_arV ) 567 | if saveuni: 568 | print("Writing UNIs for frame %d"%tf) 569 | xl_density.save(simPath + 'density_high_%04d.uni' % (tf)) 570 | xl_flags.save(simPath + 'flags_high_%04d.uni' % (tf)) 571 | 572 | for i in range(int(math.log(scaleFactorMax,2))): 573 | if i == 0: 574 | density[i].save(simPath + 'density_low_%04d.uni'% (tf)) 
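# the finest low-res level (i == 0) also stores velocity, flags and the
# obstacle levelset next to its density; the coarser levels below only keep
# their density fields.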
575 | vel[i].save(simPath + 'velocity_low_%04d.uni'% (tf) ) 576 | flags[i].save(simPath + 'flags_low_%04d.uni' % (tf)) 577 | phiObs.save(simPath + 'levelset_low_%04d.uni' % (tf)) 578 | else: 579 | density[i].save(simPath + 'density_low_%i_%04d.uni' % (2**i,tf)) 580 | #vel[i].save( simPath + 'velocity_low_%i_%04d.uni' % (2**i,tf)) 581 | #flags[i].save(simPath + 'flags_low_%i_%04d.uni' % (2**i,tf)) 582 | 583 | if(saveppm): 584 | print("Writing ppms for frame %d"%tf) 585 | projectPpmFull( xl_density, simPath + 'density_high_%04d.ppm' % (tf), 0, 5.0 ) 586 | projectPpmFull( density, simPath + 'density_low_%04d.ppm' % (tf), 0, 5.0 ) 587 | for i in range(len(sms)): 588 | sms[i].step() 589 | xl.step() 590 | #gui.screenshot( 'out_%04d.jpg' % t ) 591 | timings.display() 592 | t = t+1 593 | 594 | 595 | -------------------------------------------------------------------------------- /resources/in175b.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxwerhahn/Multi-pass-GAN/9d5e86889c3c62bab3d217f76762a68f871db835/resources/in175b.jpg -------------------------------------------------------------------------------- /resources/out_2_175b.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxwerhahn/Multi-pass-GAN/9d5e86889c3c62bab3d217f76762a68f871db835/resources/out_2_175b.jpg -------------------------------------------------------------------------------- /resources/teaser5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maxwerhahn/Multi-pass-GAN/9d5e86889c3c62bab3d217f76762a68f871db835/resources/teaser5.png -------------------------------------------------------------------------------- /tools_wscale/GAN.py: -------------------------------------------------------------------------------- 1 | #****************************************************************************** 2 | # 3 | # tempoGAN: A Temporally Coherent, Volumetric GAN for Super-resolution Fluid Flow 4 | # Copyright 2018 You Xie, Erik Franz, Mengyu Chu, Nils Thuerey, Maximilian Werhahn 5 | # 6 | # This program is free software, distributed under the terms of the 7 | # Apache License, Version 2.0 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | #****************************************************************************** 11 | 12 | import numpy as np 13 | import tensorflow as tf 14 | import sys 15 | import math 16 | from keras import backend as kb 17 | class GAN(object): 18 | #--------------------------------------------------------------------------------- 19 | def __init__(self, _image, bn_decay=0.999): 20 | self.layer = _image 21 | self.batch_size = tf.shape(_image)[0] 22 | self.DOFs = 0 23 | # stack 24 | self.preFlatShapes = [] 25 | self.weight_stack = [] 26 | self.layer_num = 0 27 | self.layer_num_gen = 0 28 | self.layer_num_disc = 0 29 | 30 | self.bn_decay=bn_decay 31 | 32 | self.dtypeF = tf.float32 # tf.float32 33 | self.dtypeI = tf.int32 # tf.int16 # 34 | 35 | print("Input: {}".format(self.layer.get_shape())) 36 | 37 | #--------------------------------------------------------------------------------- 38 | # thanks to http://robromijnders.github.io/tensorflow_basic/ 39 | def weight_image(self): 40 | W = self.weight_stack[-1] 41 | # compute size of the image 42 | s = W.get_shape() 43 | out_channels = 1 44 | if int(s[3]) % 3 == 0: 45 | out_channels = 3 46 | print("Shape {}".format(s)) 47 | weight_patches = int(s[2]) 
* int(s[3]) // out_channels # e.g. the number of [3,3] patches in a CNN 
48 | side_length = int(math.ceil(math.sqrt(weight_patches))) # image side length (in patches) 
49 | image_patches = side_length * side_length # max number of patches that fit in the image 
50 | # split into per filter weights 
51 | ws = [] 
52 | ws_dim3 = tf.split(W, int(s[3]) // out_channels, axis=3) # TF 1.x signature: tf.split(value, num_splits, axis); e.g. [ [3,3,3,1], [3,3,3,1], ... ] 
53 | for w in ws_dim3: 
54 | # split these further 
55 | ws.extend(tf.split(w, int(s[2]), axis=2)) # e.g. [ [3,3,1,1], [3,3,1,1], ... ] 
56 | # pad image 
57 | padding = image_patches - weight_patches 
58 | for i in range(padding): 
59 | ws.append(tf.zeros([s[0], s[1], 1, out_channels])) 
60 | # build rows of image 
61 | rows = [] 
62 | for i in range(side_length): 
63 | start = i * side_length 
64 | end = start + side_length 
65 | rows.append(tf.concat(axis=0, values=ws[start:end])) 
66 | # combine rows to image 
67 | image = tf.concat(axis=1, values=rows) # [sidelength * ] 
68 | s = [int(image.get_shape()[0]), int(image.get_shape()[1])] 
69 | image = tf.reshape(image, [1, s[0], s[1], out_channels]) 
70 | image = tf.image.resize_images(image, [int(s[1] * 50), int(s[0] * 50)], 1) 
71 | image_tag = "l" + str(self.layer_num) + "_weight_image" 
72 | tf.summary.image(image_tag, image) 
73 | print("Image Summary: save weights as image") 
74 | 
75 | #--------------------------------------------------------------------------------- 
76 | # outChannels: int 
77 | # _patchShape: 2D: [H,W]; 3D: [D,H,W] 
78 | # stride 3D: if 1D: [DHW], if 2D:[D,HW], if 3D:[D,H,W] 
79 | # returns both normalized and linearized versions 
80 | def convolutional_layer(self, outChannels, _patchShape, activation_function=tf.nn.tanh, stride=[1], name="conv",reuse=False, batch_norm=False, train=None, in_layer=None, in_channels = None, gain = np.sqrt(2)): 
81 | if in_layer is None: 
82 | in_layer = self.layer 
83 | with tf.variable_scope(name, reuse = reuse): 
84 | self.layer_num += 1 
85 | # set the input and output dimension 
86 | if in_channels is not None : 
87 | inChannels = int(in_channels) 
88 | else: 
89 | inChannels = int(in_layer.get_shape()[-1]) 
90 | #outChannels = int(inChannels * _filterSpread) 
91 | # create a weight matrix 
92 | if len(_patchShape) == 2: 
93 | W = self.weight_variable([_patchShape[0], _patchShape[1], inChannels, outChannels], name=name, gain = gain) 
94 | self.layer = self.conv2d(in_layer, W, stride) 
95 | self.DOFs += _patchShape[0]* _patchShape[1]* inChannels* outChannels 
96 | elif len(_patchShape) == 3: 
97 | W = self.weight_variable([_patchShape[0], _patchShape[1], _patchShape[2], inChannels, outChannels], name=name, gain = gain) 
98 | self.layer = self.conv3d(in_layer, W, stride) 
99 | self.DOFs += _patchShape[0]* _patchShape[1]* _patchShape[2]* inChannels* outChannels 
100 | #batch_norm = False 
101 | 
102 | self.weight_stack.append(W) 
103 | # create a bias vector 
104 | b = self.bias_variable([outChannels], name=name) 
105 | self.layer = self.layer + b 
106 | self.DOFs += outChannels 
107 | 
108 | if batch_norm: 
109 | #self.layer = self.conv_batch_norm(self.layer, train=train) 
110 | self.layer = tf.contrib.layers.batch_norm(self.layer, decay=self.bn_decay, scale=True, scope=tf.get_variable_scope(), reuse=reuse, fused=False, is_training=train) 
111 | layer_lin = self.layer 
112 | if activation_function: 
113 | self.layer = activation_function(self.layer) 
114 | # user output 
115 | if activation_function: 
116 | print("Convolutional Layer \'{}\' {} ({}) : {}, BN:{}".format(name, W.get_shape(), activation_function.__name__,self.layer.get_shape(),batch_norm)) 
117 | else:
118 | print("Convolutional Layer \'{}\' {} ({}) : {}, BN:{}".format(name, W.get_shape(), 'None',self.layer.get_shape(),batch_norm)) 119 | return self.layer, layer_lin 120 | 121 | #--------------------------------------------------------------------------------- 122 | # s1: outChannels of intermediate conv layer 123 | # s2: outChannels of final and skip conv layer 124 | # filter: 2D: [H,W]; 3D: [D,H,W] 125 | # returns both normalized and linearized versions 126 | def residual_block(self, s1,s2, filter, activation_function=tf.nn.tanh, name="RB", reuse=False, batch_norm=False, train=None, in_layer=None): 127 | # note - leaky relu (lrelu) not too useful here 128 | if in_layer==None: 129 | in_layer = self.layer 130 | # convolutions of resnet block 131 | if len(filter) == 2: 132 | filter1 = [1,1] 133 | elif len(filter) == 3: 134 | filter1 = [1,1,1] 135 | 136 | print("Residual Block:") 137 | A,_ = self.convolutional_layer(s1, filter, activation_function, stride=[1], name=name+"_A", in_layer=in_layer, reuse=reuse, batch_norm=batch_norm, train=train) 138 | B,_ = self.convolutional_layer(s2, filter, None , stride=[1], name=name+"_B", reuse=reuse, batch_norm=batch_norm, train=train) 139 | # shortcut connection 140 | s,_ = self.convolutional_layer(s2, filter1, None , stride=[1], name=name+"_s", in_layer=in_layer, reuse=reuse, batch_norm=batch_norm, train=train) 141 | 142 | self.layer = tf.add( B, s) 143 | layer_lin = self.layer 144 | if activation_function: 145 | self.layer = activation_function(self.layer ) 146 | 147 | return self.layer, layer_lin 148 | 149 | 150 | #--------------------------------------------------------------------------------- 151 | # 2 x 2 max pool operation 152 | def max_pool(self, window_size=[2], window_stride=[2]): 153 | if len(self.layer.get_shape()) == 4: 154 | self.layer = tf.nn.max_pool(self.layer, ksize=[1, window_size[0], window_size[0], 1], strides=[1, window_stride[0], window_stride[0], 1], padding="VALID") 155 | elif len(self.layer.get_shape()) == 5: 156 | self.layer = tf.nn.max_pool3d(self.layer, ksize=[1, window_size[0], window_size[0], window_size[0], 1], strides=[1, window_stride[0], window_stride[0], window_stride[0], 1], padding="VALID") 157 | # user output 158 | print("Max Pool {}: {}".format(window_size, self.layer.get_shape())) 159 | return self.layer 160 | 161 | #--------------------------------------------------------------------------------- 162 | def avg_pool(self, window_size=[2], window_stride=[2]): 163 | if len(self.layer.get_shape()) == 4: 164 | self.layer = tf.nn.avg_pool(self.layer, ksize=[1, window_size[0], window_size[0], 1], strides=[1, window_stride[0], window_stride[0], 1], padding="VALID") 165 | elif len(self.layer.get_shape()) == 5: 166 | self.layer = tf.cast(tf.nn.avg_pool3d(tf.cast(self.layer, tf.float32), ksize=[1, window_size[0], window_size[0], window_size[0], 1], strides=[1, window_stride[0], window_stride[0], window_stride[0], 1], padding="VALID"), self.dtypeF) 167 | # user output 168 | print("Avg Pool {}: {}".format(window_size, self.layer.get_shape())) 169 | return self.layer 170 | 171 | #--------------------------------------------------------------------------------- 172 | # TODO: center velocities 173 | def SemiLagrange (self, source, vel, flags, res, pos): 174 | vel_shape = tf.shape(vel) 175 | dim = 2 #tf.size(vel_shape) - 2 # batch and channels are ignored 176 | 177 | pos = tf.subtract( tf.add( tf.cast(pos, tf.float32), tf.constant(0.0)), vel) 178 | 179 | floors = tf.cast(tf.floor(pos - 0.5), tf.int32) 180 | ceils = floors + 1 
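# floors/ceils span the 2^dim lattice corners around the back-traced position
# (pos - vel); after being clamped to the grid below, each corner is gathered
# via tf.gather_nd and weighted by the per-axis product of (1 - |distance|),
# i.e. standard bi-/tri-linear interpolation of the advected source.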
181 | 182 | # clamp min 183 | floors = tf.maximum(floors, tf.zeros_like(floors)) 184 | ceils = tf.maximum(ceils, tf.zeros_like(ceils)) 185 | 186 | # clamp max 187 | floors = tf.minimum(floors, tf.shape(source)[1:dim + 1] - 1) 188 | ceils = tf.minimum(ceils, tf.shape(source)[1:dim + 1] - 1) 189 | 190 | _broadcaster = tf.ones_like(ceils) 191 | cell_value_list = [] 192 | cell_weight_list = [] 193 | for axis_x in range(int(pow(2, dim))): # 3d, 0-7; 2d, 0-3;... 194 | condition_list = [bool(axis_x & int(pow(2, i))) for i in range(dim)] 195 | condition_ = (_broadcaster > 0) & condition_list 196 | axis_idx = tf.cast( 197 | tf.where(condition_, ceils, floors), 198 | tf.int32) 199 | 200 | # only support linear interpolation... 201 | axis_wei = 1.0 - tf.abs((pos - 0.5) - tf.cast(axis_idx, tf.float32)) # shape (..., res_x2, res_x1, dim) 202 | axis_wei = tf.reduce_prod(axis_wei, axis=-1, keepdims=True) 203 | cell_weight_list.append(axis_wei) # single scalar(..., res_x2, res_x1, 1) 204 | first_idx = tf.ones_like(axis_wei, dtype=self.dtypeI) 205 | first_idx = tf.cumsum(first_idx, axis=0, exclusive=True) 206 | cell_value_list.append(tf.concat([first_idx, axis_idx], -1)) 207 | #print(value.get_shape()) 208 | #print(cell_value_list[0].get_shape()) 209 | source_fwd = tf.gather_nd(source, cell_value_list[0]) * cell_weight_list[ 210 | 0] # broadcasting used, shape (..., res_x2, res_x1, channels ) 211 | for cell_idx in range(1, len(cell_value_list)): 212 | source_fwd = source_fwd + tf.gather_nd(source, cell_value_list[cell_idx]) * cell_weight_list[cell_idx] 213 | return source_fwd # shape (..., res_x2, res_x1, channels) 214 | 215 | def MacCormackCorrect(self, flags, source, forward, backward, strength = 1.0, threshold_flags = 0.2): 216 | flags = tf.reshape(flags, shape=[-1, tf.shape(source)[1], tf.shape(source)[2], 1]) 217 | cond_flags = tf.less(flags, tf.constant(threshold_flags)) # adapt threshold 218 | return tf.where(cond_flags, forward + strength * 0.5 * (source - backward), forward) 219 | 220 | # checkFlag(x,y,z) (flags((x),(y),(z)) & (FlagGrid::TypeFluid|FlagGrid::TypeEmpty)) 221 | 222 | def doClampComponent(self, grid_res, flags, intermed_adv, source, forward, pos, vel, startBz = 15, threshold_flags = 0.2): 223 | 224 | min = tf.ones_like(source) * sys.maxsize 225 | max = -tf.ones_like(source) * sys.maxsize - 1 226 | min_i = min 227 | max_i = max 228 | # forward 229 | currPos = tf.cast(tf.cast(pos, tf.float32) - vel, tf.int32) 230 | # clamp lookup to grid 231 | i0 = tf.clip_by_value(tf.slice(currPos,[0,0,0,0],[-1,-1,-1,1]), 0, grid_res[1]-1) 232 | j0 = tf.clip_by_value(tf.slice(currPos,[0,0,0,1],[-1,-1,-1,1]), 0, grid_res[2]-1) 233 | 234 | # indices_0 = tf.Variable([], dtype = tf.int32, trainable = False) 235 | # indices_1 = tf.Variable([], dtype = tf.int32, trainable = False) 236 | # indices_2 = tf.Variable([], dtype = tf.int32, trainable = False) 237 | # indices_3 = tf.Variable([], dtype = tf.int32, trainable = False) 238 | # i = tf.constant(0) 239 | 240 | # indices_0 = tf.concat([tf.ones_like(i0[0])*0, i0[0], j0[0], tf.zeros_like(i0[0])], axis = 2) 241 | # indices_1 = tf.clip_by_value(tf.concat([tf.ones_like(i0[i])*i, i0[i] + 1, j0[i], tf.zeros_like(i0[i])], axis = 2), 0, grid_res[2]-1) 242 | # indices_2 = tf.clip_by_value(tf.concat([tf.ones_like(i0[i])*i, i0[i], j0[i] + 1, tf.zeros_like(i0[i])], axis = 2), 0, grid_res[2]-1) 243 | # indices_3 = tf.clip_by_value(tf.concat([tf.ones_like(i0[i])*i, i0[i] + 1, j0[i] + 1, tf.zeros_like(i0[i])], axis = 2), 0, grid_res[2]-1) 244 | 245 | # def cond(i, 
indices_0, indices_1, indices_2, indices_3,i0,j0): 246 | # return tf.less(i, grid_res[0]) 247 | 248 | # #while_condition = lambda i, indices_0, indices_1, indices_2, indices_3: tf.less(i, grid_res[0]) 249 | 250 | # def body(i,indices_0,indices_1,indices_2,indices_3,i0,j0): 251 | 252 | # indices_t = [tf.concat([tf.ones_like(i0[i])*i, i0[i], j0[i], tf.zeros_like(i0[i])], axis = 2)] 253 | # indices_t1 = [tf.clip_by_value(tf.concat([tf.ones_like(i0[i])*i, i0[i] + 1, j0[i], tf.zeros_like(i0[i])], axis = 2), 0, grid_res[2]-1)] 254 | # indices_t2 = [tf.clip_by_value(tf.concat([tf.ones_like(i0[i])*i, i0[i], j0[i] + 1, tf.zeros_like(i0[i])], axis = 2), 0, grid_res[2]-1)] 255 | # indices_t3 = [tf.clip_by_value(tf.concat([tf.ones_like(i0[i])*i, i0[i] + 1, j0[i] + 1, tf.zeros_like(i0[i])], axis = 2), 0, grid_res[2]-1)] 256 | 257 | # indices_0 = tf.concat([tf.reshape(indices_0, shape=tf.shape(indices_t)), indices_t], 0) 258 | # indices_1 = tf.concat([tf.reshape(indices_1, shape=tf.shape(indices_t1)), indices_t1], 0) 259 | # indices_2 = tf.concat([tf.reshape(indices_2, shape=tf.shape(indices_t2)), indices_t2], 0) 260 | # indices_3 = tf.concat([tf.reshape(indices_3, shape=tf.shape(indices_t3)), indices_t3], 0) 261 | 262 | # return tf.add(i, 1),indices_0,indices_1,indices_2,indices_3, i0, j0 263 | 264 | # # do the loop: 265 | # r,_,_,_,_,_,_ = tf.while_loop(cond, body, [i,indices_0,indices_1,indices_2,indices_3,i0,j0],shape_invariants=[i.get_shape(),tf.TensorShape(None),tf.TensorShape(None),tf.TensorShape(None),tf.TensorShape(None),i0.get_shape(),j0.get_shape()]) 266 | 267 | #indices_0 = tf.convert_to_tensor(indices_0, dtype = tf.int32) 268 | # indices_0 = tf.concat([tf.ones_like(i0), i0, j0, tf.zeros_like(i0)], axis = 3) 269 | # indices_1 = tf.clip_by_value(tf.concat([tf.ones_like(i0), i0 + 1, j0, tf.zeros_like(i0)], axis = 3), 0, grid_res[2]-1) 270 | # indices_2 = tf.clip_by_value(tf.concat([tf.ones_like(i0), i0, j0 + 1, tf.zeros_like(i0)], axis = 3), 0, grid_res[2]-1) 271 | # indices_3 = tf.clip_by_value(tf.concat([tf.ones_like(i0), i0 + 1, j0 + 1, tf.zeros_like(i0)], axis = 3), 0, grid_res[2]-1) 272 | 273 | indices_0 = [] 274 | indices_1 = [] 275 | indices_2 = [] 276 | indices_3 = [] 277 | 278 | for i in range(startBz): 279 | indices_0.append( tf.concat([tf.ones_like(i0[i])*i, i0[i], j0[i], tf.zeros_like(i0[i])], axis = 2)) 280 | indices_1.append( tf.clip_by_value(tf.concat([tf.ones_like(i0[i])*i, i0[i] + 1, j0[i], tf.zeros_like(i0[i])], axis = 2), 0, grid_res[2]-1)) 281 | indices_2.append( tf.clip_by_value(tf.concat([tf.ones_like(i0[i])*i, i0[i], j0[i] + 1, tf.zeros_like(i0[i])], axis = 2), 0, grid_res[2]-1)) 282 | indices_3.append( tf.clip_by_value(tf.concat([tf.ones_like(i0[i])*i, i0[i] + 1, j0[i] + 1, tf.zeros_like(i0[i])], axis = 2), 0, grid_res[2]-1)) 283 | 284 | source_1 = tf.expand_dims(tf.gather_nd(source, indices_0), axis = 3) 285 | source_2 = tf.expand_dims(tf.gather_nd(source, indices_1), axis = 3) 286 | source_3 = tf.expand_dims(tf.gather_nd(source, indices_2), axis = 3) 287 | source_4 = tf.expand_dims(tf.gather_nd(source, indices_3), axis = 3) 288 | # const int k0 = clamp(currPos.z, 0, (orig.is3D() ? 
(gridSize.z-1) : 1) ); # for 3D 289 | 290 | flags_1 = tf.expand_dims(tf.gather_nd(flags, indices_0), axis = 3) 291 | cond_flags_1 = tf.less(flags_1, tf.constant(threshold_flags)) 292 | 293 | flags_2 = tf.expand_dims(tf.gather_nd(flags, indices_1), axis = 3) 294 | cond_flags_2 = tf.less(flags_2, tf.constant(threshold_flags)) 295 | 296 | flags_3 = tf.expand_dims(tf.gather_nd(flags, indices_2), axis = 3) 297 | cond_flags_3 = tf.less(flags_3, tf.constant(threshold_flags)) 298 | 299 | flags_4 = tf.expand_dims(tf.gather_nd(flags, indices_3), axis = 3) 300 | cond_flags_4 = tf.less(flags_4, tf.constant(threshold_flags)) 301 | 302 | tmp_min = min 303 | tmp_max = max 304 | cond_min = tf.greater( min, source_1) 305 | cond_max = tf.less( max, source_1) 306 | min = tf.where(cond_min, source_1, min) 307 | max = tf.where(cond_max, source_1, max) 308 | min = tf.where(cond_flags_1, min, tmp_min) 309 | max = tf.where(cond_flags_1, max, tmp_max) 310 | 311 | tmp_min = min 312 | tmp_max = max 313 | cond_min = tf.greater( min, source_2) 314 | cond_max = tf.less( max, source_2) 315 | min = tf.where(cond_min, source_2, min) 316 | max = tf.where(cond_max, source_2, max) 317 | min = tf.where(cond_flags_2, min, tmp_min) 318 | max = tf.where(cond_flags_2, max, tmp_max) 319 | 320 | tmp_min = min 321 | tmp_max = max 322 | cond_min = tf.greater( min, source_3) 323 | cond_max = tf.less( max, source_3) 324 | min = tf.where(cond_min, source_3, min) 325 | max = tf.where(cond_max, source_3, max) 326 | min = tf.where(cond_flags_3, min, tmp_min) 327 | max = tf.where(cond_flags_3, max, tmp_max) 328 | 329 | tmp_min = min 330 | tmp_max = max 331 | cond_min = tf.greater( min, source_4) 332 | cond_max = tf.less( max, source_4) 333 | min = tf.where(cond_min, source_4, min) 334 | max = tf.where(cond_max, source_4, max) 335 | min = tf.where(cond_flags_4, min, tmp_min) 336 | max = tf.where(cond_flags_4, max, tmp_max) 337 | 338 | # find min/max around source pos 339 | # if(checkFlag(i0,j0,k0)) { min, max = getMinMax(min, max, orig(i0,j0,k0)); haveFl=true; } 340 | # if(checkFlag(i1,j0,k0)) { min, max = getMinMax(min, max, orig(i1,j0,k0)); haveFl=true; } 341 | # if(checkFlag(i0,j1,k0)) { min, max = getMinMax(min, max, orig(i0,j1,k0)); haveFl=true; } 342 | # if(checkFlag(i1,j1,k0)) { min, max = getMinMax(min, max, orig(i1,j1,k0)); haveFl=true; } 343 | 344 | # for 3D 345 | # if(orig.is3D()) { 346 | # if(checkFlag(i0,j0,k1)) { getMinMax(minv, maxv, orig(i0,j0,k1)); haveFl=true; } 347 | # if(checkFlag(i1,j0,k1)) { getMinMax(minv, maxv, orig(i1,j0,k1)); haveFl=true; } 348 | # if(checkFlag(i0,j1,k1)) { getMinMax(minv, maxv, orig(i0,j1,k1)); haveFl=true; } 349 | # if(checkFlag(i1,j1,k1)) { getMinMax(minv, maxv, orig(i1,j1,k1)); haveFl=true; } } 350 | 351 | # if(!haveFl) return fwd; 352 | # if(cmpMinMax(min,max,dst)) dst = fwd; 353 | cond_complete = tf.logical_or( tf.logical_or(tf.logical_or(tf.less(intermed_adv, min) , tf.greater(intermed_adv, max)), tf.equal(min, min_i)) , tf.equal(max, max_i)) 354 | 355 | return tf.where(cond_complete, forward, intermed_adv) 356 | 357 | 358 | def MacCormackClamp(self, flags, vel, intermed_adv, source, forward, pos, startBz = 15): 359 | grid_res = tf.shape(vel) 360 | return self.doClampComponent(grid_res, flags, intermed_adv, source, forward, pos, vel, startBz); 361 | 362 | # def getMacCormackPosBatch(macgrid_batch, dt, cube_len_output=-1): 363 | 364 | # vel_pos_high_inter = getSemiLagrPosBatch(macgrid_input, dtArray, self.tileSizeHigh[1]).reshape((real_batch_sz, -1)) 365 | 366 | # vel_pos_high_inter = 
getSemiLagrPosBatch(macgrid_input, -dtArray, self.tileSizeHigh[1]).reshape((real_batch_sz, -1)) 367 | 368 | # velocity has to be centered, not in MAC-form 369 | def advect(self, source, vel, flags, dt, order, strength=0.0, name = "Advection", startBz = 15): 370 | res = tf.shape(source) 371 | #assert (tf.shape(vel) == res and tf.shape(flags) == res) 372 | vel_shape = tf.shape(source)#get_shape().as_list() 373 | print(vel_shape) 374 | dim = tf.size(vel_shape) - 2 # batch and channels are ignored 375 | 376 | # create index array 377 | # TODO precompute array 378 | pos_x = tf.range(start = 0.5, limit = tf.cast(res[1],dtype = tf.float32)+tf.constant(0.5,dtype = tf.float32), dtype = tf.float32) 379 | pos_x = tf.tile(pos_x, [res[2]]) 380 | if dim == 3: 381 | pos_x = tf.tile(pos_x, [res[3]]) 382 | pos_x = tf.reshape(pos_x, [1, vel_shape[1], vel_shape[2], vel_shape[3], 1]) 383 | pos_y = tf.transpose(pos_x, [0,2,1,3,4]) 384 | pos_z = tf.transpose(pos_x, [0,3,2,1,4]) 385 | pos = tf.stack([pos_z, pos_y, pos_x]) 386 | else: 387 | pos_x = tf.reshape(pos_x, [1, vel_shape[1], vel_shape[2], 1]) 388 | pos_y = tf.transpose(pos_x, [0,2,1,3]) 389 | pos = tf.cast(tf.concat([pos_y, pos_x], axis = 3), dtype = tf.float32)+tf.constant(0.5,dtype = tf.float32) 390 | 391 | upResFactor = tf.maximum(tf.cast((res[1] / tf.shape(vel)[1]),dtype =tf.float32),tf.cast((res[2] / tf.shape(vel)[2]),dtype =tf.float32)) 392 | vel = tf.slice(vel, [0,0,0,0], [-1,-1,-1,2]) 393 | vel = tf.concat((tf.slice(vel,[0,0,0,1], [-1,-1,-1,1]), tf.slice(vel,[0,0,0,0], [-1,-1,-1,1])), axis = 3) 394 | vel = tf.image.resize_images(vel,[res[1], res[2]], 0) 395 | #vel = tf.contrib.image.transform(vel, [1.0/upResFactor, 0, 0.5/upResFactor, 0, 1.0/upResFactor, 0.5/upResFactor, 0, 0], 'BILINEAR', output_shape=[tf.shape(source)[1], tf.shape(source)[2]]) 396 | vel *= upResFactor 397 | vel_y = tf.contrib.image.transform(tf.slice(vel,[0,0,0,0],[-1,-1,-1,1]), [1, 0, 0, 0, 1, 1, 0, 0], 'NEAREST') 398 | vel_x = tf.contrib.image.transform(tf.slice(vel,[0,0,0,1],[-1,-1,-1,1]), [1, 0, 1, 0, 1, 0, 0, 0], 'NEAREST') 399 | vel = tf.constant(0.5, dtype=tf.float32) * (vel + tf.concat((vel_y, vel_x), axis = 3)) 400 | #vel = tf.contrib.image.transform(vel, [1.0/upResFactor, 0, 0.5/upResFactor, 0, 1.0/upResFactor, 0.5/upResFactor, 0, 0], 'BILINEAR', output_shape=[tf.shape(source)[1], tf.shape(source)[2]]) 401 | 402 | #vel_y = tf.contrib.image.transform(tf.slice(vel, [0,0,0,1],[-1,-1,-1,1]), [1/upResFactor, 0, 0.5/upResFactor, 0, 1/upResFactor, 0.0, 0, 0], 'BILINEAR', output_shape=[tf.shape(source)[1], tf.shape(source)[2]]) 403 | #vel = tf.concat([vel_x,vel_y], axis = 3) 404 | # build time step array 405 | dt_arr_1 = tf.cast(tf.ones_like(pos), tf.float32) * dt 406 | dt_arr = tf.concat([dt_arr_1, tf.cast(tf.zeros_like(pos), tf.float32), dt_arr_1 * -1.0], axis = 0) 407 | dt_arr = tf.tile(dt_arr, [vel_shape[0]//3,1,1,1]) 408 | pos = tf.tile(pos, [tf.shape(source)[0],1,1,1]) 409 | vel *= dt_arr 410 | # advect quantity: source 411 | with tf.variable_scope(name): 412 | forward_adv = self.SemiLagrange(source, vel, flags, res, pos) 413 | if order == 2: 414 | backward_adv = self.SemiLagrange(forward_adv, -vel , flags, res, pos) 415 | intermed_correct = self.MacCormackCorrect(flags, source, forward_adv, backward_adv, strength) 416 | out_adv = self.MacCormackClamp(flags, vel, intermed_correct, source, forward_adv, pos, startBz) 417 | return out_adv 418 | return forward_adv 419 | 420 | #--------------------------------------------------------------------------------- 421 | # make 
layer flat 422 | # e.G. [1, 4, 4, 2] -> [1, 32] 423 | def flatten(self): 424 | # get unflat shape 425 | layerShape = self.layer.get_shape() 426 | self.preFlatShapes.append(layerShape) 427 | # compute flat size 428 | flatSize = int(layerShape[1]) * int(layerShape[2]) * int(layerShape[3]) 429 | if len(layerShape) == 5: 430 | flatSize *= int(layerShape[4]) 431 | # make flat 432 | self.layer = tf.reshape(self.layer, [-1, flatSize]) 433 | # user output 434 | print("Flatten: {}".format(self.layer.get_shape())) 435 | return flatSize 436 | 437 | #--------------------------------------------------------------------------------- 438 | def fully_connected_layer(self, _numHidden, _act, name="full", gain = np.sqrt(2)): 439 | with tf.variable_scope(name): 440 | self.layer_num += 1 441 | # get previous layer size 442 | numInput = int(self.layer.get_shape()[1]) 443 | # build layer variables 444 | W = self.weight_variable([numInput, _numHidden], name=name, gain =gain) 445 | b = self.bias_variable([_numHidden], name=name) 446 | self.DOFs += numInput*_numHidden + _numHidden 447 | # activate 448 | self.layer = tf.matmul(self.layer, W) + b 449 | if _act: 450 | self.layer = _act(self.layer) # ?? 451 | # user output 452 | if _act: 453 | print("Fully Connected Layer \'{}\': {}".format(name, self.layer.get_shape())) 454 | else: 455 | print("Linear Layer \'{}\': {}".format(name, self.layer.get_shape())) 456 | return self.layer 457 | 458 | #--------------------------------------------------------------------------------- 459 | # make layer 3D (from previously stored) 460 | # e.G. [1, 32] -> [1, 4, 4, 2] 461 | def unflatten(self): 462 | unflatShape = self.preFlatShapes.pop() 463 | if len(unflatShape) == 4: 464 | unflatShape = [-1, int(unflatShape[1]), int(unflatShape[2]), int(unflatShape[3])] 465 | elif len(unflatShape) == 5: 466 | unflatShape = [-1, int(unflatShape[1]), int(unflatShape[2]), int(unflatShape[3]), int(unflatShape[4])] 467 | self.layer = tf.reshape(self.layer, unflatShape) 468 | print("Unflatten: {}".format(self.layer.get_shape())) 469 | return self.layer 470 | 471 | # pixelnorm, used in progressive growing of gans https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L120 472 | def pixel_norm(self, in_layer, epsilon=1e-8): 473 | self.layer = in_layer * tf.rsqrt(tf.reduce_mean(tf.square(in_layer), axis=3, keep_dims=True) + epsilon) 474 | return self.layer 475 | 476 | def minibatch_stddev_layer(self, x, group_size=4): 477 | group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or less than) group_size. 478 | s = x.shape # [NCHW] Input shape. 479 | y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]]) # [GMCHW] Split minibatch into M groups of size G. 480 | y = tf.cast(y, self.dtypeF) # [GMCHW] Cast to FP32. 481 | y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMCHW] Subtract mean over group. 482 | y = tf.reduce_mean(tf.square(y), axis=0) # [MCHW] Calc variance over group. 483 | y = tf.sqrt(y + 1e-8) # [MCHW] Calc stddev over group. 484 | y = tf.reduce_mean(y, axis=[1,2,3], keepdims=True) # [M111] Take average over fmaps and pixels. 485 | y = tf.cast(y, x.dtype) # [M111] Cast back to original data type. 486 | y = tf.tile(y, [group_size, s[1], s[2], 1]) # [N1HW] Replicate over group and pixels. 487 | self.layer = tf.concat([x, y], axis=3) 488 | return self.layer # [NCHW] Append as new fmap. 
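# Usage sketch (an assumption, not taken from the original training scripts):
# this layer is typically appended near the end of a discriminator so it can
# compare per-batch statistics, as in the progressive-growing GAN reference
# above. With `gan` a GAN instance whose current layer is an [N,H,W,C] tensor:
#
#   gan.convolutional_layer(64, [3, 3], lrelu, name="d_conv_a")  # -> [N,H,W,64]
#   gan.minibatch_stddev_layer(gan.layer)                        # -> [N,H,W,65]
#   gan.convolutional_layer(64, [3, 3], lrelu, name="d_conv_b")  # mixes the stddev fmap in
#
# The layer names "d_conv_a"/"d_conv_b" are hypothetical, for illustration only.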
489 | 490 | 491 | # 2D: 492 | # max_pool2d( inputs, kernel_size, stride=2, padding='VALID', data_format=DATA_FORMAT_NHWC, outputs_collections=None, scope=None) 493 | # try 494 | # tf.contrib.layers.max_pool3d , https://www.tensorflow.org/api_docs/python/tf/contrib/layers/max_pool3d 495 | # max_pool3d( inputs, kernel_size, stride=2, padding='VALID', data_format=DATA_FORMAT_NDHWC, outputs_collections=None, scope=None) 496 | # -> no fractions / depooling! 497 | 498 | #--------------------------------------------------------------------------------- 499 | # inverse of 2 x 2 max pool , note window size&stride given as [x,y] pair 500 | # does not support 3D 501 | def max_depool(self, in_layer=None, depth_factor =2, height_factor=2, width_factor=2): 502 | if in_layer==None: 503 | in_layer = self.layer 504 | 505 | #if 1: # alt with deconv 506 | #lo, li = self.deconvolutional_layer(1, [1,1], None, stride=[2,2], name="g_D1", reuse=reuse, batch_norm=use_batch_norm, train=train) 507 | #return lo 508 | ''' 509 | if len(self.layer.get_shape()) == 4: 510 | outWidth = in_layer.get_shape()[2] * window_stride[0] + window_size[0] - window_stride[0] 511 | outHeight = in_layer.get_shape()[1] * window_stride[1] + window_size[1] - window_stride[1] 512 | self.layer = tf.image.resize_images(in_layer, [int(outHeight), int(outWidth)], 1) #1 = ResizeMethod.NEAREST_NEIGHBOR 513 | print("Max Depool {}: {}".format(window_size, self.layer.get_shape())) 514 | ''' 515 | if len(self.layer.get_shape()) == 4: 516 | #self.layer = tf.contrib.keras.backend.resize_images(self.layer, height_factor, width_factor, 'channels_last') 517 | self.layer = kb.resize_images(self.layer, height_factor, width_factor, 'channels_last') 518 | print("Max Depool : {}".format(self.layer.get_shape())) 519 | if len(self.layer.get_shape()) == 5: 520 | #self.layer = tf.contrib.keras.backend.resize_volumes(self.layer, depth_factor, height_factor, width_factor, 'channels_last') 521 | self.layer = kb.resize_volumes(self.layer, depth_factor, height_factor, width_factor, 'channels_last') 522 | print("Max Depool : {}".format(self.layer.get_shape())) 523 | return self.layer 524 | 525 | #--------------------------------------------------------------------------------- 526 | # resizes H and W dimensions of NHWC or NDHWC (scale only H and W for 3D DHW data) 527 | # https://stackoverflow.com/questions/43814367/resize-3d-data-in-tensorflow-like-tf-image-resize-images 528 | def avg_depool(self, window_size=[1, 1], window_stride=[2,2], mode = 0, scale = [2]): 529 | is3D = False 530 | if len(self.layer.get_shape()) == 5: # 3D data, merge D into C to have a 4D tensor (like 2D data) 531 | is3D = True 532 | self.layer = tf.transpose(self.layer, [0,2,3,1,4]) # NDHWC -> NHWDC 533 | s=self.layer.get_shape() # NHWDC 534 | self.layer = tf.reshape(self.layer, [-1, int(s[1]), int(s[2]), int(s[3])*int(s[4])]) # NHWDC -> NHW(D*C) 535 | if len(scale) == 1: 536 | outWidth = self.layer.get_shape()[2] * scale[0]#window_stride[0] + window_size[0] - window_stride[0] 537 | outHeight = self.layer.get_shape()[1] * scale[0]#window_stride[1] + window_size[1] - window_stride[1] 538 | elif len(scale) == 2: 539 | outWidth = self.layer.get_shape()[2] * scale[1]#window_stride[0] + window_size[0] - window_stride[0] 540 | outHeight = self.layer.get_shape()[1] * scale[0]#window_stride[1] + window_size[1] - window_stride[1] 541 | self.layer = tf.cast(tf.image.resize_images(tf.cast(self.layer, tf.float32), [int(outHeight), int(outWidth)], mode), self.dtypeF) #0 = ResizeMethod.BILINEAR 542 | 543 | if 
is3D: # recover D dimension 544 | self.layer = tf.reshape(self.layer, [-1, int(outHeight), int(outWidth), int(s[3]), int(s[4])]) 545 | self.layer = tf.transpose(self.layer, [0,3,1,2,4]) # -> NDHWC 546 | s=self.layer.get_shape() # NDHWC 547 | self.layer = tf.reshape(self.layer, [-1, int(s[1]), int(s[2]), int(s[3])*int(s[4])])# NDHWC -> NDH(W*C) 548 | self.layer = tf.cast(tf.image.resize_images(tf.cast(self.layer, tf.float32), [int(s[1]*scale[0]), int(s[2])], mode), self.dtypeF) #0 = ResizeMethod.BILINEAR 549 | self.layer = tf.reshape(self.layer, [-1, int(s[1]*scale[0]), int(s[2]), int(s[3]), int(s[4])])# NDHWC 550 | 551 | print("Avg Depool {}: {}".format(window_size, self.layer.get_shape())) 552 | return self.layer 553 | 554 | def pixel_shuffle(self, input_layer = None, upres = 2, stage = "1"): 555 | if input_layer == None: 556 | input_layer = self.layer 557 | 558 | input_layer,_ = self.convolutional_layer( input_layer.get_shape().as_list()[3] * 4, [1,1], None, stride=[1], name="g_cPS"+stage, in_layer=input_layer, reuse=tf.AUTO_REUSE, batch_norm=False, train=True) #->16,64 559 | self.layer = tf.depth_to_space(input_layer, upres, name = "Pixel_Shuffle") 560 | return self.layer 561 | 562 | #--------------------------------------------------------------------------------- 563 | # outChannels: int 564 | # _patchShape: 2D: [H,W]; 3D: [D,H,W] 565 | # stride 3D: if 1D: [DHW], if 2D:[D,HW], if 3D:[D,H,W] 566 | def deconvolutional_layer(self, outChannels, _patchShape, activation_function=tf.nn.tanh, stride=[1], name="deconv",reuse=False, batch_norm=False, train=None, init_mean=0., strideOverride=None): 567 | if init_mean==1.: 568 | name = name+"_EXCLUDE_ME_" 569 | with tf.variable_scope(name): 570 | self.layer_num += 1 571 | shape = self.layer.get_shape() 572 | # spread channels 573 | inChannels = int(self.layer.get_shape()[-1]) 574 | #outChannels = int(inChannels / _filterSpread) # must always come out even 575 | 576 | dcStride = stride 577 | if strideOverride is not None: 578 | dcStride = strideOverride 579 | 580 | if len(_patchShape) == 2: 581 | if len(stride) == 1: 582 | stride = [stride[0],stride[0]] 583 | # create a weight matrix 584 | W = self.weight_variable([_patchShape[0], _patchShape[1], outChannels, inChannels], name=name, init_mean=init_mean) 585 | self.layer = self.deconv2d(self.layer, W, [self.batch_size, int(shape[1]*stride[0]), int(shape[2]*stride[1]), outChannels], dcStride) 586 | self.DOFs += _patchShape[0]* _patchShape[1]* outChannels* inChannels 587 | if len(_patchShape) == 3: 588 | if len(stride) == 1: 589 | stride = [stride[0],stride[0],stride[0]] 590 | elif len(stride) == 2: 591 | stride = [stride[0],stride[1],stride[1]] 592 | # create a weight matrix 593 | W = self.weight_variable([_patchShape[0], _patchShape[1], _patchShape[2], outChannels, inChannels], name=name, init_mean=init_mean) 594 | self.layer = self.deconv3d(self.layer, W, [self.batch_size, int(shape[1]*stride[0]), int(shape[2]*stride[1]), int(shape[3]*stride[2]), outChannels], dcStride) 595 | self.DOFs += _patchShape[0]* _patchShape[1]* _patchShape[2]* outChannels* inChannels 596 | #batch_norm = False 597 | 598 | # create a bias vector 599 | b = self.bias_variable([outChannels], name=name) 600 | self.layer = self.layer + b 601 | self.DOFs += outChannels 602 | 603 | if len(_patchShape) == 2: 604 | self.layer = tf.reshape(self.layer, [-1, int(shape[1]*stride[0]), int(shape[2]*stride[1]), outChannels]) 605 | if len(_patchShape) == 3: 606 | self.layer = tf.reshape(self.layer, [-1, int(shape[1]*stride[0]), 
562 | 	#---------------------------------------------------------------------------------
563 | 	# outChannels: int
564 | 	# _patchShape: 2D: [H,W]; 3D: [D,H,W]
565 | 	# stride 3D: if 1D: [DHW], if 2D: [D,HW], if 3D: [D,H,W]
566 | 	def deconvolutional_layer(self, outChannels, _patchShape, activation_function=tf.nn.tanh, stride=[1], name="deconv", reuse=False, batch_norm=False, train=None, init_mean=0., strideOverride=None):
567 | 		if init_mean==1.:
568 | 			name = name+"_EXCLUDE_ME_"
569 | 		with tf.variable_scope(name):
570 | 			self.layer_num += 1
571 | 			shape = self.layer.get_shape()
572 | 			# spread channels
573 | 			inChannels = int(self.layer.get_shape()[-1])
574 | 			#outChannels = int(inChannels / _filterSpread) # must always come out even
575 | 
576 | 			dcStride = stride
577 | 			if strideOverride is not None:
578 | 				dcStride = strideOverride
579 | 
580 | 			if len(_patchShape) == 2:
581 | 				if len(stride) == 1:
582 | 					stride = [stride[0],stride[0]]
583 | 				# create a weight matrix
584 | 				W = self.weight_variable([_patchShape[0], _patchShape[1], outChannels, inChannels], name=name, init_mean=init_mean)
585 | 				self.layer = self.deconv2d(self.layer, W, [self.batch_size, int(shape[1]*stride[0]), int(shape[2]*stride[1]), outChannels], dcStride)
586 | 				self.DOFs += _patchShape[0]* _patchShape[1]* outChannels* inChannels
587 | 			if len(_patchShape) == 3:
588 | 				if len(stride) == 1:
589 | 					stride = [stride[0],stride[0],stride[0]]
590 | 				elif len(stride) == 2:
591 | 					stride = [stride[0],stride[1],stride[1]]
592 | 				# create a weight matrix
593 | 				W = self.weight_variable([_patchShape[0], _patchShape[1], _patchShape[2], outChannels, inChannels], name=name, init_mean=init_mean)
594 | 				self.layer = self.deconv3d(self.layer, W, [self.batch_size, int(shape[1]*stride[0]), int(shape[2]*stride[1]), int(shape[3]*stride[2]), outChannels], dcStride)
595 | 				self.DOFs += _patchShape[0]* _patchShape[1]* _patchShape[2]* outChannels* inChannels
596 | 			#batch_norm = False
597 | 
598 | 			# create a bias vector
599 | 			b = self.bias_variable([outChannels], name=name)
600 | 			self.layer = self.layer + b
601 | 			self.DOFs += outChannels
602 | 
603 | 			if len(_patchShape) == 2:
604 | 				self.layer = tf.reshape(self.layer, [-1, int(shape[1]*stride[0]), int(shape[2]*stride[1]), outChannels])
605 | 			if len(_patchShape) == 3:
606 | 				self.layer = tf.reshape(self.layer, [-1, int(shape[1]*stride[0]), int(shape[2]*stride[1]), int(shape[3]*stride[2]), outChannels])
607 | 
608 | 			if batch_norm:
609 | 				#self.layer = self.conv_batch_norm(self.layer, train=train)
610 | 				self.layer = tf.contrib.layers.batch_norm(self.layer, decay=self.bn_decay, scale=True, scope=tf.get_variable_scope(), reuse=reuse, fused=False, is_training=train)
611 | 			layer_lin = self.layer
612 | 			if activation_function:
613 | 				self.layer = activation_function(self.layer)
614 | 			# user output
615 | 			if activation_function:
616 | 				print("Deconvolutional Layer '{}' {} ({}): {}, BN:{}".format(name, W.get_shape(), activation_function.__name__, self.layer.get_shape(), batch_norm))
617 | 			else:
618 | 				print("Deconvolutional Layer '{}' {} ({}): {}, BN:{}".format(name, W.get_shape(), 'None', self.layer.get_shape(), batch_norm))
619 | 			return self.layer, layer_lin
620 | 
621 | 	#---------------------------------------------------------------------------------
622 | 	# adds noise to the current layer
623 | 	# channels: number of noise channels to add, uses channels of current layer if < 1
624 | 	def noise(self, channels=-1):
625 | 		shape = tf.shape(self.layer)
626 | 		if channels > 0:
627 | 			shape = tf.concat([shape[:-1], [channels]], axis=0) # fixed: tf.shape() tensors do not support item assignment (was `shape[-1] = channels`)
628 | 		noise = tf.random_normal(shape=shape, mean=0.0, stddev=0.04, dtype=self.dtypeF)
629 | 		self.layer = tf.concat([self.layer, noise], axis=-1)
630 | 		print("Noise {}: {}".format(noise.get_shape(), self.layer.get_shape()))
631 | 		return self.layer
632 | 
633 | 	#---------------------------------------------------------------------------------
634 | 	# adds the given tensor to self.layer on axis -1 (channels)
635 | 	def concat(self, layer):
636 | 		self.layer = tf.concat(values=[self.layer, layer], axis=-1)
637 | 		print("Concat {}: {}".format(layer.get_shape(), self.layer.get_shape()))
638 | 		return self.layer
639 | 	#---------------------------------------------------------------------------------
640 | 	# applies the given operation to self.layer
641 | 	def apply(self, op):
642 | 		self.layer = op(self.layer)
643 | 		print("Apply '{}': {}".format(op.__name__, self.layer.get_shape()))
644 | 		return self.layer
645 | 	#---------------------------------------------------------------------------------
646 | 	def dropout(self, keep_prob):
647 | 		self.layer = tf.nn.dropout(self.layer, keep_prob)
648 | 		print("Dropout: {}".format(self.layer.get_shape()))
649 | 		return self.layer
650 | 
651 | 	#---------------------------------------------------------------------------------
652 | 	def y(self):
653 | 		return self.layer
654 | 
655 | 	#---------------------------------------------------------------------------------
656 | 	def getDOFs(self):
657 | 		return self.DOFs
658 | 
659 | 	#---------------------------------------------------------------------------------
660 | 	# generate a randomly initialized weight field
661 | 	def weight_variable(self, shape, name="w", gain=np.sqrt(2), use_he=False, in_lay=None, use_wscale=True):
662 | 		# use tf.get_variable() instead of tf.Variable() to be able to reuse variables
663 | 
664 | 		if in_lay is None: in_lay = np.prod(shape[:-1])
665 | 		std = gain / np.sqrt(in_lay) # He init
666 | 		if use_wscale:
667 | 			wscale = tf.constant(np.float32(std), name='wscale', dtype = self.dtypeF)
668 | 			v = tf.get_variable("weight", shape, initializer=tf.initializers.random_normal(dtype = self.dtypeF), dtype = self.dtypeF) * wscale
669 | 		else:
670 | 			v = tf.get_variable("weight", shape, initializer=tf.keras.initializers.he_normal(dtype = self.dtypeF), dtype = self.dtypeF)
671 | 		#else:
672 | 		#	v = tf.get_variable("weight", shape, initializer=tf.random_normal_initializer(stddev=s, mean=init_mean))
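		# Added note on use_wscale: this is the equalized-learning-rate ("wscale")
		# trick from progressive GANs (Karras et al. 2018; presumably also the
		# origin of the tools_wscale naming): weights are stored as unit-variance
		# N(0,1) samples and multiplied by the per-layer He std at run time, so
		# every layer receives gradients of comparable scale.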
673 | 
674 | 		#print("\t{}".format(v.name))
675 | 		#print("\t{}".format(v.name)) # NT_DEBUG
676 | 		#print("\t{}".format( tf.get_variable_scope() ));
677 | 		#exit(1)
678 | 		return v
679 | 
680 | 	#---------------------------------------------------------------------------------
681 | 	# generate biases for the nodes
682 | 	def bias_variable(self, shape, name="b"):
683 | 		return tf.get_variable("bias", shape, initializer=tf.constant_initializer(0.1, dtype = self.dtypeF), dtype = self.dtypeF)
684 | 
685 | 	#---------------------------------------------------------------------------------
686 | 	def conv2d(self, x, W, stride=[1]):
687 | 		if len(stride) == 1: #[HW]
688 | 			strides = [1, stride[0], stride[0], 1]
689 | 		elif len(stride) == 2: #[H,W]
690 | 			strides = [1, stride[0], stride[1], 1]
691 | 		return tf.nn.conv2d(x, W, strides=strides, padding="SAME")
692 | 
693 | 	def conv3d(self, x, W, stride=[1]):
694 | 		if len(stride) == 1: #[DHW]
695 | 			strides = [1, stride[0], stride[0], stride[0], 1]
696 | 		elif len(stride) == 2: #[D,HW] for use when striding time and space separately
697 | 			strides = [1, stride[0], stride[1], stride[1], 1]
698 | 		elif len(stride) == 3: #[D,H,W]
699 | 			strides = [1, stride[0], stride[1], stride[2], 1]
700 | 		return tf.nn.conv3d(x, W, strides=strides, padding="SAME")
701 | 
702 | 	#---------------------------------------------------------------------------------
703 | 	def deconv2d(self, x, W, output_shape, stride=[1]):
704 | 		if len(stride) == 1:
705 | 			strides = [1, stride[0], stride[0], 1]
706 | 		elif len(stride) == 2:
707 | 			strides = [1, stride[0], stride[1], 1]
708 | 		return tf.nn.conv2d_transpose(x, W, output_shape=output_shape, strides=strides, padding="SAME")
709 | 
710 | 	def deconv3d(self, x, W, output_shape, stride=[1]):
711 | 		if len(stride) == 1:
712 | 			strides = [1, stride[0], stride[0], stride[0], 1]
713 | 		elif len(stride) == 2: # for use when striding time and space separately
714 | 			strides = [1, stride[0], stride[1], stride[1], 1]
715 | 		elif len(stride) == 3:
716 | 			strides = [1, stride[0], stride[1], stride[2], 1]
717 | 		return tf.nn.conv3d_transpose(x, W, output_shape=output_shape, strides=strides, padding="SAME")
718 | 
719 | 	def variable_summaries(self, var, name):
720 | 		"""Attach a lot of summaries to a Tensor."""
721 | 		with tf.name_scope('summaries'):
722 | 			mean = tf.reduce_mean(var)
723 | 			tf.summary.scalar('mean/' + name, mean)
724 | 			with tf.name_scope('stddev'):
725 | 				stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) # use the mean, not the sum, for an actual std dev
726 | 			tf.summary.scalar('stddev/' + name, stddev)
727 | 			tf.summary.scalar('max/' + name, tf.reduce_max(var))
728 | 			tf.summary.scalar('min/' + name, tf.reduce_min(var))
729 | 			tf.summary.histogram(name, var)
730 | 
731 | 
732 | # from https://github.com/bamos/dcgan-completion.tensorflow
733 | def lrelu(x, leak=0.2, name="lrelu"):
734 | 	with tf.variable_scope(name):
735 | 		f1 = 0.5 * (1 + leak)
736 | 		f2 = 0.5 * (1 - leak)
737 | 		return f1 * x + f2 * abs(x)
738 | 
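# Added note: in lrelu above, f1 + f2 = 1 and f1 - f2 = leak, so f1*x + f2*|x|
# evaluates to x for x > 0 and to leak*x for x < 0, ie, a standard leaky ReLU.
# A quick numpy check (a sketch, not part of the original file):
#   x = np.array([-2.0, -0.5, 0.0, 1.0, 3.0])
#   assert np.allclose(lrelu(x), np.where(x > 0, x, 0.2 * x))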
-------------------------------------------------------------------------------- /tools_wscale/fluiddataloader.py: --------------------------------------------------------------------------------
1 | #******************************************************************************
2 | #
3 | # tempoGAN: A Temporally Coherent, Volumetric GAN for Super-resolution Fluid Flow
4 | # Copyright 2018 You Xie, Erik Franz, Mengyu Chu, Nils Thuerey, Maximilian Werhahn
5 | #
6 | # This program is free software, distributed under the terms of the
7 | # Apache License, Version 2.0
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | #******************************************************************************
11 | import uniio
12 | import os, glob, re, math, threading
13 | import numpy as np
14 | import scipy.ndimage
15 | 
16 | # uniio (imported above) is necessary for loading uni files , could easily be disabled if necessary
17 | # data type for new python arrays
18 | FDG_DTYPE = np.float32
19 | 
20 | 
21 | class FluidDataLoader(object):
22 | 	""" Fluid Data Loader - load npz files from a collection of directories
23 | 
24 | 	label/GT data can be passed in different ways: an array with 1 data per dir,
25 | 	a filename for npz data, or a generic function called for each loaded input
26 | 	"""
27 | 
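	# Added hedged usage sketch (paths, sim index and filenames are hypothetical,
	# following the "density_%04d.npz" convention documented in the constructor
	# below):
	#   fdl = FluidDataLoader( base_path="../data/", indices=[1005],
	#   	filename="density_low_%04d.npz", filename_y="density_high_%04d.npz",
	#   	filename_index_min=110, filename_index_max=120 )
	#   x, y, xfn = fdl.get()  # x/y data arrays plus the list of x filenames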
28 | 	def __init__(self, print_info=1, base_path="../data/", base_path_y="../data/", simdirname="sim_%04d/", indices=[], numpy_seed = 17179023,
29 | 			filename=None, filename_index_min=0, filename_index_max=200, wildcard=None,
30 | 			array_y=None, filename_y=None, func_y=None, data_fraction=1.,
31 | 			shape=None, shape_y=None, collapse_z=False, shuffle_on_load=False,
32 | 			conv_slices = False, conv_axis = 0, density_threshold = 0.002, axis_scaling = [1,1,1,1], axis_scaling_y = [0.25,1,1,1],
33 | 			select_random = 1.0, add_adj_idcs = False, # add_adj_idcs: append the two adjacent slices as extra density channels (see addAdjSlices)
34 | 			multi_file_list=None, multi_file_list_y=None, multi_file_idxOff=None, multi_file_idxOff_y=None,
35 | 			postproc_func=None, postproc_func_y=None,
36 | 			np_load_string=None , np_load_string_y=None , oldNamingScheme=False):
37 | 		""" Constructor , arguments:
38 | 			print_info: debugging info , <=0 off, 1 some info, 2 full
39 | 			base_path: path prefix for all sim dirs
40 | 			base_path_y: path prefix for all sim dirs for high res data
41 | 			simdirname: sim directory name with printf placeholders (eg %04d) for indices
42 | 			indices: list of directory numbers to load
43 | 			filename: filename with printf placeholders (eg %04d) for numbered input data x
44 | 				typical example string: "density_%04d.npz"
45 | 				currently uni and npz files are supported
46 | 			filename_index_min: start index for filenames, controls index range (min to max)
47 | 			filename_index_max: maximal index for filenames
48 | 			wildcard: optional, use specified wildcard for regexp matching filenames in sim dir for x.
49 | 				has to contain a group (...) to extract ID string for y, group(1) used by default.
50 | 				note, if wildcard string is given, this overrides filename with index range
51 | 			multi_file_list: list of file name prefixes, if given, the loader will load and concatenate
52 | 				all corresponding files
53 | 			multi_file_list_y: " analogous for y
54 | 			multi_file_idxOff: list of file index offsets for files multi_file_list
55 | 				can be used to load files with different index into same data entry
56 | 			multi_file_idxOff_y: " analogous for y
57 | 			postproc_func: function to be called for every data sample, can be used to post-process
58 | 				data in a custom way
59 | 			postproc_func_y: " analogous for y
60 | 			array_y: optional, label data as array, 1 entry per dir
61 | 			filename_y: optional, filenames for label data; needs a $ placeholder if used with wildcard
62 | 			func_y: optional, labelling func, called with sim index, filename and file index for every loaded entry
63 | 			data_fraction: don't load all files, only this fraction of them
64 | 			shape, shape_y: target shapes for x,y data; input shapes are determined from files to load;
65 | 				warning - can easily rescale & interpolate channels if the target channel count doesn't match the source channels.
66 | 				note - only used for scaling, no reshapes; if no shape is given, pass through data unmodified
67 | 			collapse_z: remove Z-axis for 2d data sets, ie, transform ZYXc to YXc when z==1
68 | 				also removes Z-component for pure velocity 2d data sets, ie, changes c=3 to c=2; TODO , make separate switch? (removeZComponent)
69 | 			shuffle_on_load: randomize order on load? definitely not recommended for validation sets
70 | 				note: better use flow(... , shuffle=True) in most cases
71 | 			conv_slices: data has to be 3d -> convert one axis into 2d slices
72 | 			conv_axis: if conv_slices -> chosen axis for the slicing (transpose if necessary)
73 | 			density_threshold: density threshold for discarding slices
74 | 			axis_scaling: upsamples the converted axis linearly
75 | 			axis_scaling_y: upsamples the converted axis linearly for the high res sim
76 | 			select_random: selects random frames from the left over slices (1.0 -> select all)
77 | 			np_load_string: by default, load "arr_0" from npz files, change using this string
78 | 			np_load_string_y: same as np_load_string but for loading y data; if none is given,
79 | 				the string for x is used (np_load_string)
80 | 			oldNamingScheme: revert to old scheme with double indices for dir & filename
81 | 				by default the loader expects: data/sim_XXXX/density_low_YYYY.sth
82 | 				the old naming scheme was: data/sim_XXXX/frame_YYYY/density_low_XXXX_YYYY.sth
83 | 
84 | 		"""
85 | 		# path basics
86 | 		self.base_path = base_path
87 | 		self.base_path_y = base_path_y
88 | 		self.simdirname = simdirname
89 | 		self.indices = indices
90 | 
91 | 		# x data files
92 | 		self.filename = filename
93 | 		self.filename_index_min = filename_index_min
94 | 		self.filename_index_max = filename_index_max
95 | 		self.wildcard = wildcard
96 | 
97 | 		self.multi_file_list = multi_file_list
98 | 		self.multi_file_list_y = multi_file_list_y
99 | 		self.multi_file_idxOff = multi_file_idxOff
100 | 		self.multi_file_idxOff_y = multi_file_idxOff_y
101 | 		self.postproc_func = postproc_func
102 | 		self.postproc_func_y = postproc_func_y
103 | 
104 | 		# y data for labeling x
105 | 		self.filename_y = filename_y
106 | 		self.array_y = array_y
107 | 		self.func_y = func_y
108 | 
109 | 		# further options
110 | 		self.data_fraction = data_fraction
111 | 		self.shape = shape
112 | 		self.shape_y = shape_y
113 | 		self.collapse_z = collapse_z
114 | 		self.shuffle_on_load = shuffle_on_load
115 | 
116 | 		self.conv_slices = conv_slices
117 | 		self.conv_axis = conv_axis
118 | 		self.density_threshold = density_threshold
119 | 		self.axis_scaling = axis_scaling
120 | 		self.axis_scaling_y = axis_scaling_y
121 | 		self.select_random = select_random
122 | 		self.add_adj_idcs = add_adj_idcs
123 | 
124 | 		# initialize npz load
125 | 		if np_load_string is not None:
126 | 			self.np_load_string = np_load_string
127 | 		else:
128 | 			self.np_load_string = "arr_0"
129 | 		np.random.seed(numpy_seed) # fixed seed for reproducible shuffling / slice selection
130 | 		# input data
131 | 		inCnt = 0
132 | 		if self.filename is not None: inCnt += 1
133 | 		if self.wildcard is not None: inCnt += 1
134 | 		if inCnt>1:
135 | 			# sanity check - only one of those allowed
136 | 			raise FluidDataLoaderError("FluidDataLoader error: for input data loading, only specify one of: input filename, or wildcard")
137 | 		# label data
138 | 		inCnt = 0
139 | 		if self.filename_y is not None: inCnt += 1
140 | 		if self.array_y is not None: inCnt += 1
141 | 		if self.func_y is not None: inCnt += 1
142 | 		if inCnt>1:
143 | 			# sanity check - only one of those allowed
144 | 			raise FluidDataLoaderError("FluidDataLoader error: for label data loading, only specify one of: input filename, array or function")
145 | 
146 | 		if np_load_string_y is not None:
147 | 			self.np_load_string_y = 
np_load_string_y 148 | else: 149 | self.np_load_string_y = self.np_load_string 150 | 151 | self.print_info = print_info 152 | if self.print_info: 153 | print("FluidDataLoader init, path %s, filename %s" % (self.base_path,self.filename) ) 154 | self.oldNamingScheme = oldNamingScheme 155 | 156 | # sanity check file lists 157 | if self.multi_file_idxOff is not None and self.multi_file_list is not None: 158 | if len(self.multi_file_list) != len(self.multi_file_idxOff): 159 | raise FluidDataLoaderError("FluidDataLoader error: multi file list and idxOff lists have to match " + format([len(self.multi_file_list) , len(self.multi_file_idxOff)]) ) 160 | if self.multi_file_idxOff_y is not None and self.multi_file_list_y is not None: 161 | if len(self.multi_file_list_y) != len(self.multi_file_idxOff_y): 162 | raise FluidDataLoaderError("FluidDataLoader error: multi file list and idxOff lists for y have to match " + format([len(self.multi_file_list_y) , len(self.multi_file_idxOff_y)]) ) 163 | 164 | # all initialized upon load: 165 | self.x = None 166 | self.y = None 167 | self.xfn = None 168 | self.have_y_npz = False # does y contain numpy array data? 169 | 170 | self.loadDirs() 171 | self.printStats() 172 | 173 | def getFilename(self, sim_index, fnbase, frame_index, file_path): 174 | if not self.oldNamingScheme: 175 | fn = os.path.join( file_path, os.path.join((self.simdirname % sim_index), (fnbase % frame_index)) ) 176 | else: 177 | # both parts simdir & file have both indices! 178 | fn = os.path.join( file_path, os.path.join( (self.simdirname % (sim_index,frame_index)), (fnbase % (sim_index,frame_index) ) )) 179 | return fn 180 | 181 | def collectFilenamesFromDir(self, list_index): 182 | """ Build filename list from single dir 183 | list_index: number in index list (or alternatively label list) 184 | """ 185 | 186 | sim_index = self.indices[list_index] # get simulation directory index from list 187 | labelstr = "" # debug info only 188 | foundCnt = 0 189 | 190 | if self.wildcard is not None: 191 | search_dir = os.path.join( self.base_path, (self.simdirname % sim_index) ) 192 | os.chdir(search_dir) 193 | allFiles = [f for f in glob.glob("*") if os.path.isfile(f)] # list all files 194 | files = [] 195 | for f in allFiles: 196 | match = re.search(self.wildcard, f) # note, matched again below... 197 | if match: 198 | files.append(f) 199 | 200 | if len(files)<1: 201 | raise FluidDataLoaderError("Error - no files found in directory '%s' with wildcard '%s' " %(search_dir, self.wildcard) ) 202 | 203 | files = sorted(files) # sort by name 204 | 205 | n = max(1, int(len(files)*self.data_fraction)) 206 | tf = float(len(files))/n # spread over time range (eg 200 frames) 207 | fcnt = 0 208 | for t in range(0,n): 209 | filelist_index = int(t*tf) # full range 210 | fn = files[filelist_index] 211 | self.xfn.append(os.path.join(search_dir, fn)) 212 | foundCnt += 1 213 | 214 | # construct label, closely follows index version below 215 | if self.filename_y is not None: 216 | mx = re.search(self.wildcard, fn) 217 | listy = self.filename_y.split("$") 218 | if(len(listy)!=2): 219 | raise FluidDataLoaderError("Error - when using a wildcard for x, filename_y needs to contain exactly one '$' where the file id string from x will be inserted to build the filename for y. 
Current, invalid, filename_y is '%s' " %(self.filename_y) )
220 | 					fny = listy[0] + mx.group(1) + listy[1]
221 | 
222 | 					if not os.path.isfile(fny): # make sure file for y exists
223 | 						raise FluidDataLoaderError("Error - y file '%s' for x file '%s' doesn't exist in search dir '%s' " %(fny, fn, search_dir ) )
224 | 
225 | 					fny = os.path.join(search_dir, fny)
226 | 					self.yfn.append(fny)
227 | 					self.have_y_npz = True # flag to indicate we have np arrays in y
228 | 
229 | 				if self.array_y is not None:
230 | 					if self.y is None:
231 | 						self.y = []
232 | 					self.y.append( self.array_y[list_index] )
233 | 					labelstr = " with label " + format( self.array_y[list_index] )
234 | 
235 | 				if self.func_y is not None:
236 | 					print("NYI! test...")
237 | 
238 | 		else:
239 | 			# "simple" index range
240 | 			n = max(1, int((self.filename_index_max-self.filename_index_min)*self.data_fraction))
241 | 			tf = float(self.filename_index_max-self.filename_index_min)/n
242 | 			for t in range(0,n):
243 | 				filelist_index = int(self.filename_index_min + t*tf) # full range
244 | 
245 | 				fn = self.getFilename(sim_index, self.filename, filelist_index, file_path= self.base_path)
246 | 				self.xfn.append(fn)
247 | 				foundCnt += 1
248 | 
249 | 				if self.filename_y is not None:
250 | 					fny = self.getFilename(sim_index, self.filename_y, filelist_index, file_path= self.base_path_y)
251 | 					self.yfn.append(fny)
252 | 					self.have_y_npz = True # flag to indicate we have np arrays in y
253 | 
254 | 				if self.array_y is not None:
255 | 					if self.y is None:
256 | 						self.y = []
257 | 					self.y.append( self.array_y[list_index] )
258 | 					labelstr = " with label " + format( self.array_y[list_index] )
259 | 
260 | 				if self.func_y is not None:
261 | 					print("NYI! test...")
262 | 					if self.y is None:
263 | 						self.y = []
264 | 					self.y.append( self.func_y(list_index, sim_index, t, fn) )
265 | 
266 | 		if self.print_info:
267 | 			print("Found " +format(foundCnt) +" files from sim ID "+format(sim_index) + labelstr )
268 | 
269 | 
270 | 	def getDim(self,shape):
271 | 		""" small helper to compute dimensionality of data from shape
272 | 		"""
273 | 		dim = -1
274 | 		if len(shape)==4: # probably ZYXc
275 | 			if(shape[0]==1):
276 | 				dim = 2
277 | 			else:
278 | 				dim = 3
279 | 		if len(shape)==5: # probably 4d, TZYXc
280 | 			dim = 4
281 | 		#print("Dim "+format(dim)+ " for " + format(shape) )
282 | 		return dim
283 | 
284 | 	def removeZComponent(self,x):
285 | 		""" Optional, and 2D only: remove Z entry from 3d vec fields
286 | 		"""
287 | 		if not self.collapse_z: return x
288 | 		if not self.getDim( x.shape )==2: return x
289 | 		if not x.shape[3]==3: return x # only apply for pure velocity grids with 3 channels
290 | 		x2d = np.zeros( (1,x.shape[1],x.shape[2],2), dtype=FDG_DTYPE )
291 | 		x2d[:,:,:,0] = x[:,:,:,0] # only keep x,y
292 | 		x2d[:,:,:,1] = x[:,:,:,1]
293 | 		return x2d
294 | 
295 | 	def removeSlices(self, fx, fy = None):
296 | 		"""
297 | 		If converting 3D data to slices: remove slices whose average density is below the density threshold
298 | 		"""
299 | 		real_size = 0
300 | 
301 | 		fx_out = np.zeros_like(fx)
302 | 		if fy is not None:
303 | 			fy_out = np.zeros_like(fy)
304 | 		for i in range(fx.shape[0]):
305 | 			if float(np.average(fx[i,:,:,0:1])) >= self.density_threshold:
306 | 				fx_out[real_size] = fx[i]
307 | 				if fy is not None:
308 | 					fy_out[real_size] = fy[i]
309 | 				real_size += 1
310 | 		if fy is None:
311 | 			return fx_out[0:real_size]
312 | 		else:
313 | 			return fx_out[0:real_size], fy_out[0:real_size]
314 | 
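	# Added note: removeSlices above assumes channel 0 holds density and keeps a
	# slice only if its mean density reaches density_threshold; with the default
	# threshold of 0.002, a 256x256 slice needs an average density of at least
	# 0.002 (roughly 131 fully dense cells) to survive.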
315 | 	def addAdjSlices(self, fx):
316 | 		"""
317 | 		adds the two adjacent slices as additional density channels
318 | 		"""
319 | 
320 | 		fx_shape = fx.shape
321 | 		fx = fx.reshape((fx_shape[0], fx_shape[1], fx_shape[2], 3, -1))
322 | 		fx_shape = fx.shape
323 | 		fx_out = np.zeros((fx_shape[0], fx_shape[1], fx_shape[2], fx_shape[3], fx_shape[4] + 2))
324 | 		fx_out[:,:,:,:,0:fx_shape[4]] = fx
325 | 
326 | 		for i in range(fx_shape[0]):
327 | 			if i == 0:
328 | 				fx_out[i:i+1,:,:,:,fx_shape[4]:fx_shape[4]+1] = np.zeros_like(fx[i:i+1,:,:,:,0:1])
329 | 				fx_out[i:i+1,:,:,:,fx_shape[4]+1:fx_shape[4]+2] = fx[i+1:i+2,:,:,:,0:1]
330 | 			elif i == fx_shape[0]-1:
331 | 				fx_out[i:i+1,:,:,:,fx_shape[4]:fx_shape[4]+1] = fx[i-1:i,:,:,:,0:1]
332 | 				fx_out[i:i+1,:,:,:,fx_shape[4]+1:fx_shape[4]+2] = np.zeros_like(fx[i-1:i,:,:,:,0:1])
333 | 			else:
334 | 				fx_out[i:i+1,:,:,:,fx_shape[4]:fx_shape[4]+1] = fx[i-1:i,:,:,:,0:1]
335 | 				fx_out[i:i+1,:,:,:,fx_shape[4]+1:fx_shape[4]+2] = fx[i+1:i+2,:,:,:,0:1]
336 | 		return fx_out.reshape((fx_shape[0], fx_shape[1], fx_shape[2], -1))
337 | 
338 | 	def selectRandomSamples(self, fx, fy = None):
339 | 		"""
340 | 		selects a random select_random fraction of the samples from fx / fy
341 | 		"""
342 | 		rnd_length = int(fx.shape[0] * self.select_random)
343 | 		idcs = np.random.permutation(fx.shape[0]) # fixed: np.random.shuffle shuffles in place and returns None
344 | 		if fy is None:
345 | 			return fx[idcs][0:rnd_length]
346 | 		else:
347 | 			return fx[idcs][0:rnd_length], fy[idcs][0:rnd_length]
348 | 
349 | 	def mogrifyFilenameIndex(self, fn, idxOffset):
350 | 		""" Parse, determine index, and change
351 | 		"""
352 | 		match = re.search("(.*_)([\d]+)\.([\w]+)", fn) # split into groups: path/name_ , %04d , ext
353 | 		if match:
354 | 			if len(match.groups())!=3:
355 | 				raise FluidDataLoaderError("FluidDataLoader error: got filename %s, but could not fully split up into name, 4-digit index and extension " % (fn))
356 | 			#print "A " + format(match.groups())
357 | 			idx = int(match.group(2))
358 | 			idx = max(self.filename_index_min, min(self.filename_index_max-1, idx+idxOffset) )
359 | 			#print "A " + format(match.group(2))
360 | 			fn = "%s%04d.%s" % (match.group(1), idx, match.group(3))
361 | 			#print "fn " + fn
362 | 		else:
363 | 			raise FluidDataLoaderError("FluidDataLoader error: got filename %s, but could not split up into name, 4-digit index and extension " % (fn))
364 | 
365 | 		#exit()
366 | 		# density1_([\w]+).npz
367 | 		return fn
368 | 
369 | 	def loadSingleDatum(self, fn, lstr, idxOffset=0):
370 | 		""" Determine file type and load
371 | 		"""
372 | 		if idxOffset!=0:
373 | 			fn = self.mogrifyFilenameIndex(fn,idxOffset)
374 | 		if self.print_info>1:
375 | 			print("Loading: "+fn+", "+lstr)
376 | 		# detect file type
377 | 		if fn.endswith( ".npz" ):
378 | 			ar = np.load(fn)[ lstr ]
379 | 		elif fn.endswith( ".uni" ):
380 | 			_, ar = uniio.readUni(fn) # load-string lstr not needed for uni files
381 | 			#ar = ar[::-1] # make a copy of the array in reverse order
382 | 		else:
383 | 			raise FluidDataLoaderError("FluidDataLoader error: got filename %s, but only .uni or .npz are supported at the moment " % (fn))
384 | 
385 | 		return ar
386 | 
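	# Added note: loadSingleDatum dispatches on the file extension, so both of
	# these work (the file names are hypothetical):
	#   ar = self.loadSingleDatum("density_low_0110.npz", "arr_0")  # npz branch, loads array "arr_0"
	#   ar = self.loadSingleDatum("density_low_0110.uni", "arr_0")  # uni branch, lstr is ignored
	# A non-zero idxOffset first rewrites the 4-digit index via mogrifyFilenameIndex,
	# clamped to [filename_index_min, filename_index_max-1].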
387 | 	def loadFiles(self):
388 | 		""" Load all NPZs from list.
389 | 			Note, data always has to have shape ZYXc (3d) or YXc (2d),
390 | 			where c is channels (eg 3 for vels, 1 for scalar data).
391 | 		"""
392 | 		# maybe discard slices if they are below some density threshold (because of memory limitations)
393 | 		if self.conv_slices:
394 | 			true_n = 0
395 | 		n = len(self.xfn)
396 | 		for t in range(n):
397 | 			fof = 0 if self.multi_file_idxOff is None else self.multi_file_idxOff[0]
398 | 			fx = self.loadSingleDatum(self.xfn[t], self.np_load_string , fof)
399 | 
400 | 			if self.multi_file_list is not None:
401 | 				# concat multiple files...
402 | 				basename = self.xfn[t]
403 | 				if not basename.find(self.multi_file_list[0])>=0:
404 | 					raise FluidDataLoaderError("Error, input filename '%s' doesn't contain the given string '%s'"%(basename,self.multi_file_list[0]))
405 | 				for i in range(1,len(self.multi_file_list)):
406 | 					fnr = basename.replace(self.multi_file_list[0] , self.multi_file_list[i])
407 | 					fof = 0 if self.multi_file_idxOff is None else self.multi_file_idxOff[i]
408 | 					_fx = self.loadSingleDatum(fnr, self.np_load_string , fof )
409 | 					fx = np.append( fx, _fx , axis=len(fx.shape)-1 )
410 | 
411 | 			# apply post-processing function (if given)
412 | 			if self.postproc_func is not None:
413 | 				fx = self.postproc_func(fx, self)
414 | 
415 | 			# transpose axis + adjust velocity
416 | 			if self.conv_slices:
417 | 				if self.conv_axis == 1:
418 | 					fx = fx.transpose(1,0,2,3)
419 | 					if fx.shape[3] > 3:
420 | 						for i in range(3):
421 | 							fx[:,:,:,i*4 + 2:i*4 + 3], fx[:,:,:,i*4 + 3:i*4 + 4] = np.copy(fx[:,:,:,i*4 + 3:i*4 + 4]), np.copy(fx[:,:,:,i*4 + 2:i*4 + 3])
422 | 							# fx[:,:,:,i*4 + 1:i*4 + 4] *= np.asarray(self.axis_scaling)[0:3]
423 | 
424 | 				elif self.conv_axis == 2:
425 | 					fx = fx.transpose(2,1,0,3)
426 | 					# swap velocity channels
427 | 					if fx.shape[3] > 3:
428 | 						for i in range(3):
429 | 							fx[:,:,:,i*4 + 1:i*4 + 2], fx[:,:,:,i*4 + 3:i*4 + 4] = np.copy(fx[:,:,:,i*4 + 3:i*4 + 4]), np.copy(fx[:,:,:,i*4 + 1:i*4 + 2])
430 | 							# fx[:,:,:,i*4 + 1:i*4 + 4] *= np.asarray(self.axis_scaling)[0:3]
431 | 				# TODO UPDATE FILE
432 | 
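			# Added note on the block above: the channels are assumed to be packed in
			# groups of four, [density, vx, vy, vz] (hence the i*4 offsets). When the
			# slicing axis is transposed, the velocity components have to follow:
			# slicing along axis 1 swaps vy and vz, slicing along axis 2 swaps vx and vz.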
433 | 			# ... and the same again for y
434 | 			if self.have_y_npz:
435 | 				fofy = 0 if self.multi_file_idxOff_y is None else self.multi_file_idxOff_y[0]
436 | 				fy = self.loadSingleDatum(self.yfn[t], self.np_load_string_y , fofy )
437 | 
438 | 				if self.multi_file_list_y is not None:
439 | 					basename = self.yfn[t]
440 | 					if not basename.find(self.multi_file_list_y[0])>=0:
441 | 						raise FluidDataLoaderError("Error, input filename y '%s' doesn't contain the given string '%s'"%(basename,self.multi_file_list_y[0]))
442 | 					for i in range(1,len(self.multi_file_list_y)):
443 | 						fnr = basename.replace(self.multi_file_list_y[0] , self.multi_file_list_y[i])
444 | 						fofy = 0 if self.multi_file_idxOff_y is None else self.multi_file_idxOff_y[i]
445 | 						_fy = self.loadSingleDatum(fnr, self.np_load_string_y , fofy )
446 | 						fy = np.append( fy, _fy , axis=len(fy.shape)-1 )
447 | 
448 | 				if self.postproc_func_y is not None:
449 | 					fy = self.postproc_func_y(fy, self)
450 | 
451 | 				if self.conv_slices:
452 | 					if self.conv_axis == 1:
453 | 						fy = fy.transpose(1,0,2,3)
454 | 					elif self.conv_axis == 2:
455 | 						fy = fy.transpose(2,1,0,3)
456 | 
457 | 			fx = self.removeZComponent(fx) # optional!
458 | 
459 | 			# initialize x/y arrays upon first use
460 | 			if self.x is None:
461 | 				self.data_shape = fx.shape
462 | 
463 | 				if self.shape is None: # no target shape? use data res
464 | 					self.shape = fx.shape * np.asarray( self.axis_scaling )
465 | 					if self.add_adj_idcs:
466 | 						self.shape[3] += 6
467 | 					self.do_zoom = False
468 | 				else:
469 | 					self.do_zoom = True
470 | 					self.zoom_shape = []
471 | 					for i in range(len(self.shape)):
472 | 						self.zoom_shape.append( float(self.shape[i]) / self.data_shape[i] )
473 | 					#if self.collapse_z and self.dim==2: self.zoom_shape[ len(self.zoom_shape)-1 ] = 1. # old, dont zoom channels
474 | 					if self.print_info: print("Zoom for x by "+format(self.zoom_shape) )
475 | 
476 | 				if self.print_info: print("Allocating x data for "+format(n)+" entries of size "+format(self.shape) )
477 | 				if self.conv_slices:
478 | 					# n*self.shape[0] instead of 28000
479 | 					self.x = np.zeros( tuple([int(n*self.shape[0]*self.select_random)]+list(self.shape[1:len(self.shape)])) , dtype=FDG_DTYPE)
480 | 				else:
481 | 					self.x = np.zeros( tuple([n]+list(self.shape)) , dtype=FDG_DTYPE )
482 | 				# optional zoom, is initialized with original array
483 | 
484 | 			if self.have_y_npz:
485 | 				fy = self.removeZComponent(fy)
486 | 
487 | 				if self.y is None:
488 | 					self.data_shape_y = fy.shape * np.asarray(self.axis_scaling_y)
489 | 					if self.shape_y is None: # no target shape? use data res
490 | 						self.shape_y = fy.shape
491 | 						self.do_zoom = False
492 | 					else:
493 | 						self.do_zoom = True
494 | 						self.zoom_shape_y = []
495 | 						for i in range(len(self.shape_y)):
496 | 							self.zoom_shape_y.append( float(self.shape_y[i]) / self.data_shape_y[i] )
497 | 						if self.print_info: print("Zoom for y by "+format(self.zoom_shape_y) )
498 | 					if self.print_info: print("Allocating y data for "+format(n)+" entries of size "+format(self.shape_y) )
499 | 					if self.conv_slices:
500 | 						self.y = np.zeros( tuple([self.x.shape[0]]+list(self.shape_y[1:len(self.shape_y)])) , dtype=FDG_DTYPE)
501 | 					else:
502 | 						self.y = np.zeros( tuple([n]+list(self.shape_y)) , dtype=FDG_DTYPE )
503 | 
504 | 				if self.do_zoom:
505 | 					fy = scipy.ndimage.zoom( fy, self.zoom_shape_y, order=1 )
506 | 
507 | 			if self.do_zoom:
508 | 				fx = scipy.ndimage.zoom( fx, self.zoom_shape, order=1 )
509 | 
510 | 			if self.conv_slices:
511 | 				if self.have_y_npz:
512 | 					fx = scipy.ndimage.zoom( fx, self.axis_scaling, order=1)
513 | 					fy = scipy.ndimage.zoom( fy, self.axis_scaling_y, order=1)
514 | 					if self.add_adj_idcs:
515 | 						fx = self.addAdjSlices(fx)
516 | 					fx, fy = self.removeSlices(fx, fy)
517 | 					fx, fy = self.selectRandomSamples(fx, fy)
518 | 				else:
519 | 					fx = scipy.ndimage.zoom( fx, self.axis_scaling, order=1)
520 | 					if self.add_adj_idcs:
521 | 						fx = self.addAdjSlices(fx) # fixed: was addAdjSlices(fxs), an undefined name
522 | 					fx = self.removeSlices(fx)
523 | 					fx = self.selectRandomSamples(fx)
524 | 			# finally store t-th data sample
525 | 			if self.conv_slices:
526 | 				self.x[true_n:true_n + fx.shape[0],:] = fx
527 | 			else:
528 | 				self.x[t,:] = fx
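			# Added note: in slice mode (conv_slices) each frame passes through the
			# chain zoom (axis_scaling) -> addAdjSlices (optional) -> removeSlices
			# (density threshold) -> selectRandomSamples (select_random fraction)
			# before being appended to self.x at offset true_n.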
529 | 
530 | 			# and again for y ...
531 | 			if self.have_y_npz:
532 | 				if self.conv_slices:
533 | 					self.y[true_n:true_n + fy.shape[0],:] = fy
534 | 				else:
535 | 					self.y[t,:] = fy
536 | 			if self.conv_slices:
537 | 				true_n += fx.shape[0]
538 | 			if self.print_info and t==0: print("loadFiles: data size x "+ format(self.x.shape) + ((", y " + format(self.y.shape)) if self.filename_y is not None else "") )
539 | 
540 | 		if self.print_info and self.conv_slices: print("Removed " + format(self.x.shape[0]-true_n) + " slices by checking against the density_threshold " + format(self.density_threshold) + " and randomly selecting a fraction of " + format(self.select_random) + " of the remaining slices.")
541 | 		if self.conv_slices:
542 | 			self.x = self.x[0:true_n]
543 | 			self.y = self.y[0:true_n]
544 | 		# x (and optionally y) arrays complete now, retrieve with get() later on
545 | 
546 | 
547 | 
548 | 	def loadDirs(self):
549 | 		""" Main load function: collect all files in multiple directories,
550 | 			and load the necessary fraction; potentially rescale (zoom) data, if enabled
551 | 		"""
552 | 		self.xfn = []
553 | 		self.yfn = []
554 | 		currDir = os.getcwd()
555 | 
556 | 		for i in range(len(self.indices)):
557 | 			self.collectFilenamesFromDir( i )
558 | 			os.chdir( currDir )
559 | 
560 | 		# debug info, print full lists
561 | 		if self.print_info>1:
562 | 			#print("Full list x: "+format(self.xfn)); print("Full list y: "+format(self.yfn))
563 | 			print( "\nfilenames x:" ); print( ("\n".join(self.xfn)) )
564 | 			if self.filename_y is not None:
565 | 				print( "\nfilenames y:" ); print( ("\n".join(self.yfn)) )
566 | 
567 | 		self.loadFiles()
568 | 		os.chdir( currDir )
569 | 
570 | 		# remove z axis of all 3D data fields for whole data vector
571 | 		if self.collapse_z:
572 | 			if self.getDim(self.x[0].shape)==2:
573 | 				self.x = np.reshape( self.x, [self.x.shape[0], self.shape[1],self.shape[2],self.shape[3]] ) # remove z-axis for x
574 | 			if self.have_y_npz and self.getDim(self.y[0].shape)==2:
575 | 				self.y = np.reshape( self.y, [self.y.shape[0], self.shape_y[1],self.shape_y[2],self.shape_y[3]] )
576 | 
577 | 		# do manual shuffling once (needs to reorder x,y and filenames for x,y)
578 | 		if self.shuffle_on_load:
579 | 			idxr = np.random.permutation(self.x.shape[0])
580 | 			self.x = self.x[idxr]
581 | 			if self.have_y_npz: self.y = self.y[idxr] # y is np array , reorder...
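			# the filename lists below are rebuilt with the same permutation idxr,
			# so inputs, labels and names stay aligned after shuffling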
582 | 583 | xfn2,yfn2,y2 = [],[],[] 584 | for i in range(len(self.xfn)): 585 | xfn2.append( self.xfn[idxr[i]] ) 586 | if not self.have_y_npz and self.y is not None: y2.append( self.y[idxr[i]] ) # non np array y 587 | if self.filename_y is not None: yfn2.append( self.yfn[idxr[i]] ) 588 | self.xfn, self.yfn = xfn2,yfn2 589 | if not self.have_y_npz and self.y is not None: self.y = y2 590 | # loading done 591 | 592 | 593 | def arrayStats(self, values, weights=None): 594 | average = np.average(values) #, weights=weights) 595 | variance = np.average((values-average)**2) #, weights=weights) # Fast and numerically precise 596 | return (average, math.sqrt(variance)) 597 | 598 | def perChannelStats(self, values, info=None): 599 | if values.shape[-1]>1: 600 | if info: 601 | print(format(info)) 602 | for c in range(values.shape[-1]): 603 | print("\t\t"+format(c)+": "+format(self.arrayStats(values[...,c]) )) 604 | 605 | def printStats(self): 606 | """ General info about loaded data sets """ 607 | if self.print_info: 608 | print("Loaded "+format(self.x.shape[0])+" datasets" + (", shuffled" if self.shuffle_on_load else "") ) 609 | print("\tData shape x " + format(self.x.shape)) 610 | print("\tx mean & std dev: " + format(self.arrayStats(self.x))) 611 | self.perChannelStats(self.x, "\tPer channel mean & std dev x: ") 612 | if self.have_y_npz: 613 | print("\tData shape y " + format(self.y.shape)) 614 | print("\ty mean & std dev: " + format(self.arrayStats(self.y))) 615 | 616 | def get(self): 617 | """ After loading, return arrays 618 | """ 619 | return self.x , self.y , self.xfn 620 | 621 | def getFullInfo(self): 622 | """ Summarize full data set as string 623 | """ 624 | ret = "" 625 | printMean = True 626 | for i in range(len(self.xfn)): 627 | ret = ret + ("%d/%d, file %s, shape %s" % (i, len(self.xfn), self.xfn[i], format(self.x[i].shape) )) 628 | if printMean: 629 | ret = ret + (", x mean %s " % (format(np.mean(self.x[i])) )) 630 | if self.filename_y is not None: 631 | ret = ret + (", file_y %s " % (self.yfn[i]) ) 632 | if self.have_y_npz: 633 | ret = ret + (", shape_y %s " % (format(self.y[i].shape)) ) 634 | if printMean: 635 | ret = ret + (", y mean %s " % (format(np.mean(self.y[i])) )) 636 | if self.array_y is not None: 637 | ret = ret + (", y %s " % (format(self.y[i])) ) 638 | ret = ret + "\n" 639 | return ret 640 | 641 | class FluidDataLoaderError(Exception): 642 | ''' FDL errors ''' 643 | 644 | 645 | -------------------------------------------------------------------------------- /tools_wscale/flushmem.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | t = np.zeros((1024,1024,1024,16)) 4 | t += 1.0 5 | print(t.shape) 6 | exit(1) 7 | -------------------------------------------------------------------------------- /tools_wscale/old/fluiddataloader.py: -------------------------------------------------------------------------------- 1 | #****************************************************************************** 2 | # 3 | # tempoGAN: A Temporally Coherent, Volumetric GAN for Super-resolution Fluid Flow 4 | # Copyright 2018 You Xie, Erik Franz, Mengyu Chu, Nils Thuerey, Maximilian Werhahn 5 | # 6 | # This program is free software, distributed under the terms of the 7 | # Apache License, Version 2.0 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | #****************************************************************************** 11 | 12 | import os, glob, re, math, threading 13 | import numpy as np 14 | import scipy.ndimage 15 
|
16 | # necessary for loading uni files , could easily be disabled if necessary
17 | import uniio
18 | # data type for new python arrays
19 | FDG_DTYPE = np.float32 #np.float32
20 | 
21 | 
22 | class FluidDataLoader(object):
23 | 	""" Fluid Data Loader - load npz files from a collection of directories
24 | 
25 | 	label/GT data can be passed in different ways: an array with 1 data per dir,
26 | 	a filename for npz data, or a generic function called for each loaded input
27 | 	"""
28 | 
29 | 	def __init__(self, print_info=0, base_path="../data/", simdirname="sim_%04d/", indices=[],
30 | 			filename=None, filename_index_min=0, filename_index_max=200, wildcard=None,
31 | 			array_y=None, filename_y=None, func_y=None, data_fraction=1.,
32 | 			shape=None, shape_y=None, collapse_z=False, shuffle_on_load=False,
33 | 			multi_file_list=None, multi_file_list_y=None, multi_file_idxOff=None, multi_file_idxOff_y=None,
34 | 			postproc_func=None, postproc_func_y=None,
35 | 			np_load_string=None , np_load_string_y=None , oldNamingScheme=False):
36 | 		""" Constructor , arguments:
37 | 			print_info: debugging info , <=0 off, 1 some info, 2 full
38 | 			base_path: path prefix for all sim dirs
39 | 			simdirname: sim directory name with printf placeholders (eg %04d) for indices
40 | 			indices: list of directory numbers to load
41 | 			filename: filename with printf placeholders (eg %04d) for numbered input data x
42 | 				typical example string: "density_%04d.npz"
43 | 				currently uni and npz files are supported
44 | 			filename_index_min: start index for filenames, controls index range (min to max)
45 | 			filename_index_max: maximal index for filenames
46 | 			wildcard: optional, use specified wildcard for regexp matching filenames in sim dir for x.
47 | 				has to contain a group (...) to extract ID string for y, group(1) used by default.
48 | 				note, if wildcard string is given, this overrides filename with index range
49 | 			multi_file_list: list of file name prefixes, if given, the loader will load and concatenate
50 | 				all corresponding files
51 | 			multi_file_list_y: " analogous for y
52 | 			multi_file_idxOff: list of file index offsets for files multi_file_list
53 | 				can be used to load files with different index into same data entry
54 | 			multi_file_idxOff_y: " analogous for y
55 | 			postproc_func: function to be called for every data sample, can be used to post-process
56 | 				data in a custom way
57 | 			postproc_func_y: " analogous for y
58 | 			array_y: optional, label data as array, 1 entry per dir
59 | 			filename_y: optional, filenames for label data; needs a $ placeholder if used with wildcard
60 | 			func_y: optional, labelling func, called with sim index, filename and file index for every loaded entry
61 | 			data_fraction: don't load all files, only this fraction of them
62 | 			shape, shape_y: target shapes for x,y data; input shapes are determined from files to load;
63 | 				warning - can easily rescale & interpolate channels if target channels don't match source channels.
64 | 				note - only used for scaling. no reshapes; if no shape is given, pass through data unmodified
65 | 			collapse_z: remove Z-axis for 2d data sets, ie, transform ZYXc to YXc when z==1
66 | 				also removes Z-component for pure velocity 2d data sets, ie, changes c=3 to c=2; TODO , make separate switch? (removeZComponent)
67 | 			shuffle_on_load: randomize order on load? definitely not recommended for validation set
68 | 				note: better use flow(... 
, shuffle=True) in most cases 69 | np_load_string: by default, load "arr_0" from npz files, change using this string 70 | np_load_string_y: same as np_load_string but for loading y data; if none is given, 71 | the string for x is used (np_load_string) 72 | oldNamingScheme: revert to old scheme with double indices for dir & filename 73 | by default the loader expects: data/sim_XXXX/density_low_YYYY.sth 74 | the old naming scheme was: data/sim_XXXX/frame_YYYY/density_low_XXXX_YYYY.sth 75 | 76 | """ 77 | # path basics 78 | self.base_path = base_path 79 | self.simdirname = simdirname 80 | self.indices = indices 81 | 82 | # x data files 83 | self.filename = filename 84 | self.filename_index_min = filename_index_min 85 | self.filename_index_max = filename_index_max 86 | self.wildcard = wildcard 87 | 88 | self.multi_file_list = multi_file_list 89 | self.multi_file_list_y = multi_file_list_y 90 | self.multi_file_idxOff = multi_file_idxOff 91 | self.multi_file_idxOff_y = multi_file_idxOff_y 92 | self.postproc_func = postproc_func 93 | self.postproc_func_y = postproc_func_y 94 | 95 | # y data for labeling x 96 | self.filename_y = filename_y 97 | self.array_y = array_y 98 | self.func_y = func_y 99 | 100 | # further options 101 | self.data_fraction = data_fraction 102 | self.shape = shape 103 | self.shape_y = shape_y 104 | self.collapse_z = collapse_z 105 | self.shuffle_on_load = shuffle_on_load 106 | 107 | # initialize npz load 108 | if np_load_string is not None: 109 | self.np_load_string = np_load_string 110 | else: 111 | self.np_load_string = "arr_0" 112 | # input data 113 | inCnt = 0 114 | if self.filename is not None: inCnt += 1 115 | if self.wildcard is not None: inCnt += 1 116 | if inCnt>1: 117 | # sanity check - only one of those allowed 118 | raise FluidDataLoaderError("FluidDataLoader error: for input data loading, only specify one of: input filename, or wildcard") 119 | # label data 120 | inCnt = 0 121 | if self.filename_y is not None: inCnt += 1 122 | if self.array_y is not None: inCnt += 1 123 | if self.func_y is not None: inCnt += 1 124 | if inCnt>1: 125 | # sanity check - only one of those allowed 126 | raise FluidDataLoaderError("FluidDataLoader error: for label data loading, only specify one of: input filename, array or function") 127 | 128 | if np_load_string_y is not None: 129 | self.np_load_string_y = np_load_string_y 130 | else: 131 | self.np_load_string_y = self.np_load_string 132 | 133 | self.print_info = print_info 134 | if self.print_info: 135 | print("FluidDataLoader init, path %s, filename %s" % (self.base_path,self.filename) ) 136 | self.oldNamingScheme = oldNamingScheme 137 | 138 | # sanity check file lists 139 | if self.multi_file_idxOff is not None and self.multi_file_list is not None: 140 | if len(self.multi_file_list) != len(self.multi_file_idxOff): 141 | raise FluidDataLoaderError("FluidDataLoader error: multi file list and idxOff lists have to match " + format([len(self.multi_file_list) , len(self.multi_file_idxOff)]) ) 142 | if self.multi_file_idxOff_y is not None and self.multi_file_list_y is not None: 143 | if len(self.multi_file_list_y) != len(self.multi_file_idxOff_y): 144 | raise FluidDataLoaderError("FluidDataLoader error: multi file list and idxOff lists for y have to match " + format([len(self.multi_file_list_y) , len(self.multi_file_idxOff_y)]) ) 145 | 146 | # all initialized upon load: 147 | self.x = None 148 | self.y = None 149 | self.xfn = None 150 | self.have_y_npz = False # does y contain numpy array data? 
151 | 152 | self.loadDirs() 153 | self.printStats() 154 | 155 | def getFilename(self, sim_index, fnbase, frame_index): 156 | if not self.oldNamingScheme: 157 | fn = os.path.join( self.base_path, os.path.join((self.simdirname % sim_index), (fnbase % frame_index)) ) 158 | else: 159 | # both parts simdir & file have both indices! 160 | fn = os.path.join( self.base_path, os.path.join( (self.simdirname % (sim_index,frame_index)), (fnbase % (sim_index,frame_index) ) )) 161 | return fn 162 | 163 | def collectFilenamesFromDir(self, list_index): 164 | """ Build filename list from single dir 165 | list_index: number in index list (or alternatively label list) 166 | """ 167 | 168 | sim_index = self.indices[list_index] # get simulation directory index from list 169 | labelstr = "" # debug info only 170 | foundCnt = 0 171 | 172 | if self.wildcard is not None: 173 | search_dir = os.path.join( self.base_path, (self.simdirname % sim_index) ) 174 | os.chdir(search_dir) 175 | allFiles = [f for f in glob.glob("*") if os.path.isfile(f)] # list all files 176 | files = [] 177 | for f in allFiles: 178 | match = re.search(self.wildcard, f) # note, matched again below... 179 | if match: 180 | files.append(f) 181 | 182 | if len(files)<1: 183 | raise FluidDataLoaderError("Error - no files found in directory '%s' with wildcard '%s' " %(search_dir, self.wildcard) ) 184 | 185 | files = sorted(files) # sort by name 186 | 187 | n = max(1, int(len(files)*self.data_fraction)) 188 | tf = float(len(files))/n # spread over time range (eg 200 frames) 189 | fcnt = 0 190 | for t in range(0,n): 191 | filelist_index = int(t*tf) # full range 192 | fn = files[filelist_index] 193 | self.xfn.append(os.path.join(search_dir, fn)) 194 | foundCnt += 1 195 | 196 | # construct label, closely follows index version below 197 | if self.filename_y is not None: 198 | mx = re.search(self.wildcard, fn) 199 | listy = self.filename_y.split("$") 200 | if(len(listy)!=2): 201 | raise FluidDataLoaderError("Error - when using a wildcard for x, filename_y needs to contain exactly one '$' where the file id string from x will be inserted to build the filename for y. Current, invalid, filename_y is '%s' " %(self.filename_y) ) 202 | fny = listy[0] + mx.group(1) + listy[1] 203 | 204 | if not os.path.isfile(fny): # make sure file for y exists 205 | raise FluidDataLoaderError("Error - y file '%s' for x file '%s' doesnt exist in search dir '%s' " %(fny, fn, search_dir ) ) 206 | 207 | fny = os.path.join(search_dir, fny) 208 | self.yfn.append(fny) 209 | self.have_y_npz = True # flag to indicate we have np arrays in y 210 | 211 | if self.array_y is not None: 212 | if self.y is None: 213 | self.y = [] 214 | self.y.append( self.array_y[list_index] ) 215 | labelstr = " with label " + format( self.array_y[list_index] ) 216 | 217 | if self.func_y is not None: 218 | print("NYI! 
test...") 219 | 220 | else: 221 | # "simple" index range 222 | n = max(1, int((self.filename_index_max-self.filename_index_min)*self.data_fraction)) 223 | tf = float(self.filename_index_max-self.filename_index_min)/n 224 | for t in range(0,n): 225 | filelist_index = int(self.filename_index_min + t*tf) # full range 226 | 227 | fn = self.getFilename(sim_index, self.filename, filelist_index) 228 | self.xfn.append(fn) 229 | foundCnt += 1 230 | 231 | if self.filename_y is not None: 232 | fny = self.getFilename(sim_index, self.filename_y, filelist_index) 233 | self.yfn.append(fny) 234 | self.have_y_npz = True # flag to indicate we have np arrays in y 235 | 236 | if self.array_y is not None: 237 | if self.y is None: 238 | self.y = [] 239 | self.y.append( self.array_y[list_index] ) 240 | labelstr = " with label " + format( self.array_y[list_index] ) 241 | 242 | if self.func_y is not None: 243 | print("NYI! test...") 244 | if self.y is None: 245 | self.y = [] 246 | self.y.append( self.func_y(list_index, sim_index, t, fn) ) 247 | 248 | if self.print_info: 249 | print("Found " +format(foundCnt) +" files from sim ID "+format(sim_index) + labelstr ) 250 | 251 | 252 | def getDim(self,shape): 253 | """ small helper to compute dimensionality of data from shape 254 | """ 255 | dim = -1 256 | if len(shape)==4: # probably ZYXc 257 | if(shape[0]==1): 258 | dim = 2 259 | else: 260 | dim = 3 261 | if len(shape)==5: # probably 4d, TZYXc 262 | dim = 4 263 | #print("Dim "+format(dim)+ " for " + format(shape) ) 264 | return dim 265 | 266 | def removeZComponent(self,x): 267 | """ Optional, and 2D only: remove Z entry from 3d vec fields 268 | """ 269 | if not self.collapse_z: return x 270 | if not self.getDim( x.shape )==2: return x 271 | if not x.shape[3]==3: return x # only apply for pure velocity grids with 3 channels 272 | x2d = np.zeros( (1,x.shape[1],x.shape[2],2), dtype=FDG_DTYPE ) 273 | x2d[:,:,:,0] = x[:,:,:,0] # only keep x,y 274 | x2d[:,:,:,1] = x[:,:,:,1] 275 | return x2d 276 | 277 | def mogrifyFilenameIndex(self, fn, idxOffset): 278 | """ Parse, determine index, and change 279 | """ 280 | match = re.search("(.*_)([\d]+)\.([\w]+)", fn) # split into groups: path/name_ , %04d , ext 281 | if match: 282 | if len(match.groups())!=3: 283 | raise FluidDataLoaderError("FluidDataLoader error: got filename %s, but could not fully split up into name,4-digit and extension " % (fn)) 284 | #print "A " + format(match.groups()) 285 | idx = int(match.group(2)) 286 | idx = max(self.filename_index_min, min(self.filename_index_max-1, idx+idxOffset) ) 287 | #print "A " + format(match.group(2)) 288 | fn = "%s%04d.%s" % (match.group(1), idx, match.group(3)) 289 | #print "fn " + fn 290 | else: 291 | raise FluidDataLoaderError("FluidDataLoader error: got filename %s, but could not split up into name,4-digit and extension " % (fn)) 292 | 293 | #exit() 294 | # density1_([\w]+).npz 295 | return fn 296 | 297 | def loadSingleDatum(self, fn, lstr, idxOffset=0): 298 | """ Determine file type and load 299 | """ 300 | if idxOffset!=0: 301 | fn = self.mogrifyFilenameIndex(fn,idxOffset) 302 | if self.print_info>1: 303 | print("Loading: "+fn+", "+lstr) 304 | # detect file type 305 | if fn.endswith( ".npz" ): 306 | ar = np.load(fn)[ lstr ] 307 | elif fn.endswith( ".uni" ): 308 | _, ar = uniio.readUni(fn) # load-string lstr not needed for uni files 309 | #ar = ar[::-1] # make a copy of the array in reverse order 310 | else: 311 | raise FluidDataLoaderError("FluidDataLoader error: got filename %s, but only .uni or .npz supported at the moment " 
% (fn))
312 | 
313 | 		return ar
314 | 
315 | 	def loadFiles(self):
316 | 		""" Load all NPZs from list.
317 | 			Note, data always has to have shape ZYXc (3d) or YXc (2d),
318 | 			where c is channels (eg 3 for vels, 1 for scalar data).
319 | 		"""
320 | 		n = len(self.xfn)
321 | 		for t in range(n):
322 | 			fof = 0 if self.multi_file_idxOff is None else self.multi_file_idxOff[0]
323 | 			fx = self.loadSingleDatum(self.xfn[t], self.np_load_string , fof)
324 | 
325 | 			if self.multi_file_list is not None:
326 | 				# concat multiple files...
327 | 				basename = self.xfn[t]
328 | 				if not basename.find(self.multi_file_list[0])>=0:
329 | 					raise FluidDataLoaderError("Error, input filename '%s' doesn't contain the given string '%s'"%(basename,self.multi_file_list[0]))
330 | 				for i in range(1,len(self.multi_file_list)):
331 | 					fnr = basename.replace(self.multi_file_list[0] , self.multi_file_list[i])
332 | 					fof = 0 if self.multi_file_idxOff is None else self.multi_file_idxOff[i]
333 | 					_fx = self.loadSingleDatum(fnr, self.np_load_string , fof )
334 | 					fx = np.append( fx, _fx , axis=len(fx.shape)-1 )
335 | 
336 | 			# apply post-processing function (if given)
337 | 			if self.postproc_func is not None:
338 | 				fx = self.postproc_func(fx, self)
339 | 
340 | 			# ... and the same again for y
341 | 			if self.have_y_npz:
342 | 				fofy = 0 if self.multi_file_idxOff_y is None else self.multi_file_idxOff_y[0]
343 | 				fy = self.loadSingleDatum(self.yfn[t], self.np_load_string_y , fofy )
344 | 
345 | 				if self.multi_file_list_y is not None:
346 | 					basename = self.yfn[t]
347 | 					if not basename.find(self.multi_file_list_y[0])>=0:
348 | 						raise FluidDataLoaderError("Error, input filename y '%s' doesn't contain the given string '%s'"%(basename,self.multi_file_list_y[0]))
349 | 					for i in range(1,len(self.multi_file_list_y)):
350 | 						fnr = basename.replace(self.multi_file_list_y[0] , self.multi_file_list_y[i])
351 | 						fofy = 0 if self.multi_file_idxOff_y is None else self.multi_file_idxOff_y[i]
352 | 						_fy = self.loadSingleDatum(fnr, self.np_load_string_y , fofy )
353 | 						fy = np.append( fy, _fy , axis=len(fy.shape)-1 )
354 | 
355 | 				if self.postproc_func_y is not None:
356 | 					fy = self.postproc_func_y(fy, self)
357 | 
358 | 			fx = self.removeZComponent(fx) # optional!
359 | 
360 | 			# initialize x/y arrays upon first use
361 | 			if self.x is None:
362 | 				self.data_shape = fx.shape
363 | 
364 | 				if self.shape is None: # no target shape? use data res
365 | 					self.shape = fx.shape
366 | 					self.do_zoom = False
367 | 				else:
368 | 					self.do_zoom = True
369 | 					self.zoom_shape = []
370 | 					for i in range(len(self.shape)):
371 | 						self.zoom_shape.append( float(self.shape[i]) / self.data_shape[i] )
372 | 					#if self.collapse_z and self.dim==2: self.zoom_shape[ len(self.zoom_shape)-1 ] = 1. # old, dont zoom channels
373 | 					if self.print_info: print("Zoom for x by "+format(self.zoom_shape) )
374 | 
375 | 				if self.print_info: print("Allocating x data for "+format(n)+" entries of size "+format(self.shape) )
376 | 				self.x = np.zeros( tuple([n]+list(self.shape)) , dtype=FDG_DTYPE )
377 | 
378 | 			# optional zoom, is initialized with original array
379 | 			if self.do_zoom:
380 | 				fx = scipy.ndimage.zoom( fx, self.zoom_shape, order=1 )
381 | 
382 | 			# finally store t-th data sample
383 | 			self.x[t,:] = fx
384 | 
385 | 			# and again for y ...
386 | 			if self.have_y_npz:
387 | 				fy = self.removeZComponent(fy)
388 | 
389 | 				if self.y is None:
390 | 					self.data_shape_y = fy.shape
391 | 					if self.shape_y is None: # no target shape? 
use data res 392 | self.shape_y = fy.shape 393 | self.do_zoom = False 394 | else: 395 | self.do_zoom = True 396 | self.zoom_shape_y = [] 397 | for i in range(len(self.shape_y)): 398 | self.zoom_shape_y.append( float(self.shape_y[i]) / self.data_shape_y[i] ) 399 | if self.print_info: print("Zoom for y by "+format(self.zoom_shape_y) ) 400 | 401 | if self.print_info: print("Allocating y data for "+format(n)+" entries of size "+format(self.shape_y) ) 402 | self.y = np.zeros( tuple([n]+list(self.shape_y)) , dtype=FDG_DTYPE ) 403 | 404 | if self.do_zoom: 405 | fy = scipy.ndimage.zoom( fy, self.zoom_shape_y, order=1 ) 406 | 407 | self.y[t,:] = fy 408 | 409 | if self.print_info and t==0: print("loadFiles: data size x "+ format(self.x.shape) + ((", y " + format(self.y.shape)) if self.filename_y is not None else "") ) 410 | 411 | # x (and optionally y) arrays complete now, retrieve with get() later on 412 | 413 | 414 | 415 | def loadDirs(self): 416 | """ Main load function: collect all files in multiple directories, 417 | and load the necessary fraction; potentially rescale (zoom) data, if enabled 418 | """ 419 | self.xfn = [] 420 | self.yfn = [] 421 | currDir = os.getcwd() 422 | 423 | for i in range(len(self.indices)): 424 | self.collectFilenamesFromDir( i ) 425 | os.chdir( currDir ) 426 | 427 | # debug info, print full lists 428 | if self.print_info>1: 429 | #print("Full list x: "+format(self.xfn)) print("Full list y: "+format(self.yfn)) 430 | print( "\nfilenames x:" ); print( ("\n".join(self.xfn)) ) 431 | if self.filename_y is not None: 432 | print( "\nfilenames y:" ); print( ("\n".join(self.yfn)) ) 433 | 434 | self.loadFiles() 435 | os.chdir( currDir ) 436 | 437 | # remove z axis of all 3D data fields for whole data vector 438 | if self.collapse_z: 439 | if self.getDim(self.x[0].shape)==2: 440 | self.x = np.reshape( self.x, [self.x.shape[0], self.shape[1],self.shape[2],self.shape[3]] ) # remove z-axis for x 441 | if self.have_y_npz and self.getDim(self.y[0].shape)==2: 442 | self.y = np.reshape( self.y, [self.y.shape[0], self.shape_y[1],self.shape_y[2],self.shape_y[3]] ) 443 | 444 | # do manual shuffling once (needs to reorder x,y and filenames for x,y) 445 | if self.shuffle_on_load: 446 | idxr = np.random.permutation(self.x.shape[0]) 447 | self.x = self.x[idxr] 448 | if self.have_y_npz: self.y = self.y[idxr] # y is np array , reorder... 
449 | 450 | xfn2,yfn2,y2 = [],[],[] 451 | for i in range(len(self.xfn)): 452 | xfn2.append( self.xfn[idxr[i]] ) 453 | if not self.have_y_npz and self.y is not None: y2.append( self.y[idxr[i]] ) # non np array y 454 | if self.filename_y is not None: yfn2.append( self.yfn[idxr[i]] ) 455 | self.xfn, self.yfn = xfn2,yfn2 456 | if not self.have_y_npz and self.y is not None: self.y = y2 457 | # loading done 458 | 459 | 460 | def arrayStats(self, values, weights=None): 461 | average = np.average(values) #, weights=weights) 462 | variance = np.average((values-average)**2) #, weights=weights) # Fast and numerically precise 463 | return (average, math.sqrt(variance)) 464 | 465 | def perChannelStats(self, values, info=None): 466 | if values.shape[-1]>1: 467 | if info: 468 | print(format(info)) 469 | for c in range(values.shape[-1]): 470 | print("\t\t"+format(c)+": "+format(self.arrayStats(values[...,c]) )) 471 | 472 | def printStats(self): 473 | """ General info about loaded data sets """ 474 | if self.print_info: 475 | print("Loaded "+format(self.x.shape[0])+" datasets" + (", shuffled" if self.shuffle_on_load else "") ) 476 | print("\tData shape x " + format(self.x.shape)) 477 | print("\tx mean & std dev: " + format(self.arrayStats(self.x))) 478 | self.perChannelStats(self.x, "\tPer channel mean & std dev x: ") 479 | if self.have_y_npz: 480 | print("\tData shape y " + format(self.y.shape)) 481 | print("\ty mean & std dev: " + format(self.arrayStats(self.y))) 482 | 483 | def get(self): 484 | """ After loading, return arrays 485 | """ 486 | return self.x , self.y , self.xfn 487 | 488 | def getFullInfo(self): 489 | """ Summarize full data set as string 490 | """ 491 | ret = "" 492 | printMean = True 493 | for i in range(len(self.xfn)): 494 | ret = ret + ("%d/%d, file %s, shape %s" % (i, len(self.xfn), self.xfn[i], format(self.x[i].shape) )) 495 | if printMean: 496 | ret = ret + (", x mean %s " % (format(np.mean(self.x[i])) )) 497 | if self.filename_y is not None: 498 | ret = ret + (", file_y %s " % (self.yfn[i]) ) 499 | if self.have_y_npz: 500 | ret = ret + (", shape_y %s " % (format(self.y[i].shape)) ) 501 | if printMean: 502 | ret = ret + (", y mean %s " % (format(np.mean(self.y[i])) )) 503 | if self.array_y is not None: 504 | ret = ret + (", y %s " % (format(self.y[i])) ) 505 | ret = ret + "\n" 506 | return ret 507 | 508 | class FluidDataLoaderError(Exception): 509 | ''' FDL errors ''' 510 | 511 | 512 | -------------------------------------------------------------------------------- /tools_wscale/paramhelpers.py: -------------------------------------------------------------------------------- 1 | # 2 | # Helpers for handling command line parameters and the like 3 | # example: path = getParam("path", "path.uni") 4 | # 5 | import sys, os, shutil, json 6 | 7 | # global for checking used params 8 | paramUsed = [] 9 | # additionally store parameters and values pairs 10 | paramDict = {} 11 | 12 | # ====================================================================================================================== 13 | # read parameters 14 | 15 | #! check for a specific parameter, note returns strings, no conversion; not case sensitive! 
16 | def getParam(name, default):
17 | 	global paramUsed
18 | 	v = default
19 | 	while( len(paramUsed)<len(sys.argv) ):
20 | 		paramUsed.append(0)
21 | 	for iter in range(1, len(sys.argv)):
22 | 		if(sys.argv[iter].lower() == name.lower()) and (iter+1<len(sys.argv)):
23 | 			paramUsed[iter] = paramUsed[iter+1] = 1
24 | 			v = sys.argv[iter+1]
25 | 	paramDict[name] = v
26 | 	return v
[...]
--------------------------------------------------------------------------------
/tools_wscale/tilecreator2_test2.py:
--------------------------------------------------------------------------------
[...]
56 | [...] >= 1.5:
57 | Obsinput[i][0][j][k][0] = 1.0
58 | else:
59 | Obsinput[i][0][j][k][0] = 0.0
60 | 
61 | if(len(xt) == 0):
62 | xt = np.concatenate((densVel, Obsinput), axis = 4)
63 | else:
64 | xt = np.concatenate((xt,np.concatenate((densVel, Obsinput), axis = 4)), axis=4)
65 | x = xt
66 | y = y[:,:,:,0:256,:]
67 | TC.addData(x,y)
68 | print(x.shape)
69 | print('##### sample')
70 | #get batch, data format: [batchSize, z, y, x, channels]
71 | #low, high = TC.getRandomFrame()#TC.selectRandomTiles(128)
72 | #
73 | #test output, all tiles in one image; average z axis, factor for visibility
74 | #tc.savePngsGrayscale(tiles=[np.average(high, axis=0)*8], path='../test_img/high_', imageCounter=0, tiles_in_image=[1,1])
75 | #tc.savePngsGrayscale([np.average(low, axis=0)*8], '../test_img/low_', tiles_in_image=[1,1])
76 | 
77 | #test parser
78 | if 1:
79 | print('\n\tparser test 0')
80 | TC.parseChannels('d , D, vx,vy,vZ ,d, v1z,v1y ,v1x')
81 | print('\n\tparser test 1')
82 | #duplicate
83 | try:
84 | TC.parseChannels('d,d,vx,vy,vz,d,v1z,v1y,v1x,vx')
85 | except tc.TilecreatorError as e:
86 | print(e)
87 | #missing
88 | print('\n\tparser test 2')
89 | try:
90 | TC.parseChannels('d,d,vx,vy,vz,d,v1z,v1y')
91 | except tc.TilecreatorError as e:
92 | print(e)
93 | print('\n\tparser test 3')
94 | try:
95 | TC.parseChannels('d,d,vx,vy,vz,d,v1z,v1x')
96 | except tc.TilecreatorError as e:
97 | print(e)
98 | # unsupported
99 | print('\n\tparser test 4')
100 | try:
101 | TC.parseChannels('d,d,vx,vy,vz,d,b')
102 | except tc.TilecreatorError as e:
103 | print(e)
104 | print('\n\tparser test 5')
105 | try:
106 | TC.parseChannels('d,d,vx,vy,vz,dd')
107 | except tc.TilecreatorError as e:
108 | print(e)
109 | 
110 | #test batch creation with complete augmentation
111 | TC.initDataAugmentation(rot=2, minScale=0.85, maxScale=1.15 ,flip=True)
112 | if 0:
113 | batch = 32
114 | startTime = time.time()
115 | low, high = TC.selectRandomTiles(batch, augment=True)
116 | endTime=(time.time()-startTime)
117 | print('{} tiles batch creation time: {:.4f}, per tile: {:.4f}'.format(batch, endTime, endTime/batch))
118 | #print(low.shape, high.shape)
119 | if dim == 3:
120 | high = np.average(high, axis=1)*8
121 | low = np.average(low, axis=1)*8
122 | if dim == 2:
123 | high.shape = (batch, 64, 64, 1)
124 | low.shape = (batch, 16, 16, 1)
125 | tc.savePngsGrayscale(tiles=high, path='../tiletest/test_img/batch_high_', imageCounter=0, tiles_in_image=[4,8])
126 | tc.savePngsGrayscale(low, '../tiletest/test_img/batch_low_', tiles_in_image=[4,8])
127 | 
128 | # test load data
129 | if 0:
130 | #low, high = TC.getFrame(20)
131 | TC.clearData()
132 | low, high = np.ones((64,64,4)), np.ones((256,256,1))
133 | try:
134 | TC.addData(low, high)
135 | except tc.TilecreatorError as e:
136 | print(e)
137 | 
138 | #test tile concat
139 | if 0:
140 | frame = 20
141 | low, high = TC.getDatum(frame)
142 | 
143 | high_tiles = TC.createTiles(high, TC.tile_shape_high)
144 | tc.savePngsGrayscale(np.reshape(high_tiles,(len(high_tiles), 64,64, 1)),'../tiletest/test_img/high_',imageCounter=0, tiles_in_image=[4,4])
145 | 
146 | high_tiles = TC.createTiles(high, TC.tile_shape_high, strides=32)
147 | tc.savePngsGrayscale(np.reshape(high_tiles,(len(high_tiles), 64,64, 1)),'../tiletest/test_img/high_',imageCounter=1, tiles_in_image=[7,7])
148 | 
149 | high_frame = TC.concatTiles(high_tiles, [1,7,7])
150 | tc.savePngsGrayscale(np.reshape(high_frame,(1,
448,448, 1)),'../tiletest/test_img/high_',imageCounter=2, tiles_in_image=[1,1]) 151 | high_frame = TC.concatTiles(high_tiles, [1,7,7], [0,16,16,0]) 152 | tc.savePngsGrayscale(np.reshape(high_frame,(1, 224,224, 1)),'../tiletest/test_img/high_',imageCounter=3, tiles_in_image=[1,1]) 153 | 154 | 155 | #test all augmentation methods 2D 156 | if 1 and dim==2: 157 | frame = 20 158 | data = {} 159 | data[tc.DATA_KEY_LOW], data[tc.DATA_KEY_HIGH] = TC.getDatum(frame) 160 | #low, high = TC.frame_inputs[10], TC.frame_outputs[10] 161 | save_img(data, True) 162 | 163 | save_img(TC.flip(data, [1]), True) #flip y 164 | data[tc.DATA_KEY_LOW], data[tc.DATA_KEY_HIGH] = TC.getDatum(frame) 165 | save_img(TC.flip(data, [2]), True) 166 | data[tc.DATA_KEY_LOW], data[tc.DATA_KEY_HIGH] = TC.getDatum(frame) 167 | save_img(TC.flip(data, [1,2]), True) 168 | 169 | #rot 170 | data[tc.DATA_KEY_LOW], data[tc.DATA_KEY_HIGH] = TC.getDatum(frame) 171 | save_img(TC.rotate(data), True) 172 | data[tc.DATA_KEY_LOW], data[tc.DATA_KEY_HIGH] = TC.getDatum(frame) 173 | save_img(TC.rotate(data), True) 174 | 175 | #scale 176 | data[tc.DATA_KEY_LOW], data[tc.DATA_KEY_HIGH] = TC.getDatum(frame) 177 | save_img(TC.scale(data, 0.8), True) 178 | data[tc.DATA_KEY_LOW], data[tc.DATA_KEY_HIGH] = TC.getDatum(frame) 179 | save_img(TC.scale(data, 1.2), True) 180 | 181 | #rot90 182 | data[tc.DATA_KEY_LOW], data[tc.DATA_KEY_HIGH] = TC.getDatum(frame) 183 | save_img(TC.rotate90(data, [2,1]), True) 184 | data[tc.DATA_KEY_LOW], data[tc.DATA_KEY_HIGH] = TC.getDatum(frame) 185 | save_img(TC.rotate90(data, [1,2]), True) 186 | 187 | #test FLIP 188 | if 0 and dim==3: 189 | # low_f, high_f = TC.flip(low, high, [0]) #flip z, won't show as we average over z (3D) or z is only 1 (2D) 190 | # tc.savePngsGrayscale(tiles=[np.average(high_f, axis=0)*8], tileSize=high_f.shape[2], path='../test_img/high_', imageCounter=1, tiles_in_image=[1,1]) 191 | low_f, high_f = TC.flip(low, high, [1]) #flip y 192 | tc.savePngsGrayscale(tiles=[np.average(high_f, axis=0)*8], path='../test_img/high_', imageCounter=2, tiles_in_image=[1,1]) 193 | tc.savePngsGrayscale([np.average(low_f, axis=0)*8], '../test_img/low_', imageCounter=2, tiles_in_image=[1,1]) 194 | # low_f, high_f = TC.flip(low, high, [2]) #flip x 195 | # tc.savePngsGrayscale(tiles=[np.average(high_f, axis=0)*8], path='../test_img/high_', imageCounter=3, tiles_in_image=[1,1]) 196 | # low_f, high_f = TC.flip(low, high, [1,2]) #flip y and x 197 | # tc.savePngsGrayscale(tiles=[np.average(high_f, axis=0)*8], path='../test_img/high_', imageCounter=4, tiles_in_image=[1,1]) 198 | 199 | #test ROT 200 | if 0 and dim==3: 201 | print('testing rotation performance, this may take a while...') 202 | batch = 10 203 | theta = [ np.pi / 180 * 45, 204 | np.pi / 180 * 0, 205 | np.pi / 180 * 0 ] 206 | startTime = time.time() 207 | for i in range(batch): 208 | low_r, high_r = TC.rotate(low, high, theta) 209 | endTime=(time.time()-startTime)/batch 210 | print('matrix rot time: {:.8f}'.format(endTime)) 211 | tc.savePngsGrayscale(tiles=[np.average(high_r, axis=0)*8], path='../test_img/high_', imageCounter=1, tiles_in_image=[1,1]) 212 | startTime = time.time() 213 | for i in range(batch): 214 | low_r, high_r = TC.rotate4(low, high, theta) 215 | endTime=(time.time()-startTime)/batch 216 | print('matrix 4D rot time: {:.8f}'.format(endTime)) 217 | tc.savePngsGrayscale(tiles=[np.average(high_r, axis=0)*8], path='../test_img/high_', imageCounter=2, tiles_in_image=[1,1]) 218 | startTime = time.time() 219 | for i in range(batch): 220 | low_r, high_r = 
TC.rotate_simple(low, high, -45) 221 | endTime=(time.time()-startTime)/batch 222 | print('simple rot time: {:.8f}'.format(endTime)) 223 | tc.savePngsGrayscale(tiles=[np.average(high_r, axis=0)*8], path='../test_img/high_', imageCounter=3, tiles_in_image=[1,1]) 224 | 225 | if 0 and dim==3: 226 | theta = [ np.pi / 180 * 45, 227 | np.pi / 180 * 0, 228 | np.pi / 180 * 0 ] 229 | low_r, high_r = TC.rotate(low, high, theta) 230 | tc.savePngsGrayscale(tiles=[np.average(high_r, axis=0)*8], path='../test_img/high_', imageCounter=1, tiles_in_image=[1,1], plot_vel_x_y=True) 231 | tc.savePngsGrayscale([np.average(low_r, axis=0)*8], '../test_img/low_', imageCounter=1, tiles_in_image=[1,1], plot_vel_x_y=True) 232 | # theta = [ np.pi / 180 * 0, 233 | # np.pi / 180 * 45, 234 | # np.pi / 180 * 0 ] 235 | # low_r, high_r = TC.rotate(low, high, theta) 236 | # tc.savePngsGrayscale(tiles=[np.average(high_r, axis=0)*8], path='../test_img/high_', imageCounter=2, tiles_in_image=[1,1]) 237 | # tc.savePngsGrayscale([np.average(low_r, axis=0)*8], '../test_img/low_', imageCounter=2, tiles_in_image=[1,1]) 238 | 239 | 240 | #test SCALE 241 | if 0 and dim==3: 242 | low_s, high_s = TC.scale(low, high, 0.8) 243 | tc.savePngsGrayscale(tiles=[np.average(high_s, axis=0)*8], path='../test_img/high_', imageCounter=1, tiles_in_image=[1,1]) 244 | low_s, high_s = TC.scale(low, high, 1.2) 245 | tc.savePngsGrayscale(tiles=[np.average(high_s, axis=0)*8], path='../test_img/high_', imageCounter=2, tiles_in_image=[1,1]) 246 | 247 | -------------------------------------------------------------------------------- /tools_wscale/uniio.py: -------------------------------------------------------------------------------- 1 | #****************************************************************************** 2 | # 3 | # MantaFlow fluid solver framework 4 | # Copyright 2017 Nils Thuerey, Boris Bonev 5 | # 6 | # This program is free software, distributed under the terms of the 7 | # Apache License, Version 2.0 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Read mantaflow uni files into numpy arrays 11 | # note - only supports 3D grids for now 12 | # (python2 , switch to python3 below) 13 | # 14 | #****************************************************************************** 15 | 16 | import gzip 17 | import struct 18 | import sys 19 | import os 20 | import shutil 21 | from datetime import date 22 | from collections import namedtuple 23 | import numpy as np 24 | 25 | PY3K = sys.version_info >= (3, 0) 26 | 27 | # read content of grid 28 | def RU_read_content(bytestream, header): 29 | assert (header['bytesPerElement'] == 12 and header['elementType'] == 2) or (header['bytesPerElement'] == 4 and (header['elementType'] == 0 or header['elementType'] == 1)) 30 | 31 | if (header['elementType'] == 0): 32 | data = np.frombuffer(bytestream.read(), dtype="int32") # int grid 33 | else: 34 | data = np.frombuffer(bytestream.read(), dtype="float32") # float grid , scalar or vec3 35 | 36 | channels = 1 37 | if (header['elementType'] == 2): 38 | channels = 3 39 | 40 | dimensions = [header['dimT'], header['dimZ'], header['dimY'], header['dimX'], channels] 41 | if header['dimT']<=1: 42 | dimensions = [header['dimZ'], header['dimY'], header['dimX'], channels] 43 | 44 | return data.reshape( *dimensions, order='C') 45 | 46 | # read uni file header (v3) 47 | def RU_read_header(bytestream): 48 | ID = bytestream.read(4) 49 | # in python3, ID == b'MNT3' or b'MNT2' or ..., have to decode 50 | if(PY3K): ID = ID.decode("utf-8") 51 | if ID=="MNT2": 52 | # unpack 
header struct object 53 | header = namedtuple('HeaderV3', 'dimX, dimY, dimZ, gridType, elementType, bytesPerElement, info, timestamp') 54 | # convert to namedtuple and then directly to a dict 55 | header = header._asdict(header._make(struct.unpack('iiiiii256sQ', bytestream.read(288)))) 56 | 57 | # when writing, we'll need a v4 header field, re-pack... 58 | header['dimT'] = 0 59 | header['info'] = header['info'][0:252] 60 | head4 = namedtuple('HeaderV4', 'dimX, dimY, dimZ, gridType, elementType, bytesPerElement, info, dimT, timestamp')(**header) 61 | header = head4._asdict() 62 | 63 | elif ID=="MNT3": 64 | # unpack header struct object 65 | header = namedtuple('HeaderV4', 'dimX, dimY, dimZ, gridType, elementType, bytesPerElement, info, dimT, timestamp') 66 | # convert to namedtuple and then directly to a dict 67 | # header is shorter for v3! 68 | header = header._asdict(header._make(struct.unpack('iiiiii252siQ', bytestream.read(288)))) 69 | 70 | elif ID=="M4T2" or ID=="M4T3": 71 | print("read_header error - 4D grids not yet supported") 72 | exit(1) 73 | 74 | else: 75 | print("read_header error - unknown header '%s' " % ID) 76 | exit(1) 77 | 78 | return header 79 | 80 | # use this to read the .uni file. It will return the header as dictionary and the content as np-array 81 | def readUni(filename): 82 | #print("Reading '%s'" % filename) # debug 83 | with gzip.open(filename, 'rb') as bytestream: 84 | header = RU_read_header(bytestream) 85 | content = RU_read_content(bytestream, header) 86 | #print("Strides "+format(content.strides)) 87 | 88 | return header, content 89 | 90 | # use this to write a .uni file. The header has to be supplied in the same dictionary format as the output of readuni 91 | def writeUni(filename, header, content): 92 | #print("Writing '%s'" % filename) # debug 93 | #print("Strides "+format(content.strides)) 94 | with gzip.open(filename, 'wb') as bytestream: 95 | 96 | # write the header of the uni file (old v3 header) 97 | #bytestream.write(b'MNT2') # v3 98 | #head_tuple = namedtuple('GenericDict', header.keys())(**header) 99 | #head_buffer = struct.pack('iiiiii256sQ', *head_tuple) 100 | 101 | # current header 102 | bytestream.write(b'MNT3') # new, v4 103 | head_tuple = namedtuple('HeaderV4', header.keys())(**header) 104 | head_buffer = struct.pack('iiiiii252siQ', *head_tuple) 105 | bytestream.write(head_buffer) 106 | 107 | # always convert to single precision floats 108 | if content.dtype!="float32": 109 | content = np.asarray(content, dtype="float32") 110 | 111 | # write grid content 112 | if (header['elementType'] == 2): 113 | # vec3 grid 114 | content = content.reshape(header['dimX']*header['dimY']*header['dimZ']*3, order='C') 115 | else: 116 | # int or scalar grid 117 | content = content.reshape(header['dimX']*header['dimY']*header['dimZ'], order='C') 118 | 119 | if sys.version_info >= (3,0): 120 | # changed for Python3 121 | bytestream.write(memoryview(content)) 122 | else: 123 | bytestream.write(np.getbuffer(content)) 124 | 125 | # backup code to test folder 126 | def backupFile(name, test_path): 127 | code_path = os.path.dirname(name) + '/' + os.path.basename(name) 128 | if len(os.path.dirname(name))==0: 129 | code_path = ".%s" % code_path 130 | shutil.copy(code_path, test_path + os.path.basename(name)) 131 | 132 | #****************************************************************************** 133 | # particle data 134 | 135 | def RP_read_header(bytestream): 136 | ID = bytestream.read(4) # NOTE: useless 137 | # unpack header struct object 138 | head = 
namedtuple('UniPartHeader', 'dim, dimX, dimY, dimZ, elementType, bytesPerElement, info, timestamp')
139 | # convert to namedtuple and then directly to a dict
140 | head = head._asdict(head._make(struct.unpack('iiiiii256sQ', bytestream.read(288))))
141 | 
142 | return head
143 | 
144 | def RP_read_content(bytestream, head, data_type=None): # data_type = {None: BasicParticleSystem; "float32": Real; "int32": Int}
145 | assert(head['bytesPerElement']==16 or head['bytesPerElement']==12 or head['bytesPerElement']==4)
146 | 
147 | if(head['elementType']==0): # BasicParticleSystem
148 | print('(BasicParticleSystem) ' )
149 | data = np.frombuffer(bytestream.read(), dtype=np.dtype([('f1',(np.float32,3)),('f2',(np.int32,1))]))['f1']
150 | else: # head['elementType']==1: ParticleDataImpl, where T = {float32: Real(4) or Vec3(12); int32: Int(4)}
151 | print('(ParticleDataImpl<T={}{}>) '.format(data_type, 'x3' if (head['bytesPerElement']==12) else '') )
152 | data = np.reshape(np.frombuffer(bytestream.read(), dtype=data_type), (-1, 3 if (head['bytesPerElement']==12) else 1))
153 | 
154 | return data
155 | 
156 | def readParticles(filename, data_type=None):
157 | print('Reading {} ... '.format(filename) )
158 | with gzip.open(filename, 'rb') as bytestream:
159 | head = RP_read_header(bytestream)
160 | data = RP_read_content(bytestream, head, data_type)
161 | 
162 | print('Done.')
163 | return head, data
164 | 
165 | #******************************************************************************
166 | # numpy array files
167 | 
168 | npBuf = {} # store arrays
169 | npCnt = {} # filename counter
170 | # FIXME , todo - add byte size limit per file at some point, to prevent them from getting too large
171 | 
172 | # buffer arrays, and write multiple to single file
173 | def writeNumpyBuf(filename, content):
174 | global npBuf,npCnt
175 | if not filename in npBuf:
176 | npBuf[filename] = []
177 | npCnt[filename] = 0
178 | npBuf[filename].append(content)
179 | #print("writing buffered, arrays "+format( len(npBuf[filename]) ) + ", size "+ format(content.size) )
180 | if len(npBuf[filename])>10:
181 | #print("writing buffered "+filename)
182 | np.savez_compressed( filename+("_%04d.npz"%(npCnt[filename])), *npBuf[filename] )
183 | npCnt[filename] += 1
184 | npBuf[filename] = []
185 | 
186 | # write all remaining ones
187 | def finalizeNumpyBufs():
188 | global npBuf,npCnt
189 | for filename in npBuf.keys():
190 | if len(npBuf[filename])>0:
191 | #print("writing last buffered "+filename+ ", left " + format(len(npBuf[filename])))
192 | np.savez_compressed( filename+("_%04d.npz"%(npCnt[filename])), *npBuf[filename] )
193 | # reset...
194 | npBuf = {}
195 | npCnt = {}
196 | 
197 | 
198 | # write a single numpy array into an npz file
199 | def writeNumpySingle(filename, content):
200 | #print("writing "+filename)
201 | np.savez_compressed( filename, content )
202 | 
203 | def readNumpy(filename):
204 | #print("reading "+filename)
205 | npz = np.load( filename )
206 | return npz
207 | 
208 | 
209 | 
--------------------------------------------------------------------------------
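
--------------------------------------------------------------------------------
# Usage sketch for uniio.py (illustrative, not part of the repository): a
# minimal read/modify/write round trip. The input path "density_0100.uni" is a
# hypothetical placeholder for any scalar mantaflow .uni grid.

import uniio

# read: returns the header as a dict and the grid content as a numpy array
header, content = uniio.readUni("density_0100.uni")
print(header['dimX'], header['dimY'], header['dimZ'], content.shape)

# modify; the shape must stay consistent with the header dimensions
content = content * 2.0

# write: the header is reused unchanged; writeUni converts to float32 as needed
uniio.writeUni("density_x2_0100.uni", header, content)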