├── Code
│   ├── Baseline
│   │   ├── Baseline_0_GAN.m
│   │   ├── Baseline_10_GAN_DynamicsSelfIndentity.m
│   │   ├── Baseline_11_GAN_ConditionalCycleDynamics.m
│   │   ├── Baseline_12_GAN_ConditionalCycleSelfIdentity.m
│   │   ├── Baseline_13_GAN_ConditionalDynamicsIdentity.m
│   │   ├── Baseline_14_GAN_CycleDynamicsIdentity.m
│   │   ├── Baseline_15_QuantileMapping.m
│   │   ├── Baseline_16_MBCn.m
│   │   ├── Baseline_16_MBCn_Apply.m
│   │   ├── Baseline_16_MBCn_n.m
│   │   ├── Baseline_1_ConditionalGAN.m
│   │   ├── Baseline_2_CycleGAN.m
│   │   ├── Baseline_3_DynamicsGAN.m
│   │   ├── Baseline_4_GAN_SelfIdentity.m
│   │   ├── Baseline_5_GAN_ConditionalCycle.m
│   │   ├── Baseline_6_GAN_ConditionalDynamics.m
│   │   ├── Baseline_7_GAN_ConditionalSelfIdentity.m
│   │   ├── Baseline_8_GAN_CycleDynamics.m
│   │   ├── Baseline_9_GAN_CycleSelfIdentity.m
│   │   └── Baseline_Evaluation.m
│   ├── Data_Processing.m
│   ├── Dynamical_Regularization.m
│   ├── Evaluation.m
│   ├── RADA.m
│   └── TuringTest.m
├── Fig
│   ├── Fig_1.png
│   ├── Fig_3.png
│   ├── Fig_4.png
│   ├── Fig_5.png
│   ├── Fig_6.png
│   ├── Fig_7.png
│   ├── Fig_8.png
│   ├── LOGO.png
│   ├── fig_10.png
│   ├── fig_2.png
│   └── fig_9.png
├── LICENSE
├── Logo.m
├── Paper.pdf
├── README.md
├── Result
│   └── TuringTest
│       ├── Pamler_Turing_Correction_30fc8b38-8d10-44f6-a1d9-e06cc91112b5.mx
│       ├── Pamler_Turing_Correction_35e614b0-1eeb-4730-971d-250635ca07ae.mx
│       ├── Pamler_Turing_Correction_6fb78c7f-b00d-45cd-8d63-ffedf844dbc1.mx
│       ├── Pamler_Turing_Correction_887cf557-90b4-47b4-8fc8-02281e2f0585.mx
│       ├── Pamler_Turing_Raw_186c034c-3536-4684-9c6f-6e662e481ce6.mx
│       ├── Pamler_Turing_Raw_1e8866c2-75f1-4dcb-aba2-7d42c1709f5c.mx
│       ├── Pamler_Turing_Raw_21cde7a7-a29a-4f48-a89e-94facc62d692.mx
│       └── Pamler_Turing_Raw_6737e043-3315-407a-8001-bbd4b42c8e10.mx
└── unet.m

/Code/Baseline/Baseline_0_GAN.m:
--------------------------------------------------------------------------------
1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"];
2 | hype={500,5,1};
3 | days={1,0}
4 | dim={1,26,48};
5 | dim2={3*(Total[days]+1),36,56};
6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]];
7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]];
8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]];
9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]];
10 | 
11 | length=14610;
12 | vlength=3652;
13 | test=Table[<|"P_GCM"->nP4GCM[[i]],
14 | "P_Obser"->nP4Obser[[i]]|>,{i,length+vlength+1,Length[nP4GCM]}];
15 | 
16 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&],
17 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}];
18 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}];
19 | 
20 | validation=Table[<|"P_GCM"->nP4GCM[[i]],"P_Obser"->nP4Obser[[i]]|>,{i,length,length+vlength}];
21 | 
22 | generatorGCM2Obser=NetGraph[<|
23 | "chain"->{ConvolutionLayer[64,{3,3},"PaddingSize"->1],
24 | BatchNormalizationLayer[],
25 | Ramp,
26 | ConvolutionLayer[128,{3,3},"PaddingSize"->1],
27 | BatchNormalizationLayer[],
28 | Ramp,
29 | ConvolutionLayer[256,{3,3},"PaddingSize"->1],
30 | BatchNormalizationLayer[],
31 | Ramp,
32 | ConvolutionLayer[512,{3,3},"PaddingSize"->1],
33 | BatchNormalizationLayer[],
34 | Ramp,
35 | ConvolutionLayer[1,{1,1}]},
36 | "combine"->ThreadingLayer[Plus],
37 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp}
38 | |>,
39 | {NetPort["P"]->"chain"->"combine",
40 | 
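(* Generator: the convolutional "chain" predicts an additive correction that "combine"
   (ThreadingLayer[Plus]) adds back onto the input precipitation P, so the network learns a
   residual bias correction rather than the full field. "cut" then multiplies by the frozen
   validMatrix mask (grid cells outside the valid domain stay zero) and applies Ramp so the
   corrected precipitation remains non-negative. *)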
NetPort["P"]->"combine"->"cut"}, 41 | "P"->dim] 42 | 43 | discriminatorGCM2Obser = NetChain[{ 44 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 45 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 46 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 47 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 48 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 49 | "Input" -> dim]; 50 | 51 | gan =NetGraph[<| 52 | "Generator_GCM->Obser" -> generatorGCM2Obser, 53 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 54 | "Cat_GCM->Obser" -> CatenateLayer[], 55 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 56 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 57 | "Fake_GCM->Obser"->PartLayer[1], 58 | "Real_GCM->Obser"->PartLayer[2], 59 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0] 60 | |>, 61 | 62 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 63 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 64 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 65 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 66 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 67 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"] 68 | }, 69 | "P_Obser" -> dim, 70 | "P_GCM" -> dim] 71 | 72 | DiffMean=Infinity; 73 | DiffVar=Infinity; 74 | 75 | 76 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 77 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 78 | 79 | index=StringSplit[CreateUUID[],"-"][[1]]; 80 | Print[index]; 81 | 82 | ReportCycleGan[net_] := 83 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 84 | gen=net[["Generator_GCM->Obser"]]; 85 | obserG=Map[gen[<|"P"->#[["P_GCM"]]|>,TargetDevice->"GPU"]&,validation]; 86 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 87 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 88 | Print[TableForm[{{DiffMean,DiffVar}, 89 | {meanDiff,varDiff}}]]; 90 | If[meanDiff+varDiff<=DiffMean+DiffVar, 91 | Block[{}, 92 | Print[index]; 93 | Export["/g/g92/pan11/Baseline_0_GAN_"<>index<>".mx",net]; 94 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 95 | 96 | NetTrain[gan, 97 | {Function[Block[{base,choice,choice2}, 98 | base=RandomSample[Range[2,length],#BatchSize]; 99 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 100 | tempt=#+daylag+yearlag*365; 101 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 102 | <|"P_GCM"->nP4GCM[[base]], 103 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 104 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1]}, 105 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser","Generator_GCM->Obser"}, 106 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 107 | "Generator_GCM->Obser" -> -1,"Discriminator_GCM->Obser"->1}, 108 | BatchSize -> 32, 109 | TargetDevice->"GPU", 110 | MaxTrainingRounds->100, 111 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 112 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 113 | TrainingProgressReporting -> {{Function@ReportCycleGan[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 114 | 115 | gan=Import["/g/g92/pan11/Baseline_0_GAN_"<>index<>".mx"]; 116 | 117 | NetTrain[gan, 118 | 
{Function[Block[{base,choice,choice2}, 119 | base=RandomSample[Range[2,length],#BatchSize]; 120 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 121 | tempt=#+daylag+yearlag*365; 122 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 123 | <|"P_GCM"->nP4GCM[[base]], 124 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 125 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1]}, 126 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser","Generator_GCM->Obser"}, 127 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 128 | "Generator_GCM->Obser" -> -1,"Discriminator_GCM->Obser"->1}, 129 | BatchSize -> 32, 130 | TargetDevice->"GPU", 131 | MaxTrainingRounds->100, 132 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 133 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 134 | TrainingProgressReporting -> {{Function@ReportCycleGan[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 135 | 136 | gan=Import["/g/g92/pan11/Baseline_0_GAN_"<>index<>".mx"]; 137 | 138 | NetTrain[gan, 139 | {Function[Block[{base,choice,choice2}, 140 | base=RandomSample[Range[2,length],#BatchSize]; 141 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 142 | tempt=#+daylag+yearlag*365; 143 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 144 | <|"P_GCM"->nP4GCM[[base]], 145 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 146 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1]}, 147 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser","Generator_GCM->Obser"}, 148 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 149 | "Generator_GCM->Obser" -> -1,"Discriminator_GCM->Obser"->1}, 150 | BatchSize -> 32, 151 | TargetDevice->"GPU", 152 | MaxTrainingRounds->100, 153 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-6, 154 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 155 | TrainingProgressReporting -> {{Function@ReportCycleGan[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 156 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_10_GAN_DynamicsSelfIndentity.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | 12 | length=14610; 13 | vlength=3652; 14 | test=Table[<|"P_GCM"->nP4GCM[[i]], 15 | "D_GCM"->ndynamics4GCM[[i]], 16 | "P_Obser"->nP4Obser[[i]], 17 | "D_Obser"->ndynamics4Obser[[i]] 18 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 19 | 20 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 21 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 22 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 23 | 24 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 
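(* Data layout shared by all baselines: the last ~10 years of the record are held out as
   `test` before any reordering; `seq` then moves years with Mod[year,10] > 7 to the back, so
   after reordering the first length = 14610 days (~40 years) train the networks and the next
   vlength = 3652 days (~10 years) serve as validation. Training is staged: each script runs
   NetTrain two or three times with the learning rate lowered from 10^-4 to 10^-5 (and 10^-6),
   reloading the best checkpoint exported by the Report* callback between stages. *)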
25 | "D_GCM"->ndynamics4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]], 27 | "D_Obser"->ndynamics4Obser[[i]] 28 | |>,{i,length,length+vlength}]; 29 | 30 | generator=NetGraph[<| 31 | "chain"->{ConvolutionLayer[64,{3,3},"PaddingSize"->1], 32 | BatchNormalizationLayer[], 33 | Ramp, 34 | ConvolutionLayer[128,{3,3},"PaddingSize"->1], 35 | BatchNormalizationLayer[], 36 | Ramp, 37 | ConvolutionLayer[256,{3,3},"PaddingSize"->1], 38 | BatchNormalizationLayer[], 39 | Ramp, 40 | ConvolutionLayer[512,{3,3},"PaddingSize"->1], 41 | BatchNormalizationLayer[], 42 | Ramp, 43 | ConvolutionLayer[1,{1,1}]}, 44 | "combine"->ThreadingLayer[Plus], 45 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 46 | |>, 47 | {NetPort["P"]->"chain"->"combine", 48 | NetPort["P"]->"combine"->"cut"}, 49 | "P"->dim]; 50 | 51 | generatorGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 52 | generatorGCM2ObserR= NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 53 | 54 | 55 | discriminator = NetChain[{ 56 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 57 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 58 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 59 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 60 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 61 | "Input" -> dim]; 62 | discriminatorGCM2Obser = discriminator; 63 | discriminatorObser2GCM = discriminator; 64 | 65 | 66 | RdownscaleGCM=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_GCM.mx"]["net"]; 67 | DeltaGCM=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_GCM.mx"]["mse"]; 68 | 69 | RdownscaleObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["net"]; 70 | DeltaObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["mse"]; 71 | 72 | gan =NetGraph[<| 73 | "Generator_GCM->Obser" -> generatorGCM2Obser, 74 | "Generator_GCM->Obser_SelfRegression" -> generatorGCM2ObserR, 75 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 76 | "Cat_GCM->Obser" -> CatenateLayer[], 77 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 78 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 79 | "Fake_GCM->Obser"->PartLayer[1], 80 | "Real_GCM->Obser"->PartLayer[2], 81 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 82 | 83 | "R_Downscaling_Obser"->RdownscaleObser, 84 | "MS_Obser_RDownscaling"->MeanSquaredLossLayer[], 85 | "Max_Obser_RDownscaling"->ElementwiseLayer[Max[#,DeltaObser]-DeltaObser &], 86 | 87 | "MS_GCM2Obser_SelfRegression"->MeanAbsoluteLossLayer[] 88 | |>, 89 | 90 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 91 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 92 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 93 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 94 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 95 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 96 | 97 | NetPort["D_GCM"]->"R_Downscaling_Obser"->"MS_Obser_RDownscaling", 98 | "Generator_GCM->Obser"->"MS_Obser_RDownscaling"->"Max_Obser_RDownscaling"->NetPort["Loss_RDownscaling_GCM"], 99 | 100 | 101 | NetPort["P_Obser"]->NetPort["Generator_GCM->Obser_SelfRegression","P"], 102 | "Generator_GCM->Obser_SelfRegression"->"MS_GCM2Obser_SelfRegression", 103 | 
NetPort["P_Obser"]->"MS_GCM2Obser_SelfRegression"->NetPort["Loss_GCM2Obser_SelfRegression"] 104 | }, 105 | "P_Obser" -> dim, 106 | "P_GCM" -> dim, 107 | "D_GCM" -> dim2]; 108 | 109 | DiffMean=Infinity; 110 | DiffVar=Infinity; 111 | 112 | 113 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 114 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 115 | 116 | index=StringSplit[CreateUUID[],"-"][[1]]; 117 | Print[index]; 118 | ReportCycleGan2[net_] := 119 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 120 | gen=net[["Generator_GCM->Obser"]]; 121 | obserG=Map[gen[#[["P_GCM"]],TargetDevice->"GPU"]&,validation]; 122 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 123 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 124 | Print[TableForm[{{DiffMean,DiffVar}, 125 | {meanDiff,varDiff}}]]; 126 | If[meanDiff+varDiff<=DiffMean+DiffVar, 127 | Block[{}, 128 | Print[index]; 129 | Export["/g/g92/pan11/Baseline_10_GAN_DynamicsSelfIdentity_"<>index<>".mx",net]; 130 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 131 | 132 | NetTrain[gan, 133 | {Function[Block[{base,choice,choice2}, 134 | base=RandomSample[Range[2,length],#BatchSize]; 135 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 136 | tempt=#+daylag+yearlag*365; 137 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 138 | <|"P_GCM"->nP4GCM[[base]], 139 | "D_GCM"->ndynamics4GCM[[base]], 140 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 141 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1], 142 | "Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]], 143 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]]}, 144 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser", 145 | "Generator_GCM->Obser", 146 | "Generator_GCM->Obser_SelfRegression"}, 147 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 148 | "Generator_GCM->Obser" -> -1, 149 | "Generator_GCM->Obser_SelfRegression" -> -1, 150 | "Discriminator_GCM->Obser"->1,"R_Downscaling_Obser"->0}, 151 | BatchSize -> 32, 152 | TargetDevice->"GPU", 153 | MaxTrainingRounds->100, 154 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 155 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 156 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 157 | 158 | gan=Import["/g/g92/pan11/Baseline_10_GAN_DynamicsSelfIdentity_"<>index<>".mx",]; 159 | NetTrain[gan, 160 | {Function[Block[{base,choice,choice2}, 161 | base=RandomSample[Range[2,length],#BatchSize]; 162 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 163 | tempt=#+daylag+yearlag*365; 164 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 165 | <|"P_GCM"->nP4GCM[[base]], 166 | "D_GCM"->ndynamics4GCM[[base]], 167 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 168 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1], 169 | "Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]], 170 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]]}, 171 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser", 172 | "Generator_GCM->Obser", 173 | "Generator_GCM->Obser_SelfRegression"}, 174 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 175 | "Generator_GCM->Obser" -> -1, 176 | "Generator_GCM->Obser_SelfRegression" -> -1, 177 | "Discriminator_GCM->Obser"->1,"R_Downscaling_Obser"->0}, 178 | BatchSize -> 32, 
179 | TargetDevice->"GPU", 180 | MaxTrainingRounds->200, 181 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 182 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 183 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 184 | 185 | gan=Import["/g/g92/pan11/Baseline_10_GAN_DynamicsSelfIdentity_"<>index<>".mx",]; 186 | NetTrain[gan, 187 | {Function[Block[{base,choice,choice2}, 188 | base=RandomSample[Range[2,length],#BatchSize]; 189 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 190 | tempt=#+daylag+yearlag*365; 191 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 192 | <|"P_GCM"->nP4GCM[[base]], 193 | "D_GCM"->ndynamics4GCM[[base]], 194 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 195 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1], 196 | "Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]], 197 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]]}, 198 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser", 199 | "Generator_GCM->Obser", 200 | "Generator_GCM->Obser_SelfRegression"}, 201 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 202 | "Generator_GCM->Obser" -> -1, 203 | "Generator_GCM->Obser_SelfRegression" -> -1, 204 | "Discriminator_GCM->Obser"->1,"R_Downscaling_Obser"->0}, 205 | BatchSize -> 32, 206 | TargetDevice->"GPU", 207 | MaxTrainingRounds->200, 208 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-6, 209 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 210 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 211 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_11_GAN_ConditionalCycleDynamics.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | 12 | length=14610; 13 | vlength=3652; 14 | test=Table[<|"P_GCM"->nP4GCM[[i]], 15 | "D_GCM"->ndynamics4GCM[[i]], 16 | "P_Obser"->nP4Obser[[i]], 17 | "D_Obser"->ndynamics4Obser[[i]] 18 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 19 | 20 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 21 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 22 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 23 | 24 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 25 | "D_GCM"->ndynamics4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]], 27 | "D_Obser"->ndynamics4Obser[[i]] 28 | |>,{i,length,length+vlength}]; 29 | 30 | 31 | generator=NetGraph[<|"Catenate"->CatenateLayer[1], 32 | "Padding"->PaddingLayer[{{0,0},{5,5},{4,4}}], 33 | "chain"->{ConvolutionLayer[64,{3,3}], 34 | BatchNormalizationLayer[], 35 | Ramp, 36 | ConvolutionLayer[128,{3,3}], 37 | BatchNormalizationLayer[], 38 | 
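(* Conditional generator (Baseline_11): P (1 x 26 x 48) is zero-padded to 36 x 56 and catenated
   along the channel dimension with the dynamics window z (3*(Total[days]+1) channels of 36 x 56);
   the unpadded 3x3, 3x3, 3x3 and 5x3 convolutions then shrink 36 x 56 back to 26 x 48, so the
   output can again be added to P as a residual correction. *)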
Ramp, 39 | ConvolutionLayer[256,{3,3}], 40 | BatchNormalizationLayer[], 41 | Ramp, 42 | ConvolutionLayer[512,{5,3}], 43 | BatchNormalizationLayer[], 44 | Ramp, 45 | ConvolutionLayer[1,{1,1}]}, 46 | "combine"->ThreadingLayer[Plus], 47 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 48 | |>, 49 | {NetPort["P"]->"Padding"->"Catenate", 50 | NetPort["z"]->"Catenate"->"chain"->"combine", 51 | NetPort["P"]->"combine"->"cut"}, 52 | "P"->dim, 53 | "z"->dim2] 54 | 55 | generatorGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 56 | cycleGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 57 | 58 | generatorObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 59 | cycleObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 60 | 61 | discriminator = NetChain[{ 62 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 63 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 64 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 65 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 66 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 67 | "Input" -> dim]; 68 | discriminatorGCM2Obser = discriminator; 69 | discriminatorObser2GCM = discriminator; 70 | 71 | 72 | RdownscaleGCM=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_GCM.mx"]["net"]; 73 | DeltaGCM=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_GCM.mx"]["mse"]; 74 | 75 | RdownscaleObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["net"]; 76 | DeltaObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["mse"]; 77 | 78 | cycleGAN =NetGraph[<| 79 | "Generator_GCM->Obser" -> generatorGCM2Obser, 80 | "Cycle_GCM->Obser" -> cycleGCM2Obser, 81 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 82 | "Cat_GCM->Obser" -> CatenateLayer[], 83 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 84 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 85 | "Fake_GCM->Obser"->PartLayer[1], 86 | "Real_GCM->Obser"->PartLayer[2], 87 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 88 | "MS_GCM->Obser"->MeanAbsoluteLossLayer[], 89 | 90 | "Generator_Obser->GCM" -> generatorObser2GCM, 91 | "Cycle_Obser->GCM" -> cycleObser2GCM, 92 | "Discriminator_Obser->GCM" -> NetMapOperator[discriminatorObser2GCM], 93 | "Cat_Obser->GCM" -> CatenateLayer[], 94 | "Reshape_Obser->GCM" -> ReshapeLayer[Prepend[dim,2]], 95 | "Flat_Obser->GCM" -> ReshapeLayer[{2}], 96 | "Fake_Obser->GCM"->PartLayer[1], 97 | "Real_Obser->GCM"->PartLayer[2], 98 | "Scale_Obser->GCM" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 99 | "MS_Obser->GCM"->MeanAbsoluteLossLayer[], 100 | 101 | "R_Downscaling_GCM"->RdownscaleGCM, 102 | "R_Downscaling_Obser"->RdownscaleObser, 103 | 104 | "MS_GCM_RDownscaling"->MeanSquaredLossLayer[], 105 | "MS_Obser_RDownscaling"->MeanSquaredLossLayer[], 106 | "Max_GCM_RDownscaling"->ElementwiseLayer[Max[#,DeltaGCM]-DeltaGCM &], 107 | "Max_Obser_RDownscaling"->ElementwiseLayer[Max[#,DeltaObser]-DeltaObser &] 108 | 109 | |>, 110 | 111 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 112 | NetPort["D_GCM"]->NetPort["Generator_GCM->Obser","z"], 113 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 114 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 115 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> 
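(* Full conditional CycleGAN: each direction has a Generator and a weight-sharing Cycle copy
   (NetInsertSharedArrays with the same prefix), so mapping GCM->Obser and back, conditioned on
   the same D_GCM, must reproduce P_GCM under an L1 reconstruction loss (and symmetrically for
   Obser->GCM). The frozen R_Downscaling_* nets add the hinge dynamics penalty in both directions. *)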
"Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 116 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 117 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 118 | "Generator_GCM->Obser"->NetPort["Cycle_Obser->GCM","P"], 119 | NetPort["D_GCM"]->NetPort["Cycle_Obser->GCM","z"], 120 | "Cycle_Obser->GCM"->"MS_GCM->Obser", 121 | NetPort["P_GCM"]->"MS_GCM->Obser"->NetPort["ReconstructionLoss_GCM->Obser"], 122 | 123 | NetPort["D_GCM"]->"R_Downscaling_Obser"->"MS_Obser_RDownscaling", 124 | "Generator_GCM->Obser"->"MS_Obser_RDownscaling"->"Max_Obser_RDownscaling"->NetPort["Loss_RDownscaling_GCM"], 125 | NetPort["D_Obser"]->"R_Downscaling_GCM"->"MS_GCM_RDownscaling", 126 | "Generator_Obser->GCM"->"MS_GCM_RDownscaling"->"Max_GCM_RDownscaling"->NetPort["Loss_RDownscaling_Obser"], 127 | 128 | NetPort["P_Obser"] ->NetPort["Generator_Obser->GCM","P"], 129 | NetPort["D_Obser"] ->NetPort["Generator_Obser->GCM","z"], 130 | "Generator_Obser->GCM"-> "Cat_Obser->GCM", 131 | NetPort["P_GCM"] -> "Cat_Obser->GCM", 132 | "Cat_Obser->GCM" -> "Reshape_Obser->GCM" -> "Discriminator_Obser->GCM" -> "Flat_Obser->GCM" -> "Scale_Obser->GCM" -> 133 | "Fake_Obser->GCM"->NetPort["FakeLoss_Obser->GCM"], 134 | "Scale_Obser->GCM"->"Real_Obser->GCM"->NetPort["RealLoss_Obser->GCM"], 135 | "Generator_Obser->GCM"->NetPort["Cycle_GCM->Obser","P"], 136 | NetPort["D_Obser"] ->NetPort["Cycle_GCM->Obser","z"], 137 | "Cycle_GCM->Obser"->"MS_Obser->GCM", 138 | NetPort["P_Obser"]->"MS_Obser->GCM"->NetPort["ReconstructionLoss_Obser->GCM"] 139 | 140 | }, 141 | "P_Obser" -> dim, 142 | "P_GCM" -> dim, 143 | "D_GCM" -> dim2, 144 | "D_Obser" -> dim2]; 145 | 146 | DiffMean=Infinity; 147 | DiffVar=Infinity; 148 | 149 | 150 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 151 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 152 | 153 | index=StringSplit[CreateUUID[],"-"][[1]]; 154 | Print[index]; 155 | ReportCycleGan2[net_] := 156 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 157 | gen=net[["Generator_GCM->Obser"]]; 158 | obserG=Map[gen[<|"P"->#[["P_GCM"]],"z"->#[["D_GCM"]]|>,TargetDevice->"GPU"]&,validation]; 159 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 160 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 161 | Print[TableForm[{{DiffMean,DiffVar}, 162 | {meanDiff,varDiff}}]]; 163 | If[meanDiff+varDiff<=DiffMean+DiffVar, 164 | Block[{}, 165 | Print[index]; 166 | Export["/g/g92/pan11/Baseline_11_GAN_ConditionalCycleDynamics_"<>index<>".mx",net]; 167 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 168 | 169 | NetTrain[cycleGAN, 170 | {Function[Block[{base,choice,choice2}, 171 | base=RandomSample[Range[2,length],#BatchSize]; 172 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 173 | tempt=#+daylag+yearlag*365; 174 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 175 | <|"P_GCM"->nP4GCM[[base]], 176 | "D_GCM"->ndynamics4GCM[[base]], 177 | "P_Obser"->nP4Obser[[choice]], 178 | "D_Obser"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 179 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 180 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]], 181 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]],"Loss_RDownscaling_Obser"->Scaled[-hype[[3]]]}, 182 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 183 | 
"Generator_GCM->Obser"|"Generator_Obser->GCM", 184 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM"}, 185 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 186 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 187 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 188 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1, 189 | "R_Downscaling_GCM"->0,"R_Downscaling_Obser"->0}, 190 | BatchSize -> 32, 191 | TargetDevice->"GPU", 192 | MaxTrainingRounds->200, 193 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 194 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 195 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 196 | 197 | cycleGAN=Import["/g/g92/pan11/Baseline_11_GAN_ConditionalCycleDynamics_"<>index<>".mx"]; 198 | NetTrain[cycleGAN, 199 | {Function[Block[{base,choice,choice2}, 200 | base=RandomSample[Range[2,length],#BatchSize]; 201 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 202 | tempt=#+daylag+yearlag*365; 203 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 204 | <|"P_GCM"->nP4GCM[[base]], 205 | "D_GCM"->ndynamics4GCM[[base]], 206 | "P_Obser"->nP4Obser[[choice]], 207 | "D_Obser"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 208 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 209 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]], 210 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]],"Loss_RDownscaling_Obser"->Scaled[-hype[[3]]]}, 211 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 212 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 213 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM"}, 214 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 215 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 216 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 217 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1, 218 | "R_Downscaling_GCM"->0,"R_Downscaling_Obser"->0}, 219 | BatchSize -> 32, 220 | TargetDevice->"GPU", 221 | MaxTrainingRounds->200, 222 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 223 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 224 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 225 | 226 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_12_GAN_ConditionalCycleSelfIdentity.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | 12 | length=14610; 13 | vlength=3652; 14 | test=Table[<|"P_GCM"->nP4GCM[[i]], 15 | "D_GCM"->ndynamics4GCM[[i]], 16 | 
"P_Obser"->nP4Obser[[i]], 17 | "D_Obser"->ndynamics4Obser[[i]] 18 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 19 | 20 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 21 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 22 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 23 | 24 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 25 | "D_GCM"->ndynamics4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]], 27 | "D_Obser"->ndynamics4Obser[[i]] 28 | |>,{i,length,length+vlength}]; 29 | 30 | 31 | generator=NetGraph[<|"Catenate"->CatenateLayer[1], 32 | "Padding"->PaddingLayer[{{0,0},{5,5},{4,4}}], 33 | "chain"->{ConvolutionLayer[64,{3,3}], 34 | BatchNormalizationLayer[], 35 | Ramp, 36 | ConvolutionLayer[128,{3,3}], 37 | BatchNormalizationLayer[], 38 | Ramp, 39 | ConvolutionLayer[256,{3,3}], 40 | BatchNormalizationLayer[], 41 | Ramp, 42 | ConvolutionLayer[512,{5,3}], 43 | BatchNormalizationLayer[], 44 | Ramp, 45 | ConvolutionLayer[1,{1,1}]}, 46 | "combine"->ThreadingLayer[Plus], 47 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 48 | |>, 49 | {NetPort["P"]->"Padding"->"Catenate", 50 | NetPort["z"]->"Catenate"->"chain"->"combine", 51 | NetPort["P"]->"combine"->"cut"}, 52 | "P"->dim, 53 | "z"->dim2] 54 | 55 | generatorGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 56 | cycleGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 57 | generatorGCM2ObserR= NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 58 | 59 | generatorObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 60 | cycleObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 61 | generatorObser2GCMR = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 62 | 63 | discriminator = NetChain[{ 64 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 65 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 66 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 67 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 68 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 69 | "Input" -> dim]; 70 | discriminatorGCM2Obser = discriminator; 71 | discriminatorObser2GCM = discriminator; 72 | 73 | 74 | 75 | cycleGAN =NetGraph[<| 76 | "Generator_GCM->Obser" -> generatorGCM2Obser, 77 | "Generator_GCM->Obser_SelfRegression" -> generatorGCM2ObserR, 78 | "Cycle_GCM->Obser" -> cycleGCM2Obser, 79 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 80 | "Cat_GCM->Obser" -> CatenateLayer[], 81 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 82 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 83 | "Fake_GCM->Obser"->PartLayer[1], 84 | "Real_GCM->Obser"->PartLayer[2], 85 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 86 | "MS_GCM->Obser"->MeanAbsoluteLossLayer[], 87 | 88 | "Generator_Obser->GCM" -> generatorObser2GCM, 89 | "Generator_Obser->GCM_SelfRegression" -> generatorObser2GCMR, 90 | "Cycle_Obser->GCM" -> cycleObser2GCM, 91 | "Discriminator_Obser->GCM" -> NetMapOperator[discriminatorObser2GCM], 92 | "Cat_Obser->GCM" -> CatenateLayer[], 93 | "Reshape_Obser->GCM" -> ReshapeLayer[Prepend[dim,2]], 94 | "Flat_Obser->GCM" -> ReshapeLayer[{2}], 95 | 
"Fake_Obser->GCM"->PartLayer[1], 96 | "Real_Obser->GCM"->PartLayer[2], 97 | "Scale_Obser->GCM" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 98 | "MS_Obser->GCM"->MeanAbsoluteLossLayer[], 99 | 100 | "MS_GCM2Obser_SelfRegression"->MeanAbsoluteLossLayer[], 101 | "MS_Obser2GCM_SelfRegression"->MeanAbsoluteLossLayer[] 102 | |>, 103 | 104 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 105 | NetPort["D_GCM"]->NetPort["Generator_GCM->Obser","z"], 106 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 107 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 108 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 109 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 110 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 111 | "Generator_GCM->Obser"->NetPort["Cycle_Obser->GCM","P"], 112 | NetPort["D_GCM"]->NetPort["Cycle_Obser->GCM","z"], 113 | "Cycle_Obser->GCM"->"MS_GCM->Obser", 114 | NetPort["P_GCM"]->"MS_GCM->Obser"->NetPort["ReconstructionLoss_GCM->Obser"], 115 | 116 | 117 | NetPort["P_Obser"] ->NetPort["Generator_Obser->GCM","P"], 118 | NetPort["D_Obser"] ->NetPort["Generator_Obser->GCM","z"], 119 | "Generator_Obser->GCM"-> "Cat_Obser->GCM", 120 | NetPort["P_GCM"] -> "Cat_Obser->GCM", 121 | "Cat_Obser->GCM" -> "Reshape_Obser->GCM" -> "Discriminator_Obser->GCM" -> "Flat_Obser->GCM" -> "Scale_Obser->GCM" -> 122 | "Fake_Obser->GCM"->NetPort["FakeLoss_Obser->GCM"], 123 | "Scale_Obser->GCM"->"Real_Obser->GCM"->NetPort["RealLoss_Obser->GCM"], 124 | "Generator_Obser->GCM"->NetPort["Cycle_GCM->Obser","P"], 125 | NetPort["D_Obser"] ->NetPort["Cycle_GCM->Obser","z"], 126 | "Cycle_GCM->Obser"->"MS_Obser->GCM", 127 | NetPort["P_Obser"]->"MS_Obser->GCM"->NetPort["ReconstructionLoss_Obser->GCM"], 128 | 129 | NetPort["P_GCM"]->NetPort["Generator_Obser->GCM_SelfRegression","P"], 130 | NetPort["D_GCM"]->NetPort["Generator_Obser->GCM_SelfRegression","z"], 131 | "Generator_Obser->GCM_SelfRegression"->"MS_Obser2GCM_SelfRegression", 132 | NetPort["P_GCM"]->"MS_Obser2GCM_SelfRegression"->NetPort["Loss_Obser2GCM_SelfRegression"], 133 | 134 | NetPort["P_Obser"]->NetPort["Generator_GCM->Obser_SelfRegression","P"], 135 | NetPort["D_Obser"]->NetPort["Generator_GCM->Obser_SelfRegression","z"], 136 | "Generator_GCM->Obser_SelfRegression"->"MS_GCM2Obser_SelfRegression", 137 | NetPort["P_Obser"]->"MS_GCM2Obser_SelfRegression"->NetPort["Loss_GCM2Obser_SelfRegression"] 138 | }, 139 | "P_Obser" -> dim, 140 | "P_GCM" -> dim, 141 | "D_GCM" -> dim2, 142 | "D_Obser" -> dim2]; 143 | 144 | DiffMean=Infinity; 145 | DiffVar=Infinity; 146 | 147 | 148 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 149 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 150 | 151 | index=StringSplit[CreateUUID[],"-"][[1]]; 152 | Print[index]; 153 | ReportCycleGan2[net_] := 154 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 155 | gen=net[["Generator_GCM->Obser"]]; 156 | obserG=Map[gen[<|"P"->#[["P_GCM"]],"z"->#[["D_GCM"]]|>,TargetDevice->"GPU"]&,validation]; 157 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 158 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 159 | Print[TableForm[{{DiffMean,DiffVar}, 160 | {meanDiff,varDiff}}]]; 161 | If[meanDiff+varDiff<=DiffMean+DiffVar, 162 | Block[{}, 163 | Print[index]; 164 | Export["/g/g92/pan11/Baseline_12_GAN_ConditionalCycleSelfIdentity_"<>index<>".mx",net]; 165 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 166 | 167 | 
NetTrain[cycleGAN, 168 | {Function[Block[{base,choice,choice2}, 169 | base=RandomSample[Range[2,length],#BatchSize]; 170 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 171 | tempt=#+daylag+yearlag*365; 172 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 173 | <|"P_GCM"->nP4GCM[[base]], 174 | "D_GCM"->ndynamics4GCM[[base]], 175 | "P_Obser"->nP4Obser[[choice]], 176 | "D_Obser"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 177 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 178 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]], 179 | "Loss_Obser2GCM_SelfRegression"->Scaled[-hype[[3]]],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]]}, 180 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 181 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 182 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM", 183 | "Generator_GCM->Obser_SelfRegression"|"Generator_Obser->GCM_SelfRegression"}, 184 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 185 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 186 | "Generator_GCM->Obser_SelfRegression" -> -1,"Generator_Obser->GCM_SelfRegression"->-1, 187 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 188 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1}, 189 | BatchSize -> 32, 190 | TargetDevice->"GPU", 191 | MaxTrainingRounds->200, 192 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 193 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 194 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 195 | 196 | cycleGAN=Import["/g/g92/pan11/Baseline_12_GAN_ConditionalCycleSelfIdentity_"<>index<>".mx"]; 197 | NetTrain[cycleGAN, 198 | {Function[Block[{base,choice,choice2}, 199 | base=RandomSample[Range[2,length],#BatchSize]; 200 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 201 | tempt=#+daylag+yearlag*365; 202 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 203 | <|"P_GCM"->nP4GCM[[base]], 204 | "D_GCM"->ndynamics4GCM[[base]], 205 | "P_Obser"->nP4Obser[[choice]], 206 | "D_Obser"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 207 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 208 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]], 209 | "Loss_Obser2GCM_SelfRegression"->Scaled[-hype[[3]]],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]]}, 210 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 211 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 212 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM", 213 | "Generator_GCM->Obser_SelfRegression"|"Generator_Obser->GCM_SelfRegression"}, 214 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 215 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 216 | "Generator_GCM->Obser_SelfRegression" -> -1,"Generator_Obser->GCM_SelfRegression"->-1, 217 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 218 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1}, 219 | BatchSize -> 32, 220 | 
TargetDevice->"GPU", 221 | MaxTrainingRounds->200, 222 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 223 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 224 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 225 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_13_GAN_ConditionalDynamicsIdentity.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | 12 | length=14610; 13 | vlength=3652; 14 | test=Table[<|"P_GCM"->nP4GCM[[i]], 15 | "D_GCM"->ndynamics4GCM[[i]], 16 | "P_Obser"->nP4Obser[[i]], 17 | "D_Obser"->ndynamics4Obser[[i]] 18 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 19 | 20 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 21 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 22 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 23 | 24 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 25 | "D_GCM"->ndynamics4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]], 27 | "D_Obser"->ndynamics4Obser[[i]] 28 | |>,{i,length,length+vlength}]; 29 | 30 | 31 | generator=NetGraph[<|"Catenate"->CatenateLayer[1], 32 | "Padding"->PaddingLayer[{{0,0},{5,5},{4,4}}], 33 | "chain"->{ConvolutionLayer[64,{3,3}], 34 | BatchNormalizationLayer[], 35 | Ramp, 36 | ConvolutionLayer[128,{3,3}], 37 | BatchNormalizationLayer[], 38 | Ramp, 39 | ConvolutionLayer[256,{3,3}], 40 | BatchNormalizationLayer[], 41 | Ramp, 42 | ConvolutionLayer[512,{5,3}], 43 | BatchNormalizationLayer[], 44 | Ramp, 45 | ConvolutionLayer[1,{1,1}]}, 46 | "combine"->ThreadingLayer[Plus], 47 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 48 | |>, 49 | {NetPort["P"]->"Padding"->"Catenate", 50 | NetPort["z"]->"Catenate"->"chain"->"combine", 51 | NetPort["P"]->"combine"->"cut"}, 52 | "P"->dim, 53 | "z"->dim2] 54 | 55 | generatorGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 56 | generatorGCM2ObserR= NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 57 | 58 | 59 | discriminator = NetChain[{ 60 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 61 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 62 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 63 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 64 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 65 | "Input" -> dim]; 66 | discriminatorGCM2Obser = discriminator; 67 | discriminatorObser2GCM = discriminator; 68 | 69 | 70 | RdownscaleGCM=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_GCM.mx"]["net"]; 71 | 
DeltaGCM=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_GCM.mx"]["mse"]; 72 | 73 | RdownscaleObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["net"]; 74 | DeltaObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["mse"]; 75 | 76 | gan =NetGraph[<| 77 | "Generator_GCM->Obser" -> generatorGCM2Obser, 78 | "Generator_GCM->Obser_SelfRegression" -> generatorGCM2ObserR, 79 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 80 | "Cat_GCM->Obser" -> CatenateLayer[], 81 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 82 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 83 | "Fake_GCM->Obser"->PartLayer[1], 84 | "Real_GCM->Obser"->PartLayer[2], 85 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 86 | 87 | "R_Downscaling_Obser"->RdownscaleObser, 88 | "MS_Obser_RDownscaling"->MeanSquaredLossLayer[], 89 | "Max_Obser_RDownscaling"->ElementwiseLayer[Max[#,DeltaObser]-DeltaObser &], 90 | 91 | "MS_GCM2Obser_SelfRegression"->MeanAbsoluteLossLayer[] 92 | |>, 93 | 94 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 95 | NetPort["D_GCM"]->NetPort["Generator_GCM->Obser","z"], 96 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 97 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 98 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 99 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 100 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 101 | 102 | NetPort["D_GCM"]->"R_Downscaling_Obser"->"MS_Obser_RDownscaling", 103 | "Generator_GCM->Obser"->"MS_Obser_RDownscaling"->"Max_Obser_RDownscaling"->NetPort["Loss_RDownscaling_GCM"], 104 | 105 | NetPort["P_Obser"]->NetPort["Generator_GCM->Obser_SelfRegression","P"], 106 | NetPort["D_Obser"]->NetPort["Generator_GCM->Obser_SelfRegression","z"], 107 | "Generator_GCM->Obser_SelfRegression"->"MS_GCM2Obser_SelfRegression", 108 | NetPort["P_Obser"]->"MS_GCM2Obser_SelfRegression"->NetPort["Loss_GCM2Obser_SelfRegression"] 109 | }, 110 | "P_Obser" -> dim, 111 | "P_GCM" -> dim, 112 | "D_GCM" -> dim2, 113 | "D_Obser" -> dim2]; 114 | 115 | DiffMean=Infinity; 116 | DiffVar=Infinity; 117 | 118 | 119 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 120 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 121 | 122 | index=StringSplit[CreateUUID[],"-"][[1]]; 123 | Print[index]; 124 | ReportCycleGan2[net_] := 125 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 126 | gen=net[["Generator_GCM->Obser"]]; 127 | obserG=Map[gen[<|"P"->#[["P_GCM"]],"z"->#[["D_GCM"]]|>,TargetDevice->"GPU"]&,validation]; 128 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 129 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 130 | Print[TableForm[{{DiffMean,DiffVar}, 131 | {meanDiff,varDiff}}]]; 132 | If[meanDiff+varDiff<=DiffMean+DiffVar, 133 | Block[{}, 134 | Print[index]; 135 | Export["/g/g92/pan11/Baseline_13_GAN_ConditionalDynamicsIdentity"<>index<>".mx",net]; 136 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 137 | 138 | NetTrain[gan, 139 | {Function[Block[{base,choice,choice2}, 140 | base=RandomSample[Range[2,length],#BatchSize]; 141 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 142 | tempt=#+daylag+yearlag*365; 143 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 144 | <|"P_GCM"->nP4GCM[[base]], 145 | "D_GCM"->ndynamics4GCM[[base]], 146 | "P_Obser"->nP4Obser[[choice]], 147 | 
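(* Unlike Baseline_10, whose identity branch sees only P_Obser, the conditional *_SelfRegression
   copy here is fed both the observed precipitation and the observed dynamics (D_Obser) and must
   return P_Obser unchanged, anchoring the conditional generator to the identity on the target
   domain. *)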
"D_Obser"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 148 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1], 149 | "Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]], 150 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]]}, 151 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser", 152 | "Generator_GCM->Obser", 153 | "Generator_GCM->Obser_SelfRegression"}, 154 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 155 | "Generator_GCM->Obser" -> -1, 156 | "Generator_GCM->Obser_SelfRegression" -> -1, 157 | "Discriminator_GCM->Obser"->1, 158 | "R_Downscaling_Obser"->0}, 159 | BatchSize -> 32, 160 | TargetDevice->"GPU", 161 | MaxTrainingRounds->200, 162 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 163 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 164 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 165 | 166 | gan=Import["/g/g92/pan11/Baseline_13_GAN_ConditionalDynamicsIdentity"<>index<>".mx"]; 167 | NetTrain[gan, 168 | {Function[Block[{base,choice,choice2}, 169 | base=RandomSample[Range[2,length],#BatchSize]; 170 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 171 | tempt=#+daylag+yearlag*365; 172 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 173 | <|"P_GCM"->nP4GCM[[base]], 174 | "D_GCM"->ndynamics4GCM[[base]], 175 | "P_Obser"->nP4Obser[[choice]], 176 | "D_Obser"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 177 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1], 178 | "Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]], 179 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]]}, 180 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser", 181 | "Generator_GCM->Obser", 182 | "Generator_GCM->Obser_SelfRegression"}, 183 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 184 | "Generator_GCM->Obser" -> -1, 185 | "Generator_GCM->Obser_SelfRegression" -> -1, 186 | "Discriminator_GCM->Obser"->1, 187 | "R_Downscaling_Obser"->0}, 188 | BatchSize -> 32, 189 | TargetDevice->"GPU", 190 | MaxTrainingRounds->200, 191 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 192 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 193 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 194 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_14_GAN_CycleDynamicsIdentity.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | 12 | length=14610; 13 | vlength=3652; 14 | test=Table[<|"P_GCM"->nP4GCM[[i]], 15 | "D_GCM"->ndynamics4GCM[[i]], 16 | "P_Obser"->nP4Obser[[i]], 17 | "D_Obser"->ndynamics4Obser[[i]] 18 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 19 | 20 | 
seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 21 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 22 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 23 | 24 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 25 | "D_GCM"->ndynamics4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]], 27 | "D_Obser"->ndynamics4Obser[[i]] 28 | |>,{i,length,length+vlength}]; 29 | 30 | generator=NetGraph[<| 31 | "chain"->{ConvolutionLayer[64,{3,3},"PaddingSize"->1], 32 | BatchNormalizationLayer[], 33 | Ramp, 34 | ConvolutionLayer[128,{3,3},"PaddingSize"->1], 35 | BatchNormalizationLayer[], 36 | Ramp, 37 | ConvolutionLayer[256,{3,3},"PaddingSize"->1], 38 | BatchNormalizationLayer[], 39 | Ramp, 40 | ConvolutionLayer[512,{3,3},"PaddingSize"->1], 41 | BatchNormalizationLayer[], 42 | Ramp, 43 | ConvolutionLayer[1,{1,1}]}, 44 | "combine"->ThreadingLayer[Plus], 45 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 46 | |>, 47 | {NetPort["P"]->"chain"->"combine", 48 | NetPort["P"]->"combine"->"cut"}, 49 | "P"->dim]; 50 | 51 | 52 | generatorGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 53 | cycleGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 54 | generatorGCM2ObserR= NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 55 | 56 | generatorObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 57 | cycleObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 58 | generatorObser2GCMR = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 59 | 60 | discriminator = NetChain[{ 61 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 62 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 63 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 64 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 65 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 66 | "Input" -> dim]; 67 | discriminatorGCM2Obser = discriminator; 68 | discriminatorObser2GCM = discriminator; 69 | 70 | 71 | RdownscaleGCM=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_GCM.mx"]["net"]; 72 | DeltaGCM=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_GCM.mx"]["mse"]; 73 | 74 | RdownscaleObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["net"]; 75 | DeltaObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["mse"]; 76 | 77 | cycleGAN =NetGraph[<| 78 | "Generator_GCM->Obser" -> generatorGCM2Obser, 79 | "Generator_GCM->Obser_SelfRegression" -> generatorGCM2ObserR, 80 | "Cycle_GCM->Obser" -> cycleGCM2Obser, 81 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 82 | "Cat_GCM->Obser" -> CatenateLayer[], 83 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 84 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 85 | "Fake_GCM->Obser"->PartLayer[1], 86 | "Real_GCM->Obser"->PartLayer[2], 87 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 88 | "MS_GCM->Obser"->MeanAbsoluteLossLayer[], 89 | 90 | "Generator_Obser->GCM" -> generatorObser2GCM, 91 | "Generator_Obser->GCM_SelfRegression" -> generatorObser2GCMR, 92 | "Cycle_Obser->GCM" -> cycleObser2GCM, 93 | "Discriminator_Obser->GCM" -> NetMapOperator[discriminatorObser2GCM], 94 | 
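(* Both directions reuse the same discriminator NetChain template; since no shared arrays are
   inserted here (unlike the generators), the two discriminator instances in the graph appear to
   train as independent copies. *)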
"Cat_Obser->GCM" -> CatenateLayer[], 95 | "Reshape_Obser->GCM" -> ReshapeLayer[Prepend[dim,2]], 96 | "Flat_Obser->GCM" -> ReshapeLayer[{2}], 97 | "Fake_Obser->GCM"->PartLayer[1], 98 | "Real_Obser->GCM"->PartLayer[2], 99 | "Scale_Obser->GCM" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 100 | "MS_Obser->GCM"->MeanAbsoluteLossLayer[], 101 | 102 | "R_Downscaling_GCM"->RdownscaleGCM, 103 | "R_Downscaling_Obser"->RdownscaleObser, 104 | 105 | "MS_GCM_RDownscaling"->MeanSquaredLossLayer[], 106 | "MS_Obser_RDownscaling"->MeanSquaredLossLayer[], 107 | "Max_GCM_RDownscaling"->ElementwiseLayer[Max[#,DeltaGCM]-DeltaGCM &], 108 | "Max_Obser_RDownscaling"->ElementwiseLayer[Max[#,DeltaObser]-DeltaObser &], 109 | 110 | "MS_GCM2Obser_SelfRegression"->MeanAbsoluteLossLayer[], 111 | "MS_Obser2GCM_SelfRegression"->MeanAbsoluteLossLayer[] 112 | |>, 113 | 114 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 115 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 116 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 117 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 118 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 119 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 120 | "Generator_GCM->Obser"->NetPort["Cycle_Obser->GCM","P"], 121 | "Cycle_Obser->GCM"->"MS_GCM->Obser", 122 | NetPort["P_GCM"]->"MS_GCM->Obser"->NetPort["ReconstructionLoss_GCM->Obser"], 123 | 124 | NetPort["D_GCM"]->"R_Downscaling_Obser"->"MS_Obser_RDownscaling", 125 | "Generator_GCM->Obser"->"MS_Obser_RDownscaling"->"Max_Obser_RDownscaling"->NetPort["Loss_RDownscaling_GCM"], 126 | NetPort["D_Obser"]->"R_Downscaling_GCM"->"MS_GCM_RDownscaling", 127 | "Generator_Obser->GCM"->"MS_GCM_RDownscaling"->"Max_GCM_RDownscaling"->NetPort["Loss_RDownscaling_Obser"], 128 | 129 | NetPort["P_Obser"] ->NetPort["Generator_Obser->GCM","P"], 130 | "Generator_Obser->GCM"-> "Cat_Obser->GCM", 131 | NetPort["P_GCM"] -> "Cat_Obser->GCM", 132 | "Cat_Obser->GCM" -> "Reshape_Obser->GCM" -> "Discriminator_Obser->GCM" -> "Flat_Obser->GCM" -> "Scale_Obser->GCM" -> 133 | "Fake_Obser->GCM"->NetPort["FakeLoss_Obser->GCM"], 134 | "Scale_Obser->GCM"->"Real_Obser->GCM"->NetPort["RealLoss_Obser->GCM"], 135 | "Generator_Obser->GCM"->NetPort["Cycle_GCM->Obser","P"], 136 | "Cycle_GCM->Obser"->"MS_Obser->GCM", 137 | NetPort["P_Obser"]->"MS_Obser->GCM"->NetPort["ReconstructionLoss_Obser->GCM"], 138 | 139 | NetPort["P_GCM"]->NetPort["Generator_Obser->GCM_SelfRegression","P"], 140 | "Generator_Obser->GCM_SelfRegression"->"MS_Obser2GCM_SelfRegression", 141 | NetPort["P_GCM"]->"MS_Obser2GCM_SelfRegression"->NetPort["Loss_Obser2GCM_SelfRegression"], 142 | 143 | NetPort["P_Obser"]->NetPort["Generator_GCM->Obser_SelfRegression","P"], 144 | "Generator_GCM->Obser_SelfRegression"->"MS_GCM2Obser_SelfRegression", 145 | NetPort["P_Obser"]->"MS_GCM2Obser_SelfRegression"->NetPort["Loss_GCM2Obser_SelfRegression"] 146 | }, 147 | "P_Obser" -> dim, 148 | "P_GCM" -> dim, 149 | "D_GCM" -> dim2, 150 | "D_Obser" -> dim2]; 151 | 152 | DiffMean=Infinity; 153 | DiffVar=Infinity; 154 | 155 | 156 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 157 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 158 | 159 | index=StringSplit[CreateUUID[],"-"][[1]]; 160 | Print[index]; 161 | ReportCycleGan2[net_] := 162 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 163 | gen=net[["Generator_GCM->Obser"]]; 164 | 
obserG=Map[gen[#[["P_GCM"]],TargetDevice->"GPU"]&,validation]; 165 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 166 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 167 | Print[TableForm[{{DiffMean,DiffVar}, 168 | {meanDiff,varDiff}}]]; 169 | If[meanDiff+varDiff<=DiffMean+DiffVar, 170 | Block[{}, 171 | Print[index]; 172 | Export["/g/g92/pan11/Baseline_14_GAN_CycleDynamicsIdentity_"<>index<>".mx",net]; 173 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 174 | 175 | NetTrain[cycleGAN, 176 | {Function[Block[{base,choice,choice2}, 177 | base=RandomSample[Range[2,length],#BatchSize]; 178 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 179 | tempt=#+daylag+yearlag*365; 180 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 181 | <|"P_GCM"->nP4GCM[[base]], 182 | "D_GCM"->ndynamics4GCM[[base]], 183 | "P_Obser"->nP4Obser[[choice]], 184 | "D_Obser"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 185 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 186 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]], 187 | "Loss_Obser2GCM_SelfRegression"->Scaled[-hype[[3]]],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]], 188 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]],"Loss_RDownscaling_Obser"->Scaled[-hype[[3]]]}, 189 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 190 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 191 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM", 192 | "Generator_GCM->Obser_SelfRegression"|"Generator_Obser->GCM_SelfRegression"}, 193 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 194 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 195 | "Generator_GCM->Obser_SelfRegression" -> -1,"Generator_Obser->GCM_SelfRegression"->-1, 196 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 197 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1, 198 | "R_Downscaling_GCM"->0,"R_Downscaling_Obser"->0}, 199 | BatchSize -> 32, 200 | TargetDevice->"GPU", 201 | MaxTrainingRounds->200, 202 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 203 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 204 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 205 | 206 | cycleGAN=Import["/g/g92/pan11/Baseline_14_GAN_CycleDynamicsIdentity_"<>index<>".mx"]; 207 | NetTrain[cycleGAN, 208 | {Function[Block[{base,choice,choice2}, 209 | base=RandomSample[Range[2,length],#BatchSize]; 210 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 211 | tempt=#+daylag+yearlag*365; 212 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 213 | <|"P_GCM"->nP4GCM[[base]], 214 | "D_GCM"->ndynamics4GCM[[base]], 215 | "P_Obser"->nP4Obser[[choice]], 216 | "D_Obser"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 217 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 218 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]], 219 | "Loss_Obser2GCM_SelfRegression"->Scaled[-hype[[3]]],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]], 220 | 
"Loss_RDownscaling_GCM"->Scaled[-hype[[3]]],"Loss_RDownscaling_Obser"->Scaled[-hype[[3]]]}, 221 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 222 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 223 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM", 224 | "Generator_GCM->Obser_SelfRegression"|"Generator_Obser->GCM_SelfRegression"}, 225 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 226 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 227 | "Generator_GCM->Obser_SelfRegression" -> -1,"Generator_Obser->GCM_SelfRegression"->-1, 228 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 229 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1, 230 | "R_Downscaling_GCM"->0,"R_Downscaling_Obser"->0}, 231 | BatchSize -> 32, 232 | TargetDevice->"GPU", 233 | MaxTrainingRounds->200, 234 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 235 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 236 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 237 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_15_QuantileMapping.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | days={1,0} 3 | dim={1,26,48}; 4 | dim2={3*(Total[days]+1),36,56}; 5 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 6 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 7 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 8 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 9 | length=14610; 10 | vlength=3652; 11 | 12 | test=Table[<|"P_GCM"->nP4GCM[[i]], 13 | "D_GCM"->ndynamics4GCM[[i]], 14 | "P_Obser"->nP4Obser[[i]], 15 | "D_Obser"->ndynamics4Obser[[i]] 16 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 17 | 18 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 19 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 20 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 21 | 22 | training=Table[<|"P_GCM"->nP4GCM[[i]], 23 | "D_GCM"->ndynamics4GCM[[i]], 24 | "P_Obser"->nP4Obser[[i]], 25 | "D_Obser"->ndynamics4Obser[[i]] 26 | |>,{i,length}]; 27 | 28 | DADT=Table[If[validMatrix[[i,j]]==0, 29 | Table[0,Length[test]], 30 | Block[{sgcm=training[[;;,"P_GCM"]][[;;,1,i,j]], 31 | sobser=training[[;;,"P_Obser"]][[;;,1,i,j]],position,threshold,a1,range}, 32 | position=Position[Sort[sobser],0.][[-1,1]]; 33 | threshold=Sort[sgcm][[position]]; 34 | a1=sgcm/.x_/;x<=threshold->0; 35 | range=Block[{x=Sort[a1],y=Sort[sobser]},{Table[Quantile[x[[position+1;;]],i],{i,0,1,0.01}],Table[Quantile[y[[position+1;;]],i],{i,0,1,0.01}]}]; 36 | Table[Block[{element,p}, 37 | element=test[[kk,"P_GCM",1,i,j]]; 38 | p=Position[range[[1]],_?(#nP4GCM[[i]], 14 | "P_Obser"->nP4Obser[[i]] 15 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 16 | 17 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 18 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 19 | 
{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 20 | 21 | training=Table[<|"P_GCM"->nP4GCM[[i]], 22 | "P_Obser"->nP4Obser[[i]] 23 | |>,{i,length}]; 24 | 25 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]] 27 | |>,{i,length+1,Length[nP4GCM]}]; 28 | 29 | position=Position[validMatrix,1]; 30 | n=Length[position]; 31 | 32 | training=Table[<|"P_GCM"->Flatten[training[[i,1]]][[Position[Flatten[validMatrix],1][[;;,1]]]], 33 | "P_Obser"->Flatten[training[[i,2]]][[Position[Flatten[validMatrix],1][[;;,1]]]]|>,{i,Length[training]}]; 34 | validation=Table[<|"P_GCM"->Flatten[validation[[i,1]]][[Position[Flatten[validMatrix],1][[;;,1]]]], 35 | "P_Obser"->Flatten[validation[[i,2]]][[Position[Flatten[validMatrix],1][[;;,1]]]]|>,{i,Length[validation]}]; 36 | test=Table[<|"P_GCM"->Flatten[test[[i,1]]][[Position[Flatten[validMatrix],1][[;;,1]]]], 37 | "P_Obser"->Flatten[test[[i,2]]][[Position[Flatten[validMatrix],1][[;;,1]]]]|>,{i,Length[test]}]; 38 | 39 | 40 | gcm=training[[;;,1]]; 41 | obser=training[[;;,2]]; 42 | tobser=test[[;;,2]]; 43 | vobser=validation[[;;,2]]; 44 | rgcm=RandomSample[gcm]; 45 | robser=RandomSample[obser]; 46 | seq=RandomSample[Range[Length[training]]][[1;;Length[validation]]]; 47 | (* 48 | energy0=Block[{correction=gcm,rcorrection=RandomSample[correction]}, 49 | energy=2*Total[Flatten[Table[EuclideanDistance[correction[[i]],obser[[j]]],{i,Length[correction]},{j,Length[obser]}]]]/Length[correction]/Length[obser]-Total[Flatten[Table[EuclideanDistance[robser[[i]],obser[[j]]],{i,Length[correction]},{j,Length[obser]}]]]/Length[correction]/Length[obser]-Total[Flatten[Table[EuclideanDistance[rcorrection[[i]],correction[[j]]],{i,Length[correction]},{j,Length[obser]}]]]/Length[correction]/Length[obser]]; 50 | Export["/usr/workspace/pan11/CycleGAN/MBCn/Step_0.mx",<|"energy"->energy0|>]; 51 | *) 52 | 53 | Table[Block[{matrix,rtrainingGCM,rtrainingObser,transform,result,correction,ntransform,s1,s2,correctionValidation,correctionTest}, 54 | matrix=RandomVariate[CircularRealMatrixDistribution[n]]; 55 | rtrainingGCM=gcm.matrix; 56 | rtrainingObser=obser.matrix; 57 | qMapping[source_,target_,interval_:0.01]:=Block[{qsource,qtarget,position}, 58 | qsource=Table[Quantile[source,q],{q,0,1,interval}]; 59 | qtarget=Table[Quantile[target,q],{q,0,1,interval}]; 60 | position=Table[Position[qsource,_?(#>=source[[k]]&)][[1,1]],{k,Length[source]}]; 61 | Table[{{qsource[[Max[position[[i]]-1,1]]],qsource[[position[[i]]]]}, 62 | {qtarget[[Max[position[[i]]-1,1]]],qtarget[[position[[i]]]]},source[[i]]},{i,Length[position]}]]; 63 | transform=ParallelTable[Block[{},Print[i];qMapping[rtrainingGCM[[;;,i]],rtrainingObser[[;;,i]]]],{i,724}]; 64 | 65 | result=ParallelTable[If[transform[[dim,date]][[1,2]]!=transform[[dim,date]][[1,1]], 66 | (transform[[dim,date]][[3]]-transform[[dim,date]][[1,1]])/(transform[[dim,date]][[1,2]]-transform[[dim,date]][[1,1]])*(transform[[dim,date]][[2,2]]-transform[[dim,date]][[2,1]])+transform[[dim,date]][[2,1]],transform[[dim,date]][[2,1]]], 67 | {date,Length[rtrainingGCM]},{dim,n}]; 68 | correction=result.Inverse[matrix]; 69 | correction=correction/. 
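(* One MBCn pass (the multivariate bias-correction algorithm of Cannon, 2018): draw a random
   orthogonal matrix over the n valid grid cells (CircularRealMatrixDistribution samples the
   real circular ensemble, i.e. Haar-distributed orthogonal matrices), quantile-map each
   rotated coordinate of the GCM series onto the corresponding rotated observed series,
   rotate back with Inverse[matrix], and clip negatives. Repeating this for kk = 1..30 with
   fresh rotations progressively matches the joint, not just the marginal, distribution;
   the energy scores printed below are the convergence diagnostic. *)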
x_ /; x<0.->0.; 70 | (* 71 | energy=2*Total[Flatten[Table[EuclideanDistance[correction[[i]],obser[[j]]],{i,Length[correction]},{j,Length[obser]}]]]/Length[correction]/Length[obser]-Total[Flatten[Table[EuclideanDistance[obser[[i]],obser[[j]]],{i,Length[correction]},{j,Length[obser]}]]]/Length[correction]/Length[obser]-Total[Flatten[Table[EuclideanDistance[correction[[i]],correction[[j]]],{i,Length[correction]},{j,Length[obser]}]]]/Length[correction]/Length[obser]; 72 | *) 73 | 74 | energyTraining=2*Total[Flatten[Table[EuclideanDistance[correction[[i]],obser[[j]]],{i,seq},{j,seq}]]]/Length[seq]/Length[seq]-Total[Flatten[Table[EuclideanDistance[obser[[i]],obser[[j]]],{i,seq},{j,seq}]]]/Length[seq]/Length[seq]-Total[Flatten[Table[EuclideanDistance[correction[[i]],correction[[j]]],{i,seq},{j,seq}]]]/Length[seq]/Length[seq]; 75 | 76 | Print[energyTraining]; 77 | 78 | ntransform=Table[Sort[DeleteDuplicates[transform[[dim,;;,1;;2]]],#1[[1,2]]<=#2[[1,2]]&],{dim,Length[transform]}]; 79 | s1=test[[;;,1]].matrix; 80 | s2=ParallelTable[Block[{raw=s1[[;;,dim]],po}, 81 | result=Map[Block[{ele=#},If[ele<=ntransform[[dim,1,1,1]],ntransform[[dim,2,1,1]], 82 | If[ele>=ntransform[[dim,-1,1,2]],ntransform[[dim,-1,2,2]], 83 | Block[{po=ntransform[[dim,Position[ntransform[[dim,;;,1,1]],_?(#<=ele&)][[-1,1]]]]}, 84 | (ele-po[[1,1]])*(po[[1,2]]-po[[1,1]])+po[[2,1]]]]]]&,raw]; 85 | result],{dim,Dimensions[s1][[2]]}]; 86 | correctionTest=Transpose[s2].Inverse[matrix]; 87 | correctionTest=correctionTest/. x_ /; x<0.->0; 88 | 89 | (*energyTest=2*Total[Flatten[Table[EuclideanDistance[correctionTest[[i]],test[[j,2]]],{i,Length[test]},{j,Length[test]}]]]/Length[test]/Length[test]-Total[Flatten[Table[EuclideanDistance[test[[i,2]],test[[j,2]]],{i,Length[test]},{j,Length[test]}]]]/Length[test]/Length[test]-Total[Flatten[Table[EuclideanDistance[correctionTest[[i]],correctionTest[[j]]],{i,Length[test]},{j,Length[test]}]]]/Length[test]/Length[test]; 90 | Print[energyTest];*) 91 | energyTest=2*Total[Flatten[Table[EuclideanDistance[correctionTest[[i]],tobser[[j]]],{i,Length[tobser]},{j,Length[tobser]}]]]/Length[tobser]/Length[tobser]-Total[Flatten[Table[EuclideanDistance[tobser[[i]],tobser[[j]]],{i,Length[tobser]},{j,Length[tobser]}]]]/Length[tobser]/Length[tobser]-Total[Flatten[Table[EuclideanDistance[correctionTest[[i]],correctionTest[[j]]],{i,Length[tobser]},{j,Length[tobser]}]]]/Length[tobser]/Length[tobser]; 92 | Print[energyTest]; 93 | 94 | s1=validation[[;;,1]].matrix; 95 | s2=ParallelTable[Block[{raw=s1[[;;,dim]],po}, 96 | result=Map[Block[{ele=#},If[ele<=ntransform[[dim,1,1,1]],ntransform[[dim,2,1,1]], 97 | If[ele>=ntransform[[dim,-1,1,2]],ntransform[[dim,-1,2,2]], 98 | Block[{po=ntransform[[dim,Position[ntransform[[dim,;;,1,1]],_?(#<=ele&)][[-1,1]]]]}, 99 | (ele-po[[1,1]])*(po[[1,2]]-po[[1,1]])+po[[2,1]]]]]]&,raw]; 100 | result],{dim,Dimensions[s1][[2]]}]; 101 | correctionValidation=Transpose[s2].Inverse[matrix]; 102 | correctionValidation=correctionValidation/. 
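(* The diagnostic printed at each step is an energy-distance estimate between the corrected
   and observed samples: E[X,Y] = 2 E|X-Y| - E|X-X'| - E|Y-Y'| with Euclidean distances,
   which is zero when the two distributions coincide and grows with their discrepancy. Here
   it is evaluated on the random subsample seq for training and on the full validation and
   test sets. A compact sketch of the same estimator (DistanceMatrix is not used by the
   script itself):
     energySketch[x_, y_] :=
       2*Mean[Flatten[DistanceMatrix[x, y]]] -
         Mean[Flatten[DistanceMatrix[x, x]]] - Mean[Flatten[DistanceMatrix[y, y]]]
*)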
x_ /; x<0.->0; 103 | (* 104 | energyValidation=2*Total[Flatten[Table[EuclideanDistance[correctionValidation[[i]],validation[[j,2]]],{i,Length[validation]},{j,Length[validation]}]]]/Length[validation]/Length[validation]-Total[Flatten[Table[EuclideanDistance[validation[[i,2]],validation[[j,2]]],{i,Length[validation]},{j,Length[validation]}]]]/Length[validation]/Length[validation]-Total[Flatten[Table[EuclideanDistance[correctionValidation[[i]],correctionValidation[[j]]],{i,Length[validation]},{j,Length[validation]}]]]/Length[validation]/Length[validation]; 105 | Print[energyValidation]; 106 | *) 107 | energyValidation=2*Total[Flatten[Table[EuclideanDistance[correctionValidation[[i]],vobser[[j]]],{i,Length[vobser]},{j,Length[vobser]}]]]/Length[vobser]/Length[vobser]-Total[Flatten[Table[EuclideanDistance[vobser[[i]],vobser[[j]]],{i,Length[vobser]},{j,Length[vobser]}]]]/Length[vobser]/Length[vobser]-Total[Flatten[Table[EuclideanDistance[correctionValidation[[i]],correctionValidation[[j]]],{i,Length[vobser]},{j,Length[vobser]}]]]/Length[vobser]/Length[vobser]; 108 | Print[energyValidation]; 109 | 110 | Print[{"Step ",kk,energyTraining,energyValidation,energyTest}]; 111 | Export["/usr/workspace/pan11/CycleGAN/MBCn/Step_"<>ToString[kk]<>".mx", 112 | <|"transform"->transform, 113 | "matrix"->matrix, 114 | "energy"->{energyTraining,energyValidation,energyTest}|>]; 115 | Set[gcm,correction];],{kk,30}]; 116 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_16_MBCn_Apply.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | days={1,0} 3 | dim={1,26,48}; 4 | dim2={3*(Total[days]+1),36,56}; 5 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 6 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 7 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 8 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 9 | length=14610; 10 | vlength=3652; 11 | vposition=Position[Flatten[validMatrix],1][[;;,1]]; 12 | 13 | test=Table[<|"P_GCM"->nP4GCM[[i]], 14 | "P_Obser"->nP4Obser[[i]] 15 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 16 | gsimu=test[[;;,1]]; 17 | gobser=test[[;;,2]]; 18 | 19 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 20 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 21 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 22 | 23 | training=Table[<|"P_GCM"->nP4GCM[[i]], 24 | "P_Obser"->nP4Obser[[i]] 25 | |>,{i,length}]; 26 | 27 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 28 | "P_Obser"->nP4Obser[[i]] 29 | |>,{i,length+1,Length[nP4GCM]}]; 30 | 31 | position=Position[validMatrix,1]; 32 | n=Length[position]; 33 | 34 | training=Table[<|"P_GCM"->Flatten[training[[i,1]]][[Position[Flatten[validMatrix],1][[;;,1]]]], 35 | "P_Obser"->Flatten[training[[i,2]]][[Position[Flatten[validMatrix],1][[;;,1]]]]|>,{i,Length[training]}]; 36 | validation=Table[<|"P_GCM"->Flatten[validation[[i,1]]][[Position[Flatten[validMatrix],1][[;;,1]]]], 37 | "P_Obser"->Flatten[validation[[i,2]]][[Position[Flatten[validMatrix],1][[;;,1]]]]|>,{i,Length[validation]}]; 38 | 
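(* Baseline_16_MBCn_Apply replays the rotations and per-coordinate transfer functions saved
   as Step_i.mx by the fitting script: for each stored step it rotates the test days, applies
   the piecewise-linear quantile map coordinate by coordinate, and rotates back, chaining the
   corrections in order (total = 23 steps for the full run; a second pass further down repeats
   the procedure with total = 1 as an early-stopping variant). One thing to double-check if
   reusing this code: the apply-time interpolation appears to multiply (ele - qs_lo) by the
   source-interval width, whereas the fit-time formula in Baseline_16_MBCn.m divides by it
   before scaling to the target interval. *)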
test=Table[<|"P_GCM"->Flatten[test[[i,1]]][[Position[Flatten[validMatrix],1][[;;,1]]]], 39 | "P_Obser"->Flatten[test[[i,2]]][[Position[Flatten[validMatrix],1][[;;,1]]]]|>,{i,Length[test]}]; 40 | 41 | total=23; 42 | SetDirectory["/usr/workspace/pan11/CycleGAN/MBCn"]; 43 | energy=Table[Import["Step_"<>ToString[i]<>".mx"]["energy"],{i,total}]; 44 | simu=test[[;;,1]]; 45 | obser=test[[;;,2]]; 46 | seq=RandomSample[Range[Length[training]]][[1;;Length[validation]]]; 47 | 48 | 49 | MBCn=Table[Block[{matrix,transform,s1,s2,correction}, 50 | matrix=Import["Step_"<>ToString[i]<>".mx"]["matrix"]; 51 | transform=Import["Step_"<>ToString[i]<>".mx"]["transform"]; 52 | transform=Table[Sort[DeleteDuplicates[transform[[dim,;;,1;;2]]],#1[[1,2]]<=#2[[1,2]]&],{dim,Length[transform]}]; 53 | s1=simu.matrix; 54 | Print[{i,Correlation[Mean[obser],Mean[simu]]}]; 55 | s2=ParallelTable[Block[{raw=s1[[;;,dim]],po}, 56 | result=Map[Block[{ele=#},If[ele<=transform[[dim,1,1,1]],transform[[dim,2,1,1]], 57 | If[ele>=transform[[dim,-1,1,2]],transform[[dim,-1,2,2]], 58 | Block[{po=transform[[dim,Position[transform[[dim,;;,1,1]],_?(#<=ele&)][[-1,1]]]]}, 59 | (ele-po[[1,1]])*(po[[1,2]]-po[[1,1]])+po[[2,1]]]]]]&,raw]; 60 | result],{dim,Dimensions[s1][[2]]}]; 61 | correction=Transpose[s2].Inverse[matrix]; 62 | (*correction=correction/. x_ /; x<0.->0;*) 63 | Set[simu,correction]; 64 | simu],{i,total}]; 65 | 66 | final=Block[{tempt=Table[0,{i,Dimensions[MBCn][[2]]},{j,Dimensions[validMatrix][[1]]},{k,Dimensions[validMatrix][[2]]}],np}, 67 | Table[Set[tempt[[;;,position[[i,1]],position[[i,2]]]],MBCn[[-1,;;,i]]],{i,Length[position]}]; 68 | np=Position[tempt,_?(#<0&)]; 69 | Map[Set[tempt[[#[[1]],#[[2]],#[[3]]]],0]&,np]; 70 | tempt]; 71 | 72 | Export["/usr/workspace/pan11/CycleGAN/MBCn/MBCn_Result.mx",{NumericArray[Exp[final]-1.,"Real32"],NumericArray[Exp[gsimu[[;;,1]]]-1.,"Real 73 | 32"],NumericArray[Exp[gobser[[;;,1]]]-1.,"Real32"]}] 74 | 75 | 76 | 77 | total=1; 78 | SetDirectory["/usr/workspace/pan11/CycleGAN/MBCn"]; 79 | energy=Table[Import["Step_"<>ToString[i]<>".mx"]["energy"],{i,total}]; 80 | simu=test[[;;,1]]; 81 | obser=test[[;;,2]]; 82 | seq=RandomSample[Range[Length[training]]][[1;;Length[validation]]]; 83 | 84 | 85 | MBCn=Table[Block[{matrix,transform,s1,s2,correction}, 86 | matrix=Import["Step_"<>ToString[i]<>".mx"]["matrix"]; 87 | transform=Import["Step_"<>ToString[i]<>".mx"]["transform"]; 88 | transform=Table[Sort[DeleteDuplicates[transform[[dim,;;,1;;2]]],#1[[1,2]]<=#2[[1,2]]&],{dim,Length[transform]}]; 89 | s1=simu.matrix; 90 | Print[{i,Correlation[Mean[obser],Mean[simu]]}]; 91 | s2=ParallelTable[Block[{raw=s1[[;;,dim]],po}, 92 | result=Map[Block[{ele=#},If[ele<=transform[[dim,1,1,1]],transform[[dim,2,1,1]], 93 | If[ele>=transform[[dim,-1,1,2]],transform[[dim,-1,2,2]], 94 | Block[{po=transform[[dim,Position[transform[[dim,;;,1,1]],_?(#<=ele&)][[-1,1]]]]}, 95 | (ele-po[[1,1]])*(po[[1,2]]-po[[1,1]])+po[[2,1]]]]]]&,raw]; 96 | result],{dim,Dimensions[s1][[2]]}]; 97 | correction=Transpose[s2].Inverse[matrix]; 98 | (*correction=correction/. 
x_ /; x<0.->0;*) 99 | Set[simu,correction]; 100 | simu],{i,total}]; 101 | 102 | final=Block[{tempt=Table[0,{i,Dimensions[MBCn][[2]]},{j,Dimensions[validMatrix][[1]]},{k,Dimensions[validMatrix][[2]]}],np}, 103 | Table[Set[tempt[[;;,position[[i,1]],position[[i,2]]]],MBCn[[-1,;;,i]]],{i,Length[position]}]; 104 | np=Position[tempt,_?(#<0&)]; 105 | Map[Set[tempt[[#[[1]],#[[2]],#[[3]]]],0]&,np]; 106 | tempt]; 107 | 108 | Export["/usr/workspace/pan11/CycleGAN/MBCn/MBCn_Result_EarlyStopping.mx",{NumericArray[Exp[final]-1.,"Real32"],NumericArray[Exp[gsimu[[;;,1]]]-1.,"Real32"],NumericArray[Exp[gobser[[;;,1]]]-1.,"Real32"]}] 109 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_16_MBCn_n.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | days={1,0} 3 | dim={1,26,48}; 4 | dim2={3*(Total[days]+1),36,56}; 5 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 6 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 7 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 8 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 9 | length=14610; 10 | vlength=3652; 11 | vposition=Position[Flatten[validMatrix],1][[;;,1]]; 12 | 13 | test=Table[<|"P_GCM"->nP4GCM[[i]], 14 | "P_Obser"->nP4Obser[[i]] 15 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 16 | 17 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 18 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 19 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 20 | 21 | training=Table[<|"P_GCM"->nP4GCM[[i]], 22 | "P_Obser"->nP4Obser[[i]] 23 | |>,{i,length}]; 24 | 25 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]] 27 | |>,{i,length+1,Length[nP4GCM]}]; 28 | 29 | position=Position[validMatrix,1]; 30 | n=Length[position]; 31 | 32 | training=Table[<|"P_GCM"->Flatten[training[[i,1]]][[Position[Flatten[validMatrix],1][[;;,1]]]], 33 | "P_Obser"->Flatten[training[[i,2]]][[Position[Flatten[validMatrix],1][[;;,1]]]]|>,{i,Length[training]}]; 34 | validation=Table[<|"P_GCM"->Flatten[validation[[i,1]]][[Position[Flatten[validMatrix],1][[;;,1]]]], 35 | "P_Obser"->Flatten[validation[[i,2]]][[Position[Flatten[validMatrix],1][[;;,1]]]]|>,{i,Length[validation]}]; 36 | test=Table[<|"P_GCM"->Flatten[test[[i,1]]][[Position[Flatten[validMatrix],1][[;;,1]]]], 37 | "P_Obser"->Flatten[test[[i,2]]][[Position[Flatten[validMatrix],1][[;;,1]]]]|>,{i,Length[test]}]; 38 | 39 | 40 | gcm=training[[;;,1]]; 41 | obser=training[[;;,2]]; 42 | tobser=test[[;;,2]]; 43 | vobser=validation[[;;,2]]; 44 | rgcm=RandomSample[gcm]; 45 | robser=RandomSample[obser]; 46 | seq=RandomSample[Range[Length[training]]][[1;;Length[validation]]]; 47 | 48 | Table[Block[{matrix,rtrainingGCM,rtrainingObser,transform,result,correction,ntransform,s1,s2,correctionValidation,correctionTest}, 49 | matrix=RandomVariate[CircularRealMatrixDistribution[n]]; 50 | rtrainingGCM=gcm.matrix; 51 | rtrainingObser=obser.matrix; 52 | qMapping[source_,target_,interval_:0.01]:=Block[{qsource,qtarget,position}, 53 | qsource=Table[Quantile[source,q],{q,0,1,interval}]; 54 | qtarget=Table[Quantile[target,q],{q,0,1,interval}]; 55 | 
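(* Baseline_16_MBCn_n.m repeats the MBCn fitting loop of Baseline_16_MBCn.m with one change:
   every non-negativity replacement (x_ /; x < 0 -> 0) is left commented out, for the
   corrected training fields as well as for the validation and test projections, and the
   per-step results are written to Step_kk_noN.mx instead of Step_kk.mx. It serves as an
   ablation of the clipping step; everything else (random rotations, 1%-step quantile maps,
   energy-distance diagnostics) is identical. *)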
position=Table[Position[qsource,_?(#>=source[[k]]&)][[1,1]],{k,Length[source]}]; 56 | Table[{{qsource[[Max[position[[i]]-1,1]]],qsource[[position[[i]]]]}, 57 | {qtarget[[Max[position[[i]]-1,1]]],qtarget[[position[[i]]]]},source[[i]]},{i,Length[position]}]]; 58 | transform=ParallelTable[Block[{},Print[i];qMapping[rtrainingGCM[[;;,i]],rtrainingObser[[;;,i]]]],{i,724}]; 59 | 60 | result=ParallelTable[If[transform[[dim,date]][[1,2]]!=transform[[dim,date]][[1,1]], 61 | (transform[[dim,date]][[3]]-transform[[dim,date]][[1,1]])/(transform[[dim,date]][[1,2]]-transform[[dim,date]][[1,1]])*(transform[[dim,date]][[2,2]]-transform[[dim,date]][[2,1]])+transform[[dim,date]][[2,1]],transform[[dim,date]][[2,1]]], 62 | {date,Length[rtrainingGCM]},{dim,n}]; 63 | correction=result.Inverse[matrix]; 64 | (*correction=correction/. x_ /; x<0.->0.;*) 65 | (* 66 | energy=2*Total[Flatten[Table[EuclideanDistance[correction[[i]],obser[[j]]],{i,Length[correction]},{j,Length[obser]}]]]/Length[correction]/Length[obser]-Total[Flatten[Table[EuclideanDistance[obser[[i]],obser[[j]]],{i,Length[correction]},{j,Length[obser]}]]]/Length[correction]/Length[obser]-Total[Flatten[Table[EuclideanDistance[correction[[i]],correction[[j]]],{i,Length[correction]},{j,Length[obser]}]]]/Length[correction]/Length[obser]; 67 | Print[energy]; 68 | *) 69 | energyTraining=2*Total[Flatten[Table[EuclideanDistance[correction[[i]],obser[[j]]],{i,seq},{j,seq}]]]/Length[seq]/Length[seq]-Total[Flatten[Table[EuclideanDistance[obser[[i]],obser[[j]]],{i,seq},{j,seq}]]]/Length[seq]/Length[seq]-Total[Flatten[Table[EuclideanDistance[correction[[i]],correction[[j]]],{i,seq},{j,seq}]]]/Length[seq]/Length[seq]; 70 | 71 | Print[energyTraining]; 72 | 73 | 74 | ntransform=Table[Sort[DeleteDuplicates[transform[[dim,;;,1;;2]]],#1[[1,2]]<=#2[[1,2]]&],{dim,Length[transform]}]; 75 | s1=test[[;;,1]].matrix; 76 | s2=ParallelTable[Block[{raw=s1[[;;,dim]],po}, 77 | result=Map[Block[{ele=#},If[ele<=ntransform[[dim,1,1,1]],ntransform[[dim,2,1,1]], 78 | If[ele>=ntransform[[dim,-1,1,2]],ntransform[[dim,-1,2,2]], 79 | Block[{po=ntransform[[dim,Position[ntransform[[dim,;;,1,1]],_?(#<=ele&)][[-1,1]]]]}, 80 | (ele-po[[1,1]])*(po[[1,2]]-po[[1,1]])+po[[2,1]]]]]]&,raw]; 81 | result],{dim,Dimensions[s1][[2]]}]; 82 | correctionTest=Transpose[s2].Inverse[matrix]; 83 | (*correctionTest=correctionTest/. x_ /; x<0.->0;*) 84 | 85 | energyTest=2*Total[Flatten[Table[EuclideanDistance[correctionTest[[i]],test[[j,2]]],{i,Length[test]},{j,Length[test]}]]]/Length[test]/Length[test]-Total[Flatten[Table[EuclideanDistance[test[[i,2]],test[[j,2]]],{i,Length[test]},{j,Length[test]}]]]/Length[test]/Length[test]-Total[Flatten[Table[EuclideanDistance[correctionTest[[i]],correctionTest[[j]]],{i,Length[test]},{j,Length[test]}]]]/Length[test]/Length[test]; 86 | Print[energyTest]; 87 | 88 | s1=validation[[;;,1]].matrix; 89 | s2=ParallelTable[Block[{raw=s1[[;;,dim]],po}, 90 | result=Map[Block[{ele=#},If[ele<=ntransform[[dim,1,1,1]],ntransform[[dim,2,1,1]], 91 | If[ele>=ntransform[[dim,-1,1,2]],ntransform[[dim,-1,2,2]], 92 | Block[{po=ntransform[[dim,Position[ntransform[[dim,;;,1,1]],_?(#<=ele&)][[-1,1]]]]}, 93 | (ele-po[[1,1]])*(po[[1,2]]-po[[1,1]])+po[[2,1]]]]]]&,raw]; 94 | result],{dim,Dimensions[s1][[2]]}]; 95 | correctionValidation=Transpose[s2].Inverse[matrix]; 96 | (*correctionValidation=correctionValidation/. 
x_ /; x<0.->0;*) 97 | energyValidation=2*Total[Flatten[Table[EuclideanDistance[correctionValidation[[i]],validation[[j,2]]],{i,Length[validation]},{j,Length[validation]}]]]/Length[validation]/Length[validation]-Total[Flatten[Table[EuclideanDistance[validation[[i,2]],validation[[j,2]]],{i,Length[validation]},{j,Length[validation]}]]]/Length[validation]/Length[validation]-Total[Flatten[Table[EuclideanDistance[correctionValidation[[i]],correctionValidation[[j]]],{i,Length[validation]},{j,Length[validation]}]]]/Length[validation]/Length[validation]; 98 | Print[energyValidation]; 99 | 100 | Print[{"Step ",kk,energyTraining,energyValidation,energyTest}]; 101 | Export["/usr/workspace/pan11/CycleGAN/MBCn/Step_"<>ToString[kk]<>"_noN.mx", 102 | <|"transform"->transform, 103 | "matrix"->matrix, 104 | "energy"->{energyTraining,energyValidation,energyTest}|>]; 105 | Set[gcm,correction];],{kk,30}]; 106 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_1_ConditionalGAN.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | 12 | length=14610; 13 | vlength=3652; 14 | test=Table[<|"P_GCM"->nP4GCM[[i]], 15 | "D_GCM"->ndynamics4GCM[[i]], 16 | "P_Obser"->nP4Obser[[i]], 17 | "D_Obser"->ndynamics4Obser[[i]] 18 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 19 | 20 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 21 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 22 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 23 | 24 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 25 | "D_GCM"->ndynamics4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]], 27 | "D_Obser"->ndynamics4Obser[[i]] 28 | |>,{i,length,length+vlength}]; 29 | 30 | generatorGCM2Obser=NetGraph[<|"Catenate"->CatenateLayer[1], 31 | "Padding"->PaddingLayer[{{0,0},{5,5},{4,4}}], 32 | "chain"->{ConvolutionLayer[64,{3,3}], 33 | BatchNormalizationLayer[], 34 | Ramp, 35 | ConvolutionLayer[128,{3,3}], 36 | BatchNormalizationLayer[], 37 | Ramp, 38 | ConvolutionLayer[256,{3,3}], 39 | BatchNormalizationLayer[], 40 | Ramp, 41 | ConvolutionLayer[512,{5,3}], 42 | BatchNormalizationLayer[], 43 | Ramp, 44 | ConvolutionLayer[1,{1,1}]}, 45 | "combine"->ThreadingLayer[Plus], 46 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 47 | |>, 48 | {NetPort["P"]->"Padding"->"Catenate", 49 | NetPort["z"]->"Catenate"->"chain"->"combine", 50 | NetPort["P"]->"combine"->"cut"}, 51 | "P"->dim, 52 | "z"->dim2] 53 | 54 | discriminatorGCM2Obser = NetChain[{ 55 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 56 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 57 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 58 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 59 | 
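(* Baseline_1_ConditionalGAN conditions the generator on the large-scale dynamics: the coarse
   precipitation P (1 x 26 x 48) is padded by {5,5} rows and {4,4} columns to 36 x 56,
   catenated with the dynamics stack z = D_GCM (3*(Total[days]+1) = 6 channels of 36 x 56)
   along the channel axis, and passed through unpadded convolutions whose kernel sizes bring
   the field back to 26 x 48. Training then runs in three NetTrain stages of 400 rounds at
   learning rates 10^-4, 10^-5 and 10^-6, each resuming from the best checkpoint exported by
   the mean/variance callback. *)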
FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 60 | "Input" -> dim]; 61 | 62 | gan =NetGraph[<| 63 | "Generator_GCM->Obser" -> generatorGCM2Obser, 64 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 65 | "Cat_GCM->Obser" -> CatenateLayer[], 66 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 67 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 68 | "Fake_GCM->Obser"->PartLayer[1], 69 | "Real_GCM->Obser"->PartLayer[2], 70 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0] 71 | |>, 72 | 73 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 74 | NetPort["D_GCM"]->NetPort["Generator_GCM->Obser","z"], 75 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 76 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 77 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 78 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 79 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"] 80 | }, 81 | "P_Obser" -> dim, 82 | "P_GCM" -> dim, 83 | "D_GCM" -> dim2]; 84 | 85 | DiffMean=Infinity; 86 | DiffVar=Infinity; 87 | 88 | 89 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 90 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 91 | 92 | index=StringSplit[CreateUUID[],"-"][[1]]; 93 | Print[index]; 94 | 95 | ReportCycleGan2[net_] := 96 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 97 | gen=net[["Generator_GCM->Obser"]]; 98 | obserG=Map[gen[<|"P"->#[["P_GCM"]],"z"->#[["D_GCM"]]|>,TargetDevice->"GPU"]&,validation]; 99 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 100 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 101 | Print[TableForm[{{DiffMean,DiffVar}, 102 | {meanDiff,varDiff}}]]; 103 | If[meanDiff+varDiff<=DiffMean+DiffVar, 104 | Block[{}, 105 | Print[index]; 106 | Export["/g/g92/pan11/Baseline_1_GAN_Condition_"<>index<>".mx",net]; 107 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 108 | 109 | NetTrain[gan, 110 | {Function[Block[{base,choice,choice2}, 111 | base=RandomSample[Range[2,length],#BatchSize]; 112 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 113 | tempt=#+daylag+yearlag*365; 114 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 115 | <|"P_GCM"->nP4GCM[[base]], 116 | "D_GCM"->ndynamics4GCM[[base]], 117 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 118 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1]}, 119 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser","Generator_GCM->Obser"}, 120 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 121 | "Generator_GCM->Obser" -> -1,"Discriminator_GCM->Obser"->1}, 122 | BatchSize -> 32, 123 | TargetDevice->"GPU", 124 | MaxTrainingRounds->400, 125 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 126 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 127 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 128 | 129 | gan=Import["/g/g92/pan11/Baseline_1_GAN_Condition_"<>index<>".mx"]; 130 | NetTrain[gan, 131 | {Function[Block[{base,choice,choice2}, 132 | base=RandomSample[Range[2,length],#BatchSize]; 133 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 134 | tempt=#+daylag+yearlag*365; 135 | 
If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 136 | <|"P_GCM"->nP4GCM[[base]], 137 | "D_GCM"->ndynamics4GCM[[base]], 138 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 139 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1]}, 140 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser","Generator_GCM->Obser"}, 141 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 142 | "Generator_GCM->Obser" -> -1,"Discriminator_GCM->Obser"->1}, 143 | BatchSize -> 32, 144 | TargetDevice->"GPU", 145 | MaxTrainingRounds->400, 146 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 147 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 148 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 149 | 150 | gan=Import["/g/g92/pan11/Baseline_1_GAN_Condition_"<>index<>".mx"]; 151 | NetTrain[gan, 152 | {Function[Block[{base,choice,choice2}, 153 | base=RandomSample[Range[2,length],#BatchSize]; 154 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 155 | tempt=#+daylag+yearlag*365; 156 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 157 | <|"P_GCM"->nP4GCM[[base]], 158 | "D_GCM"->ndynamics4GCM[[base]], 159 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 160 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1]}, 161 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser","Generator_GCM->Obser"}, 162 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 163 | "Generator_GCM->Obser" -> -1,"Discriminator_GCM->Obser"->1}, 164 | BatchSize -> 32, 165 | TargetDevice->"GPU", 166 | MaxTrainingRounds->400, 167 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-6, 168 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 169 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 170 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_2_CycleGAN.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | length=14610; 12 | vlength=3652; 13 | test=Table[<|"P_GCM"->nP4GCM[[i]], 14 | "P_Obser"->nP4Obser[[i]]|>,{i,length+vlength+1,Length[nP4GCM]}]; 15 | 16 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 17 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 18 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 19 | 20 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 21 | "D_GCM"->ndynamics4GCM[[i]], 22 | "P_Obser"->nP4Obser[[i]], 23 | "D_Obser"->ndynamics4Obser[[i]] 24 | |>,{i,length,length+vlength}]; 25 | 26 | 27 | generator=NetGraph[<| 28 | "chain"->{ConvolutionLayer[64,{3,3},"PaddingSize"->1], 29 | 
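(* Baseline_2_CycleGAN trains two generator/discriminator pairs, GCM->Obser and Obser->GCM.
   The "Cycle_*" blocks are NetInsertSharedArrays copies that reuse the opposite generator's
   weights, so the MeanAbsoluteLossLayer reconstruction terms (GCM -> Obser -> GCM and
   Obser -> GCM -> Obser) backpropagate into the same two generators. Both adversarial pairs
   use the clipped score-difference construction noted earlier, and training is staged at
   learning rates 0.5*10^-4, 10^-5 and 10^-6, each stage resuming from the best exported
   checkpoint. *)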
BatchNormalizationLayer[], 30 | Ramp, 31 | ConvolutionLayer[128,{3,3},"PaddingSize"->1], 32 | BatchNormalizationLayer[], 33 | Ramp, 34 | ConvolutionLayer[256,{3,3},"PaddingSize"->1], 35 | BatchNormalizationLayer[], 36 | Ramp, 37 | ConvolutionLayer[512,{3,3},"PaddingSize"->1], 38 | BatchNormalizationLayer[], 39 | Ramp, 40 | ConvolutionLayer[1,{1,1}]}, 41 | "combine"->ThreadingLayer[Plus], 42 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 43 | |>, 44 | {NetPort["P"]->"chain"->"combine", 45 | NetPort["P"]->"combine"->"cut"}, 46 | "P"->dim]; 47 | 48 | generatorGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 49 | cycleGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 50 | 51 | generatorObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 52 | cycleObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 53 | 54 | discriminator = NetChain[{ 55 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 56 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 57 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 58 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 59 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 60 | "Input" -> dim]; 61 | discriminatorGCM2Obser = discriminator; 62 | discriminatorObser2GCM = discriminator; 63 | 64 | 65 | cycleGAN =NetGraph[<| 66 | "Generator_GCM->Obser" -> generatorGCM2Obser, 67 | "Cycle_GCM->Obser" -> cycleGCM2Obser, 68 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 69 | "Cat_GCM->Obser" -> CatenateLayer[], 70 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 71 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 72 | "Fake_GCM->Obser"->PartLayer[1], 73 | "Real_GCM->Obser"->PartLayer[2], 74 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 75 | "MS_GCM->Obser"->MeanAbsoluteLossLayer[], 76 | 77 | "Generator_Obser->GCM" -> generatorObser2GCM, 78 | "Cycle_Obser->GCM" -> cycleObser2GCM, 79 | "Discriminator_Obser->GCM" -> NetMapOperator[discriminatorObser2GCM], 80 | "Cat_Obser->GCM" -> CatenateLayer[], 81 | "Reshape_Obser->GCM" -> ReshapeLayer[Prepend[dim,2]], 82 | "Flat_Obser->GCM" -> ReshapeLayer[{2}], 83 | "Fake_Obser->GCM"->PartLayer[1], 84 | "Real_Obser->GCM"->PartLayer[2], 85 | "Scale_Obser->GCM" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 86 | "MS_Obser->GCM"->MeanAbsoluteLossLayer[]|>, 87 | 88 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 89 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 90 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 91 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 92 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 93 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 94 | "Generator_GCM->Obser"->NetPort["Cycle_Obser->GCM","P"], 95 | "Cycle_Obser->GCM"->"MS_GCM->Obser", 96 | NetPort["P_GCM"]->"MS_GCM->Obser"->NetPort["ReconstructionLoss_GCM->Obser"], 97 | 98 | NetPort["P_Obser"] ->NetPort["Generator_Obser->GCM","P"], 99 | "Generator_Obser->GCM"-> "Cat_Obser->GCM", 100 | NetPort["P_GCM"] -> "Cat_Obser->GCM", 101 | "Cat_Obser->GCM" -> "Reshape_Obser->GCM" -> "Discriminator_Obser->GCM" -> "Flat_Obser->GCM" -> "Scale_Obser->GCM" -> 102 | "Fake_Obser->GCM"->NetPort["FakeLoss_Obser->GCM"], 103 
| "Scale_Obser->GCM"->"Real_Obser->GCM"->NetPort["RealLoss_Obser->GCM"], 104 | "Generator_Obser->GCM"->NetPort["Cycle_GCM->Obser","P"], 105 | "Cycle_GCM->Obser"->"MS_Obser->GCM", 106 | NetPort["P_Obser"]->"MS_Obser->GCM"->NetPort["ReconstructionLoss_Obser->GCM"]}, 107 | "P_Obser" -> dim, 108 | "P_GCM" -> dim]; 109 | 110 | DiffMean=Infinity; 111 | DiffVar=Infinity; 112 | 113 | 114 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 115 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 116 | 117 | index=StringSplit[CreateUUID[],"-"][[1]]; 118 | Print[index]; 119 | ReportCycleGan2[net_] := 120 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 121 | gen=net[["Generator_GCM->Obser"]]; 122 | obserG=Map[gen[<|"P"->#[["P_GCM"]]|>,TargetDevice->"GPU"]&,validation]; 123 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 124 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 125 | Print[TableForm[{{DiffMean,DiffVar}, 126 | {meanDiff,varDiff}}]]; 127 | If[meanDiff+varDiff<=DiffMean+DiffVar, 128 | Block[{}, 129 | Print[index]; 130 | Export["/g/g92/pan11/Baseline_2_GAN_Cycle_"<>index<>".mx",net]; 131 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 132 | 133 | NetTrain[cycleGAN, 134 | {Function[Block[{base,choice,choice2}, 135 | base=RandomSample[Range[2,length],#BatchSize]; 136 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 137 | tempt=#+daylag+yearlag*365; 138 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 139 | <|"P_GCM"->nP4GCM[[base]], 140 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 141 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 142 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]]}, 143 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 144 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 145 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM"}, 146 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 147 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 148 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 149 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1}, 150 | BatchSize -> 32, 151 | TargetDevice->"GPU", 152 | MaxTrainingRounds->100, 153 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> .5*10^-4, 154 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 155 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 156 | 157 | cycleGAN=Import["/g/g92/pan11/Baseline_2_GAN_Cycle_"<>index<>".mx"]; 158 | NetTrain[cycleGAN, 159 | {Function[Block[{base,choice,choice2}, 160 | base=RandomSample[Range[2,length],#BatchSize]; 161 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 162 | tempt=#+daylag+yearlag*365; 163 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 164 | <|"P_GCM"->nP4GCM[[base]], 165 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 166 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 167 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]]}, 168 | 
TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 169 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 170 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM"}, 171 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 172 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 173 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 174 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1}, 175 | BatchSize -> 32, 176 | TargetDevice->"GPU", 177 | MaxTrainingRounds->200, 178 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 179 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 180 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 181 | 182 | cycleGAN=Import["/g/g92/pan11/Baseline_2_GAN_Cycle_"<>index<>".mx"]; 183 | NetTrain[cycleGAN, 184 | {Function[Block[{base,choice,choice2}, 185 | base=RandomSample[Range[2,length],#BatchSize]; 186 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 187 | tempt=#+daylag+yearlag*365; 188 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 189 | <|"P_GCM"->nP4GCM[[base]], 190 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 191 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 192 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]]}, 193 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 194 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 195 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM"}, 196 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 197 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 198 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 199 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1}, 200 | BatchSize -> 32, 201 | TargetDevice->"GPU", 202 | MaxTrainingRounds->200, 203 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-6, 204 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 205 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 206 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_3_DynamicsGAN.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | 12 | length=14610; 13 | vlength=3652; 14 | test=Table[<|"P_GCM"->nP4GCM[[i]], 15 | "D_GCM"->ndynamics4GCM[[i]], 16 | "P_Obser"->nP4Obser[[i]], 17 | "D_Obser"->ndynamics4Obser[[i]] 18 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 19 | 20 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 21 | 
Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 22 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 23 | 24 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 25 | "D_GCM"->ndynamics4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]], 27 | "D_Obser"->ndynamics4Obser[[i]] 28 | |>,{i,length,length+vlength}]; 29 | 30 | generatorGCM2Obser=NetGraph[<| 31 | "chain"->{ConvolutionLayer[64,{3,3},"PaddingSize"->1], 32 | BatchNormalizationLayer[], 33 | Ramp, 34 | ConvolutionLayer[128,{3,3},"PaddingSize"->1], 35 | BatchNormalizationLayer[], 36 | Ramp, 37 | ConvolutionLayer[256,{3,3},"PaddingSize"->1], 38 | BatchNormalizationLayer[], 39 | Ramp, 40 | ConvolutionLayer[512,{3,3},"PaddingSize"->1], 41 | BatchNormalizationLayer[], 42 | Ramp, 43 | ConvolutionLayer[1,{1,1}]}, 44 | "combine"->ThreadingLayer[Plus], 45 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 46 | |>, 47 | {NetPort["P"]->"chain"->"combine", 48 | NetPort["P"]->"combine"->"cut"}, 49 | "P"->dim]; 50 | 51 | 52 | discriminator = NetChain[{ 53 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 54 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 55 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 56 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 57 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 58 | "Input" -> dim]; 59 | discriminatorGCM2Obser = discriminator; 60 | 61 | RdownscaleObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["net"]; 62 | DeltaObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["mse"]; 63 | 64 | gan =NetGraph[<| 65 | "Generator_GCM->Obser" -> generatorGCM2Obser, 66 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 67 | "Cat_GCM->Obser" -> CatenateLayer[], 68 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 69 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 70 | "Fake_GCM->Obser"->PartLayer[1], 71 | "Real_GCM->Obser"->PartLayer[2], 72 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 73 | 74 | "R_Downscaling_Obser"->RdownscaleObser, 75 | "MS_Obser_RDownscaling"->MeanSquaredLossLayer[], 76 | "Max_Obser_RDownscaling"->ElementwiseLayer[Max[#,DeltaObser]-DeltaObser &] 77 | |>, 78 | 79 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 80 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 81 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 82 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 83 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 84 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 85 | 86 | NetPort["D_GCM"]->"R_Downscaling_Obser"->"MS_Obser_RDownscaling", 87 | "Generator_GCM->Obser"->"MS_Obser_RDownscaling"->"Max_Obser_RDownscaling"->NetPort["Loss_RDownscaling_GCM"] 88 | }, 89 | "P_Obser" -> dim, 90 | "P_GCM" -> dim, 91 | "D_GCM" -> dim2]; 92 | 93 | DiffMean=Infinity; 94 | DiffVar=Infinity; 95 | 96 | 97 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 98 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 99 | 100 | index=StringSplit[CreateUUID[],"-"][[1]]; 101 | Print[index]; 102 | ReportCycleGan2[net_] := 103 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 104 | 
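(* Dynamical-consistency regularization (the distinguishing piece of Baseline_3_DynamicsGAN,
   also used in the combined models): a pretrained "R downscaling" network maps the
   large-scale dynamics stack D to an observation-style precipitation field, and the
   generator output is compared to that prediction with a mean-squared loss passed through
   ElementwiseLayer[Max[#, Delta] - Delta &]. The penalty is therefore hinge-shaped,
     Loss_RDownscaling = Max[ MSE[R[D], G[P]] - Delta, 0 ],
   i.e. zero until the disagreement exceeds Delta, the reference "mse" stored alongside the
   pretrained net in Downscaling_Obser.mx, and linear beyond it. The R-downscaling weights
   are frozen (LearningRateMultipliers -> 0), so only the generator is pushed toward the
   dynamically expected field. *)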
gen=net[["Generator_GCM->Obser"]]; 105 | obserG=Map[gen[#[["P_GCM"]],TargetDevice->"GPU"]&,validation]; 106 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 107 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 108 | Print[TableForm[{{DiffMean,DiffVar}, 109 | {meanDiff,varDiff}}]]; 110 | If[meanDiff+varDiff<=DiffMean+DiffVar, 111 | Block[{}, 112 | Print[index]; 113 | Export["/g/g92/pan11/Baseline_3_GAN_Dynamics_"<>index<>".mx",net]; 114 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 115 | 116 | NetTrain[gan, 117 | {Function[Block[{base,choice,choice2}, 118 | base=RandomSample[Range[2,length],#BatchSize]; 119 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 120 | tempt=#+daylag+yearlag*365; 121 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 122 | <|"P_GCM"->nP4GCM[[base]], 123 | "P_Obser"->nP4Obser[[choice]], 124 | "D_GCM"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 125 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1], 126 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]]}, 127 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser", 128 | "Generator_GCM->Obser"}, 129 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 130 | "Generator_GCM->Obser" -> -1,"Discriminator_GCM->Obser"->1, 131 | "R_Downscaling_Obser"->0}, 132 | BatchSize -> 32, 133 | TargetDevice->"GPU", 134 | MaxTrainingRounds->100, 135 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 136 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 137 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 138 | 139 | gan=Import["/g/g92/pan11/Baseline_3_GAN_Dynamics_"<>index<>".mx"]; 140 | NetTrain[gan, 141 | {Function[Block[{base,choice,choice2}, 142 | base=RandomSample[Range[2,length],#BatchSize]; 143 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 144 | tempt=#+daylag+yearlag*365; 145 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 146 | <|"P_GCM"->nP4GCM[[base]], 147 | "P_Obser"->nP4Obser[[choice]], 148 | "D_GCM"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 149 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1], 150 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]]}, 151 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser", 152 | "Generator_GCM->Obser"}, 153 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 154 | "Generator_GCM->Obser" -> -1,"Discriminator_GCM->Obser"->1, 155 | "R_Downscaling_Obser"->0}, 156 | BatchSize -> 32, 157 | TargetDevice->"GPU", 158 | MaxTrainingRounds->200, 159 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 160 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 161 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 162 | 163 | gan=Import["/g/g92/pan11/Baseline_3_GAN_Dynamics_"<>index<>".mx"]; 164 | NetTrain[gan, 165 | {Function[Block[{base,choice,choice2}, 166 | base=RandomSample[Range[2,length],#BatchSize]; 167 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 168 | tempt=#+daylag+yearlag*365; 169 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 170 | <|"P_GCM"->nP4GCM[[base]], 171 | "P_Obser"->nP4Obser[[choice]], 172 | "D_GCM"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 173 | LossFunction 
->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1], 174 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]]}, 175 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser", 176 | "Generator_GCM->Obser"}, 177 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 178 | "Generator_GCM->Obser" -> -1,"Discriminator_GCM->Obser"->1, 179 | "R_Downscaling_Obser"->0}, 180 | BatchSize -> 32, 181 | TargetDevice->"GPU", 182 | MaxTrainingRounds->200, 183 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-6, 184 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 185 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 186 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_4_GAN_SelfIdentity.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | length=14610; 12 | vlength=3652; 13 | test=Table[<|"P_GCM"->nP4GCM[[i]], 14 | "P_Obser"->nP4Obser[[i]]|>,{i,length+vlength+1,Length[nP4GCM]}]; 15 | 16 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 17 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 18 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 19 | 20 | validation=Table[<|"P_GCM"->nP4GCM[[i]],"P_Obser"->nP4Obser[[i]]|>,{i,length,length+vlength}]; 21 | 22 | generator=NetGraph[<| 23 | "chain"->{ConvolutionLayer[64,{3,3},"PaddingSize"->1], 24 | BatchNormalizationLayer[], 25 | Ramp, 26 | ConvolutionLayer[128,{3,3},"PaddingSize"->1], 27 | BatchNormalizationLayer[], 28 | Ramp, 29 | ConvolutionLayer[256,{3,3},"PaddingSize"->1], 30 | BatchNormalizationLayer[], 31 | Ramp, 32 | ConvolutionLayer[512,{3,3},"PaddingSize"->1], 33 | BatchNormalizationLayer[], 34 | Ramp, 35 | ConvolutionLayer[1,{1,1}]}, 36 | "combine"->ThreadingLayer[Plus], 37 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 38 | |>, 39 | {NetPort["P"]->"chain"->"combine", 40 | NetPort["P"]->"combine"->"cut"}, 41 | "P"->dim] 42 | 43 | generatorGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 44 | generatorGCM2ObserR= NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 45 | 46 | discriminatorGCM2Obser = NetChain[{ 47 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 48 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 49 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 50 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 51 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 52 | "Input" -> dim]; 53 | 54 | gan =NetGraph[<| 55 | "Generator_GCM->Obser" -> generatorGCM2Obser, 56 | "Discriminator_GCM->Obser" -> 
NetMapOperator[discriminatorGCM2Obser], 57 | "Cat_GCM->Obser" -> CatenateLayer[], 58 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 59 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 60 | "Fake_GCM->Obser"->PartLayer[1], 61 | "Real_GCM->Obser"->PartLayer[2], 62 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 63 | "Generator_GCM->Obser_SelfRegression" -> generatorGCM2ObserR, 64 | "MS_GCM2Obser_SelfRegression"->MeanAbsoluteLossLayer[] 65 | |>, 66 | 67 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 68 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 69 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 70 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 71 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 72 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 73 | 74 | NetPort["P_Obser"]->NetPort["Generator_GCM->Obser_SelfRegression","P"], 75 | "Generator_GCM->Obser_SelfRegression"->"MS_GCM2Obser_SelfRegression", 76 | NetPort["P_Obser"]->"MS_GCM2Obser_SelfRegression"->NetPort["Loss_GCM2Obser_SelfRegression"] 77 | }, 78 | "P_Obser" -> dim, 79 | "P_GCM" -> dim] 80 | 81 | DiffMean=Infinity; 82 | DiffVar=Infinity; 83 | 84 | 85 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 86 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 87 | 88 | index=StringSplit[CreateUUID[],"-"][[1]]; 89 | Print[index]; 90 | 91 | ReportCycleGan[net_] := 92 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 93 | gen=net[["Generator_GCM->Obser"]]; 94 | obserG=Map[gen[<|"P"->#[["P_GCM"]]|>,TargetDevice->"GPU"]&,validation]; 95 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 96 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 97 | Print[TableForm[{{DiffMean,DiffVar}, 98 | {meanDiff,varDiff}}]]; 99 | If[meanDiff+varDiff<=DiffMean+DiffVar, 100 | Block[{}, 101 | Print[index]; 102 | Export["/g/g92/pan11/Baseline_4_GAN_SelfIdentity_"<>index<>".mx",net]; 103 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 104 | 105 | NetTrain[gan, 106 | {Function[Block[{base,choice,choice2}, 107 | base=RandomSample[Range[2,length],#BatchSize]; 108 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 109 | tempt=#+daylag+yearlag*365; 110 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 111 | <|"P_GCM"->nP4GCM[[base]], 112 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 113 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]]}, 114 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser","Generator_GCM->Obser","Generator_GCM->Obser_SelfRegression"}, 115 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 116 | "Generator_GCM->Obser" -> -1,"Discriminator_GCM->Obser"->1,"Generator_GCM->Obser_SelfRegression"->-1}, 117 | BatchSize -> 32, 118 | TargetDevice->"GPU", 119 | MaxTrainingRounds->100, 120 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 121 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 122 | TrainingProgressReporting -> {{Function@ReportCycleGan[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 123 | 124 | gan=Import["/g/g92/pan11/Baseline_4_GAN_SelfIdentity_"<>index<>".mx"]; 125 | NetTrain[gan, 126 | {Function[Block[{base,choice,choice2}, 127 | base=RandomSample[Range[2,length],#BatchSize]; 128 | 
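(* Batch sampler (the same scheme is reused at every training stage): each GCM day in "base" is paired with an observation day offset by a random lag of up to ±15 days and ±5 years, falling back to the same index when the shifted index leaves the training range, so real/fake batches are unpaired but seasonally aligned. *)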
choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 129 | tempt=#+daylag+yearlag*365; 130 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 131 | <|"P_GCM"->nP4GCM[[base]], 132 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 133 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]]}, 134 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser","Generator_GCM->Obser","Generator_GCM->Obser_SelfRegression"}, 135 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 136 | "Generator_GCM->Obser" -> -1,"Discriminator_GCM->Obser"->1,"Generator_GCM->Obser_SelfRegression"->-1}, 137 | BatchSize -> 32, 138 | TargetDevice->"GPU", 139 | MaxTrainingRounds->200, 140 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 141 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 142 | TrainingProgressReporting -> {{Function@ReportCycleGan[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 143 | 144 | gan=Import["/g/g92/pan11/Baseline_4_GAN_SelfIdentity_"<>index<>".mx"]; 145 | NetTrain[gan, 146 | {Function[Block[{base,choice,choice2}, 147 | base=RandomSample[Range[2,length],#BatchSize]; 148 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 149 | tempt=#+daylag+yearlag*365; 150 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 151 | <|"P_GCM"->nP4GCM[[base]], 152 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 153 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]]}, 154 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser","Generator_GCM->Obser","Generator_GCM->Obser_SelfRegression"}, 155 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 156 | "Generator_GCM->Obser" -> -1,"Discriminator_GCM->Obser"->1,"Generator_GCM->Obser_SelfRegression"->-1}, 157 | BatchSize -> 32, 158 | TargetDevice->"GPU", 159 | MaxTrainingRounds->200, 160 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-6, 161 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 162 | TrainingProgressReporting -> {{Function@ReportCycleGan[#Net], "Interval" -> Quantity[1, "Rounds"]},"Print"}] 163 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_5_GAN_ConditionalCycle.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | 12 | length=14610; 13 | vlength=3652; 14 | test=Table[<|"P_GCM"->nP4GCM[[i]], 15 | "D_GCM"->ndynamics4GCM[[i]], 16 | "P_Obser"->nP4Obser[[i]], 17 | "D_Obser"->ndynamics4Obser[[i]] 18 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 19 | 20 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 21 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 22 
| {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 23 | 24 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 25 | "D_GCM"->ndynamics4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]], 27 | "D_Obser"->ndynamics4Obser[[i]] 28 | |>,{i,length,length+vlength}]; 29 | 30 | 31 | generator=NetGraph[<|"Catenate"->CatenateLayer[1], 32 | "Padding"->PaddingLayer[{{0,0},{5,5},{4,4}}], 33 | "chain"->{ConvolutionLayer[64,{3,3}], 34 | BatchNormalizationLayer[], 35 | Ramp, 36 | ConvolutionLayer[128,{3,3}], 37 | BatchNormalizationLayer[], 38 | Ramp, 39 | ConvolutionLayer[256,{3,3}], 40 | BatchNormalizationLayer[], 41 | Ramp, 42 | ConvolutionLayer[512,{5,3}], 43 | BatchNormalizationLayer[], 44 | Ramp, 45 | ConvolutionLayer[1,{1,1}]}, 46 | "combine"->ThreadingLayer[Plus], 47 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 48 | |>, 49 | {NetPort["P"]->"Padding"->"Catenate", 50 | NetPort["z"]->"Catenate"->"chain"->"combine", 51 | NetPort["P"]->"combine"->"cut"}, 52 | "P"->dim, 53 | "z"->dim2] 54 | 55 | generatorGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 56 | cycleGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 57 | 58 | generatorObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 59 | cycleObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 60 | 61 | discriminator = NetChain[{ 62 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 63 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 64 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 65 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 66 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 67 | "Input" -> dim]; 68 | discriminatorGCM2Obser = discriminator; 69 | discriminatorObser2GCM = discriminator; 70 | 71 | 72 | cycleGAN =NetGraph[<| 73 | "Generator_GCM->Obser" -> generatorGCM2Obser, 74 | "Cycle_GCM->Obser" -> cycleGCM2Obser, 75 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 76 | "Cat_GCM->Obser" -> CatenateLayer[], 77 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 78 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 79 | "Fake_GCM->Obser"->PartLayer[1], 80 | "Real_GCM->Obser"->PartLayer[2], 81 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 82 | "MS_GCM->Obser"->MeanAbsoluteLossLayer[], 83 | 84 | "Generator_Obser->GCM" -> generatorObser2GCM, 85 | "Cycle_Obser->GCM" -> cycleObser2GCM, 86 | "Discriminator_Obser->GCM" -> NetMapOperator[discriminatorObser2GCM], 87 | "Cat_Obser->GCM" -> CatenateLayer[], 88 | "Reshape_Obser->GCM" -> ReshapeLayer[Prepend[dim,2]], 89 | "Flat_Obser->GCM" -> ReshapeLayer[{2}], 90 | "Fake_Obser->GCM"->PartLayer[1], 91 | "Real_Obser->GCM"->PartLayer[2], 92 | "Scale_Obser->GCM" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 93 | "MS_Obser->GCM"->MeanAbsoluteLossLayer[] 94 | 95 | |>, 96 | 97 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 98 | NetPort["D_GCM"]->NetPort["Generator_GCM->Obser","z"], 99 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 100 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 101 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 102 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 103 | 
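(* Adversarial head: the generated field and the observed field are stacked, scored by the shared discriminator, and the fixed {-1,1} ConstantTimesLayer gives the two scores opposite signs before they are split into the FakeLoss and RealLoss ports; together with the opposite-signed learning-rate multipliers passed to NetTrain, this expresses the generator/discriminator min-max game as a single minimized loss. *)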
"Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 104 | "Generator_GCM->Obser"->NetPort["Cycle_Obser->GCM","P"], 105 | NetPort["D_GCM"]->NetPort["Cycle_Obser->GCM","z"], 106 | "Cycle_Obser->GCM"->"MS_GCM->Obser", 107 | NetPort["P_GCM"]->"MS_GCM->Obser"->NetPort["ReconstructionLoss_GCM->Obser"], 108 | 109 | 110 | NetPort["P_Obser"] ->NetPort["Generator_Obser->GCM","P"], 111 | NetPort["D_Obser"] ->NetPort["Generator_Obser->GCM","z"], 112 | "Generator_Obser->GCM"-> "Cat_Obser->GCM", 113 | NetPort["P_GCM"] -> "Cat_Obser->GCM", 114 | "Cat_Obser->GCM" -> "Reshape_Obser->GCM" -> "Discriminator_Obser->GCM" -> "Flat_Obser->GCM" -> "Scale_Obser->GCM" -> 115 | "Fake_Obser->GCM"->NetPort["FakeLoss_Obser->GCM"], 116 | "Scale_Obser->GCM"->"Real_Obser->GCM"->NetPort["RealLoss_Obser->GCM"], 117 | "Generator_Obser->GCM"->NetPort["Cycle_GCM->Obser","P"], 118 | NetPort["D_Obser"] ->NetPort["Cycle_GCM->Obser","z"], 119 | "Cycle_GCM->Obser"->"MS_Obser->GCM", 120 | NetPort["P_Obser"]->"MS_Obser->GCM"->NetPort["ReconstructionLoss_Obser->GCM"] 121 | }, 122 | "P_Obser" -> dim, 123 | "P_GCM" -> dim, 124 | "D_GCM" -> dim2, 125 | "D_Obser" -> dim2]; 126 | 127 | DiffMean=Infinity; 128 | DiffVar=Infinity; 129 | 130 | 131 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 132 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 133 | 134 | index=StringSplit[CreateUUID[],"-"][[1]]; 135 | Print[index]; 136 | ReportCycleGan2[net_] := 137 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 138 | gen=net[["Generator_GCM->Obser"]]; 139 | obserG=Map[gen[<|"P"->#[["P_GCM"]],"z"->#[["D_GCM"]]|>,TargetDevice->"GPU"]&,validation]; 140 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 141 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 142 | Print[TableForm[{{DiffMean,DiffVar}, 143 | {meanDiff,varDiff}}]]; 144 | If[meanDiff+varDiff<=DiffMean+DiffVar, 145 | Block[{}, 146 | Print[index]; 147 | Export["/g/g92/pan11/Baseline_5_GAN_ConditionalCycle_"<>index<>".mx",net]; 148 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 149 | 150 | NetTrain[cycleGAN, 151 | {Function[Block[{base,choice,choice2}, 152 | base=RandomSample[Range[2,length],#BatchSize]; 153 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 154 | tempt=#+daylag+yearlag*365; 155 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 156 | <|"P_GCM"->nP4GCM[[base]], 157 | "D_GCM"->ndynamics4GCM[[base]], 158 | "P_Obser"->nP4Obser[[choice]], 159 | "D_Obser"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 160 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 161 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]]}, 162 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 163 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 164 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM"}, 165 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 166 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 167 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 168 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1}, 169 | BatchSize -> 32, 170 | TargetDevice->"GPU", 171 | MaxTrainingRounds->100, 172 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 173 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> 
hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 174 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 175 | 176 | cycleGAN=Import["/g/g92/pan11/Baseline_5_GAN_ConditionalCycle_"<>index<>".mx"]; 177 | NetTrain[cycleGAN, 178 | {Function[Block[{base,choice,choice2}, 179 | base=RandomSample[Range[2,length],#BatchSize]; 180 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 181 | tempt=#+daylag+yearlag*365; 182 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 183 | <|"P_GCM"->nP4GCM[[base]], 184 | "D_GCM"->ndynamics4GCM[[base]], 185 | "P_Obser"->nP4Obser[[choice]], 186 | "D_Obser"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 187 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 188 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]]}, 189 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 190 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 191 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM"}, 192 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 193 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 194 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 195 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1}, 196 | BatchSize -> 32, 197 | TargetDevice->"GPU", 198 | MaxTrainingRounds->200, 199 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 200 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 201 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 202 | 203 | cycleGAN=Import["/g/g92/pan11/Baseline_5_GAN_ConditionalCycle_"<>index<>".mx"]; 204 | NetTrain[cycleGAN, 205 | {Function[Block[{base,choice,choice2}, 206 | base=RandomSample[Range[2,length],#BatchSize]; 207 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 208 | tempt=#+daylag+yearlag*365; 209 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 210 | <|"P_GCM"->nP4GCM[[base]], 211 | "D_GCM"->ndynamics4GCM[[base]], 212 | "P_Obser"->nP4Obser[[choice]], 213 | "D_Obser"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 214 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 215 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]]}, 216 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 217 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 218 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM"}, 219 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 220 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 221 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 222 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1}, 223 | BatchSize -> 32, 224 | TargetDevice->"GPU", 225 | MaxTrainingRounds->100, 226 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-6, 227 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 228 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], 
"Interval" -> Quantity[300, "Batches"]},"Print"}] 229 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_6_GAN_ConditionalDynamics.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | 12 | length=14610; 13 | vlength=3652; 14 | test=Table[<|"P_GCM"->nP4GCM[[i]], 15 | "D_GCM"->ndynamics4GCM[[i]], 16 | "P_Obser"->nP4Obser[[i]], 17 | "D_Obser"->ndynamics4Obser[[i]] 18 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 19 | 20 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 21 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 22 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 23 | 24 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 25 | "D_GCM"->ndynamics4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]], 27 | "D_Obser"->ndynamics4Obser[[i]] 28 | |>,{i,length,length+vlength}]; 29 | 30 | generatorGCM2Obser=NetGraph[<|"Catenate"->CatenateLayer[1], 31 | "Padding"->PaddingLayer[{{0,0},{5,5},{4,4}}], 32 | "chain"->{ConvolutionLayer[64,{3,3}], 33 | BatchNormalizationLayer[], 34 | Ramp, 35 | ConvolutionLayer[128,{3,3}], 36 | BatchNormalizationLayer[], 37 | Ramp, 38 | ConvolutionLayer[256,{3,3}], 39 | BatchNormalizationLayer[], 40 | Ramp, 41 | ConvolutionLayer[512,{5,3}], 42 | BatchNormalizationLayer[], 43 | Ramp, 44 | ConvolutionLayer[1,{1,1}]}, 45 | "combine"->ThreadingLayer[Plus], 46 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 47 | |>, 48 | {NetPort["P"]->"Padding"->"Catenate", 49 | NetPort["z"]->"Catenate"->"chain"->"combine", 50 | NetPort["P"]->"combine"->"cut"}, 51 | "P"->dim, 52 | "z"->dim2] 53 | 54 | 55 | discriminator = NetChain[{ 56 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 57 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 58 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 59 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 60 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 61 | "Input" -> dim]; 62 | discriminatorGCM2Obser = discriminator; 63 | 64 | 65 | RdownscaleObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["net"]; 66 | DeltaObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["mse"]; 67 | 68 | gan =NetGraph[<| 69 | "Generator_GCM->Obser" -> generatorGCM2Obser, 70 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 71 | "Cat_GCM->Obser" -> CatenateLayer[], 72 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 73 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 74 | "Fake_GCM->Obser"->PartLayer[1], 75 | "Real_GCM->Obser"->PartLayer[2], 76 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 77 | 78 | 
"R_Downscaling_Obser"->RdownscaleObser, 79 | "MS_Obser_RDownscaling"->MeanSquaredLossLayer[], 80 | "Max_Obser_RDownscaling"->ElementwiseLayer[Max[#,DeltaObser]-DeltaObser &] 81 | |>, 82 | 83 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 84 | NetPort["D_GCM"]->NetPort["Generator_GCM->Obser","z"], 85 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 86 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 87 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 88 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 89 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 90 | 91 | NetPort["D_GCM"]->"R_Downscaling_Obser"->"MS_Obser_RDownscaling", 92 | "Generator_GCM->Obser"->"MS_Obser_RDownscaling"->"Max_Obser_RDownscaling"->NetPort["Loss_RDownscaling_GCM"] 93 | }, 94 | "P_Obser" -> dim, 95 | "P_GCM" -> dim, 96 | "D_GCM" -> dim2]; 97 | 98 | DiffMean=Infinity; 99 | DiffVar=Infinity; 100 | 101 | 102 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 103 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 104 | 105 | index=StringSplit[CreateUUID[],"-"][[1]]; 106 | Print[index]; 107 | ReportCycleGan2[net_] := 108 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 109 | gen=net[["Generator_GCM->Obser"]]; 110 | obserG=Map[gen[<|"P"->#[["P_GCM"]],"z"->#[["D_GCM"]]|>,TargetDevice->"GPU"]&,validation]; 111 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 112 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 113 | Print[TableForm[{{DiffMean,DiffVar}, 114 | {meanDiff,varDiff}}]]; 115 | If[meanDiff+varDiff<=DiffMean+DiffVar, 116 | Block[{}, 117 | Print[index]; 118 | Export["/g/g92/pan11/Baseline_6_GAN_ConditionalDynamics_"<>index<>".mx",net]; 119 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 120 | 121 | 122 | NetTrain[gan, 123 | {Function[Block[{base,choice,choice2}, 124 | base=RandomSample[Range[2,length],#BatchSize]; 125 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 126 | tempt=#+daylag+yearlag*365; 127 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 128 | <|"P_GCM"->nP4GCM[[base]], 129 | "D_GCM"->ndynamics4GCM[[base]], 130 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 131 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1], 132 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]]}, 133 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser", 134 | "Generator_GCM->Obser"}, 135 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 136 | "Generator_GCM->Obser" -> -1, 137 | "Discriminator_GCM->Obser"->1,"R_Downscaling_Obser"->0}, 138 | BatchSize -> 32, 139 | TargetDevice->"GPU", 140 | MaxTrainingRounds->100, 141 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 142 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 143 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 144 | 145 | gan=Import["/g/g92/pan11/Baseline_6_GAN_ConditionalDynamics_"<>index<>".mx"]; 146 | NetTrain[gan, 147 | {Function[Block[{base,choice,choice2}, 148 | base=RandomSample[Range[2,length],#BatchSize]; 149 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 150 | tempt=#+daylag+yearlag*365; 151 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 152 | <|"P_GCM"->nP4GCM[[base]], 153 | "D_GCM"->ndynamics4GCM[[base]], 154 | 
"P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 155 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1], 156 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]]}, 157 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser", 158 | "Generator_GCM->Obser"}, 159 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 160 | "Generator_GCM->Obser" -> -1, 161 | "Discriminator_GCM->Obser"->1,"R_Downscaling_Obser"->0}, 162 | BatchSize -> 32, 163 | TargetDevice->"GPU", 164 | MaxTrainingRounds->200, 165 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 166 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 167 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 168 | 169 | gan=Import["/g/g92/pan11/Baseline_6_GAN_ConditionalDynamics_"<>index<>".mx"]; 170 | NetTrain[gan, 171 | {Function[Block[{base,choice,choice2}, 172 | base=RandomSample[Range[2,length],#BatchSize]; 173 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 174 | tempt=#+daylag+yearlag*365; 175 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 176 | <|"P_GCM"->nP4GCM[[base]], 177 | "D_GCM"->ndynamics4GCM[[base]], 178 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 179 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1], 180 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]]}, 181 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser", 182 | "Generator_GCM->Obser"}, 183 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 184 | "Generator_GCM->Obser" -> -1, 185 | "Discriminator_GCM->Obser"->1,"R_Downscaling_Obser"->0}, 186 | BatchSize -> 32, 187 | TargetDevice->"GPU", 188 | MaxTrainingRounds->200, 189 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-6, 190 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 191 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 192 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_7_GAN_ConditionalSelfIdentity.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | 12 | length=14610; 13 | vlength=3652; 14 | test=Table[<|"P_GCM"->nP4GCM[[i]], 15 | "D_GCM"->ndynamics4GCM[[i]], 16 | "P_Obser"->nP4Obser[[i]], 17 | "D_Obser"->ndynamics4Obser[[i]] 18 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 19 | 20 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 21 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 22 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 23 | 24 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 25 | "D_GCM"->ndynamics4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]], 27 | 
"D_Obser"->ndynamics4Obser[[i]] 28 | |>,{i,length,length+vlength}]; 29 | 30 | generator=NetGraph[<|"Catenate"->CatenateLayer[1], 31 | "Padding"->PaddingLayer[{{0,0},{5,5},{4,4}}], 32 | "chain"->{ConvolutionLayer[64,{3,3}], 33 | BatchNormalizationLayer[], 34 | Ramp, 35 | ConvolutionLayer[128,{3,3}], 36 | BatchNormalizationLayer[], 37 | Ramp, 38 | ConvolutionLayer[256,{3,3}], 39 | BatchNormalizationLayer[], 40 | Ramp, 41 | ConvolutionLayer[512,{5,3}], 42 | BatchNormalizationLayer[], 43 | Ramp, 44 | ConvolutionLayer[1,{1,1}]}, 45 | "combine"->ThreadingLayer[Plus], 46 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 47 | |>, 48 | {NetPort["P"]->"Padding"->"Catenate", 49 | NetPort["z"]->"Catenate"->"chain"->"combine", 50 | NetPort["P"]->"combine"->"cut"}, 51 | "P"->dim, 52 | "z"->dim2] 53 | generatorGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 54 | generatorGCM2ObserR= NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 55 | 56 | discriminator = NetChain[{ 57 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 58 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 59 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 60 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 61 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 62 | "Input" -> dim]; 63 | discriminatorGCM2Obser = discriminator; 64 | 65 | 66 | gan =NetGraph[<| 67 | "Generator_GCM->Obser" -> generatorGCM2Obser, 68 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 69 | "Cat_GCM->Obser" -> CatenateLayer[], 70 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 71 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 72 | "Fake_GCM->Obser"->PartLayer[1], 73 | "Real_GCM->Obser"->PartLayer[2], 74 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 75 | "Generator_GCM->Obser_SelfRegression" -> generatorGCM2ObserR, 76 | "MS_GCM2Obser_SelfRegression"->MeanAbsoluteLossLayer[] 77 | |>, 78 | 79 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 80 | NetPort["D_GCM"]->NetPort["Generator_GCM->Obser","z"], 81 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 82 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 83 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 84 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 85 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 86 | 87 | NetPort["P_Obser"]->NetPort["Generator_GCM->Obser_SelfRegression","P"], 88 | NetPort["D_Obser"]->NetPort["Generator_GCM->Obser_SelfRegression","z"], 89 | "Generator_GCM->Obser_SelfRegression"->"MS_GCM2Obser_SelfRegression", 90 | NetPort["P_Obser"]->"MS_GCM2Obser_SelfRegression"->NetPort["Loss_GCM2Obser_SelfRegression"] 91 | }, 92 | "P_Obser" -> dim, 93 | "P_GCM" -> dim, 94 | "D_GCM" -> dim2, 95 | "D_Obser" -> dim2]; 96 | 97 | DiffMean=Infinity; 98 | DiffVar=Infinity; 99 | 100 | 101 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 102 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 103 | 104 | index=StringSplit[CreateUUID[],"-"][[1]]; 105 | Print[index]; 106 | ReportCycleGan2[net_] := 107 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 108 | gen=net[["Generator_GCM->Obser"]]; 109 | obserG=Map[gen[<|"P"->#[["P_GCM"]],"z"->#[["D_GCM"]]|>,TargetDevice->"GPU"]&,validation]; 
110 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 111 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 112 | Print[TableForm[{{DiffMean,DiffVar}, 113 | {meanDiff,varDiff}}]]; 114 | If[meanDiff+varDiff<=DiffMean+DiffVar, 115 | Block[{}, 116 | Print[index]; 117 | Export["/g/g92/pan11/Baseline_7_GAN_ConditionalSelfIdentity_"<>index<>".mx",net]; 118 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 119 | 120 | 121 | NetTrain[gan, 122 | {Function[Block[{base,choice,choice2}, 123 | base=RandomSample[Range[2,length],#BatchSize]; 124 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 125 | tempt=#+daylag+yearlag*365; 126 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 127 | <|"P_GCM"->nP4GCM[[base]], 128 | "D_GCM"->ndynamics4GCM[[base]], 129 | "D_Obser"->ndynamics4Obser[[choice]], 130 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 131 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]]}, 132 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser","Generator_GCM->Obser","Generator_GCM->Obser_SelfRegression"}, 133 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 134 | "Generator_GCM->Obser" -> -1, 135 | "Discriminator_GCM->Obser"->1, 136 | "Generator_GCM->Obser_SelfRegression"->-1}, 137 | BatchSize -> 32, 138 | TargetDevice->"GPU", 139 | MaxTrainingRounds->100, 140 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 141 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 142 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 143 | 144 | gan=Import["/g/g92/pan11/Baseline_7_GAN_ConditionalSelfIdentity_"<>index<>".mx"]; 145 | NetTrain[gan, 146 | {Function[Block[{base,choice,choice2}, 147 | base=RandomSample[Range[2,length],#BatchSize]; 148 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 149 | tempt=#+daylag+yearlag*365; 150 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 151 | <|"P_GCM"->nP4GCM[[base]], 152 | "D_GCM"->ndynamics4GCM[[base]], 153 | "D_Obser"->ndynamics4Obser[[choice]], 154 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 155 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]]}, 156 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser","Generator_GCM->Obser","Generator_GCM->Obser_SelfRegression"}, 157 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 158 | "Generator_GCM->Obser" -> -1, 159 | "Discriminator_GCM->Obser"->1, 160 | "Generator_GCM->Obser_SelfRegression"->-1}, 161 | BatchSize -> 32, 162 | TargetDevice->"GPU", 163 | MaxTrainingRounds->200, 164 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 165 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 166 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 167 | 168 | gan=Import["/g/g92/pan11/Baseline_7_GAN_ConditionalSelfIdentity_"<>index<>".mx"]; 169 | NetTrain[gan, 170 | {Function[Block[{base,choice,choice2}, 171 | base=RandomSample[Range[2,length],#BatchSize]; 172 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 173 | tempt=#+daylag+yearlag*365; 174 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 175 | 
<|"P_GCM"->nP4GCM[[base]], 176 | "D_GCM"->ndynamics4GCM[[base]], 177 | "D_Obser"->ndynamics4Obser[[choice]], 178 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 179 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]]}, 180 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser","Generator_GCM->Obser","Generator_GCM->Obser_SelfRegression"}, 181 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, 182 | "Generator_GCM->Obser" -> -1, 183 | "Discriminator_GCM->Obser"->1, 184 | "Generator_GCM->Obser_SelfRegression"->-1}, 185 | BatchSize -> 32, 186 | TargetDevice->"GPU", 187 | MaxTrainingRounds->200, 188 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-6, 189 | "WeightClipping" -> {"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 190 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 191 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_9_GAN_CycleSelfIdentity.m: -------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | hype={500,5,1}; 3 | days={1,0} 4 | dim={1,26,48}; 5 | dim2={3*(Total[days]+1),36,56}; 6 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 7 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 8 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 9 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 10 | 11 | 12 | length=14610; 13 | vlength=3652; 14 | test=Table[<|"P_GCM"->nP4GCM[[i]], 15 | "D_GCM"->ndynamics4GCM[[i]], 16 | "P_Obser"->nP4Obser[[i]], 17 | "D_Obser"->ndynamics4Obser[[i]] 18 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 19 | 20 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 21 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 22 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 23 | 24 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 25 | "D_GCM"->ndynamics4GCM[[i]], 26 | "P_Obser"->nP4Obser[[i]], 27 | "D_Obser"->ndynamics4Obser[[i]] 28 | |>,{i,length,length+vlength}]; 29 | 30 | generator=NetGraph[<| 31 | "chain"->{ConvolutionLayer[64,{3,3},"PaddingSize"->1], 32 | BatchNormalizationLayer[], 33 | Ramp, 34 | ConvolutionLayer[128,{3,3},"PaddingSize"->1], 35 | BatchNormalizationLayer[], 36 | Ramp, 37 | ConvolutionLayer[256,{3,3},"PaddingSize"->1], 38 | BatchNormalizationLayer[], 39 | Ramp, 40 | ConvolutionLayer[512,{3,3},"PaddingSize"->1], 41 | BatchNormalizationLayer[], 42 | Ramp, 43 | ConvolutionLayer[1,{1,1}]}, 44 | "combine"->ThreadingLayer[Plus], 45 | "cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 46 | |>, 47 | {NetPort["P"]->"chain"->"combine", 48 | NetPort["P"]->"combine"->"cut"}, 49 | "P"->dim]; 50 | generatorGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 51 | cycleGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 52 | generatorGCM2ObserR= NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 53 | 54 | generatorObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 55 | cycleObser2GCM = 
NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 56 | generatorObser2GCMR = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 57 | 58 | discriminator = NetChain[{ 59 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 60 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 61 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 62 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 63 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 64 | "Input" -> dim]; 65 | discriminatorGCM2Obser = discriminator; 66 | discriminatorObser2GCM = discriminator; 67 | 68 | 69 | cycleGAN =NetGraph[<| 70 | "Generator_GCM->Obser" -> generatorGCM2Obser, 71 | "Generator_GCM->Obser_SelfRegression" -> generatorGCM2ObserR, 72 | "Cycle_GCM->Obser" -> cycleGCM2Obser, 73 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 74 | "Cat_GCM->Obser" -> CatenateLayer[], 75 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 76 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 77 | "Fake_GCM->Obser"->PartLayer[1], 78 | "Real_GCM->Obser"->PartLayer[2], 79 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 80 | "MS_GCM->Obser"->MeanAbsoluteLossLayer[], 81 | 82 | "Generator_Obser->GCM" -> generatorObser2GCM, 83 | "Generator_Obser->GCM_SelfRegression" -> generatorObser2GCMR, 84 | "Cycle_Obser->GCM" -> cycleObser2GCM, 85 | "Discriminator_Obser->GCM" -> NetMapOperator[discriminatorObser2GCM], 86 | "Cat_Obser->GCM" -> CatenateLayer[], 87 | "Reshape_Obser->GCM" -> ReshapeLayer[Prepend[dim,2]], 88 | "Flat_Obser->GCM" -> ReshapeLayer[{2}], 89 | "Fake_Obser->GCM"->PartLayer[1], 90 | "Real_Obser->GCM"->PartLayer[2], 91 | "Scale_Obser->GCM" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 92 | "MS_Obser->GCM"->MeanAbsoluteLossLayer[], 93 | 94 | "MS_GCM2Obser_SelfRegression"->MeanAbsoluteLossLayer[], 95 | "MS_Obser2GCM_SelfRegression"->MeanAbsoluteLossLayer[] 96 | |>, 97 | 98 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 99 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 100 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 101 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 102 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 103 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 104 | "Generator_GCM->Obser"->NetPort["Cycle_Obser->GCM","P"], 105 | "Cycle_Obser->GCM"->"MS_GCM->Obser", 106 | NetPort["P_GCM"]->"MS_GCM->Obser"->NetPort["ReconstructionLoss_GCM->Obser"], 107 | 108 | 109 | NetPort["P_Obser"] ->NetPort["Generator_Obser->GCM","P"], 110 | "Generator_Obser->GCM"-> "Cat_Obser->GCM", 111 | NetPort["P_GCM"] -> "Cat_Obser->GCM", 112 | "Cat_Obser->GCM" -> "Reshape_Obser->GCM" -> "Discriminator_Obser->GCM" -> "Flat_Obser->GCM" -> "Scale_Obser->GCM" -> 113 | "Fake_Obser->GCM"->NetPort["FakeLoss_Obser->GCM"], 114 | "Scale_Obser->GCM"->"Real_Obser->GCM"->NetPort["RealLoss_Obser->GCM"], 115 | "Generator_Obser->GCM"->NetPort["Cycle_GCM->Obser","P"], 116 | "Cycle_GCM->Obser"->"MS_Obser->GCM", 117 | NetPort["P_Obser"]->"MS_Obser->GCM"->NetPort["ReconstructionLoss_Obser->GCM"], 118 | 119 | NetPort["P_GCM"]->NetPort["Generator_Obser->GCM_SelfRegression","P"], 120 | "Generator_Obser->GCM_SelfRegression"->"MS_Obser2GCM_SelfRegression", 121 | 
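(* Self-identity terms: each generator is also applied to a sample already in its target domain (weights shared through NetInsertSharedArrays) and pulled toward the identity map with a mean-absolute loss, an identity regularizer used alongside the cycle-reconstruction losses. *)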
NetPort["P_GCM"]->"MS_Obser2GCM_SelfRegression"->NetPort["Loss_Obser2GCM_SelfRegression"], 122 | 123 | NetPort["P_Obser"]->NetPort["Generator_GCM->Obser_SelfRegression","P"], 124 | "Generator_GCM->Obser_SelfRegression"->"MS_GCM2Obser_SelfRegression", 125 | NetPort["P_Obser"]->"MS_GCM2Obser_SelfRegression"->NetPort["Loss_GCM2Obser_SelfRegression"] 126 | }, 127 | "P_Obser" -> dim, 128 | "P_GCM" -> dim] 129 | 130 | 131 | DiffMean=Infinity; 132 | DiffVar=Infinity; 133 | 134 | 135 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 136 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 137 | 138 | index=StringSplit[CreateUUID[],"-"][[1]]; 139 | Print[index]; 140 | ReportCycleGan2[net_] := 141 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 142 | gen=net[["Generator_GCM->Obser"]]; 143 | obserG=Map[gen[#[["P_GCM"]],TargetDevice->"GPU"]&,validation]; 144 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 145 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 146 | Print[TableForm[{{DiffMean,DiffVar}, 147 | {meanDiff,varDiff}}]]; 148 | If[meanDiff+varDiff<=DiffMean+DiffVar, 149 | Block[{}, 150 | Print[index]; 151 | Export["/g/g92/pan11/Baseline_9_GAN_CycleSelfIdentity"<>index<>".mx",net]; 152 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 153 | 154 | NetTrain[cycleGAN, 155 | {Function[Block[{base,choice,choice2}, 156 | base=RandomSample[Range[2,length],#BatchSize]; 157 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 158 | tempt=#+daylag+yearlag*365; 159 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 160 | <|"P_GCM"->nP4GCM[[base]], 161 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 162 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 163 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]], 164 | "Loss_Obser2GCM_SelfRegression"->Scaled[-hype[[3]]],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]]}, 165 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 166 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 167 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM", 168 | "Generator_GCM->Obser_SelfRegression"|"Generator_Obser->GCM_SelfRegression"}, 169 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 170 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 171 | "Generator_GCM->Obser_SelfRegression" -> -1,"Generator_Obser->GCM_SelfRegression"->-1, 172 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 173 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1}, 174 | BatchSize -> 32, 175 | TargetDevice->"GPU", 176 | MaxTrainingRounds->100, 177 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 178 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 179 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 180 | 181 | cycleGAN=Import["/g/g92/pan11/Baseline_9_GAN_CycleSelfIdentity"<>index<>".mx"]; 182 | NetTrain[cycleGAN, 183 | {Function[Block[{base,choice,choice2}, 184 | base=RandomSample[Range[2,length],#BatchSize]; 185 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 186 | tempt=#+daylag+yearlag*365; 187 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 188 
| <|"P_GCM"->nP4GCM[[base]], 189 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 190 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 191 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]], 192 | "Loss_Obser2GCM_SelfRegression"->Scaled[-hype[[3]]],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]]}, 193 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 194 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 195 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM", 196 | "Generator_GCM->Obser_SelfRegression"|"Generator_Obser->GCM_SelfRegression"}, 197 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 198 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 199 | "Generator_GCM->Obser_SelfRegression" -> -1,"Generator_Obser->GCM_SelfRegression"->-1, 200 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 201 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1}, 202 | BatchSize -> 32, 203 | TargetDevice->"GPU", 204 | MaxTrainingRounds->300, 205 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-5, 206 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 207 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 208 | 209 | cycleGAN=Import["/g/g92/pan11/Baseline_9_GAN_CycleSelfIdentity"<>index<>".mx"]; 210 | NetTrain[cycleGAN, 211 | {Function[Block[{base,choice,choice2}, 212 | base=RandomSample[Range[2,length],#BatchSize]; 213 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 214 | tempt=#+daylag+yearlag*365; 215 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 216 | <|"P_GCM"->nP4GCM[[base]], 217 | "P_Obser"->nP4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 218 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 219 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]], 220 | "Loss_Obser2GCM_SelfRegression"->Scaled[-hype[[3]]],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]]}, 221 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 222 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 223 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM", 224 | "Generator_GCM->Obser_SelfRegression"|"Generator_Obser->GCM_SelfRegression"}, 225 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 226 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 227 | "Generator_GCM->Obser_SelfRegression" -> -1,"Generator_Obser->GCM_SelfRegression"->-1, 228 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 229 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1}, 230 | BatchSize -> 32, 231 | TargetDevice->"GPU", 232 | MaxTrainingRounds->300, 233 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-6, 234 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 235 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 236 | -------------------------------------------------------------------------------- /Code/Baseline/Baseline_Evaluation.m: 
-------------------------------------------------------------------------------- 1 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 2 | days={1,0} 3 | dim={1,26,48}; 4 | dim2={3*(Total[days]+1),36,56}; 5 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 6 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 7 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 8 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 9 | length=14610; 10 | vlength=3652; 11 | test=Table[<|"P_GCM"->nP4GCM[[i]], 12 | "D_GCM"->ndynamics4GCM[[i]], 13 | "P_Obser"->nP4Obser[[i]], 14 | "D_Obser"->ndynamics4Obser[[i]] 15 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 16 | 17 | Table[Block[{models,DADT,GCM,OBSER}, 18 | SetDirectory["/g/g92/pan11/Baseline"]; 19 | models=Map[Import,FileNames["Baseline_"<>ToString[index]<>"_*mx"]]; 20 | Print["Index="<>ToString[index]]; 21 | DADT=Block[{ms}, 22 | ms=Map[#[["Generator_GCM->Obser"]]&,models]; 23 | Table[Block[{tempt=ms[[i]]},Print[i]; 24 | If[MemberQ[{0,2,3,4,8,9,10,14},index], 25 | Map[tempt[#[["P_GCM"]],TargetDevice->"GPU"]&,test], 26 | Map[tempt[<|"P"->#[["P_GCM"]],"z"->#[["D_GCM"]]|>,TargetDevice->"GPU"]&,test]]], 27 | {i,Length[models]}]][[;;,;;,1]]; 28 | GCM=test[[;;,"P_GCM"]][[;;,1]]; 29 | OBSER=test[[;;,"P_Obser"]][[;;,1]]; 30 | 31 | {DADT,GCM,OBSER}=Map[Exp[#]-1&,{DADT,GCM,OBSER}]; 32 | Export["/g/g92/pan11/result_Baseline_"<>ToString[index]<>".mx",Map[NumericArray[#,"Real32"]&,{DADT,GCM,OBSER}]];],{index,0,14}]; 33 | -------------------------------------------------------------------------------- /Code/Data_Processing.m: -------------------------------------------------------------------------------- 1 | {nP4Obser,ndynamics4Obser,position,validMatrix,plat4Obser,plon4Obser,dlat4Obser,dlon4Obser}=Import["/usr/workspace/pan11/CycleGAN/Obser/all.mx"]; 2 | (* 3 | {nP4Obser,ndynamics4Obser,position,validMatrix,plat4Obser,plon4Obser,dlat4Obser,dlon4Obser}= 4 | Block[{P,dynamics,position,ndynamics,validMatrix,nP,plat,plon,dlat,dlon}, 5 | SetDirectory["/usr/workspace/pan11/CycleGAN/Obser"]; 6 | P=Import["P.mx"]["data"]; 7 | dynamics=Block[{hus,zg,slp}, 8 | hus=Block[{tempt=Import["hus.mx"]["data"]},Table[Mean[tempt[[i*8+1;;(i+1)*8]]],{i,0,Length[tempt]/8-1}]]; 9 | zg=Block[{tempt=Import["zg.mx"]["data"]},Table[Mean[tempt[[i*8+1;;(i+1)*8]]],{i,0,Length[tempt]/8-1}]]; 10 | slp=Block[{tempt=Import["slp.mx"]["data"]},Table[Mean[tempt[[i*8+1;;(i+1)*8]]],{i,0,Length[tempt]/8-1}]]; 11 | Transpose[{slp,zg,hus}]]; 12 | position=Block[{valiMatrix,mP}, 13 | valiMatrix=Block[{tempt=P[[1]]},Table[If[tempt[[i,j]]<0,0,1],{i,100},{j,236}]]; 14 | mP=Table[Mean[Flatten[P[[i]]*valiMatrix]],{i,Length[P]}]; 15 | Position[mP,_?(#>=0.&)][[;;,1]]]; 16 | position=Select[position,#<=Length[dynamics]&]; 17 | ndynamics=Block[{tempt=dynamics[[position]]}, 18 | Transpose[Table[Block[{mean,sigma}, 19 | mean=Mean[tempt[[;;,i]]]; 20 | sigma=Sqrt[Variance[tempt[[;;,i]]]]; 21 | Map[(#-mean)/sigma&,tempt[[;;,i]]]],{i,Dimensions[tempt][[2]]}]]]; 22 | validMatrix=Block[{tempt1=ArrayResample[P[[1]],{27,48},"Bin"]}, 23 | Table[If[tempt1[[i,j]]<0,0,1],{i,Length[tempt1]},{j,Dimensions[tempt1][[2]]}]]; 24 | nP=Block[{tempt}, 25 | tempt=Map[ArrayResample[#,{27,48},"Bin"]*validMatrix&,P[[position]]]; 26 | Log[tempt+1.]]; 27 | plat=Import["P.mx"]["lat"]; 28 | plon=Import["P.mx"]["lon"]; 29 | dlat=Import["hus.mx"]["lat"]; 30 | dlon=Import["hus.mx"]["lon"]; 31 | 
{nP,ndynamics,position,validMatrix,plat,plon,dlat,dlon}]; 32 | Export["/usr/workspace/pan11/CycleGAN/Obser/all.mx",{nP4Obser,ndynamics4Obser,position,validMatrix,plat4Obser,plon4Obser,dlat4Obser,dlon4Obser}]; 33 | *) 34 | 35 | {nP4GCM,ndynamics4GCM,plat4GCM,plon4GCM,dlat4GCM,dlon4GCM}=Block[{dir,p,np,dynamics,ndynamics,nP,plat,plon,dlat,dlon}, 36 | dir="/usr/workspace/pan11/CycleGAN/GCM/CESM"; 37 | SetDirectory[dir]; 38 | p=Import["P.mx"]["data"]; 39 | dynamics=Block[{hus,zg,psl}, 40 | hus=Import["hus.mx"]["data"]; 41 | zg=Import["zg.mx"]["data"]; 42 | psl=Import["psl.mx"]["data"]; 43 | Transpose[{psl,zg[[;;,2]],hus[[;;,2]]}]]; 44 | ndynamics=Transpose[Table[Block[{mean,sigma}, 45 | mean=Mean[dynamics[[;;,i]]]; 46 | sigma=Sqrt[Variance[dynamics[[;;,i]]]]; 47 | Map[(#-mean)/sigma&,dynamics[[;;,i]]]],{i,Dimensions[dynamics][[2]]}]]; 48 | nP=Map[#*validMatrix&,Log[p*24*3600+1.]]; 49 | plat=Import["P.mx"]["lat"]; 50 | plon=Import["P.mx"]["lon"]; 51 | dlat=Import["hus.mx"]["lat"]; 52 | dlon=Import["hus.mx"]["lon"]; 53 | {nP, ndynamics,plat,plon,dlat,dlon}]; 54 | 55 | nP4Obser=nP4Obser[[;;,1;;-2]]; 56 | plat4Obser=plat4Obser[[1;;-2]]; 57 | validMatrix=validMatrix[[1;;-2]]; 58 | nP4GCM=nP4GCM[[position,1;;-2]]; 59 | ndynamics4GCM=ndynamics4GCM[[position]]; 60 | plat4GCM=plat4GCM[[1;;-2]]; 61 | nP4GCM=Map[List,nP4GCM]; 62 | nP4Obser=Map[List,nP4Obser]; 63 | length=Length[nP4Obser]; 64 | 65 | {nP4GCM,nP4Obser,plat4GCM,validMatrix,ndynamics4GCM,ndynamics4Obser}= 66 | {nP4GCM[[;;,;;,26;;1;;-1]],nP4Obser[[;;,;;,26;;1;;-1]], 67 | plat4GCM[[26;;1;;-1]],validMatrix[[26;;1;;-1]], 68 | ndynamics4GCM[[;;,;;,36;;1;;-1]],ndynamics4Obser[[;;,;;,36;;1;;-1]]}; 69 | -------------------------------------------------------------------------------- /Code/Dynamical_Regularization.m: -------------------------------------------------------------------------------- 1 | hype={RandomSample[{300,500,1000}][[1]],RandomSample[{5,7,10}][[1]],RandomSample[Range[1,3]][[1]]}; 2 | days={1,0} 3 | dim={1,26,48}; 4 | dim2={3*(Total[days]+1),36,56}; 5 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 6 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 7 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 8 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 9 | mse[a_,b_]:=Mean[(a-b)^2] 10 | length=14610; 11 | vlength=3652; 12 | tlength=Length[nP4GCM]; 13 | test=Table[<|"P_GCM"->nP4GCM[[i]], 14 | "D_GCM"->ndynamics4GCM[[i]], 15 | "P_Obser"->nP4Obser[[i]], 16 | "D_Obser"->ndynamics4Obser[[i]] 17 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 18 | 19 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 20 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 21 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 22 | 23 | trainObser=Table[ndynamics4Obser[[i]]->nP4Obser[[i]],{i,length}]; 24 | validationObser=Table[ndynamics4Obser[[i]]->nP4Obser[[i]],{i,length,length+vlength}]; 25 | 26 | downscalingObser=Import["/g/g92/pan11/Backup_CycleGAN/Previous/Downscaling_Obser.mx"]; 27 | 28 | trainedObser=NetTrain[downscalingObser,trainObser, 29 | ValidationSet->validationObser, 30 | TargetDevice->{"GPU",All}, 31 | Method->{"ADAM","L2Regularization"->10^-3,"LearningRate"->10^-4}, 32 | MaxTrainingRounds->500, 33 | 
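(* Three fine-tuning passes follow, with the learning rate stepped from 10^-4 down to 10^-6; the trained network's mean-squared error over the training/validation days is exported alongside its weights and later serves as the tolerance (DeltaObser / DeltaGCM) in the adversarial models' dynamical-regularization hinge. *)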
BatchSize->32]; 34 | trainedObser=NetTrain[trainedObser,trainObser, 35 | ValidationSet->validationObser, 36 | TargetDevice->{"GPU",All}, 37 | Method->{"ADAM","L2Regularization"->10^-3,"LearningRate"->10^-5}, 38 | MaxTrainingRounds->500, 39 | BatchSize->32]; 40 | trainedObser=NetTrain[trainedObser,trainObser, 41 | ValidationSet->validationObser, 42 | TargetDevice->{"GPU",All}, 43 | Method->{"ADAM","L2Regularization"->10^-3,"LearningRate"->10^-6}, 44 | MaxTrainingRounds->500, 45 | BatchSize->32]; 46 | 47 | simu=Table[trainedObser[ndynamics4Obser[[i]],TargetDevice->"GPU"],{i,Length[ndynamics4Obser]}]; 48 | obser=nP4Obser; 49 | 50 | corr=Table[If[And[Variance[simu[[;;,1,i,j]]]>0,Variance[obser[[;;,1,i,j]]]>0], 51 | Correlation[simu[[;;,1,i,j]],obser[[;;,1,i,j]]],-2],{i,26},{j,48}]; 52 | Mean[Select[Flatten[corr],Positive]] 53 | 54 | amse=Table[If[And[Variance[simu[[;;,1,i,j]]]>0,Variance[obser[[;;,1,i,j]]]>0], 55 | mse[simu[[;;,1,i,j]],obser[[;;,1,i,j]]],0],{i,26},{j,48}]; 56 | Mean[Flatten[amse]] 57 | Export["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx", 58 | <|"net"->trainedObser, 59 | "mse"->Mean[Flatten[amse]]|>] 60 | 61 | 62 | trainGCM=Table[ndynamics4GCM[[i]]->nP4GCM[[i]],{i,length}]; 63 | validationGCM=Table[ndynamics4GCM[[i]]->nP4GCM[[i]],{i,length,length+vlength}]; 64 | 65 | downscalingGCM=Import["/g/g92/pan11/Backup_CycleGAN/Previous/Downscaling_GCM.mx"]; 66 | trainedGCM=NetTrain[downscalingGCM,trainGCM, 67 | ValidationSet->validationGCM, 68 | TargetDevice->{"GPU",All}, 69 | Method->{"ADAM","L2Regularization"->10^-3,"LearningRate"->10^-4}, 70 | MaxTrainingRounds->500, 71 | BatchSize->32]; 72 | trainedGCM=NetTrain[trainedGCM,trainGCM, 73 | ValidationSet->validationGCM, 74 | TargetDevice->{"GPU",All}, 75 | Method->{"ADAM","L2Regularization"->10^-3,"LearningRate"->10^-5}, 76 | MaxTrainingRounds->500, 77 | BatchSize->32]; 78 | trainedGCM=NetTrain[trainedGCM,trainGCM, 79 | ValidationSet->validationGCM, 80 | TargetDevice->{"GPU",All}, 81 | Method->{"ADAM","L2Regularization"->10^-3,"LearningRate"->10^-6}, 82 | MaxTrainingRounds->500, 83 | BatchSize->32]; 84 | 85 | simu=Table[trainedGCM[ndynamics4GCM[[i]],TargetDevice->"GPU"],{i,Length[ndynamics4GCM]}]; 86 | obser=nP4GCM; 87 | 88 | corr=Table[If[And[Variance[simu[[;;,1,i,j]]]>0,Variance[obser[[;;,1,i,j]]]>0], 89 | Correlation[simu[[;;,1,i,j]],obser[[;;,1,i,j]]],-2],{i,26},{j,48}]; 90 | Mean[Select[Flatten[corr],Positive]] 91 | 92 | amse=Table[If[And[Variance[simu[[;;,1,i,j]]]>0,Variance[obser[[;;,1,i,j]]]>0], 93 | mse[simu[[;;,1,i,j]],obser[[;;,1,i,j]]],0],{i,26},{j,48}]; 94 | Mean[Flatten[amse]] 95 | 96 | Export["/g/g92/pan11/Backup_CycleGAN/Downscaling_GCM.mx", 97 | <|"net"->trainedGCM, 98 | "mse"->Mean[Flatten[amse]]|>] 99 | -------------------------------------------------------------------------------- /Code/Evaluation.m: -------------------------------------------------------------------------------- 1 | SetDirectory["/g/g92/pan11/Trained"]; 2 | models=Map[Import,FileNames["Cycle*mx"]]; 3 | 4 | Import["/g/g92/pan11/CycleGAN/2020_11_13_CycleGAN_Data.m"]; 5 | days={1,0} 6 | dim={1,26,48}; 7 | dim2={3*(Total[days]+1),36,56}; 8 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 9 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 10 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 11 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 12 | length=14610; 13 | vlength=3652; 14 | 
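(* Held-out test period: the days after the 14610-day training window and the 3652-day validation window. *)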
test=Table[<|"P_GCM"->nP4GCM[[i]], 15 | "D_GCM"->ndynamics4GCM[[i]], 16 | "P_Obser"->nP4Obser[[i]], 17 | "D_Obser"->ndynamics4Obser[[i]] 18 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 19 | 20 | DADT=Block[{ms}, 21 | ms=Map[#[["Generator_GCM->Obser"]]&,models]; 22 | Table[Block[{tempt=ms[[i]]},Print[i]; 23 | Map[tempt[<|"P"->#[["P_GCM"]],"z"->#[["D_GCM"]]|>,TargetDevice->"GPU"]&,test]], 24 | {i,Length[models]}]][[;;,;;,1]]; 25 | GCM=test[[;;,"P_GCM"]][[;;,1]]; 26 | OBSER=test[[;;,"P_Obser"]][[;;,1]]; 27 | 28 | {DADT,GCM,OBSER}=Map[Exp[#]-1&,{DADT,GCM,OBSER}]; 29 | Export["/g/g92/pan11/result.mx",Map[NumericArray[#,"Real32"]&,{DADT,GCM,OBSER}]]; 30 | 31 | downscalingObser=Block[{models}, 32 | SetDirectory["/g/g92/pan11/Trained"]; 33 | models=Map[Import[#][["R_Downscaling_Obser"]]&,FileNames["Cycle*mx"]]; 34 | Table[Block[{tempt=models[[i]]},Print[i]; 35 | Map[{Exp[tempt[#[["D_GCM"]],TargetDevice->"GPU"]]-1., 36 | Exp[tempt[#[["D_Obser"]],TargetDevice->"GPU"]]-1.}&,test]], 37 | {i,Length[models]}]]; 38 | Export["/g/g92/pan11/DownscalingObser.mx",NumericArray[downscalingObser,"Real32"]]; 39 | 40 | downscalingGCM=Block[{models}, 41 | SetDirectory["/g/g92/pan11/Trained"]; 42 | models=Map[Import[#][["R_Downscaling_GCM"]]&,FileNames["Cycle*mx"]]; 43 | Table[Block[{tempt=models[[i]]},Print[i]; 44 | Map[{Exp[tempt[#[["D_GCM"]],TargetDevice->"GPU"]]-1., 45 | Exp[tempt[#[["D_Obser"]],TargetDevice->"GPU"]]-1.}&,test]], 46 | {i,Length[models]}]]; 47 | Export["/g/g92/pan11/DownscalingGCM.mx",NumericArray[downscalingGCM,"Real32"]]; 48 | -------------------------------------------------------------------------------- /Code/RADA.m: -------------------------------------------------------------------------------- 1 | hype={RandomSample[{300,500,1000}][[1]],RandomSample[{3,5,7,10}][[1]],RandomSample[Range[1,3]][[1]]}; 2 | days={1,0} 3 | dim={1,26,48}; 4 | dim2={3*(Total[days]+1),36,56}; 5 | nP4Obser=nP4Obser[[1+days[[1]];;Length[nP4Obser]-days[[2]]]]; 6 | ndynamics4Obser=Map[Flatten[#,1]&,Transpose[Table[ndynamics4Obser[[1+k;;Length[nP4Obser]+k]],{k,0,Total[days]}]]]; 7 | nP4GCM=nP4GCM[[1+days[[1]];;Length[nP4GCM]-days[[2]]]]; 8 | ndynamics4GCM=Map[Flatten[#,1]&,Transpose[Table[ndynamics4GCM[[1+k;;Length[nP4GCM]+k]],{k,0,Total[days]}]]]; 9 | 10 | 11 | length=14610; 12 | vlength=3652; 13 | test=Table[<|"P_GCM"->nP4GCM[[i]], 14 | "D_GCM"->ndynamics4GCM[[i]], 15 | "P_Obser"->nP4Obser[[i]], 16 | "D_Obser"->ndynamics4Obser[[i]] 17 | |>,{i,length+vlength+1,Length[nP4GCM]}]; 18 | 19 | seq=Flatten[{Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]<=7&]}]],#<=length+vlength&], 20 | Select[Flatten[Table[Range[(i-1)*365+1,i*365],{i,Select[Range[1,60],Mod[#,10]>7&]}]],#<=length+vlength&]}]; 21 | {nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}=Map[#[[seq]]&,{nP4GCM,ndynamics4GCM,nP4Obser,ndynamics4Obser}]; 22 | 23 | validation=Table[<|"P_GCM"->nP4GCM[[i]], 24 | "D_GCM"->ndynamics4GCM[[i]], 25 | "P_Obser"->nP4Obser[[i]], 26 | "D_Obser"->ndynamics4Obser[[i]] 27 | |>,{i,length,length+vlength}]; 28 | 29 | 30 | generator=NetGraph[<|"Catenate"->CatenateLayer[1], 31 | "Padding"->PaddingLayer[{{0,0},{5,5},{4,4}}], 32 | "chain"->{ConvolutionLayer[64,{3,3}], 33 | BatchNormalizationLayer[], 34 | Ramp, 35 | ConvolutionLayer[128,{3,3}], 36 | BatchNormalizationLayer[], 37 | Ramp, 38 | ConvolutionLayer[256,{3,3}], 39 | BatchNormalizationLayer[], 40 | Ramp, 41 | ConvolutionLayer[512,{5,3}], 42 | BatchNormalizationLayer[], 43 | Ramp, 44 | ConvolutionLayer[1,{1,1}]}, 45 | "combine"->ThreadingLayer[Plus], 46 | 
"cut"->{ConstantTimesLayer["Scaling"->{validMatrix},LearningRateMultipliers->0.],Ramp} 47 | |>, 48 | {NetPort["P"]->"Padding"->"Catenate", 49 | NetPort["z"]->"Catenate"->"chain"->"combine", 50 | NetPort["P"]->"combine"->"cut"}, 51 | "P"->dim, 52 | "z"->dim2] 53 | 54 | generatorGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 55 | cycleGCM2Obser = NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 56 | generatorGCM2ObserR= NetInsertSharedArrays[generator, "generatorGCM2Obser/"]; 57 | 58 | generatorObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 59 | cycleObser2GCM = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 60 | generatorObser2GCMR = NetInsertSharedArrays[generator, "generatorObser2GCM/"]; 61 | 62 | discriminator = NetChain[{ 63 | ConvolutionLayer[16,{3,3},"Stride"->1],BatchNormalizationLayer[], Ramp, 64 | ConvolutionLayer[32,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 65 | ConvolutionLayer[64,{3,3},"Stride"->1],BatchNormalizationLayer[],Ramp, 66 | ConvolutionLayer[128,{3,3},"Stride"->2],BatchNormalizationLayer[],Ramp, 67 | FlattenLayer[],BatchNormalizationLayer[],hype[[1]],Ramp,BatchNormalizationLayer[], 1, ElementwiseLayer["HardSigmoid"]}, 68 | "Input" -> dim]; 69 | discriminatorGCM2Obser = discriminator; 70 | discriminatorObser2GCM = discriminator; 71 | 72 | 73 | RdownscaleGCM=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_GCM.mx"]["net"]; 74 | DeltaGCM=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_GCM.mx"]["mse"]; 75 | 76 | RdownscaleObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["net"]; 77 | DeltaObser=Import["/g/g92/pan11/Backup_CycleGAN/Downscaling_Obser.mx"]["mse"]; 78 | 79 | cycleGAN =NetGraph[<| 80 | "Generator_GCM->Obser" -> generatorGCM2Obser, 81 | "Generator_GCM->Obser_SelfRegression" -> generatorGCM2ObserR, 82 | "Cycle_GCM->Obser" -> cycleGCM2Obser, 83 | "Discriminator_GCM->Obser" -> NetMapOperator[discriminatorGCM2Obser], 84 | "Cat_GCM->Obser" -> CatenateLayer[], 85 | "Reshape_GCM->Obser" -> ReshapeLayer[Prepend[dim,2]], 86 | "Flat_GCM->Obser" -> ReshapeLayer[{2}], 87 | "Fake_GCM->Obser"->PartLayer[1], 88 | "Real_GCM->Obser"->PartLayer[2], 89 | "Scale_GCM->Obser" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 90 | "MS_GCM->Obser"->MeanAbsoluteLossLayer[], 91 | 92 | "Generator_Obser->GCM" -> generatorObser2GCM, 93 | "Generator_Obser->GCM_SelfRegression" -> generatorObser2GCMR, 94 | "Cycle_Obser->GCM" -> cycleObser2GCM, 95 | "Discriminator_Obser->GCM" -> NetMapOperator[discriminatorObser2GCM], 96 | "Cat_Obser->GCM" -> CatenateLayer[], 97 | "Reshape_Obser->GCM" -> ReshapeLayer[Prepend[dim,2]], 98 | "Flat_Obser->GCM" -> ReshapeLayer[{2}], 99 | "Fake_Obser->GCM"->PartLayer[1], 100 | "Real_Obser->GCM"->PartLayer[2], 101 | "Scale_Obser->GCM" -> ConstantTimesLayer["Scaling" -> {-1, 1},LearningRateMultipliers->0], 102 | "MS_Obser->GCM"->MeanAbsoluteLossLayer[], 103 | 104 | "R_Downscaling_GCM"->RdownscaleGCM, 105 | "R_Downscaling_Obser"->RdownscaleObser, 106 | 107 | "MS_GCM_RDownscaling"->MeanSquaredLossLayer[], 108 | "MS_Obser_RDownscaling"->MeanSquaredLossLayer[], 109 | "Max_GCM_RDownscaling"->ElementwiseLayer[Max[#,DeltaGCM]-DeltaGCM &], 110 | "Max_Obser_RDownscaling"->ElementwiseLayer[Max[#,DeltaObser]-DeltaObser &], 111 | 112 | "MS_GCM2Obser_SelfRegression"->MeanAbsoluteLossLayer[], 113 | "MS_Obser2GCM_SelfRegression"->MeanAbsoluteLossLayer[] 114 | |>, 115 | 116 | {NetPort["P_GCM"] ->NetPort["Generator_GCM->Obser","P"], 117 | 
NetPort["D_GCM"]->NetPort["Generator_GCM->Obser","z"], 118 | "Generator_GCM->Obser"->"Cat_GCM->Obser", 119 | NetPort["P_Obser"] -> "Cat_GCM->Obser", 120 | "Cat_GCM->Obser" -> "Reshape_GCM->Obser" -> "Discriminator_GCM->Obser" -> "Flat_GCM->Obser" -> "Scale_GCM->Obser" -> 121 | "Fake_GCM->Obser"->NetPort["FakeLoss_GCM->Obser"], 122 | "Scale_GCM->Obser"->"Real_GCM->Obser"->NetPort["RealLoss_GCM->Obser"], 123 | "Generator_GCM->Obser"->NetPort["Cycle_Obser->GCM","P"], 124 | NetPort["D_GCM"]->NetPort["Cycle_Obser->GCM","z"], 125 | "Cycle_Obser->GCM"->"MS_GCM->Obser", 126 | NetPort["P_GCM"]->"MS_GCM->Obser"->NetPort["ReconstructionLoss_GCM->Obser"], 127 | 128 | NetPort["D_GCM"]->"R_Downscaling_Obser"->"MS_Obser_RDownscaling", 129 | "Generator_GCM->Obser"->"MS_Obser_RDownscaling"->"Max_Obser_RDownscaling"->NetPort["Loss_RDownscaling_GCM"], 130 | NetPort["D_Obser"]->"R_Downscaling_GCM"->"MS_GCM_RDownscaling", 131 | "Generator_Obser->GCM"->"MS_GCM_RDownscaling"->"Max_GCM_RDownscaling"->NetPort["Loss_RDownscaling_Obser"], 132 | 133 | NetPort["P_Obser"] ->NetPort["Generator_Obser->GCM","P"], 134 | NetPort["D_Obser"] ->NetPort["Generator_Obser->GCM","z"], 135 | "Generator_Obser->GCM"-> "Cat_Obser->GCM", 136 | NetPort["P_GCM"] -> "Cat_Obser->GCM", 137 | "Cat_Obser->GCM" -> "Reshape_Obser->GCM" -> "Discriminator_Obser->GCM" -> "Flat_Obser->GCM" -> "Scale_Obser->GCM" -> 138 | "Fake_Obser->GCM"->NetPort["FakeLoss_Obser->GCM"], 139 | "Scale_Obser->GCM"->"Real_Obser->GCM"->NetPort["RealLoss_Obser->GCM"], 140 | "Generator_Obser->GCM"->NetPort["Cycle_GCM->Obser","P"], 141 | NetPort["D_Obser"] ->NetPort["Cycle_GCM->Obser","z"], 142 | "Cycle_GCM->Obser"->"MS_Obser->GCM", 143 | NetPort["P_Obser"]->"MS_Obser->GCM"->NetPort["ReconstructionLoss_Obser->GCM"], 144 | 145 | NetPort["P_GCM"]->NetPort["Generator_Obser->GCM_SelfRegression","P"], 146 | NetPort["D_GCM"]->NetPort["Generator_Obser->GCM_SelfRegression","z"], 147 | "Generator_Obser->GCM_SelfRegression"->"MS_Obser2GCM_SelfRegression", 148 | NetPort["P_GCM"]->"MS_Obser2GCM_SelfRegression"->NetPort["Loss_Obser2GCM_SelfRegression"], 149 | 150 | NetPort["P_Obser"]->NetPort["Generator_GCM->Obser_SelfRegression","P"], 151 | NetPort["D_Obser"]->NetPort["Generator_GCM->Obser_SelfRegression","z"], 152 | "Generator_GCM->Obser_SelfRegression"->"MS_GCM2Obser_SelfRegression", 153 | NetPort["P_Obser"]->"MS_GCM2Obser_SelfRegression"->NetPort["Loss_GCM2Obser_SelfRegression"] 154 | }, 155 | "P_Obser" -> dim, 156 | "P_GCM" -> dim, 157 | "D_GCM" -> dim2, 158 | "D_Obser" -> dim2]; 159 | 160 | DiffMean=0.020; 161 | DiffVar=0.025; 162 | 163 | 164 | obserMean=Mean[validation[[;;,"P_Obser"]]][[1]]; 165 | obserVar=Variance[validation[[;;,"P_Obser"]]][[1]]; 166 | 167 | index="N"<>ToString[hype[[1]]]<>"_R"<>ToString[hype[[2]]]<>"_B"<>ToString[hype[[3]]]<>"_"<>StringSplit[CreateUUID[],"-"][[1]]; 168 | Print[index]; 169 | ReportCycleGan2[net_] := 170 | Block[{gen,dGCM,dObser,obserG,obserD,gcmD,meanDiff,varDiff,dlossGCM,dlossObser}, 171 | gen=net[["Generator_GCM->Obser"]]; 172 | obserG=Map[gen[<|"P"->#[["P_GCM"]],"z"->#[["D_GCM"]]|>,TargetDevice->"GPU"]&,validation]; 173 | meanDiff=Mean[Abs[Flatten[Mean[obserG][[1]]-obserMean]]]; 174 | varDiff=Mean[Abs[Flatten[Variance[obserG][[1]]-obserVar]]]; 175 | Print[TableForm[{{DiffMean,DiffVar}, 176 | {meanDiff,varDiff}}]]; 177 | If[meanDiff+varDiff<=DiffMean+DiffVar, 178 | Block[{}, 179 | Print[index]; 180 | Export["/g/g92/pan11/CycleGAN_"<>index<>".mx",net]; 181 | Set[{DiffMean,DiffVar},{meanDiff,varDiff}]]]]; 182 | 183 | NetTrain[cycleGAN, 184 | 
{Function[Block[{base,choice,choice2}, 185 | base=RandomSample[Range[2,length],#BatchSize]; 186 | choice=Map[Block[{daylag=RandomSample[Range[-15,15]][[1]],yearlag=RandomSample[Range[-5,5]][[1]],tempt}, 187 | tempt=#+daylag+yearlag*365; 188 | If[And[tempt>0,tempt<=length],tempt,#]]&,base]; 189 | <|"P_GCM"->nP4GCM[[base]], 190 | "D_GCM"->ndynamics4GCM[[base]], 191 | "P_Obser"->nP4Obser[[choice]], 192 | "D_Obser"->ndynamics4Obser[[choice]]|>]], "RoundLength" -> Length[nP4GCM]}, 193 | LossFunction ->{"FakeLoss_GCM->Obser"->Scaled[1],"RealLoss_GCM->Obser"->Scaled[1],"ReconstructionLoss_GCM->Obser"->Scaled[-hype[[3]]], 194 | "FakeLoss_Obser->GCM"->Scaled[1],"RealLoss_Obser->GCM"->Scaled[1],"ReconstructionLoss_Obser->GCM"->Scaled[-hype[[3]]], 195 | "Loss_Obser2GCM_SelfRegression"->Scaled[-hype[[3]]],"Loss_GCM2Obser_SelfRegression"->Scaled[-hype[[3]]], 196 | "Loss_RDownscaling_GCM"->Scaled[-hype[[3]]],"Loss_RDownscaling_Obser"->Scaled[-hype[[3]]]}, 197 | TrainingUpdateSchedule -> {"Discriminator_GCM->Obser"|"Discriminator_Obser->GCM", 198 | "Generator_GCM->Obser"|"Generator_Obser->GCM", 199 | "Cycle_GCM->Obser"|"Cycle_Obser->GCM", 200 | "Generator_GCM->Obser_SelfRegression"|"Generator_Obser->GCM_SelfRegression"}, 201 | LearningRateMultipliers -> {"Scale_GCM->Obser" -> 0, "Scale_Obser->GCM" -> 0, 202 | "Generator_GCM->Obser" -> -1,"Generator_Obser->GCM"->-1, 203 | "Generator_GCM->Obser_SelfRegression" -> -1,"Generator_Obser->GCM_SelfRegression"->-1, 204 | "Cycle_GCM->Obser"->-1,"Cycle_Obser->GCM"->-1, 205 | "Discriminator_Obser->GCM"->1,"Discriminator_GCM->Obser"->1, 206 | "R_Downscaling_GCM"->0,"R_Downscaling_Obser"->0}, 207 | BatchSize -> 32, 208 | TargetDevice->"GPU", 209 | MaxTrainingRounds->400, 210 | Method -> {"ADAM", "Beta1" -> 0.5, "LearningRate" -> 10^-4, 211 | "WeightClipping" -> {"Discriminator_Obser->GCM"-> hype[[2]]/100.,"Discriminator_GCM->Obser"->hype[[2]]/100.}}, 212 | TrainingProgressReporting -> {{Function@ReportCycleGan2[#Net], "Interval" -> Quantity[300, "Batches"]},"Print"}] 213 | -------------------------------------------------------------------------------- /Code/TuringTest.m: -------------------------------------------------------------------------------- 1 | voteStatisticsCorrection=Table[Block[{p=RandomReal[{0,1}]}, 2 | ChoiceDialog[If[p<1/2,plot[dadt[[RandomSample[Range[Length[dadt]]][[1]]]],plat4GCM,plon4GCM], 3 | plot[obser[[RandomSample[Range[Length[obser]]][[1]]]],plat4GCM,plon4GCM]], 4 | {"Observation"->If[p>=1/2,{1,1},{1,0}], 5 | "Simulation"->If[p<1/2,{0,0},{0,1}]}]],{i,50}]; 6 | Export["/Users/pan11/Documents/CycleGAN/Results/Pamler_Turing_Correction_"<>CreateUUID[]<>".mx",voteStatisticsCorrection]; (*save the correction-vote tally, mirroring the raw-vote export below*) 7 | voteStatisticsRaw=Table[Block[{p=RandomReal[{0,1}]}, 8 | ChoiceDialog[If[p<1/2,plot[gcm[[RandomSample[Range[Length[gcm]]][[1]]]],plat4GCM,plon4GCM], 9 | plot[obser[[RandomSample[Range[Length[obser]]][[1]]]],plat4GCM,plon4GCM]], 10 | {"Observation"->If[p>=1/2,{1,1},{1,0}], 11 | "Simulation"->If[p<1/2,{0,0},{0,1}]}]],{i,50}]; 12 | Export["/Users/pan11/Documents/CycleGAN/Results/Pamler_Turing_Raw_"<>CreateUUID[]<>".mx",voteStatisticsRaw]; 13 | -------------------------------------------------------------------------------- /Fig/Fig_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Fig/Fig_1.png -------------------------------------------------------------------------------- /Fig/Fig_3.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Fig/Fig_3.png -------------------------------------------------------------------------------- /Fig/Fig_4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Fig/Fig_4.png -------------------------------------------------------------------------------- /Fig/Fig_5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Fig/Fig_5.png -------------------------------------------------------------------------------- /Fig/Fig_6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Fig/Fig_6.png -------------------------------------------------------------------------------- /Fig/Fig_7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Fig/Fig_7.png -------------------------------------------------------------------------------- /Fig/Fig_8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Fig/Fig_8.png -------------------------------------------------------------------------------- /Fig/LOGO.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Fig/LOGO.png -------------------------------------------------------------------------------- /Fig/fig_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Fig/fig_10.png -------------------------------------------------------------------------------- /Fig/fig_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Fig/fig_2.png -------------------------------------------------------------------------------- /Fig/fig_9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Fig/fig_9.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 lambda 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Logo.m: -------------------------------------------------------------------------------- 1 | urange = 10Pi; vrange = 1.5; t = 10;rec=6; dis=Sqrt[5]-1; 2 | f[u_, v_] = #/Sqrt[# . #] &@{Cos[u], Sin[u], (u + v)/t}; 3 | {r, \[Theta], \[CurlyPhi]} = ToSphericalCoordinates[{x, y, z}] /. Thread[{x, y, z} -> f[u, v]] //FullSimplify; 4 | map1=Rasterize[Block[{range=ArcSin[f[urange,vrange][[-1]]]/Pi*180.}, 5 | GeoGraphics[,GeoBackground->"Satellite",GeoRange->{{-range, range}, {-180., 180.}}, 6 | PlotRangePadding->None,ImagePadding->None,Frame->False,Axes->False,GeoRangePadding->None,GeoZoomLevel->4]],RasterSize->5000]; 7 | 8 | {sgrid,ogrid}=Block[{result,lat,lon,grid,testpoint,poly}, 9 | lat=Range[-90,90,1]; 10 | lon=Range[-180,180,1]; 11 | grid=Flatten[Table[{lat[[i]],lon[[j]]},{i,Length[lat]},{j,Length[lon]}],1]; 12 | testpoint[poly_, pt_] := And[pt[[1]]>Min[poly[[;;,1]]],pt[[1]]Min[poly[[;;,2]]],pt[[2]]Or,{i,Length[grid]}]; 17 | {grid[[Position[result,True][[;;,1]]]],grid[[Position[result,False][[;;,1]]]]}]; 18 | (*Export["/Users/pan11/Documents/PROUD/Logo/sgrid.mx",<|"continent"\[Rule]sgrid,"ocean"\[Rule]ogrid|>]*) 19 | (*{sgrid,ogrid}=Values[Import["/Users/pan11/Documents/PROUD/Logo/sgrid.mx"]];*) 20 | 21 | map2=Rasterize[Block[{range=ArcSin[f[urange,vrange][[-1]]]/Pi*180.}, 22 | GeoGraphics[{Table[GeoMarker[GeoPosition[sgrid[[i]]],Style[RandomSample[{"0","1"}][[1]], 23 | {2,Bold,Opacity[RandomReal[{.2,1}]],RGBColor[{0.18,0.443,0.13}],FontFamily->"Silom"}]],{i,1,Length[sgrid]}], 24 | Table[GeoMarker[GeoPosition[ogrid[[i]]],Style[RandomSample[{"0","1"}][[1]], 25 | {2,Bold,Opacity[RandomReal[{.2,1}]],RGBColor[{0.03,0.09,0.21}],FontFamily->"Silom"}]],{i,1,Length[ogrid]}]}, 26 | GeoBackground->{RGBColor[{0.03,0.09,0.21}],Opacity[0.9]}, 27 | GeoRange->{{-range, range}, {-180., 180.}}, 28 | PlotRangePadding->None,ImagePadding->None,Frame->False,Axes->False,GeoRangePadding->None,GeoZoomLevel->4]],RasterSize->5000]; 29 | 30 | left=ParametricPlot3D[f[u, v], {u, -urange+Pi/2, urange-Pi/2}, {v, -vrange, vrange}, 31 | Mesh->None, 32 | Lighting->"Neutral", 33 | MaxRecursion -> rec, 34 | Axes -> False, Boxed -> False, TextureCoordinateScaling -> False, 35 | TextureCoordinateFunction -> Function[{x, y, z, u, v}, {\[CurlyPhi]/(2 \[Pi]), -\[Theta]/\[Pi]}], 36 | PlotStyle -> Texture[map1], 37 | PlotPoints -> 30,PlotRange->Full,ImageSize->400,Lighting->"Neutral"]; 38 | right=Block[{tempt}, 39 | tempt=ParametricPlot3D[f[u, v]*{1,1,-1}, {u, -urange-Pi/2,urange+Pi/2}, {v, -vrange, vrange}, 40 | Mesh->None, 41 | Lighting->"Neutral", 42 | MaxRecursion -> rec, 43 | Axes -> False, Boxed -> False, TextureCoordinateScaling -> False, 44 | TextureCoordinateFunction -> Function[{x, y, z, u, v}, {\[CurlyPhi]/(2 \[Pi]), \[Theta]/\[Pi]}], 45 | PlotStyle -> Texture[map2], 46 | PlotPoints -> 30,PlotRange->Full]; 47 | Graphics3D[Translate[First@tempt,{dis, 0, 0}],ImageSize->400]]; 48 | joint=ParametricPlot3D[ 49 | {x*(f[urange-Pi/2, 
v+2vrange]+{dis,0,0}-f[urange-Pi/2, v])+f[urange-Pi/2, v], 50 | x*(f[-urange+Pi/2, v-2vrange]+{dis,0,0}-f[-urange+Pi/2, v])+f[-urange+Pi/2, v]}, 51 | {x,0,1},{v,-vrange,vrange},MaxRecursion->rec,Mesh->None,PlotStyle->{RGBColor[(*{0.125,0.164,0.07}*){0.03,0.09,0.21}],RGBColor[{0.03,0.09,0.21}]}]; 52 | result=Show[{left,right,joint},PlotRange->Full,ViewPoint->{1, -2, 1},ImageSize->700] 53 | -------------------------------------------------------------------------------- /Paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Paper.pdf -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # Learning to correct climate projection biases 3 | [**Paper in Journal of Advances in Modeling Earth Systems, 2021**](https://arxiv.org/abs/2108.02774) 4 | 5 | 6 |
7 | Baoxiang Pan<sup>1</sup>, Gemma Anderson<sup>1</sup>, Andre Goncalves<sup>1</sup>, Donald Lucas<sup>1</sup>, Celine Bonfils<sup>1</sup>, Jiwoo Lee<sup>1</sup>, Yang Tian<sup>1</sup>, Hsi-yen Ma<sup>1</sup> 8 | 
Lawrence Livermore National Lab<sup>1</sup> 9 | 10 | 11 | ## Overview 12 | This work develops the Regularized Adversarial Domain Adaptation (RADA) methodology to correct historical climate projection biases. RADA is built on 13 | generative adversarial nets (GANs, Goodfellow et al. 2014), adding various constraints to guarantee physical coherency. The general idea is illustrated as follows: 14 | 
15 | 16 | 17 | In the top panel, a GAN is applied to generate cat samples. In the bottom panel, we use the same adversarial learning idea to match samples from a source domain (climate simulations) to corresponding samples in a target domain (climate observations). This line of research is often called **domain adaptation** in the machine learning literature. We cannot apply supervised learning here: climate is a chaotic system, so we cannot obtain sufficient paired observation and simulation data to identify and correct climate model biases. 18 | 19 | 24 | 25 | To guarantee physical coherency, we translate the three requirements Ehret et al. (2012) proposed for a "perfect" climate model bias corrector into three regularization terms for the adversarial domain adaptation process, namely cycle consistency (Zhu et al. 2017), dynamical consistency (Pan et al. 2019), and dynamical dependency. The final model is illustrated as follows: 26 | 27 | 
28 | 29 | 30 | Below we provide guidance for applying RADA to correct the Community Earth System Model version 2 (CESM2, Danabasoglu et al. 2020) daily precipitation projection over the contiguous United States (CONUS). 31 | 32 | ## Getting started 33 | ### Obtain the historical climate simulation and observation data 34 | 35 | The Community Earth System Model version 2 (CESM2) historical simulation data are available through https://esgf-node.llnl.gov/projects/cmip6/. 36 | 37 | The CPC unified gauge-based analysis of daily precipitation data are available through https://psl.noaa.gov/data/gridded/data.cpc.globalprecip.html. 38 | 39 | The ECMWF atmospheric reanalysis of the 20th century (ERA-20C) data are available through https://www.ecmwf.int/en/forecasts/datasets/reanalysis-datasets/era-20c. 40 | 41 | Cut, regrid, and normalize the data: 42 | 43 | ```mathematica 44 | math -script Data_Processing.m 45 | ``` 46 | 47 | #### Train the dynamical regularization module 48 | 49 | ```mathematica 50 | math -script Dynamical_Regularization.m 51 | ``` 52 | 53 | #### Train the RADA bias corrector 54 | 55 | ```mathematica 56 | math -script RADA.m 57 | ``` 58 | 59 | #### Compare with the baselines 60 | Here `n` indexes the baseline variants in `Code/Baseline/` (`Baseline_0` through `Baseline_16`). 61 | ```mathematica 62 | math -script Baseline/Baseline_n.m 63 | ``` 64 | 65 | #### Evaluation 66 | 67 | ```mathematica 68 | math -script Evaluation.m 69 | ``` 70 | A short sketch of how a trained generator is applied to the test set is given after the Acknowledgments below. 71 | ## Acknowledgments 72 | 73 | This work was performed under the auspices of the U.S. Department of Energy by Lawrence Livermore National Laboratory under contract DE-AC52-07NA27344. Lawrence Livermore National Security, LLC. The views expressed here do not necessarily reflect the opinion of the United States Government, the United States Department of Energy, or Lawrence Livermore National Laboratory. This work was supported by LLNL Laboratory Directed Research and Development project 19-ER-032. This document is released with IM tracking number LLNL-JRNL-817982.
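#### Applying a trained corrector (sketch)

The snippet below is a minimal sketch rather than one of the repository scripts. The model path and the `test` list are placeholders: `test` is assumed to be the list of associations built as in `Code/Evaluation.m`, and the `.mx` file is one exported by `RADA.m` during training. The sub-network name `"Generator_GCM->Obser"`, the input ports `"P"` and `"z"`, and the `Exp[#]-1` back-transform of the log-normalized precipitation follow the code in this repository.

```mathematica
rada = Import["/path/to/CycleGAN_<index>.mx"];   (* placeholder path: a model exported by RADA.m *)
gen = rada[["Generator_GCM->Obser"]];            (* the GCM -> observation generator sub-network *)
corrected = Map[gen[<|"P" -> #[["P_GCM"]], "z" -> #[["D_GCM"]]|>, TargetDevice -> "GPU"] &, test];
precip = Exp[corrected[[;; , 1]]] - 1.;          (* undo the Log[p*24*3600 + 1] normalization *)
```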
74 | 75 | 88 | -------------------------------------------------------------------------------- /Result/TuringTest/Pamler_Turing_Correction_30fc8b38-8d10-44f6-a1d9-e06cc91112b5.mx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Result/TuringTest/Pamler_Turing_Correction_30fc8b38-8d10-44f6-a1d9-e06cc91112b5.mx -------------------------------------------------------------------------------- /Result/TuringTest/Pamler_Turing_Correction_35e614b0-1eeb-4730-971d-250635ca07ae.mx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Result/TuringTest/Pamler_Turing_Correction_35e614b0-1eeb-4730-971d-250635ca07ae.mx -------------------------------------------------------------------------------- /Result/TuringTest/Pamler_Turing_Correction_6fb78c7f-b00d-45cd-8d63-ffedf844dbc1.mx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Result/TuringTest/Pamler_Turing_Correction_6fb78c7f-b00d-45cd-8d63-ffedf844dbc1.mx -------------------------------------------------------------------------------- /Result/TuringTest/Pamler_Turing_Correction_887cf557-90b4-47b4-8fc8-02281e2f0585.mx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Result/TuringTest/Pamler_Turing_Correction_887cf557-90b4-47b4-8fc8-02281e2f0585.mx -------------------------------------------------------------------------------- /Result/TuringTest/Pamler_Turing_Raw_186c034c-3536-4684-9c6f-6e662e481ce6.mx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Result/TuringTest/Pamler_Turing_Raw_186c034c-3536-4684-9c6f-6e662e481ce6.mx -------------------------------------------------------------------------------- /Result/TuringTest/Pamler_Turing_Raw_1e8866c2-75f1-4dcb-aba2-7d42c1709f5c.mx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Result/TuringTest/Pamler_Turing_Raw_1e8866c2-75f1-4dcb-aba2-7d42c1709f5c.mx -------------------------------------------------------------------------------- /Result/TuringTest/Pamler_Turing_Raw_21cde7a7-a29a-4f48-a89e-94facc62d692.mx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Result/TuringTest/Pamler_Turing_Raw_21cde7a7-a29a-4f48-a89e-94facc62d692.mx -------------------------------------------------------------------------------- /Result/TuringTest/Pamler_Turing_Raw_6737e043-3315-407a-8001-bbd4b42c8e10.mx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/panbaoxiang/RADA/8a65defcb954e3253a81ad792394c6dcf08caf7b/Result/TuringTest/Pamler_Turing_Raw_6737e043-3315-407a-8001-bbd4b42c8e10.mx -------------------------------------------------------------------------------- /unet.m: -------------------------------------------------------------------------------- 1 | size={32,32}; 2 | c=32; 3 | depth=3; 4 | 5 | 
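(* Building blocks defined below:
   res[c]        - residual block of two 3x3 convolutions with a 1x1 convolution shortcut;
   upres[c,size] - upsampling counterpart (resize + convolutions, with a resized 1x1 shortcut);
   contract[c]   - applies a res block, exposes a cropped copy as the skip connection ("Shortcut")
                   and downsamples with 2x2 mean pooling ("Pooling");
   expand[c,size]- upsamples with upres, concatenates the skip connection, then applies res[c/2];
   UNet          - wires depth contract/expand pairs around a res bottleneck ("ubase"). *)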
res[c_]:=NetGraph[<|"long"->Flatten[Table[{ConvolutionLayer[c,{3,3},"PaddingSize"->1],NormalizationLayer[],Ramp},2]][[1;;-2]], 6 | "plus"->TotalLayer[], 7 | "short"->ConvolutionLayer[c,{1,1}]|>, 8 | {NetPort["Input"]->"long"->"plus",NetPort["Input"]->"short"->"plus"}] 9 | 10 | upres[c_,size_]:=NetGraph[<|"long"->{NormalizationLayer[],Ramp,ResizeLayer[size],ConvolutionLayer[c,{3,3},"PaddingSize"->1], 11 | NormalizationLayer[],Ramp,ConvolutionLayer[c,{3,3},"PaddingSize"->1]}, 12 | "plus"->TotalLayer[], 13 | "short"->{ResizeLayer[size],ConvolutionLayer[c,{1,1}]}|>, 14 | {NetPort["Input"]->"long"->"plus",NetPort["Input"]->"short"->"plus"}] 15 | 16 | contract[channel_,crop_:{{1,1},{1,1}}]:=NetGraph[{"conv"->res[channel],"pooling"->PoolingLayer[2,2,"Function"->Mean], 17 | "cropping"->PartLayer[{;;,crop[[1,1]];;-crop[[1,-1]],crop[[2,1]];;-crop[[2,-1]]}]}, 18 | {NetPort["Input"]->"conv"->"pooling"->NetPort["Pooling"],"conv"->"cropping"->NetPort["Shortcut"]}]; 19 | 20 | expand[channel_,size_]:=NetGraph[{"deconv"->upres[channel,size], 21 | "join"->CatenateLayer[], 22 | "conv"->res[channel/2]}, 23 | {NetPort["Input"]->"deconv"->"join", 24 | NetPort["Shortcut"]->"join"->"conv"}]; 25 | 26 | UNet=NetGraph[<|Table[{"contract_"<>ToString[i]->contract[c*2^(i-1)], 27 | "expand_"<>ToString[i]->expand[c*2^(i-1),size/2^(i-1)]},{i,depth}], 28 | "ubase"->res[c*(depth+1)]|>, 29 | Flatten[Table[{NetPort["contract_"<>ToString[i],"Pooling"]->If[i<depth,"contract_"<>ToString[i+1],"ubase"->NetPort["expand_"<>ToString[depth],"Input"]], 30 | NetPort["contract_"<>ToString[i],"Shortcut"]->NetPort["expand_"<>ToString[i],"Shortcut"], 31 | NetPort["expand_"<>ToString[i],"Output"]->If[i>1,NetPort["expand_"<>ToString[i-1],"Input"],NetPort["Output"]]},{i,depth}]]] 32 | --------------------------------------------------------------------------------