├── A multi-frequency
│   ├── Hard Constraint
│   │   ├── 1.mat
│   │   └── Hard Constraints MFF.py
│   ├── MFF
│   │   ├── 1.mat
│   │   └── MFF.py
│   ├── MMPINN-INN
│   │   ├── 1.mat
│   │   └── MMPINN-INN.py
│   └── MMPINN-MFF
│       ├── 1.mat
│       └── MMPINN-MFF.py
├── A problem with dramatic variations in different subdomains
│   ├── MMPINN-DNN-GRS
│   │   ├── MMPINN-DNN-GRS.ipynb
│   │   ├── log1.txt
│   │   ├── log2.txt
│   │   ├── log3.txt
│   │   ├── plotting.py
│   │   └── uu.mat
│   ├── MMPINN-DNN
│   │   ├── MMPINN-DNN.ipynb
│   │   ├── log1.txt
│   │   ├── log2.txt
│   │   ├── log3.txt
│   │   ├── plotting.py
│   │   └── uu.mat
│   ├── PINN(1000k Adam)
│   │   ├── PINN(1000K Adam).py
│   │   ├── log1.txt
│   │   ├── log2.txt
│   │   ├── log3.txt
│   │   └── uu.mat
│   └── PINN
│       ├── PINN.ipynb
│       ├── log1.txt
│       ├── log2.txt
│       ├── log3.txt
│       ├── plotting.py
│       └── uu.mat
├── A typical multi-scale problem
│   ├── MFF+hard constraint
│   │   ├── 1.mat
│   │   ├── Compute_Jacobian.py
│   │   ├── MFF+hard constraint.ipynb
│   │   └── models_tf.py
│   ├── MFF
│   │   ├── 1.mat
│   │   ├── Compute_Jacobian.py
│   │   ├── MFF.ipynb
│   │   └── models_tf.py
│   ├── MMPINN
│   │   ├── Ref
│   │   │   ├── 1.mat
│   │   │   ├── Compute_Jacobian.py
│   │   │   ├── MMPINN-MFF.ipynb
│   │   │   └── models_tf.py
│   │   └── others
│   │       ├── 1.mat
│   │       ├── Compute_Jacobian.py
│   │       ├── MMPINN-MFF.ipynb
│   │       └── models_tf.py
│   ├── PINN(1000k Adam)
│   │   ├── 1.mat
│   │   ├── 1000k PINN.ipynb
│   │   ├── Compute_Jacobian.py
│   │   └── models_tf.py
│   └── PINN
│       ├── 1.mat
│       ├── Compute_Jacobian.py
│       ├── PINN.ipynb
│       └── models_tf.py
├── Helmholtz equation
│   ├── Hard Constraint
│   │   ├── Hard Constraints PINN.ipynb
│   │   └── plotting.py
│   ├── LRA with IFNN
│   │   ├── Helmholtz2D_model_tf.py
│   │   └── LRA with IFNN.ipynb
│   ├── MFF
│   │   ├── 1.mat
│   │   ├── MFF.ipynb
│   │   └── plotting.py
│   ├── MMPINN-INN
│   │   ├── MMPINN-INN.ipynb
│   │   └── plotting.py
│   ├── MMPINN-MFF
│   │   ├── MMPINN-MFF.ipynb
│   │   └── plotting.py
│   └── SAPINN
│       ├── 1.mat
│       ├── SAPINN.ipynb
│       ├── eager_lbfgs.py
│       ├── helmholtz-NTK.py
│       └── plotting.py
├── Klein–Gordon equation
│   ├── a10
│   │   ├── LRA with IFNN
│   │   │   ├── Klein_Gordon_model_tf.py
│   │   │   ├── LRA with IFNN.ipynb
│   │   │   └── plotting.py
│   │   ├── MMPINN-INN
│   │   │   ├── Klein_Gordon_model_tf.py
│   │   │   └── MMPINN-INN-kg.ipynb
│   │   ├── MMPINN-MFF
│   │   │   ├── Klein_Gordon_model_tf.py
│   │   │   ├── MMPINN-MFF.ipynb
│   │   │   └── plotting.py
│   │   └── PINN
│   │       ├── Klein_Gordon_model_tf.py
│   │       └── PINN.ipynb
│   └── a5
│       ├── LRA with IFNN
│       │   ├── Klein_Gordon_model_tf.py
│       │   └── LEA with IFNN.ipynb
│       ├── MMPINN-INN
│       │   ├── Klein_Gordon_model_tf.py
│       │   └── MMPINN-INN.ipynb
│       ├── MMPINN-MFF
│       │   ├── Klein_Gordon_model_tf.py
│       │   └── MMPINN-MFF.ipynb
│       └── PINN
│           ├── Klein_Gordon_model_tf.py
│           └── PINN.ipynb
├── README.md
└── heat conduction problem
    ├── 0.11
    │   └── MMPINN
    │       ├── 0.11MMPINN.py
    │       ├── log1.txt
    │       ├── log2.txt
    │       ├── log3.txt
    │       ├── log5.txt
    │       ├── log6.txt
    │       └── log7.txt
    ├── 0.15
    │   ├── Hard Constraint
    │   │   ├── 0.3hard constraint.ipynb
    │   │   ├── 3.mat
    │   │   ├── S
    │   │   ├── log1.txt
    │   │   ├── log2.txt
    │   │   ├── log3.txt
    │   │   ├── log5.txt
    │   │   ├── log6.txt
    │   │   ├── log7.txt
    │   │   └── plotting.py
    │   ├── MMPINN
    │   │   ├── 0.15MMPINN.ipynb
    │   │   └── plotting.py
    │   ├── PINN (1000K Adam)
    │   │   ├── 0.15PINN(1000k Adam).py
    │   │   ├── 1.mat
    │   │   ├── 2.mat
    │   │   ├── 3.mat
    │   │   ├── log1.txt
    │   │   ├── log2.txt
    │   │   ├── log3.txt
    │   │   ├── log5.txt
    │   │   ├── log6.txt
    │   │   └── log7.txt
    │   ├── PINN
    │   │   ├── 0.15PINN.ipynb
    │   │   └── plotting.py
    │   └── SAPINN
    │       ├── 0.15SAPINN.ipynb
    │       └── plotting.py
    └── 0.3
        ├── MMPINN
        │   ├── 0.3MMPINN.ipynb
        │   └── plotting.py
        └── PINN
            ├── 0.3PINN.ipynb
            └── plotting.py

/A multi-frequency/Hard Constraint/1.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A multi-frequency/Hard Constraint/1.mat
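The 1.mat / uu.mat entries in this dump are binary MATLAB data files and appear only as raw-URL placeholders. Below is a minimal sketch, not part of the repository, for downloading one of them and listing the variables it stores with scipy.io.loadmat; the variable names inside 1.mat are not documented here, so printing the keys is the safest way to discover them (the training scripts further down save their predictions as {'u': u_pred}).

import io
from urllib.parse import quote
from urllib.request import urlopen

import scipy.io

# Raw URL of the data file listed above; the path contains spaces, so it must be
# percent-encoded before the request.
url = ("https://raw.githubusercontent.com/wangyong1301108/MMPINN/"
       "91238ca91c46571fdb270c3dfa64e85ff602fa3c/"
       "A multi-frequency/Hard Constraint/1.mat")

with urlopen(quote(url, safe=":/")) as resp:
    mat = scipy.io.loadmat(io.BytesIO(resp.read()))

# Keys starting with "__" are metadata added by MATLAB/scipy; the remaining keys
# are the stored arrays (e.g. a reference solution grid).
print([k for k in mat if not k.startswith("__")])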
-------------------------------------------------------------------------------- /A multi-frequency/MFF/1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A multi-frequency/MFF/1.mat -------------------------------------------------------------------------------- /A multi-frequency/MMPINN-INN/1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A multi-frequency/MMPINN-INN/1.mat -------------------------------------------------------------------------------- /A multi-frequency/MMPINN-MFF/1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A multi-frequency/MMPINN-MFF/1.mat -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/MMPINN-DNN-GRS/log1.txt: -------------------------------------------------------------------------------- 1 | 266.01114 2 | 277.7695 3 | 286.23776 4 | 2146.129 5 | 687.1554 6 | 1527.48 7 | 17397.12 8 | 9073.963 9 | 991.3972 10 | 661.9428 11 | 119.41741 12 | 35.060555 13 | 29.403793 14 | 28.182795 15 | 28.698843 16 | 35.267483 17 | 32.905655 18 | 51.278187 19 | 269.42197 20 | 2503.2207 21 | 4148.1616 22 | 2788.6206 23 | 1147.1963 24 | 469.88565 25 | 265.10242 26 | 127.8602 27 | 80.49158 28 | 65.09655 29 | 57.40058 30 | 36.354275 31 | 30.230019 32 | 27.257273 33 | 14.5581875 34 | 14.324184 35 | 12.20742 36 | 11.986361 37 | 11.488605 38 | 6.6951475 39 | 12.583529 40 | 7.3191376 41 | 4.7035985 42 | 7.0828524 43 | 7.1335654 44 | 6.4952526 45 | 6.012248 46 | 5.1701713 47 | 4.5081697 48 | 3.8784614 49 | 3.7252219 50 | 3.9189394 51 | 4.1351876 52 | 4.267532 53 | 4.3312383 54 | 4.328648 55 | 4.249485 56 | 4.059042 57 | 3.8223727 58 | 3.79123 59 | 3.5872164 60 | 3.180682 61 | 2.6903784 62 | 2.317608 63 | 2.1759043 64 | 2.0922096 65 | 2.0339499 66 | 2.0054393 67 | 2.0091548 68 | 2.0215983 69 | 1.9882956 70 | 1.9539869 71 | 1.936368 72 | 1.952752 73 | 2.0065384 74 | 2.0865266 75 | 2.146408 76 | 2.1305535 77 | 2.0374568 78 | 1.8976785 79 | 1.744459 80 | 1.6107224 81 | 1.5334774 82 | 1.5007392 83 | 1.4619257 84 | 1.4226915 85 | 1.4007584 86 | 1.3620201 87 | 1.2970111 88 | 1.2269113 89 | 1.1795462 90 | 1.1694895 91 | 1.1164061 92 | 1.0804687 93 | 1.0447303 94 | 1.0176243 95 | 0.9928347 96 | 0.9912986 97 | 0.95760196 98 | 0.94951 99 | 0.9575778 100 | 0.9016031 101 | 112619.39 102 | 15575.667 103 | 2003.4148 104 | 84.140526 105 | 156.2625 106 | 53.732414 107 | 28.533781 108 | 14.352647 109 | 14.054759 110 | 9.004034 111 | 5.92774 112 | 17.50054 113 | 32.315063 114 | 45.27715 115 | 34.288914 116 | 33.872738 117 | 31.911575 118 | 25.33207 119 | 15.572528 120 | 7.0632095 121 | 5.9095035 122 | 2.1050344 123 | 2.1452935 124 | 2.3107193 125 | 2.4274328 126 | 2.9996293 127 | 2.8793118 128 | 2.9715908 129 | 3.0544245 130 | 3.7388663 131 | 3.2790422 132 | 2.7554617 133 | 2.4120245 134 | 2.7286785 135 | 2.4125042 136 | 2.1397018 137 | 2.4283404 138 | 1.929116 139 | 1.8896916 140 | 1.8972465 141 | 1.8248258 142 | 1.743585 143 | 1.6646307 144 | 1.5071676 145 | 1.4400665 146 | 1.5317684 147 | 1.3313265 148 | 1.3605064 149 | 1.3461288 150 | 1.4115441 151 | 1.5374185 152 | 1.4630551 153 | 1.4246743 154 | 1.4021884 155 | 
1.3229597 156 | 1.2561158 157 | 1.2354463 158 | 1.309194 159 | 1.2855678 160 | 1.3843862 161 | 1.3168235 162 | 1.2798561 163 | 1.272806 164 | 1.2945403 165 | 1.3002087 166 | 1.2524393 167 | 1.2098747 168 | 1.1415621 169 | 1.0053056 170 | 0.963925 171 | 0.97768885 172 | 0.9606592 173 | 0.9055353 174 | 0.8700266 175 | 0.8276066 176 | 0.8001551 177 | 0.82300586 178 | 0.78651077 179 | 0.7790297 180 | 0.7310487 181 | 0.7187177 182 | 0.7278071 183 | 0.76022273 184 | 0.7295517 185 | 0.73975086 186 | 0.7694147 187 | 0.77254766 188 | 0.7767711 189 | 0.7565771 190 | 0.73276573 191 | 0.7443133 192 | 0.734482 193 | 0.73394805 194 | 0.7429279 195 | 0.74778324 196 | 0.7634485 197 | 0.7765217 198 | 0.77160186 199 | 0.7590331 200 | 0.77142215 201 | 0.7716025 202 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/MMPINN-DNN-GRS/log2.txt: -------------------------------------------------------------------------------- 1 | 415700320000.0 2 | 415700320000.0 3 | 415700300000.0 4 | 415700400000.0 5 | 415700320000.0 6 | 415700350000.0 7 | 415700580000.0 8 | 415702480000.0 9 | 415709630000.0 10 | 415753180000.0 11 | 415785500000.0 12 | 415827950000.0 13 | 415864620000.0 14 | 415902800000.0 15 | 415955700000.0 16 | 416023200000.0 17 | 416083940000.0 18 | 416166940000.0 19 | 416283600000.0 20 | 416401460000.0 21 | 416799520000.0 22 | 417350880000.0 23 | 418107420000.0 24 | 418782700000.0 25 | 419484930000.0 26 | 420285380000.0 27 | 421065230000.0 28 | 422114600000.0 29 | 423185250000.0 30 | 424283830000.0 31 | 425470900000.0 32 | 426419320000.0 33 | 427672830000.0 34 | 429431550000.0 35 | 431468900000.0 36 | 433785900000.0 37 | 436058100000.0 38 | 438576780000.0 39 | 441633670000.0 40 | 444036350000.0 41 | 445713100000.0 42 | 446882900000.0 43 | 447392680000.0 44 | 448083720000.0 45 | 449328500000.0 46 | 450595400000.0 47 | 451235800000.0 48 | 451200800000.0 49 | 451133180000.0 50 | 451359500000.0 51 | 451816900000.0 52 | 452438950000.0 53 | 453219350000.0 54 | 454193550000.0 55 | 455363500000.0 56 | 456702560000.0 57 | 458070400000.0 58 | 459227370000.0 59 | 460291200000.0 60 | 461267730000.0 61 | 461311970000.0 62 | 461084330000.0 63 | 461503140000.0 64 | 462184220000.0 65 | 462912980000.0 66 | 463659340000.0 67 | 464425060000.0 68 | 465203700000.0 69 | 465966100000.0 70 | 466693400000.0 71 | 467445450000.0 72 | 468262850000.0 73 | 469140730000.0 74 | 470032100000.0 75 | 470812820000.0 76 | 471461460000.0 77 | 472042400000.0 78 | 472650940000.0 79 | 473315700000.0 80 | 474015800000.0 81 | 474724660000.0 82 | 475425700000.0 83 | 476118480000.0 84 | 476807950000.0 85 | 477496640000.0 86 | 478205580000.0 87 | 478942860000.0 88 | 479693470000.0 89 | 480456470000.0 90 | 481244740000.0 91 | 482042120000.0 92 | 482860040000.0 93 | 483681600000.0 94 | 484510920000.0 95 | 485339140000.0 96 | 486174260000.0 97 | 486982600000.0 98 | 487772780000.0 99 | 488521400000.0 100 | 489210250000.0 101 | 3708371900000.0 102 | 4568903000000.0 103 | 1411018300000.0 104 | 300879380000.0 105 | 92828300000.0 106 | 32154595000.0 107 | 7635458600.0 108 | 2807530000.0 109 | 1005390850.0 110 | 495023200.0 111 | 296548670.0 112 | 190751900.0 113 | 105137240.0 114 | 58836116.0 115 | 45734852.0 116 | 37340036.0 117 | 32385682.0 118 | 27959394.0 119 | 23475270.0 120 | 20407484.0 121 | 15331272.0 122 | 12954200.0 123 | 11489509.0 124 | 9425053.0 125 | 7824536.5 126 | 6802553.0 127 | 5838521.0 128 | 5169533.0 129 | 4653938.5 130 | 3863610.5 131 | 3524538.2 
132 | 3308545.5 133 | 3043075.5 134 | 2762646.0 135 | 2383744.0 136 | 2183200.2 137 | 1985902.1 138 | 1878056.5 139 | 1623104.9 140 | 1536888.1 141 | 1423996.1 142 | 1368057.0 143 | 1302436.0 144 | 1201813.9 145 | 1159095.2 146 | 1110640.8 147 | 1075297.1 148 | 1026821.3 149 | 990608.2 150 | 946471.1 151 | 889290.75 152 | 822944.44 153 | 783261.94 154 | 749525.0 155 | 714194.06 156 | 676334.0 157 | 642966.5 158 | 605982.6 159 | 580824.75 160 | 554100.3 161 | 532366.44 162 | 514996.84 163 | 505778.53 164 | 499321.84 165 | 491201.94 166 | 486211.03 167 | 472431.66 168 | 456818.62 169 | 446744.28 170 | 438157.0 171 | 427789.0 172 | 418308.88 173 | 400578.75 174 | 383172.44 175 | 370149.88 176 | 351683.0 177 | 338035.3 178 | 335345.3 179 | 323108.47 180 | 311511.97 181 | 289593.25 182 | 268233.6 183 | 257989.14 184 | 249329.73 185 | 239808.16 186 | 230540.14 187 | 226805.78 188 | 215539.05 189 | 206784.72 190 | 202467.08 191 | 197310.19 192 | 189132.84 193 | 185426.44 194 | 183055.03 195 | 177321.75 196 | 172185.12 197 | 167796.7 198 | 163892.69 199 | 3969823.5 200 | 163892.22 201 | 163925.7 202 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/MMPINN-DNN-GRS/log3.txt: -------------------------------------------------------------------------------- 1 | 266.7619 2 | 278.2181 3 | 281.45242 4 | 273.2428 5 | 270.00766 6 | 271.439 7 | 286.53394 8 | 336.2842 9 | 326.98294 10 | 282.04807 11 | 276.0326 12 | 272.83936 13 | 270.6832 14 | 269.95697 15 | 269.87408 16 | 270.29733 17 | 271.27866 18 | 282.4342 19 | 2979.6235 20 | 29782.605 21 | 20302.182 22 | 20640.64 23 | 18867.402 24 | 16955.297 25 | 14942.416 26 | 10042.701 27 | 6753.2544 28 | 3884.7927 29 | 931.2626 30 | 303.09454 31 | 216.11964 32 | 174.06474 33 | 139.76547 34 | 119.261024 35 | 77.59035 36 | 50.948883 37 | 53.78359 38 | 26.788868 39 | 17.425032 40 | 11.346147 41 | 10.425387 42 | 10.002205 43 | 9.584825 44 | 9.117133 45 | 8.085706 46 | 6.206244 47 | 4.8375006 48 | 4.3950615 49 | 4.2802663 50 | 4.252298 51 | 4.304632 52 | 4.309044 53 | 4.3044047 54 | 4.305268 55 | 4.2700186 56 | 4.1668663 57 | 3.9679546 58 | 3.6784477 59 | 3.2887683 60 | 2.710801 61 | 2.3077486 62 | 2.521016 63 | 2.7039444 64 | 2.6638892 65 | 2.4916685 66 | 2.2551026 67 | 1.991464 68 | 1.7531359 69 | 1.5473367 70 | 1.415074 71 | 1.347176 72 | 1.3157327 73 | 1.2990909 74 | 1.2896432 75 | 1.2844951 76 | 1.2722292 77 | 1.250815 78 | 1.2165763 79 | 1.1697181 80 | 1.1153493 81 | 1.0578043 82 | 0.99958205 83 | 0.94715315 84 | 0.9057967 85 | 0.87113965 86 | 0.81914353 87 | 0.7961524 88 | 0.7770762 89 | 0.7535056 90 | 0.7327734 91 | 0.7043585 92 | 0.6740894 93 | 0.6382515 94 | 0.578513 95 | 0.5003586 96 | 0.45740402 97 | 0.4936882 98 | 0.4481472 99 | 0.42166504 100 | 0.5393235 101 | 17543.283 102 | 130790.42 103 | 637.5577 104 | 384.23694 105 | 119.09377 106 | 85.715 107 | 62.703014 108 | 46.944138 109 | 44.42478 110 | 40.207935 111 | 38.421932 112 | 36.576878 113 | 39.22118 114 | 31.320803 115 | 28.325912 116 | 27.629463 117 | 27.716866 118 | 26.503185 119 | 26.478907 120 | 24.724726 121 | 24.969885 122 | 23.525341 123 | 23.099468 124 | 23.490828 125 | 22.737774 126 | 22.148651 127 | 21.881569 128 | 21.3951 129 | 21.046782 130 | 20.714306 131 | 20.969107 132 | 20.568 133 | 20.481909 134 | 21.15904 135 | 22.665442 136 | 21.31353 137 | 21.293404 138 | 21.090055 139 | 20.907974 140 | 21.099363 141 | 22.060345 142 | 22.707369 143 | 22.12796 144 | 21.724785 145 | 21.16677 146 | 20.836687 147 
| 19.98999 148 | 18.56362 149 | 17.417704 150 | 17.12135 151 | 17.094372 152 | 17.7699 153 | 17.379368 154 | 16.901375 155 | 16.13462 156 | 14.890372 157 | 14.924959 158 | 14.88762 159 | 14.553594 160 | 14.156909 161 | 14.139172 162 | 13.266318 163 | 13.1488285 164 | 12.843909 165 | 12.266882 166 | 11.4941845 167 | 10.833894 168 | 10.468101 169 | 10.04279 170 | 9.434782 171 | 9.126278 172 | 8.916023 173 | 8.696767 174 | 8.341035 175 | 8.183367 176 | 7.4396462 177 | 7.483041 178 | 7.1595507 179 | 6.98731 180 | 6.8581023 181 | 6.686057 182 | 6.234457 183 | 6.0882525 184 | 5.936833 185 | 5.8288655 186 | 5.725814 187 | 5.4344606 188 | 5.1872344 189 | 4.716026 190 | 4.575884 191 | 4.6358666 192 | 4.7472625 193 | 4.7640476 194 | 4.7679973 195 | 4.730586 196 | 4.4117293 197 | 4.052361 198 | 4.045823 199 | 4.0878835 200 | 4.04691 201 | 4.0452085 202 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/MMPINN-DNN-GRS/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/MMPINN-DNN-GRS/uu.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A problem with dramatic variations in different subdomains/MMPINN-DNN-GRS/uu.mat -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/MMPINN-DNN/log1.txt: -------------------------------------------------------------------------------- 1 | 266.05322 2 | 277.90866 3 | 286.63574 4 | 12279.619 5 | 64470.12 6 | 101869.055 7 | 108199.05 8 | 99416.016 9 | 82012.125 10 | 61621.855 11 | 68230.1 12 | 155563.58 13 | 335488.28 14 | 887188.75 15 | 1869172.0 16 | 2044887.4 17 | 4809807.0 18 | 2106601.8 19 | 1827325.0 20 | 1207055.8 21 | 947757.8 22 | 759351.6 23 | 648861.7 24 | 581734.25 25 | 539547.8 26 | 510329.94 27 | 484033.94 28 | 455015.0 29 | 422770.16 30 | 390276.9 31 | 360363.9 32 | 334200.16 33 | 311724.16 34 | 292410.06 35 | 275952.94 36 | 262439.5 37 | 252112.98 38 | 245210.6 39 | 241983.97 40 | 242826.4 41 | 248557.27 42 | 260954.56 43 | 283832.5 44 | 325712.0 45 | 407113.53 46 | 581812.75 47 | 1006172.06 48 | 2160644.8 49 | 5625013.0 50 | 17904274.0 51 | 50702510.0 52 | 74920984.0 53 | 79538830.0 54 | 78464310.0 55 | 75940970.0 56 | 71491864.0 57 | 66898730.0 58 | 62333070.0 59 | 58310490.0 60 | 55134284.0 61 | 53063628.0 62 | 51853364.0 63 | 51134264.0 64 | 50594300.0 65 | 49750012.0 66 | 49212084.0 67 | 49031604.0 68 | 49515776.0 69 | 50591948.0 70 | 51939784.0 71 | 53441390.0 72 | 54708360.0 73 | 55975468.0 74 | 57491156.0 75 | 59225890.0 76 | 61114896.0 77 | 63302364.0 78 | 65407290.0 79 | 67245720.0 80 | 68991550.0 81 | 69922730.0 82 | 70187370.0 83 | 70136240.0 84 | 70240630.0 85 | 70327690.0 86 | 70378170.0 87 | 
70579040.0 88 | 69988370.0 89 | 70041660.0 90 | 70081304.0 91 | 70211300.0 92 | 70187120.0 93 | 69929740.0 94 | 70451810.0 95 | 71271230.0 96 | 71359780.0 97 | 71797660.0 98 | 71636504.0 99 | 71853704.0 100 | 71977620.0 101 | 2202889500.0 102 | 7537636400.0 103 | 6345909000.0 104 | 6544915000.0 105 | 9357313000.0 106 | 4168526800.0 107 | 5660289500.0 108 | 3792017200.0 109 | 4952733700.0 110 | 1915077400.0 111 | 1532137100.0 112 | 400141860.0 113 | 353165280.0 114 | 36182120.0 115 | 25734820.0 116 | 21647688.0 117 | 15784064.0 118 | 9537406.0 119 | 6401393.5 120 | 2353410.0 121 | 1337389.6 122 | 490275.5 123 | 236656.39 124 | 185510.34 125 | 165514.95 126 | 128957.305 127 | 93283.24 128 | 67655.445 129 | 49055.617 130 | 36772.625 131 | 30845.256 132 | 27377.938 133 | 23531.646 134 | 22781.406 135 | 21632.115 136 | 20417.54 137 | 20572.7 138 | 20808.766 139 | 20112.418 140 | 20126.697 141 | 20189.615 142 | 18196.957 143 | 15445.336 144 | 15003.58 145 | 12294.1 146 | 11397.606 147 | 11105.527 148 | 10221.977 149 | 8983.706 150 | 8346.94 151 | 7823.4727 152 | 7486.716 153 | 7308.1304 154 | 6958.6694 155 | 6348.7163 156 | 6110.863 157 | 6118.1504 158 | 6117.607 159 | 6275.777 160 | 5953.6714 161 | 5866.531 162 | 5625.0796 163 | 5547.696 164 | 5430.271 165 | 5287.139 166 | 5135.9297 167 | 5033.906 168 | 4917.442 169 | 4985.875 170 | 5048.3213 171 | 5148.1045 172 | 5095.8384 173 | 5253.6 174 | 5146.223 175 | 5045.7324 176 | 4947.022 177 | 4840.6367 178 | 4740.82 179 | 4647.44 180 | 4409.5664 181 | 4372.9023 182 | 4152.0156 183 | 3991.2017 184 | 3978.1296 185 | 3937.5308 186 | 3867.504 187 | 3781.05 188 | 3626.5808 189 | 3530.7368 190 | 3458.0347 191 | 3450.6965 192 | 3461.2283 193 | 3522.7468 194 | 3476.238 195 | 3404.4998 196 | 3421.0435 197 | 3400.7888 198 | 3415.879 199 | 3388.328 200 | 3373.9685 201 | 3372.6917 202 | 3396.7017 203 | 3400.0793 204 | 3466.05 205 | 3478.2449 206 | 3482.4836 207 | 3557.0002 208 | 3530.433 209 | 3447.9019 210 | 3404.4434 211 | 3368.492 212 | 3273.7393 213 | 3233.9888 214 | 3181.778 215 | 3131.277 216 | 3161.8328 217 | 3146.7937 218 | 3187.09 219 | 3219.504 220 | 3186.9768 221 | 3152.3813 222 | 3121.7415 223 | 3026.622 224 | 3009.5835 225 | 2991.0938 226 | 3017.9631 227 | 3003.8474 228 | 2975.9487 229 | 2985.7637 230 | 2939.249 231 | 2915.765 232 | 2846.7517 233 | 2815.3633 234 | 2783.3584 235 | 2724.061 236 | 2741.3733 237 | 2678.661 238 | 2630.4917 239 | 2562.722 240 | 2494.0425 241 | 2488.7683 242 | 2489.392 243 | 2464.967 244 | 2429.326 245 | 2406.5066 246 | 2370.236 247 | 2367.5715 248 | 2348.4512 249 | 2324.8403 250 | 2295.4102 251 | 2285.9238 252 | 2238.2664 253 | 2230.7944 254 | 2234.8489 255 | 2198.9763 256 | 2182.4038 257 | 2171.0557 258 | 2151.4568 259 | 2128.5034 260 | 2123.6035 261 | 2115.2405 262 | 2114.4543 263 | 2103.054 264 | 2071.2925 265 | 2066.706 266 | 2047.5615 267 | 2012.653 268 | 1996.3546 269 | 1989.5723 270 | 1957.212 271 | 1915.5322 272 | 1868.0637 273 | 1845.9512 274 | 1834.4197 275 | 1795.5404 276 | 1786.7346 277 | 1762.2963 278 | 1753.1107 279 | 1744.2496 280 | 1718.7072 281 | 1717.5778 282 | 1754.7129 283 | 1666.4536 284 | 1650.0243 285 | 1618.749 286 | 1596.612 287 | 1586.02 288 | 1559.0648 289 | 1549.3062 290 | 1548.8242 291 | 1552.1593 292 | 1546.435 293 | 1542.314 294 | 1523.7039 295 | 1516.144 296 | 1503.2913 297 | 1492.4995 298 | 1480.2814 299 | 1464.0884 300 | 1456.6147 301 | 1459.2882 302 | 1449.6846 303 | 1429.8134 304 | 1420.1204 305 | 1408.6483 306 | 1396.8063 307 | 1384.2477 308 | 1385.1058 309 | 1385.3109 310 | 1381.072 
311 | 1389.1439 312 | 1384.3035 313 | 1380.7717 314 | 1375.0327 315 | 1374.4325 316 | 1377.0808 317 | 1383.2042 318 | 1382.8069 319 | 1380.0608 320 | 1376.3688 321 | 1370.7688 322 | 1359.7273 323 | 1351.9965 324 | 1344.3922 325 | 1341.7817 326 | 1337.1997 327 | 1329.2269 328 | 1322.1815 329 | 1312.8893 330 | 1308.5533 331 | 1301.56 332 | 1296.3539 333 | 1294.59 334 | 1295.0448 335 | 1292.7109 336 | 1289.9266 337 | 1289.4409 338 | 1291.1561 339 | 1282.6061 340 | 1279.7374 341 | 1277.5358 342 | 1276.1082 343 | 1274.079 344 | 1267.3168 345 | 1264.159 346 | 1260.6102 347 | 1257.7938 348 | 1255.5482 349 | 1248.9781 350 | 1243.1771 351 | 1238.1707 352 | 1234.537 353 | 1230.5493 354 | 1223.8667 355 | 1219.3907 356 | 1215.3652 357 | 1209.6661 358 | 1202.593 359 | 1196.4736 360 | 1188.5226 361 | 1186.3309 362 | 1185.3438 363 | 1184.8469 364 | 1175.5287 365 | 1171.8854 366 | 1162.8159 367 | 1152.682 368 | 1148.6133 369 | 1143.8496 370 | 1141.1357 371 | 1135.1692 372 | 1132.1475 373 | 1127.2742 374 | 1127.2443 375 | 1128.7008 376 | 1125.0234 377 | 1122.6317 378 | 1121.7592 379 | 1120.1045 380 | 1111.2172 381 | 1107.0673 382 | 1096.7064 383 | 1089.084 384 | 1084.0652 385 | 1075.3527 386 | 1070.9282 387 | 1069.5367 388 | 1061.4735 389 | 1053.2183 390 | 1040.8314 391 | 1037.8192 392 | 1038.544 393 | 1033.8638 394 | 1029.1475 395 | 1024.804 396 | 1014.725 397 | 1012.4621 398 | 1003.4406 399 | 995.3549 400 | 990.95624 401 | 979.446 402 | 974.496 403 | 960.11896 404 | 949.88776 405 | 940.4856 406 | 941.98865 407 | 933.487 408 | 923.17346 409 | 919.0807 410 | 911.8487 411 | 898.2336 412 | 886.2876 413 | 875.7615 414 | 867.6137 415 | 855.3204 416 | 847.5477 417 | 839.71606 418 | 831.56036 419 | 827.23706 420 | 824.8319 421 | 818.22156 422 | 809.1744 423 | 805.2808 424 | 791.1843 425 | 778.6436 426 | 768.42444 427 | 766.1561 428 | 761.05493 429 | 756.3589 430 | 752.6724 431 | 755.7635 432 | 751.13824 433 | 744.024 434 | 731.6809 435 | 723.70233 436 | 716.706 437 | 704.8022 438 | 693.6038 439 | 685.5501 440 | 680.87195 441 | 675.16455 442 | 670.15765 443 | 657.18945 444 | 654.3405 445 | 651.1636 446 | 647.9576 447 | 642.72784 448 | 638.0038 449 | 635.56476 450 | 630.86505 451 | 623.3884 452 | 619.2784 453 | 612.66693 454 | 612.4893 455 | 609.8762 456 | 598.56586 457 | 594.6633 458 | 594.09814 459 | 593.9668 460 | 593.9668 461 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/MMPINN-DNN/log2.txt: -------------------------------------------------------------------------------- 1 | 415697860000.0 2 | 415697860000.0 3 | 415697860000.0 4 | 415697930000.0 5 | 415698000000.0 6 | 415698060000.0 7 | 415698060000.0 8 | 415698160000.0 9 | 415698300000.0 10 | 415698780000.0 11 | 415701430000.0 12 | 415717200000.0 13 | 415767860000.0 14 | 415883620000.0 15 | 416064900000.0 16 | 416724030000.0 17 | 416640930000.0 18 | 417888900000.0 19 | 418230240000.0 20 | 418975800000.0 21 | 419540340000.0 22 | 420164630000.0 23 | 420772500000.0 24 | 421376400000.0 25 | 421975850000.0 26 | 422561220000.0 27 | 423130070000.0 28 | 423681520000.0 29 | 424215940000.0 30 | 424731740000.0 31 | 425226830000.0 32 | 425699570000.0 33 | 426150500000.0 34 | 426583520000.0 35 | 427005300000.0 36 | 427421830000.0 37 | 427835000000.0 38 | 428240300000.0 39 | 428629070000.0 40 | 428991200000.0 41 | 429314340000.0 42 | 429581400000.0 43 | 429767200000.0 44 | 429835680000.0 45 | 429736700000.0 46 | 429419070000.0 47 | 428883400000.0 48 | 428226400000.0 49 | 
427520660000.0 50 | 426647160000.0 51 | 425863680000.0 52 | 425360100000.0 53 | 424914700000.0 54 | 424538600000.0 55 | 424300970000.0 56 | 424129170000.0 57 | 423978370000.0 58 | 423845560000.0 59 | 423717900000.0 60 | 423609760000.0 61 | 423520470000.0 62 | 423440940000.0 63 | 423365380000.0 64 | 423365000000.0 65 | 423229880000.0 66 | 423161860000.0 67 | 423113230000.0 68 | 423069480000.0 69 | 423031300000.0 70 | 422934050000.0 71 | 422951200000.0 72 | 422965670000.0 73 | 422926100000.0 74 | 422927700000.0 75 | 422933200000.0 76 | 422944900000.0 77 | 422873270000.0 78 | 423038030000.0 79 | 422969740000.0 80 | 422898600000.0 81 | 422902100000.0 82 | 423033600000.0 83 | 423147440000.0 84 | 423037730000.0 85 | 423251700000.0 86 | 423315770000.0 87 | 423397600000.0 88 | 423653540000.0 89 | 423733200000.0 90 | 423793360000.0 91 | 423581400000.0 92 | 423725860000.0 93 | 423716060000.0 94 | 424004900000.0 95 | 424140670000.0 96 | 424184870000.0 97 | 424414540000.0 98 | 424432300000.0 99 | 424914220000.0 100 | 425117580000.0 101 | 1144067300000.0 102 | 2749823200000.0 103 | 1096350900000.0 104 | 645721700000.0 105 | 541748630000.0 106 | 472584130000.0 107 | 440974970000.0 108 | 425321300000.0 109 | 368123870000.0 110 | 271325350000.0 111 | 132418600000.0 112 | 44231692000.0 113 | 20688982000.0 114 | 12362017000.0 115 | 6707651600.0 116 | 4377509400.0 117 | 2928481500.0 118 | 2102169500.0 119 | 1717811200.0 120 | 1340571900.0 121 | 1200317600.0 122 | 906456640.0 123 | 696328960.0 124 | 640696000.0 125 | 570108860.0 126 | 479368420.0 127 | 409845020.0 128 | 358284300.0 129 | 338544670.0 130 | 307262660.0 131 | 258740260.0 132 | 247130750.0 133 | 204148940.0 134 | 190077900.0 135 | 171733600.0 136 | 158074380.0 137 | 147133780.0 138 | 136369400.0 139 | 123747920.0 140 | 112614500.0 141 | 103960580.0 142 | 96277096.0 143 | 95798120.0 144 | 127516130.0 145 | 82497640.0 146 | 75354536.0 147 | 73946824.0 148 | 66066716.0 149 | 57465224.0 150 | 54353252.0 151 | 51387284.0 152 | 50136360.0 153 | 49761932.0 154 | 45689948.0 155 | 45810980.0 156 | 46048016.0 157 | 44001788.0 158 | 39887384.0 159 | 39536376.0 160 | 35015444.0 161 | 34641250.0 162 | 33342016.0 163 | 30899568.0 164 | 29935168.0 165 | 29149330.0 166 | 28810194.0 167 | 28766922.0 168 | 28490216.0 169 | 27174742.0 170 | 28657168.0 171 | 25363638.0 172 | 24373986.0 173 | 23796322.0 174 | 22789924.0 175 | 21836878.0 176 | 20372228.0 177 | 18867724.0 178 | 17793348.0 179 | 17227104.0 180 | 17060554.0 181 | 16149106.0 182 | 15342321.0 183 | 14581504.0 184 | 14261197.0 185 | 13773459.0 186 | 13284784.0 187 | 13033451.0 188 | 12835932.0 189 | 12453769.0 190 | 12251859.0 191 | 14253304.0 192 | 11863631.0 193 | 11730901.0 194 | 11482744.0 195 | 11210447.0 196 | 11128109.0 197 | 10933334.0 198 | 10762995.0 199 | 10700259.0 200 | 10424978.0 201 | 10297907.0 202 | 10238002.0 203 | 10186972.0 204 | 10014803.0 205 | 9704227.0 206 | 9298292.0 207 | 8956105.0 208 | 8726116.0 209 | 8569872.0 210 | 8272630.0 211 | 8137568.0 212 | 8096188.5 213 | 8004897.0 214 | 7772440.5 215 | 7666433.0 216 | 7469517.0 217 | 7452604.5 218 | 7370443.0 219 | 7383273.5 220 | 7153864.0 221 | 7216082.5 222 | 7138710.0 223 | 6993560.5 224 | 6902022.5 225 | 6936350.5 226 | 6747566.0 227 | 6635153.0 228 | 6449380.5 229 | 6289584.0 230 | 6165261.0 231 | 6020837.5 232 | 6006779.0 233 | 5936916.5 234 | 5732446.5 235 | 5527328.0 236 | 5407082.5 237 | 5328585.5 238 | 5212365.0 239 | 5197095.5 240 | 5238434.5 241 | 5155930.0 242 | 5001421.5 243 | 4857879.5 244 | 4858163.0 245 | 4808622.0 246 
| 4593308.0 247 | 4507975.5 248 | 4465433.5 249 | 4405105.0 250 | 4231871.0 251 | 4157950.8 252 | 3976826.5 253 | 3794823.0 254 | 3738520.5 255 | 3598646.5 256 | 3532749.0 257 | 3500211.8 258 | 3401879.2 259 | 3319196.5 260 | 3253392.8 261 | 3205154.2 262 | 3106154.2 263 | 3025801.0 264 | 2947565.8 265 | 2936486.0 266 | 2817954.5 267 | 2735225.5 268 | 2713027.8 269 | 2663030.2 270 | 2654800.0 271 | 2605439.5 272 | 2547200.5 273 | 2538492.8 274 | 2470444.0 275 | 2435828.5 276 | 2431261.0 277 | 2407577.5 278 | 2374990.2 279 | 2377469.5 280 | 2311486.8 281 | 2254313.0 282 | 2601149.5 283 | 2192242.8 284 | 2151985.8 285 | 2106163.0 286 | 2061872.2 287 | 2044677.8 288 | 2010332.1 289 | 1982332.4 290 | 1954831.6 291 | 1932936.0 292 | 1910263.0 293 | 1890292.8 294 | 1877896.1 295 | 1852310.8 296 | 1845378.2 297 | 1846149.8 298 | 1815020.4 299 | 1784906.9 300 | 1768604.2 301 | 1749822.4 302 | 1737570.2 303 | 1736443.6 304 | 1722095.8 305 | 1705841.4 306 | 1695595.1 307 | 1689579.9 308 | 1702537.2 309 | 1700990.6 310 | 1694972.4 311 | 1666540.9 312 | 1630915.5 313 | 1594885.2 314 | 1585453.2 315 | 1573916.5 316 | 1558737.5 317 | 1549052.1 318 | 1528651.5 319 | 1512472.0 320 | 1499345.2 321 | 1468651.1 322 | 1446938.2 323 | 1437719.5 324 | 1399886.4 325 | 1377518.2 326 | 1353555.2 327 | 1336680.8 328 | 1333501.1 329 | 1330762.4 330 | 1326816.2 331 | 1302290.4 332 | 1296924.5 333 | 1295320.6 334 | 1285975.5 335 | 1267512.4 336 | 1261021.9 337 | 1253850.5 338 | 1243063.6 339 | 1231360.0 340 | 1198669.4 341 | 1184340.4 342 | 1172963.9 343 | 1156611.4 344 | 1118513.0 345 | 1108747.0 346 | 1091870.1 347 | 1061672.1 348 | 1054176.9 349 | 1026825.7 350 | 1016154.7 351 | 1004190.3 352 | 988804.0 353 | 973771.0 354 | 952613.8 355 | 939036.25 356 | 925650.1 357 | 915978.5 358 | 899996.2 359 | 890118.1 360 | 879617.56 361 | 871745.4 362 | 870467.2 363 | 868928.56 364 | 855667.5 365 | 851814.7 366 | 838668.0 367 | 823318.4 368 | 813354.94 369 | 816155.2 370 | 810081.9 371 | 807148.2 372 | 802524.7 373 | 793631.3 374 | 783082.7 375 | 777437.5 376 | 774770.44 377 | 768313.7 378 | 762591.94 379 | 762445.44 380 | 764094.44 381 | 764076.3 382 | 769306.0 383 | 763630.7 384 | 758043.2 385 | 755388.2 386 | 756557.25 387 | 752809.3 388 | 751303.5 389 | 747352.6 390 | 738550.1 391 | 726774.94 392 | 720562.4 393 | 710770.8 394 | 704511.56 395 | 694910.4 396 | 687435.6 397 | 675090.3 398 | 672994.6 399 | 666801.3 400 | 656038.0 401 | 648884.25 402 | 641934.0 403 | 634822.1 404 | 627709.8 405 | 620704.7 406 | 617725.1 407 | 608943.6 408 | 602676.56 409 | 597783.4 410 | 592325.3 411 | 590175.7 412 | 589569.56 413 | 581608.4 414 | 575223.44 415 | 568138.06 416 | 559170.8 417 | 558276.06 418 | 549676.6 419 | 548240.7 420 | 546662.56 421 | 539101.94 422 | 538194.7 423 | 524130.44 424 | 516694.2 425 | 510102.34 426 | 512768.8 427 | 511781.1 428 | 506992.5 429 | 499998.3 430 | 498333.62 431 | 486524.03 432 | 480257.5 433 | 477341.06 434 | 467218.62 435 | 461351.25 436 | 455389.53 437 | 450073.6 438 | 445300.62 439 | 442876.03 440 | 432531.84 441 | 426142.5 442 | 416021.5 443 | 405176.47 444 | 402053.47 445 | 400141.3 446 | 397358.78 447 | 394701.75 448 | 390844.3 449 | 386782.78 450 | 385384.94 451 | 382566.1 452 | 380559.94 453 | 376976.88 454 | 374117.16 455 | 371982.28 456 | 364993.25 457 | 362244.78 458 | 359621.1 459 | 359609.66 460 | 359609.66 461 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/MMPINN-DNN/log3.txt: 
-------------------------------------------------------------------------------- 1 | 266.78607 2 | 278.57468 3 | 282.02112 4 | 276.1291 5 | 273.5095 6 | 273.31998 7 | 273.4637 8 | 274.47388 9 | 276.86005 10 | 281.48648 11 | 289.28174 12 | 290.69214 13 | 282.103 14 | 277.0158 15 | 525.9735 16 | 1531138.9 17 | 5258709.0 18 | 4148038.0 19 | 4466644.5 20 | 2838502.2 21 | 1958474.5 22 | 1267444.1 23 | 861041.9 24 | 631898.94 25 | 512156.97 26 | 455546.25 27 | 427348.6 28 | 405414.84 29 | 380131.6 30 | 352201.25 31 | 326648.53 32 | 307506.78 33 | 295865.97 34 | 290270.72 35 | 288086.34 36 | 287234.72 37 | 287503.9 38 | 290362.72 39 | 297520.7 40 | 310003.78 41 | 328502.44 42 | 354193.7 43 | 389634.06 44 | 440190.22 45 | 517157.4 46 | 645040.25 47 | 881581.7 48 | 1375994.1 49 | 2554211.8 50 | 5787029.0 51 | 14055212.0 52 | 32997904.0 53 | 65230372.0 54 | 97476776.0 55 | 118586460.0 56 | 129514410.0 57 | 133733030.0 58 | 132667950.0 59 | 128318900.0 60 | 123026570.0 61 | 118356936.0 62 | 114864940.0 63 | 112641070.0 64 | 111364530.0 65 | 109790150.0 66 | 106814290.0 67 | 102991410.0 68 | 99194240.0 69 | 95488730.0 70 | 91260770.0 71 | 86923480.0 72 | 82245970.0 73 | 77390850.0 74 | 72670030.0 75 | 68278600.0 76 | 63968504.0 77 | 59907844.0 78 | 56297804.0 79 | 52938460.0 80 | 50024160.0 81 | 47516270.0 82 | 45905348.0 83 | 44555828.0 84 | 43235960.0 85 | 42740508.0 86 | 42156216.0 87 | 41764790.0 88 | 42027076.0 89 | 42187730.0 90 | 42372610.0 91 | 42297100.0 92 | 42893076.0 93 | 43138380.0 94 | 44074028.0 95 | 44284892.0 96 | 45007276.0 97 | 45071250.0 98 | 45482590.0 99 | 45449320.0 100 | 46229820.0 101 | 2127977300.0 102 | 2154049500.0 103 | 1696384300.0 104 | 2575999700.0 105 | 5552120000.0 106 | 3998159400.0 107 | 4735319600.0 108 | 2854227700.0 109 | 3785663200.0 110 | 1077059000.0 111 | 396784860.0 112 | 39287.773 113 | 133874.23 114 | 267176.66 115 | 404585.8 116 | 244592.64 117 | 152385.53 118 | 124425.69 119 | 82500.08 120 | 87002.914 121 | 89954.445 122 | 81664.78 123 | 69077.53 124 | 61034.418 125 | 48892.457 126 | 37135.258 127 | 28480.328 128 | 19632.465 129 | 16884.57 130 | 14364.835 131 | 13360.177 132 | 11539.874 133 | 9519.592 134 | 9453.412 135 | 8653.801 136 | 8733.011 137 | 9112.551 138 | 9484.021 139 | 9837.529 140 | 9756.805 141 | 10630.328 142 | 12214.381 143 | 12784.917 144 | 11803.813 145 | 14171.559 146 | 15918.9375 147 | 16418.607 148 | 17489.441 149 | 19002.227 150 | 19868.146 151 | 22270.414 152 | 23629.555 153 | 23624.984 154 | 24079.695 155 | 24594.777 156 | 24844.625 157 | 23952.725 158 | 22948.67 159 | 22083.094 160 | 20801.068 161 | 19208.094 162 | 18392.48 163 | 16734.201 164 | 15674.097 165 | 14975.18 166 | 13642.895 167 | 12176.32 168 | 11571.955 169 | 11089.313 170 | 10681.801 171 | 10641.874 172 | 10393.988 173 | 9986.464 174 | 9590.282 175 | 9215.105 176 | 8820.001 177 | 8716.227 178 | 8867.119 179 | 9060.61 180 | 8846.625 181 | 8627.223 182 | 8606.762 183 | 8610.288 184 | 8520.555 185 | 8356.916 186 | 8205.37 187 | 8158.1694 188 | 7881.3066 189 | 7902.5327 190 | 7717.675 191 | 7438.509 192 | 7546.74 193 | 7302.1626 194 | 7147.0835 195 | 6965.829 196 | 6919.2617 197 | 6758.1997 198 | 6684.85 199 | 6564.4097 200 | 6448.8994 201 | 6349.738 202 | 6284.3706 203 | 6166.799 204 | 6038.8994 205 | 5833.794 206 | 5697.056 207 | 5578.913 208 | 5480.9014 209 | 5283.239 210 | 5102.0063 211 | 5107.0977 212 | 5123.3237 213 | 5165.582 214 | 5281.6587 215 | 5315.121 216 | 5402.884 217 | 5461.6597 218 | 5631.2344 219 | 5644.5664 220 | 5710.4414 221 | 5824.228 222 | 
5858.3994 223 | 5851.452 224 | 5867.556 225 | 5836.32 226 | 5828.6694 227 | 5870.053 228 | 5876.672 229 | 5843.6553 230 | 5747.1934 231 | 5772.2334 232 | 5771.9756 233 | 5702.0903 234 | 5601.4795 235 | 5527.247 236 | 5524.338 237 | 5571.3726 238 | 5584.9536 239 | 5616.1826 240 | 5593.9526 241 | 5612.5977 242 | 5602.462 243 | 5593.607 244 | 5575.5713 245 | 5555.177 246 | 5498.968 247 | 5506.5493 248 | 5475.4785 249 | 5451.935 250 | 5376.8 251 | 5349.21 252 | 5302.3877 253 | 5290.0728 254 | 5251.549 255 | 5193.799 256 | 5143.5615 257 | 5119.4023 258 | 5063.541 259 | 5030.7764 260 | 4981.3394 261 | 4938.884 262 | 4908.079 263 | 4819.582 264 | 4725.524 265 | 4682.8145 266 | 4595.907 267 | 4469.83 268 | 4434.164 269 | 4349.261 270 | 4272.619 271 | 4201.2993 272 | 4119.617 273 | 4071.2842 274 | 4008.0503 275 | 3912.3333 276 | 3874.389 277 | 3825.2346 278 | 3783.3591 279 | 3768.1418 280 | 3689.6172 281 | 3687.4155 282 | 3752.2148 283 | 3577.9265 284 | 3550.8218 285 | 3515.3184 286 | 3451.2292 287 | 3435.2485 288 | 3377.1453 289 | 3343.4597 290 | 3330.4075 291 | 3309.1104 292 | 3246.0566 293 | 3231.262 294 | 3193.2336 295 | 3162.833 296 | 3126.104 297 | 3102.8613 298 | 3065.813 299 | 3014.3696 300 | 2989.676 301 | 2989.96 302 | 2945.384 303 | 2896.6238 304 | 2884.4917 305 | 2849.5625 306 | 2829.4758 307 | 2789.3555 308 | 2777.7036 309 | 2771.113 310 | 2744.5908 311 | 2742.3206 312 | 2711.4043 313 | 2690.4805 314 | 2669.7468 315 | 2663.1067 316 | 2653.3027 317 | 2640.393 318 | 2620.855 319 | 2599.6902 320 | 2579.4504 321 | 2554.8345 322 | 2519.1882 323 | 2501.6965 324 | 2475.553 325 | 2452.7405 326 | 2435.3516 327 | 2402.4006 328 | 2375.5908 329 | 2350.1428 330 | 2325.2751 331 | 2288.936 332 | 2272.7688 333 | 2261.6255 334 | 2243.194 335 | 2210.1074 336 | 2208.443 337 | 2196.5793 338 | 2191.1677 339 | 2169.483 340 | 2153.9614 341 | 2140.2117 342 | 2109.6384 343 | 2101.6812 344 | 2090.225 345 | 2082.499 346 | 2074.6506 347 | 2061.6995 348 | 2061.825 349 | 2049.999 350 | 2039.493 351 | 2033.8073 352 | 2026.4277 353 | 2023.0365 354 | 2006.2408 355 | 2006.4932 356 | 2002.6976 357 | 1991.1885 358 | 1981.0896 359 | 1973.1827 360 | 1952.7152 361 | 1935.8011 362 | 1929.3253 363 | 1913.9547 364 | 1891.2758 365 | 1875.2776 366 | 1850.535 367 | 1809.3933 368 | 1793.7909 369 | 1776.7688 370 | 1756.9966 371 | 1725.685 372 | 1716.3195 373 | 1699.0205 374 | 1685.6682 375 | 1685.2219 376 | 1670.485 377 | 1662.488 378 | 1664.2461 379 | 1672.2708 380 | 1655.9578 381 | 1659.6606 382 | 1646.0853 383 | 1651.0114 384 | 1640.1781 385 | 1630.086 386 | 1629.1227 387 | 1628.3566 388 | 1613.1365 389 | 1597.8068 390 | 1576.6049 391 | 1564.9941 392 | 1561.3046 393 | 1549.4962 394 | 1541.1184 395 | 1531.6287 396 | 1512.1376 397 | 1502.3 398 | 1482.9576 399 | 1471.9557 400 | 1463.667 401 | 1443.663 402 | 1437.1436 403 | 1418.1333 404 | 1402.0032 405 | 1386.3712 406 | 1389.5875 407 | 1378.0691 408 | 1362.9406 409 | 1357.6963 410 | 1342.4872 411 | 1321.3608 412 | 1304.851 413 | 1288.1267 414 | 1275.7241 415 | 1257.219 416 | 1244.5338 417 | 1239.1499 418 | 1229.02 419 | 1229.0963 420 | 1225.7123 421 | 1216.1232 422 | 1207.0237 423 | 1205.9406 424 | 1188.5618 425 | 1166.9885 426 | 1148.8859 427 | 1146.5682 428 | 1134.7087 429 | 1120.7725 430 | 1107.1299 431 | 1108.1229 432 | 1098.6537 433 | 1092.0018 434 | 1071.8773 435 | 1061.4691 436 | 1051.9014 437 | 1034.0583 438 | 1020.49646 439 | 1012.1298 440 | 1002.4443 441 | 995.45026 442 | 988.81226 443 | 967.1272 444 | 961.3029 445 | 953.39746 446 | 946.0507 447 | 934.91425 448 | 922.1731 
449 | 918.1732 450 | 908.9376 451 | 896.8675 452 | 886.9408 453 | 873.86786 454 | 873.65466 455 | 866.8323 456 | 847.64844 457 | 840.259 458 | 836.4684 459 | 836.3675 460 | 836.3675 461 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/MMPINN-DNN/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/MMPINN-DNN/uu.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A problem with dramatic variations in different subdomains/MMPINN-DNN/uu.mat 
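The next file, PINN(1000K Adam).py, trains a plain PINN on a manufactured problem: heatsolution(x, t) = (1000 + x^2) * exp(-(t - 0.5)^2 / (2 * 0.02^2)) + 1 is the exact solution, and the residual in net_f_uv is u_tt + u_xx minus a hard-coded forcing term. The following short sympy check is not part of the repository; it only confirms that the hard-coded forcing term is consistent with that exact solution (the residual vanishes when the exact u is substituted).

import sympy as sp

x, t = sp.symbols("x t")
hh, hhhh = sp.Rational(1, 50), 1000  # the script's constants hh = 0.02 and hhhh = 1000

# Exact solution used by heatsolution() in the script below.
u = (hhhh + x**2) * sp.exp(-(t - sp.Rational(1, 2))**2 / (2 * hh**2)) + 1

# Forcing term exactly as hard-coded in net_f_uv().
exp_t = sp.exp(-(t - sp.Rational(1, 2))**2 / (2 * hh**2))
forcing = 2 * exp_t + (-(hhhh + x**2) * exp_t / hh**2
                       + (hhhh + x**2) * (4 * (t - sp.Rational(1, 2))**2) * exp_t / (4 * hh**4))

residual = sp.diff(u, t, 2) + sp.diff(u, x, 2) - forcing
print(sp.simplify(residual))  # prints 0: f_u is zero on the exact solution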
-------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/PINN(1000k Adam)/PINN(1000K Adam).py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # In[1]: 5 | 6 | 7 | import tensorflow as tf 8 | import numpy as np 9 | import scipy.io 10 | from pyDOE import lhs 11 | import time 12 | import time 13 | import math 14 | 15 | 16 | # In[2]: 17 | 18 | 19 | np.random.seed(1232) 20 | tf.set_random_seed(1232) 21 | 22 | 23 | # In[3]: 24 | 25 | 26 | class PhysicsInformedNN: 27 | # Initialize the class 28 | def __init__(self, x0, u0, uend,tb, X_f, layers, lb, ub,u_lb,u_ub): 29 | 30 | # lb = np.array([0, 0]) ub = np.array([1, 1]) 31 | 32 | X0 = np.concatenate((x0, 0*x0), 1) # initial (t = 0) 33 | X1 = np.concatenate((x0, 0*x0+1), 1) 34 | X_lb = np.concatenate((0*tb + lb[0], tb), 1) # boundary 0 (x = lb[0]) 35 | X_ub = np.concatenate((0*tb + ub[0], tb), 1) # boundary 1 (x = ub[0]) 36 | 37 | self.lb = lb 38 | self.ub = ub 39 | 40 | self.x0 = X0[:,0:1] 41 | self.t0 = X0[:,1:2] 42 | 43 | self.t1 = X1[:,1:2] 44 | 45 | self.x_lb = X_lb[:,0:1] 46 | self.t_lb = X_lb[:,1:2] 47 | 48 | self.x_ub = X_ub[:,0:1] 49 | self.t_ub = X_ub[:,1:2] 50 | 51 | self.x_f = X_f[:,0:1] 52 | self.t_f = X_f[:,1:2] 53 | 54 | 55 | self.x_f1 = X_f[:,0:1] 56 | self.t_f1 = X_f[:,1:2]*1/10*4 57 | 58 | self.x_f2 = X_f[:,0:1] 59 | self.t_f2 = X_f[:,1:2]*1/10*4+0.2 60 | 61 | self.x_f3 = X_f[:,0:1] 62 | self.t_f3 = X_f[:,1:2]*1/10*4+0.6 63 | 64 | 65 | self.u_lb=u_lb 66 | self.u_ub=u_ub 67 | # initial-time and final-time values of u, respectively 68 | self.u0 = u0 69 | self.uend = uend 70 | self.losslossloss=[] 71 | # Initialize NNs 72 | self.layers = layers 73 | 74 | self.loss1weight=1 75 | self.loss2weight=1 76 | # returns the initial weights w and biases b 77 | self.weights, self.biases = self.initialize_NN(layers) 78 | 79 | # tf Placeholders 80 | # placeholders: number of rows left unspecified, number of columns fixed to 1 81 | self.x0_tf = tf.placeholder(tf.float32, shape=[None, self.x0.shape[1]]) 82 | self.t0_tf = tf.placeholder(tf.float32, shape=[None, self.t0.shape[1]]) 83 | self.t1_tf = tf.placeholder(tf.float32, shape=[None, self.t1.shape[1]]) 84 | self.u_lb_tf = tf.placeholder(tf.float32, shape=[None, self.u_lb.shape[1]]) 85 | self.u_ub_tf = tf.placeholder(tf.float32, shape=[None, self.u_ub.shape[1]]) 86 | self.u0_tf = tf.placeholder(tf.float32, shape=[None, self.u0.shape[1]]) 87 | self.uend_tf = tf.placeholder(tf.float32, shape=[None, self.uend.shape[1]]) 88 | self.x_lb_tf = tf.placeholder(tf.float32, shape=[None, self.x_lb.shape[1]]) 89 | self.t_lb_tf = tf.placeholder(tf.float32, shape=[None, self.t_lb.shape[1]]) 90 | self.x_ub_tf = tf.placeholder(tf.float32, shape=[None, self.x_ub.shape[1]]) 91 | self.t_ub_tf = tf.placeholder(tf.float32, shape=[None, self.t_ub.shape[1]]) 92 | self.x_f_tf = tf.placeholder(tf.float32, shape=[None, self.x_f.shape[1]]) 93 | self.t_f_tf = tf.placeholder(tf.float32, shape=[None, self.t_f.shape[1]]) 94 | 95 | 96 | 97 | self.x_f1_tf = tf.placeholder(tf.float32, shape=[None, self.x_f1.shape[1]]) 98 | self.t_f1_tf = tf.placeholder(tf.float32, shape=[None, self.t_f1.shape[1]]) 99 | self.x_f2_tf = tf.placeholder(tf.float32, shape=[None, self.x_f1.shape[1]]) 100 | self.t_f2_tf = tf.placeholder(tf.float32, shape=[None, self.t_f1.shape[1]]) 101 | self.x_f3_tf = tf.placeholder(tf.float32, shape=[None, self.x_f1.shape[1]]) 102 | self.t_f3_tf = tf.placeholder(tf.float32, shape=[None, self.t_f1.shape[1]]) 103 | 104 | 105 | 106 | self.f_u1_pred= self.net_f_uv(self.x_f1_tf, self.t_f1_tf) 107 | self.f_u2_pred=
self.net_f_uv(self.x_f2_tf, self.t_f2_tf) 108 | self.f_u3_pred= self.net_f_uv(self.x_f3_tf, self.t_f3_tf) 109 | # tf Graphs: build the predictions 110 | self.u0_pred= self.net_uv(self.x0_tf, self.t0_tf) 111 | self.uend_pred= self.net_uv(self.x0_tf, self.t1_tf) 112 | self.u_lb_pred= self.net_uv(self.x_lb_tf, self.t_lb_tf) 113 | self.u_ub_pred = self.net_uv(self.x_ub_tf, self.t_ub_tf) 114 | self.f_u_pred= self.net_f_uv(self.x_f_tf, self.t_f_tf) 115 | self.hsadasjd=1 116 | 117 | 118 | 119 | 120 | self.loss111 = tf.pow((tf.reduce_mean(tf.square(self.u0_tf- self.u0_pred )) + tf.reduce_mean(tf.square(self.uend_tf - self.uend_pred )) + tf.reduce_mean(tf.square(self.u_ub_tf - self.u_ub_pred )) + tf.reduce_mean(tf.square(self.u_lb_tf - self.u_lb_pred))),1) 121 | 122 | self.loss222 =tf.pow(tf.reduce_mean(tf.square(self.f_u_pred)),1) 123 | 124 | self.loss21=tf.pow(tf.reduce_mean(tf.square(self.f_u1_pred)),1/1) 125 | self.loss22=tf.pow(tf.reduce_mean(tf.square(self.f_u2_pred)),1/1) 126 | self.loss23=tf.pow(tf.reduce_mean(tf.square(self.f_u3_pred)),1/1) 127 | 128 | 129 | 130 | self.loss = (tf.reduce_mean(tf.square(self.u0_tf- self.u0_pred )) + tf.reduce_mean(tf.square(self.uend_tf - self.uend_pred )) + tf.reduce_mean(tf.square(self.u_ub_tf - self.u_ub_pred )) + tf.reduce_mean(tf.square(self.u_lb_tf - self.u_lb_pred))) + tf.pow(tf.reduce_mean(tf.square(self.f_u_pred)),1) 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | # Optimizers: maxiter = maximum number of iterations, maxfun = maximum number of function evaluations, maxcor = maximum number of variable-metric corrections 139 | # maxls = maximum number of line-search steps per iteration (optional) 140 | self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss, 141 | method = 'L-BFGS-B', 142 | options = {'maxiter': 10000, 143 | 'maxfun': 100000, 144 | 'maxcor': 50, 145 | 'maxls': 50, 146 | 'ftol' : 1.0 * np.finfo(float).eps}) 147 | 148 | ''' 149 | Adam is an optimization algorithm that searches for a good optimum and introduces a squared-gradient correction. 150 | Besides correcting the weights and biases through backpropagation, it also keeps adjusting the learning rate while running. 151 | The adjustment adapts to the loss: a large loss gives a larger learning rate and a larger correction; a small loss gives a smaller correction and a smaller learning rate, 152 | but it never exceeds the learning rate set by the user. 153 | 154 | ''' 155 | self.optimizer_Adam = tf.train.AdamOptimizer() 156 | self.train_op_Adam = self.optimizer_Adam.minimize(self.loss) 157 | 158 | # tf session: configure Session run options && specify the GPU device 159 | self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, 160 | log_device_placement=True)) 161 | # initialize the model parameters 162 | init = tf.global_variables_initializer() 163 | self.sess.run(init) 164 | 165 | 166 | 167 | 168 | 169 | def initialize_NN(self, layers): 170 | weights = [] 171 | biases = [] 172 | num_layers = len(layers) 173 | for l in range(0,num_layers-1): 174 | W = self.xavier_init(size=[layers[l], layers[l+1]]) 175 | b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32) 176 | weights.append(W) 177 | biases.append(b) 178 | return weights, biases 179 | 180 | def xavier_init(self, size): 181 | in_dim = size[0] 182 | out_dim = size[1] 183 | xavier_stddev = np.sqrt(2/(in_dim + out_dim)) 184 | # draw truncated-normal random numbers; stddev is the standard deviation and values lie in [0 - 2*stddev, 0 + 2*stddev] 185 | return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32) 186 | 187 | def neural_net(self, X, weights, biases): 188 | num_layers = len(weights) + 1 189 | 190 | # map the input X to [-1, 1], giving H 191 | H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0 192 | for l in range(0,num_layers-2): 193 | W = weights[l] 194 | b = biases[l] 195 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 196 | W = weights[-1] 197 | b = biases[-1] 198 | Y = tf.add(tf.matmul(H, W), b) 199 | return Y 200 | 201 | def net_uv(self, x, t): 202 | X = tf.concat([x,t],1) 203 | 204 | uv = self.neural_net(X, self.weights, self.biases) 205 | 206 | 207 | return uv
208 | 209 | 210 | 211 | def net_f_uv(self, x, t): 212 | hh=0.02 213 | hhhh=1000 214 | u = self.net_uv(x,t) 215 | u_t = tf.gradients(u, t)[0] 216 | u_tt = tf.gradients(u_t, t)[0] 217 | u_x = tf.gradients(u, x)[0] 218 | u_xx = tf.gradients(u_x, x)[0] 219 | f_u = u_tt+u_xx-2*tf.exp(-(t - 0.5)**2/(2*hh**2))-(-(hhhh + x**2)*tf.exp(-(t - 0.5)**2/(2*hh**2))/hh**2 + (hhhh + x**2)*(4*(t - 0.5)**2)*tf.exp(-(t - 0.5)**2/(2*hh**2))/(4*hh**4)) 220 | return f_u 221 | 222 | def callback(self,loss,f_u_pred,f_u1_pred,f_u2_pred,f_u3_pred,u0_pred,uend_pred,u_ub_pred,u_lb_pred): 223 | self.losslossloss.append(loss) 224 | 225 | sss=self.hsadasjd 226 | if sss%1000==0: 227 | losssss =tf.reduce_mean(tf.square(f_u_pred)) 228 | array1 = losssss.eval(session=tf.Session()) 229 | 230 | losssss1 =tf.reduce_mean(tf.square(f_u1_pred)) 231 | array11 = losssss1.eval(session=tf.Session()) 232 | 233 | losssss2 =tf.reduce_mean(tf.square(f_u2_pred)) 234 | array21 = losssss2.eval(session=tf.Session()) 235 | 236 | losssss3 =tf.reduce_mean(tf.square(f_u3_pred)) 237 | array31 = losssss3.eval(session=tf.Session()) 238 | 239 | 240 | 241 | 242 | 243 | 244 | tf_dict ={self.x0_tf: self.x0, self.t0_tf: self.t0,self.uend_tf:self.uend,self.t1_tf:self.t1, 245 | self.u0_tf: self.u0,self.u_lb_tf:self.u_lb,self.u_ub_tf:self.u_ub, 246 | self.x_lb_tf: self.x_lb, self.t_lb_tf: self.t_lb, 247 | self.x_ub_tf: self.x_ub, self.t_ub_tf: self.t_ub, 248 | self.x_f1_tf: self.x_f1, self.t_f1_tf: self.t_f1, 249 | self.x_f2_tf: self.x_f2, self.t_f2_tf: self.t_f2, 250 | self.x_f3_tf: self.x_f3, self.t_f3_tf: self.t_f3, 251 | self.x_f_tf: self.x_f, self.t_f_tf: self.t_f} 252 | 253 | 254 | #self.loss111 = tf.pow((tf.reduce_mean(tf.square(self.u0_tf- self.u0_pred )) + \ 255 | #tf.reduce_mean(tf.square(self.uend_tf - self.uend_pred )) + \ 256 | #tf.reduce_mean(tf.square(self.u_ub_tf - self.u_ub_pred )) + \ 257 | # tf.reduce_mean(tf.square(self.u_lb_tf - self.u_lb_pred))),1) 258 | 259 | 260 | 261 | lossu0_tfu0_tf=self.u0_tf 262 | lossu0_tf=self.sess.run(lossu0_tfu0_tf, tf_dict) 263 | 264 | lossuend_tfuend_tf=self.uend_tf 265 | lossuend_tf=self.sess.run(lossuend_tfuend_tf, tf_dict) 266 | 267 | lossu_ub_tfu_ub_tf=self.u_ub_tf 268 | lossu_ub_tf=self.sess.run(lossu_ub_tfu_ub_tf, tf_dict) 269 | 270 | lossu_lb_tfu_lb_tf=self.u_lb_tf 271 | lossu_lb_tf=self.sess.run(lossu_lb_tfu_lb_tf, tf_dict) 272 | 273 | 274 | 275 | 276 | 277 | zkjxJXhzs = tf.reduce_mean(tf.square(u0_pred-lossu0_tf))+tf.reduce_mean(tf.square(uend_pred-lossuend_tf))+tf.reduce_mean(tf.square(u_ub_pred-lossu_ub_tf))+tf.reduce_mean(tf.square(u_lb_pred-lossu_lb_tf)) 278 | array4 = zkjxJXhzs.eval(session=tf.Session()) 279 | 280 | print('It: %d, L1: %.4e,L2: %.4e,L31: %.4e,L32: %.4e,L33: %.4e' % 281 | (sss,array4,array1,array11,array21,array31)) 282 | 283 | log1=open("log1.txt",mode = 'a+', encoding = 'utf-8') 284 | print(array11,file=log1) 285 | log1.close() 286 | 287 | log2=open("log2.txt",mode = 'a+', encoding = 'utf-8') 288 | print(array21,file=log2) 289 | log2.close() 290 | 291 | log3=open("log3.txt",mode = 'a+', encoding = 'utf-8') 292 | print(array31,file=log3) 293 | log3.close() 294 | sss=sss+1 295 | self.hsadasjd=sss 296 | 297 | 298 | 299 | 300 | 301 | def train(self, nIter): 302 | tf_dict = {self.x0_tf: self.x0, self.t0_tf: self.t0,self.uend_tf:self.uend,self.t1_tf:self.t1, 303 | self.u0_tf: self.u0,self.u_lb_tf:self.u_lb,self.u_ub_tf:self.u_ub, 304 | self.x_lb_tf: self.x_lb, self.t_lb_tf: self.t_lb, 305 | self.x_ub_tf: self.x_ub, self.t_ub_tf: self.t_ub, 306 | self.x_f1_tf: self.x_f1, 
self.t_f1_tf: self.t_f1, 307 | self.x_f2_tf: self.x_f2, self.t_f2_tf: self.t_f2, 308 | self.x_f3_tf: self.x_f3, self.t_f3_tf: self.t_f3, 309 | self.x_f_tf: self.x_f, self.t_f_tf: self.t_f} 310 | 311 | lossloss = [] 312 | lossloss1=[] 313 | lossloss2=[] 314 | 315 | start_time = time.time() 316 | loss_value11 = self.sess.run(self.loss, tf_dict) 317 | lossloss.append(loss_value11) 318 | for it in range(nIter): 319 | self.sess.run(self.train_op_Adam, tf_dict) 320 | if it % 100000 == 0: 321 | 322 | loss_value11 = self.sess.run(self.loss111, tf_dict) 323 | lossloss1.append(loss_value11) 324 | 325 | loss_value22 = self.sess.run(self.loss222, tf_dict) 326 | lossloss2.append(loss_value22) 327 | 328 | 329 | loss_value222 = self.sess.run(self.loss22, tf_dict) 330 | loss_value223 = self.sess.run(self.loss23, tf_dict) 331 | loss_value221 = self.sess.run(self.loss21, tf_dict) 332 | 333 | 334 | 335 | elapsed = time.time() - start_time 336 | 337 | print('It: %d, Loss1: %.4e,loss2: %.4e Loss31: %.4e,Loss32: %.4e,Loss33: %.4e,Time: %.2f' % 338 | (it, loss_value11,loss_value22,loss_value221,loss_value222,loss_value223,elapsed)) 339 | 340 | start_time = time.time() 341 | 342 | log1=open("log1.txt",mode = 'a+', encoding = 'utf-8') 343 | print(loss_value221,file=log1) 344 | log1.close() 345 | 346 | log2=open("log2.txt",mode = 'a+', encoding = 'utf-8') 347 | print(loss_value222,file=log2) 348 | log2.close() 349 | 350 | log3=open("log3.txt",mode = 'a+', encoding = 'utf-8') 351 | print(loss_value223,file=log3) 352 | log3.close() 353 | 354 | self.optimizer.minimize(self.sess, 355 | feed_dict = tf_dict, 356 | fetches = [self.loss,self.f_u_pred,self.f_u1_pred,self.f_u2_pred,self.f_u3_pred,self.u0_pred,self.uend_pred,self.u_ub_pred,self.u_lb_pred], 357 | loss_callback = self.callback 358 | ) 359 | 360 | 361 | 362 | 363 | return lossloss 364 | 365 | def predict(self, X_star): 366 | 367 | tf_dict = {self.x0_tf: X_star[:,0:1], self.t0_tf: X_star[:,1:2]} 368 | 369 | u_star = self.sess.run(self.u0_pred, tf_dict) 370 | 371 | 372 | tf_dict = {self.x_f_tf: X_star[:,0:1], self.t_f_tf: X_star[:,1:2]} 373 | 374 | f_u_star = self.sess.run(self.f_u_pred, tf_dict) 375 | 376 | return u_star,f_u_star 377 | def loss_show(self): 378 | return self.losslossloss 379 | 380 | 381 | # In[4]: 382 | 383 | 384 | def heatsolution(x,t): 385 | hh=0.02 386 | hhhh=1000 387 | return (hhhh+x*x)*math.exp(-(t-0.5)**2/2/hh/hh)+1 388 | 389 | 390 | # In[5]: 391 | 392 | 393 | if __name__ == "__main__": 394 | 395 | 396 | # Domain bounds 397 | lb = np.array([0, 0]) 398 | ub = np.array([1, 1]) 399 | 400 | N0 = 1200 # number of initial points 401 | N_b = 1200 # number of boundary points 402 | N_f = 30000 # number of collocation points 403 | layers = [2,50,50,50,50,1] 404 | # build the exact (reference) solution 405 | x=np.linspace(0,1,1200).flatten()[:,None] 406 | t=np.linspace(0,1,1200).flatten()[:,None] 407 | res=np.zeros([len(x),len(t)]) 408 | for i in range(len(x)): 409 | for j in range(len(t)): 410 | res[i,j]=heatsolution(x[i],t[j]) 411 | 412 | X, T = np.meshgrid(x, t) 413 | X_star = np.hstack((X.flatten()[:,None], T.flatten()[:,None])) 414 | # pick N0 initial points 415 | idx_x = np.random.choice(x.shape[0], N0, replace=False) 416 | x0 = x[idx_x,:] 417 | u0 = res[idx_x,0:1] 418 | uend=res[idx_x,-1] 419 | # pick N_b boundary points 420 | idx_t = np.random.choice(t.shape[0], N_b, replace=False) 421 | tb = t[idx_t,:] 422 | u_lb = res[0,idx_t] 423 | u_ub=res[-1,idx_t] 424 | # N_f random collocation points: first column is position x, second column is time t 425 | X_f = lb + (ub-lb)*lhs(2, N_f) 426 | x0=np.array(x0).flatten()[:,None] 427 | u0=np.array(u0).flatten()[:,None] 428 | uend=np.array(uend).flatten()[:,None] 429 |
u_lb=np.array(u_lb).flatten()[:,None] 430 | u_ub=np.array(u_ub).flatten()[:,None] 431 | 432 | 433 | # In[6]: 434 | 435 | 436 | model = PhysicsInformedNN(x0, u0,uend,tb, X_f, layers, lb, ub,u_lb,u_ub) 437 | 438 | 439 | # In[7]: 440 | 441 | 442 | LOSS=model.train(1000000) 443 | 444 | 445 | # In[8]: 446 | 447 | 448 | X_star = np.hstack((X.flatten()[:,None], T.flatten()[:,None])) 449 | u_pred, f_u_pred = model.predict(X_star) 450 | u_star = res.T.flatten()[:,None] 451 | error_u1 = np.linalg.norm(u_star-u_pred,2)/np.linalg.norm(u_star,2) 452 | error_u2 = np.linalg.norm(u_star-u_pred,1)/len(u_star) 453 | error_u3 = np.linalg.norm(u_star-u_pred,np.inf) 454 | print('二范数Error u: %e' % (error_u1)) 455 | print('平均绝对Error u: %e' % (error_u2)) 456 | print('无穷范数Error u: %e' % (error_u3)) 457 | 458 | 459 | # 460 | 461 | # In[14]: 462 | 463 | 464 | scipy.io.savemat("uu.mat", {'u': u_pred}) 465 | 466 | 467 | # In[ ]: 468 | 469 | 470 | 471 | 472 | 473 | # In[ ]: 474 | 475 | 476 | 477 | 478 | 479 | # In[ ]: 480 | 481 | 482 | 483 | 484 | 485 | # In[ ]: 486 | 487 | 488 | 489 | 490 | 491 | # In[ ]: 492 | 493 | 494 | 495 | 496 | 497 | # In[ ]: 498 | 499 | 500 | 501 | 502 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/PINN(1000k Adam)/log1.txt: -------------------------------------------------------------------------------- 1 | 266.46286 2 | 2412395300.0 3 | 5830284000.0 4 | 1095577500.0 5 | 1580149200.0 6 | 16425076.0 7 | 145541.33 8 | 23070.188 9 | 14965.495 10 | 6310.325 11 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/PINN(1000k Adam)/log2.txt: -------------------------------------------------------------------------------- 1 | 415697440000.0 2 | 896610200000.0 3 | 2706567600000.0 4 | 126235460000000.0 5 | 5811057700000.0 6 | 244598800000.0 7 | 5414341000.0 8 | 5739929.0 9 | 5845838.5 10 | 2302399.0 11 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/PINN(1000k Adam)/log3.txt: -------------------------------------------------------------------------------- 1 | 266.64395 2 | 125642456.0 3 | 1058244600.0 4 | 889975940.0 5 | 1215703900.0 6 | 1699751200.0 7 | 116017840.0 8 | 528774.0 9 | 106556.45 10 | 14748.017 11 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/PINN(1000k Adam)/uu.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A problem with dramatic variations in different subdomains/PINN(1000k Adam)/uu.mat -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/PINN/log1.txt: -------------------------------------------------------------------------------- 1 | 266.31244 2 | 267.97177 3 | 392.4081 4 | 10231.011 5 | 79872.11 6 | 698116.25 7 | 208560.98 8 | 336108.7 9 | 539778.3 10 | 264458.1 11 | 227830.52 12 | 267809.28 13 | 617782.9 14 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/PINN/log2.txt: -------------------------------------------------------------------------------- 1 | 415697830000.0 2 | 403276460000.0 3 | 
603240700000.0 4 | 805396150000.0 5 | 1039071000000.0 6 | 1230187000000.0 7 | 1521170500000.0 8 | 1802750500000.0 9 | 1803662700000.0 10 | 1994955600000.0 11 | 2061963200000.0 12 | 2240154000000.0 13 | 2295869200000.0 14 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/PINN/log3.txt: -------------------------------------------------------------------------------- 1 | 266.48865 2 | 211.71913 3 | 52145.168 4 | 48806.836 5 | 35973.902 6 | 49144.793 7 | 272969.62 8 | 423889.9 9 | 4148.786 10 | 5620.063 11 | 6253.7993 12 | 26305.809 13 | 178321.81 14 | -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/PINN/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /A problem with dramatic variations in different subdomains/PINN/uu.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A problem with dramatic variations in different subdomains/PINN/uu.mat -------------------------------------------------------------------------------- /A typical multi-scale problem/MFF+hard constraint/1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A typical multi-scale problem/MFF+hard constraint/1.mat -------------------------------------------------------------------------------- /A typical multi-scale problem/MFF+hard constraint/Compute_Jacobian.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sat Jul 11 17:45:07 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | from __future__ import absolute_import 9 | from __future__ import division 10 | from __future__ import print_function 11 | import tensorflow as tf 12 | from tensorflow.python.framework import ops 13 | from tensorflow.python.ops import array_ops 14 | from tensorflow.python.ops import check_ops 15 | from tensorflow.python.ops import gradients_impl as gradient_ops 16 | from tensorflow.python.ops.parallel_for import control_flow_ops 17 | from tensorflow.python.util import nest 18 | 19 | def jacobian(output, inputs, use_pfor=True, parallel_iterations=None): 20 | """Computes jacobian of `output` w.r.t. `inputs`. 21 | Args: 22 | output: A tensor. 23 | inputs: A tensor or a nested structure of tensor objects. 24 | use_pfor: If true, uses pfor for computing the jacobian. Else uses 25 | tf.while_loop. 
26 | parallel_iterations: A knob to control how many iterations and dispatched in 27 | parallel. This knob can be used to control the total memory usage. 28 | Returns: 29 | A tensor or a nested structure of tensors with the same structure as 30 | `inputs`. Each entry is the jacobian of `output` w.r.t. to the corresponding 31 | value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has 32 | shape [x_1, ..., x_m], the corresponding jacobian has shape 33 | [y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is 34 | sparse (IndexedSlices), jacobian function currently makes it dense and 35 | returns a Tensor instead. This may change in the future. 36 | """ 37 | flat_inputs = nest.flatten(inputs) 38 | output_tensor_shape = output.shape 39 | output_shape = array_ops.shape(output) 40 | output = array_ops.reshape(output, [-1]) 41 | 42 | def loop_fn(i): 43 | y = array_ops.gather(output, i) 44 | return gradient_ops.gradients(y, flat_inputs, unconnected_gradients=tf.UnconnectedGradients.ZERO) 45 | 46 | try: 47 | output_size = int(output.shape[0]) 48 | except TypeError: 49 | output_size = array_ops.shape(output)[0] 50 | 51 | if use_pfor: 52 | pfor_outputs = control_flow_ops.pfor( 53 | loop_fn, output_size, parallel_iterations=parallel_iterations) 54 | else: 55 | pfor_outputs = control_flow_ops.for_loop( 56 | loop_fn, 57 | [output.dtype] * len(flat_inputs), 58 | output_size, 59 | parallel_iterations=parallel_iterations) 60 | 61 | for i, out in enumerate(pfor_outputs): 62 | if isinstance(out, ops.Tensor): 63 | new_shape = array_ops.concat( 64 | [output_shape, array_ops.shape(out)[1:]], axis=0) 65 | out = array_ops.reshape(out, new_shape) 66 | out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape)) 67 | pfor_outputs[i] = out 68 | 69 | return nest.pack_sequence_as(inputs, pfor_outputs) -------------------------------------------------------------------------------- /A typical multi-scale problem/MFF/1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A typical multi-scale problem/MFF/1.mat -------------------------------------------------------------------------------- /A typical multi-scale problem/MFF/Compute_Jacobian.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sat Jul 11 17:45:07 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | from __future__ import absolute_import 9 | from __future__ import division 10 | from __future__ import print_function 11 | import tensorflow as tf 12 | from tensorflow.python.framework import ops 13 | from tensorflow.python.ops import array_ops 14 | from tensorflow.python.ops import check_ops 15 | from tensorflow.python.ops import gradients_impl as gradient_ops 16 | from tensorflow.python.ops.parallel_for import control_flow_ops 17 | from tensorflow.python.util import nest 18 | 19 | def jacobian(output, inputs, use_pfor=True, parallel_iterations=None): 20 | """Computes jacobian of `output` w.r.t. `inputs`. 21 | Args: 22 | output: A tensor. 23 | inputs: A tensor or a nested structure of tensor objects. 24 | use_pfor: If true, uses pfor for computing the jacobian. Else uses 25 | tf.while_loop. 26 | parallel_iterations: A knob to control how many iterations and dispatched in 27 | parallel. This knob can be used to control the total memory usage. 
28 | Returns: 29 | A tensor or a nested structure of tensors with the same structure as 30 | `inputs`. Each entry is the jacobian of `output` w.r.t. to the corresponding 31 | value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has 32 | shape [x_1, ..., x_m], the corresponding jacobian has shape 33 | [y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is 34 | sparse (IndexedSlices), jacobian function currently makes it dense and 35 | returns a Tensor instead. This may change in the future. 36 | """ 37 | flat_inputs = nest.flatten(inputs) 38 | output_tensor_shape = output.shape 39 | output_shape = array_ops.shape(output) 40 | output = array_ops.reshape(output, [-1]) 41 | 42 | def loop_fn(i): 43 | y = array_ops.gather(output, i) 44 | return gradient_ops.gradients(y, flat_inputs, unconnected_gradients=tf.UnconnectedGradients.ZERO) 45 | 46 | try: 47 | output_size = int(output.shape[0]) 48 | except TypeError: 49 | output_size = array_ops.shape(output)[0] 50 | 51 | if use_pfor: 52 | pfor_outputs = control_flow_ops.pfor( 53 | loop_fn, output_size, parallel_iterations=parallel_iterations) 54 | else: 55 | pfor_outputs = control_flow_ops.for_loop( 56 | loop_fn, 57 | [output.dtype] * len(flat_inputs), 58 | output_size, 59 | parallel_iterations=parallel_iterations) 60 | 61 | for i, out in enumerate(pfor_outputs): 62 | if isinstance(out, ops.Tensor): 63 | new_shape = array_ops.concat( 64 | [output_shape, array_ops.shape(out)[1:]], axis=0) 65 | out = array_ops.reshape(out, new_shape) 66 | out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape)) 67 | pfor_outputs[i] = out 68 | 69 | return nest.pack_sequence_as(inputs, pfor_outputs) -------------------------------------------------------------------------------- /A typical multi-scale problem/MMPINN/Ref/1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A typical multi-scale problem/MMPINN/Ref/1.mat -------------------------------------------------------------------------------- /A typical multi-scale problem/MMPINN/Ref/Compute_Jacobian.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sat Jul 11 17:45:07 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | from __future__ import absolute_import 9 | from __future__ import division 10 | from __future__ import print_function 11 | import tensorflow as tf 12 | from tensorflow.python.framework import ops 13 | from tensorflow.python.ops import array_ops 14 | from tensorflow.python.ops import check_ops 15 | from tensorflow.python.ops import gradients_impl as gradient_ops 16 | from tensorflow.python.ops.parallel_for import control_flow_ops 17 | from tensorflow.python.util import nest 18 | 19 | def jacobian(output, inputs, use_pfor=True, parallel_iterations=None): 20 | """Computes jacobian of `output` w.r.t. `inputs`. 21 | Args: 22 | output: A tensor. 23 | inputs: A tensor or a nested structure of tensor objects. 24 | use_pfor: If true, uses pfor for computing the jacobian. Else uses 25 | tf.while_loop. 26 | parallel_iterations: A knob to control how many iterations and dispatched in 27 | parallel. This knob can be used to control the total memory usage. 28 | Returns: 29 | A tensor or a nested structure of tensors with the same structure as 30 | `inputs`. Each entry is the jacobian of `output` w.r.t. to the corresponding 31 | value in `inputs`. 
If output has shape [y_1, ..., y_n] and inputs_i has 32 | shape [x_1, ..., x_m], the corresponding jacobian has shape 33 | [y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is 34 | sparse (IndexedSlices), jacobian function currently makes it dense and 35 | returns a Tensor instead. This may change in the future. 36 | """ 37 | flat_inputs = nest.flatten(inputs) 38 | output_tensor_shape = output.shape 39 | output_shape = array_ops.shape(output) 40 | output = array_ops.reshape(output, [-1]) 41 | 42 | def loop_fn(i): 43 | y = array_ops.gather(output, i) 44 | return gradient_ops.gradients(y, flat_inputs, unconnected_gradients=tf.UnconnectedGradients.ZERO) 45 | 46 | try: 47 | output_size = int(output.shape[0]) 48 | except TypeError: 49 | output_size = array_ops.shape(output)[0] 50 | 51 | if use_pfor: 52 | pfor_outputs = control_flow_ops.pfor( 53 | loop_fn, output_size, parallel_iterations=parallel_iterations) 54 | else: 55 | pfor_outputs = control_flow_ops.for_loop( 56 | loop_fn, 57 | [output.dtype] * len(flat_inputs), 58 | output_size, 59 | parallel_iterations=parallel_iterations) 60 | 61 | for i, out in enumerate(pfor_outputs): 62 | if isinstance(out, ops.Tensor): 63 | new_shape = array_ops.concat( 64 | [output_shape, array_ops.shape(out)[1:]], axis=0) 65 | out = array_ops.reshape(out, new_shape) 66 | out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape)) 67 | pfor_outputs[i] = out 68 | 69 | return nest.pack_sequence_as(inputs, pfor_outputs) -------------------------------------------------------------------------------- /A typical multi-scale problem/MMPINN/others/1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A typical multi-scale problem/MMPINN/others/1.mat -------------------------------------------------------------------------------- /A typical multi-scale problem/MMPINN/others/Compute_Jacobian.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sat Jul 11 17:45:07 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | from __future__ import absolute_import 9 | from __future__ import division 10 | from __future__ import print_function 11 | import tensorflow as tf 12 | from tensorflow.python.framework import ops 13 | from tensorflow.python.ops import array_ops 14 | from tensorflow.python.ops import check_ops 15 | from tensorflow.python.ops import gradients_impl as gradient_ops 16 | from tensorflow.python.ops.parallel_for import control_flow_ops 17 | from tensorflow.python.util import nest 18 | 19 | def jacobian(output, inputs, use_pfor=True, parallel_iterations=None): 20 | """Computes jacobian of `output` w.r.t. `inputs`. 21 | Args: 22 | output: A tensor. 23 | inputs: A tensor or a nested structure of tensor objects. 24 | use_pfor: If true, uses pfor for computing the jacobian. Else uses 25 | tf.while_loop. 26 | parallel_iterations: A knob to control how many iterations and dispatched in 27 | parallel. This knob can be used to control the total memory usage. 28 | Returns: 29 | A tensor or a nested structure of tensors with the same structure as 30 | `inputs`. Each entry is the jacobian of `output` w.r.t. to the corresponding 31 | value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has 32 | shape [x_1, ..., x_m], the corresponding jacobian has shape 33 | [y_1, ..., y_n, x_1, ..., x_m]. 
Note that in cases where the gradient is 34 | sparse (IndexedSlices), jacobian function currently makes it dense and 35 | returns a Tensor instead. This may change in the future. 36 | """ 37 | flat_inputs = nest.flatten(inputs) 38 | output_tensor_shape = output.shape 39 | output_shape = array_ops.shape(output) 40 | output = array_ops.reshape(output, [-1]) 41 | 42 | def loop_fn(i): 43 | y = array_ops.gather(output, i) 44 | return gradient_ops.gradients(y, flat_inputs, unconnected_gradients=tf.UnconnectedGradients.ZERO) 45 | 46 | try: 47 | output_size = int(output.shape[0]) 48 | except TypeError: 49 | output_size = array_ops.shape(output)[0] 50 | 51 | if use_pfor: 52 | pfor_outputs = control_flow_ops.pfor( 53 | loop_fn, output_size, parallel_iterations=parallel_iterations) 54 | else: 55 | pfor_outputs = control_flow_ops.for_loop( 56 | loop_fn, 57 | [output.dtype] * len(flat_inputs), 58 | output_size, 59 | parallel_iterations=parallel_iterations) 60 | 61 | for i, out in enumerate(pfor_outputs): 62 | if isinstance(out, ops.Tensor): 63 | new_shape = array_ops.concat( 64 | [output_shape, array_ops.shape(out)[1:]], axis=0) 65 | out = array_ops.reshape(out, new_shape) 66 | out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape)) 67 | pfor_outputs[i] = out 68 | 69 | return nest.pack_sequence_as(inputs, pfor_outputs) -------------------------------------------------------------------------------- /A typical multi-scale problem/PINN(1000k Adam)/1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A typical multi-scale problem/PINN(1000k Adam)/1.mat -------------------------------------------------------------------------------- /A typical multi-scale problem/PINN(1000k Adam)/Compute_Jacobian.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sat Jul 11 17:45:07 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | from __future__ import absolute_import 9 | from __future__ import division 10 | from __future__ import print_function 11 | import tensorflow as tf 12 | from tensorflow.python.framework import ops 13 | from tensorflow.python.ops import array_ops 14 | from tensorflow.python.ops import check_ops 15 | from tensorflow.python.ops import gradients_impl as gradient_ops 16 | from tensorflow.python.ops.parallel_for import control_flow_ops 17 | from tensorflow.python.util import nest 18 | 19 | def jacobian(output, inputs, use_pfor=True, parallel_iterations=None): 20 | """Computes jacobian of `output` w.r.t. `inputs`. 21 | Args: 22 | output: A tensor. 23 | inputs: A tensor or a nested structure of tensor objects. 24 | use_pfor: If true, uses pfor for computing the jacobian. Else uses 25 | tf.while_loop. 26 | parallel_iterations: A knob to control how many iterations and dispatched in 27 | parallel. This knob can be used to control the total memory usage. 28 | Returns: 29 | A tensor or a nested structure of tensors with the same structure as 30 | `inputs`. Each entry is the jacobian of `output` w.r.t. to the corresponding 31 | value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has 32 | shape [x_1, ..., x_m], the corresponding jacobian has shape 33 | [y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is 34 | sparse (IndexedSlices), jacobian function currently makes it dense and 35 | returns a Tensor instead. This may change in the future. 
36 | """ 37 | flat_inputs = nest.flatten(inputs) 38 | output_tensor_shape = output.shape 39 | output_shape = array_ops.shape(output) 40 | output = array_ops.reshape(output, [-1]) 41 | 42 | def loop_fn(i): 43 | y = array_ops.gather(output, i) 44 | return gradient_ops.gradients(y, flat_inputs, unconnected_gradients=tf.UnconnectedGradients.ZERO) 45 | 46 | try: 47 | output_size = int(output.shape[0]) 48 | except TypeError: 49 | output_size = array_ops.shape(output)[0] 50 | 51 | if use_pfor: 52 | pfor_outputs = control_flow_ops.pfor( 53 | loop_fn, output_size, parallel_iterations=parallel_iterations) 54 | else: 55 | pfor_outputs = control_flow_ops.for_loop( 56 | loop_fn, 57 | [output.dtype] * len(flat_inputs), 58 | output_size, 59 | parallel_iterations=parallel_iterations) 60 | 61 | for i, out in enumerate(pfor_outputs): 62 | if isinstance(out, ops.Tensor): 63 | new_shape = array_ops.concat( 64 | [output_shape, array_ops.shape(out)[1:]], axis=0) 65 | out = array_ops.reshape(out, new_shape) 66 | out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape)) 67 | pfor_outputs[i] = out 68 | 69 | return nest.pack_sequence_as(inputs, pfor_outputs) -------------------------------------------------------------------------------- /A typical multi-scale problem/PINN/1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/A typical multi-scale problem/PINN/1.mat -------------------------------------------------------------------------------- /A typical multi-scale problem/PINN/Compute_Jacobian.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Sat Jul 11 17:45:07 2020 4 | 5 | @author: sifan 6 | """ 7 | 8 | from __future__ import absolute_import 9 | from __future__ import division 10 | from __future__ import print_function 11 | import tensorflow as tf 12 | from tensorflow.python.framework import ops 13 | from tensorflow.python.ops import array_ops 14 | from tensorflow.python.ops import check_ops 15 | from tensorflow.python.ops import gradients_impl as gradient_ops 16 | from tensorflow.python.ops.parallel_for import control_flow_ops 17 | from tensorflow.python.util import nest 18 | 19 | def jacobian(output, inputs, use_pfor=True, parallel_iterations=None): 20 | """Computes jacobian of `output` w.r.t. `inputs`. 21 | Args: 22 | output: A tensor. 23 | inputs: A tensor or a nested structure of tensor objects. 24 | use_pfor: If true, uses pfor for computing the jacobian. Else uses 25 | tf.while_loop. 26 | parallel_iterations: A knob to control how many iterations and dispatched in 27 | parallel. This knob can be used to control the total memory usage. 28 | Returns: 29 | A tensor or a nested structure of tensors with the same structure as 30 | `inputs`. Each entry is the jacobian of `output` w.r.t. to the corresponding 31 | value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has 32 | shape [x_1, ..., x_m], the corresponding jacobian has shape 33 | [y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is 34 | sparse (IndexedSlices), jacobian function currently makes it dense and 35 | returns a Tensor instead. This may change in the future. 
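  Example (illustrative; `u` and `x` here are placeholder names, not from this
  repository): if `u` has shape [N, 1] and `x` has shape [N, 2], then
  `jacobian(u, x)` returns a tensor of shape [N, 1, N, 2] whose entry
  [i, 0, j, k] is the derivative of u[i, 0] with respect to x[j, k].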
36 | """ 37 | flat_inputs = nest.flatten(inputs) 38 | output_tensor_shape = output.shape 39 | output_shape = array_ops.shape(output) 40 | output = array_ops.reshape(output, [-1]) 41 | 42 | def loop_fn(i): 43 | y = array_ops.gather(output, i) 44 | return gradient_ops.gradients(y, flat_inputs, unconnected_gradients=tf.UnconnectedGradients.ZERO) 45 | 46 | try: 47 | output_size = int(output.shape[0]) 48 | except TypeError: 49 | output_size = array_ops.shape(output)[0] 50 | 51 | if use_pfor: 52 | pfor_outputs = control_flow_ops.pfor( 53 | loop_fn, output_size, parallel_iterations=parallel_iterations) 54 | else: 55 | pfor_outputs = control_flow_ops.for_loop( 56 | loop_fn, 57 | [output.dtype] * len(flat_inputs), 58 | output_size, 59 | parallel_iterations=parallel_iterations) 60 | 61 | for i, out in enumerate(pfor_outputs): 62 | if isinstance(out, ops.Tensor): 63 | new_shape = array_ops.concat( 64 | [output_shape, array_ops.shape(out)[1:]], axis=0) 65 | out = array_ops.reshape(out, new_shape) 66 | out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape)) 67 | pfor_outputs[i] = out 68 | 69 | return nest.pack_sequence_as(inputs, pfor_outputs) -------------------------------------------------------------------------------- /Helmholtz equation/Hard Constraint/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /Helmholtz equation/LRA with IFNN/Helmholtz2D_model_tf.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import timeit 4 | 5 | 6 | class Sampler: 7 | # Initialize the class 8 | def __init__(self, dim, coords, func, name=None): 9 | self.dim = dim 10 | self.coords = coords 11 | self.func = func 12 | self.name = name 13 | 14 | def sample(self, N): 15 | x = self.coords[0:1, :] + (self.coords[1:2, :] - self.coords[0:1, :]) * np.random.rand(N, self.dim) 16 | y = self.func(x) 17 | return x, y 18 | 19 | class Helmholtz2D: 20 | def __init__(self, layers, operator, ics_sampler, bcs_sampler, res_sampler, lam, model, stiff_ratio): 21 | # Normalization constants 22 | X, _ = res_sampler.sample(np.int32(1e5)) 23 | self.mu_X, self.sigma_X = X.mean(0), X.std(0) 24 | self.mu_x1, self.sigma_x1 = self.mu_X[0], self.sigma_X[0] 25 | self.mu_x2, self.sigma_x2 = self.mu_X[1], self.sigma_X[1] 26 | 27 | # Samplers 28 | self.operator = operator 29 | self.ics_sampler = ics_sampler 30 | self.bcs_sampler = bcs_sampler 31 | self.res_sampler = res_sampler 32 | 33 | # Helmoholtz constant 34 | self.lam = tf.constant(lam, dtype=tf.float32) 35 | 36 | # Mode 37 | self.model = model 38 | 39 | # Record stiff ratio 40 | self.stiff_ratio = stiff_ratio 41 | 42 | # Adaptive constant 43 | self.beta = 0.9 44 | self.adaptive_constant_val = np.array(1.0) 45 | 46 | # Initialize network weights and biases 47 | self.layers = layers 48 | self.weights, self.biases = self.initialize_NN(layers) 49 | 50 | if model in ['M3', 'M4']: 51 | # Initialize encoder weights and biases 52 | self.encoder_weights_1 = self.xavier_init([2, layers[1]]) 53 | self.encoder_biases_1 = self.xavier_init([1, layers[1]]) 54 | 55 | self.encoder_weights_2 = self.xavier_init([2, layers[1]]) 56 | self.encoder_biases_2 = self.xavier_init([1, layers[1]]) 57 
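            # Illustrative note: for models 'M3'/'M4' these two encoder layers feed the
            # modified forward pass defined below, where every hidden layer is gated as
            #   H <- tanh(H W + b) * encoder_1 + (1 - tanh(H W + b)) * encoder_2,
            # so two separate encodings of the network input modulate each hidden layer.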
| 58 | # Define Tensorflow session 59 | self.sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 60 | 61 | # Define placeholders and computational graph 62 | self.x1_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 63 | self.x2_u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 64 | self.u_tf = tf.placeholder(tf.float32, shape=(None, 1)) 65 | 66 | self.x1_bc1_tf = tf.placeholder(tf.float32, shape=(None, 1)) 67 | self.x2_bc1_tf = tf.placeholder(tf.float32, shape=(None, 1)) 68 | self.u_bc1_tf = tf.placeholder(tf.float32, shape=(None, 1)) 69 | 70 | self.x1_bc2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 71 | self.x2_bc2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 72 | self.u_bc2_tf = tf.placeholder(tf.float32, shape=(None, 1)) 73 | 74 | self.x1_bc3_tf = tf.placeholder(tf.float32, shape=(None, 1)) 75 | self.x2_bc3_tf = tf.placeholder(tf.float32, shape=(None, 1)) 76 | self.u_bc3_tf = tf.placeholder(tf.float32, shape=(None, 1)) 77 | 78 | self.x1_bc4_tf = tf.placeholder(tf.float32, shape=(None, 1)) 79 | self.x2_bc4_tf = tf.placeholder(tf.float32, shape=(None, 1)) 80 | self.u_bc4_tf = tf.placeholder(tf.float32, shape=(None, 1)) 81 | 82 | self.x1_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 83 | self.x2_r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 84 | self.r_tf = tf.placeholder(tf.float32, shape=(None, 1)) 85 | 86 | # Define placeholder for adaptive constant 87 | self.adaptive_constant_tf = tf.placeholder(tf.float32, shape=self.adaptive_constant_val.shape) 88 | 89 | # Evaluate predictions 90 | self.u_bc1_pred = self.net_u(self.x1_bc1_tf, self.x2_bc1_tf) 91 | self.u_bc2_pred = self.net_u(self.x1_bc2_tf, self.x2_bc2_tf) 92 | self.u_bc3_pred = self.net_u(self.x1_bc3_tf, self.x2_bc3_tf) 93 | self.u_bc4_pred = self.net_u(self.x1_bc4_tf, self.x2_bc4_tf) 94 | 95 | self.u_pred = self.net_u(self.x1_u_tf, self.x2_u_tf) 96 | self.r_pred = self.net_r(self.x1_r_tf, self.x2_r_tf) 97 | 98 | # Boundary loss 99 | self.loss_bc1 = tf.reduce_mean(tf.square(self.u_bc1_tf - self.u_bc1_pred)) 100 | self.loss_bc2 = tf.reduce_mean(tf.square(self.u_bc2_tf - self.u_bc2_pred)) 101 | self.loss_bc3 = tf.reduce_mean(tf.square(self.u_bc3_tf - self.u_bc3_pred)) 102 | self.loss_bc4 = tf.reduce_mean(tf.square(self.u_bc4_tf - self.u_bc4_pred)) 103 | self.loss_bcs = self.adaptive_constant_tf * (self.loss_bc1 + self.loss_bc2 + self.loss_bc3 + self.loss_bc4) 104 | 105 | # Residual loss 106 | self.loss_res = tf.reduce_mean(tf.square(self.r_tf - self.r_pred)) 107 | 108 | # Total loss 109 | self.loss = self.loss_res + self.loss_bcs 110 | 111 | # Define optimizer with learning rate schedule 112 | self.global_step = tf.Variable(0, trainable=False) 113 | starter_learning_rate = 1e-3 114 | self.learning_rate = tf.train.exponential_decay(starter_learning_rate, self.global_step, 115 | 1000, 0.9, staircase=False) 116 | # Passing global_step to minimize() will increment it at each step. 
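        # Illustrative note: with starter_learning_rate = 1e-3, decay steps = 1000,
        # decay rate = 0.9 and staircase=False, the schedule above is
        # lr(step) = 1e-3 * 0.9**(step / 1000), i.e. roughly 3.5e-4 after 10k steps
        # and 2.7e-8 after 100k steps. The adaptive_constant_tf placeholder rescales
        # only the boundary loss; for models 'M2'/'M4' its value is refreshed during
        # training from the gradient ratio max|grad loss_res| / mean|grad loss_bcs|
        # with an exponential moving average (beta = 0.9), see train() below.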
117 | self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss, global_step=self.global_step) 118 | 119 | # Logger 120 | self.loss_bcs_log = [] 121 | self.loss_res_log = [] 122 | self.saver = tf.train.Saver() 123 | 124 | # Generate dicts for gradients storage 125 | self.dict_gradients_res_layers = self.generate_grad_dict(self.layers) 126 | self.dict_gradients_bcs_layers = self.generate_grad_dict(self.layers) 127 | 128 | # Gradients Storage 129 | self.grad_res = [] 130 | self.grad_bcs = [] 131 | for i in range(len(self.layers) - 1): 132 | self.grad_res.append(tf.gradients(self.loss_res, self.weights[i])[0]) 133 | self.grad_bcs.append(tf.gradients(self.loss_bcs, self.weights[i])[0]) 134 | 135 | # Compute and store the adaptive constant 136 | self.adpative_constant_log = [] 137 | self.adaptive_constant_list = [] 138 | 139 | self.max_grad_res_list = [] 140 | self.mean_grad_bcs_list = [] 141 | 142 | for i in range(len(self.layers) - 1): 143 | self.max_grad_res_list.append(tf.reduce_max(tf.abs(self.grad_res[i]))) 144 | self.mean_grad_bcs_list.append(tf.reduce_mean(tf.abs(self.grad_bcs[i]))) 145 | 146 | self.max_grad_res = tf.reduce_max(tf.stack(self.max_grad_res_list)) 147 | self.mean_grad_bcs = tf.reduce_mean(tf.stack(self.mean_grad_bcs_list)) 148 | self.adaptive_constant = self.max_grad_res / self.mean_grad_bcs 149 | 150 | # Stiff Ratio 151 | if self.stiff_ratio: 152 | self.Hessian, self.Hessian_bcs, self.Hessian_res = self.get_H_op() 153 | self.eigenvalues, _ = tf.linalg.eigh(self.Hessian) 154 | self.eigenvalues_bcs, _ = tf.linalg.eigh(self.Hessian_bcs) 155 | self.eigenvalues_res, _ = tf.linalg.eigh(self.Hessian_res) 156 | 157 | self.eigenvalue_log = [] 158 | self.eigenvalue_bcs_log = [] 159 | self.eigenvalue_res_log = [] 160 | 161 | # Initialize Tensorflow variables 162 | init = tf.global_variables_initializer() 163 | self.sess.run(init) 164 | 165 | # Create dictionary to store gradients 166 | def generate_grad_dict(self, layers): 167 | num = len(layers) - 1 168 | grad_dict = {} 169 | for i in range(num): 170 | grad_dict['layer_{}'.format(i + 1)] = [] 171 | return grad_dict 172 | 173 | # Save gradients 174 | def save_gradients(self, tf_dict): 175 | num_layers = len(self.layers) 176 | for i in range(num_layers - 1): 177 | grad_res_value, grad_bcs_value = self.sess.run([self.grad_res[i], self.grad_bcs[i]], feed_dict=tf_dict) 178 | 179 | # save gradients of loss_res and loss_bcs 180 | self.dict_gradients_res_layers['layer_' + str(i + 1)].append(grad_res_value.flatten()) 181 | self.dict_gradients_bcs_layers['layer_' + str(i + 1)].append(grad_bcs_value.flatten()) 182 | return None 183 | 184 | # Compute the Hessian 185 | def flatten(self, vectors): 186 | return tf.concat([tf.reshape(v, [-1]) for v in vectors], axis=0) 187 | 188 | def get_Hv(self, v): 189 | loss_gradients = self.flatten(tf.gradients(self.loss, self.weights)) 190 | vprod = tf.math.multiply(loss_gradients, 191 | tf.stop_gradient(v)) 192 | Hv_op = self.flatten(tf.gradients(vprod, self.weights)) 193 | return Hv_op 194 | 195 | def get_Hv_res(self, v): 196 | loss_gradients = self.flatten(tf.gradients(self.loss_res, 197 | self.weights)) 198 | vprod = tf.math.multiply(loss_gradients, 199 | tf.stop_gradient(v)) 200 | Hv_op = self.flatten(tf.gradients(vprod, 201 | self.weights)) 202 | return Hv_op 203 | 204 | def get_Hv_bcs(self, v): 205 | loss_gradients = self.flatten(tf.gradients(self.loss_bcs, self.weights)) 206 | vprod = tf.math.multiply(loss_gradients, 207 | tf.stop_gradient(v)) 208 | Hv_op = 
self.flatten(tf.gradients(vprod, self.weights)) 209 | return Hv_op 210 | 211 | def get_H_op(self): 212 | self.P = self.flatten(self.weights).get_shape().as_list()[0] 213 | H = tf.map_fn(self.get_Hv, tf.eye(self.P, self.P), 214 | dtype='float32') 215 | H_bcs = tf.map_fn(self.get_Hv_bcs, tf.eye(self.P, self.P), 216 | dtype='float32') 217 | H_res = tf.map_fn(self.get_Hv_res, tf.eye(self.P, self.P), 218 | dtype='float32') 219 | 220 | return H, H_bcs, H_res 221 | 222 | # Xavier initialization 223 | def xavier_init(self,size): 224 | in_dim = size[0] 225 | out_dim = size[1] 226 | xavier_stddev = 1. / np.sqrt((in_dim + out_dim) / 2.) 227 | return tf.Variable(tf.random_normal([in_dim, out_dim], dtype=tf.float32) * xavier_stddev, 228 | dtype=tf.float32) 229 | 230 | # Initialize network weights and biases using Xavier initialization 231 | def initialize_NN(self, layers): 232 | weights = [] 233 | biases = [] 234 | num_layers = len(layers) 235 | for l in range(0, num_layers - 1): 236 | W = self.xavier_init(size=[layers[l], layers[l + 1]]) 237 | b = tf.Variable(tf.zeros([1, layers[l + 1]], dtype=tf.float32), dtype=tf.float32) 238 | weights.append(W) 239 | biases.append(b) 240 | return weights, biases 241 | 242 | # Evaluates the forward pass 243 | def forward_pass(self, H): 244 | if self.model in ['M1', 'M2']: 245 | num_layers = len(self.layers) 246 | for l in range(0, num_layers - 2): 247 | W = self.weights[l] 248 | b = self.biases[l] 249 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 250 | W = self.weights[-1] 251 | b = self.biases[-1] 252 | H = tf.add(tf.matmul(H, W), b) 253 | return H 254 | 255 | if self.model in ['M3', 'M4']: 256 | num_layers = len(self.layers) 257 | encoder_1 = tf.tanh(tf.add(tf.matmul(H, self.encoder_weights_1), self.encoder_biases_1)) 258 | encoder_2 = tf.tanh(tf.add(tf.matmul(H, self.encoder_weights_2), self.encoder_biases_2)) 259 | 260 | for l in range(0, num_layers - 2): 261 | W = self.weights[l] 262 | b = self.biases[l] 263 | H = tf.math.multiply(tf.tanh(tf.add(tf.matmul(H, W), b)), encoder_1) + \ 264 | tf.math.multiply(1 - tf.tanh(tf.add(tf.matmul(H, W), b)), encoder_2) 265 | 266 | W = self.weights[-1] 267 | b = self.biases[-1] 268 | H = tf.add(tf.matmul(H, W), b) 269 | return H 270 | 271 | # Forward pass for u 272 | def net_u(self, x1, x2): 273 | u = self.forward_pass(tf.concat([x1, x2], 1)) 274 | return u 275 | 276 | # Forward pass for residual 277 | def net_r(self, x1, x2): 278 | u = self.net_u(x1, x2) 279 | residual = self.operator(u, x1, x2, 280 | self.lam, 281 | self.sigma_x1, 282 | self.sigma_x2) 283 | return residual 284 | 285 | # Feed minibatch 286 | def fetch_minibatch(self, sampler, N): 287 | X, Y = sampler.sample(N) 288 | X = (X - self.mu_X) / self.sigma_X 289 | return X, Y 290 | 291 | # Trains the model by minimizing the MSE loss 292 | def train(self, nIter=10000, batch_size=128): 293 | 294 | start_time = timeit.default_timer() 295 | for it in range(nIter): 296 | # Fetch boundary mini-batches 297 | X_bc1_batch, u_bc1_batch = self.fetch_minibatch(self.bcs_sampler[0], batch_size) 298 | X_bc2_batch, u_bc2_batch = self.fetch_minibatch(self.bcs_sampler[1], batch_size) 299 | X_bc3_batch, u_bc3_batch = self.fetch_minibatch(self.bcs_sampler[2], batch_size) 300 | X_bc4_batch, u_bc4_batch = self.fetch_minibatch(self.bcs_sampler[3], batch_size) 301 | 302 | # Fetch residual mini-batch 303 | X_res_batch, f_res_batch = self.fetch_minibatch(self.res_sampler, batch_size) 304 | 305 | # Define a dictionary for associating placeholders with data 306 | tf_dict = {self.x1_bc1_tf: 
X_bc1_batch[:, 0:1], self.x2_bc1_tf: X_bc1_batch[:, 1:2], 307 | self.u_bc1_tf: u_bc1_batch, 308 | self.x1_bc2_tf: X_bc2_batch[:, 0:1], self.x2_bc2_tf: X_bc2_batch[:, 1:2], 309 | self.u_bc2_tf: u_bc2_batch, 310 | self.x1_bc3_tf: X_bc3_batch[:, 0:1], self.x2_bc3_tf: X_bc3_batch[:, 1:2], 311 | self.u_bc3_tf: u_bc3_batch, 312 | self.x1_bc4_tf: X_bc4_batch[:, 0:1], self.x2_bc4_tf: X_bc4_batch[:, 1:2], 313 | self.u_bc4_tf: u_bc4_batch, 314 | self.x1_r_tf: X_res_batch[:, 0:1], self.x2_r_tf: X_res_batch[:, 1:2], self.r_tf: f_res_batch, 315 | self.adaptive_constant_tf: self.adaptive_constant_val 316 | } 317 | 318 | # Run the Tensorflow session to minimize the loss 319 | self.sess.run(self.train_op, tf_dict) 320 | 321 | # Compute the eigenvalues of the Hessian of losses 322 | if self.stiff_ratio: 323 | if it % 1000 == 0: 324 | print("Eigenvalues information stored ...") 325 | eigenvalues, eigenvalues_bcs, eigenvalues_res = self.sess.run([self.eigenvalues, 326 | self.eigenvalues_bcs, 327 | self.eigenvalues_res], tf_dict) 328 | 329 | # Log eigenvalues 330 | self.eigenvalue_log.append(eigenvalues) 331 | self.eigenvalue_bcs_log.append(eigenvalues_bcs) 332 | self.eigenvalue_res_log.append(eigenvalues_res) 333 | 334 | # Print 335 | if it % 1000 == 0: 336 | elapsed = timeit.default_timer() - start_time 337 | loss_value = self.sess.run(self.loss, tf_dict) 338 | loss_bcs_value, loss_res_value = self.sess.run([self.loss_bcs, self.loss_res], tf_dict) 339 | 340 | self.loss_bcs_log.append(loss_bcs_value / self.adaptive_constant_val) 341 | self.loss_res_log.append(loss_res_value) 342 | 343 | # Compute and Print adaptive weights during training 344 | if self.model in ['M2', 'M4']: 345 | adaptive_constant_value = self.sess.run(self.adaptive_constant, tf_dict) 346 | self.adaptive_constant_val = adaptive_constant_value * (1.0 - self.beta) \ 347 | + self.beta * self.adaptive_constant_val 348 | self.adpative_constant_log.append(self.adaptive_constant_val) 349 | 350 | print('It: %d, Loss: %.3e, Loss_bcs: %.3e, Loss_res: %.3e, Adaptive_Constant: %.2f ,Time: %.2f' % 351 | (it, loss_value, loss_bcs_value, loss_res_value, self.adaptive_constant_val, elapsed)) 352 | start_time = timeit.default_timer() 353 | 354 | # Store gradients 355 | if it % 10000 == 0: 356 | self.save_gradients(tf_dict) 357 | print("Gradients information stored ...") 358 | 359 | # Evaluates predictions at test points 360 | 361 | # Evaluates predictions at test points 362 | def predict_u(self, X_star): 363 | X_star = (X_star - self.mu_X) / self.sigma_X 364 | tf_dict = {self.x1_u_tf: X_star[:, 0:1], self.x2_u_tf: X_star[:, 1:2]} 365 | u_star = self.sess.run(self.u_pred, tf_dict) 366 | return u_star 367 | 368 | def predict_r(self, X_star): 369 | X_star = (X_star - self.mu_X) / self.sigma_X 370 | tf_dict = {self.x1_r_tf: X_star[:, 0:1], self.x2_r_tf: X_star[:, 1:2]} 371 | r_star = self.sess.run(self.r_pred, tf_dict) 372 | return r_star 373 | 374 | 375 | 376 | -------------------------------------------------------------------------------- /Helmholtz equation/MFF/1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/Helmholtz equation/MFF/1.mat -------------------------------------------------------------------------------- /Helmholtz equation/MFF/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 
20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /Helmholtz equation/MMPINN-INN/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change 
this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /Helmholtz equation/MMPINN-MFF/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /Helmholtz equation/SAPINN/1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/Helmholtz equation/SAPINN/1.mat -------------------------------------------------------------------------------- /Helmholtz equation/SAPINN/eager_lbfgs.py: -------------------------------------------------------------------------------- 1 | #pulled from https://github.com/yaroslavvb/stuff/blob/master/eager_lbfgs/eager_lbfgs.py 2 | 3 | import tensorflow as tf 4 | import numpy as np 5 | import time 6 | 7 | 8 | def dot(a, b): 9 | """Dot product function since TensorFlow doesn't have one.""" 10 | return tf.reduce_sum(a*b) 11 | 12 | def verbose_func(s): 13 | print(s) 14 | 15 | final_loss = None 16 | times = [] 17 | def lbfgs(opfunc, x, state, maxIter = 100, learningRate = 1, do_verbose = True): 18 | """port of lbfgs.lua, using TensorFlow eager mode. 
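    Interface note (illustrative, inferred from the function body): `opfunc(x)` must
    return a pair (loss, flattened_gradient); `x` is the flattened parameter vector,
    advanced each iteration via x += t*d and returned; `state` is a Struct (defined
    at the bottom of this file) that keeps the L-BFGS memory (old_dirs, old_stps,
    Hdiag, ...) across calls.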
19 | """ 20 | 21 | global final_loss, times 22 | 23 | maxEval = maxIter*1.25 24 | tolFun = 1e-5 25 | tolX = 1e-9 26 | nCorrection = 50 27 | isverbose = False 28 | 29 | # verbose function 30 | if isverbose: 31 | verbose = verbose_func 32 | else: 33 | verbose = lambda x: None 34 | 35 | f, g = opfunc(x) 36 | 37 | f_hist = [f] 38 | currentFuncEval = 1 39 | state.funcEval = state.funcEval + 1 40 | p = g.shape[0] 41 | 42 | # check optimality of initial point 43 | tmp1 = tf.abs(g) 44 | if tf.reduce_sum(tmp1) <= tolFun: 45 | verbose("optimality condition below tolFun") 46 | return x, f_hist 47 | 48 | # optimize for a max of maxIter iterations 49 | nIter = 0 50 | times = [] 51 | while nIter < maxIter: 52 | start_time = time.time() 53 | 54 | # keep track of nb of iterations 55 | nIter = nIter + 1 56 | state.nIter = state.nIter + 1 57 | 58 | ############################################################ 59 | ## compute gradient descent direction 60 | ############################################################ 61 | if state.nIter == 1: 62 | d = -g 63 | old_dirs = [] 64 | old_stps = [] 65 | Hdiag = 1 66 | else: 67 | # do lbfgs update (update memory) 68 | y = g - g_old 69 | s = d*t 70 | ys = dot(y, s) 71 | 72 | if ys > 1e-10: 73 | # updating memory 74 | if len(old_dirs) == nCorrection: 75 | # shift history by one (limited-memory) 76 | del old_dirs[0] 77 | del old_stps[0] 78 | 79 | # store new direction/step 80 | old_dirs.append(s) 81 | old_stps.append(y) 82 | 83 | # update scale of initial Hessian approximation 84 | Hdiag = ys/dot(y, y) 85 | 86 | # compute the approximate (L-BFGS) inverse Hessian 87 | # multiplied by the gradient 88 | k = len(old_dirs) 89 | 90 | # need to be accessed element-by-element, so don't re-type tensor: 91 | ro = [0]*nCorrection 92 | for i in range(k): 93 | ro[i] = 1/dot(old_stps[i], old_dirs[i]) 94 | 95 | 96 | # iteration in L-BFGS loop collapsed to use just one buffer 97 | # need to be accessed element-by-element, so don't re-type tensor: 98 | al = [0]*nCorrection 99 | 100 | q = -g 101 | for i in range(k-1, -1, -1): 102 | al[i] = dot(old_dirs[i], q) * ro[i] 103 | q = q - al[i]*old_stps[i] 104 | 105 | # multiply by initial Hessian 106 | r = q*Hdiag 107 | for i in range(k): 108 | be_i = dot(old_stps[i], r) * ro[i] 109 | r += (al[i]-be_i)*old_dirs[i] 110 | 111 | d = r 112 | # final direction is in r/d (same object) 113 | 114 | g_old = g 115 | f_old = f 116 | 117 | ############################################################ 118 | ## compute step length 119 | ############################################################ 120 | # directional derivative 121 | gtd = dot(g, d) 122 | 123 | # check that progress can be made along that direction 124 | if gtd > -tolX: 125 | verbose("Can not make progress along direction.") 126 | break 127 | 128 | # reset initial guess for step size 129 | if state.nIter == 1: 130 | tmp1 = tf.abs(g) 131 | t = min(1, 1/tf.reduce_sum(tmp1)) 132 | else: 133 | t = learningRate 134 | 135 | 136 | 137 | x += t*d 138 | 139 | if nIter != maxIter: 140 | # re-evaluate function only if not in last iteration 141 | # the reason we do this: in a stochastic setting, 142 | # no use to re-evaluate that function here 143 | f, g = opfunc(x) 144 | 145 | lsFuncEval = 1 146 | f_hist.append(f) 147 | 148 | 149 | # update func eval 150 | currentFuncEval = currentFuncEval + lsFuncEval 151 | state.funcEval = state.funcEval + lsFuncEval 152 | 153 | ############################################################ 154 | ## check conditions 155 | 
############################################################
156 |     if nIter == maxIter:
157 |       break
158 | 
159 |     if currentFuncEval >= maxEval:
160 |       # max nb of function evals
161 |       print('max nb of function evals')
162 |       break
163 | 
164 |     tmp1 = tf.abs(g)
165 |     if tf.reduce_sum(tmp1) <= tolFun:
166 |       # check optimality
167 |       print('optimality condition below tolFun')
168 |       break
169 | 
170 |     tmp1 = tf.abs(d*t)
171 |     if tf.reduce_sum(tmp1) <= tolX:
172 |       # step size below tolX
173 |       print('step size below tolX')
174 |       break
175 | 
176 |     if tf.abs(f - f_old) < tolX:
177 |       # function value changing less than tolX
178 |       print('function value changing less than tolX ' + str(tf.abs(f - f_old)))
179 |       break
180 | 
181 |     if do_verbose:
182 |       if nIter % 1000 == 0:
183 |         print("Step %3d loss %6.5f " % (nIter, f.numpy()))
184 | 
185 | 
186 |     if nIter == maxIter - 1:
187 |       final_loss = f.numpy()
188 | 
189 | 
190 |   # save state
191 |   state.old_dirs = old_dirs
192 |   state.old_stps = old_stps
193 |   state.Hdiag = Hdiag
194 |   state.g_old = g_old
195 |   state.f_old = f_old
196 |   state.t = t
197 |   state.d = d
198 | 
199 |   return x, f_hist, currentFuncEval
200 | 
201 | # dummy/Struct gives Lua-like struct object with 0 defaults
202 | class dummy(object):
203 |   pass
204 | 
205 | class Struct(dummy):
206 |   def __getattribute__(self, key):
207 |     if key == '__dict__':
208 |       return super(dummy, self).__getattribute__('__dict__')
209 |     return self.__dict__.get(key, 0)
210 | 
211 | 
212 | 
--------------------------------------------------------------------------------
/Helmholtz equation/SAPINN/helmholtz-NTK.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import time
5 | import scipy.io
6 | import math
7 | import matplotlib.gridspec as gridspec
8 | from plotting import newfig
9 | from mpl_toolkits.axes_grid1 import make_axes_locatable
10 | from tensorflow import keras
11 | from tensorflow.keras.models import Sequential
12 | from tensorflow.keras.layers import Dense, Input
13 | from tensorflow.keras import layers, activations
14 | from scipy.interpolate import griddata
15 | from eager_lbfgs import lbfgs, Struct
16 | from pyDOE import lhs
17 | 
18 | 
19 | layer_sizes = [2, 50, 50, 50, 50, 1]
20 | 
21 | sizes_w = []
22 | sizes_b = []
23 | for i, width in enumerate(layer_sizes):
24 |     if i != 1:
25 |         sizes_w.append(int(width * layer_sizes[1]))
26 |         sizes_b.append(int(width if i != 0 else layer_sizes[1]))
27 | 
28 | def set_weights(model, w, sizes_w, sizes_b):
29 |     for i, layer in enumerate(model.layers[0:]):
30 |         start_weights = sum(sizes_w[:i]) + sum(sizes_b[:i])
31 |         end_weights = sum(sizes_w[:i+1]) + sum(sizes_b[:i])
32 |         weights = w[start_weights:end_weights]
33 |         w_div = int(sizes_w[i] / sizes_b[i])
34 |         weights = tf.reshape(weights, [w_div, sizes_b[i]])
35 |         biases = w[end_weights:end_weights + sizes_b[i]]
36 |         weights_biases = [weights, biases]
37 |         layer.set_weights(weights_biases)
38 | 
39 | def get_weights(model):
40 |     w = []
41 |     for layer in model.layers[0:]:
42 |         weights_biases = layer.get_weights()
43 |         weights = weights_biases[0].flatten()
44 |         biases = weights_biases[1]
45 |         w.extend(weights)
46 |         w.extend(biases)
47 | 
48 |     w = tf.convert_to_tensor(w)
49 |     return w
50 | 
51 | def neural_net(layer_sizes):
52 |     model = Sequential()
53 |     model.add(layers.InputLayer(input_shape=(layer_sizes[0],)))
54 |     for width in layer_sizes[1:-1]:
55 |         model.add(layers.Dense(
56 |             width, activation=tf.nn.tanh,
57 |             kernel_initializer="glorot_normal"))
58 |     model.add(layers.Dense(
59 |         layer_sizes[-1], activation=None,
60 |         kernel_initializer="glorot_normal"))
61 |     return model
62 | 
63 | 
64 | u_model = neural_net(layer_sizes)
65 | 
66 | 
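# --- Note on the self-adaptive (SA-PINN) loss used below ---------------------
# loss() returns (total loss, boundary MSE, residual MSE). The PDE residual term
# is weighted pointwise by the trainable collocation weights `col_weights`, i.e.
# mse_f_u = mean((col_weights * f_u_pred)^2). In fit() further down, the network
# parameters are updated by gradient descent on this loss, while `col_weights`
# receive the *negated* gradient (gradient ascent), so collocation points with
# large residuals are automatically up-weighted during training.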
kernel_initializer="glorot_normal")) 58 | model.add(layers.Dense( 59 | layer_sizes[-1], activation=None, 60 | kernel_initializer="glorot_normal")) 61 | return model 62 | 63 | 64 | u_model = neural_net(layer_sizes) 65 | 66 | 67 | def loss(x_f, y_f, 68 | x_lb, y_lb, 69 | x_ub, y_ub, 70 | x_rb, y_rb, 71 | x_lftb, y_lftb, 72 | col_weights): 73 | 74 | f_u_pred = f_model(x_f, y_f) 75 | 76 | u_lb_pred = u_model(tf.concat([x_lb, y_lb],1)) 77 | u_ub_pred = u_model(tf.concat([x_ub, y_ub],1)) 78 | u_rb_pred = u_model(tf.concat([x_rb, y_rb],1)) 79 | u_lftb_pred = u_model(tf.concat([x_lftb, y_lftb],1)) 80 | 81 | mse_b_u = tf.reduce_mean(tf.square(u_lb_pred - 0)) + \ 82 | tf.reduce_mean(tf.square(u_ub_pred - 0)) + \ 83 | tf.reduce_mean(tf.square(u_rb_pred - 0)) + \ 84 | tf.reduce_mean(tf.square(u_lftb_pred - 0)) 85 | # 86 | mse_f_u = tf.reduce_mean(tf.square(col_weights*f_u_pred)) 87 | 88 | return mse_b_u + mse_f_u , mse_b_u, mse_f_u 89 | 90 | def loss_weights(x_f, y_f, 91 | x_lb, y_lb, 92 | x_ub, y_ub, 93 | x_rb, y_rb, 94 | x_lftb, y_lftb, 95 | col_weights): 96 | 97 | f_u_pred = f_model(x_f, y_f) 98 | 99 | u_lb_pred = u_model(tf.concat([x_lb, y_lb],1)) 100 | u_ub_pred = u_model(tf.concat([x_ub, y_ub],1)) 101 | u_rb_pred = u_model(tf.concat([x_rb, y_rb],1)) 102 | u_lftb_pred = u_model(tf.concat([x_lftb, y_lftb],1)) 103 | 104 | mse_b_u = tf.reduce_mean(tf.square(u_lb_pred - 0)) + \ 105 | tf.reduce_mean(tf.square(u_ub_pred - 0)) + \ 106 | tf.reduce_mean(tf.square(u_rb_pred - 0)) + \ 107 | tf.reduce_mean(tf.square(u_lftb_pred - 0)) 108 | # 109 | mse_f_u = tf.reduce_mean(tf.square(col_weights*f_u_pred)) 110 | return mse_b_u, mse_bx_u, mse_f_u 111 | 112 | def f_model(x, y): 113 | with tf.GradientTape(persistent=True) as tape: 114 | tape.watch(x) 115 | tape.watch(y) 116 | 117 | u = u_model(tf.concat([x, y],1)) 118 | 119 | 120 | u_x = tape.gradient(u, x) 121 | u_y = tape.gradient(u, y) 122 | 123 | u_xx = tape.gradient(u_x, x) 124 | 125 | u_yy = tape.gradient(u_y, y) 126 | 127 | del tape 128 | a1 = 1.0 129 | a2 = 4.0 130 | ksq = 1.0 131 | forcing = - (a1*math.pi)**2*np.sin(a1*math.pi*x)*np.sin(a2*math.pi*y) - \ 132 | (a2*math.pi)**2*np.sin(a1*math.pi*x)*np.sin(a2*math.pi*y) + \ 133 | ksq*np.sin(a1*math.pi*x)*np.sin(a2*math.pi*y) 134 | 135 | f_u = u_xx + u_yy + ksq*u - forcing 136 | 137 | return f_u 138 | 139 | def fit(x_f, t_f, x_lb, y_lb, x_ub, y_ub, x_rb, y_rb, x_lftb, y_lftb, col_weights, tf_iter, newton_iter): 140 | 141 | batch_sz = N_f 142 | n_batches = N_f // batch_sz 143 | start_time = time.time() 144 | tf_optimizer = tf.keras.optimizers.Adam(lr = 0.001, beta_1=.99) 145 | tf_optimizer_coll = tf.keras.optimizers.Adam(lr = 0.001, beta_1=.99) 146 | 147 | print("starting Adam training") 148 | for epoch in range(tf_iter): 149 | for i in range(n_batches): 150 | 151 | x_f_batch = x_f[i*batch_sz:(i*batch_sz + batch_sz),] 152 | y_f_batch = y_f[i*batch_sz:(i*batch_sz + batch_sz),] 153 | 154 | with tf.GradientTape(persistent=True) as tape: 155 | loss_value, mse_b, mse_f = loss(x_f, y_f, x_lb, y_lb, x_ub, y_ub, x_rb, y_rb, x_lftb, y_lftb, col_weights) 156 | grads = tape.gradient(loss_value, u_model.trainable_variables) 157 | grads_col = tape.gradient(loss_value, col_weights) 158 | tf_optimizer.apply_gradients(zip(grads, u_model.trainable_variables)) 159 | tf_optimizer_coll.apply_gradients(zip([-grads_col], [col_weights])) 160 | 161 | 162 | del tape 163 | 164 | elapsed = time.time() - start_time 165 | print('It: %d, Time: %.2f' % (epoch, elapsed)) 166 | tf.print(f"mse_b: {mse_b} mse_f: {mse_f} total loss: 
{loss_value}") 167 | start_time = time.time() 168 | 169 | 170 | print("Starting L-BFGS training") 171 | 172 | loss_and_flat_grad = get_loss_and_flat_grad(x_f, y_f, x_lb, y_lb, x_ub, y_ub, x_rb, y_rb, x_lftb, y_lftb, col_weights) 173 | 174 | lbfgs(loss_and_flat_grad, 175 | get_weights(u_model), 176 | Struct(), maxIter=newton_iter, learningRate=0.8) 177 | 178 | def get_loss_and_flat_grad(x_f, y_f, x_lb, y_lb, x_ub, y_ub, x_rb, y_rb, x_lftb, y_lftb, col_weights): 179 | def loss_and_flat_grad(w): 180 | with tf.GradientTape() as tape: 181 | set_weights(u_model, w, sizes_w, sizes_b) 182 | loss_value, _, _ = loss(x_f, y_f, x_lb, y_lb, x_ub, y_ub, x_rb, y_rb, x_lftb, y_lftb, col_weights) 183 | grad = tape.gradient(loss_value, u_model.trainable_variables) 184 | grad_flat = [] 185 | for g in grad: 186 | grad_flat.append(tf.reshape(g, [-1])) 187 | grad_flat = tf.concat(grad_flat, 0) 188 | #print(loss_value, grad_flat) 189 | return loss_value, grad_flat 190 | 191 | return loss_and_flat_grad 192 | 193 | 194 | def predict(X_star): 195 | X_star = tf.convert_to_tensor(X_star, dtype=tf.float32) 196 | u_star = u_model(X_star) 197 | 198 | f_u_star = f_model(X_star[:,0:1], 199 | X_star[:,1:2]) 200 | 201 | return u_star.numpy(), f_u_star.numpy() 202 | 203 | 204 | lb = np.array([-1.0]) 205 | ub = np.array([1.0]) 206 | rb = np.array([1.0]) 207 | lftb = np.array([-1.0]) 208 | 209 | N0 = 200 210 | N_b = 100 #25 per upper and lower boundary, so 50 total 211 | N_f = 100000 212 | 213 | col_weights = tf.Variable(tf.random.uniform([N_f, 1])) 214 | u_weights = tf.Variable(100*tf.random.uniform([N0, 1])) 215 | 216 | 217 | nx, ny = (1001,1001) 218 | x = np.linspace(-1, 1, nx) 219 | y = np.linspace(-1, 1, ny) 220 | 221 | 222 | xv, yv = np.meshgrid(x,y) 223 | 224 | x = np.reshape(x, (-1,1)) 225 | y = np.reshape(y, (-1,1)) 226 | 227 | 228 | Exact_u = np.sin(math.pi*xv)*np.sin(4*math.pi*yv) 229 | 230 | 231 | idx_x = np.random.choice(x.shape[0], N0, replace=False) 232 | x0 = x[idx_x,:] 233 | u0 = Exact_u[idx_x,0:1] 234 | 235 | idx_y = np.random.choice(y.shape[0], N_b, replace=False) 236 | yb = y[idx_y, :] 237 | 238 | 239 | 240 | X_f = lb + (ub-lb)*lhs(2, N_f) 241 | 242 | x_f = tf.convert_to_tensor(X_f[:,0:1], dtype=tf.float32) 243 | y_f = tf.convert_to_tensor(X_f[:,1:2], dtype=tf.float32) 244 | 245 | 246 | X0 = np.concatenate((x0, 0*x0), 1) # (x0, 0) 247 | X_lb = np.concatenate(( yb, 0*yb + lb[0]), 1) # lower boundary (x, -1) 248 | X_ub = np.concatenate(( yb, 0*yb + ub[0]), 1) # upper boundary (x, 1) 249 | X_rb = np.concatenate((0*yb + rb[0], yb), 1) # right boundary (1, y) 250 | X_lftb = np.concatenate((0*yb + lftb[0], yb), 1) # left boundary (-1,y) 251 | 252 | 253 | x0 = X0[:,0:1] 254 | y0 = X0[:,1:2] 255 | 256 | x_lb = tf.convert_to_tensor(X_lb[:,0:1], dtype=tf.float32) 257 | y_lb = tf.convert_to_tensor(X_lb[:,1:2], dtype=tf.float32) 258 | 259 | x_ub = tf.convert_to_tensor(X_ub[:,0:1], dtype=tf.float32) 260 | y_ub = tf.convert_to_tensor(X_ub[:,1:2], dtype=tf.float32) 261 | 262 | x_rb = tf.convert_to_tensor(X_rb[:,0:1], dtype=tf.float32) 263 | y_rb = tf.convert_to_tensor(X_rb[:,1:2], dtype=tf.float32) 264 | 265 | x_lftb = tf.convert_to_tensor(X_lftb[:,0:1], dtype=tf.float32) 266 | y_lftb = tf.convert_to_tensor(X_lftb[:,1:2], dtype=tf.float32) 267 | 268 | #Fit helmholtz 269 | fit(x_f, y_f, x_lb, y_lb, x_ub, y_ub, x_rb, y_rb, x_lftb, y_lftb, col_weights, 270 | tf_iter = 10000, newton_iter = 10000) 271 | 272 | #generate mesh for plotting 273 | X, Y = np.meshgrid(x,y) 274 | 275 | X_star = np.hstack((X.flatten()[:,None], 
Y.flatten()[:,None])) 276 | u_star = Exact_u.flatten()[:,None] 277 | 278 | 279 | lb = np.array([-1.0, -1.0]) 280 | ub = np.array([1.0, 1]) 281 | 282 | u_pred, f_u_pred = predict(X_star) 283 | 284 | 285 | error_u = np.linalg.norm(u_star-u_pred,2)/np.linalg.norm(u_star,2) 286 | 287 | print('Error u: %e' % (error_u)) 288 | 289 | 290 | 291 | U_pred = griddata(X_star, u_pred.flatten(), (X, Y), method='cubic') 292 | 293 | 294 | FU_pred = griddata(X_star, f_u_pred.flatten(), (X, Y), method='cubic') 295 | 296 | 297 | ###################################################################### 298 | ############################# Plotting ############################### 299 | ###################################################################### 300 | 301 | fig, ax = newfig(1.3, 1.0) 302 | ax.axis('off') 303 | 304 | ####### Row 0: h(t,x) ################## 305 | gs0 = gridspec.GridSpec(1, 2) 306 | gs0.update(top=1-0.06, bottom=1-1/3, left=0.15, right=0.85, wspace=0) 307 | ax = plt.subplot(gs0[:, :]) 308 | 309 | h = ax.imshow(U_pred, interpolation='nearest', cmap='YlGnBu', 310 | extent=[lb[1], ub[1], lb[0], ub[0]], 311 | origin='lower', aspect='auto') 312 | divider = make_axes_locatable(ax) 313 | cax = divider.append_axes("right", size="5%", pad=0.05) 314 | fig.colorbar(h, cax=cax) 315 | 316 | 317 | line = np.linspace(x.min(), x.max(), 2)[:,None] 318 | ax.plot(y[250]*np.ones((2,1)), line, 'k--', linewidth = 1) 319 | ax.plot(y[500]*np.ones((2,1)), line, 'k--', linewidth = 1) 320 | ax.plot(y[750]*np.ones((2,1)), line, 'k--', linewidth = 1) 321 | 322 | ax.set_xlabel('$x$') 323 | ax.set_ylabel('$y$') 324 | leg = ax.legend(frameon=False, loc = 'best') 325 | 326 | ax.set_title('$u(x,y)$', fontsize = 10) 327 | 328 | ####### Row 1: h(t,x) slices ################## 329 | gs1 = gridspec.GridSpec(1, 3) 330 | gs1.update(top=1-1/3, bottom=0, left=0.1, right=0.9, wspace=0.5) 331 | 332 | ax = plt.subplot(gs1[0, 0]) 333 | ax.plot(x,Exact_u[:,250], 'b-', linewidth = 2, label = 'Exact') 334 | ax.plot(x,U_pred[:,250], 'r--', linewidth = 2, label = 'Prediction') 335 | ax.set_xlabel('$y$') 336 | ax.set_ylabel('$u(x,y)$') 337 | ax.set_title('$y = %.2f$' % (y[250]), fontsize = 10) 338 | ax.axis('square') 339 | ax.set_xlim([-1.1,1.1]) 340 | ax.set_ylim([-1.1,1.1]) 341 | 342 | ax = plt.subplot(gs1[0, 1]) 343 | ax.plot(x,Exact_u[:,500], 'b-', linewidth = 2, label = 'Exact') 344 | ax.plot(x,U_pred[:,500], 'r--', linewidth = 2, label = 'Prediction') 345 | ax.set_xlabel('$y$') 346 | ax.set_ylabel('$u(x,y)$') 347 | ax.axis('square') 348 | ax.set_xlim([-1.1,1.1]) 349 | ax.set_ylim([-1.1,1.1]) 350 | ax.set_title('$x = %.2f$' % (y[500]), fontsize = 10) 351 | ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.3), ncol=5, frameon=False) 352 | 353 | ax = plt.subplot(gs1[0, 2]) 354 | ax.plot(x,Exact_u[:,750], 'b-', linewidth = 2, label = 'Exact') 355 | ax.plot(x,U_pred[:,750], 'r--', linewidth = 2, label = 'Prediction') 356 | ax.set_xlabel('$y$') 357 | ax.set_ylabel('$u(x,y)$') 358 | ax.axis('square') 359 | ax.set_xlim([-1.1,1.1]) 360 | ax.set_ylim([-1.1,1.1]) 361 | ax.set_title('$x = %.2f$' % (y[750]), fontsize = 10) 362 | 363 | #plot prediction 364 | fig, ax = plt.subplots() 365 | 366 | ec = plt.imshow(U_pred, interpolation='nearest', cmap='rainbow', 367 | extent=[-1.0, 1.0, -1.0, 1.0], 368 | origin='lower', aspect='auto') 369 | 370 | 371 | ax.autoscale_view() 372 | ax.set_xlabel('$y$') 373 | ax.set_ylabel('$x$') 374 | cbar = plt.colorbar(ec) 375 | cbar.set_label('$u(x,y)$') 376 | plt.title("Predicted $u(x,y)$",fontdict = {'fontsize': 
14}) 377 | plt.show() 378 | 379 | 380 | #show f_u_pred, we want this to be ~0 across the whole domain 381 | fig, ax = plt.subplots() 382 | 383 | ec = plt.imshow(FU_pred, interpolation='nearest', cmap='rainbow', 384 | extent=[-1.0, 1, -1.0, 1.0], 385 | origin='lower', aspect='auto') 386 | 387 | #ax.add_collection(ec) 388 | ax.autoscale_view() 389 | ax.set_xlabel('$x$') 390 | ax.set_ylabel('$t$') 391 | cbar = plt.colorbar(ec) 392 | cbar.set_label('$\overline{f}_u$ prediction') 393 | plt.show() 394 | 395 | #plot prediction error 396 | fig, ax = plt.subplots() 397 | 398 | ec = plt.imshow((U_pred - Exact_u), interpolation='nearest', cmap='YlGnBu', 399 | extent=[-1.0, 1.0, -1.0, 1.0], 400 | origin='lower', aspect='auto') 401 | 402 | #ax.add_collection(ec) 403 | ax.autoscale_view() 404 | ax.set_xlabel('$t$') 405 | ax.set_ylabel('$x$') 406 | cbar = plt.colorbar(ec) 407 | cbar.set_label('$u$ prediction error') 408 | #plt.title("Prediction Error",fontdict = {'fontsize': 14}) 409 | plt.show() 410 | 411 | # print collocation point weights 412 | plt.scatter(x_f, y_f, c = col_weights.numpy(), s = col_weights.numpy()/100) 413 | plt.show() 414 | -------------------------------------------------------------------------------- /Helmholtz equation/SAPINN/plotting.py: -------------------------------------------------------------------------------- 1 | 2 | #!/usr/bin/env python3 3 | # -*- coding: utf-8 -*- 4 | """ 5 | Created on Mon Oct 9 20:11:57 2017 6 | 7 | @author: mraissi 8 | """ 9 | 10 | import numpy as np 11 | import matplotlib as mpl 12 | #mpl.use('pgf') 13 | 14 | def figsize(scale, nplots = 1): 15 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 16 | inches_per_pt = 1.0/72.27 # Convert pt to inch 17 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 18 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 19 | fig_height = nplots*fig_width*golden_mean # height in inches 20 | fig_size = [fig_width,fig_height] 21 | return fig_size 22 | 23 | pgf_with_latex = { # setup matplotlib to use latex for output 24 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 25 | "text.usetex": True, # use LaTeX to write all text 26 | "font.family": "serif", 27 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 28 | "font.sans-serif": [], 29 | "font.monospace": [], 30 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
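    # NOTE: on newer Matplotlib releases (>= 3.3) the "pgf.preamble" entry a few
    # lines below must be a single string rather than a list; if
    # mpl.rcParams.update() rejects it, join the two \usepackage lines with "\n".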
31 | "font.size": 10, 32 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 33 | "xtick.labelsize": 8, 34 | "ytick.labelsize": 8, 35 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 36 | "pgf.preamble": [ 37 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 38 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 39 | ] 40 | } 41 | mpl.rcParams.update(pgf_with_latex) 42 | 43 | import matplotlib.pyplot as plt 44 | 45 | # I make my own newfig and savefig functions 46 | def newfig(width, nplots = 1): 47 | fig = plt.figure(figsize=figsize(width, nplots)) 48 | ax = fig.add_subplot(111) 49 | return fig, ax 50 | 51 | def savefig(filename, crop = True): 52 | if crop == True: 53 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 55 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 56 | else: 57 | # plt.savefig('{}.pgf'.format(filename)) 58 | plt.savefig('{}.pdf'.format(filename)) 59 | plt.savefig('{}.eps'.format(filename)) 60 | 61 | ## Simple plot 62 | #fig, ax = newfig(1.0) 63 | # 64 | #def ema(y, a): 65 | # s = [] 66 | # s.append(y[0]) 67 | # for t in range(1, len(y)): 68 | # s.append(a * y[t] + (1-a) * s[t-1]) 69 | # return np.array(s) 70 | # 71 | #y = [0]*200 72 | #y.extend([20]*(1000-len(y))) 73 | #s = ema(y, 0.01) 74 | # 75 | #ax.plot(s) 76 | #ax.set_xlabel('X Label') 77 | #ax.set_ylabel('EMA') 78 | # 79 | #savefig('ema') 80 | -------------------------------------------------------------------------------- /Klein–Gordon equation/a10/LRA with IFNN/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /Klein–Gordon equation/a10/MMPINN-MFF/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MMPINN 2 | A practical PINN framework for multi-scale problems with multi-magnitude loss terms 3 | -------------------------------------------------------------------------------- /heat conduction problem/0.11/MMPINN/log1.txt: -------------------------------------------------------------------------------- 1 | 1.1473521 2 | 0.64324385 3 | 0.08962359 4 | 0.03312587 5 | 0.010640064 6 | 0.005974762 7 | 0.0051623587 8 | 0.0044684964 9 | 0.003849154 10 | 0.0033282107 11 | 0.0028862173 12 | 0.06320214 13 | 0.009347659 14 | 0.002855851 15 | 0.0025309825 16 | 0.0020396707 17 | 0.0017587984 18 | 0.0014978178 19 | 0.0012543316 20 | 0.0010090149 21 | 0.0007850308 22 | 0.00060673454 23 | 0.0004902106 24 | 0.00043175745 25 | 0.054796625 26 | 0.00096157327 27 | 0.0015855236 28 | 0.00038660684 29 | 0.00032651136 30 | 0.00026698396 31 | 0.00024919462 32 | 0.00025247104 33 | 0.00034660316 34 | 0.044054735 35 | 0.0011434241 36 | 0.00071774254 37 | 0.00035811332 38 | 0.00032688526 39 | 0.00032734597 40 | 0.00037688712 41 | 0.43968946 42 | 0.009840397 43 | 0.0049984166 44 | 0.0006977855 45 | 0.00065187726 46 | 0.000536487 47 | 0.1433366 48 | 0.02417931 49 | 0.0013717657 50 | 0.04679913 51 | 0.026894122 52 | 0.0010277462 53 | 0.11658034 54 | 0.00799466 55 | 0.0019430219 56 | 0.06652164 57 | 0.0028423567 58 | 0.0038661829 59 | 0.089442275 60 | 0.021343479 61 | 0.002722982 62 | 0.044298097 63 | 0.003226089 64 | 0.2760539 65 | 0.06985083 66 | 0.011233619 67 | 0.0040621827 68 | 0.005053935 69 | 0.00264248 70 | 0.090173006 71 | 0.02801598 72 | 0.005055836 73 | 0.0012915895 74 | 0.047025 75 | 0.055266738 76 | 0.0015885715 77 | 0.0007593394 78 | 0.0021010058 79 | 0.017335225 80 | 0.012338198 81 | 0.0034618815 82 | 0.009205118 83 | 0.0075882366 84 | 0.002253052 85 | 0.002717993 86 | 0.23549835 
87 | 0.030192196 88 | 0.00223526 89 | 0.0007372128 90 | 0.15663198 91 | 0.039867092 92 | 0.006803118 93 | 0.0011672344 94 | 0.038414124 95 | 0.016412629 96 | 0.007331839 97 | 0.0018468403 98 | 0.11252789 99 | 0.03852338 100 | 0.0066277673 101 | -------------------------------------------------------------------------------- /heat conduction problem/0.11/MMPINN/log2.txt: -------------------------------------------------------------------------------- 1 | 0.4338132 2 | 0.7965281 3 | 0.1446307 4 | 0.02525619 5 | 0.010676359 6 | 0.009149152 7 | 0.008219397 8 | 0.0076492466 9 | 0.0071617304 10 | 0.0066586547 11 | 0.006116351 12 | 0.2717818 13 | 0.023644472 14 | 0.006209342 15 | 0.005433139 16 | 0.004547037 17 | 0.00367828 18 | 0.0027880333 19 | 0.0018981784 20 | 0.0012644508 21 | 0.001131805 22 | 0.0013896192 23 | 0.0016329335 24 | 0.0016107381 25 | 0.043954257 26 | 0.0019371869 27 | 0.0014670179 28 | 0.0012538639 29 | 0.0009769918 30 | 0.00091676525 31 | 0.000922299 32 | 0.0010119914 33 | 0.0012690277 34 | 0.039219335 35 | 0.0014530043 36 | 0.0017265708 37 | 0.0013416049 38 | 0.0015501431 39 | 0.0015666354 40 | 0.0022322154 41 | 0.3389335 42 | 0.010895653 43 | 0.004437463 44 | 0.0031697657 45 | 0.005048354 46 | 0.006317066 47 | 0.346829 48 | 0.031168897 49 | 0.01376147 50 | 0.113854215 51 | 0.05956494 52 | 0.011577083 53 | 0.25959298 54 | 0.056766473 55 | 0.01402558 56 | 0.31101158 57 | 0.2838233 58 | 0.0466369 59 | 0.74776137 60 | 0.07333889 61 | 0.10955958 62 | 0.112031445 63 | 0.046874598 64 | 1.5306947 65 | 0.13948993 66 | 0.043280937 67 | 0.7871534 68 | 0.33277005 69 | 0.041729007 70 | 1.7682948 71 | 0.16138777 72 | 0.04039388 73 | 0.020343918 74 | 1.6300874 75 | 0.109268986 76 | 0.073999986 77 | 0.016579716 78 | 0.23467822 79 | 0.795114 80 | 0.10823789 81 | 0.03438774 82 | 0.53063744 83 | 0.14356557 84 | 0.015604677 85 | 0.03482602 86 | 1.1188185 87 | 0.12892342 88 | 0.033831965 89 | 0.017817488 90 | 4.376706 91 | 0.5311854 92 | 0.10329726 93 | 0.043518245 94 | 0.9344326 95 | 0.13775872 96 | 0.046566956 97 | 0.020634256 98 | 2.3688488 99 | 0.13249846 100 | 0.043931045 101 | -------------------------------------------------------------------------------- /heat conduction problem/0.11/MMPINN/log3.txt: -------------------------------------------------------------------------------- 1 | 868825600.0 2 | 868826430.0 3 | 868822100.0 4 | 868815500.0 5 | 868815700.0 6 | 868816000.0 7 | 868815900.0 8 | 868815500.0 9 | 868815360.0 10 | 868815360.0 11 | 868815040.0 12 | 868814660.0 13 | 868815100.0 14 | 868814500.0 15 | 868814400.0 16 | 868814000.0 17 | 868813200.0 18 | 868812500.0 19 | 868811260.0 20 | 868808800.0 21 | 868806800.0 22 | 868804300.0 23 | 868802400.0 24 | 868800400.0 25 | 868799040.0 26 | 868802400.0 27 | 868799800.0 28 | 868795970.0 29 | 868792450.0 30 | 868787840.0 31 | 868781100.0 32 | 868772540.0 33 | 868758500.0 34 | 868763140.0 35 | 868756200.0 36 | 868739260.0 37 | 868716700.0 38 | 868681500.0 39 | 868619600.0 40 | 868524900.0 41 | 868378000.0 42 | 868350600.0 43 | 868267700.0 44 | 868136700.0 45 | 867966100.0 46 | 867741760.0 47 | 867598460.0 48 | 867513400.0 49 | 867140700.0 50 | 866669950.0 51 | 866461800.0 52 | 865844900.0 53 | 865174500.0 54 | 864275460.0 55 | 863034430.0 56 | 862014850.0 57 | 860609100.0 58 | 859295600.0 59 | 858231940.0 60 | 856907900.0 61 | 855039940.0 62 | 853877760.0 63 | 852181000.0 64 | 851066900.0 65 | 850340100.0 66 | 848167940.0 67 | 846501000.0 68 | 845504600.0 69 | 843617500.0 70 | 846105000.0 71 | 843787600.0 72 | 840834370.0 73 | 838841000.0 
74 | 836953900.0 75 | 836467840.0 76 | 834566340.0 77 | 832825700.0 78 | 831083700.0 79 | 831779100.0 80 | 829797000.0 81 | 827634200.0 82 | 825927940.0 83 | 824685250.0 84 | 823147650.0 85 | 821562300.0 86 | 820508200.0 87 | 819423500.0 88 | 817699700.0 89 | 816172300.0 90 | 822771140.0 91 | 817904900.0 92 | 814227650.0 93 | 812182850.0 94 | 811254100.0 95 | 810045950.0 96 | 808225100.0 97 | 806637900.0 98 | 806418900.0 99 | 804783600.0 100 | 803145400.0 101 | -------------------------------------------------------------------------------- /heat conduction problem/0.11/MMPINN/log5.txt: -------------------------------------------------------------------------------- 1 | 0.7827987 2 | 5.36196 3 | 1.705689 4 | 1.2887087 5 | 0.9934969 6 | 0.9739435 7 | 1.4526223 8 | 0.8531964 9 | 1.03353 10 | 0.9178345 11 | 0.9354735 12 | 0.56677717 13 | 1.1862077 14 | 0.5073798 15 | 0.7625852 16 | 0.5643543 17 | 0.4288004 18 | 0.5865301 19 | 0.5149823 20 | 0.40772367 21 | 0.18965635 22 | 0.46987024 23 | 0.64425844 24 | 0.43978986 25 | 0.4835937 26 | 0.46057606 27 | 0.34159374 28 | 0.40225777 29 | 0.3700778 30 | 0.28650776 31 | 0.21607931 32 | 0.25782135 33 | 0.16702157 34 | 0.19201851 35 | 0.18961509 36 | 0.18846649 37 | 0.17615733 38 | 0.16395904 39 | 0.1127034 40 | 0.9713575 41 | 0.18372232 42 | 0.19331372 43 | 0.12699501 44 | 0.13335414 45 | 0.15367652 46 | 0.124635644 47 | 0.09888359 48 | 0.10095542 49 | 0.20596902 50 | 0.08608783 51 | 0.10779406 52 | 0.057941504 53 | 0.07617006 54 | 0.07882592 55 | 0.09556941 56 | 0.09691154 57 | 0.08463432 58 | 0.12274254 59 | 0.06583477 60 | 0.086727105 61 | 0.073827624 62 | 0.075136475 63 | 0.057673126 64 | 0.07444574 65 | 0.10134804 66 | 0.13808426 67 | 0.065123826 68 | 0.0613086 69 | 0.11865616 70 | 0.105212994 71 | 0.08638449 72 | 0.0936559 73 | 0.10081087 74 | 0.09199799 75 | 0.13030088 76 | 0.05937675 77 | 0.08872447 78 | 0.07319461 79 | 0.046506617 80 | 0.111462854 81 | 0.07571555 82 | 0.048456706 83 | 0.06122362 84 | 0.052158687 85 | 0.058025833 86 | 0.038461197 87 | 0.07911224 88 | 0.03630518 89 | 0.055920586 90 | 0.053688187 91 | 0.03574628 92 | 0.032335225 93 | 0.02879173 94 | 0.051066786 95 | 0.053388037 96 | 0.031715 97 | 0.031033956 98 | 0.033256844 99 | 0.022662489 100 | 0.014505768 101 | 0.016699968 102 | 0.030630779 103 | 0.02034665 104 | 0.019489544 105 | 0.01927323 106 | 0.026653204 107 | 0.022942215 108 | 0.019609021 109 | 0.013983763 110 | 0.013145331 111 | 0.016344901 112 | 0.039713394 113 | 0.018100835 114 | 0.018284736 115 | 0.011371176 116 | 0.013826485 117 | 0.018941123 118 | 0.032051936 119 | 0.013460485 120 | 0.0110114375 121 | 0.013886941 122 | 0.013921891 123 | 0.013943078 124 | 0.017825775 125 | 0.019911125 126 | 0.01800523 127 | 0.015559918 128 | 0.021600181 129 | 0.011791206 130 | 0.009911917 131 | 0.009217275 132 | 0.007999413 133 | 0.032277383 134 | 0.010545023 135 | 0.009986388 136 | 0.009962601 137 | 0.0102362 138 | 0.0091708535 139 | 0.008386144 140 | 0.0068537523 141 | 0.0070228986 142 | 0.006054499 143 | 0.0064864 144 | 0.005428596 145 | 0.005335036 146 | 0.005395666 147 | 0.0044765496 148 | 0.005374145 149 | 0.0043555326 150 | 0.004910413 151 | 0.004063499 152 | 0.0046170345 153 | 0.004905361 154 | 0.005367678 155 | 0.005323682 156 | 0.006939923 157 | 0.007805844 158 | 0.0077899317 159 | 0.0071641766 160 | 0.0076619713 161 | 0.0068022506 162 | 0.0061364905 163 | 0.0061720344 164 | 0.006807391 165 | 0.006374635 166 | 0.0064375163 167 | 0.006921074 168 | 0.006629063 169 | 0.0063894284 170 | 0.0062584523 171 | 0.0069625224 172 | 
0.0071254442 173 | 0.0072770785 174 | 0.008776013 175 | 0.008199009 176 | 0.008296214 177 | 0.007163948 178 | 0.007403239 179 | 0.006544229 180 | 0.006139141 181 | 0.006257654 182 | 0.0058843964 183 | 0.0056787403 184 | 0.0053471783 185 | 0.0049582445 186 | 0.0041568354 187 | 0.0038006832 188 | 0.0037590184 189 | 0.0038715315 190 | 0.0036594546 191 | 0.004647354 192 | 0.0051229238 193 | 0.0047905007 194 | 0.00485083 195 | 0.00460687 196 | 0.0040846597 197 | 0.003915647 198 | 0.0035711245 199 | 0.0033074638 200 | 0.00370801 201 | 0.003860939 202 | 0.004182034 203 | 0.0042304937 204 | 0.0039611678 205 | 0.004437291 206 | 0.0046571777 207 | 0.0042960444 208 | 0.004622358 209 | 0.004546161 210 | 0.004693524 211 | 0.005057119 212 | 0.004887451 213 | 0.0044266596 214 | 0.0036140694 215 | 0.0027354865 216 | 0.0024397506 217 | 0.0025933667 218 | 0.0028221027 219 | 0.00315879 220 | 0.003186463 221 | 0.0035453634 222 | 0.0034565683 223 | 0.0032720303 224 | 0.0031476158 225 | 0.003025581 226 | 0.003212699 227 | 0.0034129177 228 | 0.0032605028 229 | 0.003149412 230 | 0.0033863743 231 | 0.0034225627 232 | 0.003397953 233 | 0.0031045782 234 | 0.002789811 235 | 0.0026677046 236 | 0.0022711991 237 | 0.002441474 238 | 0.0025884274 239 | 0.002574984 240 | 0.0018997444 241 | 0.001764534 242 | 0.001893721 243 | 0.0021415344 244 | 0.0022208872 245 | 0.0021930072 246 | 0.0020418738 247 | 0.0017077638 248 | 0.0018645391 249 | 0.0016831186 250 | 0.0018281376 251 | 0.0016862061 252 | 0.001748356 253 | 0.0018218535 254 | 0.0018550649 255 | 0.0019156317 256 | 0.0021909985 257 | 0.0023636476 258 | 0.0023260978 259 | 0.0022592884 260 | 0.0022340894 261 | 0.002024828 262 | 0.0020542736 263 | 0.002007362 264 | 0.0018973211 265 | 0.0019307227 266 | 0.0018383048 267 | 0.0018851047 268 | 0.0017878509 269 | 0.0019144156 270 | 0.0021033795 271 | 0.0020975838 272 | 0.001948636 273 | 0.0020074286 274 | 0.0020732726 275 | 0.0020318935 276 | 0.00211224 277 | 0.0017943712 278 | 0.0017628856 279 | 0.0016126916 280 | 0.0019414657 281 | 0.0020885018 282 | 0.0022720792 283 | 0.0022048496 284 | 0.0020220552 285 | 0.0018601624 286 | 0.001892101 287 | 0.0017012372 288 | 0.0016826453 289 | 0.0020268417 290 | 0.002016108 291 | 0.002110678 292 | 0.0020540992 293 | 0.0020156403 294 | 0.0018256791 295 | 0.0019361188 296 | 0.0018508753 297 | 0.0020082921 298 | 0.0019171989 299 | 0.0019602687 300 | 0.00186431 301 | 0.0018105159 302 | 0.0017309218 303 | 0.0019482326 304 | 0.0018734181 305 | 0.0019393766 306 | 0.0020245742 307 | 0.00198821 308 | 0.0019436217 309 | 0.0018600682 310 | 0.001821204 311 | 0.0017348196 312 | 0.0018098464 313 | 0.0017055715 314 | 0.0017211614 315 | 0.0018186637 316 | 0.001734889 317 | 0.0016972034 318 | 0.0016804314 319 | 0.0016390174 320 | 0.0017335998 321 | 0.0016452521 322 | 0.001533294 323 | 0.0015395555 324 | 0.0014412365 325 | 0.0015375597 326 | 0.0016032062 327 | 0.0017423035 328 | 0.0019051819 329 | 0.002047323 330 | 0.0019333125 331 | 0.0021614826 332 | 0.0020332194 333 | 0.0019638948 334 | 0.0021456194 335 | 0.0022210432 336 | 0.0022511033 337 | 0.0022472064 338 | 0.0022670631 339 | 0.0021229654 340 | 0.0018121415 341 | 0.0018160741 342 | 0.0018691807 343 | 0.0018140543 344 | 0.0017559464 345 | 0.0016938823 346 | 0.0017029183 347 | 0.0017772808 348 | 0.0017416113 349 | 0.001719799 350 | 0.0016723332 351 | 0.0017605827 352 | 0.0016881359 353 | 0.0016401557 354 | 0.0016210496 355 | 0.0017610163 356 | 0.0018914144 357 | 0.001889512 358 | 0.0019069955 359 | 0.0019253542 360 | 0.0019102232 361 | 0.0018835485 362 | 
0.0019706665 363 | 0.0021645904 364 | 0.002240334 365 | 0.0023163217 366 | 0.0023750153 367 | 0.002372344 368 | 0.0022439314 369 | 0.0023129557 370 | 0.002308045 371 | 0.0022508772 372 | 0.0022951474 373 | 0.0022992592 374 | 0.0023450772 375 | 0.0023269295 376 | 0.002275651 377 | 0.0021940828 378 | 0.0021855664 379 | 0.002248572 380 | 0.0022997272 381 | 0.0022103367 382 | 0.0021683902 383 | 0.0020071855 384 | 0.0020269884 385 | 0.0019995095 386 | 0.0019608624 387 | 0.0019074096 388 | 0.0018692374 389 | 0.001810747 390 | 0.0018215228 391 | 0.00176113 392 | 0.0016981214 393 | 0.0016569798 394 | 0.0016601648 395 | 0.0016158655 396 | 0.0015099845 397 | 0.0013504114 398 | 0.0012327937 399 | 0.0011757524 400 | 0.0011665057 401 | 0.0011478293 402 | 0.0011020014 403 | 0.0010444147 404 | 0.0010692261 405 | 0.0010616528 406 | 0.0010970088 407 | 0.0010597417 408 | 0.0010777564 409 | 0.0010401263 410 | 0.0010575604 411 | 0.0010048463 412 | 0.0008783334 413 | 0.0008801223 414 | 0.00077948545 415 | 0.00072922313 416 | 0.00068798015 417 | 0.0007284153 418 | 0.00073668896 419 | 0.0008068318 420 | 0.0007918726 421 | 0.0008511554 422 | 0.00081826805 423 | 0.0008664062 424 | 0.0008443147 425 | 0.0008804319 426 | 0.0008923321 427 | 0.00091049384 428 | 0.00087500346 429 | 0.00083999586 430 | 0.0007982744 431 | 0.00078631914 432 | 0.00074157066 433 | 0.0007401427 434 | 0.0007084971 435 | 0.00069420814 436 | 0.0007093966 437 | 0.0006467395 438 | 0.0005945233 439 | 0.0006174179 440 | 0.00059684506 441 | 0.000653821 442 | 0.00056668045 443 | 0.0005826518 444 | 0.00052747526 445 | 0.0005374167 446 | 0.0005298436 447 | 0.0005144737 448 | 0.00050535006 449 | 0.00051726884 450 | 0.0005277102 451 | 0.0005288713 452 | 0.0005472187 453 | 0.0005391376 454 | 0.0005605569 455 | 0.0005879992 456 | 0.00060429313 457 | 0.0006007677 458 | 0.00053880503 459 | 0.0006359948 460 | 0.00063328 461 | 0.00068636774 462 | 0.0006442235 463 | 0.00071283255 464 | 0.00077233696 465 | 0.0007715367 466 | 0.0008394279 467 | 0.00084915425 468 | 0.00082816364 469 | 0.000834406 470 | 0.0008311795 471 | 0.0009109311 472 | 0.0009014218 473 | 0.0008471613 474 | 0.00091331656 475 | 0.0007698982 476 | 0.00077585544 477 | 0.0007820284 478 | 0.00071164937 479 | 0.000778854 480 | 0.00073458743 481 | 0.0006665039 482 | 0.0006170993 483 | 0.00065391196 484 | 0.0006597834 485 | 0.00066736544 486 | 0.00067757734 487 | 0.00066464284 488 | 0.000652048 489 | 0.00067873846 490 | 0.0006972425 491 | 0.0006792981 492 | 0.00071598974 493 | 0.0006674424 494 | 0.0006870029 495 | 0.00073581946 496 | 0.0007491941 497 | 0.0007304391 498 | 0.00077448593 499 | 0.00073431735 500 | 0.000727393 501 | 0.0006800551 502 | 0.0007245842 503 | 0.0007497957 504 | 0.0007305753 505 | 0.000722347 506 | 0.00073678856 507 | 0.00068901456 508 | 0.00060327 509 | 0.00058480643 510 | 0.00059775426 511 | 0.0005921675 512 | 0.00060569 513 | 0.0005657882 514 | 0.0005302907 515 | 0.0004958314 516 | 0.00050135236 517 | 0.0005138338 518 | 0.000555825 519 | 0.0005326733 520 | 0.0005432555 521 | 0.00053950696 522 | 0.0005119767 523 | 0.00052624993 524 | 0.0005840293 525 | 0.00059633615 526 | 0.0005281626 527 | 0.0005066778 528 | 0.00051898434 529 | 0.00049537455 530 | 0.0004930142 531 | 0.0005192213 532 | 0.00062953087 533 | 0.0006688969 534 | 0.0006294426 535 | 0.00063609326 536 | 0.0006366599 537 | 0.0006158823 538 | 0.00061294757 539 | 0.0005873249 540 | 0.00057308446 541 | 0.00057300844 542 | 0.000539953 543 | 0.00055476313 544 | 0.00057296193 545 | 0.0006114987 546 | 0.00054386415 547 | 
0.0005125558 548 | 0.0005319981 549 | 0.00053063064 550 | 0.0005360049 551 | 0.0004831827 552 | 0.0005193588 553 | 0.0004389451 554 | 0.0004323564 555 | 0.00044380038 556 | 0.00047239204 557 | 0.0004602636 558 | 0.00041410036 559 | 0.00040368264 560 | 0.00041385568 561 | 0.00038075162 562 | 0.00036197065 563 | 0.00038146577 564 | 0.0003949296 565 | 0.00037925362 566 | 0.00042828947 567 | 0.00042884386 568 | 0.00042793143 569 | 0.0004141966 570 | 0.00040168327 571 | 0.00045826158 572 | 0.0005139747 573 | 0.00051119464 574 | 0.00046754855 575 | 0.00047109486 576 | 0.00048569657 577 | 0.00051238714 578 | 0.00053590053 579 | 0.00048937474 580 | 0.0005173335 581 | 0.00048832514 582 | 0.00049159 583 | 0.00044891893 584 | 0.00042801237 585 | 0.00042193543 586 | 0.00042845638 587 | 0.0004610538 588 | 0.00047138188 589 | 0.00047197947 590 | 0.0004327341 591 | 0.00043264125 592 | 0.0004280115 593 | 0.00043650996 594 | 0.00043588303 595 | 0.0004809228 596 | 0.00043377274 597 | 0.00043386675 598 | 0.0004439864 599 | 0.00046651647 600 | 0.0004418587 601 | 0.00047838438 602 | 0.0004520874 603 | 0.0004520085 604 | 0.00047393513 605 | 0.00045312918 606 | 0.00044426526 607 | 0.00047840134 608 | 0.00048040037 609 | 0.0004516714 610 | 0.00042517306 611 | 0.00040632416 612 | 0.00039276783 613 | 0.00040398355 614 | 0.00038173102 615 | 0.00041861815 616 | 0.00038237762 617 | 0.0003887673 618 | 0.00034008728 619 | 0.00033936393 620 | 0.0003319329 621 | 0.00032489817 622 | 0.0003202769 623 | 0.00031541716 624 | 0.00031289837 625 | 0.00028673696 626 | 0.00029788766 627 | 0.00027007103 628 | 0.00026688876 629 | 0.0002567845 630 | 0.00027846836 631 | 0.00029170013 632 | 0.00029886173 633 | 0.00033507292 634 | 0.00033698636 635 | 0.00034234166 636 | 0.00034659336 637 | 0.00030527182 638 | 0.00029984498 639 | 0.00026852486 640 | 0.0002808812 641 | 0.00028004195 642 | 0.0002873891 643 | 0.00027339047 644 | 0.0002526092 645 | 0.00024829895 646 | 0.0002491751 647 | 0.00023542628 648 | 0.00025616825 649 | 0.00026665677 650 | 0.0002681033 651 | 0.00026591832 652 | 0.00027830762 653 | 0.00028466695 654 | 0.00030774527 655 | 0.00031082024 656 | 0.00032908603 657 | 0.0003359617 658 | 0.00034496665 659 | 0.00036164446 660 | 0.00037012593 661 | 0.00037500446 662 | 0.00037497544 663 | -------------------------------------------------------------------------------- /heat conduction problem/0.11/MMPINN/log6.txt: -------------------------------------------------------------------------------- 1 | 3.5936 2 | 9.557056 3 | 7.5369267 4 | 4.349312 5 | 7.168741 6 | 6.3021765 7 | 7.426038 8 | 5.6136274 9 | 4.755271 10 | 5.3094807 11 | 3.716499 12 | 5.440181 13 | 5.155925 14 | 5.8656173 15 | 5.2257557 16 | 4.1198754 17 | 3.4772325 18 | 4.0583863 19 | 2.9378524 20 | 3.0813446 21 | 2.9486506 22 | 2.5434844 23 | 2.58703 24 | 2.459577 25 | 3.1557796 26 | 2.0837097 27 | 1.9693432 28 | 1.899806 29 | 1.5797603 30 | 1.4968352 31 | 1.4581344 32 | 1.5587962 33 | 1.220492 34 | 1.362877 35 | 1.2247486 36 | 1.489146 37 | 1.4644587 38 | 1.2137071 39 | 1.0841424 40 | 4.0899763 41 | 1.2214422 42 | 1.3296063 43 | 1.2031332 44 | 1.299796 45 | 1.3504095 46 | 1.1491431 47 | 1.1138012 48 | 0.97268635 49 | 1.2345653 50 | 1.0073745 51 | 1.188381 52 | 0.98228824 53 | 1.0379413 54 | 0.9950472 55 | 1.0473084 56 | 0.9406608 57 | 0.9817196 58 | 0.9545123 59 | 0.8435831 60 | 0.8143985 61 | 0.91701496 62 | 0.8336255 63 | 0.85646856 64 | 0.92413497 65 | 0.87450814 66 | 0.8365324 67 | 0.75710034 68 | 0.7371104 69 | 0.77570784 70 | 0.81392986 71 | 0.8216494 72 | 0.8198476 
73 | 0.7251981 74 | 0.75723886 75 | 0.7605809 76 | 0.76228595 77 | 0.8431013 78 | 0.8183558 79 | 0.85298234 80 | 0.8530959 81 | 1.3562142 82 | 0.6944893 83 | 0.6296829 84 | 0.5807386 85 | 0.6112453 86 | 0.56399786 87 | 0.6790932 88 | 0.55981314 89 | 0.6758379 90 | 0.5266169 91 | 0.63434607 92 | 0.52375484 93 | 0.570174 94 | 0.5226524 95 | 0.50408334 96 | 0.49079913 97 | 0.43146744 98 | 0.37072882 99 | 0.3393969 100 | 0.3293153 101 | 0.32511514 102 | 0.40294093 103 | 0.33375043 104 | 0.33665854 105 | 0.33706 106 | 0.3306109 107 | 0.30813527 108 | 0.31012738 109 | 0.2880918 110 | 0.27780628 111 | 0.25394046 112 | 0.39892262 113 | 0.23849243 114 | 0.26006222 115 | 0.23009925 116 | 0.23687479 117 | 0.22990245 118 | 0.56309927 119 | 0.23444307 120 | 0.21745971 121 | 0.21070722 122 | 0.213061 123 | 0.21875235 124 | 0.21540111 125 | 0.23609331 126 | 0.18924375 127 | 0.19534901 128 | 0.22435704 129 | 0.18865877 130 | 0.18656209 131 | 0.18443361 132 | 0.17940938 133 | 0.20248407 134 | 0.17129472 135 | 0.16532362 136 | 0.15786105 137 | 0.15836948 138 | 0.14429834 139 | 0.14310397 140 | 0.13805616 141 | 0.13500038 142 | 0.12747559 143 | 0.12342392 144 | 0.11662106 145 | 0.110164165 146 | 0.10586389 147 | 0.1065328 148 | 0.102660984 149 | 0.10605111 150 | 0.11332986 151 | 0.10285639 152 | 0.10357994 153 | 0.103328675 154 | 0.10177197 155 | 0.09618276 156 | 0.09338962 157 | 0.09168422 158 | 0.09095493 159 | 0.099803895 160 | 0.08527745 161 | 0.08214341 162 | 0.07976426 163 | 0.0788444 164 | 0.07906821 165 | 0.07566388 166 | 0.0741553 167 | 0.07328129 168 | 0.07083708 169 | 0.06897974 170 | 0.06784868 171 | 0.06768092 172 | 0.06597698 173 | 0.06573582 174 | 0.06474579 175 | 0.065179795 176 | 0.06439343 177 | 0.062861405 178 | 0.060703553 179 | 0.05993463 180 | 0.05817431 181 | 0.0577647 182 | 0.056495756 183 | 0.056127906 184 | 0.055480722 185 | 0.055380143 186 | 0.055504955 187 | 0.05468315 188 | 0.052907016 189 | 0.051398765 190 | 0.050784305 191 | 0.053260587 192 | 0.052960545 193 | 0.054774202 194 | 0.05322984 195 | 0.053140037 196 | 0.05235585 197 | 0.051487014 198 | 0.051215287 199 | 0.050174557 200 | 0.048497964 201 | 0.046339184 202 | 0.04702593 203 | 0.047394224 204 | 0.04668963 205 | 0.047478665 206 | 0.04763881 207 | 0.047438487 208 | 0.047107067 209 | 0.04755401 210 | 0.04744702 211 | 0.04685104 212 | 0.045773573 213 | 0.04530634 214 | 0.0450984 215 | 0.044767167 216 | 0.04478293 217 | 0.04483425 218 | 0.044318672 219 | 0.04352832 220 | 0.04360339 221 | 0.043525063 222 | 0.04284539 223 | 0.042442337 224 | 0.04255124 225 | 0.042404816 226 | 0.041680135 227 | 0.04105074 228 | 0.041318767 229 | 0.04234954 230 | 0.042180963 231 | 0.04155681 232 | 0.04137364 233 | 0.041053064 234 | 0.041218117 235 | 0.04110297 236 | 0.04116427 237 | 0.040041365 238 | 0.039707493 239 | 0.03876391 240 | 0.038896464 241 | 0.038590297 242 | 0.038043037 243 | 0.037762355 244 | 0.03761966 245 | 0.037600376 246 | 0.036450364 247 | 0.035649627 248 | 0.03502159 249 | 0.034623835 250 | 0.03495665 251 | 0.035235018 252 | 0.035154868 253 | 0.035170265 254 | 0.035217147 255 | 0.036016166 256 | 0.0360736 257 | 0.035810098 258 | 0.03542709 259 | 0.035118904 260 | 0.035227723 261 | 0.03490386 262 | 0.03492393 263 | 0.034651257 264 | 0.034474548 265 | 0.034120385 266 | 0.034134172 267 | 0.03386668 268 | 0.03366927 269 | 0.03336972 270 | 0.03283018 271 | 0.03236047 272 | 0.0319527 273 | 0.031195784 274 | 0.030494802 275 | 0.03014269 276 | 0.030356966 277 | 0.02977141 278 | 0.029470904 279 | 0.02935423 280 | 0.029012611 281 | 
0.029290475 282 | 0.02944414 283 | 0.029300101 284 | 0.028703406 285 | 0.028484033 286 | 0.028116612 287 | 0.027537301 288 | 0.027515601 289 | 0.027700277 290 | 0.028005082 291 | 0.02795085 292 | 0.027240712 293 | 0.027222455 294 | 0.02704629 295 | 0.026781049 296 | 0.026614042 297 | 0.026467616 298 | 0.02633188 299 | 0.026122482 300 | 0.026053216 301 | 0.026040886 302 | 0.02628009 303 | 0.025908474 304 | 0.025504354 305 | 0.025308901 306 | 0.024995387 307 | 0.024888638 308 | 0.024782632 309 | 0.024713505 310 | 0.02488972 311 | 0.024959706 312 | 0.025536185 313 | 0.02559463 314 | 0.025657393 315 | 0.025739117 316 | 0.025838781 317 | 0.025618454 318 | 0.025689717 319 | 0.025577767 320 | 0.025243077 321 | 0.025107715 322 | 0.025015332 323 | 0.025154792 324 | 0.02533705 325 | 0.025496466 326 | 0.025423974 327 | 0.025093112 328 | 0.02445102 329 | 0.024263233 330 | 0.024103548 331 | 0.024058495 332 | 0.02413531 333 | 0.023820926 334 | 0.023558203 335 | 0.023190707 336 | 0.023015823 337 | 0.022899136 338 | 0.022786804 339 | 0.022690108 340 | 0.022182398 341 | 0.02212245 342 | 0.022029549 343 | 0.021978162 344 | 0.02192282 345 | 0.021701772 346 | 0.021588791 347 | 0.02147 348 | 0.021340381 349 | 0.021337666 350 | 0.021150887 351 | 0.021131476 352 | 0.021164406 353 | 0.021156052 354 | 0.02116965 355 | 0.021029089 356 | 0.02088185 357 | 0.020787809 358 | 0.02070602 359 | 0.020496279 360 | 0.020391693 361 | 0.02018527 362 | 0.020281829 363 | 0.020167608 364 | 0.019977892 365 | 0.019777115 366 | 0.019712308 367 | 0.019495115 368 | 0.019425733 369 | 0.019140419 370 | 0.019044261 371 | 0.019085526 372 | 0.019217491 373 | 0.019128632 374 | 0.018867724 375 | 0.018837424 376 | 0.018892813 377 | 0.018679772 378 | 0.01853527 379 | 0.018549863 380 | 0.018474573 381 | 0.018489052 382 | 0.01845584 383 | 0.018325228 384 | 0.018343046 385 | 0.01845608 386 | 0.018325698 387 | 0.018141363 388 | 0.018066788 389 | 0.018017627 390 | 0.017777175 391 | 0.017677318 392 | 0.017508212 393 | 0.017343689 394 | 0.017193476 395 | 0.017261472 396 | 0.017297685 397 | 0.017331637 398 | 0.01733784 399 | 0.017245997 400 | 0.017313424 401 | 0.017426386 402 | 0.017348623 403 | 0.017307397 404 | 0.017383117 405 | 0.017472655 406 | 0.017406685 407 | 0.017397195 408 | 0.01737038 409 | 0.01739993 410 | 0.017399319 411 | 0.017342025 412 | 0.017376088 413 | 0.017187152 414 | 0.017005758 415 | 0.017195322 416 | 0.017262304 417 | 0.017240606 418 | 0.017202254 419 | 0.017242476 420 | 0.017325772 421 | 0.017381813 422 | 0.017596716 423 | 0.01759268 424 | 0.017519992 425 | 0.017543249 426 | 0.017507628 427 | 0.01738146 428 | 0.01743956 429 | 0.017382966 430 | 0.017371368 431 | 0.017278364 432 | 0.017282248 433 | 0.017163914 434 | 0.01706127 435 | 0.016975002 436 | 0.016781326 437 | 0.016676545 438 | 0.016676307 439 | 0.016733691 440 | 0.016810685 441 | 0.016760472 442 | 0.017074361 443 | 0.017212242 444 | 0.017275797 445 | 0.017323107 446 | 0.017482046 447 | 0.017556243 448 | 0.017694637 449 | 0.017768536 450 | 0.01790523 451 | 0.01804807 452 | 0.018147592 453 | 0.018220527 454 | 0.018219303 455 | 0.018191028 456 | 0.018239498 457 | 0.01822544 458 | 0.018305399 459 | 0.018164877 460 | 0.018065462 461 | 0.018078448 462 | 0.018074445 463 | 0.017961226 464 | 0.017821128 465 | 0.01761264 466 | 0.017536392 467 | 0.017444674 468 | 0.017408114 469 | 0.017393239 470 | 0.017292175 471 | 0.017178854 472 | 0.017100997 473 | 0.016979005 474 | 0.016918276 475 | 0.016853528 476 | 0.016780816 477 | 0.016700704 478 | 0.016681155 479 | 0.016635282 480 | 
0.01651837 481 | 0.016359074 482 | 0.016274728 483 | 0.016059097 484 | 0.015953377 485 | 0.015866874 486 | 0.015882898 487 | 0.01583857 488 | 0.015780695 489 | 0.015790965 490 | 0.01573008 491 | 0.015604233 492 | 0.015439836 493 | 0.01530718 494 | 0.015236402 495 | 0.01535296 496 | 0.015333979 497 | 0.015348202 498 | 0.015283081 499 | 0.0151897 500 | 0.015090562 501 | 0.014893418 502 | 0.014890119 503 | 0.014786834 504 | 0.014690302 505 | 0.014649837 506 | 0.014576275 507 | 0.014686508 508 | 0.014815633 509 | 0.014811817 510 | 0.014812935 511 | 0.014797429 512 | 0.0147727765 513 | 0.014768298 514 | 0.014691103 515 | 0.01462503 516 | 0.014639605 517 | 0.014588371 518 | 0.014604654 519 | 0.014595484 520 | 0.014503131 521 | 0.014442878 522 | 0.0143460985 523 | 0.014264105 524 | 0.014204726 525 | 0.014168955 526 | 0.014162632 527 | 0.014206355 528 | 0.0142188 529 | 0.0141733065 530 | 0.014179031 531 | 0.014180055 532 | 0.014096189 533 | 0.014190808 534 | 0.014178364 535 | 0.014134929 536 | 0.014095043 537 | 0.014112437 538 | 0.0141296005 539 | 0.014214022 540 | 0.014228393 541 | 0.0142224645 542 | 0.014268478 543 | 0.0143520385 544 | 0.014357294 545 | 0.0142859835 546 | 0.01421415 547 | 0.014141267 548 | 0.014065312 549 | 0.014054114 550 | 0.014072235 551 | 0.013974312 552 | 0.013872648 553 | 0.0138603095 554 | 0.013886668 555 | 0.013889547 556 | 0.013909912 557 | 0.013999782 558 | 0.014131998 559 | 0.014132708 560 | 0.01404959 561 | 0.014138807 562 | 0.014190182 563 | 0.014184022 564 | 0.014191522 565 | 0.01427937 566 | 0.014202356 567 | 0.014255339 568 | 0.014327731 569 | 0.01437326 570 | 0.014462834 571 | 0.01440629 572 | 0.014351598 573 | 0.014344489 574 | 0.014360506 575 | 0.014289159 576 | 0.01425822 577 | 0.014182566 578 | 0.014132034 579 | 0.014150973 580 | 0.014109655 581 | 0.014089151 582 | 0.014007343 583 | 0.013982347 584 | 0.013973579 585 | 0.013988411 586 | 0.013928108 587 | 0.013879417 588 | 0.0137743205 589 | 0.013752424 590 | 0.013797229 591 | 0.013791219 592 | 0.013852432 593 | 0.0138518885 594 | 0.013808208 595 | 0.013777421 596 | 0.013772387 597 | 0.013754571 598 | 0.013764872 599 | 0.013753554 600 | 0.0137649495 601 | 0.013722091 602 | 0.013744454 603 | 0.013716079 604 | 0.013680395 605 | 0.013671558 606 | 0.013609791 607 | 0.013512149 608 | 0.013485096 609 | 0.0134958485 610 | 0.013362005 611 | 0.013287552 612 | 0.013326955 613 | 0.013302937 614 | 0.0132948235 615 | 0.013325541 616 | 0.0133242775 617 | 0.013349992 618 | 0.013407009 619 | 0.013380174 620 | 0.013389619 621 | 0.0133875515 622 | 0.013397172 623 | 0.013466133 624 | 0.013478467 625 | 0.013515513 626 | 0.013550548 627 | 0.01349319 628 | 0.013474943 629 | 0.013411749 630 | 0.013375353 631 | 0.013394121 632 | 0.013421699 633 | 0.013479904 634 | 0.013435673 635 | 0.013384938 636 | 0.013368468 637 | 0.013338969 638 | 0.0133076515 639 | 0.013220083 640 | 0.013111874 641 | 0.013049489 642 | 0.012972523 643 | 0.0128942095 644 | 0.01278398 645 | 0.012701768 646 | 0.012660904 647 | 0.012621751 648 | 0.0125385765 649 | 0.012441015 650 | 0.012383174 651 | 0.012315055 652 | 0.012271063 653 | 0.012285881 654 | 0.012274603 655 | 0.012329796 656 | 0.012336215 657 | 0.012336727 658 | 0.012326193 659 | 0.012265852 660 | 0.012208128 661 | 0.012128459 662 | 0.012126907 663 | -------------------------------------------------------------------------------- /heat conduction problem/0.11/MMPINN/log7.txt: -------------------------------------------------------------------------------- 1 | 752760450.0 2 | 635941600.0 3 | 554560100.0 4 | 
493159550.0 5 | 428668450.0 6 | 383045570.0 7 | 335035040.0 8 | 296589860.0 9 | 262840130.0 10 | 234752340.0 11 | 210680980.0 12 | 185032700.0 13 | 166303570.0 14 | 145290080.0 15 | 130685400.0 16 | 116572440.0 17 | 105418776.0 18 | 93951520.0 19 | 85558380.0 20 | 78759256.0 21 | 72299470.0 22 | 65787576.0 23 | 60514780.0 24 | 55147292.0 25 | 49750756.0 26 | 45298610.0 27 | 41903544.0 28 | 39181950.0 29 | 36240950.0 30 | 33997200.0 31 | 31829202.0 32 | 29206318.0 33 | 27339972.0 34 | 25446100.0 35 | 23769246.0 36 | 22247272.0 37 | 20980302.0 38 | 19887990.0 39 | 18891932.0 40 | 17692568.0 41 | 16888030.0 42 | 16023490.0 43 | 15125908.0 44 | 14264288.0 45 | 13458225.0 46 | 12625982.0 47 | 11918749.0 48 | 11268519.0 49 | 10650390.0 50 | 10092427.0 51 | 9507573.0 52 | 8931582.0 53 | 8458694.0 54 | 7931540.5 55 | 7433371.0 56 | 6974328.0 57 | 6485836.0 58 | 6093025.5 59 | 5791742.5 60 | 5389744.0 61 | 5060624.0 62 | 4699328.5 63 | 4365740.0 64 | 4082368.8 65 | 3827641.2 66 | 3563671.5 67 | 3328483.0 68 | 3070048.2 69 | 2856565.5 70 | 2640017.2 71 | 2440446.0 72 | 2267084.5 73 | 2096371.1 74 | 1909493.0 75 | 1736409.9 76 | 1592866.1 77 | 1458322.0 78 | 1339840.9 79 | 1221934.9 80 | 1110312.4 81 | 1003367.6 82 | 921660.94 83 | 843702.6 84 | 763932.1 85 | 683344.94 86 | 615863.94 87 | 551468.5 88 | 487112.75 89 | 430368.34 90 | 388901.22 91 | 350036.2 92 | 319397.94 93 | 290111.53 94 | 263910.72 95 | 237299.5 96 | 215336.34 97 | 193774.66 98 | 175966.03 99 | 158637.56 100 | 140786.64 101 | 126764.89 102 | 114628.85 103 | 104439.44 104 | 95480.13 105 | 85011.25 106 | 77725.25 107 | 71267.4 108 | 65221.09 109 | 60709.816 110 | 56300.258 111 | 52502.33 112 | 48530.977 113 | 45617.273 114 | 42275.258 115 | 39464.703 116 | 36821.51 117 | 34042.23 118 | 31926.672 119 | 28813.184 120 | 26395.96 121 | 24185.748 122 | 22482.152 123 | 20799.035 124 | 19441.432 125 | 18338.947 126 | 17181.791 127 | 16067.303 128 | 14911.316 129 | 13918.439 130 | 13023.93 131 | 12133.45 132 | 11222.641 133 | 10565.118 134 | 9811.949 135 | 9270.9 136 | 8776.649 137 | 8259.285 138 | 7841.6084 139 | 7398.841 140 | 7024.5205 141 | 6679.86 142 | 6344.9243 143 | 6012.6655 144 | 5706.4204 145 | 5404.8545 146 | 5137.3843 147 | 4927.26 148 | 4698.391 149 | 4464.2983 150 | 4284.6123 151 | 4075.4827 152 | 3893.3242 153 | 3750.2163 154 | 3600.2778 155 | 3453.9941 156 | 3329.8777 157 | 3201.4607 158 | 3076.2122 159 | 2963.847 160 | 2831.0117 161 | 2728.2688 162 | 2623.7495 163 | 2522.8516 164 | 2435.7163 165 | 2341.7542 166 | 2256.1155 167 | 2174.3564 168 | 2104.5596 169 | 2033.5536 170 | 1961.4012 171 | 1890.1848 172 | 1829.4958 173 | 1763.7756 174 | 1706.823 175 | 1659.2074 176 | 1599.8472 177 | 1554.4381 178 | 1500.6947 179 | 1452.6229 180 | 1411.5555 181 | 1370.7714 182 | 1331.2438 183 | 1295.1117 184 | 1257.3925 185 | 1223.3778 186 | 1191.5687 187 | 1158.0819 188 | 1126.5161 189 | 1101.6521 190 | 1073.2029 191 | 1049.2281 192 | 1023.7333 193 | 993.96814 194 | 970.22406 195 | 948.26776 196 | 926.8228 197 | 906.88495 198 | 885.5361 199 | 864.9827 200 | 844.4119 201 | 825.2301 202 | 806.0917 203 | 787.84503 204 | 771.5072 205 | 753.13 206 | 736.6046 207 | 720.4272 208 | 705.69696 209 | 692.3409 210 | 679.1625 211 | 666.992 212 | 655.5626 213 | 642.10657 214 | 630.7156 215 | 620.0768 216 | 608.25354 217 | 597.27496 218 | 586.8525 219 | 574.9672 220 | 563.06104 221 | 551.43195 222 | 541.3146 223 | 532.044 224 | 522.4046 225 | 514.0342 226 | 504.3468 227 | 495.7956 228 | 487.131 229 | 479.01407 230 | 471.10184 231 | 462.68552 232 | 
454.21167 233 | 446.81488 234 | 438.97253 235 | 432.49768 236 | 424.8607 237 | 418.14444 238 | 411.47037 239 | 405.06897 240 | 399.10938 241 | 393.1677 242 | 387.1501 243 | 381.21576 244 | 375.65463 245 | 369.28036 246 | 363.94193 247 | 358.4152 248 | 352.80585 249 | 347.09808 250 | 340.917 251 | 335.4807 252 | 330.3504 253 | 325.54932 254 | 320.9835 255 | 316.08023 256 | 310.62775 257 | 305.44604 258 | 300.6082 259 | 296.21896 260 | 292.12347 261 | 288.18112 262 | 284.045 263 | 279.98947 264 | 275.7522 265 | 271.9076 266 | 267.75104 267 | 263.5414 268 | 259.96887 269 | 256.49567 270 | 252.60297 271 | 249.5657 272 | 246.12907 273 | 243.03313 274 | 239.8218 275 | 236.47052 276 | 233.17656 277 | 230.03699 278 | 226.89742 279 | 223.7213 280 | 220.7904 281 | 217.44864 282 | 214.91492 283 | 211.4572 284 | 208.94316 285 | 206.13858 286 | 203.15977 287 | 200.53091 288 | 198.0431 289 | 195.47002 290 | 193.18443 291 | 190.61247 292 | 188.3556 293 | 185.77652 294 | 183.1214 295 | 180.53864 296 | 177.9821 297 | 175.57156 298 | 173.378 299 | 171.17496 300 | 169.01521 301 | 167.0837 302 | 164.9694 303 | 163.05383 304 | 161.19008 305 | 159.24715 306 | 157.48042 307 | 155.60414 308 | 153.78127 309 | 152.14597 310 | 150.1905 311 | 148.42603 312 | 146.54507 313 | 145.12593 314 | 143.40146 315 | 141.87604 316 | 140.37936 317 | 139.02966 318 | 137.47284 319 | 136.07869 320 | 134.79782 321 | 133.36938 322 | 132.11841 323 | 130.92802 324 | 129.62904 325 | 128.363 326 | 127.25444 327 | 125.93022 328 | 124.93773 329 | 123.70142 330 | 122.53935 331 | 121.4074 332 | 120.12575 333 | 118.90814 334 | 117.97138 335 | 116.82732 336 | 115.71506 337 | 114.7916 338 | 113.76323 339 | 112.69644 340 | 111.66006 341 | 110.67446 342 | 109.81088 343 | 108.80592 344 | 107.81044 345 | 106.98146 346 | 106.0822 347 | 105.14624 348 | 104.38404 349 | 103.57034 350 | 102.62454 351 | 101.81191 352 | 101.00532 353 | 100.23846 354 | 99.4857 355 | 98.70946 356 | 97.91626 357 | 97.25528 358 | 96.50222 359 | 95.78483 360 | 95.10338 361 | 94.32585 362 | 93.64782 363 | 92.94412 364 | 92.24582 365 | 91.56809 366 | 90.99006 367 | 90.34334 368 | 89.67982 369 | 89.05112 370 | 88.3706 371 | 87.67948 372 | 87.00284 373 | 86.40629 374 | 85.69085 375 | 85.15616 376 | 84.44885 377 | 83.85472 378 | 83.16711 379 | 82.46848 380 | 81.806984 381 | 81.15853 382 | 80.54114 383 | 79.92585 384 | 79.27814 385 | 78.73398 386 | 78.15572 387 | 77.60071 388 | 77.0211 389 | 76.4902 390 | 75.97512 391 | 75.43168 392 | 74.912094 393 | 74.37661 394 | 73.8465 395 | 73.29952 396 | 72.80086 397 | 72.33029 398 | 71.83558 399 | 71.37097 400 | 70.87053 401 | 70.423004 402 | 69.92565 403 | 69.46411 404 | 68.92742 405 | 68.3661 406 | 67.838326 407 | 67.32956 408 | 66.824 409 | 66.335724 410 | 65.80835 411 | 65.34923 412 | 64.88604 413 | 64.44832 414 | 64.04981 415 | 63.589165 416 | 63.095154 417 | 62.63021 418 | 62.19007 419 | 61.714226 420 | 61.322224 421 | 60.84194 422 | 60.397934 423 | 59.98533 424 | 59.55511 425 | 59.169895 426 | 58.75116 427 | 58.36008 428 | 57.95291 429 | 57.597157 430 | 57.17832 431 | 56.78686 432 | 56.39757 433 | 56.00878 434 | 55.66162 435 | 55.25153 436 | 54.90744 437 | 54.54066 438 | 54.18664 439 | 53.86087 440 | 53.51687 441 | 53.23199 442 | 52.83173 443 | 52.48416 444 | 52.16149 445 | 51.8114 446 | 51.46237 447 | 51.10547 448 | 50.763313 449 | 50.40985 450 | 50.08117 451 | 49.7083 452 | 49.389324 453 | 49.07376 454 | 48.77229 455 | 48.450005 456 | 48.13025 457 | 47.80532 458 | 47.505035 459 | 47.19399 460 | 46.91155 461 | 46.599323 462 | 46.31327 
463 | 46.051186 464 | 45.74538 465 | 45.46626 466 | 45.23075 467 | 44.99118 468 | 44.714115 469 | 44.44431 470 | 44.20565 471 | 43.92005 472 | 43.685986 473 | 43.42549 474 | 43.140026 475 | 42.89635 476 | 42.63696 477 | 42.364304 478 | 42.143604 479 | 41.889492 480 | 41.6644 481 | 41.42989 482 | 41.15753 483 | 40.905674 484 | 40.674706 485 | 40.46568 486 | 40.252888 487 | 40.002274 488 | 39.743988 489 | 39.492805 490 | 39.23068 491 | 38.99867 492 | 38.74605 493 | 38.513645 494 | 38.302547 495 | 38.083187 496 | 37.84875 497 | 37.641033 498 | 37.432194 499 | 37.20153 500 | 36.9838 501 | 36.762367 502 | 36.56487 503 | 36.359554 504 | 36.149605 505 | 35.93513 506 | 35.71518 507 | 35.492085 508 | 35.296387 509 | 35.08406 510 | 34.912384 511 | 34.700512 512 | 34.510616 513 | 34.347855 514 | 34.147816 515 | 33.965527 516 | 33.75652 517 | 33.57192 518 | 33.386097 519 | 33.21446 520 | 33.028088 521 | 32.875423 522 | 32.704727 523 | 32.53273 524 | 32.35853 525 | 32.189877 526 | 32.020897 527 | 31.847422 528 | 31.689434 529 | 31.530293 530 | 31.377645 531 | 31.205997 532 | 31.042517 533 | 30.882225 534 | 30.73206 535 | 30.584572 536 | 30.43481 537 | 30.276487 538 | 30.120556 539 | 29.957926 540 | 29.806973 541 | 29.64573 542 | 29.49573 543 | 29.325396 544 | 29.167578 545 | 29.006676 546 | 28.863735 547 | 28.707237 548 | 28.554686 549 | 28.40096 550 | 28.257267 551 | 28.097324 552 | 27.937063 553 | 27.78017 554 | 27.639416 555 | 27.4963 556 | 27.351723 557 | 27.213665 558 | 27.08165 559 | 26.946127 560 | 26.81568 561 | 26.663748 562 | 26.523464 563 | 26.391405 564 | 26.268656 565 | 26.142588 566 | 26.0158 567 | 25.886536 568 | 25.756016 569 | 25.628162 570 | 25.49345 571 | 25.376715 572 | 25.234646 573 | 25.10458 574 | 24.982704 575 | 24.857792 576 | 24.738 577 | 24.6209 578 | 24.50208 579 | 24.383535 580 | 24.277338 581 | 24.16752 582 | 24.060698 583 | 23.955475 584 | 23.85271 585 | 23.747786 586 | 23.63724 587 | 23.53949 588 | 23.438456 589 | 23.335388 590 | 23.23517 591 | 23.130945 592 | 23.031923 593 | 22.93815 594 | 22.83669 595 | 22.738625 596 | 22.640656 597 | 22.540976 598 | 22.450928 599 | 22.361176 600 | 22.26192 601 | 22.171875 602 | 22.06889 603 | 21.98821 604 | 21.888744 605 | 21.79544 606 | 21.697025 607 | 21.60986 608 | 21.519352 609 | 21.43478 610 | 21.348036 611 | 21.25651 612 | 21.161072 613 | 21.069958 614 | 20.976234 615 | 20.867132 616 | 20.780945 617 | 20.681967 618 | 20.589603 619 | 20.494188 620 | 20.394457 621 | 20.296797 622 | 20.190903 623 | 20.09311 624 | 19.995037 625 | 19.897068 626 | 19.806465 627 | 19.728378 628 | 19.652918 629 | 19.570656 630 | 19.484068 631 | 19.397362 632 | 19.32619 633 | 19.245077 634 | 19.170193 635 | 19.096304 636 | 19.030888 637 | 18.9478 638 | 18.870565 639 | 18.794052 640 | 18.718378 641 | 18.672693 642 | 18.597046 643 | 18.52005 644 | 18.445967 645 | 18.372953 646 | 18.304766 647 | 18.233376 648 | 18.186918 649 | 18.123447 650 | 18.048006 651 | 17.986868 652 | 17.91637 653 | 17.848576 654 | 17.771849 655 | 17.700117 656 | 17.63632 657 | 17.57228 658 | 17.507462 659 | 17.447792 660 | 17.382036 661 | 17.32819 662 | 17.327946 663 | -------------------------------------------------------------------------------- /heat conduction problem/0.15/Hard Constraint/3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/heat conduction problem/0.15/Hard Constraint/3.mat 
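Each log*.txt file in this repository stores one loss value per line, appended periodically by the corresponding training script (in 0.15PINN(1000k Adam).py below, for example, values are written every 20 iterations, with log1/log5 tracking the initial-condition loss, log2/log6 the boundary loss, and log3/log7 the PDE-residual loss; other scripts may use a different ordering). A minimal, repository-independent sketch for inspecting such a history follows; the helper name plot_log, the chosen file, and the curve label are illustrative assumptions, not part of the repository.

import numpy as np
import matplotlib.pyplot as plt

def plot_log(path, label):
    loss = np.loadtxt(path)  # one loss value per line
    plt.semilogy(np.arange(len(loss)), loss, label=label)

# Illustrative file choice; the label assumes the log7 = PDE-residual convention of the 0.15 script.
plot_log("heat conduction problem/0.11/MMPINN/log7.txt", "log7 (PDE residual loss)")
plt.xlabel("logging step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_history.pdf", bbox_inches="tight")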
-------------------------------------------------------------------------------- /heat conduction problem/0.15/Hard Constraint/S: -------------------------------------------------------------------------------- 1 | S 2 | -------------------------------------------------------------------------------- /heat conduction problem/0.15/Hard Constraint/log1.txt: -------------------------------------------------------------------------------- 1 | 7.937882e-15 2 | 7.937882e-15 3 | 7.937882e-15 4 | 7.937882e-15 5 | 7.937882e-15 6 | 7.937882e-15 7 | 7.937882e-15 8 | 7.937882e-15 9 | 7.937882e-15 10 | 7.937882e-15 11 | -------------------------------------------------------------------------------- /heat conduction problem/0.15/Hard Constraint/log2.txt: -------------------------------------------------------------------------------- 1 | 0.0 2 | 0.0 3 | 0.0 4 | 0.0 5 | 0.0 6 | 0.0 7 | 0.0 8 | 0.0 9 | 0.0 10 | 0.0 11 | -------------------------------------------------------------------------------- /heat conduction problem/0.15/Hard Constraint/log3.txt: -------------------------------------------------------------------------------- 1 | 4845863.5 2 | 4651052.0 3 | 4549247.0 4 | 4444671.0 5 | 4342365.5 6 | 4243203.0 7 | 4149417.2 8 | 4073335.5 9 | 4013675.2 10 | 3963471.5 11 | -------------------------------------------------------------------------------- /heat conduction problem/0.15/Hard Constraint/log5.txt: -------------------------------------------------------------------------------- 1 | 7.937882e-15 2 | 7.937882e-15 3 | 7.937882e-15 4 | 7.937882e-15 5 | 7.937882e-15 6 | 7.937882e-15 7 | 7.937882e-15 8 | 7.937882e-15 9 | 7.937882e-15 10 | 7.937882e-15 11 | 7.937882e-15 12 | 7.937882e-15 13 | 7.937882e-15 14 | 7.937882e-15 15 | 7.937882e-15 16 | 7.937882e-15 17 | 7.937882e-15 18 | 7.937882e-15 19 | 7.937882e-15 20 | 7.937882e-15 21 | 7.937882e-15 22 | 7.937882e-15 23 | 7.937882e-15 24 | 7.937882e-15 25 | 7.937882e-15 26 | 7.937882e-15 27 | 7.937882e-15 28 | 7.937882e-15 29 | 7.937882e-15 30 | 7.937882e-15 31 | 7.937882e-15 32 | 7.937882e-15 33 | 7.937882e-15 34 | 7.937882e-15 35 | 7.937882e-15 36 | 7.937882e-15 37 | 7.937882e-15 38 | 7.937882e-15 39 | 7.937882e-15 40 | 7.937882e-15 41 | 7.937882e-15 42 | 7.937882e-15 43 | 7.937882e-15 44 | 7.937882e-15 45 | 7.937882e-15 46 | 7.937882e-15 47 | 7.937882e-15 48 | 7.937882e-15 49 | 7.937882e-15 50 | 7.937882e-15 51 | 7.937882e-15 52 | 7.937882e-15 53 | 7.937882e-15 54 | 7.937882e-15 55 | 7.937882e-15 56 | 7.937882e-15 57 | 7.937882e-15 58 | 7.937882e-15 59 | 7.937882e-15 60 | 7.937882e-15 61 | 7.937882e-15 62 | 7.937882e-15 63 | 7.937882e-15 64 | 7.937882e-15 65 | 7.937882e-15 66 | 7.937882e-15 67 | 7.937882e-15 68 | 7.937882e-15 69 | 7.937882e-15 70 | 7.937882e-15 71 | 7.937882e-15 72 | 7.937882e-15 73 | 7.937882e-15 74 | 7.937882e-15 75 | 7.937882e-15 76 | 7.937882e-15 77 | 7.937882e-15 78 | 7.937882e-15 79 | 7.937882e-15 80 | 7.937882e-15 81 | 7.937882e-15 82 | 7.937882e-15 83 | 7.937882e-15 84 | 7.937882e-15 85 | 7.937882e-15 86 | 7.937882e-15 87 | 7.937882e-15 88 | 7.937882e-15 89 | 7.937882e-15 90 | 7.937882e-15 91 | 7.937882e-15 92 | 7.937882e-15 93 | 7.937882e-15 94 | 7.937882e-15 95 | 7.937882e-15 96 | 7.937882e-15 97 | 7.937882e-15 98 | 7.937882e-15 99 | 7.937882e-15 100 | 7.937882e-15 101 | 7.937882e-15 102 | 7.937882e-15 103 | 7.937882e-15 104 | 7.937882e-15 105 | 7.937882e-15 106 | 7.937882e-15 107 | 7.937882e-15 108 | 7.937882e-15 109 | 7.937882e-15 110 | 7.937882e-15 111 | 7.937882e-15 112 | 7.937882e-15 113 | 7.937882e-15 114 | 
7.937882e-15 115 | 7.937882e-15 116 | 7.937882e-15 117 | 7.937882e-15 118 | 7.937882e-15 119 | 7.937882e-15 120 | 7.937882e-15 121 | 7.937882e-15 122 | 7.937882e-15 123 | 7.937882e-15 124 | 7.937882e-15 125 | 7.937882e-15 126 | 7.937882e-15 127 | 7.937882e-15 128 | 7.937882e-15 129 | 7.937882e-15 130 | 7.937882e-15 131 | 7.937882e-15 132 | 7.937882e-15 133 | 7.937882e-15 134 | 7.937882e-15 135 | 7.937882e-15 136 | 7.937882e-15 137 | 7.937882e-15 138 | 7.937882e-15 139 | 7.937882e-15 140 | 7.937882e-15 141 | 7.937882e-15 142 | 7.937882e-15 143 | 7.937882e-15 144 | 7.937882e-15 145 | 7.937882e-15 146 | 7.937882e-15 147 | 7.937882e-15 148 | 7.937882e-15 149 | 7.937882e-15 150 | 7.937882e-15 151 | -------------------------------------------------------------------------------- /heat conduction problem/0.15/Hard Constraint/log6.txt: -------------------------------------------------------------------------------- 1 | 0.0 2 | 0.0 3 | 0.0 4 | 0.0 5 | 0.0 6 | 0.0 7 | 0.0 8 | 0.0 9 | 0.0 10 | 0.0 11 | 0.0 12 | 0.0 13 | 0.0 14 | 0.0 15 | 0.0 16 | 0.0 17 | 0.0 18 | 0.0 19 | 0.0 20 | 0.0 21 | 0.0 22 | 0.0 23 | 0.0 24 | 0.0 25 | 0.0 26 | 0.0 27 | 0.0 28 | 0.0 29 | 0.0 30 | 0.0 31 | 0.0 32 | 0.0 33 | 0.0 34 | 0.0 35 | 0.0 36 | 0.0 37 | 0.0 38 | 0.0 39 | 0.0 40 | 0.0 41 | 0.0 42 | 0.0 43 | 0.0 44 | 0.0 45 | 0.0 46 | 0.0 47 | 0.0 48 | 0.0 49 | 0.0 50 | 0.0 51 | 0.0 52 | 0.0 53 | 0.0 54 | 0.0 55 | 0.0 56 | 0.0 57 | 0.0 58 | 0.0 59 | 0.0 60 | 0.0 61 | 0.0 62 | 0.0 63 | 0.0 64 | 0.0 65 | 0.0 66 | 0.0 67 | 0.0 68 | 0.0 69 | 0.0 70 | 0.0 71 | 0.0 72 | 0.0 73 | 0.0 74 | 0.0 75 | 0.0 76 | 0.0 77 | 0.0 78 | 0.0 79 | 0.0 80 | 0.0 81 | 0.0 82 | 0.0 83 | 0.0 84 | 0.0 85 | 0.0 86 | 0.0 87 | 0.0 88 | 0.0 89 | 0.0 90 | 0.0 91 | 0.0 92 | 0.0 93 | 0.0 94 | 0.0 95 | 0.0 96 | 0.0 97 | 0.0 98 | 0.0 99 | 0.0 100 | 0.0 101 | 0.0 102 | 0.0 103 | 0.0 104 | 0.0 105 | 0.0 106 | 0.0 107 | 0.0 108 | 0.0 109 | 0.0 110 | 0.0 111 | 0.0 112 | 0.0 113 | 0.0 114 | 0.0 115 | 0.0 116 | 0.0 117 | 0.0 118 | 0.0 119 | 0.0 120 | 0.0 121 | 0.0 122 | 0.0 123 | 0.0 124 | 0.0 125 | 0.0 126 | 0.0 127 | 0.0 128 | 0.0 129 | 0.0 130 | 0.0 131 | 0.0 132 | 0.0 133 | 0.0 134 | 0.0 135 | 0.0 136 | 0.0 137 | 0.0 138 | 0.0 139 | 0.0 140 | 0.0 141 | 0.0 142 | 0.0 143 | 0.0 144 | 0.0 145 | 0.0 146 | 0.0 147 | 0.0 148 | 0.0 149 | 0.0 150 | 0.0 151 | -------------------------------------------------------------------------------- /heat conduction problem/0.15/Hard Constraint/log7.txt: -------------------------------------------------------------------------------- 1 | 3619627.0 2 | 3453143.5 3 | 3330051.5 4 | 3276472.0 5 | 3236335.8 6 | 3206144.0 7 | 3176404.5 8 | 3153381.2 9 | 3107104.0 10 | 3088418.5 11 | 3075289.5 12 | 3061235.2 13 | 3046906.8 14 | 3036781.8 15 | 3020934.2 16 | 3007575.0 17 | 2998832.0 18 | 2987714.2 19 | 2976123.2 20 | 2967109.8 21 | 2956827.0 22 | 2944871.5 23 | 2925586.0 24 | 2904780.8 25 | 2894978.8 26 | 2884339.5 27 | 2872044.8 28 | 2862111.2 29 | 2850873.2 30 | 2832967.5 31 | 2776974.0 32 | 2643175.5 33 | 2548788.8 34 | 2483753.8 35 | 2449938.8 36 | 2319198.5 37 | 2249238.8 38 | 2185396.2 39 | 2141208.2 40 | 2106973.0 41 | 2066535.2 42 | 2023603.0 43 | 1980710.5 44 | 1949290.2 45 | 1924102.1 46 | 1882504.0 47 | 1855615.0 48 | 1832566.4 49 | 1802708.6 50 | 1780992.4 51 | 1749521.2 52 | 1718102.4 53 | 1691042.0 54 | 1668967.9 55 | 1649043.9 56 | 1630611.6 57 | 1606744.1 58 | 1585521.5 59 | 1566227.8 60 | 1549803.1 61 | 1538021.1 62 | 1526915.9 63 | 1515364.8 64 | 1505564.2 65 | 1497015.2 66 | 1489126.6 67 | 1480525.0 68 | 1473927.6 69 | 
1467214.5 70 | 1461146.2 71 | 1455351.9 72 | 1448435.5 73 | 1442101.0 74 | 1435315.0 75 | 1429108.0 76 | 1424824.9 77 | 1421133.9 78 | 1416406.1 79 | 1412839.1 80 | 1409057.4 81 | 1403758.1 82 | 1399276.8 83 | 1395534.5 84 | 1391956.2 85 | 1388692.0 86 | 1385709.9 87 | 1382306.9 88 | 1378927.2 89 | 1376686.1 90 | 1373883.0 91 | 1371326.6 92 | 1369422.6 93 | 1367838.2 94 | 1365731.1 95 | 1363095.0 96 | 1360776.1 97 | 1357821.8 98 | 1354945.4 99 | 1352302.0 100 | 1350217.1 101 | 1347213.1 102 | 1344588.4 103 | 1341993.4 104 | 1338420.2 105 | 1335766.4 106 | 1332459.0 107 | 1329334.1 108 | 1325542.8 109 | 1323451.0 110 | 1318262.0 111 | 1314083.9 112 | 1308676.5 113 | 1303871.9 114 | 1297973.5 115 | 1289669.1 116 | 1280609.9 117 | 1272722.0 118 | 1263232.9 119 | 1255952.4 120 | 1246557.9 121 | 1239924.4 122 | 1232520.9 123 | 1224429.0 124 | 1216914.9 125 | 1209696.6 126 | 1204787.9 127 | 1198202.0 128 | 1188836.4 129 | 1182341.5 130 | 1175059.5 131 | 1167645.1 132 | 1161256.8 133 | 1154305.0 134 | 1148385.1 135 | 1142003.2 136 | 1135792.5 137 | 1129152.8 138 | 1123529.4 139 | 1117866.9 140 | 1111239.5 141 | 1105619.1 142 | 1100034.0 143 | 1093699.8 144 | 1089402.5 145 | 1086594.2 146 | 1083912.2 147 | 1080539.5 148 | 1078214.5 149 | 1075895.2 150 | 1073590.6 151 | -------------------------------------------------------------------------------- /heat conduction problem/0.15/Hard Constraint/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /heat conduction problem/0.15/MMPINN/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /heat conduction problem/0.15/PINN (1000K Adam)/0.15PINN(1000k Adam).py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # In[ ]: 5 | 6 | 7 | import tensorflow as tf 8 | import numpy as np 9 | import scipy.io 10 | from pyDOE import lhs 11 | import time 12 | import time 13 | import math 14 | 15 | 16 | # In[2]: 17 | 18 | 19 | import matplotlib as mpl 20 | mpl.rcParams.update(mpl.rcParamsDefault) 21 | np.random.seed(1233) 22 | tf.set_random_seed(1233) 23 | 24 | 25 | # In[3]: 26 | 27 | 28 | class PhysicsInformedNN: 29 | # Initialize the class 30 | def __init__(self, x0, u0, tb, X_f, layers, lb, ub,u_lb,u_ub): 31 | 32 | # lb = np.array([-1, 0]) ub = np.array([1, 1]) 33 | 34 | X0 = np.concatenate((x0, 0*x0+0.0), 1) # 初始 35 | X_lb = np.concatenate((0*tb + lb[0], tb), 1) # 边界-1 36 | X_ub = np.concatenate((0*tb + ub[0], tb), 1) # 边界+1 37 | 38 | self.lb = lb 39 | self.ub = ub 40 | 41 | self.x0 = X0[:,0:1] 42 | self.t0 = X0[:,1:2] 43 | 44 | self.x_lb = X_lb[:,0:1] 45 | self.t_lb = X_lb[:,1:2] 46 | self.hsadasjd=1 47 | 48 | self.x_ub = X_ub[:,0:1] 49 | self.t_ub = X_ub[:,1:2] 50 | 51 | self.x_f = X_f[:,0:1] 52 | self.t_f = X_f[:,1:2] 53 | self.u_lb=u_lb 54 | self.u_ub=u_ub 55 | #分别是初始时刻的实部和虚部 56 | self.u0 = u0 57 | self.losslossloss=[] 58 | # Initialize NNs 59 | self.layers = layers 60 | #返回初始的权重w和偏差b 61 | self.weights, self.biases = self.initialize_NN(layers) 62 | 63 | # tf Placeholders 64 | #形参 占位符,行数不确定,列数确定为1 65 | self.x0_tf = tf.placeholder(tf.float32, shape=[None, self.x0.shape[1]]) 66 | self.t0_tf = tf.placeholder(tf.float32, shape=[None, self.t0.shape[1]]) 67 | self.u_lb_tf = tf.placeholder(tf.float32, shape=[None, self.u_lb.shape[1]]) 68 | self.u_ub_tf = tf.placeholder(tf.float32, shape=[None, self.u_ub.shape[1]]) 69 | self.u0_tf = tf.placeholder(tf.float32, shape=[None, 
self.u0.shape[1]]) 70 | self.x_lb_tf = tf.placeholder(tf.float32, shape=[None, self.x_lb.shape[1]]) 71 | self.t_lb_tf = tf.placeholder(tf.float32, shape=[None, self.t_lb.shape[1]]) 72 | self.x_ub_tf = tf.placeholder(tf.float32, shape=[None, self.x_ub.shape[1]]) 73 | self.t_ub_tf = tf.placeholder(tf.float32, shape=[None, self.t_ub.shape[1]]) 74 | self.x_f_tf = tf.placeholder(tf.float32, shape=[None, self.x_f.shape[1]]) 75 | self.t_f_tf = tf.placeholder(tf.float32, shape=[None, self.t_f.shape[1]]) 76 | 77 | # tf Graphs: predictions at the initial, boundary and collocation points 78 | self.u0_pred= self.net_uv(self.x0_tf, self.t0_tf) 79 | self.u_lb_pred= self.net_uv(self.x_lb_tf, self.t_lb_tf) 80 | self.u_ub_pred = self.net_uv(self.x_ub_tf, self.t_ub_tf) 81 | self.f_u_pred= self.net_f_uv(self.x_f_tf, self.t_f_tf) 82 | 83 | # Loss: sum of the initial-condition, boundary and PDE-residual terms 84 | self.loss3=tf.pow(tf.reduce_mean(tf.square(self.u0_tf - self.u0_pred)),1/1) 85 | 86 | self.loss2=tf.pow(tf.reduce_mean(tf.square(self.f_u_pred)),1/1) 87 | 88 | 89 | self.loss = tf.pow(tf.reduce_mean(tf.square(self.u0_tf - self.u0_pred)),1)+ tf.pow((tf.reduce_mean(tf.square(self.u_ub_tf - self.u_ub_pred)) + tf.reduce_mean(tf.square(self.u_lb_tf - self.u_lb_pred))),1) + tf.pow(tf.reduce_mean(tf.square(self.f_u_pred)),1/1) 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | self.loss4 = tf.pow((tf.reduce_mean(tf.square(self.u_ub_tf - self.u_ub_pred)) + tf.reduce_mean(tf.square(self.u_lb_tf - self.u_lb_pred))),1/1) 100 | 101 | 102 | # Optimizers: maxiter = maximum iterations, maxfun = maximum function evaluations, maxcor = maximum number of stored corrections 103 | # maxls = maximum number of line-search steps 104 | # maxls = maximum number of line-search steps 105 | self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss, 106 | method = 'L-BFGS-B', 107 | options = {'maxiter': 100000, 108 | 'maxfun': 100000, 109 | 'maxcor': 50, 110 | 'maxls': 50, 111 | 'ftol' : 1.0 * np.finfo(float).eps}) 112 | 113 | 114 | 115 | 116 | ''' 117 | Adam is an optimization algorithm that searches for a good optimum and adds a second-moment correction of the gradient. 118 | Besides updating the weights and biases through backpropagation, it also adapts the effective step size during training. 119 | The adaptation follows the loss: a large loss gives a large effective learning rate and a large correction, 120 | while a small loss gives a small correction and a small effective learning rate, 121 | never exceeding the base learning rate that was specified. 122 | ''' 123 | self.optimizer_Adam = tf.train.AdamOptimizer() 124 | self.train_op_Adam = self.optimizer_Adam.minimize(self.loss) 125 | 126 | # tf session: configure run options (soft device placement, device logging) 127 | self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, 128 | log_device_placement=True)) 129 | # initialize the model parameters 130 | init = tf.global_variables_initializer() 131 | self.sess.run(init) 132 | def initialize_NN(self, layers): 133 | weights = [] 134 | biases = [] 135 | num_layers = len(layers) 136 | for l in range(0,num_layers-1): 137 | W = self.xavier_init(size=[layers[l], layers[l+1]]) 138 | b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32) 139 | weights.append(W) 140 | biases.append(b) 141 | return weights, biases 142 | 143 | def xavier_init(self, size): 144 | in_dim = size[0] 145 | out_dim = size[1] 146 | xavier_stddev = np.sqrt(2/(in_dim + out_dim)) 147 | # truncated normal random values with standard deviation stddev, kept within [mean - 2*stddev, mean + 2*stddev] 148 | return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32) 149 | 150 | def neural_net(self, X, weights, biases): 151 | num_layers = len(weights) + 1 152 | 153 | # map the input X to [-1, 1] and call it H 154 | H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0 155 | for l in range(0,num_layers-2): 156 | W = weights[l] 157 | b = biases[l] 158 | H = tf.tanh(tf.add(tf.matmul(H, W), b)) 159 | W = weights[-1] 160 | b = biases[-1] 161 | Y = tf.add(tf.matmul(H, W), b) 162 | return Y 163 | 164 | def net_uv(self, x, t): 165 | X = tf.concat([x,t],1) 166 | 167 | uv = self.neural_net(X, self.weights,
self.biases) 168 | 169 | 170 | return uv 171 | 172 | 173 | 174 | def net_f_uv(self, x, t): 175 | 176 | u = self.net_uv(x,t) 177 | u_t = tf.gradients(u, t)[0] 178 | u_x = tf.gradients(u, x)[0] 179 | u_xx = tf.gradients(u_x, x)[0] 180 | 181 | hhhhhh=0.15 182 | f_u = u_t-u_xx-2*tf.exp(1/((2*t-1)*(2*t-1)+hhhhhh))+tf.exp(1/((2*t-1)*(2*t-1)+hhhhhh))*(1-x*x)*4*(2*t-1)/((2*t-1)*(2*t-1)+hhhhhh)/((2*t-1)*(2*t-1)+hhhhhh) 183 | #f_u=u-tf.exp(1/((2*t-1)*(2*t-1)+0.5))*(1-x*x) 184 | #return f_u/1319.919299519142 185 | return f_u 186 | 187 | def callback(self, loss,f_u_pred,u0_pred,u_ub_pred,u_lb_pred): 188 | 189 | self.losslossloss.append(loss) 190 | #losslossloss2 191 | sss=self.hsadasjd 192 | if sss%20==0: 193 | losssss =tf.reduce_mean(tf.square(f_u_pred)) 194 | array1 = losssss.eval(session=tf.Session()) 195 | tf_dict = {self.x0_tf: self.x0, self.t0_tf: self.t0, 196 | self.u0_tf: self.u0,self.u_lb_tf:self.u_lb,self.u_ub_tf:self.u_ub, 197 | self.x_lb_tf: self.x_lb, self.t_lb_tf: self.t_lb, 198 | self.x_ub_tf: self.x_ub, self.t_ub_tf: self.t_ub, 199 | self.x_f_tf: self.x_f, self.t_f_tf: self.t_f} 200 | 201 | loss1123456=self.u0_tf 202 | lossskdajsdkas=self.sess.run(loss1123456, tf_dict) 203 | zkjxJXhz = tf.reduce_mean(tf.square(lossskdajsdkas - u0_pred)) 204 | array2 = zkjxJXhz.eval(session=tf.Session()) 205 | 206 | loss1123456=self.u_ub_tf 207 | lssss1=self.sess.run(loss1123456, tf_dict) 208 | loss112345sds6=self.u_lb_tf 209 | sadsk=self.sess.run(loss112345sds6, tf_dict) 210 | 211 | zkjxJXhzs = tf.reduce_mean(tf.square( lssss1- u_ub_pred))+tf.reduce_mean(tf.square(sadsk - u_lb_pred)) 212 | array4 = zkjxJXhzs.eval(session=tf.Session()) 213 | print('It: %d, Loss1: %.9e,loss2: %.9e Loss3: %.9e' % 214 | (sss,array2,array4,array1)) 215 | log5=open("log5.txt",mode = 'a+', encoding = 'utf-8') 216 | print(array2,file=log5) 217 | log5.close() 218 | log6=open("log6.txt",mode = 'a+', encoding = 'utf-8') 219 | print(array4,file=log6) 220 | log6.close() 221 | log7=open("log7.txt",mode = 'a+', encoding = 'utf-8') 222 | print(array1,file=log7) 223 | log7.close() 224 | sss=sss+1 225 | self.hsadasjd=sss 226 | 227 | 228 | def train(self, nIter): 229 | tf_dict = {self.x0_tf: self.x0, self.t0_tf: self.t0, 230 | self.u0_tf: self.u0,self.u_lb_tf:self.u_lb,self.u_ub_tf:self.u_ub, 231 | self.x_lb_tf: self.x_lb, self.t_lb_tf: self.t_lb, 232 | self.x_ub_tf: self.x_ub, self.t_ub_tf: self.t_ub, 233 | self.x_f_tf: self.x_f, self.t_f_tf: self.t_f} 234 | lossloss1 = [] 235 | lossloss2 = [] 236 | lossloss3=[] 237 | 238 | start_time = time.time() 239 | 240 | loss_value11 = self.sess.run(self.loss3, tf_dict) 241 | lossloss1.append(loss_value11) 242 | 243 | loss_value22 = self.sess.run(self.loss2, tf_dict) 244 | lossloss2.append(loss_value22) 245 | 246 | loss_value33 = self.sess.run(self.loss4, tf_dict) 247 | lossloss3.append(loss_value33) 248 | 249 | for it in range(nIter): 250 | self.sess.run(self.train_op_Adam, tf_dict) 251 | # Print 252 | if it % 20 == 0: 253 | elapsed = time.time() - start_time 254 | 255 | 256 | loss_value11 = self.sess.run(self.loss3, tf_dict) 257 | lossloss1.append(loss_value11) 258 | 259 | loss_value22 = self.sess.run(self.loss2, tf_dict) 260 | lossloss2.append(loss_value22) 261 | 262 | loss_value33 = self.sess.run(self.loss4, tf_dict) 263 | lossloss3.append(loss_value33) 264 | 265 | print('It: %d, Loss1: %.9e,loss2: %.9e Loss3: %.9e,Time: %.2f' % 266 | (it, loss_value11,loss_value33,loss_value22, elapsed)) 267 | start_time = time.time() 268 | log1=open("log1.txt",mode = 'a+', encoding = 'utf-8') 269 | 
print(loss_value11,file=log1) 270 | log1.close() 271 | log2=open("log2.txt",mode = 'a+', encoding = 'utf-8') 272 | print(loss_value33,file=log2) 273 | log2.close() 274 | log3=open("log3.txt",mode = 'a+', encoding = 'utf-8') 275 | print(loss_value22,file=log3) 276 | log3.close() 277 | 278 | 279 | lb = np.array([-1, 0]) 280 | ub = np.array([1, 1]) 281 | 282 | x=np.linspace(-1,1,1200).flatten()[:,None] 283 | t=np.linspace(0,1,1200).flatten()[:,None] 284 | X, T = np.meshgrid(x, t) 285 | 286 | X_star = np.hstack((X.flatten()[:,None], T.flatten()[:,None])) 287 | u_pred, f_u_pred = model.predict(X_star) # note: relies on the global model created in __main__ 288 | 289 | scipy.io.savemat("1.mat", {'u': u_pred}) 290 | 291 | self.optimizer.minimize(self.sess, 292 | feed_dict = tf_dict, 293 | fetches = [self.loss,self.f_u_pred,self.u0_pred,self.u_ub_pred,self.u_lb_pred], 294 | loss_callback = self.callback 295 | ) 296 | 297 | 298 | lb = np.array([-1, 0]) 299 | ub = np.array([1, 1]) 300 | 301 | x=np.linspace(-1,1,1200).flatten()[:,None] 302 | t=np.linspace(0,1,1200).flatten()[:,None] 303 | X, T = np.meshgrid(x, t) 304 | 305 | X_star = np.hstack((X.flatten()[:,None], T.flatten()[:,None])) 306 | u_pred, f_u_pred = model.predict(X_star) 307 | 308 | scipy.io.savemat("2.mat", {'u': u_pred}) 309 | 310 | return lossloss1,lossloss2 311 | 312 | def predict(self, X_star): 313 | 314 | tf_dict = {self.x0_tf: X_star[:,0:1], self.t0_tf: X_star[:,1:2]} 315 | 316 | u_star = self.sess.run(self.u0_pred, tf_dict) 317 | 318 | 319 | tf_dict = {self.x_f_tf: X_star[:,0:1], self.t_f_tf: X_star[:,1:2]} 320 | 321 | f_u_star = self.sess.run(self.f_u_pred, tf_dict) 322 | 323 | return u_star,f_u_star 324 | def loss_show(self): 325 | return self.losslossloss 326 | 327 | 328 | # In[4]: 329 | 330 | 331 | def heatsolution(x,t): 332 | return math.exp(1/((2*t-1)**2+0.15))*(1-x**2) 333 | 334 | 335 | # In[6]: 336 | 337 | 338 | if __name__ == "__main__": 339 | 340 | 341 | # Domain bounds 342 | lb = np.array([-1, 0]) 343 | ub = np.array([1, 1]) 344 | 345 | N0 = 1200 # initial points 346 | N_b = 1200 # boundary points 347 | N_f = 10000 # collocation points 348 | layers = [2,50,50,50,50,1] 349 | # build the exact solution on the grid 350 | x=np.linspace(-1,1,1200).flatten()[:,None] 351 | t=np.linspace(0,1,1200).flatten()[:,None] 352 | res=np.zeros([len(x),len(t)]) 353 | for i in range(len(x)): 354 | for j in range(len(t)): 355 | res[i,j]=heatsolution(x[i],t[j]) 356 | 357 | 358 | X, T = np.meshgrid(x, t) 359 | X_star = np.hstack((X.flatten()[:,None], T.flatten()[:,None])) 360 | # select N0 initial points 361 | idx_x = np.random.choice(x.shape[0], N0, replace=False) 362 | x0 = x[idx_x,:] 363 | u0 = res[idx_x,0:1] 364 | # select N_b boundary points 365 | idx_t = np.random.choice(t.shape[0], N_b, replace=False) 366 | tb = t[idx_t,:] 367 | u_lb = res[0,idx_t] 368 | u_ub=res[-1,idx_t] 369 | # N_f random collocation points: first column is x, second column is t 370 | X_f = lb + (ub-lb)*lhs(2, N_f) 371 | x0=np.array(x0).flatten()[:,None] 372 | u0=np.array(u0).flatten()[:,None] 373 | u_lb=np.array(u_lb).flatten()[:,None] 374 | u_ub=np.array(u_ub).flatten()[:,None] 375 | 376 | 377 | # In[7]: 378 | 379 | 380 | model = PhysicsInformedNN(x0, u0,tb, X_f, layers, lb, ub,u_lb,u_ub) 381 | 382 | 383 | # In[8]: 384 | 385 | 386 | LOSS1,LOSS2=model.train(1000000) 387 | 388 | 389 | # In[9]: 390 | 391 | 392 | X_star = np.hstack((X.flatten()[:,None], T.flatten()[:,None])) 393 | u_pred, f_u_pred = model.predict(X_star) 394 | u_star = res.T.flatten()[:,None] 395 | error_u1 = np.linalg.norm(u_star-u_pred,2)/np.linalg.norm(u_star,2) 396 | error_u2 = np.linalg.norm(u_star-u_pred,1)/len(u_star) 397 | error_u3 = np.linalg.norm(u_star-u_pred,np.inf) 398 |
print('L2-norm Error u: %e' % (error_u1)) 399 | print('Mean absolute Error u: %e' % (error_u2)) 400 | print('Infinity-norm Error u: %e' % (error_u3)) 401 | scipy.io.savemat("3.mat", {'f_u': f_u_pred}) 402 | # In[ ]: 403 | 404 | 405 | 406 | 407 | 408 | # In[ ]: 409 | 410 | 411 | 412 | 413 | -------------------------------------------------------------------------------- /heat conduction problem/0.15/PINN (1000K Adam)/1.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/heat conduction problem/0.15/PINN (1000K Adam)/1.mat -------------------------------------------------------------------------------- /heat conduction problem/0.15/PINN (1000K Adam)/2.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/heat conduction problem/0.15/PINN (1000K Adam)/2.mat -------------------------------------------------------------------------------- /heat conduction problem/0.15/PINN (1000K Adam)/3.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wangyong1301108/MMPINN/91238ca91c46571fdb270c3dfa64e85ff602fa3c/heat conduction problem/0.15/PINN (1000K Adam)/3.mat -------------------------------------------------------------------------------- /heat conduction problem/0.15/PINN (1000K Adam)/log5.txt: -------------------------------------------------------------------------------- 1 | 0.013153081 2 | 0.012882078 3 | 0.012886628 4 | -------------------------------------------------------------------------------- /heat conduction problem/0.15/PINN (1000K Adam)/log6.txt: -------------------------------------------------------------------------------- 1 | 0.37558794 2 | 0.37480602 3 | 0.37483788 4 | -------------------------------------------------------------------------------- /heat conduction problem/0.15/PINN (1000K Adam)/log7.txt: -------------------------------------------------------------------------------- 1 | 4.678425 2 | 4.6777654 3 | 4.6776032 4 | -------------------------------------------------------------------------------- /heat conduction problem/0.15/PINN/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or luatex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font.
30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /heat conduction problem/0.15/SAPINN/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /heat conduction problem/0.3/MMPINN/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') -------------------------------------------------------------------------------- /heat conduction problem/0.3/PINN/plotting.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 9 20:11:57 2017 5 | 6 | @author: mraissi 7 | """ 8 | 9 | import numpy as np 10 | import matplotlib as mpl 11 | #mpl.use('pgf') 12 | 13 | def figsize(scale, nplots = 1): 14 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 15 | inches_per_pt = 1.0/72.27 # Convert pt to inch 16 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 17 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 18 | fig_height = nplots*fig_width*golden_mean # height in inches 19 | fig_size = [fig_width,fig_height] 20 | return fig_size 21 | 22 | pgf_with_latex = { # setup matplotlib to use latex for output 23 | "pgf.texsystem": "pdflatex", # change this if using xetex or lautex 24 | "text.usetex": True, # use LaTeX to write all text 25 | "font.family": "serif", 26 | "font.serif": [], # blank entries should cause plots to inherit fonts from the document 27 | "font.sans-serif": [], 28 | "font.monospace": [], 29 | "axes.labelsize": 10, # LaTeX default is 10pt font. 
30 | "font.size": 10, 31 | "legend.fontsize": 8, # Make the legend/label fonts a little smaller 32 | "xtick.labelsize": 8, 33 | "ytick.labelsize": 8, 34 | "figure.figsize": figsize(1.0), # default fig size of 0.9 textwidth 35 | "pgf.preamble": [ 36 | r"\usepackage[utf8x]{inputenc}", # use utf8 fonts becasue your computer can handle it :) 37 | r"\usepackage[T1]{fontenc}", # plots will be generated using this preamble 38 | ] 39 | } 40 | mpl.rcParams.update(pgf_with_latex) 41 | 42 | import matplotlib.pyplot as plt 43 | 44 | # I make my own newfig and savefig functions 45 | def newfig(width, nplots = 1): 46 | fig = plt.figure(figsize=figsize(width, nplots)) 47 | ax = fig.add_subplot(111) 48 | return fig, ax 49 | 50 | def savefig(filename, crop = True): 51 | if crop == True: 52 | # plt.savefig('{}.pgf'.format(filename), bbox_inches='tight', pad_inches=0) 53 | plt.savefig('{}.pdf'.format(filename), bbox_inches='tight', pad_inches=0) 54 | plt.savefig('{}.eps'.format(filename), bbox_inches='tight', pad_inches=0) 55 | else: 56 | # plt.savefig('{}.pgf'.format(filename)) 57 | plt.savefig('{}.pdf'.format(filename)) 58 | plt.savefig('{}.eps'.format(filename)) 59 | 60 | ## Simple plot 61 | #fig, ax = newfig(1.0) 62 | # 63 | #def ema(y, a): 64 | # s = [] 65 | # s.append(y[0]) 66 | # for t in range(1, len(y)): 67 | # s.append(a * y[t] + (1-a) * s[t-1]) 68 | # return np.array(s) 69 | # 70 | #y = [0]*200 71 | #y.extend([20]*(1000-len(y))) 72 | #s = ema(y, 0.01) 73 | # 74 | #ax.plot(s) 75 | #ax.set_xlabel('X Label') 76 | #ax.set_ylabel('EMA') 77 | # 78 | #savefig('ema') --------------------------------------------------------------------------------