├── .gitattributes
├── Results
│   ├── 2dpoisson-autograd
│   │   ├── 2dpoisson-curve.pdf
│   │   ├── 2dpoisson-exact.pdf
│   │   ├── 2dpoisson-numerical.pdf
│   │   └── lossData2.txt
│   ├── 2dpoisson-ls-autograd
│   │   ├── 2dpoisson-ls-curve.pdf
│   │   └── 2dpoisson-ls-numerical.pdf
│   ├── 2dpoisson-hole-autograd
│   │   ├── 2dpoisson-hole-curve.pdf
│   │   ├── 2dpoisson-hole-exact.pdf
│   │   └── 2dpoisson-hole-numerical.pdf
│   ├── 10dpoisson-cube-autograd
│   │   └── 10dpoisson-cube-curve.pdf
│   ├── 2dpoisson-hole-ls-autograd
│   │   ├── 2dpoisson-hole-ls-curve.pdf
│   │   └── 2dpoisson-hole-ls-numerical.pdf
│   ├── 10dpoisson-cube-ls-autograd
│   │   └── 10dpoisson-cube-ls-curve.pdf
│   └── Test Errors.txt
├── areaVolume.py
├── writeSolution.py
├── LICENSE
├── README.md
├── .gitignore
├── generateData.py
├── plotInMatlab.m
├── 10dpoisson-autograd.py
├── 2dpoisson-hole-autograd.py
├── 10dpoisson-cube-autograd.py
├── 2dpoisson-autograd.py
├── 2dpoisson-hole-ls-autograd.py
├── 2dpoisson-hole.py
├── 2dpoisson.py
├── 2dpoisson-ls-autograd.py
├── 2dpoisson-hole-ls.py
├── 2dpoisson-ls.py
├── 10dpoisson-ls-autograd.py
├── 10dpoisson.py
├── 10dpoisson-cube-ls-autograd.py
├── 10dpoisson-cube.py
├── 10dpoisson-ls.py
└── 10dpoisson-cube-ls.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
--------------------------------------------------------------------------------
/Results/2dpoisson-autograd/2dpoisson-curve.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junbinhuang/DeepRitz/HEAD/Results/2dpoisson-autograd/2dpoisson-curve.pdf
--------------------------------------------------------------------------------
/Results/2dpoisson-autograd/2dpoisson-exact.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junbinhuang/DeepRitz/HEAD/Results/2dpoisson-autograd/2dpoisson-exact.pdf
--------------------------------------------------------------------------------
/Results/2dpoisson-autograd/2dpoisson-numerical.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junbinhuang/DeepRitz/HEAD/Results/2dpoisson-autograd/2dpoisson-numerical.pdf
--------------------------------------------------------------------------------
/Results/2dpoisson-ls-autograd/2dpoisson-ls-curve.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junbinhuang/DeepRitz/HEAD/Results/2dpoisson-ls-autograd/2dpoisson-ls-curve.pdf
--------------------------------------------------------------------------------
/Results/2dpoisson-hole-autograd/2dpoisson-hole-curve.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junbinhuang/DeepRitz/HEAD/Results/2dpoisson-hole-autograd/2dpoisson-hole-curve.pdf
--------------------------------------------------------------------------------
/Results/2dpoisson-hole-autograd/2dpoisson-hole-exact.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junbinhuang/DeepRitz/HEAD/Results/2dpoisson-hole-autograd/2dpoisson-hole-exact.pdf
--------------------------------------------------------------------------------
/Results/2dpoisson-ls-autograd/2dpoisson-ls-numerical.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junbinhuang/DeepRitz/HEAD/Results/2dpoisson-ls-autograd/2dpoisson-ls-numerical.pdf
--------------------------------------------------------------------------------
/Results/10dpoisson-cube-autograd/10dpoisson-cube-curve.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junbinhuang/DeepRitz/HEAD/Results/10dpoisson-cube-autograd/10dpoisson-cube-curve.pdf
--------------------------------------------------------------------------------
/Results/2dpoisson-hole-autograd/2dpoisson-hole-numerical.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junbinhuang/DeepRitz/HEAD/Results/2dpoisson-hole-autograd/2dpoisson-hole-numerical.pdf
--------------------------------------------------------------------------------
/Results/2dpoisson-hole-ls-autograd/2dpoisson-hole-ls-curve.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junbinhuang/DeepRitz/HEAD/Results/2dpoisson-hole-ls-autograd/2dpoisson-hole-ls-curve.pdf
--------------------------------------------------------------------------------
/Results/10dpoisson-cube-ls-autograd/10dpoisson-cube-ls-curve.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junbinhuang/DeepRitz/HEAD/Results/10dpoisson-cube-ls-autograd/10dpoisson-cube-ls-curve.pdf
--------------------------------------------------------------------------------
/Results/2dpoisson-hole-ls-autograd/2dpoisson-hole-ls-numerical.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junbinhuang/DeepRitz/HEAD/Results/2dpoisson-hole-ls-autograd/2dpoisson-hole-ls-numerical.pdf
--------------------------------------------------------------------------------
/areaVolume.py:
--------------------------------------------------------------------------------
1 | # Ratio of the surface area to the volume of an n-dimensional ball.
2 | import numpy as np
3 | import math
4 |
5 | def areaVolume(r,n):
6 |     # Build S_d (surface area) and V_d (volume) of the d-ball of radius r with
7 |     # the recursions S_d = 2*pi*r^2*S_{d-2}/(d-2) and V_d = 2*pi*r^2*V_{d-2}/d.
8 |     area = np.zeros(n+1)
9 |     volume = np.zeros(n+1)
10 |
11 |     area[0] = 0
12 |     area[1] = 2
13 |     area[2] = 2*math.pi*r
14 |     volume[0] = 1
15 |     volume[1] = 2*r
16 |     volume[2] = math.pi*r**2
17 |
18 |     for i in range(3,n+1):
19 |         area[i] = 2*area[i-2]*math.pi*r**2 / (i-2)
20 |         volume[i] = 2*math.pi*volume[i-2]*r**2 / i
21 |
22 |     return area[n]/volume[n] # Equals n/r for the n-ball.
23 |
24 | if __name__=="__main__":
25 |     n = 10 # Number of dimensions
26 |     r = 1.0 # Radius
27 |     print(areaVolume(r,n))
--------------------------------------------------------------------------------
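
A quick sanity check of areaVolume (an illustrative sketch, not a file in the repository): for the n-ball of radius r, the surface-to-volume ratio is n/r, e.g. (2*pi*r)/(pi*r^2) = 2/r in 2D.

```python
from areaVolume import areaVolume

# S_n/V_n = n/r for the n-ball of radius r.
for n in (2, 3, 10):
    assert abs(areaVolume(1.0, n) - n) < 1e-12
assert abs(areaVolume(2.0, 10) - 5.0) < 1e-12  # 10/r with r = 2
```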
/writeSolution.py:
--------------------------------------------------------------------------------
1 | def writeRow(row,file):
2 |     for i in row: file.write("%s "%i)
3 |     file.write("\n")
4 |
5 | def write(X,Y,Z,nSampling,file):
6 |     # One row of x, one of y, one of z per sampling line.
7 |     for k1 in range(nSampling):
8 |         writeRow(X[k1],file)
9 |         writeRow(Y[k1],file)
10 |         writeRow(Z[k1],file)
11 |
12 | def writeBoundary(edgeList,edgeList2=None):
13 |     # One "x y" row per boundary vertex; the second curve (if any) follows the first.
14 |     with open("boundaryCoord.txt","w") as file:
15 |         for i in edgeList:
16 |             writeRow(i,file)
17 |         if edgeList2 is not None:
18 |             for i in edgeList2:
19 |                 writeRow(i,file)
20 |
21 |     if edgeList2 is None: length = [len(edgeList)]
22 |     else: length = [len(edgeList),len(edgeList2)]
23 |
24 |     # Record how many vertices belong to each curve.
25 |     with open("boundaryNumber.txt","w") as file:
26 |         for i in length:
27 |             file.write("%s\n"%i)
28 |
29 | if __name__=="__main__":
30 |     pass
--------------------------------------------------------------------------------
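
A usage sketch for writeBoundary, mirroring what pltResult does in the 2D hole scripts below (illustrative, not a repository file):

```python
import math
import writeSolution

# Outer square as a closed polyline, plus the circular hole (c = (0.3, 0), r = 0.3).
square = [[-1.0, -1.0], [1.0, -1.0], [1.0, 1.0], [-1.0, 1.0], [-1.0, -1.0]]
hole = [[0.3*math.cos(t) + 0.3, 0.3*math.sin(t)]
        for t in (2*math.pi*k/49 for k in range(50))]
writeSolution.writeBoundary(square, hole)
# boundaryCoord.txt : one "x y" row per vertex, square rows first, hole rows after
# boundaryNumber.txt: "5" then "50" -- the vertex count of each curve
```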
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Junbin Huang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DeepRitz & DeepGalerkin
2 |
3 | ## Implementation of the Deep Ritz method and the Deep Galerkin method
4 |
5 | Four problems are solved with the Deep Ritz method; see 2dpoisson-autograd.py, 2dpoisson-hole-autograd.py, 10dpoisson-cube-autograd.py, and 10dpoisson-autograd.py. The same four problems are solved with least-squares functionals (the Deep Galerkin approach); see 2dpoisson-ls-autograd.py, 2dpoisson-hole-ls-autograd.py, 10dpoisson-cube-ls-autograd.py, and 10dpoisson-ls-autograd.py.
6 |
7 | ## Dependencies
8 |
9 | * [NumPy](https://numpy.org)
10 | * [PyTorch](https://pytorch.org/)
11 | * [MATLAB](https://www.mathworks.com/products/matlab.html) (for post-processing only)
12 |
13 | ## References
14 |
15 | [1] W E, B Yu. The Deep Ritz method: A deep learning-based numerical algorithm for solving variational problems. Communications in Mathematics and Statistics 2018, 6:1-12. [[journal]](https://link.springer.com/article/10.1007/s40304-018-0127-z)[[arXiv]](https://arxiv.org/abs/1710.00211)
16 | [2] J Sirignano, K Spiliopoulos. DGM: A deep learning algorithm for solving partial differential equations. Journal of Computational Physics 2018, 375:1339–1364. [[journal]](https://www.sciencedirect.com/science/article/pii/S0021999118305527)[[arXiv]](https://arxiv.org/abs/1708.07469)
17 | [3] Y Liao, P Ming. Deep Nitsche method: Deep Ritz method with essential boundary conditions. 2019. [[arXiv]](https://arxiv.org/abs/1912.01309)
--------------------------------------------------------------------------------
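
A note on usage (inferred from the scripts below, not from the original README): each solver is self-contained, so running e.g. `python 2dpoisson-autograd.py` trains the network, prints the relative L2 test error, and saves `last_model.pt`. All scripts append the training curve to `lossData.txt`, and the 2D scripts additionally write `Data.txt`, `nSample.txt`, `boundaryCoord.txt`, and `boundaryNumber.txt`, which `plotInMatlab.m` turns into figures like those under `Results/`. Hyperparameters live in the `params` dictionary inside each script's `main()`.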
/Results/Test Errors.txt:
--------------------------------------------------------------------------------
1 | 2D poisson (No pre-training; lr = 0.01; 177 parameters; batch = 1,024; penalty = 500):
2 | 0.007931844879894219; 0.007497982255081411; 0.016470392829285096; 0.006519810020557416; 0.010751115798170818.
3 |
4 | 2D poisson-ls (No pre-training; lr = 0.01; 177 parameters; batch = 1,024; penalty = 500):
5 | 0.0003936106864145957; 0.0003410351957616997; 0.00031040094845135994; 0.00042363867982423217; 0.00038041533154758604.
6 |
7 | 2D poisson-hole (No pre-training; lr = 0.01; 177 parameters; batch = 1,024; penalty = 500):
8 | 0.00821072314326543; 0.006787571379978156; 0.003298662712746577; 0.0034181092545655565; 0.004054527873655315.
9 |
10 | 2D poisson-hole-ls (No pre-training; lr = 0.01; 177 parameters; batch = 1,024; penalty = 500):
11 | 0.0007640472733499068; 0.0016114493865665575; 0.000965763045200117; 0.0011497808955798865; 0.0008791529884956095.
12 |
13 | 10D poisson (No pre-training; lr = 0.016; 451 parameters; batch = 1,024; penalty = 500):
14 | NA
15 |
16 | 10D poisson-ls (No pre-training; lr = 0.016; 451 parameters; batch = 1,024; penalty = 500):
17 | NA
18 |
19 | 10D poisson-cube (No pre-training; lr = 0.016; 451 parameters; batch = 1,024; penalty = 500):
20 | 0.0045127704042920245; 0.00456290086049379; 0.004365411622764213; 0.004243311581965337; 0.004561260638601018.
21 |
22 | 10D poisson-cube-ls (No pre-training; lr = 0.016; 451 parameters; batch = 1,024; penalty = 500):
23 | 0.003663410011448244; 0.003499157149499948; 0.0041029348370053035; 0.004310148522267912; 0.0039732782284218155.
24 |
25 | The test errors are evaluated using 40,000 random points.
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # celery beat schedule file
95 | celerybeat-schedule
96 |
97 | # SageMath parsed files
98 | *.sage.py
99 |
100 | # Environments
101 | .env
102 | .venv
103 | env/
104 | venv/
105 | ENV/
106 | env.bak/
107 | venv.bak/
108 |
109 | # Spyder project settings
110 | .spyderproject
111 | .spyproject
112 |
113 | # Rope project settings
114 | .ropeproject
115 |
116 | # mkdocs documentation
117 | /site
118 |
119 | # mypy
120 | .mypy_cache/
121 | .dmypy.json
122 | dmypy.json
123 |
124 | # Pyre type checker
125 | .pyre/
126 | .vscode/
127 | *.pt
128 |
129 | # Some data files
130 | boundaryCoord.txt
131 | boundaryNumber.txt
132 | Data.txt
133 | last_model.pt
134 | lossData.txt
135 | nSample.txt
--------------------------------------------------------------------------------
/generateData.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math
3 | import matplotlib.pyplot as plt
4 |
5 | # Sample points in a disk
6 | def sampleFromDisk(r,n):
7 |     """
8 |     r -- radius;
9 |     n -- number of samples.
10 |     """
11 |     array = np.random.rand(2*n,2)*2*r-r
12 |
13 |     array = np.multiply(array.T,(np.linalg.norm(array,2,axis=1)<r)).T # Zero out points outside the disk.
14 |     array = array[np.linalg.norm(array,2,axis=1)>0] # Drop the zeroed rows.
15 |
16 |     if np.shape(array)[0]>=n:
17 |         return array[0:n]
18 |     else:
19 |         return sampleFromDisk(r,n) # Not enough interior points; sample again.
20 |
21 | def sampleFromDomain(n):
22 | # For simplicity, consider a square with a hole.
23 | # Square: [-1,1]*[-1,1]
24 | # Hole: c = (0.3,0.0), r = 0.3
25 | array = np.zeros([n,2])
26 | c = np.array([0.3,0.0])
27 | r = 0.3
28 |
29 | for i in range(n):
30 | array[i] = randomPoint(c,r)
31 |
32 | return array
33 |
34 | def randomPoint(c,r):
35 |     point = np.random.rand(2)*2-1
36 |     if np.linalg.norm(point-c)<r:
37 |         return randomPoint(c,r) # Resample if the point falls inside the hole.
38 |     else:
39 |         return point
--------------------------------------------------------------------------------
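
This dump loses the rest of generateData.py, but the training scripts below also import sampleFromSurface, sampleFromDisk10, sampleFromSurface10, and sampleFromBoundary from it. Minimal sketches consistent with those call sites, assuming uniform sampling (the original implementations may differ):

```python
import numpy as np
import math

def sampleFromSurface(r, n):
    # n points uniform on the circle of radius r.
    theta = np.random.rand(n)*2*math.pi
    return r*np.stack([np.cos(theta), np.sin(theta)], axis=1)

def sampleFromDisk10(r, n):
    # n points uniform in the 10-ball: uniform direction, radius r*U^(1/10).
    x = np.random.normal(size=(n, 10))
    x /= np.linalg.norm(x, axis=1, keepdims=True)
    return x*r*np.random.rand(n, 1)**0.1

def sampleFromSurface10(r, n):
    # n points uniform on the 9-sphere of radius r.
    x = np.random.normal(size=(n, 10))
    return r*x/np.linalg.norm(x, axis=1, keepdims=True)

def sampleFromBoundary(n):
    # Points on the boundary of [-1,1]^2 and of the hole (c=(0.3,0), r=0.3),
    # allocated in proportion to the two boundary lengths (8 and 2*pi*0.3).
    nHole = int(n*2*math.pi*0.3/(8 + 2*math.pi*0.3))
    theta = np.random.rand(nHole)*2*math.pi
    hole = np.stack([0.3*np.cos(theta) + 0.3, 0.3*np.sin(theta)], axis=1)
    t = np.random.rand(n - nHole)*8           # Arc length along the square.
    side = (t//2).astype(int)                 # 0: bottom, 1: right, 2: top, 3: left.
    s = t%2 - 1                               # Offset within the side, in [-1,1).
    x = np.where(side == 0, s, np.where(side == 1, 1.0,
        np.where(side == 2, -s, -1.0)))
    y = np.where(side == 0, -1.0, np.where(side == 1, s,
        np.where(side == 2, 1.0, -s)))
    square = np.stack([x, y], axis=1)
    return np.concatenate([square, hole], axis=0)
```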
/plotInMatlab.m:
--------------------------------------------------------------------------------
30 | % if size(lossData,2) > 500
31 | %     lossData = lossData(:,10:10:end);
32 | %     lossData_itr = lossData_itr(:,10:10:end);
33 | % end
34 | logLossData = log10(lossData);
35 | lossData_err = mean(logLossData);
36 | lossData_err_std = std(logLossData);
37 | lossData_err1 = lossData_err-lossData_err_std;
38 | lossData_err2 = lossData_err+lossData_err_std;
39 | lossData_err = 10.^lossData_err;
40 | lossData_err1 = 10.^lossData_err1;
41 | lossData_err2 = 10.^lossData_err2;
42 |
43 | figure
44 | semilogy(lossData_itr,lossData_err,'b-','LineWidth',1.0)
45 | hold on
46 | XX = [lossData_itr, fliplr(lossData_itr)];
47 | YY = [lossData_err1, fliplr(lossData_err2)];
48 | theFill = fill(XX,YY,'b');
49 | set(theFill,'facealpha',0.3,'edgecolor','b','edgealpha',0.0)
50 |
51 | ylabel('Error','Interpreter','latex')
52 | xlabel('Iterations','Interpreter','latex')
53 | % ylim([0.005,1])
54 | % legend({'No pre-training'},'Interpreter','latex')
55 | set(gca,'ticklabelinterpreter','latex','fontsize',11)
56 |
57 | %% Now we can start plotting figures.
58 | fid=fopen('nSample.txt');
59 | data = textscan(fid, '%d', 'CommentStyle','#', 'CollectOutput',true);
60 | fclose(fid);
61 | numbers=cell2mat(data);
62 |
63 | fid=fopen('boundaryNumber.txt');
64 | data = textscan(fid, '%d', 'CommentStyle','#', 'CollectOutput',true);
65 | fclose(fid);
66 | boundaryNumber=cell2mat(data);
67 |
68 | fid=fopen('boundaryCoord.txt');
69 | data = textscan(fid, '%f %f', 'CommentStyle','#', 'CollectOutput',true);
70 | fclose(fid);
71 | bCoord=cell2mat(data);
72 |
73 | nSample=numbers(1);
74 |
75 | fid=fopen('Data.txt');
76 | a = '%f ';
77 | for i = 1:nSample-2
78 | a = [a,'%f '];
79 | end
80 | a = [a,'%f'];
81 | data = textscan(fid, a, 'CommentStyle','#', 'CollectOutput',true);
82 | fclose(fid);
83 | totalData=cell2mat(data);
84 |
85 | clear data numbers a
86 |
87 | % Plot the boundary.
88 | figure
89 | hold on
90 | axis equal
91 | axis off
92 |
93 | % Plot the contourf results!
94 | plotData=totalData;
95 |
96 | nScale=100;
97 | nDomain=size(plotData,1)/nSample/3;
98 |
99 | xArray=plotData(1:3:end,:);
100 | yArray=plotData(2:3:end,:);
101 | zArray=plotData(3:3:end,:);
102 |
103 | xMin=min(xArray(:));xMax=max(xArray(:));
104 | yMin=min(yArray(:));yMax=max(yArray(:));
105 | zMin=min(zArray(:));zMax=max(zArray(:));
106 |
107 | %% Set limits
108 | zMin=0;
109 | zMax=1;
110 | %%
111 |
112 | scale=linspace(zMin,zMax,nScale);
113 |
114 | for i=1:nDomain
115 | myContourf(xArray(nSample*(i-1)+1:nSample*i,:),...
116 | yArray(nSample*(i-1)+1:nSample*i,:),...
117 | zArray(nSample*(i-1)+1:nSample*i,:),scale)
118 | end
119 |
120 | xlim([xMin,xMax])
121 | ylim([yMin,yMax])
122 | % Colorbar limits
123 | caxis([zMin,zMax])
124 |
125 | boundaryCoord=bCoord;
126 |
127 | %% Plot the boundary.
128 | for i=1:length(boundaryNumber)
129 | coord=boundaryCoord(1:boundaryNumber(i),:);
130 | boundaryCoord=boundaryCoord(boundaryNumber(i)+1:end,:);
131 |
132 | plot(coord(:,1),coord(:,2),'k','LineWidth',1.5)
133 | if coord(1,1)~=coord(end,1) || coord(1,2)~=coord(end,2)
134 | plot(coord([1,end],1),coord([1,end],2),'k','LineWidth',1.5)
135 | end
136 | end
137 |
138 | clear plotData xArray yArray zArray scale nDomain xMin xMax yMin yMax zMin zMax...
139 | i fid coord
140 |
141 | %% Some functions used:
142 | function myContourf(x,y,z,scale)
143 | %Used in visualization
144 | contourf(x,y,z,scale,'LineStyle','none');
145 | set(gca,'ticklabelinterpreter','latex','fontsize',11)
146 | colormap(jet);
147 | colorbar('ticklabelinterpreter','latex')
148 | end
--------------------------------------------------------------------------------
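
plotInMatlab.m needs MATLAB for post-processing. For the training curve alone, a matplotlib sketch works directly on the "iteration error" rows that the training scripts append to lossData.txt (an illustrative alternative, not a repository file):

```python
import numpy as np
import matplotlib.pyplot as plt

data = np.loadtxt("lossData.txt")  # Each row: iteration, relative L2 error.
plt.semilogy(data[:, 0], data[:, 1], "b-", linewidth=1.0)
plt.xlabel("Iterations")
plt.ylabel("Error")
plt.show()
```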
/10dpoisson-autograd.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR, MultiplicativeLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 | from areaVolume import areaVolume
10 |
11 | # Network structure
12 | class RitzNet(torch.nn.Module):
13 | def __init__(self, params):
14 | super(RitzNet, self).__init__()
15 | self.params = params
16 | # self.linearIn = nn.Linear(self.params["d"], self.params["width"])
17 | self.linear = nn.ModuleList()
18 | for _ in range(params["depth"]):
19 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
20 |
21 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
22 |
23 | def forward(self, x):
24 | # x = F.softplus(self.linearIn(x)) # Match dimension
25 | for layer in self.linear:
26 | x_temp = F.softplus(layer(x))
27 | x = x_temp+x
28 |
29 | return self.linearOut(x)
30 |
31 | def initWeights(m):
32 | if type(m) == nn.Linear:
33 | torch.nn.init.xavier_normal_(m.weight)
34 | torch.nn.init.zeros_(m.bias)
35 |
36 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
37 | model.train()
38 | file = open("lossData.txt","w")
39 |
40 | for step in range(params["preStep"]):
41 | # The volume integral
42 | data = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
43 |
44 | output = model(data)
45 |
46 | target = fun(params["radius"],data)
47 |
48 | loss = output-target
49 | loss = torch.mean(loss*loss)
50 |
51 | if step%params["writeStep"] == params["writeStep"]-1:
52 | with torch.no_grad():
53 | ref = exact(params["radius"],data)
54 | error = errorFun(output,ref,params)
55 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
56 | print("Error at Step %s is %s."%(step+1,error))
57 | file.write(str(step+1)+" "+str(error)+"\n")
58 |
59 | model.zero_grad()
60 | loss.backward()
61 |
62 | # Update the weights.
63 | preOptimizer.step()
64 | # preScheduler.step()
65 |
66 | def train(model,device,params,optimizer,scheduler):
67 | model.train()
68 |
69 | data1 = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
70 | data1.requires_grad = True
71 | data2 = torch.from_numpy(generateData.sampleFromSurface10(params["radius"],params["bdryBatch"])).float().to(device)
72 |
73 | for step in range(params["trainStep"]-params["preStep"]):
74 | output1 = model(data1)
75 |
76 | model.zero_grad()
77 |
78 | dfdx = torch.autograd.grad(output1,data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0]
79 | # Loss function 1
80 | fTerm = ffun(data1).to(device)
81 | loss1 = torch.mean(0.5*torch.sum(dfdx*dfdx,1).unsqueeze(1)-fTerm*output1)
82 |
83 | # Loss function 2
84 | output2 = model(data2)
85 | target2 = exact(params["radius"],data2)
86 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] *params["area"])
87 | loss = loss1+loss2
88 |
89 | if step%params["writeStep"] == params["writeStep"]-1:
90 | with torch.no_grad():
91 | target = exact(params["radius"],data1)
92 | error = errorFun(output1,target,params)
93 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
94 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
95 | file = open("lossData.txt","a")
96 | file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
97 |
98 | if step%params["sampleStep"] == params["sampleStep"]-1:
99 | data1 = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
100 | data1.requires_grad = True
101 | data2 = torch.from_numpy(generateData.sampleFromSurface10(params["radius"],params["bdryBatch"])).float().to(device)
102 |
103 | if 10*(step+1)%params["trainStep"] == 0:
104 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
105 |
106 | loss.backward()
107 |
108 | optimizer.step()
109 | scheduler.step()
110 |
111 | def errorFun(output,target,params):
112 | error = output-target
113 | error = math.sqrt(torch.mean(error*error))
114 | # Calculate the L2 norm error.
115 | ref = math.sqrt(torch.mean(target*target))
116 | return error/ref
117 |
118 | def test(model,device,params):
119 | numQuad = params["numQuad"]
120 |
121 | data = torch.from_numpy(generateData.sampleFromDisk10(1,numQuad)).float().to(device)
122 | output = model(data)
123 | target = exact(params["radius"],data).to(device)
124 |
125 | error = output-target
126 | error = math.sqrt(torch.mean(error*error))
127 | # Calculate the L2 norm error.
128 | ref = math.sqrt(torch.mean(target*target))
129 | return error/ref
130 |
131 | def ffun(data):
132 | # f = 0
133 | return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
134 | # f = 20
135 | # return 20.0*torch.ones([data.shape[0],1],dtype=torch.float)
136 |
137 | def exact(r,data):
138 | # f = 20 ==> u = r^2-x^2-y^2-...
139 | # output = r**2-torch.sum(data*data,dim=1)
140 | # f = 0 ==> u = x1x2+x3x4+x5x6+...
141 | output = data[:,0]*data[:,1] + data[:,2]*data[:,3] + data[:,4]*data[:,5] + \
142 | data[:,6]*data[:,7] + data[:,8]*data[:,9]
143 | return output.unsqueeze(1)
144 |
145 | def rough(r,data):
146 | # output = r**2-r*torch.sum(data*data,dim=1)**0.5
147 | output = torch.zeros(data.shape[0],dtype=torch.float)
148 | return output.unsqueeze(1)
149 |
150 | def count_parameters(model):
151 | return sum(p.numel() for p in model.parameters()) # if p.requires_grad
152 |
153 | def main():
154 | # Parameters
155 | torch.manual_seed(21)
156 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
157 |
158 | params = dict()
159 | params["radius"] = 1
160 | params["d"] = 10 # 10D
161 | params["dd"] = 1 # Scalar field
162 | params["bodyBatch"] = 1024 # Batch size
163 | params["bdryBatch"] = 2048 # Batch size for the boundary integral
164 | params["lr"] = 0.016 # Learning rate
165 | params["preLr"] = params["lr"] # Learning rate (Pre-training)
166 | params["width"] = 10 # Width of layers
167 | params["depth"] = 4 # Depth of the network: depth+2
168 | params["numQuad"] = 40000 # Number of quadrature points for testing
169 | params["trainStep"] = 50000
170 | params["penalty"] = 500
171 | params["preStep"] = 0
172 | params["writeStep"] = 50
173 | params["sampleStep"] = 10
174 | params["area"] = areaVolume(params["radius"],params["d"])
175 | params["step_size"] = 5000
176 | params["milestone"] = [5000,10000,20000,35000,48000]
177 | params["gamma"] = 0.5
178 | params["decay"] = 0.0001
179 |
180 | startTime = time.time()
181 | model = RitzNet(params).to(device)
182 | # model.apply(initWeights)
183 | print("Generating network costs %s seconds."%(time.time()-startTime))
184 |
185 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
186 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
187 | # scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
188 | scheduler = MultiStepLR(optimizer,milestones=params["milestone"],gamma=params["gamma"])
189 | # schedulerFun = lambda epoch: ((epoch+100)/(epoch+101))
190 | # scheduler = MultiplicativeLR(optimizer,lr_lambda=schedulerFun)
191 |
192 | startTime = time.time()
193 | preTrain(model,device,params,preOptimizer,None,rough)
194 | train(model,device,params,optimizer,scheduler)
195 | print("Training costs %s seconds."%(time.time()-startTime))
196 |
197 | model.eval()
198 | testError = test(model,device,params)
199 | print("The test error (of the last model) is %s."%testError)
200 | print("The number of parameters is %s,"%count_parameters(model))
201 |
202 | torch.save(model.state_dict(),"last_model.pt")
203 |
204 | if __name__=="__main__":
205 | main()
--------------------------------------------------------------------------------
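
For reference, loss1 and loss2 in train above are Monte Carlo estimates of the penalized Ritz energy of reference [1] in the README: the mean over data1 approximates a volume average, and the factor params["area"] = |∂Ω|/|Ω| puts the boundary mean on the same normalization, so that

```latex
\mathrm{loss} \;\approx\; \frac{1}{|\Omega|}\left[\int_{\Omega}\Big(\tfrac{1}{2}\,|\nabla u_\theta|^{2}-f\,u_\theta\Big)\,dx
\;+\;\beta\int_{\partial\Omega}\big(u_\theta-g\big)^{2}\,ds\right],
\qquad \beta = \texttt{penalty} = 500.
```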
/2dpoisson-hole-autograd.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 |
10 | # Network structure
11 | class RitzNet(torch.nn.Module):
12 | def __init__(self, params):
13 | super(RitzNet, self).__init__()
14 | self.params = params
15 | self.linearIn = nn.Linear(self.params["d"], self.params["width"])
16 | self.linear = nn.ModuleList()
17 | for _ in range(params["depth"]):
18 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
19 |
20 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
21 |
22 | def forward(self, x):
23 | x = torch.tanh(self.linearIn(x)) # Match dimension
24 | for layer in self.linear:
25 | x_temp = torch.tanh(layer(x))
26 | x = x_temp
27 |
28 | return self.linearOut(x)
29 |
30 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
31 | model.train()
32 | file = open("lossData.txt","w")
33 |
34 | for step in range(params["preStep"]):
35 | # The volume integral
36 | data = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
37 |
38 | output = model(data)
39 |
40 | target = fun(params["radius"],data)
41 |
42 | loss = output-target
43 | loss = torch.mean(loss*loss)*math.pi*params["radius"]**2
44 |
45 | if step%params["writeStep"] == params["writeStep"]-1:
46 | with torch.no_grad():
47 | ref = exact(data)
48 | error = errorFun(output,ref,params)
49 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
50 | print("Error at Step %s is %s."%(step+1,error))
51 | file.write(str(step+1)+" "+str(error)+"\n")
52 |
53 | model.zero_grad()
54 | loss.backward()
55 |
56 | # Update the weights.
57 | preOptimizer.step()
58 | # preScheduler.step()
59 |
60 | def train(model,device,params,optimizer,scheduler):
61 | ratio = (4*2.0+2*math.pi*0.3)/(2.0*2.0-math.pi*0.3**2)
62 | model.train()
63 |
64 | data1 = torch.from_numpy(generateData.sampleFromDomain(params["bodyBatch"])).float().to(device)
65 | data1.requires_grad = True
66 | data2 = torch.from_numpy(generateData.sampleFromBoundary(params["bdryBatch"])).float().to(device)
67 |
68 | for step in range(params["trainStep"]-params["preStep"]):
69 | output1 = model(data1)
70 |
71 | model.zero_grad()
72 |
73 | dfdx = torch.autograd.grad(output1,data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0]
74 | # Loss function 1
75 | fTerm = ffun(data1).to(device)
76 | loss1 = torch.mean(0.5*torch.sum(dfdx*dfdx,1).unsqueeze(1)-fTerm*output1)
77 |
78 | # Loss function 2
79 | output2 = model(data2)
80 | target2 = exact(data2)
81 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] * ratio)
82 | loss = loss1+loss2
83 |
84 | if step%params["writeStep"] == params["writeStep"]-1:
85 | with torch.no_grad():
86 | target = exact(data1)
87 | error = errorFun(output1,target,params)
88 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
89 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
90 | file = open("lossData.txt","a")
91 | file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
92 |
93 | if step%params["sampleStep"] == params["sampleStep"]-1:
94 | data1 = torch.from_numpy(generateData.sampleFromDomain(params["bodyBatch"])).float().to(device)
95 | data1.requires_grad = True
96 | data2 = torch.from_numpy(generateData.sampleFromBoundary(params["bdryBatch"])).float().to(device)
97 |
98 | if 10*(step+1)%params["trainStep"] == 0:
99 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
100 |
101 | loss.backward()
102 |
103 | optimizer.step()
104 | scheduler.step()
105 |
106 | def errorFun(output,target,params):
107 | error = output-target
108 | error = math.sqrt(torch.mean(error*error))
109 | # Calculate the L2 norm error.
110 | ref = math.sqrt(torch.mean(target*target))
111 | return error/ref
112 |
113 | def test(model,device,params):
114 | numQuad = params["numQuad"]
115 |
116 | data = torch.from_numpy(generateData.sampleFromDomain(numQuad)).float().to(device)
117 | output = model(data)
118 | target = exact(data).to(device)
119 |
120 | error = output-target
121 | error = math.sqrt(torch.mean(error*error))
122 | # Calculate the L2 norm error.
123 | ref = math.sqrt(torch.mean(target*target))
124 | return error/ref
125 |
126 | def ffun(data):
127 | # f = 0.0
128 | return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
129 |
130 | def exact(data):
131 | # f = 0 ==> u = xy
132 | output = data[:,0]*data[:,1]
133 |
134 | return output.unsqueeze(1)
135 |
136 | def count_parameters(model):
137 | return sum(p.numel() for p in model.parameters())
138 |
139 | # def rough(r,data):
140 | # output = r**2-r*torch.sum(data*data,dim=1)**0.5
141 | # return output.unsqueeze(1)
142 |
143 | def main():
144 | # Parameters
145 | # torch.manual_seed(21)
146 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
147 |
148 | params = dict()
149 | params["d"] = 2 # 2D
150 | params["dd"] = 1 # Scalar field
151 | params["bodyBatch"] = 1024 # Batch size
152 | params["bdryBatch"] = 1024 # Batch size for the boundary integral
153 | params["lr"] = 0.01 # Learning rate
154 | params["preLr"] = 0.01 # Learning rate (Pre-training)
155 | params["width"] = 8 # Width of layers
156 | params["depth"] = 2 # Depth of the network: depth+2
157 | params["numQuad"] = 40000 # Number of quadrature points for testing
158 | params["trainStep"] = 50000
159 | params["penalty"] = 500
160 | params["preStep"] = 0
161 | params["writeStep"] = 50
162 | params["sampleStep"] = 10
163 | params["step_size"] = 5000
164 | params["gamma"] = 0.5
165 | params["decay"] = 0.00001
166 |
167 | startTime = time.time()
168 | model = RitzNet(params).to(device)
169 | print("Generating network costs %s seconds."%(time.time()-startTime))
170 |
171 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
172 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
173 | scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
174 |
175 | startTime = time.time()
176 | preTrain(model,device,params,preOptimizer,None,exact)
177 | train(model,device,params,optimizer,scheduler)
178 | print("Training costs %s seconds."%(time.time()-startTime))
179 |
180 | model.eval()
181 | testError = test(model,device,params)
182 | print("The test error (of the last model) is %s."%testError)
183 | print("The number of parameters is %s,"%count_parameters(model))
184 |
185 | torch.save(model.state_dict(),"last_model.pt")
186 |
187 | pltResult(model,device,500,params)
188 |
189 | def pltResult(model,device,nSample,params):
190 | xList = np.linspace(-1,1,nSample)
191 | yList = np.linspace(-1,1,nSample)
192 | thetaList = np.linspace(0,2*math.pi,50)
193 |
194 | xx = np.zeros([nSample,nSample])
195 | yy = np.zeros([nSample,nSample])
196 | zz = np.zeros([nSample,nSample])
197 | for i in range(nSample):
198 | for j in range(nSample):
199 | xx[i,j] = xList[i]
200 | yy[i,j] = yList[j]
201 | coord = np.array([xx[i,j],yy[i,j]])
202 | zz[i,j] = model(torch.from_numpy(coord).float().to(device)).item()
203 | # zz[i,j] = xx[i,j]*yy[i,j] # Plot the exact solution.
204 | if np.linalg.norm(coord-np.array([0.3,0.0]))<0.3:
205 | zz[i,j] = np.nan # Mask out the hole region in the plot.
206 |
207 | file = open("nSample.txt","w")
208 | file.write(str(nSample))
209 |
210 | file = open("Data.txt","w")
211 | writeSolution.write(xx,yy,zz,nSample,file)
212 |
213 | edgeList2 = [[0.3*math.cos(i)+0.3,0.3*math.sin(i)] for i in thetaList]
214 | edgeList1 = [[-1.0,-1.0],[1.0,-1.0],[1.0,1.0],[-1.0,1.0],[-1.0,-1.0]]
215 | writeSolution.writeBoundary(edgeList1,edgeList2)
216 |
217 | if __name__=="__main__":
218 | main()
--------------------------------------------------------------------------------
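
The constant ratio at the top of train is this domain's analogue of params["area"] in the 10D scripts: the boundary-length to area ratio of the square with a hole,

```latex
\mathrm{ratio}\;=\;\frac{|\partial\Omega|}{|\Omega|}
\;=\;\frac{4\cdot 2+2\pi\cdot 0.3}{2\cdot 2-\pi\cdot 0.3^{2}}
\;=\;\frac{8+0.6\pi}{4-0.09\pi}\;\approx\;2.66.
```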
/10dpoisson-cube-autograd.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 | from areaVolume import areaVolume
10 |
11 | # Network structure
12 | class RitzNet(torch.nn.Module):
13 | def __init__(self, params):
14 | super(RitzNet, self).__init__()
15 | self.params = params
16 | # self.linearIn = nn.Linear(self.params["d"], self.params["width"])
17 | self.linear = nn.ModuleList()
18 | for _ in range(params["depth"]):
19 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
20 |
21 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
22 |
23 | def forward(self, x):
24 | # x = torch.tanh(self.linearIn(x)) # Match dimension
25 | for i in range(len(self.linear)//2):
26 | x_temp = torch.tanh(self.linear[2*i](x))
27 | x_temp = torch.tanh(self.linear[2*i+1](x_temp))
28 | x = x_temp+x
29 |
30 | return self.linearOut(x)
31 |
32 | def initWeights(m):
33 | if type(m) == nn.Linear:
34 | torch.nn.init.xavier_normal_(m.weight)
35 | torch.nn.init.zeros_(m.bias)
36 |
37 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
38 | model.train()
39 | file = open("lossData.txt","w")
40 |
41 | for step in range(params["preStep"]):
42 | # The volume integral
43 | data = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
44 |
45 | output = model(data)
46 |
47 | target = fun(params["radius"],data)
48 |
49 | loss = output-target
50 | loss = torch.mean(loss*loss)
51 |
52 | if step%params["writeStep"] == params["writeStep"]-1:
53 | with torch.no_grad():
54 | ref = exact(params["radius"],data)
55 | error = errorFun(output,ref,params)
56 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
57 | print("Error at Step %s is %s."%(step+1,error))
58 | file.write(str(step+1)+" "+str(error)+"\n")
59 |
60 | model.zero_grad()
61 | loss.backward()
62 |
63 | # Update the weights.
64 | preOptimizer.step()
65 | # preScheduler.step()
66 |
67 | def train(model,device,params,optimizer,scheduler):
68 | model.train()
69 |
70 | data1 = torch.rand(params["bodyBatch"],params["d"]).float().to(device)
71 | data1.requires_grad = True
72 | data2 = torch.rand(2*params["d"]*(params["bdryBatch"]//(2*params["d"])),params["d"]).float().to(device)
73 | temp = params["bdryBatch"]//(2*params["d"])
74 | for i in range(params["d"]):
75 | data2[(2*i+0)*temp:(2*i+1)*temp,i] = 0.0
76 | data2[(2*i+1)*temp:(2*i+2)*temp,i] = 1.0
77 |
78 | for step in range(params["trainStep"]-params["preStep"]):
79 | output1 = model(data1)
80 |
81 | model.zero_grad()
82 |
83 | dfdx = torch.autograd.grad(output1,data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0]
84 | # Loss function 1
85 | fTerm = ffun(data1).to(device)
86 | loss1 = torch.mean(0.5*torch.sum(dfdx*dfdx,1).unsqueeze(1)-fTerm*output1)
87 |
88 | # Loss function 2
89 | output2 = model(data2)
90 | target2 = exact(params["radius"],data2)
91 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] *params["area"])
92 | loss = loss1+loss2
93 |
94 | if step%params["writeStep"] == params["writeStep"]-1:
95 | with torch.no_grad():
96 | target = exact(params["radius"],data1)
97 | error = errorFun(output1,target,params)
98 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
99 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
100 | file = open("lossData.txt","a")
101 | file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
102 |
103 | if step%params["sampleStep"] == params["sampleStep"]-1:
104 | data1 = torch.rand(params["bodyBatch"],params["d"]).float().to(device)
105 | data1.requires_grad = True
106 | data2 = torch.rand(2*params["d"]*(params["bdryBatch"]//(2*params["d"])),params["d"]).float().to(device)
107 | temp = params["bdryBatch"]//(2*params["d"])
108 | for i in range(params["d"]):
109 | data2[(2*i+0)*temp:(2*i+1)*temp,i] = 0.0
110 | data2[(2*i+1)*temp:(2*i+2)*temp,i] = 1.0
111 |
112 | if 10*(step+1)%params["trainStep"] == 0:
113 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
114 |
115 | loss.backward()
116 |
117 | optimizer.step()
118 | scheduler.step()
119 |
120 | def errorFun(output,target,params):
121 | error = output-target
122 | error = math.sqrt(torch.mean(error*error))
123 | # Calculate the L2 norm error.
124 | ref = math.sqrt(torch.mean(target*target))
125 | return error/ref
126 |
127 | def test(model,device,params):
128 | numQuad = params["numQuad"]
129 |
130 | data = torch.rand(numQuad,10).float().to(device)
131 | output = model(data)
132 | target = exact(params["radius"],data).to(device)
133 |
134 | error = output-target
135 | error = math.sqrt(torch.mean(error*error))
136 | # Calculate the L2 norm error.
137 | ref = math.sqrt(torch.mean(target*target))
138 | return error/ref
139 |
140 | def ffun(data):
141 | # f = 0
142 | return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
143 | # f = 20
144 | # return 20.0*torch.ones([data.shape[0],1],dtype=torch.float)
145 |
146 | def exact(r,data):
147 | # f = 20 ==> u = r^2-x^2-y^2-...
148 | # output = r**2-torch.sum(data*data,dim=1)
149 | # f = 0 ==> u = x1x2+x3x4+x5x6+...
150 | output = data[:,0]*data[:,1] + data[:,2]*data[:,3] + data[:,4]*data[:,5] + \
151 | data[:,6]*data[:,7] + data[:,8]*data[:,9]
152 | return output.unsqueeze(1)
153 |
154 | def rough(r,data):
155 | # output = r**2-r*torch.sum(data*data,dim=1)**0.5
156 | output = torch.zeros(data.shape[0],dtype=torch.float)
157 | return output.unsqueeze(1)
158 |
159 | def count_parameters(model):
160 | return sum(p.numel() for p in model.parameters()) # if p.requires_grad
161 |
162 | def main():
163 | # Parameters
164 | # torch.manual_seed(21)
165 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
166 |
167 | params = dict()
168 | params["radius"] = 1
169 | params["d"] = 10 # 10D
170 | params["dd"] = 1 # Scalar field
171 | params["bodyBatch"] = 1024 # Batch size
172 | params["bdryBatch"] = 2000 # Batch size for the boundary integral
173 | params["lr"] = 0.016 # Learning rate
174 | params["preLr"] = params["lr"] # Learning rate (Pre-training)
175 | params["width"] = 10 # Width of layers
176 | params["depth"] = 4 # Depth of the network: depth+2
177 | params["numQuad"] = 40000 # Number of quadrature points for testing
178 | params["trainStep"] = 50000
179 | params["penalty"] = 500
180 | params["preStep"] = 0
181 | params["writeStep"] = 50
182 | params["sampleStep"] = 10
183 | params["area"] = 20
184 | params["step_size"] = 5000
185 | params["milestone"] = [5000,10000,20000,35000,48000]
186 | params["gamma"] = 0.5
187 | params["decay"] = 0.00001
188 |
189 | startTime = time.time()
190 | model = RitzNet(params).to(device)
191 | model.apply(initWeights)
192 | print("Generating network costs %s seconds."%(time.time()-startTime))
193 |
194 | # torch.seed()
195 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
196 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
197 | # scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
198 | scheduler = MultiStepLR(optimizer,milestones=params["milestone"],gamma=params["gamma"])
199 |
200 | startTime = time.time()
201 | preTrain(model,device,params,preOptimizer,None,rough)
202 | train(model,device,params,optimizer,scheduler)
203 | print("Training costs %s seconds."%(time.time()-startTime))
204 |
205 | model.eval()
206 | testError = test(model,device,params)
207 | print("The test error (of the last model) is %s."%testError)
208 | print("The number of parameters is %s,"%count_parameters(model))
209 |
210 | torch.save(model.state_dict(),"last_model.pt")
211 |
212 | if __name__=="__main__":
213 | main()
--------------------------------------------------------------------------------
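
Here params["area"] = 20 is exact rather than computed: the unit cube Ω = [0,1]^10 has 2·10 faces, each of 9-dimensional measure 1, and unit volume, so

```latex
\frac{|\partial\Omega|}{|\Omega|}\;=\;\frac{2d\cdot 1^{\,d-1}}{1^{\,d}}\;=\;2\cdot 10\;=\;20,
\qquad \Omega=[0,1]^{10}.
```

This is also how data2 is assembled: for each of the d coordinates, one block of boundary samples is pinned to 0 and one to 1.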
/2dpoisson-autograd.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 |
10 | # Network structure
11 | class RitzNet(torch.nn.Module):
12 | def __init__(self, params):
13 | super(RitzNet, self).__init__()
14 | self.params = params
15 | self.linearIn = nn.Linear(self.params["d"], self.params["width"])
16 | self.linear = nn.ModuleList()
17 | for _ in range(params["depth"]):
18 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
19 |
20 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
21 |
22 | def forward(self, x):
23 | x = torch.tanh(self.linearIn(x)) # Match dimension
24 | for layer in self.linear:
25 | x_temp = torch.tanh(layer(x))
26 | x = x_temp
27 |
28 | return self.linearOut(x)
29 |
30 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
31 | model.train()
32 | file = open("lossData.txt","w")
33 |
34 | for step in range(params["preStep"]):
35 | # The volume integral
36 | data = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
37 |
38 | output = model(data)
39 |
40 | target = fun(params["radius"],data)
41 |
42 | loss = output-target
43 | loss = torch.mean(loss*loss)*math.pi*params["radius"]**2
44 |
45 | if step%params["writeStep"] == params["writeStep"]-1:
46 | with torch.no_grad():
47 | ref = exact(params["radius"],data)
48 | error = errorFun(output,ref,params)
49 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
50 | print("Error at Step %s is %s."%(step+1,error))
51 | file.write(str(step+1)+" "+str(error)+"\n")
52 |
53 | model.zero_grad()
54 | loss.backward()
55 |
56 | # Update the weights.
57 | preOptimizer.step()
58 | # preScheduler.step()
59 |
60 | def train(model,device,params,optimizer,scheduler):
61 | model.train()
62 |
63 | data1 = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
64 | data1.requires_grad = True
65 | data2 = torch.from_numpy(generateData.sampleFromSurface(params["radius"],params["bdryBatch"])).float().to(device)
66 |
67 | for step in range(params["trainStep"]-params["preStep"]):
68 | output1 = model(data1)
69 |
70 | model.zero_grad()
71 |
72 | dfdx = torch.autograd.grad(output1,data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0]
73 | # Loss function 1
74 | fTerm = ffun(data1).to(device)
75 | loss1 = torch.mean(0.5*torch.sum(dfdx*dfdx,1).unsqueeze(1)-fTerm*output1) * math.pi*params["radius"]**2
76 |
77 | # Loss function 2
78 | output2 = model(data2)
79 | target2 = exact(params["radius"],data2)
80 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] * 2*math.pi*params["radius"])
81 | loss = loss1+loss2
82 |
83 | if step%params["writeStep"] == params["writeStep"]-1:
84 | with torch.no_grad():
85 | target = exact(params["radius"],data1)
86 | error = errorFun(output1,target,params)
87 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
88 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
89 | file = open("lossData.txt","a")
90 | file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
91 |
92 | if step%params["sampleStep"] == params["sampleStep"]-1:
93 | data1 = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
94 | data1.requires_grad = True
95 | data2 = torch.from_numpy(generateData.sampleFromSurface(params["radius"],params["bdryBatch"])).float().to(device)
96 |
97 | if 10*(step+1)%params["trainStep"] == 0:
98 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
99 |
100 | loss.backward()
101 |
102 | optimizer.step()
103 | scheduler.step()
104 |
105 | def errorFun(output,target,params):
106 | error = output-target
107 | error = math.sqrt(torch.mean(error*error)*math.pi*params["radius"]**2)
108 | # Calculate the L2 norm error.
109 | ref = math.sqrt(torch.mean(target*target)*math.pi*params["radius"]**2)
110 | return error/ref
111 |
112 | def test(model,device,params):
113 | numQuad = params["numQuad"]
114 |
115 | data = torch.from_numpy(generateData.sampleFromDisk(1,numQuad)).float().to(device)
116 | output = model(data)
117 | target = exact(params["radius"],data).to(device)
118 |
119 | error = output-target
120 | error = math.sqrt(torch.mean(error*error)*math.pi*params["radius"]**2)
121 | # Calculate the L2 norm error.
122 | ref = math.sqrt(torch.mean(target*target)*math.pi*params["radius"]**2)
123 | return error/ref
124 |
125 | def ffun(data):
126 | # f = 4
127 | return 4.0*torch.ones([data.shape[0],1],dtype=torch.float)
128 |
129 | def exact(r,data):
130 | # f = 4 ==> u = r^2-x^2-y^2
131 | output = r**2-torch.sum(data*data,dim=1)
132 |
133 | return output.unsqueeze(1)
134 |
135 | def rough(r,data):
136 | # A rough guess
137 | output = r**2-r*torch.sum(data*data,dim=1)**0.5
138 | return output.unsqueeze(1)
139 |
140 | def count_parameters(model):
141 | return sum(p.numel() for p in model.parameters())
142 |
143 | def main():
144 | # Parameters
145 | # torch.manual_seed(21)
146 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
147 |
148 | params = dict()
149 | params["radius"] = 1
150 | params["d"] = 2 # 2D
151 | params["dd"] = 1 # Scalar field
152 | params["bodyBatch"] = 1024 # Batch size
153 | params["bdryBatch"] = 1024 # Batch size for the boundary integral
154 | params["lr"] = 0.01 # Learning rate
155 | params["preLr"] = 0.01 # Learning rate (Pre-training)
156 | params["width"] = 8 # Width of layers
157 | params["depth"] = 2 # Depth of the network: depth+2
158 | params["numQuad"] = 40000 # Number of quadrature points for testing
159 | params["trainStep"] = 50000
160 | params["penalty"] = 500
161 | params["preStep"] = 0
162 | params["writeStep"] = 50
163 | params["sampleStep"] = 10
164 | params["step_size"] = 5000
165 | params["gamma"] = 0.5
166 | params["decay"] = 0.00001
167 |
168 | startTime = time.time()
169 | model = RitzNet(params).to(device)
170 | print("Generating network costs %s seconds."%(time.time()-startTime))
171 |
172 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
173 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
174 | scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
175 |
176 | startTime = time.time()
177 | preTrain(model,device,params,preOptimizer,None,rough)
178 | train(model,device,params,optimizer,scheduler)
179 | print("Training costs %s seconds."%(time.time()-startTime))
180 |
181 | model.eval()
182 | testError = test(model,device,params)
183 | print("The test error (of the last model) is %s."%testError)
184 | print("The number of parameters is %s,"%count_parameters(model))
185 |
186 | torch.save(model.state_dict(),"last_model.pt")
187 |
188 | pltResult(model,device,100,params)
189 |
190 | def pltResult(model,device,nSample,params):
191 | rList = np.linspace(0,params["radius"],nSample)
192 | thetaList = np.linspace(0,math.pi*2,nSample)
193 |
194 | xx = np.zeros([nSample,nSample])
195 | yy = np.zeros([nSample,nSample])
196 | zz = np.zeros([nSample,nSample])
197 | for i in range(nSample):
198 | for j in range(nSample):
199 | xx[i,j] = rList[i]*math.cos(thetaList[j])
200 | yy[i,j] = rList[i]*math.sin(thetaList[j])
201 | coord = np.array([xx[i,j],yy[i,j]])
202 | zz[i,j] = model(torch.from_numpy(coord).float().to(device)).item()
203 | # zz[i,j] = params["radius"]**2-xx[i,j]**2-yy[i,j]**2 # Plot the exact solution.
204 |
205 | file = open("nSample.txt","w")
206 | file.write(str(nSample))
207 |
208 | file = open("Data.txt","w")
209 | writeSolution.write(xx,yy,zz,nSample,file)
210 |
211 | edgeList = [[params["radius"]*math.cos(i),params["radius"]*math.sin(i)] for i in thetaList]
212 | writeSolution.writeBoundary(edgeList)
213 |
214 | if __name__=="__main__":
215 | main()
--------------------------------------------------------------------------------
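
A quick check of the manufactured solution behind ffun and exact above: on the disk of radius r,

```latex
u(x,y)\;=\;r^{2}-x^{2}-y^{2}
\quad\Longrightarrow\quad
-\Delta u \;=\; -(-2-2)\;=\;4\;=\;f,
\qquad u\big|_{x^{2}+y^{2}=r^{2}}\;=\;0.
```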
/2dpoisson-hole-ls-autograd.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 |
10 | # Network structure
11 | class RitzNet(torch.nn.Module):
12 | def __init__(self, params):
13 | super(RitzNet, self).__init__()
14 | self.params = params
15 | self.linearIn = nn.Linear(self.params["d"], self.params["width"])
16 | self.linear = nn.ModuleList()
17 | for _ in range(params["depth"]):
18 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
19 |
20 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
21 |
22 | def forward(self, x):
23 | x = torch.tanh(self.linearIn(x)) # Match dimension
24 | for layer in self.linear:
25 | x_temp = torch.tanh(layer(x))
26 | x = x_temp
27 |
28 | return self.linearOut(x)
29 |
30 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
31 | model.train()
32 | file = open("lossData.txt","w")
33 |
34 | for step in range(params["preStep"]):
35 | # The volume integral
36 | data = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
37 |
38 | output = model(data)
39 |
40 | target = fun(params["radius"],data)
41 |
42 | loss = output-target
43 | loss = torch.mean(loss*loss)*math.pi*params["radius"]**2
44 |
45 | if step%params["writeStep"] == params["writeStep"]-1:
46 | with torch.no_grad():
47 | ref = exact(data)
48 | error = errorFun(output,ref,params)
49 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
50 | print("Error at Step %s is %s."%(step+1,error))
51 | file.write(str(step+1)+" "+str(error)+"\n")
52 |
53 | model.zero_grad()
54 | loss.backward()
55 |
56 | # Update the weights.
57 | preOptimizer.step()
58 | # preScheduler.step()
59 |
60 | def train(model,device,params,optimizer,scheduler):
61 | ratio = (4*2.0+2*math.pi*0.3)/(2.0*2.0-math.pi*0.3**2)
62 | model.train()
63 |
64 | data1 = torch.from_numpy(generateData.sampleFromDomain(params["bodyBatch"])).float().to(device)
65 | data1.requires_grad = True
66 | data2 = torch.from_numpy(generateData.sampleFromBoundary(params["bdryBatch"])).float().to(device)
67 |
68 | for step in range(params["trainStep"]-params["preStep"]):
69 | output1 = model(data1)
70 |
71 | model.zero_grad()
72 |
73 | dfdx = torch.autograd.grad(output1,data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0]
74 | dfdxx = torch.autograd.grad(dfdx[:,0].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,0].unsqueeze(1)
75 | dfdyy = torch.autograd.grad(dfdx[:,1].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,1].unsqueeze(1)
76 | # Loss function 1
77 | fTerm = ffun(data1).to(device)
78 | loss1 = torch.mean((dfdxx+dfdyy+fTerm)*(dfdxx+dfdyy+fTerm))
79 |
80 | # Loss function 2
81 | output2 = model(data2)
82 | target2 = exact(data2)
83 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] * ratio)
84 | loss = loss1+loss2
85 |
86 | if step%params["writeStep"] == params["writeStep"]-1:
87 | with torch.no_grad():
88 | target = exact(data1)
89 | error = errorFun(output1,target,params)
90 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
91 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
92 | file = open("lossData.txt","a")
93 | file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
94 |
95 | if step%params["sampleStep"] == params["sampleStep"]-1:
96 | data1 = torch.from_numpy(generateData.sampleFromDomain(params["bodyBatch"])).float().to(device)
97 | data1.requires_grad = True
98 | data2 = torch.from_numpy(generateData.sampleFromBoundary(params["bdryBatch"])).float().to(device)
99 |
100 | if 10*(step+1)%params["trainStep"] == 0:
101 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
102 |
103 | loss.backward()
104 |
105 | optimizer.step()
106 | scheduler.step()
107 |
108 | def errorFun(output,target,params):
109 | error = output-target
110 | error = math.sqrt(torch.mean(error*error))
111 | # Calculate the L2 norm error.
112 | ref = math.sqrt(torch.mean(target*target))
113 | return error/ref
114 |
115 | def test(model,device,params):
116 | numQuad = params["numQuad"]
117 |
118 | data = torch.from_numpy(generateData.sampleFromDomain(numQuad)).float().to(device)
119 | output = model(data)
120 | target = exact(data).to(device)
121 |
122 | error = output-target
123 | error = math.sqrt(torch.mean(error*error))
124 | # Calculate the L2 norm error.
125 | ref = math.sqrt(torch.mean(target*target))
126 | return error/ref
127 |
128 | def ffun(data):
129 | # f = 0.0
130 | return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
131 |
132 | def exact(data):
133 | # f = 0 ==> u = xy
134 | output = data[:,0]*data[:,1]
135 |
136 | return output.unsqueeze(1)
137 |
138 | def count_parameters(model):
139 | return sum(p.numel() for p in model.parameters())
140 |
141 | # def rough(r,data):
142 | # output = r**2-r*torch.sum(data*data,dim=1)**0.5
143 | # return output.unsqueeze(1)
144 |
145 | def main():
146 | # Parameters
147 | # torch.manual_seed(21)
148 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
149 |
150 | params = dict()
151 | params["d"] = 2 # 2D
152 | params["dd"] = 1 # Scalar field
153 | params["bodyBatch"] = 1024 # Batch size
154 | params["bdryBatch"] = 1024 # Batch size for the boundary integral
155 | params["lr"] = 0.01 # Learning rate
156 | params["preLr"] = 0.01 # Learning rate (Pre-training)
157 | params["width"] = 8 # Width of layers
158 | params["depth"] = 2 # Depth of the network: depth+2
159 | params["numQuad"] = 40000 # Number of quadrature points for testing
160 | params["trainStep"] = 50000
161 | params["penalty"] = 500
162 | params["preStep"] = 0
163 | params["writeStep"] = 50
164 | params["sampleStep"] = 10
165 | params["step_size"] = 5000
166 | params["gamma"] = 0.5
167 | params["decay"] = 0.00001
168 |
169 | startTime = time.time()
170 | model = RitzNet(params).to(device)
171 | print("Generating network costs %s seconds."%(time.time()-startTime))
172 |
173 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
174 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
175 | scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
176 |
177 | startTime = time.time()
178 | preTrain(model,device,params,preOptimizer,None,exact)
179 | train(model,device,params,optimizer,scheduler)
180 | print("Training costs %s seconds."%(time.time()-startTime))
181 |
182 | model.eval()
183 | testError = test(model,device,params)
184 | print("The test error (of the last model) is %s."%testError)
185 | print("The number of parameters is %s,"%count_parameters(model))
186 |
187 | torch.save(model.state_dict(),"last_model.pt")
188 |
189 | pltResult(model,device,500,params)
190 |
191 | def pltResult(model,device,nSample,params):
192 | xList = np.linspace(-1,1,nSample)
193 | yList = np.linspace(-1,1,nSample)
194 | thetaList = np.linspace(0,2*math.pi,50)
195 |
196 | xx = np.zeros([nSample,nSample])
197 | yy = np.zeros([nSample,nSample])
198 | zz = np.zeros([nSample,nSample])
199 | for i in range(nSample):
200 | for j in range(nSample):
201 | xx[i,j] = xList[i]
202 | yy[i,j] = yList[j]
203 | coord = np.array([xx[i,j],yy[i,j]])
204 | zz[i,j] = model(torch.from_numpy(coord).float().to(device)).item()
205 | # zz[i,j] = xx[i,j]*yy[i,j] # Plot the exact solution.
206 | if np.linalg.norm(coord-np.array([0.3,0.0]))<0.3:
207 |         zz[i,j] = np.nan # Mask points inside the hole
208 |
209 | file = open("nSample.txt","w")
210 | file.write(str(nSample))
211 |
212 | file = open("Data.txt","w")
213 | writeSolution.write(xx,yy,zz,nSample,file)
214 |
215 | edgeList2 = [[0.3*math.cos(i)+0.3,0.3*math.sin(i)] for i in thetaList]
216 | edgeList1 = [[-1.0,-1.0],[1.0,-1.0],[1.0,1.0],[-1.0,1.0],[-1.0,-1.0]]
217 | writeSolution.writeBoundary(edgeList1,edgeList2)
218 |
219 | if __name__=="__main__":
220 | main()
--------------------------------------------------------------------------------
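The pairs of torch.autograd.grad calls above (dfdxx, dfdyy) assemble the Laplacian one
coordinate at a time, and 10dpoisson-ls-autograd.py further below unrolls the same pattern
over ten coordinates. A minimal sketch of the identical computation written as a loop over
input dimensions; the helper name laplacian is hypothetical, not part of this repository:

    import torch

    def laplacian(model, x):
        # x: (N, d) tensor with requires_grad=True; returns the (N, 1) sum of
        # second derivatives d^2 u / dx_k^2, matching dfdxx+dfdyy for d = 2.
        u = model(x)
        grad = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u),
                                   create_graph=True)[0]                # (N, d)
        lap = torch.zeros_like(u)
        for k in range(x.shape[1]):
            grad2 = torch.autograd.grad(grad[:, k].sum(), x,
                                        create_graph=True)[0]           # (N, d)
            lap = lap + grad2[:, k].unsqueeze(1)
        return lap

--------------------------------------------------------------------------------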
/2dpoisson-hole.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 |
10 | # Network structure
11 | class RitzNet(torch.nn.Module):
12 | def __init__(self, params):
13 | super(RitzNet, self).__init__()
14 | self.params = params
15 | self.linearIn = nn.Linear(self.params["d"], self.params["width"])
16 | self.linear = nn.ModuleList()
17 | for _ in range(params["depth"]):
18 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
19 |
20 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
21 |
22 | def forward(self, x):
23 | x = torch.tanh(self.linearIn(x)) # Match dimension
24 | for layer in self.linear:
25 | x_temp = torch.tanh(layer(x))
26 | x = x_temp
27 |
28 | return self.linearOut(x)
29 |
30 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
31 | model.train()
32 | file = open("lossData.txt","w")
33 |
34 | for step in range(params["preStep"]):
35 | # The volume integral
36 |     data = torch.from_numpy(generateData.sampleFromDomain(params["bodyBatch"])).float().to(device) # Sample from the square-with-hole domain
37 |
38 | output = model(data)
39 |
40 |     target = fun(data)
41 |
42 | loss = output-target
43 |     loss = torch.mean(loss*loss)*(2.0*2.0-math.pi*0.3**2) # Scale by the domain area
44 |
45 | if step%params["writeStep"] == params["writeStep"]-1:
46 | with torch.no_grad():
47 | ref = exact(data)
48 | error = errorFun(output,ref,params)
49 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
50 | print("Error at Step %s is %s."%(step+1,error))
51 | file.write(str(step+1)+" "+str(error)+"\n")
52 |
53 | model.zero_grad()
54 | loss.backward()
55 |
56 | # Update the weights.
57 | preOptimizer.step()
58 | # preScheduler.step()
59 |
60 | def train(model,device,params,optimizer,scheduler):
61 |   ratio = (4*2.0+2*math.pi*0.3)/(2.0*2.0-math.pi*0.3**2) # |boundary|/|area| of the square [-1,1]^2 minus the disk of radius 0.3 centered at (0.3,0)
62 | model.train()
63 |
64 | data1 = torch.from_numpy(generateData.sampleFromDomain(params["bodyBatch"])).float().to(device)
65 | data2 = torch.from_numpy(generateData.sampleFromBoundary(params["bdryBatch"])).float().to(device)
66 | x_shift = torch.from_numpy(np.array([params["diff"],0.0])).float().to(device)
67 | y_shift = torch.from_numpy(np.array([0.0,params["diff"]])).float().to(device)
68 | data1_x_shift = data1+x_shift
69 | data1_y_shift = data1+y_shift
70 |
71 | for step in range(params["trainStep"]-params["preStep"]):
72 | output1 = model(data1)
73 | output1_x_shift = model(data1_x_shift)
74 | output1_y_shift = model(data1_y_shift)
75 |
76 | dfdx = (output1_x_shift-output1)/params["diff"] # Use difference to approximate derivatives.
77 | dfdy = (output1_y_shift-output1)/params["diff"]
78 |
79 | model.zero_grad()
80 |
81 | # Loss function 1
82 | fTerm = ffun(data1).to(device)
83 | loss1 = torch.mean(0.5*(dfdx*dfdx+dfdy*dfdy)-fTerm*output1)
84 |
85 | # Loss function 2
86 | output2 = model(data2)
87 | target2 = exact(data2)
88 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] * ratio)
89 | loss = loss1+loss2
90 |
91 | if step%params["writeStep"] == params["writeStep"]-1:
92 | with torch.no_grad():
93 | target = exact(data1)
94 | error = errorFun(output1,target,params)
95 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
96 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
97 |       with open("lossData.txt","a") as file:
98 |         file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
99 |
100 | if step%params["sampleStep"] == params["sampleStep"]-1:
101 | data1 = torch.from_numpy(generateData.sampleFromDomain(params["bodyBatch"])).float().to(device)
102 | data2 = torch.from_numpy(generateData.sampleFromBoundary(params["bdryBatch"])).float().to(device)
103 |
104 | data1_x_shift = data1+x_shift
105 | data1_y_shift = data1+y_shift
106 |
107 | if 10*(step+1)%params["trainStep"] == 0:
108 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
109 |
110 | loss.backward()
111 |
112 | optimizer.step()
113 | scheduler.step()
114 |
115 | def errorFun(output,target,params):
116 | error = output-target
117 | error = math.sqrt(torch.mean(error*error))
118 | # Calculate the L2 norm error.
119 | ref = math.sqrt(torch.mean(target*target))
120 | return error/ref
121 |
122 | def test(model,device,params):
123 | numQuad = params["numQuad"]
124 |
125 | data = torch.from_numpy(generateData.sampleFromDomain(numQuad)).float().to(device)
126 | output = model(data)
127 | target = exact(data).to(device)
128 |
129 | error = output-target
130 | error = math.sqrt(torch.mean(error*error))
131 | # Calculate the L2 norm error.
132 | ref = math.sqrt(torch.mean(target*target))
133 | return error/ref
134 |
135 | def ffun(data):
136 | # f = 0.0
137 | return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
138 |
139 | def exact(data):
140 | # f = 0 ==> u = xy
141 | output = data[:,0]*data[:,1]
142 |
143 | return output.unsqueeze(1)
144 |
145 | def count_parameters(model):
146 | return sum(p.numel() for p in model.parameters())
147 |
148 | # def rough(r,data):
149 | # output = r**2-r*torch.sum(data*data,dim=1)**0.5
150 | # return output.unsqueeze(1)
151 |
152 | def main():
153 | # Parameters
154 | # torch.manual_seed(21)
155 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
156 |
157 | params = dict()
158 | params["d"] = 2 # 2D
159 | params["dd"] = 1 # Scalar field
160 | params["bodyBatch"] = 1024 # Batch size
161 | params["bdryBatch"] = 1024 # Batch size for the boundary integral
162 | params["lr"] = 0.01 # Learning rate
163 | params["preLr"] = 0.01 # Learning rate (Pre-training)
164 | params["width"] = 8 # Width of layers
165 | params["depth"] = 2 # Depth of the network: depth+2
166 | params["numQuad"] = 40000 # Number of quadrature points for testing
167 | params["trainStep"] = 50000
168 | params["penalty"] = 500
169 | params["preStep"] = 0
170 | params["diff"] = 0.001
171 | params["writeStep"] = 50
172 | params["sampleStep"] = 10
173 | params["step_size"] = 5000
174 | params["gamma"] = 0.3
175 | params["decay"] = 0.00001
176 |
177 | startTime = time.time()
178 | model = RitzNet(params).to(device)
179 | print("Generating network costs %s seconds."%(time.time()-startTime))
180 |
181 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
182 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
183 | scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
184 |
185 | startTime = time.time()
186 | preTrain(model,device,params,preOptimizer,None,exact)
187 | train(model,device,params,optimizer,scheduler)
188 | print("Training costs %s seconds."%(time.time()-startTime))
189 |
190 | model.eval()
191 | testError = test(model,device,params)
192 | print("The test error (of the last model) is %s."%testError)
193 | print("The number of parameters is %s,"%count_parameters(model))
194 |
195 | torch.save(model.state_dict(),"last_model.pt")
196 |
197 | pltResult(model,device,500,params)
198 |
199 | def pltResult(model,device,nSample,params):
200 | xList = np.linspace(-1,1,nSample)
201 | yList = np.linspace(-1,1,nSample)
202 | thetaList = np.linspace(0,2*math.pi,50)
203 |
204 | xx = np.zeros([nSample,nSample])
205 | yy = np.zeros([nSample,nSample])
206 | zz = np.zeros([nSample,nSample])
207 | for i in range(nSample):
208 | for j in range(nSample):
209 | xx[i,j] = xList[i]
210 | yy[i,j] = yList[j]
211 | coord = np.array([xx[i,j],yy[i,j]])
212 | zz[i,j] = model(torch.from_numpy(coord).float().to(device)).item()
213 | # zz[i,j] = xx[i,j]*yy[i,j] # Plot the exact solution.
214 | if np.linalg.norm(coord-np.array([0.3,0.0]))<0.3:
215 |         zz[i,j] = np.nan # Mask points inside the hole
216 |
217 | file = open("nSample.txt","w")
218 | file.write(str(nSample))
219 |
220 | file = open("Data.txt","w")
221 | writeSolution.write(xx,yy,zz,nSample,file)
222 |
223 | edgeList2 = [[0.3*math.cos(i)+0.3,0.3*math.sin(i)] for i in thetaList]
224 | edgeList1 = [[-1.0,-1.0],[1.0,-1.0],[1.0,1.0],[-1.0,1.0],[-1.0,-1.0]]
225 | writeSolution.writeBoundary(edgeList1,edgeList2)
226 |
227 | if __name__=="__main__":
228 | main()
--------------------------------------------------------------------------------
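For reference, the objective that train() in 2dpoisson-hole.py estimates by Monte Carlo is
the boundary-penalized Dirichlet energy of the Deep Ritz method. Writing \Omega for the
square with the hole removed, g for the exact boundary data, and \beta for
params["penalty"], loss1 and loss2 above correspond, up to a common 1/|\Omega| factor, to

    J(u) = \int_{\Omega} \Big( \tfrac{1}{2}\,|\nabla u|^{2} - f\,u \Big)\,dx
         + \beta \int_{\partial\Omega} (u - g)^{2}\,ds .

The constant ratio = |\partial\Omega| / |\Omega| converts the boundary sample mean into an
integral carrying the same 1/|\Omega| normalization as the volume term, so the two pieces
are weighted consistently.

--------------------------------------------------------------------------------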
/2dpoisson.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 |
10 | # Network structure
11 | class RitzNet(torch.nn.Module):
12 | def __init__(self, params):
13 | super(RitzNet, self).__init__()
14 | self.params = params
15 | self.linearIn = nn.Linear(self.params["d"], self.params["width"])
16 | self.linear = nn.ModuleList()
17 | for _ in range(params["depth"]):
18 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
19 |
20 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
21 |
22 | def forward(self, x):
23 | x = torch.tanh(self.linearIn(x)) # Match dimension
24 | for layer in self.linear:
25 | x_temp = torch.tanh(layer(x))
26 | x = x_temp
27 |
28 | return self.linearOut(x)
29 |
30 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
31 | model.train()
32 | file = open("lossData.txt","w")
33 |
34 | for step in range(params["preStep"]):
35 | # The volume integral
36 | data = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
37 |
38 | output = model(data)
39 |
40 | target = fun(params["radius"],data)
41 |
42 | loss = output-target
43 | loss = torch.mean(loss*loss)*math.pi*params["radius"]**2
44 |
45 | if step%params["writeStep"] == params["writeStep"]-1:
46 | with torch.no_grad():
47 | ref = exact(params["radius"],data)
48 | error = errorFun(output,ref,params)
49 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
50 | print("Error at Step %s is %s."%(step+1,error))
51 | file.write(str(step+1)+" "+str(error)+"\n")
52 |
53 | model.zero_grad()
54 | loss.backward()
55 |
56 | # Update the weights.
57 | preOptimizer.step()
58 | # preScheduler.step()
59 |
60 | def train(model,device,params,optimizer,scheduler):
61 | model.train()
62 |
63 | data1 = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
64 | data2 = torch.from_numpy(generateData.sampleFromSurface(params["radius"],params["bdryBatch"])).float().to(device)
65 |
66 | x_shift = torch.from_numpy(np.array([params["diff"],0.0])).float().to(device)
67 | y_shift = torch.from_numpy(np.array([0.0,params["diff"]])).float().to(device)
68 | data1_x_shift = data1+x_shift
69 | data1_y_shift = data1+y_shift
70 |
71 | for step in range(params["trainStep"]-params["preStep"]):
72 | output1 = model(data1)
73 | output1_x_shift = model(data1_x_shift)
74 | output1_y_shift = model(data1_y_shift)
75 |
76 | dfdx = (output1_x_shift-output1)/params["diff"] # Use difference to approximate derivatives.
77 | dfdy = (output1_y_shift-output1)/params["diff"]
78 |
79 | model.zero_grad()
80 |
81 | # Loss function 1
82 | fTerm = ffun(data1).to(device)
83 | loss1 = torch.mean(0.5*(dfdx*dfdx+dfdy*dfdy)-fTerm*output1) * math.pi*params["radius"]**2
84 |
85 | # Loss function 2
86 | output2 = model(data2)
87 | target2 = exact(params["radius"],data2)
88 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] * 2*math.pi*params["radius"])
89 | loss = loss1+loss2
90 |
91 | if step%params["writeStep"] == params["writeStep"]-1:
92 | with torch.no_grad():
93 | target = exact(params["radius"],data1)
94 | error = errorFun(output1,target,params)
95 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
96 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
97 |       with open("lossData.txt","a") as file:
98 |         file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
99 |
100 | if step%params["sampleStep"] == params["sampleStep"]-1:
101 | data1 = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
102 | data2 = torch.from_numpy(generateData.sampleFromSurface(params["radius"],params["bdryBatch"])).float().to(device)
103 |
104 | data1_x_shift = data1+x_shift
105 | data1_y_shift = data1+y_shift
106 |
107 | if 10*(step+1)%params["trainStep"] == 0:
108 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
109 |
110 | loss.backward()
111 |
112 | optimizer.step()
113 | scheduler.step()
114 |
115 | def errorFun(output,target,params):
116 | error = output-target
117 | error = math.sqrt(torch.mean(error*error)*math.pi*params["radius"]**2)
118 | # Calculate the L2 norm error.
119 | ref = math.sqrt(torch.mean(target*target)*math.pi*params["radius"]**2)
120 | return error/ref
121 |
122 | def test(model,device,params):
123 | numQuad = params["numQuad"]
124 |
125 |   data = torch.from_numpy(generateData.sampleFromDisk(params["radius"],numQuad)).float().to(device)
126 | output = model(data)
127 | target = exact(params["radius"],data).to(device)
128 |
129 | error = output-target
130 | error = math.sqrt(torch.mean(error*error)*math.pi*params["radius"]**2)
131 | # Calculate the L2 norm error.
132 | ref = math.sqrt(torch.mean(target*target)*math.pi*params["radius"]**2)
133 | return error/ref
134 |
135 | def ffun(data):
136 | # f = 4
137 | return 4.0*torch.ones([data.shape[0],1],dtype=torch.float)
138 |
139 | def exact(r,data):
140 | # f = 4 ==> u = r^2-x^2-y^2
141 | output = r**2-torch.sum(data*data,dim=1)
142 |
143 | return output.unsqueeze(1)
144 |
145 | def rough(r,data):
146 | # A rough guess
147 | output = r**2-r*torch.sum(data*data,dim=1)**0.5
148 | return output.unsqueeze(1)
149 |
150 | def count_parameters(model):
151 | return sum(p.numel() for p in model.parameters())
152 |
153 | def main():
154 | # Parameters
155 | # torch.manual_seed(21)
156 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
157 |
158 | params = dict()
159 | params["radius"] = 1
160 | params["d"] = 2 # 2D
161 | params["dd"] = 1 # Scalar field
162 | params["bodyBatch"] = 1024 # Batch size
163 | params["bdryBatch"] = 1024 # Batch size for the boundary integral
164 | params["lr"] = 0.01 # Learning rate
165 | params["preLr"] = 0.01 # Learning rate (Pre-training)
166 | params["width"] = 8 # Width of layers
167 | params["depth"] = 2 # Depth of the network: depth+2
168 | params["numQuad"] = 40000 # Number of quadrature points for testing
169 | params["trainStep"] = 50000
170 | params["penalty"] = 500
171 | params["preStep"] = 0
172 | params["diff"] = 0.001
173 | params["writeStep"] = 50
174 | params["sampleStep"] = 10
175 | params["step_size"] = 5000
176 | params["gamma"] = 0.3
177 | params["decay"] = 0.00001
178 |
179 | startTime = time.time()
180 | model = RitzNet(params).to(device)
181 | print("Generating network costs %s seconds."%(time.time()-startTime))
182 |
183 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
184 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
185 | scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
186 |
187 | startTime = time.time()
188 | preTrain(model,device,params,preOptimizer,None,rough)
189 | train(model,device,params,optimizer,scheduler)
190 | print("Training costs %s seconds."%(time.time()-startTime))
191 |
192 | model.eval()
193 | testError = test(model,device,params)
194 | print("The test error (of the last model) is %s."%testError)
195 | print("The number of parameters is %s,"%count_parameters(model))
196 |
197 | torch.save(model.state_dict(),"last_model.pt")
198 |
199 | pltResult(model,device,100,params)
200 |
201 | def pltResult(model,device,nSample,params):
202 | rList = np.linspace(0,params["radius"],nSample)
203 | thetaList = np.linspace(0,math.pi*2,nSample)
204 |
205 | xx = np.zeros([nSample,nSample])
206 | yy = np.zeros([nSample,nSample])
207 | zz = np.zeros([nSample,nSample])
208 | for i in range(nSample):
209 | for j in range(nSample):
210 | xx[i,j] = rList[i]*math.cos(thetaList[j])
211 | yy[i,j] = rList[i]*math.sin(thetaList[j])
212 | coord = np.array([xx[i,j],yy[i,j]])
213 | zz[i,j] = model(torch.from_numpy(coord).float().to(device)).item()
214 | # zz[i,j] = params["radius"]**2-xx[i,j]**2-yy[i,j]**2 # Plot the exact solution.
215 |
216 | file = open("nSample.txt","w")
217 | file.write(str(nSample))
218 |
219 | file = open("Data.txt","w")
220 | writeSolution.write(xx,yy,zz,nSample,file)
221 |
222 | edgeList = [[params["radius"]*math.cos(i),params["radius"]*math.sin(i)] for i in thetaList]
223 | writeSolution.writeBoundary(edgeList)
224 |
225 | if __name__=="__main__":
226 | main()
--------------------------------------------------------------------------------
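2dpoisson.py and 2dpoisson-hole.py approximate first derivatives with one-sided differences
of step params["diff"], which is first-order accurate; the -ls scripts below use the
centered second-difference stencil, which is second-order accurate. A small self-contained
check of the two orders on sin(x), a hypothetical demo rather than repository code:

    import math

    def fwd_diff(f, x, h):   # one-sided: error O(h)
        return (f(x + h) - f(x)) / h

    def ctr_diff(f, x, h):   # centered: error O(h^2)
        return (f(x + h) - f(x - h)) / (2 * h)

    exact_d = math.cos(1.0)
    for h in (1e-1, 1e-2, 1e-3):
        print(h,
              abs(fwd_diff(math.sin, 1.0, h) - exact_d),   # shrinks like h
              abs(ctr_diff(math.sin, 1.0, h) - exact_d))   # shrinks like h^2

--------------------------------------------------------------------------------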
/2dpoisson-ls-autograd.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 |
10 | # Network structure
11 | class RitzNet(torch.nn.Module):
12 | def __init__(self, params):
13 | super(RitzNet, self).__init__()
14 | self.params = params
15 | self.linearIn = nn.Linear(self.params["d"], self.params["width"])
16 | self.linear = nn.ModuleList()
17 | for _ in range(params["depth"]):
18 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
19 |
20 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
21 |
22 | def forward(self, x):
23 | x = torch.tanh(self.linearIn(x)) # Match dimension
24 | for layer in self.linear:
25 | x_temp = torch.tanh(layer(x))
26 | x = x_temp
27 |
28 | return self.linearOut(x)
29 |
30 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
31 | model.train()
32 | file = open("lossData.txt","w")
33 |
34 | for step in range(params["preStep"]):
35 | # The volume integral
36 | data = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
37 |
38 | output = model(data)
39 |
40 | target = fun(params["radius"],data)
41 |
42 | loss = output-target
43 | loss = torch.mean(loss*loss)*math.pi*params["radius"]**2
44 |
45 | if step%params["writeStep"] == params["writeStep"]-1:
46 | with torch.no_grad():
47 | ref = exact(params["radius"],data)
48 | error = errorFun(output,ref,params)
49 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
50 | print("Error at Step %s is %s."%(step+1,error))
51 | file.write(str(step+1)+" "+str(error)+"\n")
52 |
53 | model.zero_grad()
54 | loss.backward()
55 |
56 | # Update the weights.
57 | preOptimizer.step()
58 | # preScheduler.step()
59 |
60 | def train(model,device,params,optimizer,scheduler):
61 | model.train()
62 |
63 | data1 = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
64 | data1.requires_grad = True
65 | data2 = torch.from_numpy(generateData.sampleFromSurface(params["radius"],params["bdryBatch"])).float().to(device)
66 |
67 | for step in range(params["trainStep"]-params["preStep"]):
68 | output1 = model(data1)
69 |
70 | model.zero_grad()
71 |
72 | dfdx = torch.autograd.grad(output1,data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0]
73 | dfdxx = torch.autograd.grad(dfdx[:,0].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,0].unsqueeze(1)
74 | dfdyy = torch.autograd.grad(dfdx[:,1].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,1].unsqueeze(1)
75 | # Loss function 1
76 | fTerm = ffun(data1).to(device)
77 | loss1 = torch.mean((dfdxx+dfdyy+fTerm)*(dfdxx+dfdyy+fTerm)) * math.pi*params["radius"]**2
78 |
79 | # Loss function 2
80 | output2 = model(data2)
81 | target2 = exact(params["radius"],data2)
82 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] * 2*math.pi*params["radius"])
83 | loss = loss1+loss2
84 |
85 | if step%params["writeStep"] == params["writeStep"]-1:
86 | with torch.no_grad():
87 | target = exact(params["radius"],data1)
88 | error = errorFun(output1,target,params)
89 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
90 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
91 |       with open("lossData.txt","a") as file:
92 |         file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
93 |
94 | if step%params["sampleStep"] == params["sampleStep"]-1:
95 | data1 = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
96 | data1.requires_grad = True
97 | data2 = torch.from_numpy(generateData.sampleFromSurface(params["radius"],params["bdryBatch"])).float().to(device)
98 |
99 | if 10*(step+1)%params["trainStep"] == 0:
100 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
101 |
102 | loss.backward()
103 |
104 | optimizer.step()
105 | scheduler.step()
106 |
107 | def errorFun(output,target,params):
108 | error = output-target
109 | error = math.sqrt(torch.mean(error*error)*math.pi*params["radius"]**2)
110 | # Calculate the L2 norm error.
111 | ref = math.sqrt(torch.mean(target*target)*math.pi*params["radius"]**2)
112 | return error/ref
113 |
114 | def test(model,device,params):
115 | numQuad = params["numQuad"]
116 |
117 |   data = torch.from_numpy(generateData.sampleFromDisk(params["radius"],numQuad)).float().to(device)
118 | output = model(data)
119 | target = exact(params["radius"],data).to(device)
120 |
121 | error = output-target
122 | error = math.sqrt(torch.mean(error*error)*math.pi*params["radius"]**2)
123 | # Calculate the L2 norm error.
124 | ref = math.sqrt(torch.mean(target*target)*math.pi*params["radius"]**2)
125 | return error/ref
126 |
127 | def ffun(data):
128 | # f = 4
129 | return 4.0*torch.ones([data.shape[0],1],dtype=torch.float)
130 | # f = 0
131 | # return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
132 |
133 | def exact(r,data):
134 | # f = 4 ==> u = r^2-x^2-y^2
135 | output = r**2-torch.sum(data*data,dim=1)
136 | # f = 0 ==> u = x1*x2
137 | # output = data[:,0]*data[:,1]
138 |
139 | return output.unsqueeze(1)
140 |
141 | def rough(r,data):
142 | # A rough guess
143 | output = r**2-r*torch.sum(data*data,dim=1)**0.5
144 | # output = torch.zeros(data.shape[0],dtype=torch.float)
145 | return output.unsqueeze(1)
146 |
147 | def count_parameters(model):
148 | return sum(p.numel() for p in model.parameters())
149 |
150 | def main():
151 | # Parameters
152 | # torch.manual_seed(21)
153 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
154 |
155 | params = dict()
156 | params["radius"] = 1
157 | params["d"] = 2 # 2D
158 | params["dd"] = 1 # Scalar field
159 | params["bodyBatch"] = 1024 # Batch size
160 | params["bdryBatch"] = 1024 # Batch size for the boundary integral
161 | params["lr"] = 0.01 # Learning rate
162 | params["preLr"] = 0.01 # Learning rate (Pre-training)
163 | params["width"] = 8 # Width of layers
164 | params["depth"] = 2 # Depth of the network: depth+2
165 | params["numQuad"] = 40000 # Number of quadrature points for testing
166 | params["trainStep"] = 50000
167 | params["penalty"] = 500
168 | params["preStep"] = 0
169 | params["writeStep"] = 50
170 | params["sampleStep"] = 10
171 | params["step_size"] = 5000
172 | params["gamma"] = 0.5
173 | params["decay"] = 0.00001
174 |
175 | startTime = time.time()
176 | model = RitzNet(params).to(device)
177 | print("Generating network costs %s seconds."%(time.time()-startTime))
178 |
179 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
180 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
181 | scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
182 |
183 | startTime = time.time()
184 | preTrain(model,device,params,preOptimizer,None,rough)
185 | train(model,device,params,optimizer,scheduler)
186 | print("Training costs %s seconds."%(time.time()-startTime))
187 |
188 | model.eval()
189 | testError = test(model,device,params)
190 | print("The test error (of the last model) is %s."%testError)
191 | print("The number of parameters is %s,"%count_parameters(model))
192 |
193 | torch.save(model.state_dict(),"last_model.pt")
194 |
195 | pltResult(model,device,100,params)
196 |
197 | def pltResult(model,device,nSample,params):
198 | rList = np.linspace(0,params["radius"],nSample)
199 | thetaList = np.linspace(0,math.pi*2,nSample)
200 |
201 | xx = np.zeros([nSample,nSample])
202 | yy = np.zeros([nSample,nSample])
203 | zz = np.zeros([nSample,nSample])
204 | for i in range(nSample):
205 | for j in range(nSample):
206 | xx[i,j] = rList[i]*math.cos(thetaList[j])
207 | yy[i,j] = rList[i]*math.sin(thetaList[j])
208 | coord = np.array([xx[i,j],yy[i,j]])
209 | zz[i,j] = model(torch.from_numpy(coord).float().to(device)).item()
210 | # zz[i,j] = params["radius"]**2-xx[i,j]**2-yy[i,j]**2 # Plot the exact solution.
211 |
212 | file = open("nSample.txt","w")
213 | file.write(str(nSample))
214 |
215 | file = open("Data.txt","w")
216 | writeSolution.write(xx,yy,zz,nSample,file)
217 |
218 | edgeList = [[params["radius"]*math.cos(i),params["radius"]*math.sin(i)] for i in thetaList]
219 | writeSolution.writeBoundary(edgeList)
220 |
221 | if __name__=="__main__":
222 | main()
--------------------------------------------------------------------------------
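generateData.py is not reproduced in this listing, so the samplers are known here only
through their call sites. For sampleFromDisk and sampleFromSurface as used above, a
standard construction would be the following sketch; the repository's actual
implementation may differ:

    import numpy as np

    def sampleFromDisk(r, n):
        # Uniform in the disk: radius r*sqrt(U) makes area elements equally likely.
        theta = 2 * np.pi * np.random.rand(n)
        rad = r * np.sqrt(np.random.rand(n))
        return np.stack([rad * np.cos(theta), rad * np.sin(theta)], axis=1)

    def sampleFromSurface(r, n):
        # Uniform on the circle of radius r.
        theta = 2 * np.pi * np.random.rand(n)
        return np.stack([r * np.cos(theta), r * np.sin(theta)], axis=1)

--------------------------------------------------------------------------------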
/2dpoisson-hole-ls.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 |
10 | # Network structure
11 | class RitzNet(torch.nn.Module):
12 | def __init__(self, params):
13 | super(RitzNet, self).__init__()
14 | self.params = params
15 | self.linearIn = nn.Linear(self.params["d"], self.params["width"])
16 | self.linear = nn.ModuleList()
17 | for _ in range(params["depth"]):
18 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
19 |
20 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
21 |
22 | def forward(self, x):
23 | x = torch.tanh(self.linearIn(x)) # Match dimension
24 | for layer in self.linear:
25 | x_temp = torch.tanh(layer(x))
26 | x = x_temp
27 |
28 | return self.linearOut(x)
29 |
30 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
31 | model.train()
32 | file = open("lossData.txt","w")
33 |
34 | for step in range(params["preStep"]):
35 | # The volume integral
36 |     data = torch.from_numpy(generateData.sampleFromDomain(params["bodyBatch"])).float().to(device) # Sample from the square-with-hole domain
37 |
38 | output = model(data)
39 |
40 |     target = fun(data)
41 |
42 | loss = output-target
43 |     loss = torch.mean(loss*loss)*(2.0*2.0-math.pi*0.3**2) # Scale by the domain area
44 |
45 | if step%params["writeStep"] == params["writeStep"]-1:
46 | with torch.no_grad():
47 | ref = exact(data)
48 | error = errorFun(output,ref,params)
49 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
50 | print("Error at Step %s is %s."%(step+1,error))
51 | file.write(str(step+1)+" "+str(error)+"\n")
52 |
53 | model.zero_grad()
54 | loss.backward()
55 |
56 | # Update the weights.
57 | preOptimizer.step()
58 | # preScheduler.step()
59 |
60 | def train(model,device,params,optimizer,scheduler):
61 |   ratio = (4*2.0+2*math.pi*0.3)/(2.0*2.0-math.pi*0.3**2) # |boundary|/|area| of the square [-1,1]^2 minus the disk of radius 0.3 centered at (0.3,0)
62 | model.train()
63 |
64 | data1 = torch.from_numpy(generateData.sampleFromDomain(params["bodyBatch"])).float().to(device)
65 | data2 = torch.from_numpy(generateData.sampleFromBoundary(params["bdryBatch"])).float().to(device)
66 | x_shift = torch.from_numpy(np.array([params["diff"],0.0])).float().to(device)
67 | y_shift = torch.from_numpy(np.array([0.0,params["diff"]])).float().to(device)
68 | data1_x_shift = data1+x_shift
69 | data1_y_shift = data1+y_shift
70 | data1_x_nshift = data1-x_shift
71 | data1_y_nshift = data1-y_shift
72 |
73 | for step in range(params["trainStep"]-params["preStep"]):
74 | output1 = model(data1)
75 | output1_x_shift = model(data1_x_shift)
76 | output1_y_shift = model(data1_y_shift)
77 | output1_x_nshift = model(data1_x_nshift)
78 | output1_y_nshift = model(data1_y_nshift)
79 |
80 | # Second order difference
81 | dfdx2 = (output1_x_shift+output1_x_nshift-2*output1)/(params["diff"]**2) # Use difference to approximate derivatives.
82 | dfdy2 = (output1_y_shift+output1_y_nshift-2*output1)/(params["diff"]**2)
83 |
84 | model.zero_grad()
85 |
86 | # Loss function 1
87 | fTerm = ffun(data1).to(device)
88 | loss1 = torch.mean((dfdx2+dfdy2+fTerm)*(dfdx2+dfdy2+fTerm))
89 |
90 | # Loss function 2
91 | output2 = model(data2)
92 | target2 = exact(data2)
93 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] * ratio)
94 | loss = loss1+loss2
95 |
96 | if step%params["writeStep"] == params["writeStep"]-1:
97 | with torch.no_grad():
98 | target = exact(data1)
99 | error = errorFun(output1,target,params)
100 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
101 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
102 |       with open("lossData.txt","a") as file:
103 |         file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
104 |
105 | if step%params["sampleStep"] == params["sampleStep"]-1:
106 | data1 = torch.from_numpy(generateData.sampleFromDomain(params["bodyBatch"])).float().to(device)
107 | data2 = torch.from_numpy(generateData.sampleFromBoundary(params["bdryBatch"])).float().to(device)
108 |
109 | data1_x_shift = data1+x_shift
110 | data1_y_shift = data1+y_shift
111 | data1_x_nshift = data1-x_shift
112 | data1_y_nshift = data1-y_shift
113 |
114 | if 10*(step+1)%params["trainStep"] == 0:
115 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
116 |
117 | loss.backward()
118 |
119 | optimizer.step()
120 | scheduler.step()
121 |
122 | def errorFun(output,target,params):
123 | error = output-target
124 | error = math.sqrt(torch.mean(error*error))
125 | # Calculate the L2 norm error.
126 | ref = math.sqrt(torch.mean(target*target))
127 | return error/ref
128 |
129 | def test(model,device,params):
130 | numQuad = params["numQuad"]
131 |
132 | data = torch.from_numpy(generateData.sampleFromDomain(numQuad)).float().to(device)
133 | output = model(data)
134 | target = exact(data).to(device)
135 |
136 | error = output-target
137 | error = math.sqrt(torch.mean(error*error))
138 | # Calculate the L2 norm error.
139 | ref = math.sqrt(torch.mean(target*target))
140 | return error/ref
141 |
142 | def ffun(data):
143 | # f = 0.0
144 | return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
145 |
146 | def exact(data):
147 | # f = 0 ==> u = xy
148 | output = data[:,0]*data[:,1]
149 |
150 | return output.unsqueeze(1)
151 |
152 | def count_parameters(model):
153 | return sum(p.numel() for p in model.parameters())
154 |
155 | # def rough(r,data):
156 | # output = r**2-r*torch.sum(data*data,dim=1)**0.5
157 | # return output.unsqueeze(1)
158 |
159 | def main():
160 | # Parameters
161 | # torch.manual_seed(21)
162 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
163 |
164 | params = dict()
165 | params["d"] = 2 # 2D
166 | params["dd"] = 1 # Scalar field
167 | params["bodyBatch"] = 1024 # Batch size
168 | params["bdryBatch"] = 1024 # Batch size for the boundary integral
169 | params["lr"] = 0.01 # Learning rate
170 | params["preLr"] = 0.01 # Learning rate (Pre-training)
171 | params["width"] = 8 # Width of layers
172 | params["depth"] = 2 # Depth of the network: depth+2
173 | params["numQuad"] = 40000 # Number of quadrature points for testing
174 | params["trainStep"] = 50000
175 | params["penalty"] = 500
176 | params["preStep"] = 0
177 | params["diff"] = 0.001
178 | params["writeStep"] = 50
179 | params["sampleStep"] = 10
180 | params["step_size"] = 5000
181 | params["gamma"] = 0.3
182 | params["decay"] = 0.00001
183 |
184 | startTime = time.time()
185 | model = RitzNet(params).to(device)
186 | print("Generating network costs %s seconds."%(time.time()-startTime))
187 |
188 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
189 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
190 | scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
191 |
192 | startTime = time.time()
193 | preTrain(model,device,params,preOptimizer,None,exact)
194 | train(model,device,params,optimizer,scheduler)
195 | print("Training costs %s seconds."%(time.time()-startTime))
196 |
197 | model.eval()
198 | testError = test(model,device,params)
199 | print("The test error (of the last model) is %s."%testError)
200 | print("The number of parameters is %s,"%count_parameters(model))
201 |
202 | torch.save(model.state_dict(),"last_model.pt")
203 |
204 | pltResult(model,device,500,params)
205 |
206 | def pltResult(model,device,nSample,params):
207 | xList = np.linspace(-1,1,nSample)
208 | yList = np.linspace(-1,1,nSample)
209 | thetaList = np.linspace(0,2*math.pi,50)
210 |
211 | xx = np.zeros([nSample,nSample])
212 | yy = np.zeros([nSample,nSample])
213 | zz = np.zeros([nSample,nSample])
214 | for i in range(nSample):
215 | for j in range(nSample):
216 | xx[i,j] = xList[i]
217 | yy[i,j] = yList[j]
218 | coord = np.array([xx[i,j],yy[i,j]])
219 | zz[i,j] = model(torch.from_numpy(coord).float().to(device)).item()
220 | # zz[i,j] = xx[i,j]*yy[i,j] # Plot the exact solution.
221 | if np.linalg.norm(coord-np.array([0.3,0.0]))<0.3:
222 |         zz[i,j] = np.nan # Mask points inside the hole
223 |
224 | file = open("nSample.txt","w")
225 | file.write(str(nSample))
226 |
227 | file = open("Data.txt","w")
228 | writeSolution.write(xx,yy,zz,nSample,file)
229 |
230 | edgeList2 = [[0.3*math.cos(i)+0.3,0.3*math.sin(i)] for i in thetaList]
231 | edgeList1 = [[-1.0,-1.0],[1.0,-1.0],[1.0,1.0],[-1.0,1.0],[-1.0,-1.0]]
232 | writeSolution.writeBoundary(edgeList1,edgeList2)
233 |
234 | if __name__=="__main__":
235 | main()
--------------------------------------------------------------------------------
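The stencil (u(x+h)+u(x-h)-2u(x))/h^2 behind dfdx2 and dfdy2 above reproduces the second
derivative with O(h^2) error. A quick sanity check, again a hypothetical demo, on
u(x,y) = x^2 + y^2, whose Laplacian is exactly 4:

    h = 1e-3
    u = lambda x, y: x * x + y * y
    x0, y0 = 0.2, -0.5
    d2x = (u(x0 + h, y0) + u(x0 - h, y0) - 2 * u(x0, y0)) / h**2
    d2y = (u(x0, y0 + h) + u(x0, y0 - h) - 2 * u(x0, y0)) / h**2
    print(d2x + d2y)   # ~4.0, the exact Laplacian of u

--------------------------------------------------------------------------------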
/2dpoisson-ls.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 |
10 | # Network structure
11 | class RitzNet(torch.nn.Module):
12 | def __init__(self, params):
13 | super(RitzNet, self).__init__()
14 | self.params = params
15 | self.linearIn = nn.Linear(self.params["d"], self.params["width"])
16 | self.linear = nn.ModuleList()
17 | for _ in range(params["depth"]):
18 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
19 |
20 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
21 |
22 | def forward(self, x):
23 | x = torch.tanh(self.linearIn(x)) # Match dimension
24 | for layer in self.linear:
25 | x_temp = torch.tanh(layer(x))
26 | x = x_temp
27 |
28 | return self.linearOut(x)
29 |
30 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
31 | model.train()
32 | file = open("lossData.txt","w")
33 |
34 | for step in range(params["preStep"]):
35 | # The volume integral
36 | data = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
37 |
38 | output = model(data)
39 |
40 | target = fun(params["radius"],data)
41 |
42 | loss = output-target
43 | loss = torch.mean(loss*loss)*math.pi*params["radius"]**2
44 |
45 | if step%params["writeStep"] == params["writeStep"]-1:
46 | with torch.no_grad():
47 | ref = exact(params["radius"],data)
48 | error = errorFun(output,ref,params)
49 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
50 | print("Error at Step %s is %s."%(step+1,error))
51 | file.write(str(step+1)+" "+str(error)+"\n")
52 |
53 | model.zero_grad()
54 | loss.backward()
55 |
56 | # Update the weights.
57 | preOptimizer.step()
58 | # preScheduler.step()
59 |
60 | def train(model,device,params,optimizer,scheduler):
61 | model.train()
62 |
63 | data1 = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
64 | data2 = torch.from_numpy(generateData.sampleFromSurface(params["radius"],params["bdryBatch"])).float().to(device)
65 |
66 | x_shift = torch.from_numpy(np.array([params["diff"],0.0])).float().to(device)
67 | y_shift = torch.from_numpy(np.array([0.0,params["diff"]])).float().to(device)
68 |
69 | data1_x_shift = data1+x_shift
70 | data1_y_shift = data1+y_shift
71 | data1_x_nshift = data1-x_shift
72 | data1_y_nshift = data1-y_shift
73 |
74 | for step in range(params["trainStep"]-params["preStep"]):
75 | output1 = model(data1)
76 | output1_x_shift = model(data1_x_shift)
77 | output1_y_shift = model(data1_y_shift)
78 | output1_x_nshift = model(data1_x_nshift)
79 | output1_y_nshift = model(data1_y_nshift)
80 |
81 | # Second order difference
82 | dfdx2 = (output1_x_shift+output1_x_nshift-2*output1)/(params["diff"]**2) # Use difference to approximate derivatives.
83 | dfdy2 = (output1_y_shift+output1_y_nshift-2*output1)/(params["diff"]**2)
84 |
85 | model.zero_grad()
86 |
87 | # Loss function 1
88 | fTerm = ffun(data1).to(device)
89 | loss1 = torch.mean((dfdx2+dfdy2+fTerm)*(dfdx2+dfdy2+fTerm)) * math.pi*params["radius"]**2
90 |
91 | # Loss function 2
92 | output2 = model(data2)
93 | target2 = exact(params["radius"],data2)
94 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] * 2*math.pi*params["radius"])
95 | loss = loss1+loss2
96 |
97 | if step%params["writeStep"] == params["writeStep"]-1:
98 | with torch.no_grad():
99 | target = exact(params["radius"],data1)
100 | error = errorFun(output1,target,params)
101 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
102 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
103 |       with open("lossData.txt","a") as file:
104 |         file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
105 |
106 | if step%params["sampleStep"] == params["sampleStep"]-1:
107 | data1 = torch.from_numpy(generateData.sampleFromDisk(params["radius"],params["bodyBatch"])).float().to(device)
108 | data2 = torch.from_numpy(generateData.sampleFromSurface(params["radius"],params["bdryBatch"])).float().to(device)
109 |
110 | data1_x_shift = data1+x_shift
111 | data1_y_shift = data1+y_shift
112 | data1_x_nshift = data1-x_shift
113 | data1_y_nshift = data1-y_shift
114 |
115 | if 10*(step+1)%params["trainStep"] == 0:
116 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
117 |
118 | loss.backward()
119 |
120 | optimizer.step()
121 | scheduler.step()
122 |
123 | def errorFun(output,target,params):
124 | error = output-target
125 | error = math.sqrt(torch.mean(error*error)*math.pi*params["radius"]**2)
126 | # Calculate the L2 norm error.
127 | ref = math.sqrt(torch.mean(target*target)*math.pi*params["radius"]**2)
128 | return error/ref
129 |
130 | def test(model,device,params):
131 | numQuad = params["numQuad"]
132 |
133 |   data = torch.from_numpy(generateData.sampleFromDisk(params["radius"],numQuad)).float().to(device)
134 | output = model(data)
135 | target = exact(params["radius"],data).to(device)
136 |
137 | error = output-target
138 | error = math.sqrt(torch.mean(error*error)*math.pi*params["radius"]**2)
139 | # Calculate the L2 norm error.
140 | ref = math.sqrt(torch.mean(target*target)*math.pi*params["radius"]**2)
141 | return error/ref
142 |
143 | def ffun(data):
144 | # f = 4
145 | return 4.0*torch.ones([data.shape[0],1],dtype=torch.float)
146 | # f = 0
147 | # return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
148 |
149 | def exact(r,data):
150 | # f = 4 ==> u = r^2-x^2-y^2
151 | output = r**2-torch.sum(data*data,dim=1)
152 | # f = 0 ==> u = x1*x2
153 | # output = data[:,0]*data[:,1]
154 |
155 | return output.unsqueeze(1)
156 |
157 | def rough(r,data):
158 | # A rough guess
159 | output = r**2-r*torch.sum(data*data,dim=1)**0.5
160 | # output = torch.zeros(data.shape[0],dtype=torch.float)
161 | return output.unsqueeze(1)
162 |
163 | def count_parameters(model):
164 | return sum(p.numel() for p in model.parameters())
165 |
166 | def main():
167 | # Parameters
168 | # torch.manual_seed(21)
169 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
170 |
171 | params = dict()
172 | params["radius"] = 1
173 | params["d"] = 2 # 2D
174 | params["dd"] = 1 # Scalar field
175 | params["bodyBatch"] = 1024 # Batch size
176 | params["bdryBatch"] = 1024 # Batch size for the boundary integral
177 | params["lr"] = 0.01 # Learning rate
178 | params["preLr"] = 0.01 # Learning rate (Pre-training)
179 | params["width"] = 8 # Width of layers
180 | params["depth"] = 2 # Depth of the network: depth+2
181 | params["numQuad"] = 40000 # Number of quadrature points for testing
182 | params["trainStep"] = 50000
183 | params["penalty"] = 500
184 | params["preStep"] = 0
185 | params["diff"] = 0.001
186 | params["writeStep"] = 50
187 | params["sampleStep"] = 10
188 | params["step_size"] = 5000
189 | params["gamma"] = 0.3
190 | params["decay"] = 0.00001
191 |
192 | startTime = time.time()
193 | model = RitzNet(params).to(device)
194 | print("Generating network costs %s seconds."%(time.time()-startTime))
195 |
196 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
197 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
198 | scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
199 |
200 | startTime = time.time()
201 | preTrain(model,device,params,preOptimizer,None,rough)
202 | train(model,device,params,optimizer,scheduler)
203 | print("Training costs %s seconds."%(time.time()-startTime))
204 |
205 | model.eval()
206 | testError = test(model,device,params)
207 | print("The test error (of the last model) is %s."%testError)
208 | print("The number of parameters is %s,"%count_parameters(model))
209 |
210 | torch.save(model.state_dict(),"last_model.pt")
211 |
212 | pltResult(model,device,100,params)
213 |
214 | def pltResult(model,device,nSample,params):
215 | rList = np.linspace(0,params["radius"],nSample)
216 | thetaList = np.linspace(0,math.pi*2,nSample)
217 |
218 | xx = np.zeros([nSample,nSample])
219 | yy = np.zeros([nSample,nSample])
220 | zz = np.zeros([nSample,nSample])
221 | for i in range(nSample):
222 | for j in range(nSample):
223 | xx[i,j] = rList[i]*math.cos(thetaList[j])
224 | yy[i,j] = rList[i]*math.sin(thetaList[j])
225 | coord = np.array([xx[i,j],yy[i,j]])
226 | zz[i,j] = model(torch.from_numpy(coord).float().to(device)).item()
227 | # zz[i,j] = params["radius"]**2-xx[i,j]**2-yy[i,j]**2 # Plot the exact solution.
228 |
229 | file = open("nSample.txt","w")
230 | file.write(str(nSample))
231 |
232 | file = open("Data.txt","w")
233 | writeSolution.write(xx,yy,zz,nSample,file)
234 |
235 | edgeList = [[params["radius"]*math.cos(i),params["radius"]*math.sin(i)] for i in thetaList]
236 | writeSolution.writeBoundary(edgeList)
237 |
238 | if __name__=="__main__":
239 | main()
--------------------------------------------------------------------------------
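All of these 2-D scripts decay the learning rate with StepLR: every step_size calls to
scheduler.step() multiply the rate by gamma, so after k training steps it equals
lr * gamma**(k // step_size). A short illustration with the values from 2dpoisson-ls.py:

    lr0, step_size, gamma = 0.01, 5000, 0.3
    for k in (0, 4999, 5000, 10000, 25000):
        print(k, lr0 * gamma ** (k // step_size))
    # 0 -> 0.01, 4999 -> 0.01, 5000 -> 0.003, 10000 -> 0.0009, 25000 -> 2.43e-05

--------------------------------------------------------------------------------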
/10dpoisson-ls-autograd.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR, MultiplicativeLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 | from areaVolume import areaVolume
10 |
11 | # Network structure
12 | class RitzNet(torch.nn.Module):
13 | def __init__(self, params):
14 | super(RitzNet, self).__init__()
15 | self.params = params
16 | # self.linearIn = nn.Linear(self.params["d"], self.params["width"])
17 | self.linear = nn.ModuleList()
18 | for _ in range(params["depth"]):
19 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
20 |
21 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
22 |
23 | def forward(self, x):
24 | # x = torch.tanh(self.linearIn(x)) # Match dimension
25 | for i in range(len(self.linear)//2):
26 | x_temp = torch.tanh(self.linear[2*i](x))
27 | x_temp = torch.tanh(self.linear[2*i+1](x_temp))
28 | x = x_temp+x
29 |
30 | return self.linearOut(x)
31 |
32 | def initWeights(m):
33 | if type(m) == nn.Linear:
34 | torch.nn.init.xavier_normal_(m.weight)
35 | torch.nn.init.zeros_(m.bias)
36 |
37 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
38 | model.train()
39 | file = open("lossData.txt","w")
40 |
41 | for step in range(params["preStep"]):
42 | # The volume integral
43 | data = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
44 |
45 | output = model(data)
46 |
47 | target = fun(params["radius"],data)
48 |
49 | loss = output-target
50 | loss = torch.mean(loss*loss)
51 |
52 | if step%params["writeStep"] == params["writeStep"]-1:
53 | with torch.no_grad():
54 | ref = exact(params["radius"],data)
55 | error = errorFun(output,ref,params)
56 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
57 | print("Error at Step %s is %s."%(step+1,error))
58 | file.write(str(step+1)+" "+str(error)+"\n")
59 |
60 | model.zero_grad()
61 | loss.backward()
62 |
63 | # Update the weights.
64 | preOptimizer.step()
65 | # preScheduler.step()
66 |
67 | def train(model,device,params,optimizer,scheduler):
68 | model.train()
69 |
70 | data1 = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
71 | data1.requires_grad = True
72 | data2 = torch.from_numpy(generateData.sampleFromSurface10(params["radius"],params["bdryBatch"])).float().to(device)
73 |
74 | for step in range(params["trainStep"]-params["preStep"]):
75 | output1 = model(data1)
76 |
77 | model.zero_grad()
78 |
79 | dfdx = torch.autograd.grad(output1,data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0]
80 | dfdx20 = torch.autograd.grad(dfdx[:,0].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,0].unsqueeze(1)
81 | dfdx21 = torch.autograd.grad(dfdx[:,1].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,1].unsqueeze(1)
82 | dfdx22 = torch.autograd.grad(dfdx[:,2].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,2].unsqueeze(1)
83 | dfdx23 = torch.autograd.grad(dfdx[:,3].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,3].unsqueeze(1)
84 | dfdx24 = torch.autograd.grad(dfdx[:,4].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,4].unsqueeze(1)
85 | dfdx25 = torch.autograd.grad(dfdx[:,5].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,5].unsqueeze(1)
86 | dfdx26 = torch.autograd.grad(dfdx[:,6].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,6].unsqueeze(1)
87 | dfdx27 = torch.autograd.grad(dfdx[:,7].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,7].unsqueeze(1)
88 | dfdx28 = torch.autograd.grad(dfdx[:,8].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,8].unsqueeze(1)
89 | dfdx29 = torch.autograd.grad(dfdx[:,9].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,9].unsqueeze(1)
90 | # Loss function 1
91 | fTerm = ffun(data1).to(device)
92 | loss1 = torch.mean((dfdx20+dfdx21+dfdx22+dfdx23+dfdx24+dfdx25+dfdx26+dfdx27+dfdx28+dfdx29+fTerm)*\
93 | (dfdx20+dfdx21+dfdx22+dfdx23+dfdx24+dfdx25+dfdx26+dfdx27+dfdx28+dfdx29+fTerm))
94 |
95 | # Loss function 2
96 | output2 = model(data2)
97 | target2 = exact(params["radius"],data2)
98 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] *params["area"])
99 | loss = loss1+loss2
100 |
101 | if step%params["writeStep"] == params["writeStep"]-1:
102 | with torch.no_grad():
103 | target = exact(params["radius"],data1)
104 | error = errorFun(output1,target,params)
105 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
106 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
107 |       with open("lossData.txt","a") as file:
108 |         file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
109 |
110 | if step%params["sampleStep"] == params["sampleStep"]-1:
111 | data1 = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
112 | data1.requires_grad = True
113 | data2 = torch.from_numpy(generateData.sampleFromSurface10(params["radius"],params["bdryBatch"])).float().to(device)
114 |
115 | if 10*(step+1)%params["trainStep"] == 0:
116 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
117 |
118 | loss.backward()
119 |
120 | optimizer.step()
121 | scheduler.step()
122 |
123 | def errorFun(output,target,params):
124 | error = output-target
125 | error = math.sqrt(torch.mean(error*error))
126 | # Calculate the L2 norm error.
127 | ref = math.sqrt(torch.mean(target*target))
128 | return error/ref
129 |
130 | def test(model,device,params):
131 | numQuad = params["numQuad"]
132 |
133 |   data = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],numQuad)).float().to(device)
134 | output = model(data)
135 | target = exact(params["radius"],data).to(device)
136 |
137 | error = output-target
138 | error = math.sqrt(torch.mean(error*error))
139 | # Calculate the L2 norm error.
140 | ref = math.sqrt(torch.mean(target*target))
141 | return error/ref
142 |
143 | def ffun(data):
144 | # f = 0
145 | return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
146 | # f = 20
147 | # return 20.0*torch.ones([data.shape[0],1],dtype=torch.float)
148 |
149 | def exact(r,data):
150 | # f = 20 ==> u = r^2-x^2-y^2-...
151 | # output = r**2-torch.sum(data*data,dim=1)
152 | # f = 0 ==> u = x1x2+x3x4+x5x6+...
153 | output = data[:,0]*data[:,1] + data[:,2]*data[:,3] + data[:,4]*data[:,5] + \
154 | data[:,6]*data[:,7] + data[:,8]*data[:,9]
155 | return output.unsqueeze(1)
156 |
157 | def rough(r,data):
158 | # output = r**2-r*torch.sum(data*data,dim=1)**0.5
159 | output = torch.zeros(data.shape[0],dtype=torch.float)
160 | return output.unsqueeze(1)
161 |
162 | def count_parameters(model):
163 | return sum(p.numel() for p in model.parameters()) # if p.requires_grad
164 |
165 | def main():
166 | # Parameters
167 | # torch.manual_seed(21)
168 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
169 |
170 | params = dict()
171 | params["radius"] = 1
172 | params["d"] = 10 # 10D
173 | params["dd"] = 1 # Scalar field
174 | params["bodyBatch"] = 1024 # Batch size
175 | params["bdryBatch"] = 2048 # Batch size for the boundary integral
176 | params["lr"] = 0.016 # Learning rate
177 | params["preLr"] = params["lr"] # Learning rate (Pre-training)
178 | params["width"] = 10 # Width of layers
179 | params["depth"] = 4 # Depth of the network: depth+2
180 | params["numQuad"] = 40000 # Number of quadrature points for testing
181 | params["trainStep"] = 50000
182 | params["penalty"] = 500
183 | params["preStep"] = 0
184 | params["writeStep"] = 50
185 | params["sampleStep"] = 10
186 | params["area"] = areaVolume(params["radius"],params["d"])
187 | params["step_size"] = 5000
188 | params["milestone"] = [5000,10000,20000,35000,48000]
189 | params["gamma"] = 0.5
190 | params["decay"] = 0.00001
191 |
192 | startTime = time.time()
193 | model = RitzNet(params).to(device)
194 | model.apply(initWeights)
195 | print("Generating network costs %s seconds."%(time.time()-startTime))
196 |
197 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
198 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
199 | # scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
200 | scheduler = MultiStepLR(optimizer,milestones=params["milestone"],gamma=params["gamma"])
201 | # schedulerFun = lambda epoch: ((epoch+100)/(epoch+101))
202 | # scheduler = MultiplicativeLR(optimizer,lr_lambda=schedulerFun)
203 |
204 | startTime = time.time()
205 | preTrain(model,device,params,preOptimizer,None,rough)
206 | train(model,device,params,optimizer,scheduler)
207 | print("Training costs %s seconds."%(time.time()-startTime))
208 |
209 | model.eval()
210 | testError = test(model,device,params)
211 | print("The test error (of the last model) is %s."%testError)
212 | print("The number of parameters is %s,"%count_parameters(model))
213 |
214 | torch.save(model.state_dict(),"last_model.pt")
215 |
216 | if __name__=="__main__":
217 | main()
--------------------------------------------------------------------------------
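areaVolume.py appears in the tree but is not shown here. From its use as the boundary
measure params["area"] in loss2 of the 10-D scripts, areaVolume(r, d) should return the
surface area of the boundary sphere of the d-dimensional ball,
2*pi^(d/2)*r^(d-1)/Gamma(d/2). A sketch consistent with that reading; the actual file may
compute it differently:

    import math

    def areaVolume(r, d):
        # Surface area of the (d-1)-sphere of radius r bounding the d-ball.
        return 2 * math.pi ** (d / 2) * r ** (d - 1) / math.gamma(d / 2)

    print(areaVolume(1, 10))   # ~25.50 for the unit sphere in R^10

--------------------------------------------------------------------------------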
/10dpoisson.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR, MultiplicativeLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 | from areaVolume import areaVolume
10 |
11 | # Network structure
12 | class RitzNet(torch.nn.Module):
13 | def __init__(self, params):
14 | super(RitzNet, self).__init__()
15 | self.params = params
16 | # self.linearIn = nn.Linear(self.params["d"], self.params["width"])
17 | self.linear = nn.ModuleList()
18 | for _ in range(params["depth"]):
19 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
20 |
21 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
22 |
23 | def forward(self, x):
24 | # x = F.softplus(self.linearIn(x)) # Match dimension
25 | for layer in self.linear:
26 | x_temp = F.softplus(layer(x))
27 | x = x_temp+x
28 |
29 | return self.linearOut(x)
30 |
31 | def initWeights(m):
32 | if type(m) == nn.Linear:
33 | torch.nn.init.xavier_normal_(m.weight)
34 | torch.nn.init.zeros_(m.bias)
35 |
36 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
37 | model.train()
38 | file = open("lossData.txt","w")
39 |
40 | for step in range(params["preStep"]):
41 | # The volume integral
42 | data = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
43 |
44 | output = model(data)
45 |
46 | target = fun(params["radius"],data)
47 |
48 | loss = output-target
49 | loss = torch.mean(loss*loss)
50 |
51 | if step%params["writeStep"] == params["writeStep"]-1:
52 | with torch.no_grad():
53 | ref = exact(params["radius"],data)
54 | error = errorFun(output,ref,params)
55 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
56 | print("Error at Step %s is %s."%(step+1,error))
57 | file.write(str(step+1)+" "+str(error)+"\n")
58 |
59 | model.zero_grad()
60 | loss.backward()
61 |
62 | # Update the weights.
63 | preOptimizer.step()
64 | # preScheduler.step()
65 |
66 | def train(model,device,params,optimizer,scheduler):
67 | model.train()
68 |
69 | data1 = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
70 | data2 = torch.from_numpy(generateData.sampleFromSurface10(params["radius"],params["bdryBatch"])).float().to(device)
71 | x_shift = torch.from_numpy(np.eye(10)*params["diff"]).float().to(device)
72 | data1_shift0 = data1+x_shift[0]
73 | data1_shift1 = data1+x_shift[1]
74 | data1_shift2 = data1+x_shift[2]
75 | data1_shift3 = data1+x_shift[3]
76 | data1_shift4 = data1+x_shift[4]
77 | data1_shift5 = data1+x_shift[5]
78 | data1_shift6 = data1+x_shift[6]
79 | data1_shift7 = data1+x_shift[7]
80 | data1_shift8 = data1+x_shift[8]
81 | data1_shift9 = data1+x_shift[9]
82 |
83 | for step in range(params["trainStep"]-params["preStep"]):
84 | output1 = model(data1)
85 | output1_shift0 = model(data1_shift0)
86 | output1_shift1 = model(data1_shift1)
87 | output1_shift2 = model(data1_shift2)
88 | output1_shift3 = model(data1_shift3)
89 | output1_shift4 = model(data1_shift4)
90 | output1_shift5 = model(data1_shift5)
91 | output1_shift6 = model(data1_shift6)
92 | output1_shift7 = model(data1_shift7)
93 | output1_shift8 = model(data1_shift8)
94 | output1_shift9 = model(data1_shift9)
95 |
96 | dfdx0 = (output1_shift0-output1)/params["diff"] # Forward differences approximate the first derivatives.
97 | dfdx1 = (output1_shift1-output1)/params["diff"]
98 | dfdx2 = (output1_shift2-output1)/params["diff"]
99 | dfdx3 = (output1_shift3-output1)/params["diff"]
100 | dfdx4 = (output1_shift4-output1)/params["diff"]
101 | dfdx5 = (output1_shift5-output1)/params["diff"]
102 | dfdx6 = (output1_shift6-output1)/params["diff"]
103 | dfdx7 = (output1_shift7-output1)/params["diff"]
104 | dfdx8 = (output1_shift8-output1)/params["diff"]
105 | dfdx9 = (output1_shift9-output1)/params["diff"]
106 |
107 | model.zero_grad()
108 |
109 | # Loss function 1
110 | fTerm = ffun(data1).to(device)
111 | loss1 = torch.mean(0.5*(dfdx0*dfdx0 + dfdx1*dfdx1 + dfdx2*dfdx2 +\
112 | dfdx3*dfdx3 + dfdx4*dfdx4 + dfdx5*dfdx5 + dfdx6*dfdx6 +\
113 | dfdx7*dfdx7 + dfdx8*dfdx8 + dfdx9*dfdx9)-fTerm*output1)
114 |
115 | # Loss function 2
116 | output2 = model(data2)
117 | target2 = exact(params["radius"],data2)
118 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] *params["area"])
119 | loss = loss1+loss2
120 |
121 | if step%params["writeStep"] == params["writeStep"]-1:
122 | with torch.no_grad():
123 | target = exact(params["radius"],data1)
124 | error = errorFun(output1,target,params)
125 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
126 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
127 | file = open("lossData.txt","a")
128 | file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
129 |
130 | if step%params["sampleStep"] == params["sampleStep"]-1:
131 | data1 = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
132 | data2 = torch.from_numpy(generateData.sampleFromSurface10(params["radius"],params["bdryBatch"])).float().to(device)
133 |
134 | data1_shift0 = data1+x_shift[0]
135 | data1_shift1 = data1+x_shift[1]
136 | data1_shift2 = data1+x_shift[2]
137 | data1_shift3 = data1+x_shift[3]
138 | data1_shift4 = data1+x_shift[4]
139 | data1_shift5 = data1+x_shift[5]
140 | data1_shift6 = data1+x_shift[6]
141 | data1_shift7 = data1+x_shift[7]
142 | data1_shift8 = data1+x_shift[8]
143 | data1_shift9 = data1+x_shift[9]
144 |
145 | if 10*(step+1)%params["trainStep"] == 0:
146 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
147 |
148 | loss.backward()
149 |
150 | optimizer.step()
151 | scheduler.step()
152 |
153 | def errorFun(output,target,params):
154 | error = output-target
155 | error = math.sqrt(torch.mean(error*error))
156 | # Relative L2 error: ||output - target|| / ||target||.
157 | ref = math.sqrt(torch.mean(target*target))
158 | return error/ref
159 |
160 | def test(model,device,params):
161 | numQuad = params["numQuad"]
162 |
163 | data = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],numQuad)).float().to(device)
164 | output = model(data)
165 | target = exact(params["radius"],data).to(device)
166 |
167 | error = output-target
168 | error = math.sqrt(torch.mean(error*error))
169 | # Relative L2 error: ||output - target|| / ||target||.
170 | ref = math.sqrt(torch.mean(target*target))
171 | return error/ref
172 |
173 | def ffun(data):
174 | # f = 0
175 | return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
176 | # f = 20
177 | # return 20.0*torch.ones([data.shape[0],1],dtype=torch.float)
178 |
179 | def exact(r,data):
180 | # f = 20 ==> u = r^2-x^2-y^2-...
181 | # output = r**2-torch.sum(data*data,dim=1)
182 | # f = 0 ==> u = x1x2+x3x4+x5x6+...
183 | output = data[:,0]*data[:,1] + data[:,2]*data[:,3] + data[:,4]*data[:,5] + \
184 | data[:,6]*data[:,7] + data[:,8]*data[:,9]
185 | return output.unsqueeze(1)
186 |
187 | def rough(r,data):
188 | # output = r**2-r*torch.sum(data*data,dim=1)**0.5
189 | output = torch.zeros(data.shape[0],dtype=torch.float)
190 | return output.unsqueeze(1)
191 |
192 | def count_parameters(model):
193 | return sum(p.numel() for p in model.parameters()) # add "if p.requires_grad" to count trainable parameters only
194 |
195 | def main():
196 | # Parameters
197 | torch.manual_seed(21)
198 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
199 |
200 | params = dict()
201 | params["radius"] = 1
202 | params["d"] = 10 # 10D
203 | params["dd"] = 1 # Scalar field
204 | params["bodyBatch"] = 1024 # Batch size
205 | params["bdryBatch"] = 2048 # Batch size for the boundary integral
206 | params["lr"] = 0.016 # Learning rate
207 | params["preLr"] = params["lr"] # Learning rate (Pre-training)
208 | params["width"] = 10 # Width of layers
209 | params["depth"] = 4 # Depth of the network: depth+2
210 | params["numQuad"] = 40000 # Number of quadrature points for testing
211 | params["trainStep"] = 50000
212 | params["penalty"] = 500
213 | params["preStep"] = 0
214 | params["diff"] = 0.001
215 | params["writeStep"] = 50
216 | params["sampleStep"] = 10
217 | params["area"] = areaVolume(params["radius"],params["d"])
218 | params["step_size"] = 5000
219 | params["milestone"] = [5000,10000,20000,35000,48000]
220 | params["gamma"] = 0.5
221 | params["decay"] = 0.0001
222 |
223 | startTime = time.time()
224 | model = RitzNet(params).to(device)
225 | # model.apply(initWeights)
226 | print("Generating network costs %s seconds."%(time.time()-startTime))
227 |
228 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
229 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
230 | # scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
231 | scheduler = MultiStepLR(optimizer,milestones=params["milestone"],gamma=params["gamma"])
232 | # schedulerFun = lambda epoch: ((epoch+100)/(epoch+101))
233 | # scheduler = MultiplicativeLR(optimizer,lr_lambda=schedulerFun)
234 |
235 | startTime = time.time()
236 | preTrain(model,device,params,preOptimizer,None,rough)
237 | train(model,device,params,optimizer,scheduler)
238 | print("Training costs %s seconds."%(time.time()-startTime))
239 |
240 | model.eval()
241 | testError = test(model,device,params)
242 | print("The test error (of the last model) is %s."%testError)
243 | print("The number of parameters is %s,"%count_parameters(model))
244 |
245 | torch.save(model.state_dict(),"last_model.pt")
246 |
247 | if __name__=="__main__":
248 | main()
--------------------------------------------------------------------------------
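10dpoisson.py unrolls ten shifted forward passes to form forward-difference gradients for the Deep Ritz energy. The same computation can be batched; a hedged sketch with hypothetical helper names (forward_diff_grad, ritz_energy), assuming a model mapping [N, d] to [N, 1] and f of shape [N, 1]:

    import torch

    def forward_diff_grad(model, data, diff):
        # One batched forward pass over all d shifted copies of the data.
        n, d = data.shape
        eye = torch.eye(d, device=data.device) * diff
        base = model(data)                                         # [N, 1]
        shifted = model((data.unsqueeze(1) + eye).reshape(-1, d))  # [N*d, 1]
        return (shifted.reshape(n, d) - base) / diff               # [N, d]

    def ritz_energy(model, data, f, diff):
        # Deep Ritz loss: mean(0.5*|grad u|^2 - f*u) over the sampled points.
        grad = forward_diff_grad(model, data, diff)
        u = model(data)
        return torch.mean(0.5 * torch.sum(grad * grad, dim=1, keepdim=True) - f * u)
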
/10dpoisson-cube-ls-autograd.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 | from areaVolume import areaVolume
10 |
11 | # Network structure
12 | class RitzNet(torch.nn.Module):
13 | def __init__(self, params):
14 | super(RitzNet, self).__init__()
15 | self.params = params
16 | # self.linearIn = nn.Linear(self.params["d"], self.params["width"])
17 | self.linear = nn.ModuleList()
18 | for _ in range(params["depth"]):
19 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
20 |
21 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
22 |
23 | def forward(self, x):
24 | # x = torch.tanh(self.linearIn(x)) # Match dimension
25 | for i in range(len(self.linear)//2):
26 | x_temp = torch.tanh(self.linear[2*i](x))
27 | x_temp = torch.tanh(self.linear[2*i+1](x_temp))
28 | x = x_temp+x
29 |
30 | return self.linearOut(x)
31 |
32 | def initWeights(m):
33 | if type(m) == nn.Linear:
34 | torch.nn.init.xavier_normal_(m.weight)
35 | torch.nn.init.zeros_(m.bias)
36 |
37 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
38 | model.train()
39 | file = open("lossData.txt","w")
40 |
41 | for step in range(params["preStep"]):
42 | # The volume integral
43 | data = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
44 |
45 | output = model(data)
46 |
47 | target = fun(params["radius"],data)
48 |
49 | loss = output-target
50 | loss = torch.mean(loss*loss)
51 |
52 | if step%params["writeStep"] == params["writeStep"]-1:
53 | with torch.no_grad():
54 | ref = exact(params["radius"],data)
55 | error = errorFun(output,ref,params)
56 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
57 | print("Error at Step %s is %s."%(step+1,error))
58 | file.write(str(step+1)+" "+str(error)+"\n")
59 |
60 | model.zero_grad()
61 | loss.backward()
62 |
63 | # Update the weights.
64 | preOptimizer.step()
65 | # preScheduler.step()
66 |
67 | def train(model,device,params,optimizer,scheduler):
68 | model.train()
69 |
70 | data1 = torch.rand(params["bodyBatch"],params["d"]).float().to(device)
71 | data1.requires_grad = True
72 | data2 = torch.rand(2*params["d"]*(params["bdryBatch"]//(2*params["d"])),params["d"]).float().to(device)
73 | temp = params["bdryBatch"]//(2*params["d"]) # Points per face; the unit cube has 2*d faces.
74 | for i in range(params["d"]):
75 | data2[(2*i+0)*temp:(2*i+1)*temp,i] = 0.0
76 | data2[(2*i+1)*temp:(2*i+2)*temp,i] = 1.0
77 |
78 | for step in range(params["trainStep"]-params["preStep"]):
79 | output1 = model(data1)
80 |
81 | model.zero_grad()
82 |
83 | dfdx = torch.autograd.grad(output1,data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0]
84 | dfdx20 = torch.autograd.grad(dfdx[:,0].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,0].unsqueeze(1)
85 | dfdx21 = torch.autograd.grad(dfdx[:,1].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,1].unsqueeze(1)
86 | dfdx22 = torch.autograd.grad(dfdx[:,2].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,2].unsqueeze(1)
87 | dfdx23 = torch.autograd.grad(dfdx[:,3].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,3].unsqueeze(1)
88 | dfdx24 = torch.autograd.grad(dfdx[:,4].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,4].unsqueeze(1)
89 | dfdx25 = torch.autograd.grad(dfdx[:,5].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,5].unsqueeze(1)
90 | dfdx26 = torch.autograd.grad(dfdx[:,6].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,6].unsqueeze(1)
91 | dfdx27 = torch.autograd.grad(dfdx[:,7].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,7].unsqueeze(1)
92 | dfdx28 = torch.autograd.grad(dfdx[:,8].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,8].unsqueeze(1)
93 | dfdx29 = torch.autograd.grad(dfdx[:,9].unsqueeze(1),data1,grad_outputs=torch.ones_like(output1),retain_graph=True,create_graph=True,only_inputs=True)[0][:,9].unsqueeze(1)
94 | # Loss function 1
95 | fTerm = ffun(data1).to(device)
96 | loss1 = torch.mean((dfdx20+dfdx21+dfdx22+dfdx23+dfdx24+dfdx25+dfdx26+dfdx27+dfdx28+dfdx29+fTerm)*\
97 | (dfdx20+dfdx21+dfdx22+dfdx23+dfdx24+dfdx25+dfdx26+dfdx27+dfdx28+dfdx29+fTerm))
98 |
99 | # Loss function 2
100 | output2 = model(data2)
101 | target2 = exact(params["radius"],data2)
102 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] *params["area"])
103 | loss = loss1+loss2
104 |
105 | if step%params["writeStep"] == params["writeStep"]-1:
106 | with torch.no_grad():
107 | target = exact(params["radius"],data1)
108 | error = errorFun(output1,target,params)
109 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
110 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
111 | file = open("lossData.txt","a")
112 | file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
113 |
114 | if step%params["sampleStep"] == params["sampleStep"]-1:
115 | data1 = torch.rand(params["bodyBatch"],params["d"]).float().to(device)
116 | data1.requires_grad = True
117 | data2 = torch.rand(2*params["d"]*(params["bdryBatch"]//(2*params["d"])),params["d"]).float().to(device)
118 | temp = params["bdryBatch"]//(2*params["d"])
119 | for i in range(params["d"]):
120 | data2[(2*i+0)*temp:(2*i+1)*temp,i] = 0.0
121 | data2[(2*i+1)*temp:(2*i+2)*temp,i] = 1.0
122 |
123 | if 10*(step+1)%params["trainStep"] == 0:
124 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
125 |
126 | loss.backward()
127 |
128 | optimizer.step()
129 | scheduler.step()
130 |
131 | def errorFun(output,target,params):
132 | error = output-target
133 | error = math.sqrt(torch.mean(error*error))
134 | # Relative L2 error: ||output - target|| / ||target||.
135 | ref = math.sqrt(torch.mean(target*target))
136 | return error/ref
137 |
138 | def test(model,device,params):
139 | numQuad = params["numQuad"]
140 |
141 | data = torch.rand(numQuad,params["d"]).float().to(device)
142 | output = model(data)
143 | target = exact(params["radius"],data).to(device)
144 |
145 | error = output-target
146 | error = math.sqrt(torch.mean(error*error))
147 | # Relative L2 error: ||output - target|| / ||target||.
148 | ref = math.sqrt(torch.mean(target*target))
149 | return error/ref
150 |
151 | def ffun(data):
152 | # f = 0
153 | return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
154 | # f = 20
155 | # return 20.0*torch.ones([data.shape[0],1],dtype=torch.float)
156 |
157 | def exact(r,data):
158 | # f = 20 ==> u = r^2-x^2-y^2-...
159 | # output = r**2-torch.sum(data*data,dim=1)
160 | # f = 0 ==> u = x1x2+x3x4+x5x6+...
161 | output = data[:,0]*data[:,1] + data[:,2]*data[:,3] + data[:,4]*data[:,5] + \
162 | data[:,6]*data[:,7] + data[:,8]*data[:,9]
163 | return output.unsqueeze(1)
164 |
165 | def rough(r,data):
166 | # output = r**2-r*torch.sum(data*data,dim=1)**0.5
167 | output = torch.zeros(data.shape[0],dtype=torch.float)
168 | return output.unsqueeze(1)
169 |
170 | def count_parameters(model):
171 | return sum(p.numel() for p in model.parameters()) # add "if p.requires_grad" to count trainable parameters only
172 |
173 | def main():
174 | # Parameters
175 | # torch.manual_seed(21)
176 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
177 |
178 | params = dict()
179 | params["radius"] = 1
180 | params["d"] = 10 # 10D
181 | params["dd"] = 1 # Scalar field
182 | params["bodyBatch"] = 1024 # Batch size
183 | params["bdryBatch"] = 2000 # Batch size for the boundary integral
184 | params["lr"] = 0.016 # Learning rate
185 | params["preLr"] = params["lr"] # Learning rate (Pre-training)
186 | params["width"] = 10 # Width of layers
187 | params["depth"] = 4 # Depth of the network: depth+2
188 | params["numQuad"] = 40000 # Number of quadrature points for testing
189 | params["trainStep"] = 50000
190 | params["penalty"] = 500
191 | params["preStep"] = 0
192 | params["writeStep"] = 50
193 | params["sampleStep"] = 10
194 | params["area"] = 20
195 | params["step_size"] = 5000
196 | params["milestone"] = [5000,10000,20000,35000,48000]
197 | params["gamma"] = 0.5
198 | params["decay"] = 0.00001
199 |
200 | startTime = time.time()
201 | model = RitzNet(params).to(device)
202 | model.apply(initWeights)
203 | print("Generating network costs %s seconds."%(time.time()-startTime))
204 |
205 | # torch.seed()
206 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
207 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
208 | # scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
209 | scheduler = MultiStepLR(optimizer,milestones=params["milestone"],gamma=params["gamma"])
210 |
211 | startTime = time.time()
212 | preTrain(model,device,params,preOptimizer,None,rough)
213 | train(model,device,params,optimizer,scheduler)
214 | print("Training costs %s seconds."%(time.time()-startTime))
215 |
216 | model.eval()
217 | testError = test(model,device,params)
218 | print("The test error (of the last model) is %s."%testError)
219 | print("The number of parameters is %s,"%count_parameters(model))
220 |
221 | torch.save(model.state_dict(),"last_model.pt")
222 |
223 | if __name__=="__main__":
224 | main()
--------------------------------------------------------------------------------
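10dpoisson-cube-ls-autograd.py computes the ten diagonal second derivatives with one unrolled autograd call each. The Laplacian admits a loop form; a sketch assuming a scalar-output model and data created with requires_grad=True:

    import torch

    def laplacian(model, data):
        u = model(data)                                          # [N, 1]
        grad = torch.autograd.grad(u, data,
                                   grad_outputs=torch.ones_like(u),
                                   create_graph=True)[0]         # [N, d]
        lap = torch.zeros_like(u)
        for i in range(data.shape[1]):
            lap = lap + torch.autograd.grad(grad[:, i].sum(), data,
                                            create_graph=True)[0][:, i:i+1]
        return lap                                               # [N, 1]

The least-squares interior loss of the script is then torch.mean((laplacian(model, x) + f) ** 2).
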
/10dpoisson-cube.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 | from areaVolume import areaVolume
10 |
11 | # Network structure
12 | class RitzNet(torch.nn.Module):
13 | def __init__(self, params):
14 | super(RitzNet, self).__init__()
15 | self.params = params
16 | # self.linearIn = nn.Linear(self.params["d"], self.params["width"])
17 | self.linear = nn.ModuleList()
18 | for _ in range(params["depth"]):
19 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
20 |
21 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
22 |
23 | def forward(self, x):
24 | # x = torch.tanh(self.linearIn(x)) # Match dimension
25 | for i in range(len(self.linear)//2):
26 | x_temp = torch.tanh(self.linear[2*i](x))
27 | x_temp = torch.tanh(self.linear[2*i+1](x_temp))
28 | x = x_temp+x
29 |
30 | return self.linearOut(x)
31 |
32 | def initWeights(m):
33 | if type(m) == nn.Linear:
34 | torch.nn.init.xavier_normal_(m.weight)
35 | torch.nn.init.zeros_(m.bias)
36 |
37 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
38 | model.train()
39 | file = open("lossData.txt","w")
40 |
41 | for step in range(params["preStep"]):
42 | # The volume integral
43 | data = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
44 |
45 | output = model(data)
46 |
47 | target = fun(params["radius"],data)
48 |
49 | loss = output-target
50 | loss = torch.mean(loss*loss)
51 |
52 | if step%params["writeStep"] == params["writeStep"]-1:
53 | with torch.no_grad():
54 | ref = exact(params["radius"],data)
55 | error = errorFun(output,ref,params)
56 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
57 | print("Error at Step %s is %s."%(step+1,error))
58 | file.write(str(step+1)+" "+str(error)+"\n")
59 |
60 | model.zero_grad()
61 | loss.backward()
62 |
63 | # Update the weights.
64 | preOptimizer.step()
65 | # preScheduler.step()
66 |
67 | def train(model,device,params,optimizer,scheduler):
68 | model.train()
69 |
70 | data1 = torch.rand(params["bodyBatch"],params["d"]).float().to(device)
71 | data2 = torch.rand(2*params["d"]*(params["bdryBatch"]//(2*params["d"])),params["d"]).float().to(device)
72 | temp = params["bdryBatch"]//(2*params["d"]) # Points per face; the unit cube has 2*d faces.
73 | for i in range(params["d"]):
74 | data2[(2*i+0)*temp:(2*i+1)*temp,i] = 0.0
75 | data2[(2*i+1)*temp:(2*i+2)*temp,i] = 1.0
76 |
77 | x_shift = torch.from_numpy(np.eye(10)*params["diff"]).float().to(device)
78 | data1_shift0 = data1+x_shift[0]
79 | data1_shift1 = data1+x_shift[1]
80 | data1_shift2 = data1+x_shift[2]
81 | data1_shift3 = data1+x_shift[3]
82 | data1_shift4 = data1+x_shift[4]
83 | data1_shift5 = data1+x_shift[5]
84 | data1_shift6 = data1+x_shift[6]
85 | data1_shift7 = data1+x_shift[7]
86 | data1_shift8 = data1+x_shift[8]
87 | data1_shift9 = data1+x_shift[9]
88 |
89 | for step in range(params["trainStep"]-params["preStep"]):
90 | output1 = model(data1)
91 | output1_shift0 = model(data1_shift0)
92 | output1_shift1 = model(data1_shift1)
93 | output1_shift2 = model(data1_shift2)
94 | output1_shift3 = model(data1_shift3)
95 | output1_shift4 = model(data1_shift4)
96 | output1_shift5 = model(data1_shift5)
97 | output1_shift6 = model(data1_shift6)
98 | output1_shift7 = model(data1_shift7)
99 | output1_shift8 = model(data1_shift8)
100 | output1_shift9 = model(data1_shift9)
101 |
102 | dfdx0 = (output1_shift0-output1)/params["diff"] # Forward differences approximate the first derivatives.
103 | dfdx1 = (output1_shift1-output1)/params["diff"]
104 | dfdx2 = (output1_shift2-output1)/params["diff"]
105 | dfdx3 = (output1_shift3-output1)/params["diff"]
106 | dfdx4 = (output1_shift4-output1)/params["diff"]
107 | dfdx5 = (output1_shift5-output1)/params["diff"]
108 | dfdx6 = (output1_shift6-output1)/params["diff"]
109 | dfdx7 = (output1_shift7-output1)/params["diff"]
110 | dfdx8 = (output1_shift8-output1)/params["diff"]
111 | dfdx9 = (output1_shift9-output1)/params["diff"]
112 |
113 | model.zero_grad()
114 |
115 | # Loss function 1
116 | fTerm = ffun(data1).to(device)
117 | loss1 = torch.mean(0.5*(dfdx0*dfdx0 + dfdx1*dfdx1 + dfdx2*dfdx2 +\
118 | dfdx3*dfdx3 + dfdx4*dfdx4 + dfdx5*dfdx5 + dfdx6*dfdx6 +\
119 | dfdx7*dfdx7 + dfdx8*dfdx8 + dfdx9*dfdx9)-fTerm*output1)
120 |
121 | # Loss function 2
122 | output2 = model(data2)
123 | target2 = exact(params["radius"],data2)
124 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] *params["area"])
125 | loss = loss1+loss2
126 |
127 | if step%params["writeStep"] == params["writeStep"]-1:
128 | with torch.no_grad():
129 | target = exact(params["radius"],data1)
130 | error = errorFun(output1,target,params)
131 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
132 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
133 | file = open("lossData.txt","a")
134 | file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
135 |
136 | if step%params["sampleStep"] == params["sampleStep"]-1:
137 | data1 = torch.rand(params["bodyBatch"],params["d"]).float().to(device)
138 | data2 = torch.rand(2*params["d"]*(params["bdryBatch"]//(2*params["d"])),params["d"]).float().to(device)
139 | temp = params["bdryBatch"]//(2*params["d"])
140 | for i in range(params["d"]):
141 | data2[(2*i+0)*temp:(2*i+1)*temp,i] = 0.0
142 | data2[(2*i+1)*temp:(2*i+2)*temp,i] = 1.0
143 |
144 | data1_shift0 = data1+x_shift[0]
145 | data1_shift1 = data1+x_shift[1]
146 | data1_shift2 = data1+x_shift[2]
147 | data1_shift3 = data1+x_shift[3]
148 | data1_shift4 = data1+x_shift[4]
149 | data1_shift5 = data1+x_shift[5]
150 | data1_shift6 = data1+x_shift[6]
151 | data1_shift7 = data1+x_shift[7]
152 | data1_shift8 = data1+x_shift[8]
153 | data1_shift9 = data1+x_shift[9]
154 |
155 | if 10*(step+1)%params["trainStep"] == 0:
156 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
157 |
158 | loss.backward()
159 |
160 | optimizer.step()
161 | scheduler.step()
162 |
163 | def errorFun(output,target,params):
164 | error = output-target
165 | error = math.sqrt(torch.mean(error*error))
166 | # Relative L2 error: ||output - target|| / ||target||.
167 | ref = math.sqrt(torch.mean(target*target))
168 | return error/ref
169 |
170 | def test(model,device,params):
171 | numQuad = params["numQuad"]
172 |
173 | data = torch.rand(numQuad,params["d"]).float().to(device)
174 | output = model(data)
175 | target = exact(params["radius"],data).to(device)
176 |
177 | error = output-target
178 | error = math.sqrt(torch.mean(error*error))
179 | # Relative L2 error: ||output - target|| / ||target||.
180 | ref = math.sqrt(torch.mean(target*target))
181 | return error/ref
182 |
183 | def ffun(data):
184 | # f = 0
185 | return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
186 | # f = 20
187 | # return 20.0*torch.ones([data.shape[0],1],dtype=torch.float)
188 |
189 | def exact(r,data):
190 | # f = 20 ==> u = r^2-x^2-y^2-...
191 | # output = r**2-torch.sum(data*data,dim=1)
192 | # f = 0 ==> u = x1x2+x3x4+x5x6+...
193 | output = data[:,0]*data[:,1] + data[:,2]*data[:,3] + data[:,4]*data[:,5] + \
194 | data[:,6]*data[:,7] + data[:,8]*data[:,9]
195 | return output.unsqueeze(1)
196 |
197 | def rough(r,data):
198 | # output = r**2-r*torch.sum(data*data,dim=1)**0.5
199 | output = torch.zeros(data.shape[0],dtype=torch.float)
200 | return output.unsqueeze(1)
201 |
202 | def count_parameters(model):
203 | return sum(p.numel() for p in model.parameters()) # add "if p.requires_grad" to count trainable parameters only
204 |
205 | def main():
206 | # Parameters
207 | # torch.manual_seed(21)
208 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
209 |
210 | params = dict()
211 | params["radius"] = 1
212 | params["d"] = 10 # 10D
213 | params["dd"] = 1 # Scalar field
214 | params["bodyBatch"] = 1024 # Batch size
215 | params["bdryBatch"] = 2000 # Batch size for the boundary integral
216 | params["lr"] = 0.016 # Learning rate
217 | params["preLr"] = params["lr"] # Learning rate (Pre-training)
218 | params["width"] = 10 # Width of layers
219 | params["depth"] = 4 # Depth of the network: depth+2
220 | params["numQuad"] = 40000 # Number of quadrature points for testing
221 | params["trainStep"] = 50000
222 | params["penalty"] = 500
223 | params["preStep"] = 0
224 | params["diff"] = 0.001
225 | params["writeStep"] = 50
226 | params["sampleStep"] = 10
227 | params["area"] = 20
228 | params["step_size"] = 5000
229 | params["milestone"] = [5000,10000,20000,35000,48000]
230 | params["gamma"] = 0.5
231 | params["decay"] = 0.00001
232 |
233 | startTime = time.time()
234 | model = RitzNet(params).to(device)
235 | model.apply(initWeights)
236 | print("Generating network costs %s seconds."%(time.time()-startTime))
237 |
238 | # torch.seed()
239 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
240 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
241 | # scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
242 | scheduler = MultiStepLR(optimizer,milestones=params["milestone"],gamma=params["gamma"])
243 |
244 | startTime = time.time()
245 | preTrain(model,device,params,preOptimizer,None,rough)
246 | train(model,device,params,optimizer,scheduler)
247 | print("Training costs %s seconds."%(time.time()-startTime))
248 |
249 | model.eval()
250 | testError = test(model,device,params)
251 | print("The test error (of the last model) is %s."%testError)
252 | print("The number of parameters is %s,"%count_parameters(model))
253 |
254 | torch.save(model.state_dict(),"last_model.pt")
255 |
256 | if __name__=="__main__":
257 | main()
--------------------------------------------------------------------------------
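10dpoisson-cube.py draws boundary samples inline by pinning one coordinate per pair of opposite faces. Factored into a standalone helper (hypothetical name sample_cube_boundary), the same logic reads:

    import torch

    def sample_cube_boundary(n_per_face, d, device="cpu"):
        # 2*d faces of the unit cube [0,1]^d; total surface area 2*d,
        # which is the 20 used for params["area"] when d = 10.
        pts = torch.rand(2 * d * n_per_face, d, device=device)
        for i in range(d):
            pts[(2 * i) * n_per_face:(2 * i + 1) * n_per_face, i] = 0.0
            pts[(2 * i + 1) * n_per_face:(2 * i + 2) * n_per_face, i] = 1.0
        return pts
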
/10dpoisson-ls.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR, MultiplicativeLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 | from areaVolume import areaVolume
10 |
11 | # Network structure
12 | class RitzNet(torch.nn.Module):
13 | def __init__(self, params):
14 | super(RitzNet, self).__init__()
15 | self.params = params
16 | # self.linearIn = nn.Linear(self.params["d"], self.params["width"])
17 | self.linear = nn.ModuleList()
18 | for _ in range(params["depth"]):
19 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
20 |
21 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
22 |
23 | def forward(self, x):
24 | # x = torch.tanh(self.linearIn(x)) # Match dimension
25 | for i in range(len(self.linear)//2):
26 | x_temp = torch.tanh(self.linear[2*i](x))
27 | x_temp = torch.tanh(self.linear[2*i+1](x_temp))
28 | x = x_temp+x
29 |
30 | return self.linearOut(x)
31 |
32 | def initWeights(m):
33 | if type(m) == nn.Linear:
34 | torch.nn.init.xavier_normal_(m.weight)
35 | torch.nn.init.zeros_(m.bias)
36 |
37 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
38 | model.train()
39 | file = open("lossData.txt","w")
40 |
41 | for step in range(params["preStep"]):
42 | # The volume integral
43 | data = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
44 |
45 | output = model(data)
46 |
47 | target = fun(params["radius"],data)
48 |
49 | loss = output-target
50 | loss = torch.mean(loss*loss)
51 |
52 | if step%params["writeStep"] == params["writeStep"]-1:
53 | with torch.no_grad():
54 | ref = exact(params["radius"],data)
55 | error = errorFun(output,ref,params)
56 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
57 | print("Error at Step %s is %s."%(step+1,error))
58 | file.write(str(step+1)+" "+str(error)+"\n")
59 |
60 | model.zero_grad()
61 | loss.backward()
62 |
63 | # Update the weights.
64 | preOptimizer.step()
65 | # preScheduler.step()
66 |
67 | def train(model,device,params,optimizer,scheduler):
68 | model.train()
69 |
70 | data1 = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
71 | data2 = torch.from_numpy(generateData.sampleFromSurface10(params["radius"],params["bdryBatch"])).float().to(device)
72 | x_shift = torch.from_numpy(np.eye(10)*params["diff"]).float().to(device)
73 | data1_shift0 = data1+x_shift[0]
74 | data1_shift1 = data1+x_shift[1]
75 | data1_shift2 = data1+x_shift[2]
76 | data1_shift3 = data1+x_shift[3]
77 | data1_shift4 = data1+x_shift[4]
78 | data1_shift5 = data1+x_shift[5]
79 | data1_shift6 = data1+x_shift[6]
80 | data1_shift7 = data1+x_shift[7]
81 | data1_shift8 = data1+x_shift[8]
82 | data1_shift9 = data1+x_shift[9]
83 | data1_nshift0 = data1-x_shift[0]
84 | data1_nshift1 = data1-x_shift[1]
85 | data1_nshift2 = data1-x_shift[2]
86 | data1_nshift3 = data1-x_shift[3]
87 | data1_nshift4 = data1-x_shift[4]
88 | data1_nshift5 = data1-x_shift[5]
89 | data1_nshift6 = data1-x_shift[6]
90 | data1_nshift7 = data1-x_shift[7]
91 | data1_nshift8 = data1-x_shift[8]
92 | data1_nshift9 = data1-x_shift[9]
93 |
94 | for step in range(params["trainStep"]-params["preStep"]):
95 | output1 = model(data1)
96 | output1_shift0 = model(data1_shift0)
97 | output1_shift1 = model(data1_shift1)
98 | output1_shift2 = model(data1_shift2)
99 | output1_shift3 = model(data1_shift3)
100 | output1_shift4 = model(data1_shift4)
101 | output1_shift5 = model(data1_shift5)
102 | output1_shift6 = model(data1_shift6)
103 | output1_shift7 = model(data1_shift7)
104 | output1_shift8 = model(data1_shift8)
105 | output1_shift9 = model(data1_shift9)
106 | output1_nshift0 = model(data1_nshift0)
107 | output1_nshift1 = model(data1_nshift1)
108 | output1_nshift2 = model(data1_nshift2)
109 | output1_nshift3 = model(data1_nshift3)
110 | output1_nshift4 = model(data1_nshift4)
111 | output1_nshift5 = model(data1_nshift5)
112 | output1_nshift6 = model(data1_nshift6)
113 | output1_nshift7 = model(data1_nshift7)
114 | output1_nshift8 = model(data1_nshift8)
115 | output1_nshift9 = model(data1_nshift9)
116 |
117 | dfdx20 = (output1_shift0+output1_nshift0-2*output1)/(params["diff"]**2) # Central difference: d2u/dx_0^2.
118 | dfdx21 = (output1_shift1+output1_nshift1-2*output1)/(params["diff"]**2)
119 | dfdx22 = (output1_shift2+output1_nshift2-2*output1)/(params["diff"]**2)
120 | dfdx23 = (output1_shift3+output1_nshift3-2*output1)/(params["diff"]**2)
121 | dfdx24 = (output1_shift4+output1_nshift4-2*output1)/(params["diff"]**2)
122 | dfdx25 = (output1_shift5+output1_nshift5-2*output1)/(params["diff"]**2)
123 | dfdx26 = (output1_shift6+output1_nshift6-2*output1)/(params["diff"]**2)
124 | dfdx27 = (output1_shift7+output1_nshift7-2*output1)/(params["diff"]**2)
125 | dfdx28 = (output1_shift8+output1_nshift8-2*output1)/(params["diff"]**2)
126 | dfdx29 = (output1_shift9+output1_nshift9-2*output1)/(params["diff"]**2)
127 |
128 | model.zero_grad()
129 |
130 | # Loss function 1
131 | fTerm = ffun(data1).to(device)
132 | loss1 = torch.mean((dfdx20+dfdx21+dfdx22+dfdx23+dfdx24+dfdx25+dfdx26+dfdx27+dfdx28+dfdx29+fTerm)*\
133 | (dfdx20+dfdx21+dfdx22+dfdx23+dfdx24+dfdx25+dfdx26+dfdx27+dfdx28+dfdx29+fTerm))
134 |
135 | # Loss function 2
136 | output2 = model(data2)
137 | target2 = exact(params["radius"],data2)
138 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] *params["area"])
139 | loss = loss1+loss2
140 |
141 | if step%params["writeStep"] == params["writeStep"]-1:
142 | with torch.no_grad():
143 | target = exact(params["radius"],data1)
144 | error = errorFun(output1,target,params)
145 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
146 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
147 | file = open("lossData.txt","a")
148 | file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
149 |
150 | if step%params["sampleStep"] == params["sampleStep"]-1:
151 | data1 = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
152 | data2 = torch.from_numpy(generateData.sampleFromSurface10(params["radius"],params["bdryBatch"])).float().to(device)
153 |
154 | data1_shift0 = data1+x_shift[0]
155 | data1_shift1 = data1+x_shift[1]
156 | data1_shift2 = data1+x_shift[2]
157 | data1_shift3 = data1+x_shift[3]
158 | data1_shift4 = data1+x_shift[4]
159 | data1_shift5 = data1+x_shift[5]
160 | data1_shift6 = data1+x_shift[6]
161 | data1_shift7 = data1+x_shift[7]
162 | data1_shift8 = data1+x_shift[8]
163 | data1_shift9 = data1+x_shift[9]
164 | data1_nshift0 = data1-x_shift[0]
165 | data1_nshift1 = data1-x_shift[1]
166 | data1_nshift2 = data1-x_shift[2]
167 | data1_nshift3 = data1-x_shift[3]
168 | data1_nshift4 = data1-x_shift[4]
169 | data1_nshift5 = data1-x_shift[5]
170 | data1_nshift6 = data1-x_shift[6]
171 | data1_nshift7 = data1-x_shift[7]
172 | data1_nshift8 = data1-x_shift[8]
173 | data1_nshift9 = data1-x_shift[9]
174 |
175 | if 10*(step+1)%params["trainStep"] == 0:
176 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
177 |
178 | loss.backward()
179 |
180 | optimizer.step()
181 | scheduler.step()
182 |
183 | def errorFun(output,target,params):
184 | error = output-target
185 | error = math.sqrt(torch.mean(error*error))
186 | # Relative L2 error: ||output - target|| / ||target||.
187 | ref = math.sqrt(torch.mean(target*target))
188 | return error/ref
189 |
190 | def test(model,device,params):
191 | numQuad = params["numQuad"]
192 |
193 | data = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],numQuad)).float().to(device)
194 | output = model(data)
195 | target = exact(params["radius"],data).to(device)
196 |
197 | error = output-target
198 | error = math.sqrt(torch.mean(error*error))
199 | # Relative L2 error: ||output - target|| / ||target||.
200 | ref = math.sqrt(torch.mean(target*target))
201 | return error/ref
202 |
203 | def ffun(data):
204 | # f = 0
205 | return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
206 | # f = 20
207 | # return 20.0*torch.ones([data.shape[0],1],dtype=torch.float)
208 |
209 | def exact(r,data):
210 | # f = 20 ==> u = r^2-x^2-y^2-...
211 | # output = r**2-torch.sum(data*data,dim=1)
212 | # f = 0 ==> u = x1x2+x3x4+x5x6+...
213 | output = data[:,0]*data[:,1] + data[:,2]*data[:,3] + data[:,4]*data[:,5] + \
214 | data[:,6]*data[:,7] + data[:,8]*data[:,9]
215 | return output.unsqueeze(1)
216 |
217 | def rough(r,data):
218 | # output = r**2-r*torch.sum(data*data,dim=1)**0.5
219 | output = torch.zeros(data.shape[0],dtype=torch.float)
220 | return output.unsqueeze(1)
221 |
222 | def count_parameters(model):
223 | return sum(p.numel() for p in model.parameters()) # add "if p.requires_grad" to count trainable parameters only
224 |
225 | def main():
226 | # Parameters
227 | # torch.manual_seed(21)
228 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
229 |
230 | params = dict()
231 | params["radius"] = 1
232 | params["d"] = 10 # 10D
233 | params["dd"] = 1 # Scalar field
234 | params["bodyBatch"] = 1024 # Batch size
235 | params["bdryBatch"] = 2048 # Batch size for the boundary integral
236 | params["lr"] = 0.016 # Learning rate
237 | params["preLr"] = params["lr"] # Learning rate (Pre-training)
238 | params["width"] = 10 # Width of layers
239 | params["depth"] = 4 # Depth of the network: depth+2
240 | params["numQuad"] = 40000 # Number of quadrature points for testing
241 | params["trainStep"] = 50000
242 | params["penalty"] = 500
243 | params["preStep"] = 0
244 | params["diff"] = 0.001
245 | params["writeStep"] = 50
246 | params["sampleStep"] = 10
247 | params["area"] = areaVolume(params["radius"],params["d"])
248 | params["step_size"] = 5000
249 | params["milestone"] = [5000,10000,20000,35000,48000]
250 | params["gamma"] = 0.5
251 | params["decay"] = 0.00001
252 |
253 | startTime = time.time()
254 | model = RitzNet(params).to(device)
255 | model.apply(initWeights)
256 | print("Generating network costs %s seconds."%(time.time()-startTime))
257 |
258 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
259 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
260 | # scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
261 | scheduler = MultiStepLR(optimizer,milestones=params["milestone"],gamma=params["gamma"])
262 | # schedulerFun = lambda epoch: ((epoch+100)/(epoch+101))
263 | # scheduler = MultiplicativeLR(optimizer,lr_lambda=schedulerFun)
264 |
265 | startTime = time.time()
266 | preTrain(model,device,params,preOptimizer,None,rough)
267 | train(model,device,params,optimizer,scheduler)
268 | print("Training costs %s seconds."%(time.time()-startTime))
269 |
270 | model.eval()
271 | testError = test(model,device,params)
272 | print("The test error (of the last model) is %s."%testError)
273 | print("The number of parameters is %s,"%count_parameters(model))
274 |
275 | torch.save(model.state_dict(),"last_model.pt")
276 |
277 | if __name__=="__main__":
278 | main()
--------------------------------------------------------------------------------
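10dpoisson-ls.py approximates each diagonal second derivative with the central stencil (u(x + h e_i) + u(x - h e_i) - 2 u(x)) / h^2, which is second-order accurate in h. A quick standalone check of the stencil on g(x) = x^2, whose second derivative is exactly 2:

    h = 1e-3
    g = lambda x: x * x
    x0 = 0.37
    d2 = (g(x0 + h) + g(x0 - h) - 2.0 * g(x0)) / h ** 2
    print(d2)  # ~2.0: the stencil is exact for quadratics up to rounding
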
/10dpoisson-cube-ls.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import math, torch, generateData, time
3 | import torch.nn.functional as F
4 | from torch.optim.lr_scheduler import MultiStepLR, StepLR
5 | import torch.nn as nn
6 | import matplotlib.pyplot as plt
7 | import sys, os
8 | import writeSolution
9 | from areaVolume import areaVolume
10 |
11 | # Network structure
12 | class RitzNet(torch.nn.Module):
13 | def __init__(self, params):
14 | super(RitzNet, self).__init__()
15 | self.params = params
16 | # self.linearIn = nn.Linear(self.params["d"], self.params["width"])
17 | self.linear = nn.ModuleList()
18 | for _ in range(params["depth"]):
19 | self.linear.append(nn.Linear(self.params["width"], self.params["width"]))
20 |
21 | self.linearOut = nn.Linear(self.params["width"], self.params["dd"])
22 |
23 | def forward(self, x):
24 | # x = torch.tanh(self.linearIn(x)) # Match dimension
25 | for i in range(len(self.linear)//2):
26 | x_temp = torch.tanh(self.linear[2*i](x))
27 | x_temp = torch.tanh(self.linear[2*i+1](x_temp))
28 | x = x_temp+x
29 |
30 | return self.linearOut(x)
31 |
32 | def initWeights(m):
33 | if type(m) == nn.Linear:
34 | torch.nn.init.xavier_normal_(m.weight)
35 | torch.nn.init.zeros_(m.bias)
36 |
37 | def preTrain(model,device,params,preOptimizer,preScheduler,fun):
38 | model.train()
39 | file = open("lossData.txt","w")
40 |
41 | for step in range(params["preStep"]):
42 | # The volume integral
43 | data = torch.from_numpy(generateData.sampleFromDisk10(params["radius"],params["bodyBatch"])).float().to(device)
44 |
45 | output = model(data)
46 |
47 | target = fun(params["radius"],data)
48 |
49 | loss = output-target
50 | loss = torch.mean(loss*loss)
51 |
52 | if step%params["writeStep"] == params["writeStep"]-1:
53 | with torch.no_grad():
54 | ref = exact(params["radius"],data)
55 | error = errorFun(output,ref,params)
56 | # print("Loss at Step %s is %s."%(step+1,loss.item()))
57 | print("Error at Step %s is %s."%(step+1,error))
58 | file.write(str(step+1)+" "+str(error)+"\n")
59 |
60 | model.zero_grad()
61 | loss.backward()
62 |
63 | # Update the weights.
64 | preOptimizer.step()
65 | # preScheduler.step()
66 |
67 | def train(model,device,params,optimizer,scheduler):
68 | model.train()
69 |
70 | data1 = torch.rand(params["bodyBatch"],params["d"]).float().to(device)
71 | data2 = torch.rand(2*params["d"]*(params["bdryBatch"]//(2*params["d"])),params["d"]).float().to(device)
72 | temp = params["bdryBatch"]//(2*params["d"]) # Points per face; the unit cube has 2*d faces.
73 | for i in range(params["d"]):
74 | data2[(2*i+0)*temp:(2*i+1)*temp,i] = 0.0
75 | data2[(2*i+1)*temp:(2*i+2)*temp,i] = 1.0
76 |
77 | x_shift = torch.from_numpy(np.eye(10)*params["diff"]).float().to(device)
78 | data1_shift0 = data1+x_shift[0]
79 | data1_shift1 = data1+x_shift[1]
80 | data1_shift2 = data1+x_shift[2]
81 | data1_shift3 = data1+x_shift[3]
82 | data1_shift4 = data1+x_shift[4]
83 | data1_shift5 = data1+x_shift[5]
84 | data1_shift6 = data1+x_shift[6]
85 | data1_shift7 = data1+x_shift[7]
86 | data1_shift8 = data1+x_shift[8]
87 | data1_shift9 = data1+x_shift[9]
88 | data1_nshift0 = data1-x_shift[0]
89 | data1_nshift1 = data1-x_shift[1]
90 | data1_nshift2 = data1-x_shift[2]
91 | data1_nshift3 = data1-x_shift[3]
92 | data1_nshift4 = data1-x_shift[4]
93 | data1_nshift5 = data1-x_shift[5]
94 | data1_nshift6 = data1-x_shift[6]
95 | data1_nshift7 = data1-x_shift[7]
96 | data1_nshift8 = data1-x_shift[8]
97 | data1_nshift9 = data1-x_shift[9]
98 |
99 | for step in range(params["trainStep"]-params["preStep"]):
100 | output1 = model(data1)
101 | output1_shift0 = model(data1_shift0)
102 | output1_shift1 = model(data1_shift1)
103 | output1_shift2 = model(data1_shift2)
104 | output1_shift3 = model(data1_shift3)
105 | output1_shift4 = model(data1_shift4)
106 | output1_shift5 = model(data1_shift5)
107 | output1_shift6 = model(data1_shift6)
108 | output1_shift7 = model(data1_shift7)
109 | output1_shift8 = model(data1_shift8)
110 | output1_shift9 = model(data1_shift9)
111 | output1_nshift0 = model(data1_nshift0)
112 | output1_nshift1 = model(data1_nshift1)
113 | output1_nshift2 = model(data1_nshift2)
114 | output1_nshift3 = model(data1_nshift3)
115 | output1_nshift4 = model(data1_nshift4)
116 | output1_nshift5 = model(data1_nshift5)
117 | output1_nshift6 = model(data1_nshift6)
118 | output1_nshift7 = model(data1_nshift7)
119 | output1_nshift8 = model(data1_nshift8)
120 | output1_nshift9 = model(data1_nshift9)
121 |
122 | dfdx20 = (output1_shift0+output1_nshift0-2*output1)/(params["diff"]**2) # Central difference: d2u/dx_0^2.
123 | dfdx21 = (output1_shift1+output1_nshift1-2*output1)/(params["diff"]**2)
124 | dfdx22 = (output1_shift2+output1_nshift2-2*output1)/(params["diff"]**2)
125 | dfdx23 = (output1_shift3+output1_nshift3-2*output1)/(params["diff"]**2)
126 | dfdx24 = (output1_shift4+output1_nshift4-2*output1)/(params["diff"]**2)
127 | dfdx25 = (output1_shift5+output1_nshift5-2*output1)/(params["diff"]**2)
128 | dfdx26 = (output1_shift6+output1_nshift6-2*output1)/(params["diff"]**2)
129 | dfdx27 = (output1_shift7+output1_nshift7-2*output1)/(params["diff"]**2)
130 | dfdx28 = (output1_shift8+output1_nshift8-2*output1)/(params["diff"]**2)
131 | dfdx29 = (output1_shift9+output1_nshift9-2*output1)/(params["diff"]**2)
132 |
133 | model.zero_grad()
134 |
135 | # Loss function 1
136 | fTerm = ffun(data1).to(device)
137 | loss1 = torch.mean((dfdx20+dfdx21+dfdx22+dfdx23+dfdx24+dfdx25+dfdx26+dfdx27+dfdx28+dfdx29+fTerm)*\
138 | (dfdx20+dfdx21+dfdx22+dfdx23+dfdx24+dfdx25+dfdx26+dfdx27+dfdx28+dfdx29+fTerm))
139 |
140 | # Loss function 2
141 | output2 = model(data2)
142 | target2 = exact(params["radius"],data2)
143 | loss2 = torch.mean((output2-target2)*(output2-target2) * params["penalty"] *params["area"])
144 | loss = loss1+loss2
145 |
146 | if step%params["writeStep"] == params["writeStep"]-1:
147 | with torch.no_grad():
148 | target = exact(params["radius"],data1)
149 | error = errorFun(output1,target,params)
150 | # print("Loss at Step %s is %s."%(step+params["preStep"]+1,loss.item()))
151 | print("Error at Step %s is %s."%(step+params["preStep"]+1,error))
152 | file = open("lossData.txt","a")
153 | file.write(str(step+params["preStep"]+1)+" "+str(error)+"\n")
154 |
155 | if step%params["sampleStep"] == params["sampleStep"]-1:
156 | data1 = torch.rand(params["bodyBatch"],params["d"]).float().to(device)
157 | data2 = torch.rand(2*params["d"]*(params["bdryBatch"]//(2*params["d"])),params["d"]).float().to(device)
158 | temp = params["bdryBatch"]//(2*params["d"])
159 | for i in range(params["d"]):
160 | data2[(2*i+0)*temp:(2*i+1)*temp,i] = 0.0
161 | data2[(2*i+1)*temp:(2*i+2)*temp,i] = 1.0
162 |
163 | data1_shift0 = data1+x_shift[0]
164 | data1_shift1 = data1+x_shift[1]
165 | data1_shift2 = data1+x_shift[2]
166 | data1_shift3 = data1+x_shift[3]
167 | data1_shift4 = data1+x_shift[4]
168 | data1_shift5 = data1+x_shift[5]
169 | data1_shift6 = data1+x_shift[6]
170 | data1_shift7 = data1+x_shift[7]
171 | data1_shift8 = data1+x_shift[8]
172 | data1_shift9 = data1+x_shift[9]
173 | data1_nshift0 = data1-x_shift[0]
174 | data1_nshift1 = data1-x_shift[1]
175 | data1_nshift2 = data1-x_shift[2]
176 | data1_nshift3 = data1-x_shift[3]
177 | data1_nshift4 = data1-x_shift[4]
178 | data1_nshift5 = data1-x_shift[5]
179 | data1_nshift6 = data1-x_shift[6]
180 | data1_nshift7 = data1-x_shift[7]
181 | data1_nshift8 = data1-x_shift[8]
182 | data1_nshift9 = data1-x_shift[9]
183 |
184 | if 10*(step+1)%params["trainStep"] == 0:
185 | print("%s%% finished..."%(100*(step+1)//params["trainStep"]))
186 |
187 | loss.backward()
188 |
189 | optimizer.step()
190 | scheduler.step()
191 |
192 | def errorFun(output,target,params):
193 | error = output-target
194 | error = math.sqrt(torch.mean(error*error))
195 | # Relative L2 error: ||output - target|| / ||target||.
196 | ref = math.sqrt(torch.mean(target*target))
197 | return error/ref
198 |
199 | def test(model,device,params):
200 | numQuad = params["numQuad"]
201 |
202 | data = torch.rand(numQuad,params["d"]).float().to(device)
203 | output = model(data)
204 | target = exact(params["radius"],data).to(device)
205 |
206 | error = output-target
207 | error = math.sqrt(torch.mean(error*error))
208 | # Relative L2 error: ||output - target|| / ||target||.
209 | ref = math.sqrt(torch.mean(target*target))
210 | return error/ref
211 |
212 | def ffun(data):
213 | # f = 0
214 | return 0.0*torch.ones([data.shape[0],1],dtype=torch.float)
215 | # f = 20
216 | # return 20.0*torch.ones([data.shape[0],1],dtype=torch.float)
217 |
218 | def exact(r,data):
219 | # f = 20 ==> u = r^2-x^2-y^2-...
220 | # output = r**2-torch.sum(data*data,dim=1)
221 | # f = 0 ==> u = x1x2+x3x4+x5x6+...
222 | output = data[:,0]*data[:,1] + data[:,2]*data[:,3] + data[:,4]*data[:,5] + \
223 | data[:,6]*data[:,7] + data[:,8]*data[:,9]
224 | return output.unsqueeze(1)
225 |
226 | def rough(r,data):
227 | # output = r**2-r*torch.sum(data*data,dim=1)**0.5
228 | output = torch.zeros(data.shape[0],dtype=torch.float)
229 | return output.unsqueeze(1)
230 |
231 | def count_parameters(model):
232 | return sum(p.numel() for p in model.parameters()) # add "if p.requires_grad" to count trainable parameters only
233 |
234 | def main():
235 | # Parameters
236 | # torch.manual_seed(21)
237 | device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
238 |
239 | params = dict()
240 | params["radius"] = 1
241 | params["d"] = 10 # 10D
242 | params["dd"] = 1 # Scalar field
243 | params["bodyBatch"] = 1024 # Batch size
244 | params["bdryBatch"] = 2000 # Batch size for the boundary integral
245 | params["lr"] = 0.016 # Learning rate
246 | params["preLr"] = params["lr"] # Learning rate (Pre-training)
247 | params["width"] = 10 # Width of layers
248 | params["depth"] = 4 # Depth of the network: depth+2
249 | params["numQuad"] = 40000 # Number of quadrature points for testing
250 | params["trainStep"] = 50000
251 | params["penalty"] = 500
252 | params["preStep"] = 0
253 | params["diff"] = 0.001
254 | params["writeStep"] = 50
255 | params["sampleStep"] = 10
256 | params["area"] = 20
257 | params["step_size"] = 5000
258 | params["milestone"] = [5000,10000,20000,35000,48000]
259 | params["gamma"] = 0.5
260 | params["decay"] = 0.00001
261 |
262 | startTime = time.time()
263 | model = RitzNet(params).to(device)
264 | model.apply(initWeights)
265 | print("Generating network costs %s seconds."%(time.time()-startTime))
266 |
267 | # torch.seed()
268 | preOptimizer = torch.optim.Adam(model.parameters(),lr=params["preLr"])
269 | optimizer = torch.optim.Adam(model.parameters(),lr=params["lr"],weight_decay=params["decay"])
270 | # scheduler = StepLR(optimizer,step_size=params["step_size"],gamma=params["gamma"])
271 | scheduler = MultiStepLR(optimizer,milestones=params["milestone"],gamma=params["gamma"])
272 |
273 | startTime = time.time()
274 | preTrain(model,device,params,preOptimizer,None,rough)
275 | train(model,device,params,optimizer,scheduler)
276 | print("Training costs %s seconds."%(time.time()-startTime))
277 |
278 | model.eval()
279 | testError = test(model,device,params)
280 | print("The test error (of the last model) is %s."%testError)
281 | print("The number of parameters is %s,"%count_parameters(model))
282 |
283 | torch.save(model.state_dict(),"last_model.pt")
284 |
285 | if __name__=="__main__":
286 | main()
--------------------------------------------------------------------------------
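All of the 10d scripts test against the exact solution u = x1 x2 + x3 x4 + ... + x9 x10, which is harmonic, so the matching right-hand side is f = 0 (the commented f = 20 branch pairs with u = r^2 - |x|^2 instead). A small standalone autograd check of the harmonicity (not part of the repository):

    import torch

    x = torch.rand(5, 10, requires_grad=True)
    u = (x[:, 0::2] * x[:, 1::2]).sum(dim=1, keepdim=True)  # x1*x2 + x3*x4 + ...
    g = torch.autograd.grad(u, x, grad_outputs=torch.ones_like(u),
                            create_graph=True)[0]
    lap = sum(torch.autograd.grad(g[:, i].sum(), x, create_graph=True)[0][:, i]
              for i in range(10))
    print(lap)  # numerically zero: u is harmonic
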
/Results/2dpoisson-autograd/lossData2.txt:
--------------------------------------------------------------------------------
1 | 50 0.9931627967566563
2 | 100 0.9961082057327859
3 | 150 0.993973349532946
4 | 200 0.9914909902483404
5 | 250 0.9861258606694111
6 | 300 0.9690648951528804
7 | 350 0.922847182840115
8 | 400 0.8540289960694325
9 | 450 0.7476752549560652
10 | 500 0.6663574862452025
11 | 550 0.5845575958213695
12 | 600 0.32635616041511517
13 | 650 0.5486251101358648
14 | 700 0.2994316896897969
15 | 750 0.16065855319991998
16 | 800 0.23473687032408316
17 | 850 0.12519555850093977
18 | 900 0.11250588569511315
19 | 950 0.14612773066855994
20 | 1000 0.146437782942866
21 | 1050 0.08268289765194266
22 | 1100 0.11830802786062253
23 | 1150 0.16243695745925668
24 | 1200 0.08535771337478498
25 | 1250 0.06061268984157089
26 | 1300 0.06582602051475636
27 | 1350 0.09482209959442807
28 | 1400 0.060720266771548036
29 | 1450 0.07537723397139606
30 | 1500 0.07252795272297871
31 | 1550 0.07729743834051944
32 | 1600 0.15870962848850062
33 | 1650 0.05785781570404657
34 | 1700 0.14637421785048568
35 | 1750 0.07648383104164527
36 | 1800 0.06030226450362245
37 | 1850 0.04601616912916416
38 | 1900 0.32587683207267704
39 | 1950 0.1341702316039337
40 | 2000 0.05072280862320173
41 | 2050 0.04978187074892018
42 | 2100 0.04983165196361146
43 | 2150 0.05055589910084112
44 | 2200 0.0505576863800536
45 | 2250 0.12969435307653973
46 | 2300 0.07060835379369707
47 | 2350 0.04692575364970658
48 | 2400 0.10696804275017761
49 | 2450 0.12113778153052904
50 | 2500 0.055523850185254846
51 | 2550 0.04498817681603193
52 | 2600 0.04475869639899298
53 | 2650 0.04483919893834774
54 | 2700 0.04739993246413639
55 | 2750 0.10090598976784734
56 | 2800 0.046310572638923735
57 | 2850 0.3649923004241082
58 | 2900 0.16613419592996714
59 | 2950 0.05665600466301497
60 | 3000 0.04664481194738777
61 | 3050 0.04939361300535637
62 | 3100 0.04546853542001056
63 | 3150 0.045026228621552034
64 | 3200 0.05161512122581658
65 | 3250 0.11209321083546053
66 | 3300 0.05108648326668507
67 | 3350 0.04301149760955948
68 | 3400 0.04981185412080834
69 | 3450 0.04218248687338309
70 | 3500 0.04086298108104299
71 | 3550 0.04322177347809125
72 | 3600 0.05839420063090938
73 | 3650 0.09280095058784853
74 | 3700 0.03852992019645379
75 | 3750 0.04580650004708473
76 | 3800 0.05855529098823003
77 | 3850 0.2286735621617995
78 | 3900 0.0670391720124366
79 | 3950 0.03733553539660425
80 | 4000 0.08406233218804555
81 | 4050 0.0429478803387651
82 | 4100 0.04618513018804904
83 | 4150 0.2730548635158397
84 | 4200 0.04398145380357255
85 | 4250 0.05530101913684853
86 | 4300 0.04555329765221001
87 | 4350 0.041111146497555394
88 | 4400 0.0565862930239338
89 | 4450 0.04769495806053175
90 | 4500 0.06775508884888475
91 | 4550 0.038396781544028126
92 | 4600 0.05593050596280882
93 | 4650 0.037880703693324316
94 | 4700 0.06009728892735042
95 | 4750 0.06539761858378332
96 | 4800 0.03768994330966774
97 | 4850 0.035966185071450676
98 | 4900 0.033884857005092854
99 | 4950 0.10099381286955046
100 | 5000 0.038256722407410945
101 | 5050 0.036839285713056856
102 | 5100 0.044387715088214
103 | 5150 0.03928502007724825
104 | 5200 0.03807345007190882
105 | 5250 0.03559320549538103
106 | 5300 0.035683871823011845
107 | 5350 0.037846341818459694
108 | 5400 0.03875469876299204
109 | 5450 0.050679185688993635
110 | 5500 0.042832659473988004
111 | 5550 0.035946476986335664
112 | 5600 0.038510699467868344
113 | 5650 0.03712847287640523
114 | 5700 0.040332239649943036
115 | 5750 0.03589681697842795
116 | 5800 0.03644627919095366
117 | 5850 0.035858144529348684
118 | 5900 0.0368062260496335
119 | 5950 0.034053356466993014
120 | 6000 0.033932619100052575
121 | 6050 0.033294828136669
122 | 6100 0.04174055029972922
123 | 6150 0.034573842072319616
124 | 6200 0.049897097420753066
125 | 6250 0.057735346419647726
126 | 6300 0.0384779179072889
127 | 6350 0.04573743739738332
128 | 6400 0.03682267253152245
129 | 6450 0.03276995796699535
130 | 6500 0.033687937406085035
131 | 6550 0.031966976840588995
132 | 6600 0.03548109355894514
133 | 6650 0.03494870687285064
134 | 6700 0.03870333238057443
135 | 6750 0.03876202978744393
136 | 6800 0.03336977708163377
137 | 6850 0.03456407217073675
138 | 6900 0.05704739749066687
139 | 6950 0.03200573101892562
140 | 7000 0.03279904351224228
141 | 7050 0.03247645356111801
142 | 7100 0.03315347908558709
143 | 7150 0.05080527436117722
144 | 7200 0.03937507161510169
145 | 7250 0.03126147313258631
146 | 7300 0.03486310023210931
147 | 7350 0.03225844343326926
148 | 7400 0.051451115773446895
149 | 7450 0.043266282592352295
150 | 7500 0.04575641160266796
151 | 7550 0.03869752560640238
152 | 7600 0.035288318706638
153 | 7650 0.03330767222526402
154 | 7700 0.0308244006513034
155 | 7750 0.033591427739845886
156 | 7800 0.036911328567654125
157 | 7850 0.03310609131399162
158 | 7900 0.04733441162356522
159 | 7950 0.03147572376010548
160 | 8000 0.032433509270626555
161 | 8050 0.03243907994487757
162 | 8100 0.038783315426594134
163 | 8150 0.03480634502017727
164 | 8200 0.03404855748385337
165 | 8250 0.034366202319007555
166 | 8300 0.05159617599836713
167 | 8350 0.08568110292509196
168 | 8400 0.03565706656474518
169 | 8450 0.032511360979992236
170 | 8500 0.03623514041999349
171 | 8550 0.03446872481872126
172 | 8600 0.03053615744426828
173 | 8650 0.032713200122820675
174 | 8700 0.03165407109912547
175 | 8750 0.03850806251251523
176 | 8800 0.06968437179627268
177 | 8850 0.034213178690280725
178 | 8900 0.04207899008059226
179 | 8950 0.03202732220248833
180 | 9000 0.03243047187639797
181 | 9050 0.06648048610818254
182 | 9100 0.033173957134848255
183 | 9150 0.03148590575079879
184 | 9200 0.030917658306197138
185 | 9250 0.047704383970359567
186 | 9300 0.06721513765651572
187 | 9350 0.030437874030241455
188 | 9400 0.03195915691747503
189 | 9450 0.04518057750923051
190 | 9500 0.0303851191061737
191 | 9550 0.031729822061180975
192 | 9600 0.05082387069286451
193 | 9650 0.043275261505393105
194 | 9700 0.031196159880243466
195 | 9750 0.03204853886980921
196 | 9800 0.04191981270974401
197 | 9850 0.09027529371346897
198 | 9900 0.03149487909271633
199 | 9950 0.031172140626076837
200 | 10000 0.03354829725204159
201 | 10050 0.03339984473810697
202 | 10100 0.03138804844218217
203 | 10150 0.03216886093634885
204 | 10200 0.03350914744241164
205 | 10250 0.030866391216635793
206 | 10300 0.030004358763633672
207 | 10350 0.04493945936683548
208 | 10400 0.030419086066348803
209 | 10450 0.029989537561215033
210 | 10500 0.029989403615159862
211 | 10550 0.033254452477719715
212 | 10600 0.034259107249373026
213 | 10650 0.030572195153270816
214 | 10700 0.029107976632739375
215 | 10750 0.03247544841332875
216 | 10800 0.042874659018484335
217 | 10850 0.03047003777439342
218 | 10900 0.04679872422256517
219 | 10950 0.030499659087929988
220 | 11000 0.029173889768187307
221 | 11050 0.03645467448276841
222 | 11100 0.03018134675953116
223 | 11150 0.027749689534256272
224 | 11200 0.029587167556769784
225 | 11250 0.030842850705651344
226 | 11300 0.031863599338633204
227 | 11350 0.028862366604755826
228 | 11400 0.04117743664071819
229 | 11450 0.04292388392800279
230 | 11500 0.03775494467620211
231 | 11550 0.02964052467200417
232 | 11600 0.04732286476176935
233 | 11650 0.028675562582818535
234 | 11700 0.027644699468627156
235 | 11750 0.028002814775861248
236 | 11800 0.03180027937661913
237 | 11850 0.038997176716484096
238 | 11900 0.04205293992854209
239 | 11950 0.032703979599625906
240 | 12000 0.027876399408205424
241 | 12050 0.035229204500510126
242 | 12100 0.03433440699903269
243 | 12150 0.026419218892765852
244 | 12200 0.027593055013334932
245 | 12250 0.029613981896438376
246 | 12300 0.028610556638744063
247 | 12350 0.029135955476141756
248 | 12400 0.02997541827083267
249 | 12450 0.027383718266169813
250 | 12500 0.042673618785952376
251 | 12550 0.026997922883918793
252 | 12600 0.028359008259657793
253 | 12650 0.028569507879042628
254 | 12700 0.03509345709418913
255 | 12750 0.027658496286737114
256 | 12800 0.028034333600667084
257 | 12850 0.03461319598039119
258 | 12900 0.026948526613877175
259 | 12950 0.0320168476967268
260 | 13000 0.02920512336085355
261 | 13050 0.028313151472632907
262 | 13100 0.028438948649131657
263 | 13150 0.02882294230739426
264 | 13200 0.038813825480797516
265 | 13250 0.05133169025165078
266 | 13300 0.050170459783757546
267 | 13350 0.0277696727102171
268 | 13400 0.03808950608039092
269 | 13450 0.031842941831747754
270 | 13500 0.031266092567752646
271 | 13550 0.0299416177081484
272 | 13600 0.02911654435077324
273 | 13650 0.034107853495678804
274 | 13700 0.02997162528565396
275 | 13750 0.02667491659469258
276 | 13800 0.02691114362558815
277 | 13850 0.02902905630506199
278 | 13900 0.047185591624264056
279 | 13950 0.029821653351044836
280 | 14000 0.025877082152796018
281 | 14050 0.033051322379481614
282 | 14100 0.02614030908543824
283 | 14150 0.02643776950268261
284 | 14200 0.026020104412063295
285 | 14250 0.02607211951811768
286 | 14300 0.026240382014116024
287 | 14350 0.04014953239499337
288 | 14400 0.02609893184189425
289 | 14450 0.026450006295253143
290 | 14500 0.027060992766581256
291 | 14550 0.027970582487791006
292 | 14600 0.0314366798373399
293 | 14650 0.030276653665568913
294 | 14700 0.02510023320658643
295 | 14750 0.026422068607510382
296 | 14800 0.03416445875766613
297 | 14850 0.026517605109203847
298 | 14900 0.026370832212412217
299 | 14950 0.02802564928293308
300 | 15000 0.03471280991991882
301 | 15050 0.025657604051560888
302 | 15100 0.025197888438871607
303 | 15150 0.024038178891169788
304 | 15200 0.024717811997206435
305 | 15250 0.0255531738682303
306 | 15300 0.026427322209379624
307 | 15350 0.026575558364300055
308 | 15400 0.026358288498072358
309 | 15450 0.026114106649475576
310 | 15500 0.030935008896908516
311 | 15550 0.025863241219432075
312 | 15600 0.02923167866527576
313 | 15650 0.02376573808255412
314 | 15700 0.025339145712214114
315 | 15750 0.028778753917631582
316 | 15800 0.025774936338865715
317 | 15850 0.03181168841678508
318 | 15900 0.032484315724566816
319 | 15950 0.026211781658270403
320 | 16000 0.025816566799791912
321 | 16050 0.025357340089534913
322 | 16100 0.027756845730377892
323 | 16150 0.041035381469861014
324 | 16200 0.02521257226866742
325 | 16250 0.02616186511078198
326 | 16300 0.03060597731889547
327 | 16350 0.02658133990057778
328 | 16400 0.02668511741729568
329 | 16450 0.02744589096767417
330 | 16500 0.026092538283061708
331 | 16550 0.027978853916743873
332 | 16600 0.02764462458690075
333 | 16650 0.034107835910264776
334 | 16700 0.03059254106473676
335 | 16750 0.027098823699233613
336 | 16800 0.027307884050776884
337 | 16850 0.028948774865740788
338 | 16900 0.0265661265427276
339 | 16950 0.025691072898467622
340 | 17000 0.028850982406325155
341 | 17050 0.02824699985421628
342 | 17100 0.029657461860002114
343 | 17150 0.025560071328293636
344 | 17200 0.027381734805810284
345 | 17250 0.02721017961827635
346 | 17300 0.0325769888792589
347 | 17350 0.03184820478957284
348 | 17400 0.02470116680902606
349 | 17450 0.02473984712180813
350 | 17500 0.023169498329975204
351 | 17550 0.025625379667641116
352 | 17600 0.025519134328026365
353 | 17650 0.030211191347666307
354 | 17700 0.03051876706299705
355 | 17750 0.024074034672176652
356 | 17800 0.025024386370564228
357 | 17850 0.02466765330122066
358 | 17900 0.02670156524655483
359 | 17950 0.026678044912572696
360 | 18000 0.026692065721868084
361 | 18050 0.02462264080685278
362 | 18100 0.02541672478053791
363 | 18150 0.024910943425201924
364 | 18200 0.02440010058856714
365 | 18250 0.024288069432332113
366 | 18300 0.030320807686816036
367 | 18350 0.02386173498349407
368 | 18400 0.025975451560266878
369 | 18450 0.033413042522514536
370 | 18500 0.02795174875024274
371 | 18550 0.024147602571631444
372 | 18600 0.024345237765902993
373 | 18650 0.024192974013590697
374 | 18700 0.023330769115574038
375 | 18750 0.024366426683639156
376 | 18800 0.028596705187744778
377 | 18850 0.024175830301209626
378 | 18900 0.0366955011451533
379 | 18950 0.02663020933900965
380 | 19000 0.023649472082735448
381 | 19050 0.03562262118159586
382 | 19100 0.02490753207169438
383 | 19150 0.025356430909166405
384 | 19200 0.022451289413494523
385 | 19250 0.02687178848668371
386 | 19300 0.024859009678075045
387 | 19350 0.023915557416132525
388 | 19400 0.024020450384112206
389 | 19450 0.028264285172305448
390 | 19500 0.02275060421802303
391 | 19550 0.037332750601644274
392 | 19600 0.02752296411754791
393 | 19650 0.02217685055967513
394 | 19700 0.02305474133735604
395 | 19750 0.025642327710658518
396 | 19800 0.025479382311647168
397 | 19850 0.029918104256108375
398 | 19900 0.02656603675189674
399 | 19950 0.022943523486993505
400 | 20000 0.028158559978435072
401 | 20050 0.0234355944441078
402 | 20100 0.021889311001538337
403 | 20150 0.022779657186822875
404 | 20200 0.0226871889999525
405 | 20250 0.02871123899926752
406 | 20300 0.02962637963557515
407 | 20350 0.025055449956724565
408 | 20400 0.022367471271768163
409 | 20450 0.02309067784972857
410 | 20500 0.022730645665540155
411 | 20550 0.021651158097429894
412 | 20600 0.022175116722039992
413 | 20650 0.022353856329051867
414 | 20700 0.03138263484816746
415 | 20750 0.021729693709031113
416 | 20800 0.02917264907409778
417 | 20850 0.024183573153433596
418 | 20900 0.022609218979090157
419 | 20950 0.022586854841310452
420 | 21000 0.024197083404972224
421 | 21050 0.028032322611406712
422 | 21100 0.0250137030185817
423 | 21150 0.02728607092079862
424 | 21200 0.027020732500202527
425 | 21250 0.02318292325478272
426 | 21300 0.021314144933353162
427 | 21350 0.022473445102591198
428 | 21400 0.023475953935779476
429 | 21450 0.02755654746576746
430 | 21500 0.022481396480411402
431 | 21550 0.02131813870736119
432 | 21600 0.024098775578565802
433 | 21650 0.024908895386435648
434 | 21700 0.022775266291996537
435 | 21750 0.021093984563361843
436 | 21800 0.027731464087230456
437 | 21850 0.022699079385608625
438 | 21900 0.024622566776948296
439 | 21950 0.022317895356996298
440 | 22000 0.021438362220905855
441 | 22050 0.022102914075536287
442 | 22100 0.021388544193289272
443 | 22150 0.02210049204531169
444 | 22200 0.022998460067880148
445 | 22250 0.022293037825748933
446 | 22300 0.021718593683126824
447 | 22350 0.0220651069221137
448 | 22400 0.024906348565384328
449 | 22450 0.02205897814977635
450 | 22500 0.023466854501672775
451 | 22550 0.024264192468378996
452 | 22600 0.023074843852348842
453 | 22650 0.02137074962197426
454 | 22700 0.020697367509812013
455 | 22750 0.022482442609829425
456 | 22800 0.02091505170909304
457 | 22850 0.029360836638925268
458 | 22900 0.021231615935033497
459 | 22950 0.021932313049479527
460 | 23000 0.022113177774524238
461 | 23050 0.02185309544963163
462 | 23100 0.021799844666519085
463 | 23150 0.0197991676198912
464 | 23200 0.021707620608745426
465 | 23250 0.02157886610722212
466 | 23300 0.02098802229667668
467 | 23350 0.031135724261360253
468 | 23400 0.02894973198665271
469 | 23450 0.020988725303766678
470 | 23500 0.022264786533340287
471 | 23550 0.02042689433746435
472 | 23600 0.03677780848958173
473 | 23650 0.02195999795532596
474 | 23700 0.024206760801839314
475 | 23750 0.020380053189581245
476 | 23800 0.02487403325226286
477 | 23850 0.023798873984497763
478 | 23900 0.03116337774857319
479 | 23950 0.020340391439988014
480 | 24000 0.021233663267758295
481 | 24050 0.024461650742966766
482 | 24100 0.02031554208372857
483 | 24150 0.029797336425907518
484 | 24200 0.02061545691090506
485 | 24250 0.02209392965696309
486 | 24300 0.023050139491134068
487 | 24350 0.02237333219217652
488 | 24400 0.03282943693498967
489 | 24450 0.020189191704833572
490 | 24500 0.020109306612545202
491 | 24550 0.026870259162732228
492 | 24600 0.022295134640406365
493 | 24650 0.020919438509724834
494 | 24700 0.019627909821443936
495 | 24750 0.02054263366087755
496 | 24800 0.020060977868209904
497 | 24850 0.020088534795217224
498 | 24900 0.0196721405117391
499 | 24950 0.02595271138250484
500 | 25000 0.0213284056892949
501 | 25050 0.021692624546306053
502 | 25100 0.01992497403594697
503 | 25150 0.024880751721290006
504 | 25200 0.021462717598121402
505 | 25250 0.023485301414197655
506 | 25300 0.019441017367109577
507 | 25350 0.023285239299500573
508 | 25400 0.020003397168991428
509 | 25450 0.02071741897988526
510 | 25500 0.020806359924001126
511 | 25550 0.023656453435207045
512 | 25600 0.020616306395165786
513 | 25650 0.019727599097047317
514 | 25700 0.019130727597273432
515 | 25750 0.01955206428864125
516 | 25800 0.022712924992826875
517 | 25850 0.019901548189536173
518 | 25900 0.022607408121726864
519 | 25950 0.021689714175838343
520 | 26000 0.02194338163833673
521 | 26050 0.025182624111954058
522 | 26100 0.0209673384438864
523 | 26150 0.021900729043434465
524 | 26200 0.022152144796986795
525 | 26250 0.02025270810746031
526 | 26300 0.02535527500954379
527 | 26350 0.029230931171158565
528 | 26400 0.022994479401707374
529 | 26450 0.020368861611832254
530 | 26500 0.03147172497174024
531 | 26550 0.0323736003455271
532 | 26600 0.021924493596460953
533 | 26650 0.02355912251131087
534 | 26700 0.022617817836858983
535 | 26750 0.020261659704203547
536 | 26800 0.021849500985062283
537 | 26850 0.01962863064374542
538 | 26900 0.019744692976115343
539 | 26950 0.01916206742103282
540 | 27000 0.020487562605942648
541 | 27050 0.019060624074874487
542 | 27100 0.019875505518329537
543 | 27150 0.0211377215523359
544 | 27200 0.019444864256440066
545 | 27250 0.01924792965238475
546 | 27300 0.023274801873020577
547 | 27350 0.019558607584872024
548 | 27400 0.020494124885183297
549 | 27450 0.020279293442300148
550 | 27500 0.02164067723714525
551 | 27550 0.01927425896040327
552 | 27600 0.020181689825366837
553 | 27650 0.020500886550947044
554 | 27700 0.019443582984785757
555 | 27750 0.03464794591134594
556 | 27800 0.02118111213972303
557 | 27850 0.020099710221914827
558 | 27900 0.02219124710464873
559 | 27950 0.021258278207925998
560 | 28000 0.02021019487759209
561 | 28050 0.02156391664281626
562 | 28100 0.019972869756556953
563 | 28150 0.0195445404013381
564 | 28200 0.020688810057045225
565 | 28250 0.018372703232209284
566 | 28300 0.019310178458696307
567 | 28350 0.02275221984254294
568 | 28400 0.022605716603613615
569 | 28450 0.02931464735521117
570 | 28500 0.020928812116828698
571 | 28550 0.018940667536791893
572 | 28600 0.018380345833398294
573 | 28650 0.02030717473462427
574 | 28700 0.01912735689614299
575 | 28750 0.018693304116815575
576 | 28800 0.020835926157354558
577 | 28850 0.03019506184514446
578 | 28900 0.029035417920760514
579 | 28950 0.018602183391039268
580 | 29000 0.018599612351644035
581 | 29050 0.025393639583432392
582 | 29100 0.018962266497628322
583 | 29150 0.01860153970891123
584 | 29200 0.018728141460470037
585 | 29250 0.01883932655754245
586 | 29300 0.017913720647640095
587 | 29350 0.019144157711326742
588 | 29400 0.018302581802521072
589 | 29450 0.01874771741958902
590 | 29500 0.021017879352362472
591 | 29550 0.0230044362449867
592 | 29600 0.02208950246557154
593 | 29650 0.02216721210385789
594 | 29700 0.020782163492151225
595 | 29750 0.01888164834581407
596 | 29800 0.01912509360675582
597 | 29850 0.022097613408358745
598 | 29900 0.01915929496023221
599 | 29950 0.018630028769272458
600 | 30000 0.018994433850077347
601 | 30050 0.019323174540707134
602 | 30100 0.01950812935217754
603 | 30150 0.0218971207786504
604 | 30200 0.019070137151044157
605 | 30250 0.018377460850191096
606 | 30300 0.018079972403907525
607 | 30350 0.018596990712874163
608 | 30400 0.01940906199002544
609 | 30450 0.018624011462304798
610 | 30500 0.01800382338490284
611 | 30550 0.01811157311946667
612 | 30600 0.023275353193706032
613 | 30650 0.023353916156073243
614 | 30700 0.023059941522224132
615 | 30750 0.021894189474004577
616 | 30800 0.02033355549032548
617 | 30850 0.017403901860172776
618 | 30900 0.017923208677453503
619 | 30950 0.01841690284597846
620 | 31000 0.01965157679729697
621 | 31050 0.01812611393325761
622 | 31100 0.01885059018232025
623 | 31150 0.02363727889173034
624 | 31200 0.02046214812694272
625 | 31250 0.02178665814574022
626 | 31300 0.018306043832795313
627 | 31350 0.01999309265940049
628 | 31400 0.018573502271363184
629 | 31450 0.01970951353479535
630 | 31500 0.017451399267513584
631 | 31550 0.017983355362579546
632 | 31600 0.01862284981927557
633 | 31650 0.01861874126979627
634 | 31700 0.017800258615155957
635 | 31750 0.01884768441122506
636 | 31800 0.017598110671862753
637 | 31850 0.018382235688834292
638 | 31900 0.025240705810987297
639 | 31950 0.021161665132932272
640 | 32000 0.01783052302150053
641 | 32050 0.017470454947624645
642 | 32100 0.019319989321407938
643 | 32150 0.018293123130140615
644 | 32200 0.019188757688608678
645 | 32250 0.022800789523291783
646 | 32300 0.023920960506760965
647 | 32350 0.025344452361605166
648 | 32400 0.021992196317035848
649 | 32450 0.02084744735421377
650 | 32500 0.01807873755268066
651 | 32550 0.0179990509820522
652 | 32600 0.020041265496498813
653 | 32650 0.02077804132046774
654 | 32700 0.019109840047275454
655 | 32750 0.021285425770104366
656 | 32800 0.018849341048677182
657 | 32850 0.01837916514732946
658 | 32900 0.01766942063516288
659 | 32950 0.017128267267632495
660 | 33000 0.020527628498988783
661 | 33050 0.0191700616066987
662 | 33100 0.02042995962794487
663 | 33150 0.01780472301212746
664 | 33200 0.01825125564169508
665 | 33250 0.018590945002896212
666 | 33300 0.01890884712797871
667 | 33350 0.020840034378792328
668 | 33400 0.018904610535945828
669 | 33450 0.017787202794772028
670 | 33500 0.017853367935640362
671 | 33550 0.018379738084830954
672 | 33600 0.01851444536755373
673 | 33650 0.01895299747284832
674 | 33700 0.017042852454282862
675 | 33750 0.01877955766909444
676 | 33800 0.017402600532115613
677 | 33850 0.020008323783258678
678 | 33900 0.018200701230075934
679 | 33950 0.024017352492087685
680 | 34000 0.019543106271464872
681 | 34050 0.018889970378260252
682 | 34100 0.01793025059533843
683 | 34150 0.0172872719941707
684 | 34200 0.018471166444769375
685 | 34250 0.01734236437042468
686 | 34300 0.020271027495924278
687 | 34350 0.02209518472417121
688 | 34400 0.020873535758126973
689 | 34450 0.017773693506317568
690 | 34500 0.018841890139391314
691 | 34550 0.017854364450761998
692 | 34600 0.017310933529430594
693 | 34650 0.01818457801551776
694 | 34700 0.01819359971810102
695 | 34750 0.017447029495884732
696 | 34800 0.01804453797238272
697 | 34850 0.018161228856526757
698 | 34900 0.019308593128389548
699 | 34950 0.017758252375233728
700 | 35000 0.017927918800816962
701 | 35050 0.017464335186795313
702 | 35100 0.018015619007822208
703 | 35150 0.01818573521122327
704 | 35200 0.018984450194963146
705 | 35250 0.017908781572337894
706 | 35300 0.016611302351843087
707 | 35350 0.01755066113743257
708 | 35400 0.017895545115613098
709 | 35450 0.018128237554084395
710 | 35500 0.02079950527216074
711 | 35550 0.01860817254159878
712 | 35600 0.017412301295536895
713 | 35650 0.016341454628189606
714 | 35700 0.018419320823187334
715 | 35750 0.0186589831715982
716 | 35800 0.018701225390452332
717 | 35850 0.0198308682847066
718 | 35900 0.019965999106455936
719 | 35950 0.01968198028478423
720 | 36000 0.017535743517712452
721 | 36050 0.01763277868396663
722 | 36100 0.018087490918631693
723 | 36150 0.019958068117618764
724 | 36200 0.01840152618793088
725 | 36250 0.017870808440499947
726 | 36300 0.01964671036941074
727 | 36350 0.023225307144969037
728 | 36400 0.018601171641737555
729 | 36450 0.017932129723963304
730 | 36500 0.019538718055448278
731 | 36550 0.019374538448223396
732 | 36600 0.022060665113832884
733 | 36650 0.017954684620083573
734 | 36700 0.018094252987281
735 | 36750 0.017705986567174533
736 | 36800 0.01716061925859736
737 | 36850 0.019506316997531605
738 | 36900 0.018917889158103666
739 | 36950 0.01750049334566419
740 | 37000 0.0186695093908235
741 | 37050 0.019657972632664097
742 | 37100 0.02040194573805178
743 | 37150 0.02036224523186541
744 | 37200 0.021568885463424548
745 | 37250 0.0193318160324204
746 | 37300 0.01746091673309956
747 | 37350 0.018222379960946673
748 | 37400 0.017207212280343817
749 | 37450 0.017720683757380187
750 | 37500 0.017995609410111756
751 | 37550 0.017614043883695113
752 | 37600 0.017711473353707024
753 | 37650 0.01785725221187802
754 | 37700 0.019547155075434502
755 | 37750 0.02112991910602152
756 | 37800 0.017920859898591306
757 | 37850 0.018232994844234598
758 | 37900 0.01783533668249196
759 | 37950 0.017623346766786452
760 | 38000 0.016746254240504736
761 | 38050 0.02249129464142529
762 | 38100 0.019884874433399112
763 | 38150 0.019433630666789794
764 | 38200 0.02007595949216576
765 | 38250 0.022474734274736573
766 | 38300 0.01888094066649813
767 | 38350 0.01705789109718758
768 | 38400 0.017454391491734292
769 | 38450 0.017371247615292233
770 | 38500 0.01771659522950979
771 | 38550 0.017849565647485983
772 | 38600 0.017696655861456207
773 | 38650 0.019974919072174642
774 | 38700 0.022749222163656425
775 | 38750 0.021137268762599373
776 | 38800 0.017344160489439975
777 | 38850 0.017012753427006748
778 | 38900 0.02027802326153488
779 | 38950 0.01974187510590808
780 | 39000 0.017374870257934605
781 | 39050 0.02828407274961491
782 | 39100 0.019520882006598808
783 | 39150 0.018998138909381
784 | 39200 0.021928946345969882
785 | 39250 0.01752386821588561
786 | 39300 0.019281306365213958
787 | 39350 0.01776723290668705
788 | 39400 0.01986669661139392
789 | 39450 0.019110988746507383
790 | 39500 0.01898421408375967
791 | 39550 0.01965650465220175
792 | 39600 0.02258141774904767
793 | 39650 0.02246667795094198
794 | 39700 0.02162656143525962
795 | 39750 0.02247900237956233
796 | 39800 0.019928998208716173
797 | 39850 0.021824755866881533
798 | 39900 0.01955418696936941
799 | 39950 0.019703676671888264
800 | 40000 0.017285566103580563
801 | 40050 0.016988406554572733
802 | 40100 0.017102881083521468
803 | 40150 0.017498136704891205
804 | 40200 0.01719714390523128
805 | 40250 0.017531426381775433
806 | 40300 0.017199923722062083
807 | 40350 0.017362924977663657
808 | 40400 0.01711619545646105
809 | 40450 0.01752803527319076
810 | 40500 0.01740374654096583
811 | 40550 0.017455591319092252
812 | 40600 0.01752139207992952
813 | 40650 0.017599766055011234
814 | 40700 0.01738164661808847
815 | 40750 0.017670885073565467
816 | 40800 0.017873564916697227
817 | 40850 0.01797921214545982
818 | 40900 0.017085070863198454
819 | 40950 0.017906446307632103
820 | 41000 0.01890971581960686
821 | 41050 0.018656677404269253
822 | 41100 0.01848303458695553
823 | 41150 0.019513210073601288
824 | 41200 0.017612016807092824
825 | 41250 0.01745333447383937
826 | 41300 0.01753906422772185
827 | 41350 0.017271212005812286
828 | 41400 0.018031509576741624
829 | 41450 0.017169456441177944
830 | 41500 0.017342342570449622
831 | 41550 0.016596756936059127
832 | 41600 0.017167447597036017
833 | 41650 0.017143682026793543
834 | 41700 0.017456106508994162
835 | 41750 0.018288151254838087
836 | 41800 0.01713440717340292
837 | 41850 0.017856035403900364
838 | 41900 0.017071140027725567
839 | 41950 0.01705811496918748
840 | 42000 0.017448760354164424
841 | 42050 0.01796677301907299
842 | 42100 0.01721071407138368
843 | 42150 0.01721153866649268
844 | 42200 0.018373721527380874
845 | 42250 0.017811680634834178
846 | 42300 0.01721163960578317
847 | 42350 0.01700722318045802
848 | 42400 0.017696012249692993
849 | 42450 0.01661797902410733
850 | 42500 0.017515241295974152
851 | 42550 0.01712789605981323
852 | 42600 0.01668959795715136
853 | 42650 0.01660877768993593
854 | 42700 0.01648016508857599
855 | 42750 0.017104751235653935
856 | 42800 0.016478348424047686
857 | 42850 0.018101755326980887
858 | 42900 0.01771198383881339
859 | 42950 0.019740853719465056
860 | 43000 0.018527912114212994
861 | 43050 0.02117400425474593
862 | 43100 0.021333746632372804
863 | 43150 0.02232764300560826
864 | 43200 0.026764080791705377
865 | 43250 0.02316678351490941
866 | 43300 0.020322705525803165
867 | 43350 0.021553125941709484
868 | 43400 0.021513813618996232
869 | 43450 0.018264175323900147
870 | 43500 0.017068259390304326
871 | 43550 0.016804286048701923
872 | 43600 0.01659007075525472
873 | 43650 0.0167643147495064
874 | 43700 0.01769169603278717
875 | 43750 0.01734593036815441
876 | 43800 0.016954705944903312
877 | 43850 0.01659968083685363
878 | 43900 0.016634012371236795
879 | 43950 0.01617432360729111
880 | 44000 0.018238966590741652
881 | 44050 0.0173124337102314
882 | 44100 0.017072630525669864
883 | 44150 0.0173523651941617
884 | 44200 0.01802925242144151
885 | 44250 0.018449181787460283
886 | 44300 0.018056542503802866
887 | 44350 0.017426078366233694
888 | 44400 0.0177444657887722
889 | 44450 0.01895781660976831
890 | 44500 0.020403864543332356
891 | 44550 0.018983202324867485
892 | 44600 0.017691964946389582
893 | 44650 0.017545604998248063
894 | 44700 0.018033289724670193
895 | 44750 0.019204530842908827
896 | 44800 0.01851808515090401
897 | 44850 0.01879085391823548
898 | 44900 0.017546418860012503
899 | 44950 0.017556491028064534
900 | 45000 0.01772235691911205
901 | 45050 0.017484827126562685
902 | 45100 0.01801792884561283
903 | 45150 0.01814422447052051
904 | 45200 0.01758317706449106
905 | 45250 0.016230055058622187
906 | 45300 0.016673522185317383
907 | 45350 0.016443992419510733
908 | 45400 0.016717854934648675
909 | 45450 0.01730119947337285
910 | 45500 0.01694992169331597
911 | 45550 0.01645785413161559
912 | 45600 0.017069900108433813
913 | 45650 0.01699859689186828
914 | 45700 0.016999110906684583
915 | 45750 0.017114834605050734
916 | 45800 0.018218861792718755
917 | 45850 0.01744317050992602
918 | 45900 0.01762354811196598
919 | 45950 0.01849354053911916
920 | 46000 0.017418302567287183
921 | 46050 0.01749932989681911
922 | 46100 0.01692576422343035
923 | 46150 0.016384519013926756
924 | 46200 0.01573158403970596
925 | 46250 0.016416522941324663
926 | 46300 0.016335798354839323
927 | 46350 0.016494850350722
928 | 46400 0.016645883280168083
929 | 46450 0.016484591575655277
930 | 46500 0.016334484506003936
931 | 46550 0.01689266817276204
932 | 46600 0.016291739160626435
933 | 46650 0.016523876247078868
934 | 46700 0.017531144167146923
935 | 46750 0.016331781720026313
936 | 46800 0.01822152572113532
937 | 46850 0.017533418048480312
938 | 46900 0.018085944986378145
939 | 46950 0.018621171313913602
940 | 47000 0.018130972968090547
941 | 47050 0.01813401809660816
942 | 47100 0.016272301147406316
943 | 47150 0.016827103469971228
944 | 47200 0.0169178263776078
945 | 47250 0.016667910855056027
946 | 47300 0.01759837084087304
947 | 47350 0.01723488680446932
948 | 47400 0.01701817504715473
949 | 47450 0.017559365173986864
950 | 47500 0.01679257651245017
951 | 47550 0.016378833266727507
952 | 47600 0.016843263642264062
953 | 47650 0.016601181407908453
954 | 47700 0.01626556396712742
955 | 47750 0.016905688692815692
956 | 47800 0.016611206453865525
957 | 47850 0.015885337965672975
958 | 47900 0.017206368370642582
959 | 47950 0.01629064078879325
960 | 48000 0.016598582528919083
961 | 48050 0.01683979363203979
962 | 48100 0.016408528286991485
963 | 48150 0.017039662833974352
964 | 48200 0.01614196804030559
965 | 48250 0.016743914269447525
966 | 48300 0.01694185557225115
967 | 48350 0.018533214214322677
968 | 48400 0.018605530591213084
969 | 48450 0.016492784773495875
970 | 48500 0.01753843094382709
971 | 48550 0.01740069366674855
972 | 48600 0.0173026896358844
973 | 48650 0.01736851018107424
974 | 48700 0.01660793172269933
975 | 48750 0.015823389884879763
976 | 48800 0.01764114148963115
977 | 48850 0.018223359696669716
978 | 48900 0.016464043973499827
979 | 48950 0.017578349420645973
980 | 49000 0.017048464158549593
981 | 49050 0.01737916587908629
982 | 49100 0.017894640052940433
983 | 49150 0.017432972081888983
984 | 49200 0.01768449056661291
985 | 49250 0.019839630577420605
986 | 49300 0.018843304703730104
987 | 49350 0.017592116675744913
988 | 49400 0.01712245066550385
989 | 49450 0.016568897710295606
990 | 49500 0.016890776538219115
991 | 49550 0.016712344541969272
992 | 49600 0.01613150716659604
993 | 49650 0.016306609359537656
994 | 49700 0.016858187849047083
995 | 49750 0.016671776397589804
996 | 49800 0.01629788205406599
997 | 49850 0.016611347298653004
998 | 49900 0.016830674601828032
999 | 49950 0.016879577640169948
1000 | 50000 0.016890788685384003
1001 |
--------------------------------------------------------------------------------