├── .dvc
├── .gitignore
└── config
├── .dvcignore
├── .gitignore
├── .python-version
├── CHANGELOG.md
├── README.md
├── assets
├── dvc.lock
├── dvc.yaml
├── foam
├── newInterFoam
│ ├── Make
│ │ ├── files
│ │ └── options
│ ├── UEqn.H
│ ├── alphaSuSp.H
│ ├── correctPhi.H
│ ├── createFields.H
│ ├── initCorrectPhi.H
│ ├── interMixingFoam
│ │ ├── Make
│ │ │ ├── files
│ │ │ └── options
│ │ ├── alphaControls.H
│ │ ├── alphaEqn.H
│ │ ├── alphaEqnSubCycle.H
│ │ ├── createFields.H
│ │ ├── immiscibleIncompressibleThreePhaseMixture
│ │ │ ├── immiscibleIncompressibleThreePhaseMixture.C
│ │ │ └── immiscibleIncompressibleThreePhaseMixture.H
│ │ ├── incompressibleThreePhaseMixture
│ │ │ ├── incompressibleThreePhaseMixture.C
│ │ │ └── incompressibleThreePhaseMixture.H
│ │ ├── interMixingFoam.C
│ │ └── threePhaseInterfaceProperties
│ │ │ ├── threePhaseInterfaceProperties.C
│ │ │ └── threePhaseInterfaceProperties.H
│ ├── newInterFoam.C
│ ├── pEqn.H
│ └── rhofs.H
└── sim
│ ├── 0
│ ├── U
│ ├── alpha.water
│ ├── alpha.water.org
│ ├── epsilon
│ ├── k
│ ├── nut
│ └── p_rgh
│ ├── .gitignore
│ ├── Allrun
│ ├── constant
│ ├── g
│ ├── transportProperties
│ └── turbulenceProperties
│ └── system
│ ├── blockMeshDict
│ ├── controlDict
│ ├── decomposeParDict
│ ├── fvSchemes
│ ├── fvSolution
│ ├── setFieldsDict
│ ├── snappyHexMeshDict
│ └── snappyHexMeshDict.org
├── params.yaml
├── pyproject.toml
├── scripts
└── compare_meshes.py
├── tests
├── test_model.py
└── test_utils.py
├── uibk
└── deep_preconditioning
│ ├── __init__.py
│ ├── cg.py
│ ├── data_set.py
│ ├── generate_data.py
│ ├── metrics.py
│ ├── model.py
│ ├── test.py
│ ├── train.py
│ └── utils.py
└── uv.lock
/.dvc/.gitignore:
--------------------------------------------------------------------------------
1 | /config.local
2 | /tmp
3 | /cache
4 |
--------------------------------------------------------------------------------
/.dvc/config:
--------------------------------------------------------------------------------
1 | [core]
2 | analytics = false
3 | autostage = true
4 |
--------------------------------------------------------------------------------
/.dvcignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Custom
2 | .bash_history
3 | .viminfo
4 | foam/newInterFoam/Make/linux*
5 | platforms/
6 | assets # symlink
7 |
8 | # Created by https://www.toptal.com/developers/gitignore/api/python
9 | # Edit at https://www.toptal.com/developers/gitignore?templates=python
10 |
11 | ### Python ###
12 | # Byte-compiled / optimized / DLL files
13 | __pycache__/
14 | *.py[cod]
15 | *$py.class
16 |
17 | # C extensions
18 | *.so
19 |
20 | # Distribution / packaging
21 | .Python
22 | build/
23 | develop-eggs/
24 | dist/
25 | downloads/
26 | eggs/
27 | .eggs/
28 | lib/
29 | lib64/
30 | parts/
31 | sdist/
32 | var/
33 | wheels/
34 | share/python-wheels/
35 | *.egg-info/
36 | .installed.cfg
37 | *.egg
38 | MANIFEST
39 |
40 | # PyInstaller
41 | # Usually these files are written by a python script from a template
42 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
43 | *.manifest
44 | *.spec
45 |
46 | # Installer logs
47 | pip-log.txt
48 | pip-delete-this-directory.txt
49 |
50 | # Unit test / coverage reports
51 | htmlcov/
52 | .tox/
53 | .nox/
54 | .coverage
55 | .coverage.*
56 | .cache
57 | nosetests.xml
58 | coverage.xml
59 | *.cover
60 | *.py,cover
61 | .hypothesis/
62 | .pytest_cache/
63 | cover/
64 |
65 | # Translations
66 | *.mo
67 | *.pot
68 |
69 | # Django stuff:
70 | *.log
71 | local_settings.py
72 | db.sqlite3
73 | db.sqlite3-journal
74 |
75 | # Flask stuff:
76 | instance/
77 | .webassets-cache
78 |
79 | # Scrapy stuff:
80 | .scrapy
81 |
82 | # Sphinx documentation
83 | docs/_build/
84 |
85 | # PyBuilder
86 | .pybuilder/
87 | target/
88 |
89 | # Jupyter Notebook
90 | .ipynb_checkpoints
91 |
92 | # IPython
93 | profile_default/
94 | ipython_config.py
95 |
96 | # pyenv
97 | # For a library or package, you might want to ignore these files since the code is
98 | # intended to run in multiple environments; otherwise, check them in:
99 | # .python-version
100 |
101 | # pipenv
102 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
103 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
104 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
105 | # install all needed dependencies.
106 | #Pipfile.lock
107 |
108 | # poetry
109 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
110 | # This is especially recommended for binary packages to ensure reproducibility, and is more
111 | # commonly ignored for libraries.
112 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
113 | #poetry.lock
114 |
115 | # pdm
116 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
117 | #pdm.lock
118 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
119 | # in version control.
120 | # https://pdm.fming.dev/#use-with-ide
121 | .pdm.toml
122 |
123 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
124 | __pypackages__/
125 |
126 | # Celery stuff
127 | celerybeat-schedule
128 | celerybeat.pid
129 |
130 | # SageMath parsed files
131 | *.sage.py
132 |
133 | # Environments
134 | .env
135 | .venv
136 | env/
137 | venv/
138 | ENV/
139 | env.bak/
140 | venv.bak/
141 |
142 | # Spyder project settings
143 | .spyderproject
144 | .spyproject
145 |
146 | # Rope project settings
147 | .ropeproject
148 |
149 | # mkdocs documentation
150 | /site
151 |
152 | # mypy
153 | .mypy_cache/
154 | .dmypy.json
155 | dmypy.json
156 |
157 | # Pyre type checker
158 | .pyre/
159 |
160 | # pytype static type analyzer
161 | .pytype/
162 |
163 | # Cython debug symbols
164 | cython_debug/
165 |
166 | # PyCharm
167 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
168 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
169 | # and can be added to the global gitignore or merged into this file. For a more nuclear
170 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
171 | #.idea/
172 |
173 | ### Python Patch ###
174 | # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
175 | poetry.toml
176 |
177 | # ruff
178 | .ruff_cache/
179 |
180 | # LSP config files
181 | pyrightconfig.json
182 |
183 | # End of https://www.toptal.com/developers/gitignore/api/python
184 |
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
1 | 3.12.8
2 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # CHANGELOG
2 |
3 | ## [0.1.0] - 2024-05-17
4 |
5 | :seedling: Initial release.
6 |
7 | [0.1.0]: https://github.com/jsappl/DeepPreconditioning/releases/tag/v0.1.0
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Deep Preconditioning
4 |
5 | Design preconditioners with a CNN to accelerate the conjugate gradient method.
6 |
7 | []()
8 | [![uv](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/uv/main/assets/badge/v0.json)](https://github.com/astral-sh/uv)
9 | [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
10 |
11 |
12 |
13 | ## Setup (Linux)
14 |
15 | This has been tested with
16 |
17 | - Debian 10.2.1-6 (GNU/Linux 6.1.0-28-amd64 x86_64)
18 | - Intel(R) Core(TM) i9-10900KF CPU @ 3.70GHz
19 | - NVIDIA GeForce RTX 3070
20 | - NVIDIA Driver Version 550.54.14
21 | - CUDA Version 12.4
22 | - Python 3.12.8
23 |
24 | Clone this repository and `cd` into the project root.
25 |
26 | ```bash
27 | git clone git@github.com:jsappl/DeepPreconditioning.git
28 | cd DeepPreconditioning/
29 | ```
30 |
31 | We use [uv](https://docs.astral.sh/uv/) to build the package and manage dependencies so make sure it is installed.
32 | uv selects the Python interpreter pinned in the `.python-version` file.
33 | It creates a virtual environment for the project; having one is optional but highly recommended.
34 | uv will auto-detect and reuse an existing project virtual environment where possible.
35 | Run `uv sync` to install dependencies from the `uv.lock` file and restore the project environment.
36 |
37 | ## Generating Data Set
38 |
39 | The data set of linear systems resulting from the discretization of the pressure correction equation is generated using OpenFOAM 7.
40 | The most convenient way to use OpenFOAM 7 is to download and run the official Docker container.
41 | For further instructions please visit <https://openfoam.org/download/7-linux/>.
42 | First, compile our custom `newInterFoam` solver inside the container.
43 |
44 | ```bash
45 | openfoam7-linux # run in root folder
46 | cd foam/newInterFoam/ # inside Docker container
47 | wmake
48 | ```
49 |
50 | After compiling the solver keep the Docker container up and running.
51 | In another shell run the `generate` stage of the `dvc` pipeline.
52 | The data set is automatically generated making use of the OpenFOAM 7 Docker container.
53 |
54 | ## Developing
55 |
56 | Use uv to add another dependency and update the project's `uv.lock` file afterward.
57 |
58 | ```bash
59 | uv add <package>
60 | uv lock
61 | ```
62 |
63 | Run `uv sync` to remove packages that are no longer in the `uv.lock` file.
64 |
65 | Version numbers are (roughly) assigned and incremented according to [Semantic Versioning 2.0.0](https://semver.org/spec/v2.0.0.html).
66 | Write commit messages according to the [Conventional Commits 1.0.0](https://www.conventionalcommits.org/en/v1.0.0/) convention.
67 | Keep a changelog and stick to this style guide <https://keepachangelog.com/en/1.0.0/>.
68 | Use these tools for code formatting and linting.
69 |
70 | - `ruff` (includes `isort` and `flake8`)
71 | - `yapf`
72 |
73 | They are automatically configured by the `pyproject.toml` file.
74 |
--------------------------------------------------------------------------------
/assets:
--------------------------------------------------------------------------------
1 | /data/deep_preconditioning/assets/
--------------------------------------------------------------------------------
/dvc.lock:
--------------------------------------------------------------------------------
1 | schema: '2.0'
2 | stages:
3 | generate:
4 | cmd: python ./uibk/deep_preconditioning/generate_data.py
5 | deps:
6 | - path: ./uibk/deep_preconditioning/generate_data.py
7 | hash: md5
8 | md5: 4450e4d7c0e1bacdb774efadfb4be2ec
9 | size: 3809
10 | params:
11 | params.yaml:
12 | mesh_cells: 3
13 | number_samples: 500
14 | resolution: 128
15 | outs:
16 | - path: ./assets/data/raw/sludge_patterns/
17 | hash: md5
18 | md5: f00fb1455098e66d9d65dd935fa00b58.dir
19 | size: 437897486
20 | nfiles: 1500
21 | train:
22 | cmd: python ./uibk/deep_preconditioning/train.py
23 | deps:
24 | - path: ./assets/data/raw/sludge_patterns/
25 | hash: md5
26 | md5: bc642c9e50920d092fe5179ddbca61fb.dir
27 | size: 193604030
28 | nfiles: 1500
29 | - path: ./uibk/deep_preconditioning/data_set.py
30 | hash: md5
31 | md5: 3f0e983129bd17eae88b8e3e9733e35a
32 | size: 13956
33 | - path: ./uibk/deep_preconditioning/train.py
34 | hash: md5
35 | md5: 81ed2150b8a82e3fcc6a1e5396f4c6e1
36 | size: 5925
37 | params:
38 | params.yaml:
39 | batch_size: 4
40 | channels:
41 | - 1
42 | - 16
43 | - 32
44 | - 64
45 | - 32
46 | - 16
47 | - 1
48 | learning_rate: 0.001
49 | patience: 16
50 | outs:
51 | - path: ./assets/checkpoints/
52 | hash: md5
53 | md5: 8fb15d404211bdf4cf8582285c846f81.dir
54 | size: 87844
55 | nfiles: 1
56 | - path: ./assets/dvclive/
57 | hash: md5
58 | md5: 40ac247cafef9fec5ee7abc187bb2ecf.dir
59 | size: 65800
60 | nfiles: 7
61 | test:
62 | cmd: python ./uibk/deep_preconditioning/test.py
63 | deps:
64 | - path: ./assets/checkpoints/
65 | hash: md5
66 | md5: 8fb15d404211bdf4cf8582285c846f81.dir
67 | size: 87844
68 | nfiles: 1
69 | - path: ./assets/data/raw/sludge_patterns/
70 | hash: md5
71 | md5: bc642c9e50920d092fe5179ddbca61fb.dir
72 | size: 193604030
73 | nfiles: 1500
74 | - path: ./uibk/deep_preconditioning/test.py
75 | hash: md5
76 | md5: 133cebd6bb980189b3f9e6a14048f469
77 | size: 9176
78 | outs:
79 | - path: ./assets/results/
80 | hash: md5
81 | md5: 21c877b1b416c16797716cb78904608f.dir
82 | size: 200197
83 | nfiles: 3
84 |
--------------------------------------------------------------------------------
/dvc.yaml:
--------------------------------------------------------------------------------
1 | stages:
2 | generate:
3 | cmd: python ./uibk/deep_preconditioning/generate_data.py
4 | deps:
5 | - ./uibk/deep_preconditioning/generate_data.py
6 | outs:
7 | - ./assets/data/raw/sludge_patterns/
8 | params:
9 | - number_samples
10 | - resolution
11 | - mesh_cells
12 | desc: Generate linear systems from synthetic sludge patterns.
13 |
14 | train:
15 | cmd: python ./uibk/deep_preconditioning/train.py
16 | deps:
17 | - ./uibk/deep_preconditioning/data_set.py
18 | - ./uibk/deep_preconditioning/train.py
19 | - ./assets/data/raw/sludge_patterns/
20 | outs:
21 | - ./assets/checkpoints/
22 | - ./assets/dvclive/
23 | params:
24 | - channels
25 | - batch_size
26 | - learning_rate
27 | - patience
28 | desc: Train a deep preconditioner on synthetic sludge patterns.
29 | test:
30 | cmd: python ./uibk/deep_preconditioning/test.py
31 | deps:
32 | - ./uibk/deep_preconditioning/test.py
33 | - ./assets/checkpoints/
34 | - ./assets/data/raw/sludge_patterns/
35 | outs:
36 | - ./assets/results/
37 | desc: Test the trained deep preconditioner performance.
38 |
39 | metrics:
40 | - ./assets/dvclive/metrics.json
41 |
42 | params:
43 | - params.yaml
44 |
--------------------------------------------------------------------------------
/foam/newInterFoam/Make/files:
--------------------------------------------------------------------------------
1 | newInterFoam.C
2 |
3 | EXE = $(FOAM_USER_APPBIN)/newInterFoam
4 |
--------------------------------------------------------------------------------
/foam/newInterFoam/Make/options:
--------------------------------------------------------------------------------
1 | EXE_INC = \
2 | -I../VoF \
3 | -I$(LIB_SRC)/transportModels/twoPhaseMixture/lnInclude \
4 | -I$(LIB_SRC)/transportModels \
5 | -I$(LIB_SRC)/transportModels/incompressible/lnInclude \
6 | -I$(LIB_SRC)/transportModels/interfaceProperties/lnInclude \
7 | -I$(LIB_SRC)/TurbulenceModels/turbulenceModels/lnInclude \
8 | -I$(LIB_SRC)/TurbulenceModels/incompressible/lnInclude \
9 | -I$(LIB_SRC)/transportModels/immiscibleIncompressibleTwoPhaseMixture/lnInclude \
10 | -I$(LIB_SRC)/finiteVolume/lnInclude \
11 | -I$(LIB_SRC)/dynamicFvMesh/lnInclude \
12 | -I$(LIB_SRC)/meshTools/lnInclude \
13 | -I$(LIB_SRC)/sampling/lnInclude \
14 | -I/opt/openfoam7/applications/solvers/multiphase/VoF
15 |
16 | EXE_LIBS = \
17 | -limmiscibleIncompressibleTwoPhaseMixture \
18 | -lturbulenceModels \
19 | -lincompressibleTurbulenceModels \
20 | -lfiniteVolume \
21 | -ldynamicFvMesh \
22 | -ltopoChangerFvMesh \
23 | -lfvOptions \
24 | -lmeshTools
25 |
--------------------------------------------------------------------------------
/foam/newInterFoam/UEqn.H:
--------------------------------------------------------------------------------
1 | MRF.correctBoundaryVelocity(U);
2 |
3 | fvVectorMatrix UEqn
4 | (
5 | fvm::ddt(rho, U) + fvm::div(rhoPhi, U)
6 | + MRF.DDt(rho, U)
7 | + turbulence->divDevRhoReff(rho, U)
8 | ==
9 | fvOptions(rho, U)
10 | );
11 |
12 | UEqn.relax();
13 |
14 | fvOptions.constrain(UEqn);
15 |
16 | if (pimple.momentumPredictor())
17 | {
18 | solve
19 | (
20 | UEqn
21 | ==
22 | fvc::reconstruct
23 | (
24 | (
25 | mixture.surfaceTensionForce()
26 | - ghf*fvc::snGrad(rho)
27 | - fvc::snGrad(p_rgh)
28 | ) * mesh.magSf()
29 | )
30 | );
31 |
32 | fvOptions.correct(U);
33 | }
34 |
--------------------------------------------------------------------------------
/foam/newInterFoam/alphaSuSp.H:
--------------------------------------------------------------------------------
1 | zeroField Su;
2 | zeroField Sp;
3 | zeroField divU;
4 |
--------------------------------------------------------------------------------
/foam/newInterFoam/correctPhi.H:
--------------------------------------------------------------------------------
1 | CorrectPhi
2 | (
3 | U,
4 | phi,
5 | p_rgh,
6 | surfaceScalarField("rAUf", fvc::interpolate(rAU())),
7 | geometricZeroField(),
8 | pimple,
9 | true
10 | );
11 |
12 | #include "continuityErrs.H"
13 |
--------------------------------------------------------------------------------
/foam/newInterFoam/createFields.H:
--------------------------------------------------------------------------------
1 | #include "createRDeltaT.H"
2 |
3 | Info<< "Reading field p_rgh\n" << endl;
4 | volScalarField p_rgh
5 | (
6 | IOobject
7 | (
8 | "p_rgh",
9 | runTime.timeName(),
10 | mesh,
11 | IOobject::MUST_READ,
12 | IOobject::AUTO_WRITE
13 | ),
14 | mesh
15 | );
16 |
17 | Info<< "Reading field U\n" << endl;
18 | volVectorField U
19 | (
20 | IOobject
21 | (
22 | "U",
23 | runTime.timeName(),
24 | mesh,
25 | IOobject::MUST_READ,
26 | IOobject::AUTO_WRITE
27 | ),
28 | mesh
29 | );
30 |
31 | #include "createPhi.H"
32 |
33 |
34 | Info<< "Reading transportProperties\n" << endl;
35 | immiscibleIncompressibleTwoPhaseMixture mixture(U, phi);
36 |
37 | volScalarField& alpha1(mixture.alpha1());
38 | volScalarField& alpha2(mixture.alpha2());
39 |
40 | const dimensionedScalar& rho1 = mixture.rho1();
41 | const dimensionedScalar& rho2 = mixture.rho2();
42 |
43 |
44 | // Need to store rho for ddt(rho, U)
45 | volScalarField rho
46 | (
47 | IOobject
48 | (
49 | "rho",
50 | runTime.timeName(),
51 | mesh,
52 | IOobject::READ_IF_PRESENT
53 | ),
54 | alpha1*rho1 + alpha2*rho2
55 | );
56 | rho.oldTime();
57 |
58 |
59 | // Mass flux
60 | surfaceScalarField rhoPhi
61 | (
62 | IOobject
63 | (
64 | "rhoPhi",
65 | runTime.timeName(),
66 | mesh,
67 | IOobject::NO_READ,
68 | IOobject::NO_WRITE
69 | ),
70 | fvc::interpolate(rho)*phi
71 | );
72 |
73 |
74 | // Construct incompressible turbulence model
75 | autoPtr turbulence
76 | (
77 | incompressible::turbulenceModel::New(U, phi, mixture)
78 | );
79 |
80 |
81 | #include "readGravitationalAcceleration.H"
82 | #include "readhRef.H"
83 | #include "gh.H"
84 |
85 |
86 | volScalarField p
87 | (
88 | IOobject
89 | (
90 | "p",
91 | runTime.timeName(),
92 | mesh,
93 | IOobject::NO_READ,
94 | IOobject::AUTO_WRITE
95 | ),
96 | p_rgh + rho*gh
97 | );
98 |
99 | label pRefCell = 0;
100 | scalar pRefValue = 0.0;
101 | setRefCell
102 | (
103 | p,
104 | p_rgh,
105 | pimple.dict(),
106 | pRefCell,
107 | pRefValue
108 | );
109 |
110 | if (p_rgh.needReference())
111 | {
112 | p += dimensionedScalar
113 | (
114 | "p",
115 | p.dimensions(),
116 | pRefValue - getRefCellValue(p, pRefCell)
117 | );
118 | p_rgh = p - rho*gh;
119 | }
120 |
121 | mesh.setFluxRequired(p_rgh.name());
122 | mesh.setFluxRequired(alpha1.name());
123 |
124 | #include "createMRF.H"
125 | #include "createFvOptions.H"
126 |
--------------------------------------------------------------------------------
/foam/newInterFoam/initCorrectPhi.H:
--------------------------------------------------------------------------------
1 | tmp rAU;
2 |
3 | if (correctPhi)
4 | {
5 | rAU = new volScalarField
6 | (
7 | IOobject
8 | (
9 | "rAU",
10 | runTime.timeName(),
11 | mesh,
12 | IOobject::READ_IF_PRESENT,
13 | IOobject::AUTO_WRITE
14 | ),
15 | mesh,
16 | dimensionedScalar(dimTime/dimDensity, 1)
17 | );
18 |
19 | CorrectPhi
20 | (
21 | U,
22 | phi,
23 | p_rgh,
24 | surfaceScalarField("rAUf", fvc::interpolate(rAU())),
25 | geometricZeroField(),
26 | pimple,
27 | false
28 | );
29 |
30 | #include "continuityErrs.H"
31 | }
32 | else
33 | {
34 | CorrectPhi
35 | (
36 | U,
37 | phi,
38 | p_rgh,
39 | dimensionedScalar(dimTime/rho.dimensions(), 1),
40 | geometricZeroField(),
41 | pimple,
42 | false
43 | );
44 |
45 | #include "continuityErrs.H"
46 | }
47 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/Make/files:
--------------------------------------------------------------------------------
1 | incompressibleThreePhaseMixture/incompressibleThreePhaseMixture.C
2 | threePhaseInterfaceProperties/threePhaseInterfaceProperties.C
3 | immiscibleIncompressibleThreePhaseMixture/immiscibleIncompressibleThreePhaseMixture.C
4 | interMixingFoam.C
5 |
6 | EXE = $(FOAM_APPBIN)/interMixingFoam
7 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/Make/options:
--------------------------------------------------------------------------------
1 | EXE_INC = \
2 | -I. \
3 | -I.. \
4 | -I../../VoF \
5 | -I$(LIB_SRC)/transportModels/twoPhaseMixture/lnInclude \
6 | -IimmiscibleIncompressibleThreePhaseMixture \
7 | -IincompressibleThreePhaseMixture \
8 | -IthreePhaseInterfaceProperties \
9 | -I$(LIB_SRC)/transportModels/interfaceProperties/lnInclude \
10 | -I$(LIB_SRC)/transportModels/twoPhaseProperties/alphaContactAngle/alphaContactAngle \
11 | -I$(LIB_SRC)/TurbulenceModels/turbulenceModels/lnInclude \
12 | -I$(LIB_SRC)/TurbulenceModels/incompressible/lnInclude \
13 | -I$(LIB_SRC)/finiteVolume/lnInclude \
14 | -I$(LIB_SRC)/dynamicFvMesh/lnInclude \
15 | -I$(LIB_SRC)/transportModels \
16 | -I$(LIB_SRC)/meshTools/lnInclude \
17 | -I$(LIB_SRC)/sampling/lnInclude
18 |
19 | EXE_LIBS = \
20 | -ltwoPhaseMixture \
21 | -ltwoPhaseProperties \
22 | -lincompressibleTransportModels \
23 | -lturbulenceModels \
24 | -lincompressibleTurbulenceModels \
25 | -lfiniteVolume \
26 | -ldynamicFvMesh \
27 | -lmeshTools \
28 | -lfvOptions \
29 | -lsampling
30 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/alphaControls.H:
--------------------------------------------------------------------------------
1 | const dictionary& alphaControls = mesh.solverDict(alpha1.name());
2 |
3 | label nAlphaCorr(readLabel(alphaControls.lookup("nAlphaCorr")));
4 |
5 | label nAlphaSubCycles(readLabel(alphaControls.lookup("nAlphaSubCycles")));
6 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/alphaEqn.H:
--------------------------------------------------------------------------------
1 | {
2 | word alphaScheme("div(phi,alpha)");
3 | word alpharScheme("div(phirb,alpha)");
4 |
5 | surfaceScalarField phir
6 | (
7 | IOobject
8 | (
9 | "phir",
10 | runTime.timeName(),
11 | mesh,
12 | IOobject::NO_READ,
13 | IOobject::NO_WRITE
14 | ),
15 | mixture.cAlpha()*mag(phi/mesh.magSf())*mixture.nHatf()
16 | );
17 |
18 | for (int gCorr=0; gCorr(mesh, phi).flux(alpha1)
69 | );
70 |
71 | // Calculate the flux correction for alpha1
72 | alphaPhi1 -= alphaPhi1BD;
73 |
74 | // Calculate the limiter for alpha1
75 | if (LTS)
76 | {
77 | const volScalarField& rDeltaT =
78 | fv::localEulerDdt::localRDeltaT(mesh);
79 |
80 | MULES::limiter
81 | (
82 | allLambda,
83 | rDeltaT,
84 | geometricOneField(),
85 | alpha1,
86 | alphaPhi1BD,
87 | alphaPhi1,
88 | zeroField(),
89 | zeroField(),
90 | oneField(),
91 | zeroField()
92 | );
93 | }
94 | else
95 | {
96 | MULES::limiter
97 | (
98 | allLambda,
99 | 1.0/runTime.deltaT().value(),
100 | geometricOneField(),
101 | alpha1,
102 | alphaPhi1BD,
103 | alphaPhi1,
104 | zeroField(),
105 | zeroField(),
106 | oneField(),
107 | zeroField()
108 | );
109 | }
110 |
111 | // Create the complete flux for alpha2
112 | surfaceScalarField alphaPhi2
113 | (
114 | fvc::flux
115 | (
116 | phi,
117 | alpha2,
118 | alphaScheme
119 | )
120 | + fvc::flux
121 | (
122 | -fvc::flux(phir, alpha1, alpharScheme),
123 | alpha2,
124 | alpharScheme
125 | )
126 | );
127 |
128 | // Create the bounded (upwind) flux for alpha2
129 | surfaceScalarField alphaPhi2BD
130 | (
131 | upwind(mesh, phi).flux(alpha2)
132 | );
133 |
134 | // Calculate the flux correction for alpha2
135 | alphaPhi2 -= alphaPhi2BD;
136 |
137 | // Further limit the limiter for alpha2
138 | if (LTS)
139 | {
140 | const volScalarField& rDeltaT =
141 | fv::localEulerDdt::localRDeltaT(mesh);
142 |
143 | MULES::limiter
144 | (
145 | allLambda,
146 | rDeltaT,
147 | geometricOneField(),
148 | alpha2,
149 | alphaPhi2BD,
150 | alphaPhi2,
151 | zeroField(),
152 | zeroField(),
153 | oneField(),
154 | zeroField()
155 | );
156 | }
157 | else
158 | {
159 | MULES::limiter
160 | (
161 | allLambda,
162 | 1.0/runTime.deltaT().value(),
163 | geometricOneField(),
164 | alpha2,
165 | alphaPhi2BD,
166 | alphaPhi2,
167 | zeroField(),
168 | zeroField(),
169 | oneField(),
170 | zeroField()
171 | );
172 | }
173 |
174 | // Construct the limited fluxes
175 | alphaPhi1 = alphaPhi1BD + lambda*alphaPhi1;
176 | alphaPhi2 = alphaPhi2BD + lambda*alphaPhi2;
177 |
178 | // Solve for alpha1
179 | solve(fvm::ddt(alpha1) + fvc::div(alphaPhi1));
180 |
181 | // Create the diffusion coefficients for alpha2<->alpha3
182 | volScalarField Dc23(D23*max(alpha3, scalar(0))*pos0(alpha2));
183 | volScalarField Dc32(D23*max(alpha2, scalar(0))*pos0(alpha3));
184 |
185 | // Add the diffusive flux for alpha3->alpha2
186 | alphaPhi2 -= fvc::interpolate(Dc32)*mesh.magSf()*fvc::snGrad(alpha1);
187 |
188 | // Solve for alpha2
189 | fvScalarMatrix alpha2Eqn
190 | (
191 | fvm::ddt(alpha2)
192 | + fvc::div(alphaPhi2)
193 | - fvm::laplacian(Dc23 + Dc32, alpha2)
194 | );
195 | alpha2Eqn.solve();
196 |
197 | // Construct the complete mass flux
198 | rhoPhi =
199 | alphaPhi1*(rho1 - rho3)
200 | + (alphaPhi2 + alpha2Eqn.flux())*(rho2 - rho3)
201 | + phi*rho3;
202 |
203 | alpha3 = 1.0 - alpha1 - alpha2;
204 | }
205 |
206 | Info<< "Air phase volume fraction = "
207 | << alpha1.weightedAverage(mesh.V()).value()
208 | << " Min(" << alpha1.name() << ") = " << min(alpha1).value()
209 | << " Max(" << alpha1.name() << ") = " << max(alpha1).value()
210 | << endl;
211 |
212 | Info<< "Liquid phase volume fraction = "
213 | << alpha2.weightedAverage(mesh.V()).value()
214 | << " Min(" << alpha2.name() << ") = " << min(alpha2).value()
215 | << " Max(" << alpha2.name() << ") = " << max(alpha2).value()
216 | << endl;
217 | }
218 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/alphaEqnSubCycle.H:
--------------------------------------------------------------------------------
1 | if (nAlphaSubCycles > 1)
2 | {
3 | dimensionedScalar totalDeltaT = runTime.deltaT();
4 | surfaceScalarField rhoPhiSum
5 | (
6 | IOobject
7 | (
8 | "rhoPhiSum",
9 | runTime.timeName(),
10 | mesh
11 | ),
12 | mesh,
13 | dimensionedScalar(rhoPhi.dimensions(), 0)
14 | );
15 |
16 | for
17 | (
18 | subCycle alphaSubCycle(alpha1, nAlphaSubCycles);
19 | !(++alphaSubCycle).end();
20 | )
21 | {
22 | #include "alphaEqn.H"
23 | rhoPhiSum += (runTime.deltaT()/totalDeltaT)*rhoPhi;
24 | }
25 |
26 | rhoPhi = rhoPhiSum;
27 | }
28 | else
29 | {
30 | #include "alphaEqn.H"
31 | }
32 |
33 | {
34 | volScalarField rhoNew(alpha1*rho1 + alpha2*rho2 + alpha3*rho3);
35 |
36 | // solve(fvm::ddt(rho) + fvc::div(rhoPhi));
37 | // Info<< "density error = "
38 | // << max((mag(rho - rhoNew)/mag(rhoNew))().primitiveField()) << endl;
39 |
40 | rho == rhoNew;
41 | }
42 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/createFields.H:
--------------------------------------------------------------------------------
1 | #include "createRDeltaT.H"
2 |
3 | Info<< "Reading field p_rgh\n" << endl;
4 | volScalarField p_rgh
5 | (
6 | IOobject
7 | (
8 | "p_rgh",
9 | runTime.timeName(),
10 | mesh,
11 | IOobject::MUST_READ,
12 | IOobject::AUTO_WRITE
13 | ),
14 | mesh
15 | );
16 |
17 | Info<< "Reading field U\n" << endl;
18 | volVectorField U
19 | (
20 | IOobject
21 | (
22 | "U",
23 | runTime.timeName(),
24 | mesh,
25 | IOobject::MUST_READ,
26 | IOobject::AUTO_WRITE
27 | ),
28 | mesh
29 | );
30 |
31 | #include "createPhi.H"
32 |
33 | immiscibleIncompressibleThreePhaseMixture mixture(U, phi);
34 |
35 | volScalarField& alpha1(mixture.alpha1());
36 | volScalarField& alpha2(mixture.alpha2());
37 | volScalarField& alpha3(mixture.alpha3());
38 |
39 | const dimensionedScalar& rho1 = mixture.rho1();
40 | const dimensionedScalar& rho2 = mixture.rho2();
41 | const dimensionedScalar& rho3 = mixture.rho3();
42 |
43 | dimensionedScalar D23("D23", dimViscosity, mixture);
44 |
45 | // Need to store rho for ddt(rho, U)
46 | volScalarField rho
47 | (
48 | IOobject
49 | (
50 | "rho",
51 | runTime.timeName(),
52 | mesh,
53 | IOobject::READ_IF_PRESENT
54 | ),
55 | alpha1*rho1 + alpha2*rho2 + alpha3*rho3
56 | );
57 | rho.oldTime();
58 |
59 |
60 | // Mass flux
61 | // Initialisation does not matter because rhoPhi is reset after the
62 | // alpha solution before it is used in the U equation.
63 | surfaceScalarField rhoPhi
64 | (
65 | IOobject
66 | (
67 | "rhoPhi",
68 | runTime.timeName(),
69 | mesh,
70 | IOobject::NO_READ,
71 | IOobject::NO_WRITE
72 | ),
73 | rho1*phi
74 | );
75 |
76 |
77 | // Construct incompressible turbulence model
78 | autoPtr turbulence
79 | (
80 | incompressible::turbulenceModel::New(U, phi, mixture)
81 | );
82 |
83 |
84 | #include "readGravitationalAcceleration.H"
85 | #include "readhRef.H"
86 | #include "gh.H"
87 |
88 |
89 | volScalarField p
90 | (
91 | IOobject
92 | (
93 | "p",
94 | runTime.timeName(),
95 | mesh,
96 | IOobject::NO_READ,
97 | IOobject::AUTO_WRITE
98 | ),
99 | p_rgh + rho*gh
100 | );
101 |
102 | label pRefCell = 0;
103 | scalar pRefValue = 0.0;
104 | setRefCell
105 | (
106 | p,
107 | p_rgh,
108 | pimple.dict(),
109 | pRefCell,
110 | pRefValue
111 | );
112 |
113 | if (p_rgh.needReference())
114 | {
115 | p += dimensionedScalar
116 | (
117 | "p",
118 | p.dimensions(),
119 | pRefValue - getRefCellValue(p, pRefCell)
120 | );
121 | p_rgh = p - rho*gh;
122 | }
123 |
124 | mesh.setFluxRequired(p_rgh.name());
125 | mesh.setFluxRequired(alpha2.name());
126 |
127 | #include "createMRF.H"
128 | #include "createFvOptions.H"
129 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/immiscibleIncompressibleThreePhaseMixture/immiscibleIncompressibleThreePhaseMixture.C:
--------------------------------------------------------------------------------
1 | /*---------------------------------------------------------------------------*\
2 | ========= |
3 | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 | \\ / O peration | Website: https://openfoam.org
5 | \\ / A nd | Copyright (C) 2014-2018 OpenFOAM Foundation
6 | \\/ M anipulation |
7 | -------------------------------------------------------------------------------
8 | License
9 | This file is part of OpenFOAM.
10 |
11 | OpenFOAM is free software: you can redistribute it and/or modify it
12 | under the terms of the GNU General Public License as published by
13 | the Free Software Foundation, either version 3 of the License, or
14 | (at your option) any later version.
15 |
16 | OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 | for more details.
20 |
21 | You should have received a copy of the GNU General Public License
22 | along with OpenFOAM. If not, see <https://www.gnu.org/licenses/>.
23 |
24 | \*---------------------------------------------------------------------------*/
25 |
26 | #include "immiscibleIncompressibleThreePhaseMixture.H"
27 |
28 |
29 | // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
30 |
31 | Foam::immiscibleIncompressibleThreePhaseMixture::
32 | immiscibleIncompressibleThreePhaseMixture
33 | (
34 |     const volVectorField& U,
35 |     const surfaceScalarField& phi
36 | )
37 | :
38 |     incompressibleThreePhaseMixture(U, phi),
39 |     threePhaseInterfaceProperties
40 |     (
41 |         static_cast<const incompressibleThreePhaseMixture&>(*this)  // template argument restored: pass the mixture base of *this to the interface-properties base
42 |     )
43 | {}
44 |
45 |
46 | // ************************************************************************* //
47 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/immiscibleIncompressibleThreePhaseMixture/immiscibleIncompressibleThreePhaseMixture.H:
--------------------------------------------------------------------------------
1 | /*---------------------------------------------------------------------------*\
2 | ========= |
3 | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 | \\ / O peration | Website: https://openfoam.org
5 | \\ / A nd | Copyright (C) 2014-2018 OpenFOAM Foundation
6 | \\/ M anipulation |
7 | -------------------------------------------------------------------------------
8 | License
9 | This file is part of OpenFOAM.
10 |
11 | OpenFOAM is free software: you can redistribute it and/or modify it
12 | under the terms of the GNU General Public License as published by
13 | the Free Software Foundation, either version 3 of the License, or
14 | (at your option) any later version.
15 |
16 | OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 | for more details.
20 |
21 | You should have received a copy of the GNU General Public License
22 |     along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
23 |
24 | Class
25 | Foam::immiscibleIncompressibleThreePhaseMixture
26 |
27 | Description
28 |     An immiscible incompressible three-phase mixture transport model
29 |
30 | SourceFiles
31 | immiscibleIncompressibleThreePhaseMixture.C
32 |
33 | \*---------------------------------------------------------------------------*/
34 |
35 | #ifndef immiscibleIncompressibleThreePhaseMixture_H
36 | #define immiscibleIncompressibleThreePhaseMixture_H
37 |
38 | #include "incompressibleThreePhaseMixture.H"
39 | #include "threePhaseInterfaceProperties.H"
40 |
41 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
42 |
43 | namespace Foam
44 | {
45 |
46 | /*---------------------------------------------------------------------------*\
47 | Class immiscibleIncompressibleThreePhaseMixture Declaration
48 | \*---------------------------------------------------------------------------*/
49 |
50 | class immiscibleIncompressibleThreePhaseMixture  // combines transport mixture and interface properties in one object
51 | :
52 |     public incompressibleThreePhaseMixture,
53 |     public threePhaseInterfaceProperties
54 | {
55 |
56 | public:
57 |
58 |     // Constructors
59 |
60 |         //- Construct from components
61 |         immiscibleIncompressibleThreePhaseMixture
62 |         (
63 |             const volVectorField& U,
64 |             const surfaceScalarField& phi
65 |         );
66 |
67 |
68 |     //- Destructor
69 |     virtual ~immiscibleIncompressibleThreePhaseMixture()
70 |     {}
71 |
72 |
73 |     // Member Functions
74 |
75 |         //- Correct the transport and interface properties
76 |         virtual void correct()  // delegates to both base classes, viscosity first, then curvature
77 |         {
78 |             incompressibleThreePhaseMixture::correct();
79 |             threePhaseInterfaceProperties::correct();
80 |         }
81 | };
82 |
83 |
84 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
85 |
86 | } // End namespace Foam
87 |
88 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
89 |
90 | #endif
91 |
92 | // ************************************************************************* //
93 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/incompressibleThreePhaseMixture/incompressibleThreePhaseMixture.C:
--------------------------------------------------------------------------------
1 | /*---------------------------------------------------------------------------*\
2 | ========= |
3 | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 | \\ / O peration | Website: https://openfoam.org
5 | \\ / A nd | Copyright (C) 2011-2018 OpenFOAM Foundation
6 | \\/ M anipulation |
7 | -------------------------------------------------------------------------------
8 | License
9 | This file is part of OpenFOAM.
10 |
11 | OpenFOAM is free software: you can redistribute it and/or modify it
12 | under the terms of the GNU General Public License as published by
13 | the Free Software Foundation, either version 3 of the License, or
14 | (at your option) any later version.
15 |
16 | OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 | for more details.
20 |
21 | You should have received a copy of the GNU General Public License
22 |     along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
23 |
24 | \*---------------------------------------------------------------------------*/
25 |
26 | #include "incompressibleThreePhaseMixture.H"
27 | #include "addToRunTimeSelectionTable.H"
28 | #include "surfaceFields.H"
29 | #include "fvc.H"
30 |
31 | // * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * * //
32 |
33 | void Foam::incompressibleThreePhaseMixture::calcNu()  // recompute the blended kinematic viscosity nu_ from the three phase models
34 | {
35 |     nuModel1_->correct();  // refresh each phase's viscosity model before blending
36 |     nuModel2_->correct();
37 |     nuModel3_->correct();
38 |
39 |     // Average kinematic viscosity calculated from dynamic viscosity
40 |     nu_ = mu()/(alpha1_*rho1_ + alpha2_*rho2_ + alpha3_*rho3_);
41 | }
42 |
43 |
44 | // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
45 |
46 | Foam::incompressibleThreePhaseMixture::incompressibleThreePhaseMixture
47 | (
48 |     const volVectorField& U,
49 |     const surfaceScalarField& phi
50 | )
51 | :
52 |     IOdictionary
53 |     (
54 |         IOobject
55 |         (
56 |             "transportProperties",
57 |             U.time().constant(),
58 |             U.db(),
59 |             IOobject::MUST_READ_IF_MODIFIED,  // re-read the dictionary when it changes on disk
60 |             IOobject::NO_WRITE
61 |         )
62 |     ),
63 |
64 |     phase1Name_(wordList(lookup("phases"))[0]),  // phase names from the "phases" entry, in order
65 |     phase2Name_(wordList(lookup("phases"))[1]),
66 |     phase3Name_(wordList(lookup("phases"))[2]),
67 |
68 |     alpha1_
69 |     (
70 |         IOobject
71 |         (
72 |             IOobject::groupName("alpha", phase1Name_),
73 |             U.time().timeName(),
74 |             U.mesh(),
75 |             IOobject::MUST_READ,
76 |             IOobject::AUTO_WRITE
77 |         ),
78 |         U.mesh()
79 |     ),
80 |
81 |     alpha2_
82 |     (
83 |         IOobject
84 |         (
85 |             IOobject::groupName("alpha", phase2Name_),
86 |             U.time().timeName(),
87 |             U.mesh(),
88 |             IOobject::MUST_READ,
89 |             IOobject::AUTO_WRITE
90 |         ),
91 |         U.mesh()
92 |     ),
93 |
94 |     alpha3_
95 |     (
96 |         IOobject
97 |         (
98 |             IOobject::groupName("alpha", phase3Name_),
99 |             U.time().timeName(),
100 |             U.mesh(),
101 |             IOobject::MUST_READ,
102 |             IOobject::AUTO_WRITE
103 |         ),
104 |         U.mesh()
105 |     ),
106 |
107 |     U_(U),
108 |     phi_(phi),
109 |
110 |     nu_
111 |     (
112 |         IOobject
113 |         (
114 |             "nu",
115 |             U.time().timeName(),
116 |             U.db()
117 |         ),
118 |         U.mesh(),
119 |         dimensionedScalar(dimensionSet(0, 2, -1, 0, 0), 0),  // kinematic viscosity dimensions [m^2/s]
120 |         calculatedFvPatchScalarField::typeName
121 |     ),
122 |
123 |     nuModel1_
124 |     (
125 |         viscosityModel::New
126 |         (
127 |             "nu1",
128 |             subDict(phase1Name_),
129 |             U,
130 |             phi
131 |         )
132 |     ),
133 |     nuModel2_
134 |     (
135 |         viscosityModel::New
136 |         (
137 |             "nu2",
138 |             subDict(phase2Name_),
139 |             U,
140 |             phi
141 |         )
142 |     ),
143 |     nuModel3_
144 |     (
145 |         viscosityModel::New
146 |         (
147 |             "nu3",
148 |             subDict(phase3Name_),
149 |             U,
150 |             phi
151 |         )
152 |     ),
153 |
154 |     rho1_("rho", dimDensity, nuModel1_->viscosityProperties()),
155 |     rho2_("rho", dimDensity, nuModel2_->viscosityProperties()),
156 |     rho3_("rho", dimDensity, nuModel3_->viscosityProperties())
157 | {
158 |     alpha3_ == 1.0 - alpha1_ - alpha2_;  // enforce sum(alpha) == 1 (OpenFOAM '==' also sets boundary values)
159 |     calcNu();  // initialise the blended kinematic viscosity
160 | }
161 |
162 |
163 | // * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * //
164 |
165 | Foam::tmp<Foam::volScalarField>
166 | Foam::incompressibleThreePhaseMixture::mu() const
167 | {
168 |     // Phase-fraction-weighted dynamic viscosity: mu = sum_i(alpha_i*rho_i*nu_i)
169 |     return volScalarField::New
170 |     (
171 |         "mu",
172 |         alpha1_*rho1_*nuModel1_->nu()
173 |       + alpha2_*rho2_*nuModel2_->nu()
174 |       + alpha3_*rho3_*nuModel3_->nu()
175 |     );
176 | }
176 |
177 |
178 | Foam::tmp<Foam::surfaceScalarField>
179 | Foam::incompressibleThreePhaseMixture::muf() const
180 | {
181 |     surfaceScalarField alpha1f(fvc::interpolate(alpha1_));
182 |     surfaceScalarField alpha2f(fvc::interpolate(alpha2_));
183 |     surfaceScalarField alpha3f(fvc::interpolate(alpha3_));
184 |
185 |     // Face-interpolated dynamic viscosity, weighted by face phase fractions
186 |     return surfaceScalarField::New
187 |     (
188 |         "mu",
189 |         alpha1f*rho1_*fvc::interpolate(nuModel1_->nu())
190 |       + alpha2f*rho2_*fvc::interpolate(nuModel2_->nu())
191 |       + alpha3f*rho3_*fvc::interpolate(nuModel3_->nu())
192 |     );
193 | }
193 |
194 |
195 | Foam::tmp<Foam::surfaceScalarField>
196 | Foam::incompressibleThreePhaseMixture::nuf() const
197 | {
198 |     surfaceScalarField alpha1f(fvc::interpolate(alpha1_));
199 |     surfaceScalarField alpha2f(fvc::interpolate(alpha2_));
200 |     surfaceScalarField alpha3f(fvc::interpolate(alpha3_));
201 |
202 |     // Face kinematic viscosity: mass-weighted dynamic viscosity over face density
203 |     return surfaceScalarField::New
204 |     (
205 |         "nu",
206 |         (
207 |             alpha1f*rho1_*fvc::interpolate(nuModel1_->nu())
208 |           + alpha2f*rho2_*fvc::interpolate(nuModel2_->nu())
209 |           + alpha3f*rho3_*fvc::interpolate(nuModel3_->nu())
210 |         )/(alpha1f*rho1_ + alpha2f*rho2_ + alpha3f*rho3_)
211 |     );
212 | }
212 |
213 |
214 | bool Foam::incompressibleThreePhaseMixture::read()  // re-read transportProperties; true only if base and all three models read OK
215 | {
216 |     if (transportModel::read())
217 |     {
218 |         if
219 |         (
220 |             nuModel1_().read(*this)
221 |             && nuModel2_().read(*this)
222 |             && nuModel3_().read(*this)
223 |         )
224 |         {
225 |             nuModel1_->viscosityProperties().lookup("rho") >> rho1_;  // refresh phase densities from the updated dictionaries
226 |             nuModel2_->viscosityProperties().lookup("rho") >> rho2_;
227 |             nuModel3_->viscosityProperties().lookup("rho") >> rho3_;
228 |
229 |             return true;
230 |         }
231 |         else
232 |         {
233 |             return false;
234 |         }
235 |     }
236 |     else
237 |     {
238 |         return false;
239 |     }
240 | }
241 |
242 |
243 | // ************************************************************************* //
244 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/incompressibleThreePhaseMixture/incompressibleThreePhaseMixture.H:
--------------------------------------------------------------------------------
1 | /*---------------------------------------------------------------------------*\
2 | ========= |
3 | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 | \\ / O peration | Website: https://openfoam.org
5 | \\ / A nd | Copyright (C) 2011-2019 OpenFOAM Foundation
6 | \\/ M anipulation |
7 | -------------------------------------------------------------------------------
8 | License
9 | This file is part of OpenFOAM.
10 |
11 | OpenFOAM is free software: you can redistribute it and/or modify it
12 | under the terms of the GNU General Public License as published by
13 | the Free Software Foundation, either version 3 of the License, or
14 | (at your option) any later version.
15 |
16 | OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 | for more details.
20 |
21 | You should have received a copy of the GNU General Public License
22 |     along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
23 |
24 | Class
25 | Foam::incompressibleThreePhaseMixture
26 |
27 | Description
28 |
29 | SourceFiles
30 | incompressibleThreePhaseMixture.C
31 |
32 | \*---------------------------------------------------------------------------*/
33 |
34 | #ifndef incompressibleThreePhaseMixture_H
35 | #define incompressibleThreePhaseMixture_H
36 |
37 | #include "incompressible/transportModel/transportModel.H"
38 | #include "IOdictionary.H"
39 | #include "incompressible/viscosityModels/viscosityModel/viscosityModel.H"
40 | #include "dimensionedScalar.H"
41 | #include "volFields.H"
42 |
43 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
44 |
45 | namespace Foam
46 | {
47 |
48 | /*---------------------------------------------------------------------------*\
49 | Class incompressibleThreePhaseMixture Declaration
50 | \*---------------------------------------------------------------------------*/
51 |
52 | class incompressibleThreePhaseMixture
53 | :
54 |     public IOdictionary,
55 |     public transportModel
56 | {
57 |     // Private Data
58 |
59 |         word phase1Name_;
60 |         word phase2Name_;
61 |         word phase3Name_;
62 |
63 |         volScalarField alpha1_;
64 |         volScalarField alpha2_;
65 |         volScalarField alpha3_;
66 |
67 |         const volVectorField& U_;
68 |         const surfaceScalarField& phi_;
69 |
70 |         volScalarField nu_;
71 |
72 |         autoPtr<viscosityModel> nuModel1_;  // template arguments restored: one viscosity model per phase
73 |         autoPtr<viscosityModel> nuModel2_;
74 |         autoPtr<viscosityModel> nuModel3_;
75 |
76 |         dimensionedScalar rho1_;
77 |         dimensionedScalar rho2_;
78 |         dimensionedScalar rho3_;
79 |
80 |
81 |     // Private Member Functions
82 |
83 |         //- Calculate and return the laminar viscosity
84 |         void calcNu();
85 |
86 |
87 | public:
88 |
89 |     // Constructors
90 |
91 |         //- Construct from components
92 |         incompressibleThreePhaseMixture
93 |         (
94 |             const volVectorField& U,
95 |             const surfaceScalarField& phi
96 |         );
97 |
98 |
99 |     //- Destructor
100 |     ~incompressibleThreePhaseMixture()
101 |     {}
102 |
103 |
104 |     // Member Functions
105 |
106 |         const word phase1Name() const
107 |         {
108 |             return phase1Name_;
109 |         }
110 |
111 |         const word phase2Name() const
112 |         {
113 |             return phase2Name_;
114 |         }
115 |
116 |         const word phase3Name() const
117 |         {
118 |             return phase3Name_;
119 |         }
120 |
121 |         const volScalarField& alpha1() const
122 |         {
123 |             return alpha1_;
124 |         }
125 |
126 |         volScalarField& alpha1()
127 |         {
128 |             return alpha1_;
129 |         }
130 |
131 |         const volScalarField& alpha2() const
132 |         {
133 |             return alpha2_;
134 |         }
135 |
136 |         volScalarField& alpha2()
137 |         {
138 |             return alpha2_;
139 |         }
140 |
141 |         const volScalarField& alpha3() const
142 |         {
143 |             return alpha3_;
144 |         }
145 |
146 |         volScalarField& alpha3()
147 |         {
148 |             return alpha3_;
149 |         }
150 |
151 |         //- Return const-access to phase1 density
152 |         const dimensionedScalar& rho1() const
153 |         {
154 |             return rho1_;
155 |         }
156 |
157 |         //- Return const-access to phase2 density
158 |         const dimensionedScalar& rho2() const
159 |         {
160 |             return rho2_;
161 |         }
162 |
163 |         //- Return const-access to phase3 density
164 |         const dimensionedScalar& rho3() const
165 |         {
166 |             return rho3_;
167 |         }
168 |
169 |         //- Return the velocity
170 |         const volVectorField& U() const
171 |         {
172 |             return U_;
173 |         }
174 |
175 |         //- Return the flux
176 |         const surfaceScalarField& phi() const
177 |         {
178 |             return phi_;
179 |         }
180 |
181 |         //- Return const-access to phase1 viscosityModel
182 |         const viscosityModel& nuModel1() const
183 |         {
184 |             return nuModel1_();
185 |         }
186 |
187 |         //- Return const-access to phase2 viscosityModel
188 |         const viscosityModel& nuModel2() const
189 |         {
190 |             return nuModel2_();
191 |         }
192 |
193 |         //- Return const-access to phase3 viscosityModel
194 |         const viscosityModel& nuModel3() const
195 |         {
196 |             return nuModel3_();
197 |         }
198 |
199 |         //- Return the dynamic laminar viscosity
200 |         tmp<volScalarField> mu() const;
201 |
202 |         //- Return the face-interpolated dynamic laminar viscosity
203 |         tmp<surfaceScalarField> muf() const;
204 |
205 |         //- Return the kinematic laminar viscosity
206 |         tmp<volScalarField> nu() const
207 |         {
208 |             return nu_;
209 |         }
210 |
211 |         //- Return the laminar viscosity for patch
212 |         tmp<scalarField> nu(const label patchi) const
213 |         {
214 |             return nu_.boundaryField()[patchi];
215 |         }
216 |
217 |         //- Return the face-interpolated kinematic laminar viscosity
218 |         tmp<surfaceScalarField> nuf() const;
219 |
220 |         //- Correct the laminar viscosity
221 |         void correct()
222 |         {
223 |             calcNu();
224 |         }
225 |
226 |         //- Read base transportProperties dictionary
227 |         bool read();
228 | };
229 |
230 |
231 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
232 |
233 | } // End namespace Foam
234 |
235 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
236 |
237 | #endif
238 |
239 | // ************************************************************************* //
240 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/interMixingFoam.C:
--------------------------------------------------------------------------------
1 | /*---------------------------------------------------------------------------*\
2 | ========= |
3 | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 | \\ / O peration | Website: https://openfoam.org
5 | \\ / A nd | Copyright (C) 2011-2019 OpenFOAM Foundation
6 | \\/ M anipulation |
7 | -------------------------------------------------------------------------------
8 | License
9 | This file is part of OpenFOAM.
10 |
11 | OpenFOAM is free software: you can redistribute it and/or modify it
12 | under the terms of the GNU General Public License as published by
13 | the Free Software Foundation, either version 3 of the License, or
14 | (at your option) any later version.
15 |
16 | OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 | for more details.
20 |
21 | You should have received a copy of the GNU General Public License
22 |     along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
23 |
24 | Application
25 | interMixingFoam
26 |
27 | Description
28 | Solver for 3 incompressible fluids, two of which are miscible, using a VOF
29 | method to capture the interface, with optional mesh motion and mesh topology
30 | changes including adaptive re-meshing.
31 |
32 | \*---------------------------------------------------------------------------*/
33 |
34 | #include "fvCFD.H"
35 | #include "dynamicFvMesh.H"
36 | #include "CMULES.H"
37 | #include "localEulerDdtScheme.H"
38 | #include "subCycle.H"
39 | #include "immiscibleIncompressibleThreePhaseMixture.H"
40 | #include "turbulentTransportModel.H"
41 | #include "pimpleControl.H"
42 | #include "fvOptions.H"
43 | #include "CorrectPhi.H"
44 | #include "fvcSmooth.H"
45 |
46 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
47 |
48 | int main(int argc, char *argv[])
49 | {
50 |     #include "postProcess.H"
51 |
52 |     #include "setRootCaseLists.H"
53 |     #include "createTime.H"
54 |     #include "createDynamicFvMesh.H"
55 |     #include "initContinuityErrs.H"
56 |     #include "createDyMControls.H"
57 |     #include "createFields.H"
58 |     #include "initCorrectPhi.H"
59 |     #include "createUfIfPresent.H"
60 |
61 |     turbulence->validate();
62 |
63 |     if (!LTS)  // global time stepping: set the initial deltaT from the Courant number
64 |     {
65 |         #include "readTimeControls.H"
66 |         #include "CourantNo.H"
67 |         #include "setInitialDeltaT.H"
68 |     }
69 |
70 |     // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
71 |
72 |     Info<< "\nStarting time loop\n" << endl;
73 |
74 |     while (runTime.run())
75 |     {
76 |         #include "readDyMControls.H"
77 |
78 |         if (LTS)  // local time stepping uses a per-cell rDeltaT field instead
79 |         {
80 |             #include "setRDeltaT.H"
81 |         }
82 |         else
83 |         {
84 |             #include "CourantNo.H"
85 |             #include "alphaCourantNo.H"
86 |             #include "setDeltaT.H"
87 |         }
88 |
89 |         runTime++;  // advance to the next time step
90 |
91 |         Info<< "Time = " << runTime.timeName() << nl << endl;
92 |
93 |         // --- Pressure-velocity PIMPLE corrector loop
94 |         while (pimple.loop())
95 |         {
96 |             if (pimple.firstPimpleIter() || moveMeshOuterCorrectors)
97 |             {
98 |                 mesh.update();  // dynamic mesh motion/topology update
99 |
100 |                 if (mesh.changing())  // recompute mesh-dependent quantities only when needed
101 |                 {
102 |                     gh = (g & mesh.C()) - ghRef;
103 |                     ghf = (g & mesh.Cf()) - ghRef;
104 |
105 |                     MRF.update();
106 |
107 |                     if (correctPhi)
108 |                     {
109 |                         // Calculate absolute flux
110 |                         // from the mapped surface velocity
111 |                         phi = mesh.Sf() & Uf();
112 |
113 |                         #include "correctPhi.H"
114 |
115 |                         // Make the flux relative to the mesh motion
116 |                         fvc::makeRelative(phi, U);
117 |
118 |                         mixture.correct();
119 |                     }
120 |
121 |                     if (checkMeshCourantNo)
122 |                     {
123 |                         #include "meshCourantNo.H"
124 |                     }
125 |                 }
126 |             }
127 |
128 |             #include "alphaControls.H"
129 |             #include "alphaEqnSubCycle.H"
130 |
131 |             mixture.correct();  // refresh viscosity and interface properties after alpha transport
132 |
133 |             #include "UEqn.H"
134 |
135 |             // --- Pressure corrector loop
136 |             while (pimple.correct())
137 |             {
138 |                 #include "pEqn.H"
139 |             }
140 |
141 |             if (pimple.turbCorr())
142 |             {
143 |                 turbulence->correct();
144 |             }
145 |         }
146 |
147 |         #include "continuityErrs.H"
148 |
149 |         runTime.write();
150 |
151 |         Info<< "ExecutionTime = " << runTime.elapsedCpuTime() << " s"
152 |             << " ClockTime = " << runTime.elapsedClockTime() << " s"
153 |             << nl << endl;
154 |     }
155 |
156 |     Info<< "End\n" << endl;
157 |
158 |     return 0;
159 | }
160 |
161 |
162 | // ************************************************************************* //
163 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/threePhaseInterfaceProperties/threePhaseInterfaceProperties.C:
--------------------------------------------------------------------------------
1 | /*---------------------------------------------------------------------------*\
2 | ========= |
3 | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 | \\ / O peration | Website: https://openfoam.org
5 | \\ / A nd | Copyright (C) 2011-2018 OpenFOAM Foundation
6 | \\/ M anipulation |
7 | -------------------------------------------------------------------------------
8 | License
9 | This file is part of OpenFOAM.
10 |
11 | OpenFOAM is free software: you can redistribute it and/or modify it
12 | under the terms of the GNU General Public License as published by
13 | the Free Software Foundation, either version 3 of the License, or
14 | (at your option) any later version.
15 |
16 | OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 | for more details.
20 |
21 | You should have received a copy of the GNU General Public License
22 |     along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
23 |
24 | \*---------------------------------------------------------------------------*/
25 |
26 | #include "threePhaseInterfaceProperties.H"
27 | #include "alphaContactAngleFvPatchScalarField.H"
28 | #include "mathematicalConstants.H"
29 | #include "surfaceInterpolate.H"
30 | #include "fvcDiv.H"
31 | #include "fvcGrad.H"
32 | #include "fvcSnGrad.H"
33 |
34 | // * * * * * * * * * * * * * * * Static Member Data * * * * * * * * * * * * //
35 |
36 | const Foam::scalar Foam::threePhaseInterfaceProperties::convertToRad =
37 |     Foam::constant::mathematical::pi/180.0;  // degrees-to-radians factor for contact-angle input
38 |
39 |
40 | // * * * * * * * * * * * * * Private Member Functions * * * * * * * * * * * //
41 |
42 | void Foam::threePhaseInterfaceProperties::correctContactAngle
43 | (
44 |     surfaceVectorField::Boundary& nHatb
45 | ) const
46 | {
47 |     const volScalarField::Boundary& alpha1 =
48 |         mixture_.alpha1().boundaryField();
49 |     const volScalarField::Boundary& alpha2 =
50 |         mixture_.alpha2().boundaryField();
51 |     const volScalarField::Boundary& alpha3 =
52 |         mixture_.alpha3().boundaryField();
53 |     const volVectorField::Boundary& U =
54 |         mixture_.U().boundaryField();
55 |
56 |     const fvMesh& mesh = mixture_.U().mesh();
57 |     const fvBoundaryMesh& boundary = mesh.boundary();
58 |
59 |     forAll(boundary, patchi)
60 |     {
61 |         if (isA<alphaContactAngleFvPatchScalarField>(alpha1[patchi]))  // template argument restored: patch with a contact-angle BC
62 |         {
63 |             const alphaContactAngleFvPatchScalarField& a2cap =
64 |                 refCast<const alphaContactAngleFvPatchScalarField>
65 |                 (alpha2[patchi]);
66 |
67 |             const alphaContactAngleFvPatchScalarField& a3cap =
68 |                 refCast<const alphaContactAngleFvPatchScalarField>
69 |                 (alpha3[patchi]);
70 |
71 |             scalarField twoPhaseAlpha2(max(a2cap, scalar(0)));
72 |             scalarField twoPhaseAlpha3(max(a3cap, scalar(0)));
73 |
74 |             scalarField sumTwoPhaseAlpha
75 |             (
76 |                 twoPhaseAlpha2 + twoPhaseAlpha3 + small
77 |             );
78 |
79 |             // Normalise so the two angles are blended by relative phase presence
80 |             twoPhaseAlpha2 /= sumTwoPhaseAlpha;
81 |             twoPhaseAlpha3 /= sumTwoPhaseAlpha;
82 |
83 |             fvsPatchVectorField& nHatp = nHatb[patchi];
84 |
85 |             scalarField theta
86 |             (
87 |                 convertToRad
88 |               * (
89 |                    twoPhaseAlpha2*(180 - a2cap.theta(U[patchi], nHatp))
90 |                  + twoPhaseAlpha3*(180 - a3cap.theta(U[patchi], nHatp))
91 |                 )
92 |             );
93 |
94 |             vectorField nf(boundary[patchi].nf());
95 |
96 |             // Reset nHatPatch to correspond to the contact angle
97 |
98 |             scalarField a12(nHatp & nf);
99 |
100 |             scalarField b1(cos(theta));
101 |
102 |             scalarField b2(nHatp.size());
103 |
104 |             forAll(b2, facei)
105 |             {
106 |                 b2[facei] = cos(acos(a12[facei]) - theta[facei]);
107 |             }
108 |
109 |             scalarField det(1.0 - a12*a12);
110 |
111 |             scalarField a((b1 - a12*b2)/det);
112 |             scalarField b((b2 - a12*b1)/det);
113 |
114 |             nHatp = a*nf + b*nHatp;
115 |
116 |             nHatp /= (mag(nHatp) + deltaN_.value());  // renormalise; deltaN_ guards against zero magnitude
117 |         }
118 |     }
119 | }
119 |
120 |
121 | void Foam::threePhaseInterfaceProperties::calculateK()  // recompute interface normal flux nHatf_ and curvature K_ from alpha1
122 | {
123 |     const volScalarField& alpha1 = mixture_.alpha1();
124 |
125 |     const fvMesh& mesh = alpha1.mesh();
126 |     const surfaceVectorField& Sf = mesh.Sf();
127 |
128 |     // Cell gradient of alpha
129 |     volVectorField gradAlpha(fvc::grad(alpha1));
130 |
131 |     // Interpolated face-gradient of alpha
132 |     surfaceVectorField gradAlphaf(fvc::interpolate(gradAlpha));
133 |
134 |     // Face unit interface normal
135 |     surfaceVectorField nHatfv(gradAlphaf/(mag(gradAlphaf) + deltaN_));
136 |
137 |     correctContactAngle(nHatfv.boundaryFieldRef());  // enforce wall contact angle before computing curvature
138 |
139 |     // Face unit interface normal flux
140 |     nHatf_ = nHatfv & Sf;
141 |
142 |     // Simple expression for curvature
143 |     K_ = -fvc::div(nHatf_);
144 |
145 |     // Complex expression for curvature.
146 |     // Correction is formally zero but numerically non-zero.
147 |     // volVectorField nHat = gradAlpha/(mag(gradAlpha) + deltaN_);
148 |     // nHat.boundaryField() = nHatfv.boundaryField();
149 |     // K_ = -fvc::div(nHatf_) + (nHat & fvc::grad(nHatfv) & nHat);
150 | }
151 |
152 |
153 | // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
154 |
155 | Foam::threePhaseInterfaceProperties::threePhaseInterfaceProperties
156 | (
157 |     const incompressibleThreePhaseMixture& mixture
158 | )
159 | :
160 |     mixture_(mixture),
161 |     cAlpha_
162 |     (
163 |         readScalar
164 |         (
165 |             mixture.U().mesh().solverDict
166 |             (
167 |                 mixture_.alpha1().name()
168 |             ).lookup("cAlpha")
169 |         )
170 |     ),
171 |     sigma12_("sigma12", dimensionSet(1, 0, -2, 0, 0), mixture),  // surface tensions read from the mixture dictionary [N/m]
172 |     sigma13_("sigma13", dimensionSet(1, 0, -2, 0, 0), mixture),
173 |
174 |     deltaN_
175 |     (
176 |         "deltaN",
177 |         1e-8/pow(average(mixture.U().mesh().V()), 1.0/3.0)  // stabiliser scaled by average cell size
178 |     ),
179 |
180 |     nHatf_
181 |     (
182 |         IOobject
183 |         (
184 |             "nHatf",
185 |             mixture.alpha1().time().timeName(),
186 |             mixture.alpha1().mesh()
187 |         ),
188 |         mixture.alpha1().mesh(),
189 |         dimensionedScalar(dimArea, 0)
190 |     ),
191 |
192 |     K_
193 |     (
194 |         IOobject
195 |         (
196 |             "interfaceProperties:K",
197 |             mixture.alpha1().time().timeName(),
198 |             mixture.alpha1().mesh()
199 |         ),
200 |         mixture.alpha1().mesh(),
201 |         dimensionedScalar(dimless/dimLength, 0)
202 |     )
203 | {
204 |     calculateK();  // initialise interface normal flux and curvature
205 | }
206 |
207 |
208 | // * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * //
209 |
210 | Foam::tmp<Foam::surfaceScalarField>
211 | Foam::threePhaseInterfaceProperties::surfaceTensionForce() const
212 | {
213 |     return fvc::interpolate(sigmaK())*fvc::snGrad(mixture_.alpha1());  // CSF-style force: sigma*K interpolated to faces times snGrad(alpha1)
214 | }
215 |
216 |
217 | Foam::tmp<Foam::volScalarField>
218 | Foam::threePhaseInterfaceProperties::nearInterface() const
219 | {
220 |     // 1 where either alpha1 or alpha2 lies in (0.01, 0.99), i.e. near an interface; 0 elsewhere
221 |     return max
222 |     (
223 |         pos0(mixture_.alpha1() - 0.01)*pos0(0.99 - mixture_.alpha1()),
224 |         pos0(mixture_.alpha2() - 0.01)*pos0(0.99 - mixture_.alpha2())
225 |     );
226 | }
226 |
227 |
228 | // ************************************************************************* //
229 |
--------------------------------------------------------------------------------
/foam/newInterFoam/interMixingFoam/threePhaseInterfaceProperties/threePhaseInterfaceProperties.H:
--------------------------------------------------------------------------------
1 | /*---------------------------------------------------------------------------*\
2 | ========= |
3 | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 | \\ / O peration | Website: https://openfoam.org
5 | \\ / A nd | Copyright (C) 2011-2019 OpenFOAM Foundation
6 | \\/ M anipulation |
7 | -------------------------------------------------------------------------------
8 | License
9 | This file is part of OpenFOAM.
10 |
11 | OpenFOAM is free software: you can redistribute it and/or modify it
12 | under the terms of the GNU General Public License as published by
13 | the Free Software Foundation, either version 3 of the License, or
14 | (at your option) any later version.
15 |
16 | OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 | for more details.
20 |
21 | You should have received a copy of the GNU General Public License
22 |     along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
23 |
24 | Class
25 | Foam::threePhaseInterfaceProperties
26 |
27 | Description
28 | Properties to aid interFoam :
29 | 1. Correct the alpha boundary condition for dynamic contact angle.
30 | 2. Calculate interface curvature.
31 |
32 | SourceFiles
33 | threePhaseInterfaceProperties.C
34 |
35 | \*---------------------------------------------------------------------------*/
36 |
37 | #ifndef threePhaseInterfaceProperties_H
38 | #define threePhaseInterfaceProperties_H
39 |
40 | #include "incompressibleThreePhaseMixture.H"
41 | #include "surfaceFields.H"
42 |
43 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
44 |
45 | namespace Foam
46 | {
47 |
48 | /*---------------------------------------------------------------------------*\
49 | Class threePhaseInterfaceProperties Declaration
50 | \*---------------------------------------------------------------------------*/
51 |
52 | class threePhaseInterfaceProperties
53 | {
54 | // Private Data
55 |
56 | const incompressibleThreePhaseMixture& mixture_;
57 |
58 | //- Compression coefficient
59 | scalar cAlpha_;
60 |
61 | //- Surface tension 1-2
62 | dimensionedScalar sigma12_;
63 |
64 | //- Surface tension 1-3
65 | dimensionedScalar sigma13_;
66 |
67 | //- Stabilisation for normalisation of the interface normal
68 | const dimensionedScalar deltaN_;
69 |
70 | surfaceScalarField nHatf_;
71 | volScalarField K_;
72 |
73 |
74 | // Private Member Functions
75 |
76 | //- Correction for the boundary condition on the unit normal nHat on
77 | // walls to produce the correct contact dynamic angle.
78 | // Calculated from the component of U parallel to the wall
79 | void correctContactAngle
80 | (
81 | surfaceVectorField::Boundary& nHat
82 | ) const;
83 |
84 | //- Re-calculate the interface curvature
85 | void calculateK();
86 |
87 |
88 | public:
89 |
90 | //- Conversion factor for degrees into radians
91 | static const scalar convertToRad;
92 |
93 |
94 | // Constructors
95 |
96 | //- Construct from volume fraction field alpha and IOdictionary
97 | threePhaseInterfaceProperties
98 | (
99 | const incompressibleThreePhaseMixture& mixture
100 | );
101 |
102 | //- Disallow default bitwise copy construction
103 | threePhaseInterfaceProperties
104 | (
105 | const threePhaseInterfaceProperties&
106 | ) = delete;
107 |
108 |
109 | // Member Functions
110 |
111 | scalar cAlpha() const
112 | {
113 | return cAlpha_;
114 | }
115 |
116 | const dimensionedScalar& deltaN() const
117 | {
118 | return deltaN_;
119 | }
120 |
121 | const surfaceScalarField& nHatf() const
122 | {
123 | return nHatf_;
124 | }
125 |
126 | const volScalarField& K() const
127 | {
128 | return K_;
129 | }
130 |
131 | tmp sigma() const
132 | {
133 | volScalarField limitedAlpha2(max(mixture_.alpha2(), scalar(0)));
134 | volScalarField limitedAlpha3(max(mixture_.alpha3(), scalar(0)));
135 |
136 | return
137 | (limitedAlpha2*sigma12_ + limitedAlpha3*sigma13_)
138 | /(limitedAlpha2 + limitedAlpha3 + small);
139 | }
140 |
141 | tmp sigmaK() const
142 | {
143 | return sigma()*K_;
144 | }
145 |
146 | tmp surfaceTensionForce() const;
147 |
148 | //- Indicator of the proximity of the interface
149 | // Field values are 1 near and 0 away for the interface.
150 | tmp nearInterface() const;
151 |
        //- Update the interface properties by re-calculating the curvature
        void correct()
        {
            calculateK();
        }
156 |
157 |
158 | // Member Operators
159 |
160 | //- Disallow default bitwise assignment
161 | void operator=(const threePhaseInterfaceProperties&) = delete;
162 | };
163 |
164 |
165 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
166 |
167 | } // End namespace Foam
168 |
169 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
170 |
171 | #endif
172 |
173 | // ************************************************************************* //
174 |
--------------------------------------------------------------------------------
/foam/newInterFoam/newInterFoam.C:
--------------------------------------------------------------------------------
1 | /*---------------------------------------------------------------------------*\
2 | ========= |
3 | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 | \\ / O peration | Website: https://openfoam.org
5 | \\ / A nd | Copyright (C) 2011-2019 OpenFOAM Foundation
6 | \\/ M anipulation |
7 | -------------------------------------------------------------------------------
8 | License
9 | This file is part of OpenFOAM.
10 |
11 | OpenFOAM is free software: you can redistribute it and/or modify it
12 | under the terms of the GNU General Public License as published by
13 | the Free Software Foundation, either version 3 of the License, or
14 | (at your option) any later version.
15 |
16 | OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 | for more details.
20 |
21 | You should have received a copy of the GNU General Public License
22 | along with OpenFOAM. If not, see .
23 |
24 | Application
25 | interFoam
26 |
27 | Description
28 | Solver for 2 incompressible, isothermal immiscible fluids using a VOF
29 | (volume of fluid) phase-fraction based interface capturing approach,
30 | with optional mesh motion and mesh topology changes including adaptive
31 | re-meshing.
32 |
33 | \*---------------------------------------------------------------------------*/
34 |
35 | #include "fvCFD.H"
36 | #include "dynamicFvMesh.H"
37 | #include "CMULES.H"
38 | #include "EulerDdtScheme.H"
39 | #include "localEulerDdtScheme.H"
40 | #include "CrankNicolsonDdtScheme.H"
41 | #include "subCycle.H"
42 | #include "immiscibleIncompressibleTwoPhaseMixture.H"
43 | #include "turbulentTransportModel.H"
44 | #include "pimpleControl.H"
45 | #include "fvOptions.H"
46 | #include "CorrectPhi.H"
47 | #include "fvcSmooth.H"
48 |
49 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
50 |
int main(int argc, char *argv[])
{
    // Standard OpenFOAM case setup: argument parsing, time database,
    // dynamic mesh, solver fields, alpha fluxes, and PIMPLE controls.
    #include "postProcess.H"

    #include "setRootCaseLists.H"
    #include "createTime.H"
    #include "createDynamicFvMesh.H"
    #include "initContinuityErrs.H"
    #include "createDyMControls.H"
    #include "createFields.H"
    #include "createAlphaFluxes.H"
    #include "initCorrectPhi.H"
    #include "createUfIfPresent.H"

    turbulence->validate();

    // With global time stepping the initial deltaT is set from the Courant
    // number; LTS instead computes a local rDeltaT inside the time loop.
    if (!LTS)
    {
        #include "CourantNo.H"
        #include "setInitialDeltaT.H"
    }

    // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
    Info<< "\nStarting time loop\n" << endl;

    while (runTime.run())
    {
        #include "readDyMControls.H"

        if (LTS)
        {
            #include "setRDeltaT.H"
        }
        else
        {
            // Adjust deltaT from both flow and interface Courant numbers.
            #include "CourantNo.H"
            #include "alphaCourantNo.H"
            #include "setDeltaT.H"
        }

        runTime++;

        Info<< "Time = " << runTime.timeName() << nl << endl;

        // --- Pressure-velocity PIMPLE corrector loop
        while (pimple.loop())
        {
            if (pimple.firstPimpleIter() || moveMeshOuterCorrectors)
            {
                mesh.update();

                if (mesh.changing())
                {
                    // Do not apply previous time-step mesh compression flux
                    // if the mesh topology changed
                    if (mesh.topoChanging())
                    {
                        talphaPhi1Corr0.clear();
                    }

                    // Re-evaluate the gravity potential on the moved mesh.
                    gh = (g & mesh.C()) - ghRef;
                    ghf = (g & mesh.Cf()) - ghRef;

                    MRF.update();

                    if (correctPhi)
                    {
                        // Calculate absolute flux
                        // from the mapped surface velocity
                        phi = mesh.Sf() & Uf();

                        #include "correctPhi.H"

                        // Make the flux relative to the mesh motion
                        fvc::makeRelative(phi, U);
                    }

                    mixture.correct();

                    if (checkMeshCourantNo)
                    {
                        #include "meshCourantNo.H"
                    }
                }
            }

            // Advect the phase fraction, then refresh mixture properties.
            #include "alphaControls.H"
            #include "alphaEqnSubCycle.H"

            mixture.correct();

            // Momentum predictor.
            #include "UEqn.H"

            // --- Pressure corrector loop
            while (pimple.correct())
            {
                #include "pEqn.H"
            }

            if (pimple.turbCorr())
            {
                turbulence->correct();
            }
        }

        runTime.write();

        Info<< "ExecutionTime = " << runTime.elapsedCpuTime() << " s"
            << " ClockTime = " << runTime.elapsedClockTime() << " s"
            << nl << endl;
    }

    Info<< "End\n" << endl;

    return 0;
}
167 |
168 |
169 | // ************************************************************************* //
170 |
--------------------------------------------------------------------------------
/foam/newInterFoam/pEqn.H:
--------------------------------------------------------------------------------
1 | {
2 | if (correctPhi)
3 | {
4 | rAU.ref() = 1.0/UEqn.A();
5 | }
6 | else
7 | {
8 | rAU = 1.0/UEqn.A();
9 | }
10 |
11 | surfaceScalarField rAUf("rAUf", fvc::interpolate(rAU()));
12 | volVectorField HbyA(constrainHbyA(rAU()*UEqn.H(), U, p_rgh));
13 | surfaceScalarField phiHbyA
14 | (
15 | "phiHbyA",
16 | fvc::flux(HbyA)
17 | + MRF.zeroFilter(fvc::interpolate(rho*rAU())*fvc::ddtCorr(U, phi, Uf))
18 | );
19 | MRF.makeRelative(phiHbyA);
20 |
21 | if (p_rgh.needReference())
22 | {
23 | fvc::makeRelative(phiHbyA, U);
24 | adjustPhi(phiHbyA, U, p_rgh);
25 | fvc::makeAbsolute(phiHbyA, U);
26 | }
27 |
28 | surfaceScalarField phig
29 | (
30 | (
31 | mixture.surfaceTensionForce()
32 | - ghf*fvc::snGrad(rho)
33 | )*rAUf*mesh.magSf()
34 | );
35 |
36 | phiHbyA += phig;
37 |
38 | // Update the pressure BCs to ensure flux consistency
39 | constrainPressure(p_rgh, U, phiHbyA, rAUf, MRF);
40 |
41 | while (pimple.correctNonOrthogonal())
42 | {
43 | fvScalarMatrix p_rghEqn
44 | (
45 | fvm::laplacian(rAUf, p_rgh) == fvc::div(phiHbyA)
46 | );
47 |
48 | p_rghEqn.setReference(pRefCell, getRefCellValue(p_rgh, pRefCell));
49 | p_rghEqn.solve();
50 |
51 | if (pimple.finalNonOrthogonalIter())
52 | {
53 | // Dump L,d of PPE Lp=d to disk, cf. https://is.gd/FMYQsn and https://is.gd/d6CE3q
54 | List> L; // init 2-dimensional list L
55 | List d; // init list d
56 |
57 | // Initialization of matrix/vector.
58 | L.resize(p_rgh.size());
59 | d.resize(p_rgh.size());
60 | forAll(L, i)
61 | {
62 | L[i].resize(p_rgh.size()); // every element of L is array
63 | forAll(L[i], j) // clearing L and d
64 | {
65 | L[i][j] = 0.0;
66 | }
67 | d[i] = 0.0;
68 | }
69 |
70 | // Assigning diagonal coefficients and source term.
71 | forAll(p_rgh, i)
72 | {
73 | L[i][i] = p_rghEqn.diag()[i];
74 | d[i] = p_rghEqn.source()[i];
75 | }
76 |
77 | // Assigning off-diagonal coefficients.
78 | for (label faceI=0; faceI /dev/null
4 | rm -r 0.* [1-9]*
5 |
6 | # Source tutorial run functions
7 | . /opt/openfoam7/bin/tools/RunFunctions
8 |
9 | # Meshing
10 | runApplication blockMesh
11 | runApplication snappyHexMesh -overwrite
12 |
13 | cp 0/alpha.water.org 0/alpha.water
14 | runApplication setFields
15 | runApplication $(getApplication)
16 |
17 | #------------------------------------------------------------------------------
18 |
--------------------------------------------------------------------------------
/foam/sim/constant/g:
--------------------------------------------------------------------------------
1 | /*--------------------------------*- C++ -*----------------------------------*\
2 | | ========= | |
3 | | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
4 | | \\ / O peration | Version: 3.0.1 |
5 | | \\ / A nd | Web: www.OpenFOAM.org |
6 | | \\/ M anipulation | |
7 | \*---------------------------------------------------------------------------*/
8 | FoamFile
9 | {
10 | version 2.0;
11 | format ascii;
12 | class uniformDimensionedVectorField;
13 | location "constant";
14 | object g;
15 | }
16 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
17 |
18 | dimensions [0 1 -2 0 0 0 0];
19 | value (0 -9.81 0);
20 |
21 |
22 | // ************************************************************************* //
23 |
--------------------------------------------------------------------------------
/foam/sim/constant/transportProperties:
--------------------------------------------------------------------------------
1 | /*--------------------------------*- C++ -*----------------------------------*\
2 | | ========= | |
3 | | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
4 | | \\ / O peration | Version: 3.0.1 |
5 | | \\ / A nd | Web: www.OpenFOAM.org |
6 | | \\/ M anipulation | |
7 | \*---------------------------------------------------------------------------*/
8 | FoamFile
9 | {
10 | version 2.0;
11 | format ascii;
12 | class dictionary;
13 | location "constant";
14 | object transportProperties;
15 | }
16 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
17 |
18 | phases (water sludge);
19 |
20 | water
21 | {
22 | transportModel Newtonian;
23 | nu [0 2 -1 0 0 0 0] 1.005e-06;
24 | rho [1 -3 0 0 0 0 0] 998;
25 | }
26 |
27 | sludge
28 | {
29 | transportModel Newtonian;
30 | nu [0 2 -1 0 0 0 0] 0.955e-06;
31 | rho [1 -3 0 0 0 0 0] 1050;
32 | }
33 |
34 | sigma [1 0 -2 0 0 0 0] 0;
35 |
36 |
37 | // ************************************************************************* //
38 |
--------------------------------------------------------------------------------
/foam/sim/constant/turbulenceProperties:
--------------------------------------------------------------------------------
1 | /*--------------------------------*- C++ -*----------------------------------*\
2 | | ========= | |
3 | | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
4 | | \\ / O peration | Version: 3.0.1 |
5 | | \\ / A nd | Web: www.OpenFOAM.org |
6 | | \\/ M anipulation | |
7 | \*---------------------------------------------------------------------------*/
8 | FoamFile
9 | {
10 | version 2.0;
11 | format ascii;
12 | class dictionary;
13 | location "constant";
14 | object turbulenceProperties;
15 | }
16 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
17 |
18 | simulationType RAS;
19 |
20 | RAS
21 | {
22 | RASModel kEpsilon;
23 |
24 | turbulence on;
25 |
26 | printCoeffs on;
27 | }
28 |
29 |
30 | // ************************************************************************* //
31 |
--------------------------------------------------------------------------------
/foam/sim/system/blockMeshDict:
--------------------------------------------------------------------------------
1 | /*--------------------------------*- C++ -*----------------------------------*\
2 | ========= |
3 | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 | \\ / O peration | Website: https://openfoam.org
5 | \\ / A nd | Version: 7
6 | \\/ M anipulation |
7 | \*---------------------------------------------------------------------------*/
8 | FoamFile
9 | {
10 | version 2.0;
11 | format ascii;
12 | class dictionary;
13 | object blockMeshDict;
14 | }
15 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
16 |
17 | convertToMeters 1;
18 |
19 | res 2; // cells per 0.5 m
20 | depth #calc "-0.5/$res";
21 | vertices
22 | (
23 | (0 0 0)
24 | (1 0 0)
25 | (25 0 0)
26 | (25.5 0 0)
27 | (0 -0.5 0)
28 | (1 -0.5 0)
29 | (25 -0.5 0)
30 | (25.5 -0.5 0)
31 | (-0.5 -1 0)
32 | (0 -1 0)
33 | (1 -1 0)
34 | (25 -1 0)
35 | (-0.5 -1.5 0)
36 | (0 -1.5 0)
37 | (1 -1.5 0)
38 | (25 -1.5 0)
39 | (0 -6 0)
40 | (1 -6 0)
41 | (25 -4.5 0)
42 | (0 -7 0)
43 | (1 -7 0)
44 | // fake z dim
45 | (0 0 $depth)
46 | (1 0 $depth)
47 | (25 0 $depth)
48 | (25.5 0 $depth)
49 | (0 -0.5 $depth)
50 | (1 -0.5 $depth)
51 | (25 -0.5 $depth)
52 | (25.5 -0.5 $depth)
53 | (-0.5 -1 $depth)
54 | (0 -1 $depth)
55 | (1 -1 $depth)
56 | (25 -1 $depth)
57 | (-0.5 -1.5 $depth)
58 | (0 -1.5 $depth)
59 | (1 -1.5 $depth)
60 | (25 -1.5 $depth)
61 | (0 -6 $depth)
62 | (1 -6 $depth)
63 | (25 -4.5 $depth)
64 | (0 -7 $depth)
65 | (1 -7 $depth)
66 | );
67 |
68 | blocks
69 | (
70 | hex (4 5 26 25 0 1 22 21) (#calc "2*$res" 1 $res) simpleGrading (1 1 1)
71 | hex (5 6 27 26 1 2 23 22) (#calc "48*$res" 1 $res) simpleGrading (1 1 1)
72 | hex (6 7 28 27 2 3 24 23) ($res 1 $res) simpleGrading (1 1 1) // outlet
73 | hex (9 10 31 30 4 5 26 25) (#calc "2*$res" 1 $res) simpleGrading (1 1 1)
74 | hex (10 11 32 31 5 6 27 26) (#calc "48*$res" 1 $res) simpleGrading (1 1 1)
75 | hex (12 13 34 33 8 9 30 29) ($res 1 $res) simpleGrading (1 1 1) // inlet
76 | hex (13 14 35 34 9 10 31 30) (#calc "2*$res" 1 $res) simpleGrading (1 1 1)
77 | hex (14 15 36 35 10 11 32 31) (#calc "48*$res" 1 $res) simpleGrading (1 1 1)
78 | hex (16 17 38 37 13 14 35 34) (#calc "2*$res" 1 #calc "9*$res") simpleGrading (1 1 1)
79 | hex (17 18 39 38 14 15 36 35) (#calc "48*$res" 1 #calc "9*$res") simpleGrading (1 1 1)
80 | hex (19 20 41 40 16 17 38 37) (#calc "2*$res" 1 #calc "2*$res") simpleGrading (1 1 1) // hopper
81 | );
82 |
83 | edges
84 | (
85 | );
86 |
87 | boundary
88 | (
89 | inlet
90 | {
91 | type patch;
92 | faces
93 | (
94 | (12 8 29 33)
95 | );
96 | }
97 | outlet
98 | {
99 | type patch;
100 | faces
101 | (
102 | (3 7 28 24)
103 | );
104 | }
105 | outlet_bot
106 | {
107 | type patch;
108 | faces
109 | (
110 | (20 19 40 41)
111 | );
112 | }
113 | bot
114 | {
115 | type wall;
116 | faces
117 | (
118 | (18 17 38 39)
119 | (17 20 41 38)
120 | );
121 | }
122 | top
123 | {
124 | type wall;
125 | faces
126 | (
127 | (0 1 22 21)
128 | (1 2 23 22)
129 | (2 3 24 23)
130 | );
131 | }
132 | wall-surface_body
133 | {
134 | type wall;
135 | faces
136 | (
137 | // inlet pipe
138 | (13 12 33 34)
139 | (8 9 30 29)
140 | // outlet pipe
141 | (7 6 27 28)
142 | //hopper
143 | (19 16 37 40)
144 | // left
145 | (4 0 21 25)
146 | (9 4 25 30)
147 | (16 13 34 37)
148 | // right
149 | (6 11 32 27)
150 | (11 15 36 32)
151 | (15 18 39 36)
152 | );
153 | }
154 | frontAndBackPlanes
155 | {
156 | type empty;
157 | faces
158 | (
159 | // front
160 | (4 5 1 0)
161 | (5 6 2 1)
162 | (6 7 3 2)
163 | (9 10 5 4)
164 | (10 11 6 5)
165 | (12 13 9 8)
166 | (13 14 10 9)
167 | (14 15 11 10)
168 | (16 17 14 13)
169 | (17 18 15 14)
170 | (19 20 17 16)
171 | // back
172 | (21 22 26 25)
173 | (22 23 27 26)
174 | (23 24 28 27)
175 | (30 25 26 31)
176 | (31 26 27 32)
177 | (29 30 34 33)
178 | (34 30 31 35)
179 | (35 31 32 36)
180 | (37 34 35 38)
181 | (38 35 36 39)
182 | (40 37 38 41)
183 | );
184 | }
185 | );
186 |
187 | // ************************************************************************* //
188 |
--------------------------------------------------------------------------------
/foam/sim/system/controlDict:
--------------------------------------------------------------------------------
1 | /*--------------------------------*- C++ -*----------------------------------*\
2 | | ========= | |
3 | | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
4 | | \\ / O peration | Version: 3.0.1 |
5 | | \\ / A nd | Web: www.OpenFOAM.org |
6 | | \\/ M anipulation | |
7 | \*---------------------------------------------------------------------------*/
8 | FoamFile
9 | {
10 | version 2.0;
11 | format ascii;
12 | class dictionary;
13 | location "system";
14 | object controlDict;
15 | }
16 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
17 |
18 | application newInterFoam;
19 |
20 | startFrom startTime;
21 |
22 | startTime 0;
23 |
24 | stopAt endTime;
25 |
26 | endTime 1;
27 |
28 | deltaT 0.01;
29 |
30 | writeControl runTime;
31 |
32 | writeInterval 1;
33 |
34 | purgeWrite 0;
35 |
36 | writeFormat ascii;
37 |
38 | writePrecision 6;
39 |
40 | writeCompression off;
41 |
42 | timeFormat general;
43 |
44 | timePrecision 6;
45 |
46 | runTimeModifiable no;
47 |
48 | adjustTimeStep no;
49 |
50 | maxCo 1;
51 | maxAlphaCo 1;
52 |
53 | maxDeltaT 1;
54 |
55 |
56 | // ************************************************************************* //
57 |
--------------------------------------------------------------------------------
/foam/sim/system/decomposeParDict:
--------------------------------------------------------------------------------
1 | /*--------------------------------*- C++ -*----------------------------------*\
2 | ========= |
3 | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 | \\ / O peration | Website: https://openfoam.org
5 | \\ / A nd | Version: 6
6 | \\/ M anipulation |
7 | \*---------------------------------------------------------------------------*/
8 | FoamFile
9 | {
10 | version 2.0;
11 | format ascii;
12 | class dictionary;
13 | location "system";
14 | object decomposeParDict;
15 | }
16 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
17 |
18 | numberOfSubdomains 4;
19 |
20 | method simple;
21 |
--------------------------------------------------------------------------------
/foam/sim/system/fvSchemes:
--------------------------------------------------------------------------------
1 | /*--------------------------------*- C++ -*----------------------------------*\
2 | | ========= | |
3 | | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
4 | | \\ / O peration | Version: 3.0.1 |
5 | | \\ / A nd | Web: www.OpenFOAM.org |
6 | | \\/ M anipulation | |
7 | \*---------------------------------------------------------------------------*/
8 | FoamFile
9 | {
10 | version 2.0;
11 | format ascii;
12 | class dictionary;
13 | location "system";
14 | object fvSchemes;
15 | }
16 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
17 |
18 | ddtSchemes
19 | {
20 | default Euler;
21 | }
22 |
23 | gradSchemes
24 | {
25 | default Gauss linear;
26 | }
27 |
28 | divSchemes
29 | {
30 | div(rhoPhi,U) Gauss upwind;
31 | div(phi,alpha) Gauss vanLeer;
32 | div(phirb,alpha) Gauss linear;
33 | div(phi,k) Gauss upwind;
34 | div(phi,epsilon) Gauss upwind;
35 | div(((rho*nuEff)*dev2(T(grad(U))))) Gauss linear;
36 | }
37 |
38 | laplacianSchemes
39 | {
40 | default Gauss linear corrected;
41 | }
42 |
43 | interpolationSchemes
44 | {
45 | default linear;
46 | }
47 |
48 | snGradSchemes
49 | {
50 | default corrected;
51 | }
52 |
53 |
54 | // ************************************************************************* //
55 |
--------------------------------------------------------------------------------
/foam/sim/system/fvSolution:
--------------------------------------------------------------------------------
1 | /*--------------------------------*- C++ -*----------------------------------*\
2 | | ========= | |
3 | | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
4 | | \\ / O peration | Version: 3.0.1 |
5 | | \\ / A nd | Web: www.OpenFOAM.org |
6 | | \\/ M anipulation | |
7 | \*---------------------------------------------------------------------------*/
8 | FoamFile
9 | {
10 | version 2.0;
11 | format ascii;
12 | class dictionary;
13 | location "system";
14 | object fvSolution;
15 | }
16 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
17 |
18 | solvers
19 | {
20 | "alpha.water.*"
21 | {
22 | nAlphaCorr 2;
23 | nAlphaSubCycles 1;
24 | cAlpha 1;
25 |
26 | MULESCorr yes;
27 | nLimiterIter 3;
28 |
29 | solver smoothSolver;
30 | smoother symGaussSeidel;
31 | tolerance 1e-8;
32 | relTol 0;
33 | }
34 |
35 | "pcorr.*"
36 | {
37 | solver PCG;
38 | preconditioner DIC;
39 | tolerance 1e-5;
40 | relTol 0;
41 | }
42 |
43 | p_rgh
44 | {
45 | solver PCG;
46 | preconditioner DIC;
47 | tolerance 5e-9;
48 | relTol 0.01;
49 | };
50 |
51 | p_rghFinal
52 | {
53 | $p_rgh;
54 | relTol 0;
55 | }
56 |
57 | "(U|k|epsilon).*"
58 | {
59 | solver smoothSolver;
60 | smoother symGaussSeidel;
61 | tolerance 1e-06;
62 | relTol 0;
63 | minIter 1;
64 | }
65 | }
66 |
67 | PIMPLE
68 | {
69 | momentumPredictor no;
70 | nOuterCorrectors 1;
71 | nCorrectors 3;
72 | nNonOrthogonalCorrectors 0;
73 | }
74 |
75 | relaxationFactors
76 | {
77 | equations
78 | {
79 | ".*" 1;
80 | }
81 | }
82 |
83 | // ************************************************************************* //
84 |
--------------------------------------------------------------------------------
/foam/sim/system/setFieldsDict:
--------------------------------------------------------------------------------
1 | /*--------------------------------*- C++ -*----------------------------------*\
2 | | ========= | |
3 | | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
4 | | \\ / O peration | Version: 3.0.1 |
5 | | \\ / A nd | Web: www.OpenFOAM.org |
6 | | \\/ M anipulation | |
7 | \*---------------------------------------------------------------------------*/
8 | FoamFile
9 | {
10 | version 2.0;
11 | format ascii;
12 | class dictionary;
13 | location "system";
14 | object setFieldsDict;
15 | }
16 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
17 |
18 | defaultFieldValues
19 | (
20 | volScalarFieldValue alpha.water 0
21 | );
22 | regions
23 | (
24 | boxToCell
25 | {
26 | box (-0.5 -7 -0.25) (25.5 0 0);
27 |
28 | fieldValues
29 | (
30 | volScalarFieldValue alpha.water 1
31 | );
32 | }
33 | );
34 |
35 |
36 | // ************************************************************************* //
37 |
--------------------------------------------------------------------------------
/foam/sim/system/snappyHexMeshDict:
--------------------------------------------------------------------------------
1 | /*--------------------------------*- C++ -*----------------------------------*\
2 | ========= |
3 | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 | \\ / O peration | Website: https://openfoam.org
5 | \\ / A nd | Version: 7
6 | \\/ M anipulation |
7 | \*---------------------------------------------------------------------------*/
8 | FoamFile
9 | {
10 | version 2.0;
11 | format ascii;
12 | class dictionary;
13 | object snappyHexMeshDict;
14 | }
15 |
16 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
17 |
18 | castellatedMesh true;
19 | snap true;
20 | addLayers false;
21 |
22 | geometry
23 | {
24 | baffle
25 | {
26 | type triSurfaceMesh;
27 | file "sludge.stl";
28 | }
29 | };
30 |
31 | castellatedMeshControls
32 | {
33 | maxLocalCells 0;
34 | maxGlobalCells 2000;
35 | minRefinementCells 0;
36 | nCellsBetweenLevels 1;
37 |
38 | features ();
39 |
40 | refinementSurfaces
41 | {
42 | baffle
43 | {
44 | level (1 1);
45 | patchInfo
46 | {
47 | type wall;
48 | }
49 | }
50 | }
51 |
52 | resolveFeatureAngle 30;
53 |
54 | refinementRegions
55 | {
56 | }
57 |
58 | locationInMesh (24.12345 -0.12345e-05 -0.12345e-05); // Inside point
59 | allowFreeStandingZoneFaces false;
60 | }
61 |
62 | snapControls
63 | {
64 | nSmoothPatch 3;
65 | tolerance 1.0;
66 | nSolveIter 300;
67 | nRelaxIter 5;
68 |
69 | // nFeatureSnapIter 10;
70 | // implicitFeatureSnap false;
71 | // explicitFeatureSnap true;
72 | // multiRegionFeatureSnap true;
73 | }
74 |
75 | addLayersControls
76 | {
77 | relativeSizes true;
78 |
79 | layers
80 | {
81 | }
82 |
83 | expansionRatio 1.0;
84 | finalLayerThickness 0.3;
85 | minThickness 0.25;
86 | nGrow 0;
87 | featureAngle 30;
88 | // nRelaxIter 5;
89 | // nSmoothSurfaceNormals 1;
90 | // nSmoothNormals 3;
91 | // nSmoothThickness 10;
92 | maxFaceThicknessRatio 0.5;
93 | // maxThicknessToMedialRatio 0.3;
94 | // minMedianAxisAngle 90;
95 | nBufferCellsNoExtrude 0;
96 | nLayerIter 50;
97 | // nRelaxedIter 20;
98 | }
99 |
100 | meshQualityControls
101 | {
102 | #include "${FOAM_ETC}/caseDicts/mesh/generation/meshQualityDict"
103 | }
104 |
105 | mergeTolerance 1E-6;
106 |
107 | // ************************************************************************* //
108 |
--------------------------------------------------------------------------------
/foam/sim/system/snappyHexMeshDict.org:
--------------------------------------------------------------------------------
1 | /*--------------------------------*- C++ -*----------------------------------*\
2 | ========= |
3 | \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 | \\ / O peration | Website: https://openfoam.org
5 | \\ / A nd | Version: 7
6 | \\/ M anipulation |
7 | \*---------------------------------------------------------------------------*/
8 | FoamFile
9 | {
10 | version 2.0;
11 | format ascii;
12 | class dictionary;
13 | object snappyHexMeshDict;
14 | }
15 |
16 | // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
17 |
18 | castellatedMesh true;
19 | snap false;
20 | addLayers false;
21 |
22 | geometry
23 | {
24 | baffle
25 | {
26 | type searchableBox;
27 | min (xx yy zz);
28 | max (xx yy zz);
29 | }
30 | };
31 |
32 | castellatedMeshControls
33 | {
34 | maxLocalCells 0;
35 | maxGlobalCells 2000;
36 | minRefinementCells 0;
37 | nCellsBetweenLevels 1;
38 |
39 | features ();
40 |
41 | refinementSurfaces
42 | {
43 | baffle
44 | {
45 | level (1 1);
46 | patchInfo
47 | {
48 | type wall;
49 | }
50 | }
51 | }
52 |
53 | resolveFeatureAngle 30;
54 |
55 | refinementRegions
56 | {
57 | }
58 |
59 | locationInMesh (24.12345 -0.12345e-05 -0.12345e-05); // Inside point
60 | allowFreeStandingZoneFaces false;
61 | }
62 |
63 | snapControls
64 | {
65 | nSmoothPatch 3;
66 | tolerance 1.0;
67 | nSolveIter 300;
68 | nRelaxIter 5;
69 |
70 | // nFeatureSnapIter 10;
71 | // implicitFeatureSnap false;
72 | // explicitFeatureSnap true;
73 | // multiRegionFeatureSnap true;
74 | }
75 |
76 | addLayersControls
77 | {
78 | relativeSizes true;
79 |
80 | layers
81 | {
82 | }
83 |
84 | expansionRatio 1.0;
85 | finalLayerThickness 0.3;
86 | minThickness 0.25;
87 | nGrow 0;
88 | featureAngle 30;
89 | // nRelaxIter 5;
90 | // nSmoothSurfaceNormals 1;
91 | // nSmoothNormals 3;
92 | // nSmoothThickness 10;
93 | maxFaceThicknessRatio 0.5;
94 | // maxThicknessToMedialRatio 0.3;
95 | // minMedianAxisAngle 90;
96 | nBufferCellsNoExtrude 0;
97 | nLayerIter 50;
98 | // nRelaxedIter 20;
99 | }
100 |
101 | meshQualityControls
102 | {
103 | #include "${FOAM_ETC}/caseDicts/mesh/generation/meshQualityDict"
104 | }
105 |
106 | mergeTolerance 1E-6;
107 |
108 | // ************************************************************************* //
109 |
--------------------------------------------------------------------------------
/params.yaml:
--------------------------------------------------------------------------------
1 | model: PreconditionerNet
2 | data: SludgePatternDataSet
3 | number_samples: 500
4 | resolution: 128
5 | mesh_cells: 2
6 | channels: # must be odd due to padding
7 | - 1
8 | - 16
9 | - 32
10 | - 64
11 | - 32
12 | - 16
13 | - 1
14 | batch_size: 4
15 | learning_rate: 0.001
16 | patience: 16
17 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "uibk.deep-preconditioning"
3 | version = "0.1.0"
4 | description = "Design preconditioners with a CNN to accelerate the conjugate gradient method."
5 | readme = "README.md"
6 | requires-python = ">=3.12"
7 | license = { text = "MIT" }
8 | authors = [
9 | { name = "Viktor Daropoulos", email = "Viktor.Daropoulos@student.uibk.ac.at" },
10 | { name = "Johannes Sappl", email = "johannes.sappl@student.uibk.ac.at" },
11 | ]
12 | dependencies = [
13 | "dvc>=3.50.2",
14 | "dvclive>=3.46.0",
15 | "dvc-webdav>=3.0.0",
16 | "kaggle>=1.6.14",
17 | "matplotlib>=3.9.2",
18 | "pyamg>=5.2.1",
19 | "scipy>=1.14.1",
20 | "spconv-cu124>=2.3.8",
21 | "torch>=2.3.0",
22 | "numpy-stl>=3.2.0",
23 | "triangle>=20250106",
24 | "ilupp>=1.0.2",
25 | ]
26 |
27 | [project.urls]
28 | Repository = "https://github.com/jsappl/DeepPreconditioning"
29 |
30 | [build-system]
31 | requires = ["flit_core>=3.4"]
32 | build-backend = "flit_core.buildapi"
33 |
34 | [tool.ruff]
35 | exclude = ["__init__.py"]
36 | fix = true
37 | line-length = 120
38 | target-version = "py312"
39 |
40 | [tool.ruff.lint]
41 | select = [
42 | "ARG", # flake8-unused-arguments
43 | "B", # flake8-bugbear
44 | "D", # pydocstyle
45 | "E", # pycodestyle Error
46 | "ERA", # eradicate
47 | "F", # Pyflakes
48 | "I", # isort
49 | "N", # pep8-naming
50 | "NPY", # NumPy-specific rules
51 | "PD", # pandas-vet
52 | "PT", # flake8-pytest-style
53 | "PTH", # flake8-use-pathlib
54 | "Q", # flake8-quotes
55 | "RET", # flake8-return
56 | "TD", # flake8-todos
57 | "W", # pycodestyle Warning
58 | ]
59 |
60 | [tool.ruff.lint.pydocstyle]
61 | convention = "google"
62 |
--------------------------------------------------------------------------------
/scripts/compare_meshes.py:
--------------------------------------------------------------------------------
1 | """Compare generalization capabilities on different meshes."""
2 |
3 | import subprocess
4 | from pathlib import Path
5 |
6 | import dvc.api
7 | import numpy as np
8 | import torch
9 | from scipy.sparse import load_npz, save_npz
10 | from spconv.pytorch import SparseConvTensor
11 |
12 | import uibk.deep_preconditioning.generate_data as generate_data
13 | import uibk.deep_preconditioning.metrics as metrics
14 | import uibk.deep_preconditioning.model as models
15 |
16 | ROOT: Path = Path(__file__).parents[1]
17 | RESULTS_DIRECTORY: Path = Path("./assets/results/")
18 | MESH_CELLS: list[int] = [2, 3, 4, 5, 6]
19 |
20 | rng = np.random.default_rng(seed=69)
21 |
22 |
def generate_mesh(mesh_cells: int, resolution: int) -> None:
    """Generate a mesh for a given resolution and dump its pressure matrix.

    Args:
        mesh_cells: Cells per 0.5 m of the block mesh; only used to name the
            output file here (the caller patches the value into blockMeshDict
            beforehand).
        resolution: Resolution forwarded to the sludge-pattern generator.

    Raises:
        subprocess.CalledProcessError: If the OpenFOAM run fails.
    """
    generate_data._sludge_pattern(resolution=resolution)

    case_directory = ROOT / "assets/data/meshes/"
    case_directory.mkdir(parents=True, exist_ok=True)

    # Run simulation inside the Docker container and dump the matrix; fail
    # loudly (check=True) instead of silently reusing a stale matrix.csv.
    command = "docker exec openfoam /bin/bash -i -c 'cd foam/sim/ && ./Allrun'"
    subprocess.run(command, cwd=ROOT / "foam", shell=True, check=True)

    matrix = generate_data._build_matrix(ROOT / "foam/sim/matrix.csv")

    save_npz(case_directory / f"mesh_cells_{mesh_cells}_matrix.npz", matrix, compressed=False)
37 |
38 |
def main():
    """Generate different meshes and evaluate model generalization on them.

    Writes one CSV row per mesh resolution with the matrix size and the
    condition number before and after preconditioning.
    """
    assert torch.cuda.is_available(), "CUDA not available"
    device = torch.device("cuda")
    torch.manual_seed(69)

    params = dvc.api.params_show()

    for mesh_cells in MESH_CELLS:
        # Patch the mesh resolution into blockMeshDict before meshing;
        # check=True so a failed sed does not silently corrupt the sweep.
        pattern = f"s/^res [0-9]+/res {mesh_cells}/"
        subprocess.run(f"sed -i -E '{pattern}' blockMeshDict", cwd=ROOT / "foam/sim/system", shell=True, check=True)
        generate_mesh(mesh_cells, params["resolution"])

    model = getattr(models, params["model"])(params["channels"])
    model.load_state_dict(torch.load(Path("./assets/checkpoints/best.pt"), weights_only=True))
    model = model.to(device)

    with (RESULTS_DIRECTORY / "compare_meshes.csv").open(mode="w") as file_io:
        file_io.write("mesh_cells,size,kappa_pre,kappa_post\n")

        for mesh_cells in MESH_CELLS:
            matrix = load_npz(ROOT / f"assets/data/meshes/mesh_cells_{mesh_cells}_matrix.npz").toarray()
            condition_number_pre = np.linalg.cond(matrix).item()
            # Feed the lower triangle to the network as a sparse (1, n, n, 1) tensor.
            matrix = SparseConvTensor.from_dense(
                torch.tril(torch.tensor(matrix, dtype=torch.float32, device=device)).unsqueeze(0).unsqueeze(-1)
            )
            condition_number_post = metrics.condition_loss(matrix, model(matrix)).item()
            line = f"{mesh_cells},{matrix.spatial_shape[0]},{condition_number_pre:.0f},{condition_number_post:.0f}"
            file_io.write(line + "\n")
72 |
--------------------------------------------------------------------------------
/tests/test_model.py:
--------------------------------------------------------------------------------
1 | """Test the implementation of various models."""
2 |
3 | from pathlib import Path
4 |
5 | import spconv.pytorch as spconv
6 | import torch
7 |
8 | import uibk.deep_preconditioning.model as model
9 |
10 | SIZE: int = 64
11 | BATCH_SIZE: int = 2
12 |
13 |
def precondnet_fixture(device, channels=(1, 8, 8, 8, 1)):
    """Return an instance of `PreconditionerNet` moved to `device`.

    Args:
        device: Target device for the network parameters.
        channels: Layer channel configuration forwarded to `PreconditionerNet`.
            The constructor asserts an odd number of entries; the default uses
            single-channel input and output to match the sparse test tensors.

    Returns:
        The freshly initialized model on `device`.
    """
    # `PreconditionerNet.__init__` requires a channel list; calling it with no
    # arguments (as before) raises a `TypeError`.
    return model.PreconditionerNet(list(channels)).to(device)
17 |
18 |
def spconv_tensor_fixture(device):
    """Fixture to create a `SparseConvTensor` for testing."""
    # A batch of identity matrices in channels-last layout (batch, H, W, C).
    dense = torch.eye(SIZE, device=device).expand(BATCH_SIZE, SIZE, SIZE).unsqueeze(-1)
    return spconv.SparseConvTensor.from_dense(dense)
23 |
24 |
def test_forward(model, input_):
    """Test the forward pass of the PreconditionerNet."""
    lower = model(input_).dense()

    assert lower.shape[2:] == torch.Size(input_.spatial_shape)

    for index in range(input_.batch_size):
        sample = lower[index, 0]
        assert torch.all(sample.diag() != 0)  # diagonal must stay non-zero
        assert torch.all(sample.triu(diagonal=1) == 0)  # nothing above the diagonal
        assert torch.any(sample.tril(diagonal=-1) != 0)  # some strictly-lower entries

    # L @ L.T must be a symmetric positive-definite preconditioner.
    preconditioner = (lower @ lower.transpose(-1, -2)).squeeze()
    assert preconditioner.shape == (BATCH_SIZE, SIZE, SIZE)
    assert torch.all(preconditioner == preconditioner.transpose(-1, -2))

    spectrum = torch.linalg.eigvals(preconditioner)
    assert torch.all(spectrum.imag == 0)
    assert torch.all(spectrum.real > 0)
43 |
44 |
def main():
    """Run the tests."""
    assert torch.cuda.is_available(), "CUDA is not available"
    cuda = torch.device("cuda")

    test_forward(precondnet_fixture(cuda), spconv_tensor_fixture(cuda))

    print(f"{Path(__file__).name} all passed")


# Allow running this test file as a standalone script.
if __name__ == "__main__":
    main()
60 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | """Test the untility functions in the module."""
2 |
3 | from pathlib import Path
4 |
5 | import spconv.pytorch as spconv
6 | import torch
7 |
8 | import uibk.deep_preconditioning.utils as utils
9 |
10 |
def spconv_tensor_fixture():
    """Fixture to create a `SparseConvTensor` for testing."""
    # (batch, row, col) coordinates of the non-zero entries of two 3x3 samples.
    coordinates = [
        (0, 0, 0),
        (0, 0, 1),
        (0, 1, 0),
        (0, 1, 1),
        (0, 2, 2),
        (1, 0, 1),
        (1, 0, 2),
        (1, 1, 0),
        (1, 1, 1),
        (1, 2, 1),
    ]
    entries = [1, 2, 3, 4, 5, 2, 3, 1, 4, 5]

    indices = torch.tensor(coordinates, dtype=torch.int32)
    features = torch.tensor(entries, dtype=torch.float32).reshape(-1, 1)
    return spconv.SparseConvTensor(features, indices, spatial_shape=[3, 3], batch_size=2)
28 |
29 |
def vector_batch_fixture():
    """Fixture to create a batch of vectors, one per sparse-tensor sample."""
    batch = [[1.0, 2.0, 3.0], [1.0, -1.0, 1.0]]
    return torch.tensor(batch, dtype=torch.float32)
33 |
34 |
def test_sparse_matvec_mul(spconv_tensor, vector_batch):
    """Test the `sparse_matvec_mul` function."""
    # Hand-computed batched matrix-vector products of the fixture data.
    expected_output = torch.tensor([[5.0, 11.0, 15.0], [1.0, -3.0, -5.0]])

    result = utils.sparse_matvec_mul(spconv_tensor, vector_batch, transpose=False)

    assert torch.allclose(result, expected_output), f"Expected {expected_output}, but got {result}"
42 |
43 |
def main():
    """Run the tests."""
    test_sparse_matvec_mul(spconv_tensor_fixture(), vector_batch_fixture())

    print(f"{Path(__file__).name} all passed")


# Allow running this test file as a standalone script.
if __name__ == "__main__":
    main()
56 |
--------------------------------------------------------------------------------
/uibk/deep_preconditioning/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jsappl/DeepPreconditioning/1c44e043b24e583cb7e6d70237905a256a24d8ae/uibk/deep_preconditioning/__init__.py
--------------------------------------------------------------------------------
/uibk/deep_preconditioning/cg.py:
--------------------------------------------------------------------------------
1 | """The conjugate gradient method with optional preconditioners.
2 |
3 | https://github.com/paulhausner/neural-incomplete-factorization/blob/main/krylov/cg.py
4 | """
5 |
6 | import time
7 | from typing import TYPE_CHECKING
8 |
9 | import torch
10 |
11 | if TYPE_CHECKING:
12 | from torch import Tensor
13 |
14 |
def stopping_criterion(_, rk, b):
    """Return the squared norm of the residual `rk` relative to that of `b`.

    The first argument (the system matrix) is unused; it is kept so all
    stopping criteria share one call signature.
    """
    residual_energy = torch.inner(rk, rk)
    reference_energy = torch.inner(b, b)
    return residual_energy / reference_energy
18 |
19 |
def conjugate_gradient(A, b, x0=None, x_true=None, rtol=1e-8, max_iter=1024):
    """The conjugate gradient method for solving linear systems of equations.

    Returns a history of `(A-norm error, relative residual)` pairs and the
    approximate solution.
    """
    b_energy = torch.inner(b, b)
    x_hat = torch.zeros_like(b) if x0 is None else x0
    r = b - A @ x_hat  # residual
    p = r.clone()  # search direction

    def _energy_error():
        # Squared A-norm of the error against the reference solution; zero when unknown.
        diff = (x_hat - x_true) if x_true is not None else torch.zeros_like(b, requires_grad=False)
        return torch.inner(diff, A @ diff)

    # Stopping criterion: residual energy relative to that of `b`.
    res = torch.inner(r, r) / b_energy
    errors = [(_energy_error(), res)]

    for _ in range(max_iter):
        if res < rtol:
            break

        Ap = A @ p
        r_norm = torch.inner(r, r)

        step = r_norm / torch.inner(Ap, p)  # step length
        x_hat = x_hat + step * p
        r = r - step * Ap
        p = r + (torch.inner(r, r) / r_norm) * p  # next conjugate direction

        res = torch.inner(r, r) / b_energy
        errors.append((_energy_error(), res))

    return errors, x_hat
48 |
49 |
def preconditioned_conjugate_gradient(
    A, b: torch.Tensor, M: "Tensor", x0=None, x_true=None, rtol=1e-8, max_iter=1024
):
    """The preconditioned conjugate gradient method for solving linear systems of equations.

    `M` is the preconditioner approximation of A^-1 applied as a matrix product,
    cf. Saad, 2003 Algorithm 9.1.

    Args:
        A: System matrix.
        b: Right-hand side vector.
        M: Preconditioner matrix (approximation of A^-1).
        x0: Optional initial guess; defaults to a float64 zero vector.
        x_true: Optional reference solution for error tracking.
        rtol: Relative residual tolerance for termination.
        max_iter: Maximum number of iterations.

    Returns:
        A tuple of (elapsed wall-clock time, number of iterations, 0).
    """
    x_hat = x0 if x0 is not None else torch.zeros_like(b, dtype=torch.float64)
    b_energy = torch.inner(b, b)

    rk = b - A @ x_hat  # residual
    zk = M @ rk  # preconditioned residual
    pk = zk.clone()  # search direction

    # Errors is a tuple of (error, residual)
    error_i = (x_hat - x_true) if x_true is not None else torch.zeros_like(b, requires_grad=False)
    # FIX: the initial residual is measured with `rk`, consistent with the loop
    # below; previously it used the preconditioned residual `zk`.
    res = torch.inner(rk, rk) / b_energy
    errors = [(torch.inner(error_i, A @ error_i), res)]

    start_time = time.perf_counter()
    for _ in range(max_iter):
        if res < rtol:
            break

        # precomputations
        Ap = A @ pk
        rz = torch.inner(rk, zk)

        a = rz / torch.inner(Ap, pk)  # step length
        x_hat = x_hat + a * pk
        rk = rk - a * Ap
        zk = M @ rk
        beta = torch.inner(rk, zk) / rz
        pk = zk + beta * pk

        error_i = (x_hat - x_true) if x_true is not None else torch.zeros_like(b, requires_grad=False)
        res = torch.inner(rk, rk) / b_energy
        errors.append((torch.inner(error_i, A @ error_i), res))
    end_time = time.perf_counter()

    return end_time - start_time, len(errors) - 1, 0
91 |
--------------------------------------------------------------------------------
/uibk/deep_preconditioning/data_set.py:
--------------------------------------------------------------------------------
1 | """A collection of PyTorch data sets from sparse symmetric positive-definite problems.
2 |
3 | Classes:
4 | SludgePatternDataSet: A collection of linear Poisson problems from CFD simulations.
5 | StAnDataSet: A large collection of solved linear static analysis problems on frame structures.
6 | RandomSPDDataSet: Random symmetric positive-definite matrices data set.
7 | """
8 |
9 | import random
10 | from pathlib import Path
11 |
12 | import kaggle
13 | import numpy as np
14 | import spconv.pytorch as spconv
15 | import torch
16 | from scipy.sparse import coo_matrix, load_npz, save_npz
17 | from torch.utils.data import Dataset
18 | from tqdm import tqdm
19 |
20 | ROOT: Path = Path("./assets/data/raw/")
21 |
22 |
class SludgePatternDataSet(Dataset):
    """A collection of linear Poisson problems from CFD.

    Each case folder holds one simulated sludge pattern: the assembled sparse
    matrix (`matrix.npz`), a right-hand side, and a pre-computed solution.
    Batches are served as lower-triangular `spconv` tensors, padded with
    trivial equations up to the largest problem size in the whole collection.
    """

    def __init__(self, stage: str, batch_size: int, shuffle: bool = True, root: Path = ROOT) -> None:
        """Initialize the data set.

        Args:
            stage: One of "train" or "test" in 80/20 split.
            batch_size: Number of samples per batch.
            shuffle: Whether to shuffle the data.
            root: Path to the data directory.

        Raises:
            An `AssertionError` if `stage` is neither "train" nor "test" or if CUDA is not available.
        """
        # `_folders` keeps ALL cases; `folders` below holds the split for `stage`.
        self._folders = sorted(list((root / "sludge_patterns").glob("case_*")))

        match stage:
            case "train":
                self.folders = self._folders[: len(self._folders) * 80 // 100]
            case "test":
                self.folders = self._folders[len(self._folders) * 80 // 100 :]
            case _:
                raise AssertionError(f"Invalid stage {stage}")
        if shuffle:
            random.shuffle(self.folders)

        self.batch_size = batch_size
        # Maximum size over BOTH splits so train and test pad identically.
        self.dof_max = self._compute_max_dof()

        assert torch.cuda.is_available(), "CUDA is mandatory but not available"
        self.device = torch.device("cuda")

    def _compute_max_dof(self) -> int:
        """Compute the maximum degrees of freedom in the data set.

        Scans the "shape" entry of every case's `matrix.npz`, across both splits.
        """
        max_dof = 0

        for folder in self._folders:
            current = np.load(folder / "matrix.npz")["shape"].max().item()
            if current > max_dof:
                max_dof = current

        assert max_dof > 0, "Maximum degrees of freedom is zero"

        return max_dof

    def __len__(self) -> int:
        """Return the number of batches (an incomplete trailing batch is dropped)."""
        return len(self.folders) // self.batch_size

    def __getitem__(self, index: int) -> tuple[spconv.SparseConvTensor, torch.Tensor, torch.Tensor, tuple[int]]:
        """Return a single batch of linear system data.

        The tensor format is as required in the `traveller59/spconv` package. The matrices, solutions, and right-hand
        sides are zero-padded to fit the maximum degrees of freedom.

        Returns:
            The lower-triangular systems as one sparse batch tensor, the
            solutions, the right-hand sides, and the original (unpadded) sizes.
        """
        batch = dict(features=list(), indices=list(), solutions=list(), right_hand_sides=list())
        original_sizes = tuple()

        for batch_index in range(self.batch_size):
            case_folder = self.folders[index * self.batch_size + batch_index]

            # NOTE(review): relies on the array order inside the `.npz` written
            # by `scipy.sparse.save_npz` (row, col, format, shape, data) --
            # confirm when upgrading SciPy.
            rows, columns, _, original_size, values = np.load(case_folder / "matrix.npz").values()
            original_sizes += (original_size[0],)
            difference = self.dof_max - original_size[0]

            # filter lower triangular part because of symmetry
            (filter,) = np.where(rows >= columns)
            rows = rows[filter]
            columns = columns[filter]
            values = values[filter]
            # add trivial equations for maximum degrees of freedom
            rows = np.append(rows, np.arange(original_size[0], self.dof_max))
            columns = np.append(columns, np.arange(original_size[0], self.dof_max))
            values = np.append(values, np.ones((difference,)))

            solution = np.loadtxt(case_folder / "solution.csv")
            right_hand_side = np.loadtxt(case_folder / "right_hand_side.csv")

            batch["features"].append(np.expand_dims(values, axis=-1))
            # `spconv` indices are (batch, row, col) triplets.
            batch["indices"].append(
                np.column_stack(
                    (np.full(len(values), batch_index), rows, columns),
                )
            )
            # Padded tail entries are one, consistent with the trivial 1 * x = 1
            # diagonal equations appended above.
            batch["solutions"].append(
                np.expand_dims(
                    np.pad(solution, (0, difference), "constant", constant_values=1),
                    axis=0,
                )
            )
            batch["right_hand_sides"].append(
                np.expand_dims(
                    np.pad(right_hand_side, (0, difference), "constant", constant_values=1),
                    axis=0,
                )
            )

        features = torch.from_numpy(np.vstack(batch["features"])).float().to(self.device)
        indices = torch.from_numpy(np.vstack(batch["indices"])).int().to(self.device)
        lower_triangular_systems = spconv.SparseConvTensor(
            features, indices, [self.dof_max, self.dof_max], self.batch_size
        )

        solutions = torch.from_numpy(np.vstack(batch["solutions"])).float().to(self.device)
        right_hand_sides = torch.from_numpy(np.vstack(batch["right_hand_sides"])).float().to(self.device)

        return lower_triangular_systems, solutions, right_hand_sides, original_sizes
131 |
132 |
def download_from_kaggle() -> None:
    """Download the StAn data set from Kaggle into `ROOT`.

    Raises:
        AssertionError: If no Kaggle API key is installed for the current user.
    """
    # The previous check asserted on the `Path` object itself, which is always
    # truthy; the credentials file must actually exist on disk.
    assert (Path.home() / ".kaggle/kaggle.json").exists(), "Kaggle API key is missing"

    kaggle.api.authenticate()
    kaggle.api.dataset_download_files(dataset="zurutech/stand-small-problems", path=ROOT, quiet=False, unzip=False)
139 |
140 |
class StAnDataSet(Dataset):
    """A large collection of solved linear static analysis problems on frame structures.

    See also https://www.kaggle.com/datasets/zurutech/stand-small-problems.
    """

    def __init__(self, stage: str, batch_size: int, shuffle: bool, root: Path = ROOT) -> None:
        """Initialize the data set.

        Args:
            stage: One of "train" or "test".
            batch_size: Number of samples per batch.
            shuffle: Whether to shuffle the data.
            root: Path to the data directory.

        Raises:
            An `AssertionError` if `stage` is neither "train" nor "test" or if CUDA is not available.
        """
        match stage:
            case "train" | "test":
                self.files = list(root.glob(f"stand_small_{stage}/*.npz"))
            case _:
                raise AssertionError(f"Invalid stage {stage}")
        if shuffle:
            random.shuffle(self.files)
        self.batch_size = batch_size
        # Largest problem size in the collection; every sample is padded to it.
        self.dof_max = 5166  # https://www.kaggle.com/datasets/zurutech/stand-small-problems

        assert torch.cuda.is_available(), "CUDA is mandatory but not available"
        self.device = torch.device("cuda")

    def __len__(self) -> int:
        """Return the number of batches (an incomplete trailing batch is dropped)."""
        return len(self.files) // self.batch_size

    def __getitem__(self, index: int) -> tuple[spconv.SparseConvTensor, torch.Tensor, torch.Tensor, tuple[int]]:
        """Return a single batch of linear system data.

        The tensor format is as required in the `traveller59/spconv` package. The matrices, solutions, and right-hand
        sides are zero-padded to fit the maximum degrees of freedom.
        """
        batch = dict(features=list(), indices=list(), solutions=list(), right_hand_sides=list())
        original_sizes = tuple()

        for batch_index in range(self.batch_size):
            # NOTE(review): relies on the array order inside each `.npz` being
            # (indices, values, solution, right_hand_side) -- confirm against
            # the data set on disk.
            indices, values, solution, right_hand_side = np.load(
                self.files[index * self.batch_size + batch_index]
            ).values()
            original_sizes += solution.shape
            difference = self.dof_max - len(solution)

            # filter lower triangular part because of symmetry
            (filter,) = np.where(indices[0] >= indices[1])
            indices = indices[:, filter]
            values = values[filter]

            batch["features"].append(np.expand_dims(values, axis=-1))
            # Prepend the batch index so rows become (batch, row, col) triplets.
            batch["indices"].append(np.concatenate((np.full((len(values), 1), batch_index), indices.T), axis=1))
            # NOTE(review): unlike `SludgePatternDataSet`, no trivial diagonal
            # equations are appended -- padded tail rows of the matrix stay empty.
            batch["solutions"].append(
                np.expand_dims(
                    np.pad(solution, (0, difference)),
                    axis=0,
                )
            )
            batch["right_hand_sides"].append(
                np.expand_dims(
                    np.pad(right_hand_side, (0, difference)),
                    axis=0,
                )
            )

        features = torch.from_numpy(np.vstack(batch["features"])).float().to(self.device)
        indices = torch.from_numpy(np.vstack(batch["indices"])).int().to(self.device)
        systems_tril = spconv.SparseConvTensor(features, indices, [self.dof_max, self.dof_max], self.batch_size)

        solutions = torch.from_numpy(np.vstack(batch["solutions"])).float().to(self.device)
        right_hand_sides = torch.from_numpy(np.vstack(batch["right_hand_sides"])).float().to(self.device)

        return systems_tril, solutions, right_hand_sides, original_sizes
220 |
221 |
class RandomSPDDataSet(Dataset):
    """Random symmetric positive-definite matrices data set.

    Matrices are generated once, cached as `.npz` files on disk, and re-used on
    subsequent runs. The solution is fixed to the all-ones vector and the
    right-hand side is computed accordingly.
    """

    def __init__(
        self, stage: str, dof: int, batch_size: int, sparsity: float = 0.99, length: int = 1000, shuffle: bool = True
    ) -> None:
        """Initialize the data set.

        Args:
            stage: One of "train" or "test" in 80/20 split.
            dof: Degrees of freedom where (dof, dof) is the size of each matrix.
            batch_size: Number of samples per batch.
            sparsity: Percentage in (0, 1] indicating how many off-diagonal elements are zero.
            length: Number of total samples.
            shuffle: Whether to shuffle the data.
        """
        assert torch.cuda.is_available(), "CUDA is mandatory but not available"
        self.device = torch.device("cuda")

        assert 0 < sparsity <= 1, f"`sparsity` must be in (0, 1] but got {sparsity}"

        self.dof = dof
        self.sparsity = sparsity
        self.batch_size = batch_size
        self.length = length

        self.save_dir = ROOT / "random_spd"

        # Generate and cache the matrices on the first run only.
        # NOTE(review): an existing cache directory is trusted blindly -- if
        # `dof`/`sparsity`/`length` change, stale matrices are served.
        if not self.save_dir.exists():
            self.save_dir.mkdir(parents=True)
            self._generate_data_set()

        match stage:
            case "train":
                self.files = list(self.save_dir.glob("*.npz"))[: length * 80 // 100]
            case "test":
                self.files = list(self.save_dir.glob("*.npz"))[length * 80 // 100 :]
            case _:
                raise AssertionError(f"Invalid stage {stage}")
        if shuffle:
            random.shuffle(self.files)

    def __len__(self) -> int:
        """Return the number of batches (an incomplete trailing batch is dropped)."""
        return len(self.files) // self.batch_size

    def __getitem__(self, index: int) -> tuple[spconv.SparseConvTensor, torch.Tensor, torch.Tensor, tuple[int, ...]]:
        """Return a single batch of random SPD matrices.

        The tensor format is as required in the `traveller59/spconv` package.
        """
        batch = dict(features=list(), indices=list(), solutions=list(), right_hand_sides=list())
        original_sizes = tuple()

        for batch_index in range(self.batch_size):
            file = self.files[index * self.batch_size + batch_index]

            # The file is read twice: raw COO arrays for the sparse tensor, and
            # a sparse matrix object for the matvec below.
            # NOTE(review): the unpacking relies on the key order of the `.npz`
            # written by `scipy.sparse.save_npz` -- confirm when upgrading SciPy.
            rows, columns, _, _, values = np.load(file).values()
            matrix = load_npz(file)
            # All matrices share the same size; no padding is necessary.
            original_sizes += (self.dof,)

            # filter lower triangular part because of symmetry
            (filter,) = np.where(rows >= columns)
            rows = rows[filter]
            columns = columns[filter]
            values = values[filter]

            # Fixed all-ones solution with the matching right-hand side.
            solution = np.ones((self.dof,))
            right_hand_side = matrix @ solution

            batch["features"].append(np.expand_dims(values, axis=-1))
            # `spconv` indices are (batch, row, col) triplets.
            batch["indices"].append(
                np.column_stack(
                    (np.full(len(values), batch_index), rows, columns),
                )
            )
            batch["solutions"].append(np.expand_dims(solution, axis=0))
            batch["right_hand_sides"].append(np.expand_dims(right_hand_side, axis=0))

        features = torch.from_numpy(np.vstack(batch["features"])).float().to(self.device)
        indices = torch.from_numpy(np.vstack(batch["indices"])).int().to(self.device)
        lower_triangular_systems = spconv.SparseConvTensor(features, indices, [self.dof, self.dof], self.batch_size)

        solutions = torch.from_numpy(np.vstack(batch["solutions"])).float().to(self.device)
        right_hand_sides = torch.from_numpy(np.vstack(batch["right_hand_sides"])).float().to(self.device)

        return lower_triangular_systems, solutions, right_hand_sides, original_sizes

    def _generate_random_spd_matrix(self) -> np.ndarray:
        """Generate a single random SPD matrix with a given non-zero pattern.

        For this synthetic data set, see also:

        HÄUSNER, Paul; ÖKTEM, Ozan; SJÖLUND, Jens. Neural incomplete factorization: learning preconditioners for the
        conjugate gradient method. arXiv preprint arXiv:2305.16368, 2023.

        https://arxiv.org/pdf/2305.16368
        """
        # Sample which strictly-lower-triangular entries are non-zero.
        row_indices, col_indices = np.tril_indices(n=self.dof, k=-1)
        sample_indices = random.sample(range(len(row_indices)), k=int((1 - self.sparsity) * len(row_indices)))

        interim = np.zeros((self.dof, self.dof), dtype=np.float32)

        rng = np.random.default_rng()
        for sample_index in sample_indices:
            interim[row_indices[sample_index], col_indices[sample_index]] = rng.standard_normal()

        # L @ L.T is positive semi-definite; the shifted diagonal makes it definite.
        alpha = 1e-3
        return interim @ interim.T + alpha * np.eye(self.dof)

    def _generate_data_set(self) -> None:
        """Generate the data set and store each matrix as an uncompressed `.npz` file."""
        for index in tqdm(iterable=list(range(self.length)), desc="Generating random SPD matrices", unit="matrices"):
            matrix = coo_matrix(self._generate_random_spd_matrix())
            save_npz(self.save_dir / f"{index:04}.npz", matrix, compressed=False)
337 |
--------------------------------------------------------------------------------
/uibk/deep_preconditioning/generate_data.py:
--------------------------------------------------------------------------------
1 | """Generate linear systems from OpenFOAM based on different sludge patterns."""
2 |
3 | import subprocess
4 | from pathlib import Path
5 |
6 | import dvc.api
7 | import numpy as np
8 | import scipy
9 | import triangle as tr
10 | from scipy.sparse import coo_matrix, save_npz
11 | from stl import mesh
12 |
13 | ROOT: Path = Path(__file__).parents[2]
14 |
15 | rng = np.random.default_rng(seed=69420)
16 |
17 |
def _sludge_pattern(resolution: int) -> None:
    """Generate a random sludge pattern at the bottom of the tank.

    The resulting mesh is saved as an STL file in the correct OpenFOAM directory.

    Args:
        resolution: The resolution of the OpenFOAM mesh.
    """
    # Slanted bottom profile with random bumps on the interior points.
    positions_x = np.linspace(1, 25, num=resolution)
    positions_y = 0.0625 * positions_x - 6.0625
    positions_y[1:-1] += rng.normal(loc=0.25, scale=0.1, size=resolution - 2)

    # Two copies of the profile; the second one sits at z = -0.5.
    vertices = np.zeros((2 * resolution, 3))
    vertices[:, 0] = np.concatenate((positions_x, positions_x[::-1]))
    vertices[:, 1] = np.concatenate((positions_y, positions_y[::-1]))
    vertices[resolution:, 2] = -0.5

    # Triangulate the closed polygon spanned by the vertex loop in the x-z plane.
    loop = np.arange(2 * resolution)
    segments = np.stack((loop, np.roll(loop, -1))).T
    triangulation = tr.triangulate(dict(vertices=vertices[:, [0, 2]], segments=segments), "p")
    faces = triangulation["triangles"]

    sludge = mesh.Mesh(np.zeros(faces.shape[0], dtype=mesh.Mesh.dtype))
    for face_index, face in enumerate(faces):
        for dimension in range(3):
            sludge.vectors[face_index][dimension] = vertices[face[dimension], :]

    stl_directory = ROOT / "foam/sim/constant/triSurface/"
    stl_directory.mkdir(parents=True, exist_ok=True)
    sludge.save(stl_directory / "sludge.stl")
53 |
54 |
55 | def _build_matrix(csv_file: Path) -> coo_matrix:
56 | """Build an SPD matrix from a csv file.
57 |
58 | Args:
59 | csv_file: Path to the matrix csv file from OpenFOAM.
60 |
61 | Returns:
62 | matrix: The matrix in COO format.
63 |
64 | Raises:
65 | AssertionError: If the matrix is not positive definite.
66 | """
67 | data = np.genfromtxt(csv_file, delimiter=",")
68 |
69 | row = data[:, 0]
70 | col = data[:, 1]
71 | val = -data[:, 2]
72 |
73 | n_rows = int(max(row)) + 1
74 | matrix = coo_matrix((val, (row, col)), shape=(n_rows, n_rows))
75 |
76 | assert (matrix.transpose() != matrix).nnz == 0, "Generated matrix is non-symmetric matrix"
77 |
78 | eigenvalues = np.linalg.eigvals(matrix.toarray())
79 | assert np.all(eigenvalues > 0), "Generated matrix is not positive definite"
80 |
81 | return matrix
82 |
83 |
def main() -> None:
    """Simulate random sludge patterns.

    Make sure the OpenFOAM 7 docker container is running before executing this script. To do so execute
    `openfoam7-linux` in the terminal.
    """
    params = dvc.api.params_show()

    # Patch the configured mesh resolution into the blockMesh dictionary.
    sed_script = f"s/^res [0-9]+/res {params['mesh_cells']}/"
    subprocess.run(f"sed -i -E '{sed_script}' blockMeshDict", cwd=ROOT / "foam/sim/system", shell=True)

    for sample_index in range(params["number_samples"]):
        _sludge_pattern(resolution=params["resolution"])

        case_directory = ROOT / f"assets/data/raw/sludge_patterns/case_{sample_index:04}/"
        case_directory.mkdir(parents=True, exist_ok=True)

        # Run simulation and dump matrix.
        subprocess.run(
            "docker exec openfoam /bin/bash -i -c 'cd foam/sim/ && ./Allrun'",
            cwd=ROOT / "foam",
            shell=True,
        )

        matrix = _build_matrix(ROOT / "foam/sim/matrix.csv")
        right_hand_side = rng.uniform(-1, 1, size=matrix.shape[0])
        solution, _ = scipy.sparse.linalg.cg(matrix, right_hand_side, rtol=0, atol=1e-6)

        save_npz(case_directory / "matrix.npz", matrix, compressed=False)
        np.savetxt(case_directory / "right_hand_side.csv", right_hand_side)
        np.savetxt(case_directory / "solution.csv", solution)


if __name__ == "__main__":
    main()
116 |
--------------------------------------------------------------------------------
/uibk/deep_preconditioning/metrics.py:
--------------------------------------------------------------------------------
1 | """A collection of metrics to be used in the project."""
2 |
3 | from typing import TYPE_CHECKING
4 |
5 | import torch
6 |
7 | from uibk.deep_preconditioning.utils import sparse_matvec_mul
8 |
9 | if TYPE_CHECKING:
10 | from spconv.pytorch import SparseConvTensor
11 |
12 |
def frobenius_loss(
    lower_triangular: "SparseConvTensor", solution: torch.Tensor, right_hand_side: torch.Tensor
) -> torch.Tensor:
    """Compute the Frobenius norm of the error.

    See also equation (11) in the reference the module follows.

    Args:
        lower_triangular: Lower triangular matrices as `spconv` tensors.
        solution: Solution tensors.
        right_hand_side: Right-hand side tensors.

    Returns:
        The Frobenius norm of the error on the batch.
    """
    # L @ (L.T @ x) should reproduce the right-hand side b.
    reconstructed = sparse_matvec_mul(
        lower_triangular,
        sparse_matvec_mul(lower_triangular, solution, transpose=True),
        transpose=False,
    )

    return torch.linalg.vector_norm(reconstructed - right_hand_side, ord=2, dim=1).sum()
32 |
33 |
def inverse_loss(systems_tril: "SparseConvTensor", preconditioners_tril: "SparseConvTensor") -> torch.Tensor:
    """Compute how well the preconditioner approximates the matrix inverse.

    Args:
        systems_tril: Lower triangular matrices as `spconv` tensors.
        preconditioners_tril: Lower triangular matrices as `spconv` tensors.

    Returns:
        The inverse loss on the batch.
    """
    # Assemble the full preconditioner P = L @ L.T from its triangular factor.
    factors = preconditioners_tril.dense()[:, 0]
    preconditioners = factors @ factors.transpose(-1, -2)

    # Mirror the strictly lower part to recover the full symmetric system.
    half = systems_tril.dense()[:, 0]
    systems = half + torch.tril(half, -1).transpose(-1, -2)

    preconditioned_systems = preconditioners @ systems

    # P @ A should be close to the identity for a perfect preconditioner.
    identity = torch.eye(systems.shape[1]).unsqueeze(0).expand((systems.shape[0], -1, -1))
    identity = identity.to(preconditioned_systems.device)

    return torch.linalg.matrix_norm(preconditioned_systems - identity).mean()
56 |
57 |
def hutchinson_trace(systems_tril: "SparseConvTensor", preconditioners_tril: "SparseConvTensor") -> torch.Tensor:
    """Compute the trace estimate of the preconditioned system.

    Args:
        systems_tril: Lower triangular matrices as `spconv` tensors.
        preconditioners_tril: Lower triangular matrices as `spconv` tensors.

    Returns:
        The trace estimate on the batch.
    """
    factors = preconditioners_tril.dense()[:, 0]

    # Mirror the strictly lower part to recover the full symmetric system.
    half = systems_tril.dense()[:, 0]
    systems = half + torch.tril(half, -1).transpose(-1, -2)

    # Hutchinson-style probe: compare L @ L.T @ v against A @ v for a random v.
    probe = torch.randn(systems.shape[:2], device=systems.device).unsqueeze(-1)
    difference = torch.bmm(factors, torch.bmm(factors.transpose(-1, -2), probe)) - torch.bmm(systems, probe)

    return torch.linalg.vector_norm(difference.squeeze(), ord=2, dim=1).mean()
78 |
79 |
def condition_loss(systems_tril: "SparseConvTensor", preconditioners_tril: "SparseConvTensor") -> torch.Tensor:
    """Compute the condition number loss.

    Args:
        systems_tril: Lower triangular matrices as `spconv` tensors.
        preconditioners_tril: Lower triangular matrices as `spconv` tensors.

    Returns:
        The average condition number of the batch.
    """
    # Assemble the full preconditioner P = L @ L.T from its triangular factor.
    factors = preconditioners_tril.dense()[:, 0]
    preconditioners = factors @ factors.transpose(-1, -2)

    # Mirror the strictly lower part to recover the full symmetric system.
    half = systems_tril.dense()[:, 0]
    systems = half + torch.tril(half, -1).transpose(-1, -2)

    # Condition number as the ratio of extreme singular values of P @ A.
    sigmas = torch.linalg.svdvals(preconditioners @ systems)

    return (sigmas.max(dim=1)[0] / sigmas.min(dim=1)[0]).mean()
101 |
--------------------------------------------------------------------------------
/uibk/deep_preconditioning/model.py:
--------------------------------------------------------------------------------
1 | """Define convolutional neural network architecture for preconditioning.
2 |
3 | Classes:
4 | PreconditionerNet: CNN returns lower triangular matrices for preconditioning.
5 | PreconditionerSparseUNet: U-Net inspired architecture for preconditioning.
6 | """
7 |
8 | import spconv.pytorch as spconv
9 | import torch
10 | from torch import nn
11 |
12 |
class PreconditionerNet(nn.Module):
    """Fully convolutional network mapping matrices to lower triangular matrices."""

    def __init__(self, channels: list[int]) -> None:
        """Initialize the network architecture.

        Args:
            channels: Channel sizes of all layers. The list must have an ODD
                number of entries (enforced by the assert below): each 2x2
                hidden convolution shifts the spatial shape by (+1, -1) or
                (-1, +1) depending on its padding, and only with equally many
                of each kind does the output stay square.
        """
        super().__init__()

        # `len(channels) % 2` is truthy only for an odd number of entries.
        assert len(channels) % 2

        # input layer: 1x1 convolution lifting the scalar matrix entries
        self.layers = spconv.SparseSequential(
            spconv.SparseConv2d(channels[0], channels[1], 1),
            nn.PReLU(),
        )

        # hidden layers: the first half pads the row dimension, the second
        # half the column dimension, balancing the 2x2 kernels overall
        for index, (in_channels, out_channels) in enumerate(zip(channels[1:-2], channels[2:-1], strict=True)):
            padding = (1, 0) if index < (len(channels) - 2) // 2 else (0, 1)

            self.layers.add(spconv.SparseConv2d(in_channels, out_channels, 2, padding=padding))
            self.layers.add(nn.PReLU())

        # output layer: 1x1 convolution back to scalar entries, no activation
        self.layers.add(spconv.SparseConv2d(channels[-2], channels[-1], 1))

    def forward(self, input_: spconv.SparseConvTensor) -> spconv.SparseConvTensor:
        """Return the `L` part of the `L @ L.T` preconditioner for the conjugate gradient solver.

        Args:
            input_: Sparse batch tensor representing the linear system.

        Returns:
            Sparse batch tensor of lower triangular matrices with a strictly
            positive diagonal (softplus-activated).
        """
        interim = self.layers(input_)

        (filter,) = torch.where(interim.indices[:, 1] < interim.indices[:, 2])  # (batch, row, col)
        interim.features[filter] *= 0  # make the matrix lower triangular

        (filter,) = torch.where(interim.indices[:, 1] == interim.indices[:, 2])
        interim.features[filter] = nn.functional.softplus(interim.features[filter])  # enforce positive diagonal

        return interim
60 |
61 |
class PreconditionerSparseUNet(nn.Module):
    """U-Net inspired architecture for preconditioning.

    Encoder stages process features at a fixed sparsity pattern (`SubMConv2d`)
    and downsample with strided `SparseConv2d`; the decoder mirrors each
    downsampling with a `SparseInverseConv2d` that reuses the recorded
    `indice_key`, and skip connections add encoder features back in at every
    resolution via `sparse_add`.
    """

    def __init__(self, channels: list[int]) -> None:
        """Initialize the network architecture."""
        # `channels[0]` is the input feature width; `channels[1:5]` are the
        # encoder/decoder widths per resolution; `channels[5]` is both the
        # bottleneck width and the width of the final 1x1 output convolution.
        # NOTE(review): presumably `channels[5]` is 1 so the output is a single
        # matrix channel — confirm against the configured params.
        super().__init__()

        # --- Encoder ---
        # Submanifold convolutions keep the sparsity pattern unchanged; each
        # `indice_key` ("subm*"/"down*") is recorded so the matching decoder
        # layer below can reuse / invert the exact index mapping.
        self.enc1 = spconv.SparseSequential(
            spconv.SubMConv2d(channels[0], channels[1], kernel_size=3, padding=1, indice_key="subm1"),
            nn.LeakyReLU(),
        )
        self.down1 = spconv.SparseSequential(
            spconv.SparseConv2d(channels[1], channels[2], kernel_size=3, stride=2, padding=1, indice_key="down1"),
            nn.LeakyReLU(),
        )
        self.enc2 = spconv.SparseSequential(
            spconv.SubMConv2d(channels[2], channels[2], kernel_size=3, padding=1, indice_key="subm2"),
            nn.LeakyReLU(),
        )
        self.down2 = spconv.SparseSequential(
            spconv.SparseConv2d(channels[2], channels[3], kernel_size=3, stride=2, padding=1, indice_key="down2"),
            nn.LeakyReLU(),
        )
        self.enc3 = spconv.SparseSequential(
            spconv.SubMConv2d(channels[3], channels[3], kernel_size=3, padding=1, indice_key="subm3"),
            nn.LeakyReLU(),
        )
        self.down3 = spconv.SparseSequential(
            spconv.SparseConv2d(channels[3], channels[4], kernel_size=3, stride=2, padding=1, indice_key="down3"),
            nn.LeakyReLU(),
        )
        self.enc4 = spconv.SparseSequential(
            spconv.SubMConv2d(channels[4], channels[4], kernel_size=3, padding=1, indice_key="subm4"),
            nn.LeakyReLU(),
        )

        # --- Bottleneck --- (one more stride-2 downsampling, key "bneck")
        self.bottleneck = spconv.SparseSequential(
            spconv.SparseConv2d(channels[4], channels[5], kernel_size=3, stride=2, padding=1, indice_key="bneck"),
            nn.LeakyReLU(),
        )

        # --- Decoder ---
        # Each `SparseInverseConv2d` upsamples by inverting the index mapping
        # stored under the given `indice_key`; each `dec*` SubMConv2d reuses the
        # corresponding encoder "subm*" key at that resolution.
        self.up3 = spconv.SparseSequential(
            spconv.SparseInverseConv2d(channels[5], channels[4], kernel_size=3, indice_key="bneck"),
            nn.LeakyReLU(),
        )
        self.dec3 = spconv.SparseSequential(
            spconv.SubMConv2d(channels[4], channels[4], kernel_size=3, padding=1, indice_key="subm4"),
            nn.LeakyReLU(),
        )
        self.up2 = spconv.SparseSequential(
            spconv.SparseInverseConv2d(channels[4], channels[3], kernel_size=3, indice_key="down3"),
            nn.LeakyReLU(),
        )
        self.dec2 = spconv.SparseSequential(
            spconv.SubMConv2d(channels[3], channels[3], kernel_size=3, padding=1, indice_key="subm3"),
            nn.LeakyReLU(),
        )
        self.up1 = spconv.SparseSequential(
            spconv.SparseInverseConv2d(channels[3], channels[2], kernel_size=3, indice_key="down2"),
            nn.LeakyReLU(),
        )
        self.dec1 = spconv.SparseSequential(
            spconv.SubMConv2d(channels[2], channels[2], kernel_size=3, padding=1, indice_key="subm2"),
            nn.LeakyReLU(),
        )
        self.up0 = spconv.SparseSequential(
            spconv.SparseInverseConv2d(channels[2], channels[1], kernel_size=3, indice_key="down1"),
            nn.LeakyReLU(),
        )
        self.dec0 = spconv.SparseSequential(
            spconv.SubMConv2d(channels[1], channels[1], kernel_size=3, padding=1, indice_key="subm1"),
            nn.LeakyReLU(),
        )

        # 1x1 output convolution mapping back to `channels[5]` features.
        self.out_conv = spconv.SparseSequential(
            spconv.SubMConv2d(channels[1], channels[5], kernel_size=1, padding=0),
        )

    def forward(self, input_: spconv.SparseConvTensor) -> spconv.SparseConvTensor:
        """Return the `L` part of the `L @ L.T` preconditioner for the conjugate gradient solver."""
        # --- Encoder ---
        enc1 = self.enc1(input_)
        down1 = self.down1(enc1)
        enc2 = self.enc2(down1)  # process at R/2
        down2 = self.down2(enc2)  # downsample R/2 -> R/4
        enc3 = self.enc3(down2)  # process at R/4
        down3 = self.down3(enc3)
        enc4 = self.enc4(down3)

        # --- Bottleneck ---
        bottleneck = self.bottleneck(enc4)

        # --- Decoder --- (skip connections added elementwise on the sparse grids)
        up3 = self.up3(bottleneck)
        up3 = spconv.functional.sparse_add(up3, enc4)
        dec3 = self.dec3(up3)

        up2 = self.up2(dec3)
        up2 = spconv.functional.sparse_add(up2, enc3)
        dec2 = self.dec2(up2)

        up1 = self.up1(dec2)
        up1 = spconv.functional.sparse_add(up1, enc2)
        dec1 = self.dec1(up1)

        up0 = self.up0(dec1)
        up0 = spconv.functional.sparse_add(up0, enc1)
        dec0 = self.dec0(up0)

        interim = self.out_conv(dec0)

        # Zero the strictly upper triangle (`filter` shadows the builtin — kept
        # unchanged here since this edit is documentation-only).
        (filter,) = torch.where(interim.indices[:, 1] < interim.indices[:, 2])  # (batch, row, col)
        interim.features[filter] *= 0  # make the matrix lower triangular

        # Softplus keeps diagonal entries strictly positive so L @ L.T is SPD.
        (filter,) = torch.where(interim.indices[:, 1] == interim.indices[:, 2])
        interim.features[filter] = nn.functional.softplus(interim.features[filter])  # enforce positive diagonal

        return interim
180 |
--------------------------------------------------------------------------------
/uibk/deep_preconditioning/test.py:
--------------------------------------------------------------------------------
"""Test the performance of conventional and our preconditioner."""
2 |
3 | import csv
4 | import time
5 | from dataclasses import dataclass
6 | from pathlib import Path
7 | from typing import TYPE_CHECKING, Generator
8 |
9 | import dvc.api
10 | import ilupp
11 | import matplotlib.pyplot as plt
12 | import numpy as np
13 | import torch
14 | from pyamg.aggregation import smoothed_aggregation_solver
15 | from scipy.sparse import csr_matrix
16 | from tqdm import tqdm
17 |
18 | import uibk.deep_preconditioning.data_set as data_sets
19 | import uibk.deep_preconditioning.model as models
20 | from uibk.deep_preconditioning.cg import preconditioned_conjugate_gradient
21 |
22 | if TYPE_CHECKING:
23 | from matplotlib.figure import Figure
24 | from spconv.pytorch import SparseConvTensor
25 | from torch import Tensor
26 | from torch.utils.data import Dataset
27 |
28 | RESULTS_DIRECTORY: Path = Path("./assets/results/")
29 |
30 |
@dataclass
class BenchmarkSuite:
    """Data class that holds the preconditioner benchmark suite.

    Args:
        data_set: The test data set to benchmark on.
        model: Our fully convolutional model to benchmark.
        techniques: Preconditioner names to evaluate; every entry must have a
            matching `_construct_<name>` method on this class.
    """

    data_set: "Dataset"
    model: torch.nn.Module
    techniques: tuple[str, ...] = (
        "vanilla",
        "jacobi",
        "incomplete_cholesky",  # unstable
        # "incomplete_lu",  # unstable
        # "algebraic_multigrid",
        "learned",
    )

    def __post_init__(self) -> None:
        """Allocate per-instance result stores and create the output directory.

        These dictionaries were previously mutable *class* attributes: shared
        between all instances and always built from the class-level default
        `techniques`, silently ignoring a custom tuple passed by the caller.
        """
        self.kappas: dict[str, list] = {name: [] for name in self.techniques}
        self.densities: dict[str, list] = {name: [] for name in self.techniques}
        self.iterations: dict[str, list] = {name: [] for name in self.techniques}
        self.setups: dict[str, list] = {name: [] for name in self.techniques}
        self.durations: dict[str, list] = {name: [] for name in self.techniques}
        self.totals: dict[str, list] = {name: [] for name in self.techniques}
        self.successes: dict[str, list] = {name: [] for name in self.techniques}
        self.histograms: dict = dict()

        # Done at instantiation time instead of as a class-body side effect.
        RESULTS_DIRECTORY.mkdir(parents=True, exist_ok=True)

    def _reconstruct_system(self, system_tril: "SparseConvTensor", original_size: int) -> "Tensor":
        """Reconstruct the dense symmetric system matrix from the sparse tensor.

        Args:
            system_tril: Lower-triangular half of the system as a sparse tensor.
            original_size: Unpadded number of rows/columns of the system.

        Returns:
            Dense, symmetric, float64 system matrix on the CPU.
        """
        assert system_tril.batch_size == 1, "Set batch size to one for testing"

        matrix = system_tril.dense()[0, 0, :original_size, :original_size]
        matrix += torch.tril(matrix, -1).T  # mirror the strict lower triangle

        return matrix.cpu().to(torch.float64)

    def _construct_vanilla(self, matrix: "Tensor") -> "Tensor":
        """Construct the baseline which is no preconditioner (the identity)."""
        return torch.eye(matrix.shape[0], dtype=torch.float64).to_sparse_csr()

    def _construct_jacobi(self, matrix: "Tensor") -> "Tensor":
        """Construct the Jacobi (inverse diagonal) preconditioner."""
        data = 1 / matrix.diagonal()
        indices = torch.vstack((torch.arange(matrix.shape[0]), torch.arange(matrix.shape[0])))
        diagonal = torch.sparse_coo_tensor(indices, data, size=matrix.shape, dtype=torch.float64)
        return diagonal.to_sparse_csr()

    def _construct_incomplete_cholesky(self, matrix: "Tensor", fill_in: int = 1, threshold: float = 0.1) -> "Tensor":
        """Construct the incomplete Cholesky preconditioner.

        Args:
            matrix: Dense system matrix.
            fill_in: Additional fill-in per column for `icholt`.
            threshold: Drop tolerance; `fill_in == 0` and `threshold == 0.0` select `ichol0`.
        """
        if fill_in == 0 and threshold == 0.0:
            icholprec = ilupp.ichol0(csr_matrix(matrix.numpy()))
        else:
            icholprec = ilupp.icholt(csr_matrix(matrix.numpy()), add_fill_in=fill_in, threshold=threshold)

        return torch.from_numpy((icholprec @ icholprec.T).toarray()).to_sparse_csr()

    def _construct_incomplete_lu(self, matrix: "Tensor") -> "Tensor":
        """Construct the incomplete LU preconditioner."""
        l_factor, u_factor = ilupp.ilut(csr_matrix(matrix.numpy()))
        return torch.from_numpy((l_factor @ u_factor).toarray()).to_sparse_csr()

    def _construct_algebraic_multigrid(self, matrix: "Tensor") -> "Tensor":
        """Construct the algebraic multigrid preconditioner as a dense V-cycle operator."""
        preconditioner = smoothed_aggregation_solver(matrix.numpy()).aspreconditioner(cycle="V")
        return torch.from_numpy(preconditioner.matmat(np.eye(matrix.shape[0], dtype=np.float64))).to_sparse_csr()

    def _construct_learned(self, system_tril: "SparseConvTensor", original_size: int) -> "Tensor":
        """Construct our preconditioner `L @ L.T` from the model output."""
        preconditioners_tril = self.model(system_tril)
        preconditioner = preconditioners_tril.dense()[0, 0, :original_size, :original_size]
        preconditioner = torch.matmul(preconditioner, preconditioner.transpose(-1, -2))
        return preconditioner.detach().cpu().to(torch.float64).to_sparse_csr()

    def _compute_sparsity(self, matrix: "Tensor") -> float:
        """Return the percentage of stored (non-zero) entries, i.e. the density."""
        return 100 * len(matrix.values()) / (matrix.shape[0] * matrix.shape[1])

    def _compute_kappa(self, matrix: "Tensor", preconditioner: "Tensor") -> float:
        """Compute the condition number of the preconditioned system."""
        return torch.linalg.cond(preconditioner @ matrix).item()

    def _compute_eigenvalues(self, matrix: "Tensor", preconditioner: "Tensor") -> list:
        """Compute the singular values of the preconditioned system."""
        return torch.linalg.svdvals(preconditioner @ matrix).tolist()

    def run(self) -> None:
        """Run the whole benchmark suite and fill the per-technique result stores."""
        for index in tqdm(range(len(self.data_set))):
            system_tril, _, right_hand_side, original_size = self.data_set[index]
            matrix = self._reconstruct_system(system_tril, original_size[0])
            right_hand_side = right_hand_side[0, : original_size[0]].squeeze().cpu().to(torch.float64)

            eigenvalues = dict.fromkeys(self.techniques)

            for name in self.techniques:
                # The timer start was duplicated in both branches; hoisted here.
                start_time = time.perf_counter()
                if name == "learned":
                    preconditioner = self._construct_learned(system_tril, original_size[0])
                else:
                    preconditioner = getattr(self, f"_construct_{name}")(matrix)
                setup = time.perf_counter() - start_time if name != "vanilla" else 0.0

                density = self._compute_sparsity(preconditioner)
                duration, iteration, info = preconditioned_conjugate_gradient(matrix, right_hand_side, preconditioner)
                kappa = self._compute_kappa(matrix, preconditioner)
                if index == 0:
                    # Only dump the spectrum of the first system to keep the CSV small.
                    eigenvalues[name] = self._compute_eigenvalues(matrix, preconditioner)

                self.kappas[name].append(kappa)
                self.densities[name].append(density)
                self.iterations[name].append(iteration)
                self.setups[name].append(setup)
                self.durations[name].append(duration)
                self.totals[name].append(setup + duration)
                self.successes[name].append(100 * (1 - info))  # info == 0 means converged

            if index == 0:
                with (RESULTS_DIRECTORY / "eigenvalues.csv").open(mode="w") as file_io:
                    writer = csv.writer(file_io)
                    writer.writerow(eigenvalues.keys())
                    writer.writerows(zip(*eigenvalues.values(), strict=True))

    def plot_histograms(self) -> Generator[tuple[str, "Figure"], None, None]:
        """Yield `(parameter, figure)` box plots for the durations and iterations."""
        for parameter, label in zip(
            ["durations", "iterations"],
            ["Durations [ms]", "Iterations [-]"],
            strict=True,
        ):
            figure, ax = plt.subplots()

            ax.set_ylabel(label)
            ax.boxplot(
                [getattr(self, parameter)[name] for name in self.techniques],
                notch=True,
                tick_labels=self.techniques,
            )

            yield parameter, figure

    def dump_csv(self) -> None:
        """Dump the mean metrics and the per-system totals to CSV files.

        Keep in mind that it has to be consumed and rendered using LaTeX later on.
        """
        parameters = ["kappas", "densities", "iterations", "setups", "durations", "totals", "successes"]

        with (RESULTS_DIRECTORY / "table.csv").open(mode="w") as file_io:
            file_io.write("technique," + ",".join(parameters) + "\n")

            for technique in self.techniques:
                line = technique

                for parameter in parameters:
                    line += "," + str(np.mean(getattr(self, parameter)[technique], dtype=float))

                file_io.write(line + "\n")

        with (RESULTS_DIRECTORY / "totals.csv").open(mode="w") as file_io:
            file_io.write(",".join(self.techniques) + "\n")

            for index in range(len(self.totals["vanilla"])):
                line = ",".join([str(self.totals[technique][index]) for technique in self.techniques])
                file_io.write(line + "\n")
199 |
200 |
def main():
    """Run the main test loop: load the test data and model, then benchmark."""
    assert torch.cuda.is_available(), "CUDA not available"
    device = torch.device("cuda")
    torch.manual_seed(69)

    params = dvc.api.params_show()

    test_data = getattr(data_sets, params["data"])(
        stage="test",
        batch_size=1,  # the benchmark suite reconstructs one system at a time
        shuffle=False,
    )

    network = getattr(models, params["model"])(params["channels"])
    network.load_state_dict(torch.load(Path("./assets/checkpoints/best.pt")))
    network = network.to(device)

    benchmark = BenchmarkSuite(test_data, network)
    benchmark.run()
    benchmark.dump_csv()


if __name__ == "__main__":
    main()
226 |
--------------------------------------------------------------------------------
/uibk/deep_preconditioning/train.py:
--------------------------------------------------------------------------------
1 | """Implement model training methods and the loop."""
2 |
3 | import os
4 | import random
5 | from pathlib import Path
6 | from typing import TYPE_CHECKING
7 |
8 | import dvc.api
9 | import numpy as np
10 | import torch
11 | from dvclive.live import Live
12 | from torch.utils.data.dataset import random_split
13 |
14 | import uibk.deep_preconditioning.data_set as data_sets
15 | import uibk.deep_preconditioning.model as models
16 | from uibk.deep_preconditioning.cg import preconditioned_conjugate_gradient
17 | from uibk.deep_preconditioning.metrics import inverse_loss
18 |
19 | if TYPE_CHECKING:
20 | from torch import nn
21 | from torch.optim import Optimizer
22 | from torch.utils.data import Dataset, Subset
23 |
SEED: int = 69  # single seed for every RNG source below


# Seed all random number generators the training touches, for reproducibility.
random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
    torch.cuda.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)  # also covers multi-GPU setups

# Raise instead of silently falling back when an op has no deterministic kernel.
torch.use_deterministic_algorithms(True)

torch.backends.cudnn.deterministic = True  # force deterministic cuDNN kernels
torch.backends.cudnn.benchmark = False  # autotuning would select kernels nondeterministically
# Required by cuBLAS for deterministic results; ":4096:8" is the documented alternative.
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
38 |
39 |
def _train_single_epoch(model: "nn.Module", data_set: "Dataset | Subset", optimizer: "Optimizer") -> float:
    """Train the model for a single epoch.

    Args:
        model: The model to train.
        data_set: The training data set.
        optimizer: The optimizer to train the model with.

    Returns:
        The average Frobenius loss on the training data.
    """
    model.train()

    total_loss = 0.0
    n_batches = len(data_set)

    for batch_index in range(n_batches):
        systems_tril = data_set[batch_index][0]  # remaining tuple entries unused here

        optimizer.zero_grad()
        loss = inverse_loss(systems_tril, model(systems_tril))
        total_loss += loss.item()
        loss.backward()
        optimizer.step()

    return total_loss / n_batches
65 |
66 |
@torch.no_grad()
def _validate(model: "nn.Module", data_set: "Dataset | Subset") -> tuple[float, ...]:
    """Test the model on the validation data.

    Args:
        model: The model to test.
        data_set: The validation data set.

    Returns:
        The mean validation loss, mean CG duration, and mean CG iteration count
        of the preconditioned systems.
    """
    model.eval()

    val_losses, durations, iterations = [], [], []

    for index in range(len(data_set)):
        systems_tril, _, right_hand_sides, original_sizes = data_set[index]
        preconditioners_tril = model(systems_tril)

        val_losses.append(inverse_loss(systems_tril, preconditioners_tril).item())

        # Densify once per batch; the per-sample slices below are disjoint.
        dense_systems = systems_tril.dense()
        dense_preconditioners = preconditioners_tril.dense()

        for batch_index in range(systems_tril.batch_size):
            size = original_sizes[batch_index]

            # Mirror the strict lower triangle to recover the symmetric system.
            system = dense_systems[batch_index, 0, :size, :size]
            system += torch.tril(system, -1).transpose(-1, -2)
            system = system.to(torch.float64)

            right_hand_side = right_hand_sides[batch_index, :size].squeeze().to(torch.float64)

            # M = L @ L.T is the symmetric preconditioner built from the output.
            factor = dense_preconditioners[batch_index, 0, :size, :size]
            preconditioner = torch.matmul(factor, factor.transpose(-1, -2)).to(torch.float64)

            duration, n_iterations, _ = preconditioned_conjugate_gradient(
                system,
                right_hand_side,
                M=preconditioner,
            )
            durations.append(duration)
            iterations.append(n_iterations)

    return np.mean(val_losses).item(), np.mean(durations).item(), np.mean(iterations).item()
111 |
112 |
class EarlyStopping:
    """Stop the training when no more significant improvement."""

    def __init__(self, patience: int) -> None:
        """Initialize the early stopping hyperparameters.

        Attributes:
            patience: Steps with no improvement after which training will be stopped.
            local_min: The lowest validation loss cached.
            counter: Epochs with nondecreasing validation loss.
        """
        self.patience = patience
        self.local_min = float("inf")
        self.counter = 0

    def __call__(self, val_loss: float) -> bool:
        """Return whether to stop, given the latest validation loss."""
        improved = val_loss <= self.local_min

        if improved:
            # Cache the new best loss and restart the patience window.
            self.local_min = val_loss
            self.counter = 0
        else:
            self.counter += 1

        return self.counter >= self.patience
137 |
138 |
def main() -> None:
    """Run the full training pipeline: data, model, optimization, and logging."""
    assert torch.cuda.is_available(), "CUDA not available"
    device = torch.device("cuda")
    torch.manual_seed(SEED)

    params = dvc.api.params_show()

    train_data, val_data = random_split(
        getattr(data_sets, params["data"])(
            stage="train",
            batch_size=params["batch_size"],
            shuffle=True,
        ),
        lengths=[0.95, 0.05],
    )

    model = getattr(models, params["model"])(params["channels"]).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=params["learning_rate"])

    early_stopping = EarlyStopping(patience=params["patience"])
    best_val_loss = float("inf")

    ckpt_dir = Path("assets/checkpoints")
    ckpt_dir.mkdir(parents=True, exist_ok=True)

    # Initialize the DVC experiment logger.
    tracker = Live(
        dir=str(Path("assets/dvclive/")),
        report="html",
        save_dvc_exp=True,
        dvcyaml=None,
    )
    tracker.log_params(params)

    while True:
        tracker.log_metric("train/loss/inverse", _train_single_epoch(model, train_data, optimizer))

        val_loss, durations, iterations = _validate(model, val_data)
        tracker.log_metric("val/loss/inverse", val_loss)
        tracker.log_metric("val/metric/durations", durations)
        tracker.log_metric("val/metric/iterations", iterations)

        # Check stopping before checkpointing, mirroring the original flow.
        if early_stopping(val_loss):
            break

        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), ckpt_dir / "best.pt")

        tracker.next_step()

    tracker.end()


if __name__ == "__main__":
    main()
195 |
--------------------------------------------------------------------------------
/uibk/deep_preconditioning/utils.py:
--------------------------------------------------------------------------------
1 | """A collection of utility functions for the project."""
2 |
3 | import time
4 | from typing import TYPE_CHECKING
5 |
6 | import torch
7 | from scipy.sparse.linalg import cg
8 |
9 | if TYPE_CHECKING:
10 | from numpy import ndarray
11 | from scipy.sparse import csr_matrix
12 | from spconv.pytorch import SparseConvTensor
13 |
14 |
def sparse_matvec_mul(spconv_batch: "SparseConvTensor", vector_batch: torch.Tensor, transpose: bool) -> torch.Tensor:
    """Perform a sparse matrix-vector multiplication.

    Args:
        spconv_batch: A single batch as an `spconv` tensor.
        vector_batch: A batch of vectors to multiply with the matrix.
        transpose: Whether to transpose the matrix batch.

    Returns:
        The result of the batched matrix-vector multiplication.
    """
    batch_indices = spconv_batch.indices[:, 0]
    # Swapping the row and column index slots multiplies with the transpose.
    if transpose:
        row_indices, column_indices = spconv_batch.indices[:, 2], spconv_batch.indices[:, 1]
    else:
        row_indices, column_indices = spconv_batch.indices[:, 1], spconv_batch.indices[:, 2]

    # Scale every stored entry by the vector component matching its column.
    scaled = spconv_batch.replace_feature(
        spconv_batch.features * vector_batch[batch_indices, column_indices].unsqueeze(-1)
    )

    result = torch.zeros_like(vector_batch, device=vector_batch.device)
    for batch_index in range(scaled.batch_size):
        batch_mask = torch.where(batch_indices == batch_index)
        # Accumulate the scaled entries of every row into the output vector.
        result[batch_index] = torch.zeros(vector_batch.shape[-1], device=vector_batch.device).scatter_reduce(
            dim=0,
            index=row_indices[batch_mask].to(torch.int64),
            src=scaled.features[batch_mask].squeeze(),
            reduce="sum",
        )

    return result
44 |
45 |
def benchmark_cg(
    matrix: "ndarray", right_hand_side: "ndarray", preconditioner: "ndarray | csr_matrix | None" = None
) -> tuple[float, int, int]:
    """Benchmark the (preconditioned) conjugate gradient method.

    Args:
        matrix: The matrix of the linear system.
        right_hand_side: The right-hand side of the linear system.
        preconditioner: The preconditioner for the conjugate gradient method.

    Returns:
        The wall-clock duration in seconds, the number of iterations, and the
        SciPy exit status (`0` means the solver converged).
    """
    n_steps = 0

    def _count(_):
        # SciPy invokes the callback once per CG iteration.
        nonlocal n_steps
        n_steps += 1

    tic = time.perf_counter()
    _, info = cg(
        matrix,
        right_hand_side,
        maxiter=512,
        M=preconditioner,
        callback=_count,
    )
    duration = time.perf_counter() - tic

    return duration, n_steps, info
77 |
--------------------------------------------------------------------------------