├── .gitattributes
├── .github
└── workflows
│ ├── build-and-test.yml
│ ├── check-markdown.yml
│ ├── check-pep8.yml
│ ├── pythonpublish.yml
│ └── run-solverdummy.yml
├── .gitignore
├── .markdownlint.json
├── CHANGELOG.md
├── LICENSE.txt
├── MANIFEST.in
├── README.md
├── cyprecice
├── .gitignore
├── Participant.pxd
├── cyprecice.pxd
└── cyprecice.pyx
├── docs
├── MigrationGuide.md
└── ReleaseGuide.md
├── examples
└── solverdummy
│ ├── .gitignore
│ ├── README.md
│ ├── precice-config.xml
│ ├── requirements.txt
│ └── solverdummy.py
├── precice
├── .gitignore
├── __init__.py
└── _version.py
├── pyproject.toml
├── setup.cfg
├── setup.py
├── test
├── .gitignore
├── Participant.cpp
├── __init__.py
└── test_bindings_module.py
└── versioneer.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | precice/_version.py export-subst
2 |
--------------------------------------------------------------------------------
/.github/workflows/build-and-test.yml:
--------------------------------------------------------------------------------
1 | name: Build and Test
2 | on:
3 | push:
4 | branches:
5 | - "*"
6 | pull_request:
7 | branches:
8 | - "*"
9 |
10 | jobs:
11 | setup_install:
12 | name: Run setup install
13 | runs-on: ubuntu-latest
14 | container:
15 | image: precice/precice:nightly
16 | options: --user root
17 | steps:
18 | - name: Checkout Repository
19 | uses: actions/checkout@v2
20 | - name: Install pip3, pkgconfig and upgrade pip3
21 | run: |
22 | apt-get -yy update
23 | apt-get install -y python3-pip python3.12-venv pkg-config
24 | rm -rf /var/lib/apt/lists/*
25 | - name: Create venv
26 | run: python3 -m venv .venv
27 | - name: Activate venv
28 | # see https://stackoverflow.com/a/74669486
29 | run: |
30 | . .venv/bin/activate
31 | echo PATH=$PATH >> $GITHUB_ENV
32 | - name: Install dependencies
33 | run: |
34 | pip3 install toml
35 | python3 -c 'import toml; c = toml.load("pyproject.toml"); print("\n".join(c["build-system"]["requires"]))' | pip3 install -r /dev/stdin
36 | - name: Run setup install
37 | run: python3 setup.py install
38 | - name: Test install
39 | run: |
40 | export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
41 | python3 -c "import precice"
42 |
43 | setup_test:
44 | name: Run setup test
45 | runs-on: ubuntu-latest
46 | steps:
47 | - name: Checkout Repository
48 | uses: actions/checkout@v2
49 | - uses: actions/setup-python@v1
50 | - name: Install OpenMPI, CMake, Boost library, Eigen and pkg-config
51 | run: |
52 | sudo apt-get -yy update
53 | sudo apt-get install -y libopenmpi-dev cmake libboost-all-dev libeigen3-dev pkg-config
54 | sudo rm -rf /var/lib/apt/lists/*
55 | - uses: BSFishy/pip-action@v1
56 | with:
57 | packages: toml
58 | - name: Checkout precice and make required files discoverable
59 | run: |
60 | git clone --branch develop https://github.com/precice/precice.git precice-core
61 | mkdir -p precice
62 | cp precice-core/src/precice/Participant.hpp precice/Participant.hpp
63 | cp precice-core/src/precice/Tooling.hpp precice/Tooling.hpp
64 | cp precice-core/src/precice/Tooling.cpp precice/Tooling.cpp
65 | cd precice-core
66 | mkdir build && cd build
67 | cmake .. -DPRECICE_FEATURE_MPI_COMMUNICATION=OFF -DPRECICE_FEATURE_PETSC_MAPPING=OFF -DPRECICE_FEATURE_PYTHON_ACTIONS=OFF -DBUILD_TESTING=OFF
68 | - name: Install dependencies
69 | run: |
70 | python3 -c 'import toml; c = toml.load("pyproject.toml"); print("\n".join(c["build-system"]["requires"]))' | pip3 install -r /dev/stdin
71 | - name: Run setup test
72 | env:
73 | PKG_CONFIG_PATH: "precice-core/build"
74 | PKG_CONFIG_SYSTEM_INCLUDE_PATH: 1
75 | run: |
76 | export CFLAGS=-I$GITHUB_WORKSPACE
77 | python3 setup.py test
78 |
79 | pip_install:
80 | name: Run pip install
81 | needs: [setup_test]
82 | runs-on: ubuntu-latest
83 | container:
84 | image: precice/precice:nightly
85 | options: --user root
86 | steps:
87 | - name: Checkout Repository
88 | uses: actions/checkout@v2
89 | - name: Install dependencies
90 | run: |
91 | apt-get -yy update
92 | apt-get install -y python3-pip python3.12-venv pkg-config
93 | rm -rf /var/lib/apt/lists/*
94 | - name: Create venv
95 | run: |
96 | python3 -m venv .venv
97 | - name: Activate venv
98 | # see https://stackoverflow.com/a/74669486
99 | run: |
100 | . .venv/bin/activate
101 | echo PATH=$PATH >> $GITHUB_ENV
102 | - name: Run pip install
103 | run: pip3 install .
104 | - name: Check import
105 | run: |
106 | export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
107 | python3 -c "import precice"
108 |
109 | solverdummy_test:
110 | name: Run solverdummy
111 | needs: [pip_install]
112 | runs-on: ubuntu-latest
113 | container:
114 | image: precice/precice:nightly
115 | options: --user root
116 | steps:
117 | - name: Checkout Repository
118 | uses: actions/checkout@v2
119 | - name: Install dependencies
120 | run: |
121 | apt-get -yy update
122 | apt-get install -y python3-pip python3.12-venv pkg-config
123 | rm -rf /var/lib/apt/lists/*
124 | - name: Create venv
125 | run: |
126 | python3 -m venv .venv
127 | - name: Activate venv
128 | # see https://stackoverflow.com/a/74669486
129 | run: |
130 | . .venv/bin/activate
131 | echo PATH=$PATH >> $GITHUB_ENV
132 | - name: Run pip install
133 | run: pip3 install .
134 | - name: Run solverdummy
135 | run: |
136 | export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
137 | cd examples/solverdummy/
138 | python3 solverdummy.py precice-config.xml SolverOne MeshOne & python3 solverdummy.py precice-config.xml SolverTwo MeshTwo
139 |
--------------------------------------------------------------------------------
/.github/workflows/check-markdown.yml:
--------------------------------------------------------------------------------
1 | name: Lint docs
2 | on: [push, pull_request]
3 | jobs:
4 | check_md:
5 | runs-on: ubuntu-latest
6 | steps:
7 | - name: Check out repository
8 | uses: actions/checkout@v2
9 | - name: Lint markdown files (markdownlint)
10 | uses: articulate/actions-markdownlint@v1
11 | with:
12 | config: .markdownlint.json
13 | files: '.'
14 | ignore: changelog-entries
15 |
--------------------------------------------------------------------------------
/.github/workflows/check-pep8.yml:
--------------------------------------------------------------------------------
1 | name: autopep8
2 | on:
3 | push:
4 | branches:
5 | - "*"
6 | pull_request:
7 | branches:
8 | - "*"
9 | jobs:
10 | autopep8:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v2
14 | - name: autopep8
15 | id: autopep8
16 | uses: peter-evans/autopep8@v1
17 | with:
18 | args: --recursive --diff --aggressive --aggressive --exit-code --ignore E402 --max-line-length 120 .
19 | - name: Fail if autopep8 made changes
20 | if: ${{ steps.autopep8.outputs.exit-code == 2 }}
21 | run: exit 1
22 |
--------------------------------------------------------------------------------
/.github/workflows/pythonpublish.yml:
--------------------------------------------------------------------------------
1 | name: Upload Python Package
2 |
3 | on:
4 | push:
5 | tags:
6 | - v*
7 |
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v2
13 | - name: Set up Python
14 | uses: actions/setup-python@v1
15 | with:
16 | python-version: '3.x'
17 | - name: Install dependencies
18 | uses: BSFishy/pip-action@v1
19 | with:
20 | packages: |
21 | setuptools
22 | wheel
23 | twine
24 | cython
25 | packaging
26 | numpy
27 | pkgconfig
28 | - name: Build and publish
29 | env:
30 | TWINE_USERNAME: __token__
31 | TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
32 | run: |
33 | python setup.py sdist
34 | twine upload dist/*
35 |
--------------------------------------------------------------------------------
/.github/workflows/run-solverdummy.yml:
--------------------------------------------------------------------------------
1 | name: Run preCICE Solverdummies
2 | on:
3 | push:
4 | branches:
5 | - "*"
6 | pull_request:
7 | branches:
8 | - "*"
9 | jobs:
10 | run_solverdummies:
11 | name: Run solverdummies
12 | runs-on: ubuntu-latest
13 | container:
14 | image: precice/precice:nightly
15 | options: --user root
16 | steps:
17 | - name: Checkout Repository
18 | uses: actions/checkout@v2
19 | - name: Install Dependencies
20 | run: |
21 | apt-get -qq update
22 | apt-get -qq install software-properties-common python3-dev python3-pip python3.12-venv git apt-utils pkg-config
23 | rm -rf /var/lib/apt/lists/*
24 | - name: Create venv
25 | run: |
26 | python3 -m venv .venv
27 | - name: Activate venv
28 | # see https://stackoverflow.com/a/74669486
29 | run: |
30 | . .venv/bin/activate
31 | echo PATH=$PATH >> $GITHUB_ENV
32 | - name: Install bindings
33 | run: pip3 install .
34 | - name: Check whether preCICE was built with MPI # reformat version information as a dict and check whether preCICE was compiled with MPI
35 | run: python3 -c "import precice; assert({item.split('=')[0]:item.split('=')[-1] for item in str(precice.get_version_information()).split(';')}['PRECICE_FEATURE_MPI_COMMUNICATION']=='Y')"
36 | - name: Run solverdummies
37 | run: |
38 | cd examples/solverdummy/
39 | python3 solverdummy.py precice-config.xml SolverOne & python3 solverdummy.py precice-config.xml SolverTwo
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # The following files are ignored by Git
2 | .idea # Pycharm related file
3 | *.so
4 | build
5 | *.egg-info
6 | __pycache__
7 | env
8 |
--------------------------------------------------------------------------------
/.markdownlint.json:
--------------------------------------------------------------------------------
1 | {
2 | "MD013": false,
3 | "MD014": false,
4 | "MD024": false,
5 | "MD034": false,
6 | "MD033": false
7 | }
8 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog of Python language bindings for preCICE
2 |
3 | All notable changes to this project will be documented in this file.
4 |
5 | ## latest
6 |
7 | * Fix bug in `map_and_read_data` https://github.com/precice/python-bindings/pull/237
8 |
9 | ## 3.2.0
10 |
11 | * Update `requirements.txt` of the solver dummy https://github.com/precice/python-bindings/pull/233
12 | * Add API functions for Just-in-time mapping https://github.com/precice/python-bindings/pull/231
13 | * Discontinued maintenance of Docker image `precice/python-bindings`. Python packages should be installed using virtual environments instead. Please refer to `README.md` for further information. https://github.com/precice/python-bindings/pull/228
14 | * Added profiling API functions https://github.com/precice/python-bindings/pull/226
15 | * Added API function `reset_mesh()` https://github.com/precice/python-bindings/pull/224
16 | * Removed testing of spack package https://github.com/precice/python-bindings/pull/221
17 | * Use the newer `precice/precice.hpp` header to access C++ API https://github.com/precice/python-bindings/pull/193
18 |
19 | ## 3.1.2
20 |
21 | * Restrict to numpy < 2 for better compatibility with CI pipeline. https://github.com/precice/python-bindings/pull/213
22 | * Require setuptools < 72 since support for the test command was removed in Setuptools 72. https://github.com/precice/python-bindings/pull/213
23 | * Require setuptools >= 61 to guarantee that pyproject.toml is used https://github.com/precice/python-bindings/pull/207
24 | * Fix CI pipeline for spack https://github.com/precice/python-bindings/pull/206
25 |
26 | ## 3.1.1
27 |
28 | * Fix NumPy include order to not conflict with system NumPy and the one installed via pip https://github.com/precice/python-bindings/pull/204
29 |
30 | ## 3.1.0
31 |
32 | * Change versioning scheme https://github.com/precice/python-bindings/pull/199
33 |
34 | ## 3.0.0.0
35 |
36 | * Add Cython as build time dependency https://github.com/precice/python-bindings/pull/177
37 | * Update CMake configuration flags for preCICE source installation in Actions. https://github.com/precice/python-bindings/commit/23a840144c2647d6cf09c0ed87be3b768a22feb7
38 | * Remove API functions `has_mesh` and `has_data` and rename `get_mesh_vertices_and_ids` to `get_mesh_vertices_and_coordinates`. https://github.com/precice/python-bindings/commit/cd446d2807b841d81a4cf5c9dd6656ab43c278c3
39 | * Update API according to preCICE v3.0.0 https://github.com/precice/python-bindings/pull/179
40 |
41 | ## 2.5.0.4
42 |
43 | * Add `tag_prefix = v` in versioneer configuration of `setup.cfg`.
44 |
45 | ## 2.5.0.3
46 |
47 | * Update from versioneer 0.19 to 0.29.
48 | * Add `cimport numpy` to avoid a segmentation fault originating from using Cython v3.0.0. https://github.com/precice/python-bindings/issues/182
49 |
50 | ## 2.5.0.2
51 |
52 | * Add Waveform API introduced in preCICE v2.4.0.
53 |
54 | ## 2.5.0.1
55 |
56 | * Add pkgconfig as dependency to the pythonpublish workflow https://github.com/precice/python-bindings/commit/200dc2aba160e18a7d1dae44ef3493d546e69eb9
57 |
58 | ## 2.5.0.0
59 |
60 | * Bindings now use pkgconfig to determine flags and link to preCICE. https://github.com/precice/python-bindings/pull/149
61 |
62 | ## 2.4.0.0
63 |
64 | * Move solverdummy into examples/ folder and remove MeshName from its input arguments. https://github.com/precice/python-bindings/pull/141
65 | * Remove MeshName from input arguments of solverdummy. https://github.com/precice/python-bindings/pull/142
66 |
67 | ## 2.3.0.1
68 |
69 | * Improve CI w.r.t spack package. https://github.com/precice/python-bindings/pull/117
70 | * Mesh connectivity requirement API function: https://github.com/precice/python-bindings/pull/126
71 | * Direct mesh access API functions: https://github.com/precice/python-bindings/pull/124
72 |
73 | ## 2.2.1.1
74 |
75 | * Remove Travis CI https://github.com/precice/python-bindings/pull/103
76 | * Improve CI w.r.t. testing dockerimage and autopep8 formatting: https://github.com/precice/python-bindings/pull/98
77 |
78 | ## 2.2.0.2
79 |
80 | * Improved error messages for all assertions. https://github.com/precice/python-bindings/pull/9
81 | * Improve CI w.r.t spack package. https://github.com/precice/python-bindings/pull/89
82 |
83 | ## 2.2.0.1
84 |
85 | * Format complete codebase according to PEP8 and test formatting. https://github.com/precice/python-bindings/pull/82
86 | * Added checks for correct input to API functions accepting array-like input (e.g. `write_block_scalar_data`). https://github.com/precice/python-bindings/pull/80
87 | * Use github actions for CI. https://github.com/precice/python-bindings/pull/67, https://github.com/precice/python-bindings/pull/68
88 | * Do major restructuring of codebase. https://github.com/precice/python-bindings/pull/71
89 | * Support `__version__` and provide version via python-versioneer. https://github.com/precice/python-bindings/pull/70
90 | * `packaging` and `pip` are now optional dependencies. https://github.com/precice/python-bindings/pull/63
91 | * Feature: Bindings are now available via Spack. https://github.com/spack/spack/pull/19558
92 |
93 | ## 2.1.1.2
94 |
95 | * Bugfix: Bindings also support empty read/write data for block read/write operations (like C++ preCICE API). https://github.com/precice/python-bindings/pull/69
96 |
97 | ## 2.1.1.1
98 |
99 | * Bindings can now handle mesh initialization with no vertices. This behavior is consistent with the C++ preCICE API.
100 | * Adds a CHANGELOG to the project.
101 |
102 | ## 2.1.0.1
103 |
104 | * Update solverdummy to include data transfer.
105 |
106 | ## 2.0.2.1
107 |
108 | * No relevant features or fixes. This version is released for compatibility reasons.
109 |
110 | ## 2.0.1.1
111 |
112 | * No relevant features or fixes. This version is released for compatibility reasons.
113 |
114 | ## 2.0.0.2
115 |
116 | * Improvement of PyPI integration.
117 |
118 | ## 2.0.0.1
119 |
120 | * Introduces new versioning system. See https://github.com/precice/python-bindings/issues/31.
121 | * First independent release of the python bindings.
122 | * Name the package `pyprecice`.
123 | * Publish package on [PyPI](https://pypi.org/project/pyprecice/).
124 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | GNU LESSER GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 |
9 | This version of the GNU Lesser General Public License incorporates
10 | the terms and conditions of version 3 of the GNU General Public
11 | License, supplemented by the additional permissions listed below.
12 |
13 | 0. Additional Definitions.
14 |
15 | As used herein, "this License" refers to version 3 of the GNU Lesser
16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU
17 | General Public License.
18 |
19 | "The Library" refers to a covered work governed by this License,
20 | other than an Application or a Combined Work as defined below.
21 |
22 | An "Application" is any work that makes use of an interface provided
23 | by the Library, but which is not otherwise based on the Library.
24 | Defining a subclass of a class defined by the Library is deemed a mode
25 | of using an interface provided by the Library.
26 |
27 | A "Combined Work" is a work produced by combining or linking an
28 | Application with the Library. The particular version of the Library
29 | with which the Combined Work was made is also called the "Linked
30 | Version".
31 |
32 | The "Minimal Corresponding Source" for a Combined Work means the
33 | Corresponding Source for the Combined Work, excluding any source code
34 | for portions of the Combined Work that, considered in isolation, are
35 | based on the Application, and not on the Linked Version.
36 |
37 | The "Corresponding Application Code" for a Combined Work means the
38 | object code and/or source code for the Application, including any data
39 | and utility programs needed for reproducing the Combined Work from the
40 | Application, but excluding the System Libraries of the Combined Work.
41 |
42 | 1. Exception to Section 3 of the GNU GPL.
43 |
44 | You may convey a covered work under sections 3 and 4 of this License
45 | without being bound by section 3 of the GNU GPL.
46 |
47 | 2. Conveying Modified Versions.
48 |
49 | If you modify a copy of the Library, and, in your modifications, a
50 | facility refers to a function or data to be supplied by an Application
51 | that uses the facility (other than as an argument passed when the
52 | facility is invoked), then you may convey a copy of the modified
53 | version:
54 |
55 | a) under this License, provided that you make a good faith effort to
56 | ensure that, in the event an Application does not supply the
57 | function or data, the facility still operates, and performs
58 | whatever part of its purpose remains meaningful, or
59 |
60 | b) under the GNU GPL, with none of the additional permissions of
61 | this License applicable to that copy.
62 |
63 | 3. Object Code Incorporating Material from Library Header Files.
64 |
65 | The object code form of an Application may incorporate material from
66 | a header file that is part of the Library. You may convey such object
67 | code under terms of your choice, provided that, if the incorporated
68 | material is not limited to numerical parameters, data structure
69 | layouts and accessors, or small macros, inline functions and templates
70 | (ten or fewer lines in length), you do both of the following:
71 |
72 | a) Give prominent notice with each copy of the object code that the
73 | Library is used in it and that the Library and its use are
74 | covered by this License.
75 |
76 | b) Accompany the object code with a copy of the GNU GPL and this license
77 | document.
78 |
79 | 4. Combined Works.
80 |
81 | You may convey a Combined Work under terms of your choice that,
82 | taken together, effectively do not restrict modification of the
83 | portions of the Library contained in the Combined Work and reverse
84 | engineering for debugging such modifications, if you also do each of
85 | the following:
86 |
87 | a) Give prominent notice with each copy of the Combined Work that
88 | the Library is used in it and that the Library and its use are
89 | covered by this License.
90 |
91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license
92 | document.
93 |
94 | c) For a Combined Work that displays copyright notices during
95 | execution, include the copyright notice for the Library among
96 | these notices, as well as a reference directing the user to the
97 | copies of the GNU GPL and this license document.
98 |
99 | d) Do one of the following:
100 |
101 | 0) Convey the Minimal Corresponding Source under the terms of this
102 | License, and the Corresponding Application Code in a form
103 | suitable for, and under terms that permit, the user to
104 | recombine or relink the Application with a modified version of
105 | the Linked Version to produce a modified Combined Work, in the
106 | manner specified by section 6 of the GNU GPL for conveying
107 | Corresponding Source.
108 |
109 | 1) Use a suitable shared library mechanism for linking with the
110 | Library. A suitable mechanism is one that (a) uses at run time
111 | a copy of the Library already present on the user's computer
112 | system, and (b) will operate properly with a modified version
113 | of the Library that is interface-compatible with the Linked
114 | Version.
115 |
116 | e) Provide Installation Information, but only if you would otherwise
117 | be required to provide such information under section 6 of the
118 | GNU GPL, and only to the extent that such information is
119 | necessary to install and execute a modified version of the
120 | Combined Work produced by recombining or relinking the
121 | Application with a modified version of the Linked Version. (If
122 | you use option 4d0, the Installation Information must accompany
123 | the Minimal Corresponding Source and Corresponding Application
124 | Code. If you use option 4d1, you must provide the Installation
125 | Information in the manner specified by section 6 of the GNU GPL
126 | for conveying Corresponding Source.)
127 |
128 | 5. Combined Libraries.
129 |
130 | You may place library facilities that are a work based on the
131 | Library side by side in a single library together with other library
132 | facilities that are not Applications and are not covered by this
133 | License, and convey such a combined library under terms of your
134 | choice, if you do both of the following:
135 |
136 | a) Accompany the combined library with a copy of the same work based
137 | on the Library, uncombined with any other library facilities,
138 | conveyed under the terms of this License.
139 |
140 | b) Give prominent notice with the combined library that part of it
141 | is a work based on the Library, and explaining where to find the
142 | accompanying uncombined form of the same work.
143 |
144 | 6. Revised Versions of the GNU Lesser General Public License.
145 |
146 | The Free Software Foundation may publish revised and/or new versions
147 | of the GNU Lesser General Public License from time to time. Such new
148 | versions will be similar in spirit to the present version, but may
149 | differ in detail to address new problems or concerns.
150 |
151 | Each version is given a distinguishing version number. If the
152 | Library as you received it specifies that a certain numbered version
153 | of the GNU Lesser General Public License "or any later version"
154 | applies to it, you have the option of following the terms and
155 | conditions either of that published version or of any later version
156 | published by the Free Software Foundation. If the Library as you
157 | received it does not specify a version number of the GNU Lesser
158 | General Public License, you may choose any version of the GNU Lesser
159 | General Public License ever published by the Free Software Foundation.
160 |
161 | If the Library as you received it specifies that a proxy can decide
162 | whether future versions of the GNU Lesser General Public License shall
163 | apply, that proxy's public statement of acceptance of any version is
164 | permanent authorization for you to choose that version for the
165 | Library.
166 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include pyproject.toml
2 |
3 | # Include the README
4 | include *.md
5 |
6 | # Include the license file
7 | include LICENSE.txt
8 |
9 | # Include cython files
10 | include cyprecice/*.pyx
11 | include cyprecice/*.pxd
12 | include test/test_bindings_module.py
13 | include versioneer.py
14 | include precice/_version.py
15 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Python language bindings for the C++ library preCICE
2 |
3 | ⚠️ The latest version of the documentation for the python bindings can be found on [precice.org](https://precice.org/installation-bindings-python.html). The information from this `README` is currently under revision and will be moved ⚠️
4 |
5 | [](https://pypi.org/project/pyprecice/)
6 |
7 | This package provides python language bindings for the C++ library [preCICE](https://github.com/precice/precice). Note that the first two digits of the version number of the bindings indicate the major and minor version of the preCICE version that the bindings support. The last digit represents the version of the bindings. Example: `v3.1.0` and `v3.1.1` of the bindings represent versions `0` and `1` of the bindings that are compatible with preCICE `v3.1.x`. Note that this versioning scheme was introduced from bindings `v3.1.0`, which is different than the [old versioning scheme](#old-versioning-scheme).
8 |
9 | ## User documentation
10 |
11 | Please refer to [the preCICE documentation](https://www.precice.org/installation-bindings-python.html) for information on how to install and use the python bindings. Information below is intended for advanced users and developers.
12 |
13 | ## Required dependencies
14 |
15 | **preCICE**: Refer to [the preCICE documentation](https://precice.org/installation-overview.html) for information on building and installation.
16 |
17 | **C++**: A working C++ compiler, e.g., `g++`.
18 |
19 | **MPI**: `mpi4py` requires MPI to be installed on your system.
20 |
21 | ## Installing the package
22 |
23 | Generally, it is recommended to work in a virtual environment when using the preCICE Python bindings. For this purpose, you might have to run `apt install python3-venv` first.
24 |
25 | Create a virtual environment in your working directory by running:
26 |
27 | ```bash
28 | python3 -m venv .venv
29 | ```
30 |
31 | Using `.venv` is a common choice as path of your virtual environment. But you can use any path here. Activate the virtual environment by running
32 |
33 | ```bash
34 | . .venv/bin/activate
35 | ```
36 |
37 | ### Using pip
38 |
39 | We recommend using `pip` (version 19.0.0 or newer required). You can check your pip version via `pip --version`.
40 |
41 | #### preCICE system installs
42 |
43 | For system installs of preCICE, installation works out of the box. There are different ways how pip can be used to install pyprecice. pip will fetch cython and other build-time dependencies, compile the bindings and finally install the package pyprecice.
44 |
45 | * (recommended) install [pyprecice from PyPI](https://pypi.org/project/pyprecice/)
46 |
47 | ```bash
48 | $ pip install pyprecice
49 | ```
50 |
51 | * provide the link to this repository to pip (replace `<branch>` with the branch you want to use, preferably `master` or `develop`)
52 |
53 | ```bash
54 | $ pip install git+https://github.com/precice/python-bindings.git@<branch>
55 | ```
56 |
57 | * if you already cloned this repository, execute the following command from this directory:
58 |
59 | ```bash
60 | $ pip install .
61 | ```
62 |
63 | *note the dot at the end of the line*
64 |
65 | #### preCICE at custom location (setting PATHS)
66 |
67 | If preCICE (the C++ library) was installed in a custom prefix, or only built but not installed at all, you have to extend the following environment variables:
68 |
69 | * `LIBRARY_PATH`, `LD_LIBRARY_PATH` to the library location, or `$prefix/lib`
70 | * `CPATH` either to the `src` directory or the `$prefix/include`
71 |
72 | The preCICE documentation provides more information on [linking preCICE](https://precice.org/installation-linking.html).
73 |
74 | ### Using Spack
75 |
76 | You can also install the python language bindings for preCICE via Spack by installing the Spack package `py-pyprecice`. Refer to [our installation guide for preCICE via Spack](https://precice.org/installation-spack.html) for getting started with Spack.
77 |
78 | ### Using setup.py (deprecated)
79 |
80 | #### preCICE system installs
81 |
82 | In this directory, execute:
83 |
84 | ```bash
85 | $ python3 setup.py install
86 | ```
87 |
88 | #### preCICE at custom location (setting PATHS)
89 |
90 | see above. Then run
91 |
92 | ```bash
93 | $ python3 setup.py install
94 | ```
95 |
96 | #### preCICE at custom location (explicit include path, library path)
97 |
98 | 1. Install cython and other dependencies via pip3
99 |
100 | ```bash
101 | $ pip3 install setuptools wheel cython packaging numpy
102 | ```
103 |
104 | 2. Open terminal in this folder.
105 | 3. Build the bindings
106 |
107 | ```bash
108 | $ python3 setup.py build_ext --include-dirs=$PRECICE_ROOT/src --library-dirs=$PRECICE_ROOT/build/last
109 | ```
110 |
111 | **Options:**
112 | * `--include-dirs=`, default: `''`
113 | Path to the headers of preCICE, point to the sources `$PRECICE_ROOT/src`, or your custom install prefix `$prefix/include`.
114 |
115 | **NOTES:**
116 |
117 | * If you have built preCICE using CMake, you can pass the path to the CMake binary directory using `--library-dirs`.
118 | * It is recommended to use preCICE as a shared library here.
119 |
120 | 4. Install the bindings
121 |
122 | ```bash
123 | $ python3 setup.py install
124 | ```
125 |
126 | 5. Clean-up *optional*
127 |
128 | ```bash
129 | $ python3 setup.py clean --all
130 | ```
131 |
132 | ## Test the installation
133 |
134 | Update `LD_LIBRARY_PATH` such that python can find `precice.so`
135 |
136 | ```bash
137 | $ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PRECICE_ROOT/build/last
138 | ```
139 |
140 | Run the following to test the installation:
141 |
142 | ```bash
143 | $ python3 -c "import precice"
144 | ```
145 |
146 | ### Unit tests
147 |
148 | 1. Clean-up **mandatory** (because we must not link against the real `precice.so`, but we use a mocked version)
149 |
150 | ```bash
151 | $ python3 setup.py clean --all
152 | ```
153 |
154 | 2. Set `CPLUS_INCLUDE_PATH` (we cannot use `build_ext` and the `--include-dirs` option here)
155 |
156 | ```bash
157 | $ export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:$PRECICE_ROOT/src
158 | ```
159 |
160 | 3. Run tests with
161 |
162 | ```bash
163 | $ python3 setup.py test
164 | ```
165 |
166 | ## Usage
167 |
168 | You can find the documentation of the implemented interface in the file `precice.pyx`. For an example of how `pyprecice` can be used please refer to the [1D elastic tube example](https://precice.org/tutorials-elastic-tube-1d.html#python).
169 |
170 | **Note** The python package that is installed is called `pyprecice`. It provides the python module `precice` that can be used in your code via `import precice`, for example.
171 |
172 | ## Troubleshooting & miscellaneous
173 |
174 | ### preCICE is not found
175 |
176 | The following error shows up during installation, if preCICE is not found:
177 |
178 | ```bash
179 | /tmp/pip-install-d_fjyo1h/pyprecice/precice.cpp:643:10: fatal error: precice/Participant.hpp: No such file or directory
180 | 643 | #include "precice/Participant.hpp"
181 | | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
182 | compilation terminated.
183 | error: command 'x86_64-linux-gnu-gcc' failed with exit status 1
184 | ----------------------------------------
185 | ERROR: Failed building wheel for pyprecice
186 | Failed to build pyprecice
187 | ERROR: Could not build wheels for pyprecice which use PEP 517 and cannot be installed directly
188 | ```
189 |
190 | Or, for preCICE v2:
191 |
192 | ```bash
193 | /tmp/pip-install-d_fjyo1h/pyprecice/precice.cpp:643:10: fatal error: precice/SolverInterface.hpp: No such file or directory
194 | 643 | #include "precice/SolverInterface.hpp"
195 | | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
196 | compilation terminated.
197 | error: command 'x86_64-linux-gnu-gcc' failed with exit status 1
198 | ----------------------------------------
199 | ERROR: Failed building wheel for pyprecice
200 | Failed to build pyprecice
201 | ERROR: Could not build wheels for pyprecice which use PEP 517 and cannot be installed directly
202 | ```
203 |
204 | There are two possible reasons, why preCICE is not found:
205 |
206 | 1. preCICE is not installed. Please download and install the C++ library preCICE. See [above](https://github.com/precice/python-bindings/blob/develop/README.md#required-dependencies).
207 | 2. preCICE is installed, but cannot be found. Please make sure that preCICE can be found during the installation process. See our wiki page on [linking to preCICE](https://precice.org/installation-linking.html) and [the instructions above](https://github.com/precice/python-bindings/blob/develop/README.md#precice-at-custom-location-setting-paths).
208 |
209 | ### Version of Cython is too old
210 |
211 | In case the compilation fails with `shared_ptr.pxd not found` messages, check if you use the latest version of Cython.
212 |
213 | ### `Python.h` missing
214 |
215 | ```bash
216 | $ python3 -m pip install pyprecice
217 | Collecting pyprecice
218 | ...
219 | /tmp/pip-build-7rj4_h93/pyprecice/precice.cpp:25:20: fatal error: Python.h: No such file or directory
220 | compilation terminated.
221 | error: command 'x86_64-linux-gnu-gcc' failed with exit status 1
222 |
223 | ----------------------------------------
224 | Failed building wheel for pyprecice
225 | ```
226 |
227 | Please try to install `python3-dev`. E.g. via `apt install python3-dev`. Please make sure that you use the correct version (e.g. `python3.5-dev` or `python3.6-dev`). You can check your version via `python3 --version`.
228 |
229 | ### `libprecice.so` is not found at runtime
230 |
231 | ```bash
232 | $ python3 -c "import precice"
233 | Traceback (most recent call last):
234 | File "", line 1, in
235 | ImportError: libprecice.so.2: cannot open shared object file: No such file or directory
236 | ```
237 |
238 | Make sure that your `LD_LIBRARY_PATH` includes the directory that contains `libprecice.so`. The actual path depends on how you installed preCICE. Example: If preCICE was installed using `sudo make install` and you did not define a `CMAKE_INSTALL_PREFIX` the library path is `/usr/local/lib`. This means you have to `export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH`.
239 |
240 | ### I'm using preCICE < 2.0.0, but there is no matching version of the bindings. What can I do?
241 |
242 | If you want to use the old experimental python bindings (released with preCICE version < 2.0.0), please refer to the corresponding preCICE version. Example: for preCICE v1.6.1 there are three different versions of the python bindings: [`precice_future`](https://github.com/precice/precice/tree/v1.6.1/src/precice/bindings/python_future), [`precice`](https://github.com/precice/precice/tree/v1.6.1/src/precice/bindings/python) and [`PySolverInterface`](https://github.com/precice/precice/tree/v1.6.1/src/precice/bindings/PySolverInterface). Installation instructions can be found in the corresponding `README` files.
243 |
244 | ### Installing the python bindings for Python 2.7.17
245 |
246 | *Note that the instructions in this section are outdated and refer to the deprecated python bindings. Until we have updated information on the installation procedure for the python bindings under this use-case, we will keep these instructions, since they might still be very useful* (Originally contributed by [huangq1234553](https://github.com/huangq1234553) to the precice wiki in [`precice/precice/wiki:8bb74b7`](https://github.com/precice/precice/wiki/Dependencies/8bb74b78a7ebc54983f4822af82fb3d638021faa).)
247 |
248 | show details
249 |
250 | This guide provides steps to install python bindings for precice-1.6.1 for a conda environment Python 2.7.17 on the CoolMUC. Note that preCICE no longer supports Python 2 after v1.4.0. Hence, some modifications to the python setup code were necessary. Most steps are similar if not identical to the basic guide without petsc or python above. This guide assumes that the Eigen dependencies have already been installed.
251 |
252 | Load the prerequisite libraries:
253 |
254 | ```bash
255 | module load gcc/7
256 | module unload mpi.intel
257 | module load mpi.intel/2018_gcc
258 | module load cmake/3.12.1
259 | ```
260 |
261 | At the time of this writing `module load boost/1.68.0` is no longer available. Instead
262 | boost 1.65.1 was installed per the `boost and yaml-cpp` guide above.
263 |
264 | In order to have the right python dependencies, a packaged conda environment was transferred to
265 | SuperMUC. The following dependencies were installed:
266 |
267 | * numpy
268 | * mpi4py
269 |
270 | With the python environment active, we have to feed the right python file directories to the cmake command.
271 | Note that -DPYTHON_LIBRARY expects a python shared library. You can likely modify the version to fit what is required.
272 |
273 | ```bash
274 | mkdir build && cd build
275 | cmake -DBUILD_SHARED_LIBS=ON -DPRECICE_FEATURE_PETSC_MAPPING=OFF -DPRECICE_FEATURE_PYTHON_ACTIONS=ON -DCMAKE_INSTALL_PREFIX=/path/to/precice -DCMAKE_BUILD_TYPE=Debug .. -DPYTHON_INCLUDE_DIR=$(python -c "from distutils.sysconfig import get_python_inc; print(get_python_inc())") -DPYTHON_LIBRARY=$(python -c "import distutils.sysconfig as sysconfig; print(sysconfig.get_config_var('LIBDIR')+'/libpython2.7.so')") -DNumPy_INCLUDE_DIR=$(python -c "import numpy; print(numpy.get_include())")
276 | make -j 12
277 | make install
278 | ```
279 |
280 | After installing, make sure you add the preCICE installation paths to your `.bashrc`, so that other programs can find it:
281 |
282 | ```bash
283 | export PRECICE_ROOT="path/to/precice_install"
284 | export PKG_CONFIG_PATH="path/to/precice_install/lib/pkgconfig:${PKG_CONFIG_PATH}"
285 | export CPLUS_INCLUDE_PATH="path/to/precice_install/include:${CPLUS_INCLUDE_PATH}"
286 | export LD_LIBRARY_PATH="path/to/precice_install/lib:${LD_LIBRARY_PATH}"
287 | ```
288 |
289 | Then, navigate to the python_future bindings script.
290 |
291 | ```bash
292 | cd /path/to/precice/src/precice/bindings/python_future
293 | ```
294 |
295 | Append the following to the head of the file to allow Python2 to run Python3 code. Note that
296 | importing `unicode_literals` from `future` will cause errors in `setuptools` methods as string literals
297 | in code are interpreted as `unicode` with this import.
298 |
299 | ```python
300 | from __future__ import (absolute_import, division,
301 | print_function)
302 | from builtins import (
303 | bytes, dict, int, list, object, range, str,
304 | ascii, chr, hex, input, next, oct, open,
305 | pow, round, super,
306 | filter, map, zip)
307 | ```
308 |
309 | Modify `mpicompiler_default = "mpic++"` to `mpicompiler_default = "mpicxx"` in line 100.
310 | Run the setup file using the default Python 2.7.17.
311 |
312 | ```bash
313 | python setup.py install --user
314 | ```
315 |
316 |
317 |
318 | ### ValueError while importing preCICE
319 |
320 | If you face the error:
321 |
322 | ```bash
323 | ValueError: numpy.ndarray size changed, may indicate binary incompatibility. Expected 88 from C header, got 80 from PyObject
324 | ```
325 |
326 | make sure that you are using an up-to-date version of NumPy. You can update NumPy with
327 |
328 | ```bash
329 | pip3 install numpy --upgrade
330 | ```
331 |
332 | ## Old versioning scheme
333 |
334 | Bindings versions up to `v3.0.0.0` have four digits, where the first three digits are the supported preCICE version, and the fourth digit is the bindings version. Example: `v2.0.0.1` and `v2.0.0.2` of the bindings represent versions `1` and `2` of the bindings that are compatible with preCICE `v2.0.0`. We dropped the third digit of the preCICE version as bugfix releases are always compatible and do not impact the bindings. The new three digit format is now consistent with other preCICE bindings.
335 |
336 | ## Contributors
337 |
338 | * [Benjamin Rodenberg](https://github.com/BenjaminRodenberg)
339 | * [Ishaan Desai](https://github.com/IshaanDesai)
340 | * [Saumitra Vinay Joshi](https://github.com/saumiJ) contributed first working prototype in [`3db9c9` on `precice/precice`](https://github.com/precice/precice/commit/3db9c95e527db1e1cacb2fd116a5ce13ee877513)
341 | * [Frédéric Simonis](https://github.com/fsimonis)
342 | * [Florian Lindner](https://github.com/floli)
343 | * [Benjamin Uekermann](https://github.com/uekerman)
344 | * [Gerasimos Chourdakis](https://github.com/MakisH)
345 |
--------------------------------------------------------------------------------
/cyprecice/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | cyprecice.cpp
3 |
--------------------------------------------------------------------------------
/cyprecice/Participant.pxd:
--------------------------------------------------------------------------------
1 | from libcpp cimport bool
2 | from libcpp.set cimport set
3 | from libcpp.string cimport string
4 | from libcpp.vector cimport vector
5 |
# Declarations of the C++ preCICE Participant API (precice/precice.hpp) so it
# can be called from Cython. `except +` lets Cython translate C++ exceptions
# raised by the call into Python exceptions.
cdef extern from "precice/precice.hpp" namespace "precice":
    cdef cppclass Participant:
        # construction and configuration

        # Participant(participantName, configFileName, processIndex, processSize)
        Participant (const string&, const string&, int, int) except +

        # Overload with an extra opaque pointer — used by the bindings to pass
        # a custom communicator (see __cinit__ in cyprecice.pyx).
        Participant (const string&, const string&, int, int, void*) except +

        # steering methods

        void initialize () except +

        void advance (double computedTimestepLength) except +

        void finalize()

        # status queries

        int getMeshDimensions(const string& meshName) except +

        int getDataDimensions(const string& meshName, const string& dataName) except +

        bool isCouplingOngoing()

        bool isTimeWindowComplete()

        double getMaxTimeStepSize()

        bool requiresInitialData()

        bool requiresWritingCheckpoint()

        bool requiresReadingCheckpoint()

        # mesh access

        bool requiresMeshConnectivityFor (const string& meshName) except +

        int setMeshVertex (const string& meshName, vector[double] position) except +

        int getMeshVertexSize (const string& meshName) except +

        void setMeshVertices (const string& meshName, vector[double] positions, vector[int]& ids) except +

        void setMeshEdge (const string& meshName, int firstVertexID, int secondVertexID) except +

        void setMeshEdges (const string& meshName, vector[int] vertices) except +

        void setMeshTriangle (const string& meshName, int firstVertexID, int secondVertexID, int thirdVertexID) except +

        void setMeshTriangles (const string& meshName, vector[int] vertices) except +

        void setMeshQuad (const string& meshName, int firstVertexID, int secondVertexID, int thirdVertexID, int fourthVertexID) except +

        void setMeshQuads (const string& meshName, vector[int] vertices) except +

        void setMeshTetrahedron (const string& meshName, int firstVertexID, int secondVertexID, int thirdVertexID, int fourthVertexID) except +

        void setMeshTetrahedra (const string& meshName, vector[int] vertices) except +

        # remeshing

        void resetMesh (const string& meshName) except +

        # data access

        void writeData (const string& meshName, const string& dataName, vector[int] vertices, vector[double] values) except +

        void readData (const string& meshName, const string& dataName, vector[int] vertices, const double relativeReadTime, vector[double]& values) except +

        # Just-in-time mapping

        void writeAndMapData (const string& meshName, const string& dataName, vector[double] coordinates, vector[double] values) except +

        void mapAndReadData (const string& meshName, const string& dataName, vector[double] coordinates, double relativeReadTime, vector[double]& values) except +

        # direct access

        void setMeshAccessRegion (const string& meshName, vector[double] boundingBox) except +

        void getMeshVertexIDsAndCoordinates (const string& meshName, vector[int]& ids, vector[double]& coordinates) except +

        # Gradient related API

        bool requiresGradientDataFor(const string& meshName, const string& dataName) except +

        void writeGradientData(const string& meshName, const string& dataName, vector[int] vertices, vector[double] gradientValues) except +

        # Experimental profiling API

        void startProfilingSection(const string& eventName)

        void stopLastProfilingSection()

# Free function returning the preCICE version string.
cdef extern from "precice/Tooling.hpp" namespace "precice":
    string getVersionInformation()
102 |
--------------------------------------------------------------------------------
/cyprecice/cyprecice.pxd:
--------------------------------------------------------------------------------
1 | """precice
2 |
3 | The python module precice offers python language bindings to the C++ coupling library precice. Please refer to precice.org for further information.
4 | """
5 |
6 | cimport numpy as np
7 | cimport cython
8 | cimport Participant as CppParticipant
9 |
10 | from cpython.version cimport PY_MAJOR_VERSION # important for determining python version in order to properly normalize string input. See http://docs.cython.org/en/latest/src/tutorial/strings.html#general-notes-about-c-strings and https://github.com/precice/precice/issues/68 .
11 |
@cython.embedsignature(True)
cdef class Participant:
    # Owning pointer to the wrapped C++ precice::Participant; allocated in
    # __cinit__ and released in __dealloc__ (see cyprecice.pyx).
    cdef CppParticipant.Participant *thisptr # hold a C++ instance being wrapped
15 |
--------------------------------------------------------------------------------
/cyprecice/cyprecice.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language = c++
2 |
3 | """precice
4 |
5 | The python module precice offers python language bindings to the C++ coupling library precice. Please refer to precice.org for further information.
6 | """
7 |
8 | cimport cyprecice
9 | cimport numpy
10 | import numpy as np
11 | from mpi4py import MPI
12 | import warnings
13 | from libcpp.string cimport string
14 | from libcpp.vector cimport vector
15 |
16 | from cpython.version cimport PY_MAJOR_VERSION # important for determining python version in order to properly normalize string input. See http://docs.cython.org/en/latest/src/tutorial/strings.html#general-notes-about-c-strings and https://github.com/precice/precice/issues/68 .
17 |
cdef bytes convert(s):
    """
    Normalize a string-like argument to ``bytes`` for passing into the C++ API.

    ``bytes`` is returned unchanged; ``str`` is encoded with the default codec;
    any other type raises ``TypeError``. The exact-type checks (``type(s) is``)
    are deliberate, following
    http://docs.cython.org/en/latest/src/tutorial/strings.html#general-notes-about-c-strings
    """
    if type(s) is bytes:
        return s
    elif type(s) is str:
        return s.encode()
    else:
        raise TypeError("Could not convert.")
28 |
29 |
def check_array_like(argument, argument_name, function_name):
    """
    Verify that ``argument`` supports the sequence protocol.

    Parameters
    ----------
    argument : object
        Value that must expose ``__len__`` and ``__getitem__``.
    argument_name : str
        Name of the parameter, used in the error message.
    function_name : str
        Name of the calling function, used in the error message.

    Raises
    ------
    TypeError
        If ``argument`` lacks ``__len__`` or ``__getitem__``.
    """
    looks_array_like = hasattr(argument, "__len__") and hasattr(argument, "__getitem__")
    if not looks_array_like:
        message = "{} requires array_like input for {}, but was provided the following input type: {}".format(
            function_name, argument_name, type(argument))
        raise TypeError(message) from None
37 |
38 | cdef class Participant:
39 | """
40 | Main Application Programming Interface of preCICE.
41 | To adapt a solver to preCICE, follow the following main structure:
42 | - Create an object of Participant with Participant()
43 | - Initialize preCICE with Participant::initialize()
44 | - Advance to the next (time)step with Participant::advance()
45 | - Finalize preCICE with Participant::finalize()
46 | - We use solver, simulation code, and participant as synonyms.
47 | - The preferred name in the documentation is participant.
48 | """
49 |
50 | # fake __init__ needed to display docstring for __cinit__ (see https://stackoverflow.com/a/42733794/5158031)
    def __init__(self, solver_name, configuration_file_name, solver_process_index, solver_process_size, communicator=None):
        """
        Constructor of Participant class.

        This stub only exposes the signature and docstring; the wrapped C++
        object is actually allocated in ``__cinit__``.

        Parameters
        ----------
        solver_name : string
            Name of the solver
        configuration_file_name : string
            Name of the preCICE config file
        solver_process_index : int
            Rank of the process
        solver_process_size : int
            Size of the process
        communicator: mpi4py.MPI.Intracomm, optional
            Custom MPI communicator to use

        Returns
        -------
        Participant : object
            Object pointing to the defined participant

        Example
        -------
        >>> participant = precice.Participant("SolverOne", "precice-config.xml", 0, 1)
        preCICE: This is preCICE version X.X.X
        preCICE: Revision info: vX.X.X-X-XXXXXXXXX
        preCICE: Configuring preCICE with configuration: "precice-config.xml"

        """
        pass
82 |
    def __cinit__ (self, solver_name, configuration_file_name, solver_process_index, solver_process_size, communicator=None):
        # Real constructor: allocates the wrapped C++ Participant.
        # NOTE(review): `communicator` is assigned to a void* and forwarded to
        # the 5-argument C++ overload; presumably it is an mpi4py communicator
        # handle — confirm against the preCICE communicator-passing docs.
        cdef void* communicator_ptr
        if communicator:
            communicator_ptr = communicator
            self.thisptr = new CppParticipant.Participant (convert(solver_name), convert(configuration_file_name), solver_process_index, solver_process_size, communicator_ptr)
        else:
            self.thisptr = new CppParticipant.Participant (convert(solver_name), convert(configuration_file_name), solver_process_index, solver_process_size)
        pass
91 |
    def __dealloc__ (self):
        """
        Destructor of Participant class.

        Deletes the C++ Participant instance allocated in ``__cinit__``.
        """
        del self.thisptr
97 |
98 |
99 | # steering methods
100 |
    def initialize (self):
        """
        Fully initializes preCICE and initializes coupling data. The starting values for coupling data are zero by
        default. To provide custom values, first set the data using the Data Access methods before calling this
        method to finally exchange the data.

        This function handles:
        - Parallel communication to the coupling partner/s is setup.
        - Meshes are exchanged between coupling partners and the parallel partitions are created.
        - [Serial Coupling Scheme] If the solver is not starting the simulation, coupling data is received
          from the coupling partner's first computation.

        Returns
        -------
        None
            Unlike in earlier API versions, nothing is returned (the underlying
            C++ initialize() is void). Query the allowed time step size via
            get_max_time_step_size() after initialization.
        """
        self.thisptr.initialize ()
119 |
120 |
    def advance (self, double computed_timestep_length):
        """
        Advances preCICE after the solver has computed one timestep.

        Parameters
        ----------
        computed_timestep_length : double
            Length of timestep used by the solver.
            NOTE(review): presumably this must not exceed the value reported by
            get_max_time_step_size() — confirm against the preCICE docs.

        Notes
        -----
        Previous calls:
            initialize() has been called successfully.
            The solver has computed one timestep.
            The solver has written all coupling data.
            finalize() has not yet been called.

        Tasks completed:
            Coupling data values specified in the configuration are exchanged.
            Coupling scheme state (computed time, computed timesteps, ...) is updated.
            The coupling state is logged.
            Configured data mapping schemes are applied.
            [Second Participant] Configured post processing schemes are applied.
            Meshes with data are exported to files if configured.
        """
        self.thisptr.advance (computed_timestep_length)
147 |
148 |
    def finalize (self):
        """
        Finalizes preCICE.

        Returns
        -------
        None

        Notes
        -----
        Previous calls:
            initialize() has been called successfully.

        Tasks completed:
            Communication channels are closed.
            Meshes and data are deallocated.
        """
        self.thisptr.finalize ()
163 |
164 |
165 | # status queries
166 |
    def get_mesh_dimensions (self, mesh_name):
        """
        Returns the spatial dimensionality of the given mesh.

        Parameters
        ----------
        mesh_name : string
            Name of the mesh.

        Returns
        -------
        dimension : int
            The dimensions of the given mesh.
        """

        # convert() normalizes str/bytes before crossing into the C++ API.
        return self.thisptr.getMeshDimensions (convert(mesh_name))
183 |
184 |
    def get_data_dimensions (self, mesh_name, data_name):
        """
        Returns the spatial dimensionality of the given data on the given mesh.

        Parameters
        ----------
        mesh_name : string
            Name of the mesh.
        data_name : string
            Name of the data.

        Returns
        -------
        dimension : int
            The dimensions of the given data (e.g. 1 for scalar data).
        """

        # convert() normalizes str/bytes before crossing into the C++ API.
        return self.thisptr.getDataDimensions (convert(mesh_name), convert(data_name))
203 |
204 |
    def is_coupling_ongoing (self):
        """
        Checks if the coupled simulation is still ongoing.
        A coupling is ongoing as long as
        - the maximum number of timesteps has not been reached, and
        - the final time has not been reached.
        The user should call finalize() after this function returns false.

        Returns
        -------
        tag : bool
            Whether the coupling is ongoing.

        Notes
        -----
        Previous calls:
            initialize() has been called successfully.
        """
        return self.thisptr.isCouplingOngoing ()
224 |
225 |
    def is_time_window_complete (self):
        """
        Checks if the current coupling time window is completed.
        The following reasons require several solver time steps per coupling time step:
        - A solver chooses to perform subcycling.
        - An implicit coupling timestep iteration is not yet converged.

        Returns
        -------
        tag : bool
            Whether the timestep is complete.

        Notes
        -----
        Previous calls:
            initialize() has been called successfully.
        """
        return self.thisptr.isTimeWindowComplete ()
244 |
245 |
    def get_max_time_step_size (self):
        """
        Get the maximum allowed time step size of the current window.

        Allows the user to query the maximum allowed time step size in the current window.
        This should be used to compute the actual time step that the solver uses.

        Returns
        -------
        tag : double
            Maximum size of time step to be computed by solver.

        Notes
        -----
        Previous calls:
            initialize() has been called successfully.
        """
        return self.thisptr.getMaxTimeStepSize ()
264 |
265 |
    def requires_initial_data (self):
        """
        Checks if the participant is required to provide initial data.
        If true, then the participant needs to write initial data to defined vertices
        prior to calling initialize().

        Returns
        -------
        tag : bool
            Returns True if initial data is required.

        Notes
        -----
        Previous calls:
            initialize() has not yet been called
        """
        return self.thisptr.requiresInitialData ()
283 |
    def requires_writing_checkpoint (self):
        """
        Checks if the participant is required to write an iteration checkpoint.

        If true, the participant is required to write an iteration checkpoint before
        calling advance().

        preCICE refuses to proceed if writing a checkpoint is required,
        but this method isn't called prior to advance().

        Returns
        -------
        tag : bool
            Returns True if an iteration checkpoint must be written.

        Notes
        -----
        Previous calls:
            initialize() has been called
        """
        return self.thisptr.requiresWritingCheckpoint ()
300 |
    def requires_reading_checkpoint (self):
        """
        Checks if the participant is required to read an iteration checkpoint.

        If true, the participant is required to read an iteration checkpoint before
        calling advance().

        preCICE refuses to proceed if reading a checkpoint is required,
        but this method isn't called prior to advance().

        Returns
        -------
        tag : bool
            Returns True if an iteration checkpoint must be read.

        Notes
        -----
        This function returns false before the first call to advance().

        Previous calls:
            initialize() has been called
        """
        return self.thisptr.requiresReadingCheckpoint ()
319 |
320 | # mesh access
321 |
    def requires_mesh_connectivity_for (self, mesh_name):
        """
        Checks if the given mesh requires connectivity (edges, triangles, ...).

        Parameters
        ----------
        mesh_name : string
            Name of the mesh.

        Returns
        -------
        tag : bool
            True if mesh connectivity is required.
        """
        return self.thisptr.requiresMeshConnectivityFor(convert(mesh_name))
337 |
338 |
339 | def set_mesh_vertex(self, mesh_name, position):
340 | """
341 | Creates a mesh vertex
342 |
343 | Parameters
344 | ----------
345 | mesh_name : str
346 | Name of the mesh to add the vertex to.
347 | position : array_like
348 | The coordinates of the vertex.
349 |
350 | Returns
351 | -------
352 | vertex_id : int
353 | ID of the vertex which is set.
354 |
355 | Notes
356 | -----
357 | Previous calls:
358 | Count of available elements at position matches the configured dimension
359 | """
360 | check_array_like(position, "position", "set_mesh_vertex")
361 |
362 | if len(position) > 0:
363 | dimensions = len(position)
364 | assert dimensions == self.get_mesh_dimensions(mesh_name), "Dimensions of vertex coordinate in set_mesh_vertex does not match with dimensions in problem definition. Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_mesh_dimensions(mesh_name))
365 | elif len(position) == 0:
366 | dimensions = self.get_mesh_dimensions(mesh_name)
367 |
368 | cdef vector[double] cpp_position = position
369 |
370 | vertex_id = self.thisptr.setMeshVertex(convert(mesh_name), cpp_position)
371 |
372 | return vertex_id
373 |
374 |
    def get_mesh_vertex_size (self, mesh_name):
        """
        Returns the number of vertices of a mesh.

        Parameters
        ----------
        mesh_name : str
            Name of the mesh.

        Returns
        -------
        sum : int
            Number of vertices of the mesh.
        """

        return self.thisptr.getMeshVertexSize(convert(mesh_name))
391 |
392 |
393 | def set_mesh_vertices (self, mesh_name, positions):
394 | """
395 | Creates multiple mesh vertices
396 |
397 | Parameters
398 | ----------
399 | mesh_name : str
400 | Name of the mesh to add the vertices to.
401 | positions : array_like
402 | The coordinates of the vertices in a numpy array [N x D] where
403 | N = number of vertices and D = dimensions of geometry.
404 |
405 | Returns
406 | -------
407 | vertex_ids : numpy.ndarray
408 | IDs of the created vertices.
409 |
410 | Notes
411 | -----
412 | Previous calls:
413 | initialize() has not yet been called
414 | count of available elements at positions matches the configured dimension * size
415 | count of available elements at ids matches size
416 |
417 | Examples
418 | --------
419 | Set mesh vertices for a 2D problem with 5 mesh vertices.
420 |
421 | >>> positions = np.array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
422 | >>> positions.shape
423 | (5, 2)
424 | >>> mesh_name = "MeshOne"
425 | >>> vertex_ids = participant.set_mesh_vertices(mesh_name, positions)
426 | >>> vertex_ids.shape
427 | (5,)
428 |
429 | Set mesh vertices for a 3D problem with 5 mesh vertices.
430 |
431 | >>> positions = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4], [5, 5, 5]])
432 | >>> positions.shape
433 | (5, 3)
434 | >>> mesh_name = "MeshOne"
435 | >>> vertex_ids = participant.set_mesh_vertices(mesh_name, positions)
436 | >>> vertex_ids.shape
437 | (5,)
438 | """
439 | check_array_like(positions, "positions", "set_mesh_vertices")
440 |
441 | if not isinstance(positions, np.ndarray):
442 | positions = np.asarray(positions)
443 |
444 | if len(positions) > 0:
445 | size, dimensions = positions.shape
446 | assert dimensions == self.get_mesh_dimensions(mesh_name), "Dimensions of vertex coordinates in set_mesh_vertices does not match with dimensions in problem definition. Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_mesh_dimensions(mesh_name))
447 | elif len(positions) == 0:
448 | size = 0
449 | dimensions = self.get_mesh_dimensions(mesh_name)
450 |
451 | cdef vector[double] cpp_positions = positions.flatten()
452 | cdef vector[int] cpp_ids = [-1 for _ in range(size)]
453 |
454 | self.thisptr.setMeshVertices (convert(mesh_name), cpp_positions, cpp_ids)
455 |
456 | cdef np.ndarray[int, ndim=1] np_ids = np.array(cpp_ids, dtype=np.int32)
457 |
458 | return np_ids
459 |
460 |
    def set_mesh_edge (self, mesh_name, first_vertex_id, second_vertex_id):
        """
        Sets a mesh edge from vertex IDs.

        Note: nothing is returned — the underlying C++ setMeshEdge() is void
        (edge IDs are not exposed by this API version).

        Parameters
        ----------
        mesh_name : str
            Name of the mesh to add the edge to.
        first_vertex_id : int
            ID of the first vertex of the edge.
        second_vertex_id : int
            ID of the second vertex of the edge.

        Notes
        -----
        Previous calls:
            vertices with firstVertexID and secondVertexID were added to the mesh with name mesh_name
        """

        self.thisptr.setMeshEdge (convert(mesh_name), first_vertex_id, second_vertex_id)
486 |
487 |
488 | def set_mesh_edges (self, mesh_name, vertices):
489 | """
490 | Creates multiple mesh edges
491 |
492 | Parameters
493 | ----------
494 | mesh_name : str
495 | Name of the mesh to add the vertices to.
496 | vertices : array_like
497 | The IDs of the vertices in a numpy array [N x 2] where
498 | N = number of edges and D = dimensions of geometry.
499 |
500 | Examples
501 | --------
502 | Set mesh edges for a problem with 4 mesh vertices in the form of a square with both diagonals which are fully interconnected.
503 |
504 | >>> vertices = np.array([[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]])
505 | >>> vertices.shape
506 | (6, 2)
507 | >>> participant.set_mesh_edges(mesh_name, vertices)
508 | """
509 | check_array_like(vertices, "vertices", "set_mesh_edges")
510 |
511 | if not isinstance(vertices, np.ndarray):
512 | vertices = np.asarray(vertices)
513 |
514 | if len(vertices) > 0:
515 | _, n = vertices.shape
516 | assert n == 2, "Provided vertices are not of a [N x 2] format, but instead of a [N x {}]".format(n)
517 | elif len(vertices) == 0:
518 | dimensions = self.get_mesh_dimensions(mesh_name)
519 |
520 | cdef vector[int] cpp_vertices = vertices.flatten()
521 |
522 | self.thisptr.setMeshEdges (convert(mesh_name), cpp_vertices)
523 |
524 |
    def set_mesh_triangle (self, mesh_name, first_vertex_id, second_vertex_id, third_vertex_id):
        """
        Set a mesh triangle from vertex IDs.

        Parameters
        ----------
        mesh_name : str
            Name of the mesh to add the triangle to.
        first_vertex_id : int
            ID of the first vertex of the triangle.
        second_vertex_id : int
            ID of the second vertex of the triangle.
        third_vertex_id : int
            ID of the third vertex of the triangle.

        Notes
        -----
        Previous calls:
            vertices with first_vertex_id, second_vertex_id, and third_vertex_id were added to the mesh with the name mesh_name
        """

        self.thisptr.setMeshTriangle (convert(mesh_name), first_vertex_id, second_vertex_id, third_vertex_id)
547 |
548 |
549 | def set_mesh_triangles (self, mesh_name, vertices):
550 | """
551 | Creates multiple mesh triangles
552 |
553 | Parameters
554 | ----------
555 | mesh_name : str
556 | Name of the mesh to add the triangles to.
557 | vertices : array_like
558 | The IDs of the vertices in a numpy array [N x 3] where
559 | N = number of triangles; each row holds the three vertex IDs of one triangle.
560 |
561 | Examples
562 | --------
563 | Set mesh triangles for a problem with 4 mesh vertices in the form of a square with both diagonals which are fully interconnected.
564 |
565 | >>> vertices = np.array([[1, 2, 3], [1, 3, 4], [1, 2, 4], [1, 3, 4]])
566 | >>> vertices.shape
567 | (4, 3)
568 | >>> participant.set_mesh_triangles(mesh_name, vertices)
569 | """
570 | check_array_like(vertices, "vertices", "set_mesh_triangles")
571 |
572 | if not isinstance(vertices, np.ndarray):
573 | vertices = np.asarray(vertices)
574 |
575 | if len(vertices) > 0:
576 | _, n = vertices.shape
577 | assert n == 3, "Provided vertices are not of a [N x 3] format, but instead of a [N x {}]".format(n)  # a triangle always has 3 vertices, independent of the mesh dimension
578 | elif len(vertices) == 0:
579 | dimensions = self.get_mesh_dimensions(mesh_name)  # validates mesh_name even for empty input; result unused
580 |
581 | cdef vector[int] cpp_vertices = vertices.flatten()
582 |
583 | self.thisptr.setMeshTriangles (convert(mesh_name), cpp_vertices)
584 |
585 |
586 | def set_mesh_quad (self, mesh_name, first_vertex_id, second_vertex_id, third_vertex_id, fourth_vertex_id):
587 | """
588 | Set a mesh Quad from vertex IDs.
589 |
590 | Parameters
591 | ----------
592 | mesh_name : str
593 | Name of the mesh to add the quad to.
594 | first_vertex_id : int
595 | ID of the first vertex of the quad.
596 | second_vertex_id : int
597 | ID of the second vertex of the quad.
598 | third_vertex_id : int
599 | ID of the third vertex of the quad.
600 | fourth_vertex_id : int
601 | ID of the fourth vertex of the quad.
602 |
603 | Notes
604 | -----
605 | Previous calls:
606 | vertices with first_vertex_id, second_vertex_id, third_vertex_id, and fourth_vertex_id were added
607 | to the mesh with the name mesh_name
608 | """
609 |
610 | self.thisptr.setMeshQuad (convert(mesh_name), first_vertex_id, second_vertex_id, third_vertex_id, fourth_vertex_id)
611 |
612 |
613 | def set_mesh_quads (self, mesh_name, vertices):
614 | """
615 | Creates multiple mesh quads
616 |
617 | Parameters
618 | ----------
619 | mesh_name : str
620 | Name of the mesh to add the quads to.
621 | vertices : array_like
622 | The IDs of the vertices in a numpy array [N x 4] where
623 | N = number of quads; each row holds the four vertex IDs of one quad.
624 |
625 | Examples
626 | --------
627 | Set mesh quads for a problem with 4 mesh vertices in the form of a square with both diagonals which are fully interconnected.
628 |
629 | >>> vertices = np.array([[1, 2, 3, 4]])
630 | >>> vertices.shape
631 | (1, 4)
632 | >>> participant.set_mesh_quads(mesh_name, vertices)
633 | """
634 | check_array_like(vertices, "vertices", "set_mesh_quads")
635 |
636 | if not isinstance(vertices, np.ndarray):
637 | vertices = np.asarray(vertices)
638 |
639 | if len(vertices) > 0:
640 | _, n = vertices.shape
641 | assert n == 4, "Provided vertices are not of a [N x 4] format, but instead of a [N x {}]".format(n)
642 | elif len(vertices) == 0:
643 | dimensions = self.get_mesh_dimensions(mesh_name)  # validates mesh_name even for empty input; result unused
644 |
645 | cdef vector[int] cpp_vertices = vertices.flatten()
646 |
647 | self.thisptr.setMeshQuads (convert(mesh_name), cpp_vertices)
648 |
649 |
650 | def set_mesh_tetrahedron (self, mesh_name, first_vertex_id, second_vertex_id, third_vertex_id, fourth_vertex_id):
651 | """
652 | Sets a mesh tetrahedron from vertex IDs.
653 |
654 | Parameters
655 | ----------
656 | mesh_name : str
657 | Name of the mesh to add the tetrahedron to.
658 | first_vertex_id : int
659 | ID of the first vertex of the tetrahedron.
660 | second_vertex_id : int
661 | ID of the second vertex of the tetrahedron.
662 | third_vertex_id : int
663 | ID of the third vertex of the tetrahedron.
664 | fourth_vertex_id : int
665 | ID of the fourth vertex of the tetrahedron.
666 |
667 | Notes
668 | -----
669 | Previous calls:
670 | vertices with first_vertex_id, second_vertex_id, third_vertex_id, and fourth_vertex_id were added
671 | to the mesh with the name mesh_name
672 | """
673 |
674 | self.thisptr.setMeshTetrahedron (convert(mesh_name), first_vertex_id, second_vertex_id, third_vertex_id, fourth_vertex_id)
675 |
676 |
677 | def set_mesh_tetrahedra (self, mesh_name, vertices):
678 | """
679 | Creates multiple mesh tetrahedra
680 |
681 | Parameters
682 | ----------
683 | mesh_name : str
684 | Name of the mesh to add the tetrahedra to.
685 | vertices : array_like
686 | The IDs of the vertices in a numpy array [N x 4] where
687 | N = number of tetrahedra; each row holds the four vertex IDs of one tetrahedron.
688 |
689 | Examples
690 | --------
691 | Set mesh tetrahedra for a problem with 4 mesh vertices.
692 |
693 | >>> vertices = np.array([[1, 2, 3, 4]])
694 | >>> vertices.shape
695 | (1, 4)
696 | >>> participant.set_mesh_tetrahedra(mesh_name, vertices)
697 | """
698 | check_array_like(vertices, "vertices", "set_mesh_tetrahedra")
699 |
700 | if not isinstance(vertices, np.ndarray):
701 | vertices = np.asarray(vertices)
702 |
703 | if len(vertices) > 0:
704 | _, n = vertices.shape
705 | assert n == 4, "Provided vertices are not of a [N x 4] format, but instead of a [N x {}]".format(n)
706 | elif len(vertices) == 0:
707 | dimensions = self.get_mesh_dimensions(mesh_name)  # validates mesh_name even for empty input; result unused
708 |
709 | cdef vector[int] cpp_vertices = vertices.flatten()
710 |
711 | self.thisptr.setMeshTetrahedra (convert(mesh_name), cpp_vertices)
712 |
713 | # remeshing
714 |
715 |
716 | def reset_mesh (self, mesh_name):
717 | """
718 | Resets a mesh and allows setting it using set_mesh functions again.
719 |
720 | Parameters
721 | ----------
722 | mesh_name : str
723 | Name of the mesh to reset.
724 |
725 | Notes
726 | -----
727 | This function is still experimental.
728 | Please refer to the documentation on how to enable and use it.
729 |
730 | Previous calls:
731 | advance() has been called
732 |
733 | Examples
734 | --------
735 | Reset a mesh with 5 vertices to have 3 vertices.
736 |
737 | >>> positions = np.array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
738 | >>> mesh_name = "MeshOne"
739 | >>> vertex_ids = participant.set_mesh_vertices(mesh_name, positions)
740 | >>> # later in the coupling loop
741 | >>> if remeshing_required():
742 | >>>     participant.reset_mesh(mesh_name)
743 | >>>     positions = np.array([[1, 1], [3, 3], [5, 5]])
744 | >>>     vertex_ids = participant.set_mesh_vertices(mesh_name, positions)
745 | """
746 |
747 | self.thisptr.resetMesh (convert(mesh_name))  # delegates to the C++ Participant::resetMesh
748 |
749 | # data access
750 |
751 | def write_data (self, mesh_name, data_name, vertex_ids, values):
752 | """
753 | This function writes values of specified vertices to data of a mesh.
754 | Values are provided as a block of continuous memory defined by values. Values are stored in a numpy array [N x D] where N = number of vertices and D = dimensions of geometry.
755 | The order of the provided data follows the order specified by vertices.
756 |
757 | Parameters
758 | ----------
759 | mesh_name : str
760 | name of the mesh to write to.
761 | data_name : str
762 | Data name to write to.
763 | vertex_ids : array_like
764 | Indices of the vertices.
765 | values : array_like
766 | Values of data
767 |
768 | Notes
769 | -----
770 | Previous calls:
771 | count of available elements at values matches the configured dimension * size
772 | count of available elements at vertex_ids matches the given size
773 | initialize() has been called
774 |
775 | Examples
776 | --------
777 | Write scalar data for a 2D problem with 5 vertices:
778 | >>> mesh_name = "MeshOne"
779 | >>> data_name = "DataOne"
780 | >>> vertex_ids = [1, 2, 3, 4, 5]
781 | >>> values = np.array([v1, v2, v3, v4, v5])
782 | >>> participant.write_data(mesh_name, data_name, vertex_ids, values)
783 |
784 | Write vector data for a 2D problem with 5 vertices:
785 | >>> mesh_name = "MeshOne"
786 | >>> data_name = "DataOne"
787 | >>> vertex_ids = [1, 2, 3, 4, 5]
788 | >>> values = np.array([[v1_x, v1_y], [v2_x, v2_y], [v3_x, v3_y], [v4_x, v4_y], [v5_x, v5_y]])
789 | >>> participant.write_data(mesh_name, data_name, vertex_ids, values)
790 |
791 | Write vector data for a 3D (D=3) problem with 5 (N=5) vertices:
792 | >>> mesh_name = "MeshOne"
793 | >>> data_name = "DataOne"
794 | >>> vertex_ids = [1, 2, 3, 4, 5]
795 | >>> values = np.array([[v1_x, v1_y, v1_z], [v2_x, v2_y, v2_z], [v3_x, v3_y, v3_z], [v4_x, v4_y, v4_z], [v5_x, v5_y, v5_z]])
796 | >>> participant.write_data(mesh_name, data_name, vertex_ids, values)
797 | """
798 | check_array_like(vertex_ids, "vertex_ids", "write_data")
799 | check_array_like(values, "values", "write_data")
800 |
801 | if not isinstance(values, np.ndarray):
802 | values = np.asarray(values)
803 |
804 | if len(values) == 0:
805 | size, dimensions = 0, self.get_data_dimensions(mesh_name, data_name)  # dimensions must be defined: the assert below reads it even for empty input
806 | elif self.get_data_dimensions(mesh_name, data_name) == 1:
807 | size = values.flatten().shape[0]
808 | dimensions = 1
809 | else:
810 | assert len(values.shape) == 2, "Vector valued data has to be provided as a numpy array of shape [N x D] where N = number of vertices and D = number of dimensions."
811 | size, dimensions = values.shape
812 |
813 | assert dimensions == self.get_data_dimensions(mesh_name, data_name), "Dimensions of vector data in write_data do not match with dimensions in problem definition. Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_data_dimensions(mesh_name, data_name))
814 |
815 | assert len(vertex_ids) == size, "Vertex IDs are of incorrect length in write_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(len(vertex_ids), size)
816 |
817 | cdef vector[int] cpp_ids = vertex_ids
818 | cdef vector[double] cpp_values = values.flatten()
819 |
820 | self.thisptr.writeData (convert(mesh_name), convert(data_name), cpp_ids, cpp_values)
821 |
822 |
823 | def read_data (self, mesh_name, data_name, vertex_ids, relative_read_time):
824 | """
825 | Reads data into a provided block. This function reads values of specified vertices
826 | from a dataID. Values are read into a block of continuous memory.
827 |
828 | Parameters
829 | ----------
830 | mesh_name : str
831 | Name of the mesh to write to.
832 | data_name : str
833 | Name of the data to read from.
834 | vertex_ids : array_like
835 | Indices of the vertices.
836 | relative_read_time : double
837 | Point in time where data is read relative to the beginning of the current time step
838 |
839 | Returns
840 | -------
841 | values : numpy.ndarray
842 | Contains the read data.
843 |
844 | Notes
845 | -----
846 | Previous calls:
847 | count of available elements at values matches the configured dimension * size
848 | count of available elements at vertex_ids matches the given size
849 | initialize() has been called
850 |
851 | Examples
852 | --------
853 | Read scalar data for a 2D problem with 5 vertices:
854 | >>> mesh_name = "MeshOne"
855 | >>> data_name = "DataOne"
856 | >>> vertex_ids = [1, 2, 3, 4, 5]
857 | >>> dt = 1.0
858 | >>> values = read_data(mesh_name, data_name, vertex_ids, dt)
859 | >>> values.shape
860 | >>> (5, )
861 |
862 | Read vector data for a 2D problem with 5 vertices:
863 | >>> mesh_name = "MeshOne"
864 | >>> data_name = "DataOne"
865 | >>> vertex_ids = [1, 2, 3, 4, 5]
866 | >>> dt = 1.0
867 | >>> values = read_data(mesh_name, data_name, vertex_ids, dt)
868 | >>> values.shape
869 | >>> (5, 2)
870 |
871 | Read vector data for a 3D system with 5 vertices:
872 | >>> mesh_name = "MeshOne"
873 | >>> data_name = "DataOne"
874 | >>> vertex_ids = [1, 2, 3, 4, 5]
875 | >>> dt = 1.0
876 | >>> values = read_data(mesh_name, data_name, vertex_ids, dt)
877 | >>> values.shape
878 | >>> (5, 3)
879 | """
880 | check_array_like(vertex_ids, "vertex_ids", "read_data")
881 |
882 | if len(vertex_ids) == 0:
883 | size = 0
884 | dimensions = self.get_data_dimensions(mesh_name, data_name)
885 | elif self.get_data_dimensions(mesh_name, data_name) == 1:
886 | size = len(vertex_ids)
887 | dimensions = 1
888 | else:
889 | size = len(vertex_ids)
890 | dimensions = self.get_data_dimensions(mesh_name, data_name)
891 |
892 | cdef vector[int] cpp_ids = vertex_ids
893 | cdef vector[double] cpp_values = [-1 for _ in range(size * dimensions)]  # placeholder buffer; overwritten in place by readData below
894 |
895 | self.thisptr.readData (convert(mesh_name), convert(data_name), cpp_ids, relative_read_time, cpp_values)
896 |
897 | cdef np.ndarray[double, ndim=1] np_values = np.array(cpp_values, dtype=np.double)
898 |
899 | if len(vertex_ids) == 0:
900 | return np_values.reshape((size))  # empty result stays 1-D
901 | elif self.get_data_dimensions(mesh_name, data_name) == 1:
902 | return np_values.reshape((size))  # scalar data: shape (N,)
903 | else:
904 | return np_values.reshape((size, dimensions))  # vector data: shape (N, D)
905 |
906 | def write_and_map_data (self, mesh_name, data_name, coordinates, values):
907 | """
908 | This function writes values at temporary locations to data of a mesh.
909 | As opposed to the writeData function using VertexIDs, this function allows to write data via coordinates,
910 | which don't have to be specified during the initialization. This is particularly useful for meshes, which
911 | vary over time. Note that using this function comes at a performance cost, since the specified mapping
912 | needs to be computed locally for the given locations, whereas the other variant (writeData) can typically
913 | exploit the static interface mesh and pre-compute data structures more efficiently.
914 |
915 | Values are passed identically to write_data.
916 |
917 | Parameters
918 | ----------
919 | mesh_name : str
920 | name of the mesh to write to.
921 | data_name : str
922 | Data name to write to.
923 | coordinates : array_like
924 | The coordinates of the vertices in a numpy array [N x D] where
925 | N = number of vertices and D = dimensions of geometry.
926 | values : array_like
927 | Values of data
928 |
929 | Examples
930 | --------
931 | Write scalar data for a 2D problem with 5 vertices:
932 | >>> mesh_name = "MeshOne"
933 | >>> data_name = "DataOne"
934 | >>> coordinates = np.array([[c1_x, c1_y], [c2_x, c2_y], [c3_x, c3_y], [c4_x, c4_y], [c5_x, c5_y]])
935 | >>> values = np.array([v1, v2, v3, v4, v5])
936 | >>> participant.write_and_map_data(mesh_name, data_name, coordinates, values)
937 | """
938 |
939 | check_array_like(coordinates, "coordinates", "write_and_map_data")
940 | check_array_like(values, "values", "write_and_map_data")
941 |
942 | if not isinstance(coordinates, np.ndarray):
943 | coordinates = np.asarray(coordinates)
944 |
945 | if not isinstance(values, np.ndarray):
946 | values = np.asarray(values)
947 |
948 | cdef vector[double] cpp_coordinates = coordinates.flatten()  # row-major flatten: [c1_x, c1_y, c2_x, c2_y, ...]
949 | cdef vector[double] cpp_values = values.flatten()
950 |
951 | self.thisptr.writeAndMapData (convert(mesh_name), convert(data_name), cpp_coordinates, cpp_values)
952 |
953 | def map_and_read_data (self, mesh_name, data_name, coordinates, relative_read_time):
954 | """
955 | This function reads values at temporary locations from data of a mesh.
956 | As opposed to the readData function using VertexIDs, this function allows reading data via coordinates,
957 | which don't have to be specified during the initialization. This is particularly useful for meshes, which
958 | vary over time. Note that using this function comes at a performance cost, since the specified mapping
959 | needs to be computed locally for the given locations, whereas the other variant (readData) can typically
960 | exploit the static interface mesh and pre-compute data structures more efficient.
961 |
962 | Values are read identically to read_data.
963 |
964 | Parameters
965 | ----------
966 | mesh_name : str
967 | Name of the mesh to write to.
968 | data_name : str
969 | Name of the data to read from.
970 | coordinates : array_like
971 | Coordinates of the vertices.
972 | relative_read_time : double
973 | Point in time where data is read relative to the beginning of the current time step
974 |
975 | Returns
976 | -------
977 | values : numpy.ndarray
978 | Contains the read data.
979 |
980 | Examples
981 | --------
982 | Read scalar data for a 2D problem with 2 vertices:
983 | >>> mesh_name = "MeshOne"
984 | >>> data_name = "DataOne"
985 | >>> coordinates = [(1.0, 1.0), (2.0, 2.0)]
986 | >>> dt = 1.0
987 | >>> values = map_and_read_data(mesh_name, data_name, coordinates, dt)
988 | >>> values.shape
989 | >>> (2, )
990 | """
991 |
992 | check_array_like(coordinates, "coordinates", "map_and_read_data")
993 |
994 | if not isinstance(coordinates, np.ndarray):
995 | coordinates = np.asarray(coordinates)
996 |
997 | size = coordinates.shape[0]
998 | dimensions = self.get_data_dimensions(mesh_name, data_name)
999 |
1000 | cdef vector[double] cpp_coordinates = coordinates.flatten()
1001 | cdef vector[double] cpp_values = [-1 for _ in range(size * dimensions)]  # placeholder buffer; overwritten in place by mapAndReadData below
1002 |
1003 | self.thisptr.mapAndReadData (convert(mesh_name), convert(data_name), cpp_coordinates, relative_read_time, cpp_values)
1004 |
1005 | cdef np.ndarray[double, ndim=1] np_values = np.array(cpp_values, dtype=np.double)
1006 |
1007 | if len(coordinates) == 0:
1008 | return np_values.reshape((size))  # empty result stays 1-D
1009 | elif self.get_data_dimensions(mesh_name, data_name) == 1:
1010 | return np_values.reshape((size))  # scalar data: shape (N,)
1011 | else:
1012 | return np_values.reshape((size, dimensions))  # vector data: shape (N, D)
1013 |
1014 | def write_gradient_data (self, mesh_name, data_name, vertex_ids, gradients):
1015 | """
1016 | Writes gradient data given as block. This function writes gradient values of specified vertices to a dataID.
1017 | Values are provided as a block of continuous memory. Values are stored in a numpy array [N x D] where N = number
1018 | of vertices and D = number of gradient components.
1019 |
1020 | Parameters
1021 | ----------
1022 | mesh_name : str
1023 | Name of the mesh to write to.
1024 | data_name : str
1025 | Data name to write to.
1026 | vertex_ids : array_like
1027 | Indices of the vertices.
1028 | gradients : array_like
1029 | Gradient values differentiated in the spacial direction (dx, dy) for 2D space, (dx, dy, dz) for 3D space
1030 |
1031 | Notes
1032 | -----
1033 | Previous calls:
1034 | Count of available elements at values matches the configured dimension
1035 | Count of available elements at vertex_ids matches the given size
1036 | Initialize() has been called
1037 | Data with dataID has attribute hasGradient = true
1038 |
1039 | Examples
1040 | --------
1041 | Write gradient vector data for a 2D problem with 2 vertices:
1042 | >>> mesh_name = "MeshOne"
1043 | >>> data_name = "DataOne"
1044 | >>> vertex_ids = [1, 2]
1045 | >>> gradients = np.array([[v1x_dx, v1y_dx, v1x_dy, v1y_dy], [v2x_dx, v2y_dx, v2x_dy, v2y_dy]])
1046 | >>> participant.write_gradient_data(mesh_name, data_name, vertex_ids, gradients)
1047 |
1048 | Write vector data for a 3D problem with 2 vertices:
1049 | >>> mesh_name = "MeshOne"
1050 | >>> data_name = "DataOne"
1051 | >>> vertex_ids = [1, 2]
1052 | >>> gradients = np.array([[v1x_dx, v1y_dx, v1z_dx, v1x_dy, v1y_dy, v1z_dy, v1x_dz, v1y_dz, v1z_dz], [v2x_dx, v2y_dx, v2z_dx, v2x_dy, v2y_dy, v2z_dy, v2x_dz, v2y_dz, v2z_dz]])
1053 | >>> participant.write_gradient_data(mesh_name, data_name, vertex_ids, gradients)
1054 | """
1055 | check_array_like(vertex_ids, "vertex_ids", "write_gradient_data")
1056 | check_array_like(gradients, "gradients", "write_gradient_data")
1057 |
1058 | if not isinstance(gradients, np.ndarray):
1059 | gradients = np.asarray(gradients)
1060 |
1061 | if len(gradients) > 0:
1062 | size, dimensions = gradients.shape  # shape unpack requires a 2-D [N x mesh_dim*data_dim] array
1063 | assert dimensions == self.get_mesh_dimensions(mesh_name) * self.get_data_dimensions(mesh_name, data_name), "Dimensions of vector data in write_gradient_data does not match with dimensions in problem definition. Provided dimensions: {}, expected dimensions: {}".format(dimensions, self.get_mesh_dimensions(mesh_name) * self.get_data_dimensions (mesh_name, data_name))
1064 | if len(gradients) == 0:
1065 | size = 0
1066 |
1067 | cdef vector[int] cpp_vertex_ids = vertex_ids
1068 | cdef vector[double] cpp_gradients = gradients.flatten()
1069 |
1070 | assert cpp_gradients.size() == size * self.get_mesh_dimensions(mesh_name) * self.get_data_dimensions (mesh_name, data_name), "Dimension of gradient data provided in write_gradient_data does not match problem definition. Check length of input data provided. Provided size: {}, expected size: {}".format(cpp_gradients.size(), size * self.get_mesh_dimensions(mesh_name) * self.get_data_dimensions (mesh_name, data_name))
1071 | assert cpp_vertex_ids.size() == size, "Vertex IDs are of incorrect length in write_gradient_data. Check length of vertex ids input. Provided size: {}, expected size: {}".format(cpp_vertex_ids.size(), size)
1072 |
1073 | self.thisptr.writeGradientData (convert(mesh_name), convert(data_name), cpp_vertex_ids, cpp_gradients)
1074 |
1075 |
1076 | def requires_gradient_data_for(self, mesh_name, data_name):
1077 | """
1078 | Checks if the given data set requires gradient data. We check if the data object has been initialized with the gradient flag.
1079 |
1080 | Parameters
1081 | ----------
1082 | mesh_name : str
1083 | Mesh name to check.
1084 | data_name : str
1085 | Data name to check.
1086 |
1087 | Returns
1088 | -------
1089 | bool
1090 | True if gradient data is required for a data.
1091 |
1092 | Examples
1093 | --------
1094 | Check if gradient data is required for a data:
1095 | >>> mesh_name = "MeshOne"
1096 | >>> data_name = "DataOne"
1097 | >>> participant.requires_gradient_data_for(mesh_name, data_name)
1098 | """
1099 |
1100 | return self.thisptr.requiresGradientDataFor(convert(mesh_name), convert(data_name))
1101 |
1102 |
1103 | def set_mesh_access_region (self, mesh_name, bounding_box):
1104 | """
1105 | This function is required if you don't want to use the mapping schemes in preCICE, but rather
1106 | want to use your own solver for data mapping. As opposed to the usual preCICE mapping, only a
1107 | single mesh (from the other participant) is now involved in this situation since an 'own'
1108 | mesh defined by the participant itself is not required any more. In order to re-partition the
1109 | received mesh, the participant needs to define the mesh region it wants read data from and
1110 | write data to. The mesh region is specified through an axis-aligned bounding box given by the
1111 | lower and upper [min and max] bounding-box limits in each space dimension [x, y, z]. This function is still
1112 | experimental
1113 |
1114 | Parameters
1115 | ----------
1116 | mesh_name : str
1117 | Name of the mesh you want to access through the bounding box
1118 | bounding_box : array_like
1119 | Axis aligned bounding box. Example for 3D the format: [x_min, x_max, y_min, y_max, z_min, z_max]
1120 |
1121 | Notes
1122 | -----
1123 | Defining a bounding box for serial runs of the solver (not to be confused with serial coupling
1124 | mode) is valid. However, a warning is raised in case vertices are filtered out completely
1125 | on the receiving side, since the associated data values of the filtered vertices are filled
1126 | with zero data.
1127 |
1128 | This function can only be called once per participant and rank and trying to call it more than
1129 | once results in an error.
1130 |
1131 | If you combine the direct access with a mapping (say you want to read data from a defined
1132 | mesh, as usual, but you want to directly access and write data on a received mesh without a
1133 | mapping) you may not need this function at all since the region of interest is already defined
1134 | through the defined mesh used for data reading. This is the case if you define any mapping
1135 | involving the directly accessed mesh on the receiving participant. (In parallel, only the cases
1136 | read-consistent and write-conservative are relevant, as usual).
1137 |
1138 | The safety factor scaling (see safety-factor in the configuration file) is not applied to the
1139 | defined access region and a specified safety will be ignored in case there is no additional
1140 | mapping involved. However, in case a mapping is in addition to the direct access involved, you
1141 | will receive (and gain access to) vertices inside the defined access region plus vertices inside
1142 | the safety factor region resulting from the mapping. The default value of the safety factor is
1143 | 0.5, i.e. the defined access region as computed through the involved provided mesh is by 50%
1144 | enlarged.
1145 | """
1146 | check_array_like(bounding_box, "bounding_box", "set_mesh_access_region")
1147 |
1148 | if not isinstance(bounding_box, np.ndarray):
1149 | bounding_box = np.asarray(bounding_box)
1150 |
1151 | assert len(bounding_box) > 0, "Bounding box cannot be empty."
1152 |
1153 | assert len(bounding_box) == (self.get_mesh_dimensions(mesh_name) * 2), "Dimensions of bounding box in set_mesh_access_region does not match with dimensions in problem definition."
1154 |
1155 | cdef vector[double] cpp_bounding_box = list(bounding_box)  # flat [min, max] pairs per space dimension, see docstring
1156 |
1157 | self.thisptr.setMeshAccessRegion(convert(mesh_name), cpp_bounding_box)
1158 |
1159 |
1160 | def get_mesh_vertex_ids_and_coordinates (self, mesh_name):
1161 | """
1162 | Iterating over the region of interest defined by bounding boxes and reading the corresponding
1163 | coordinates omitting the mapping. This function is still experimental.
1164 |
1165 | Parameters
1166 | ----------
1167 | mesh_name : str
1168 | Corresponding mesh name
1169 |
1170 | Returns
1171 | -------
1172 | ids : numpy.ndarray
1173 | Vertex IDs corresponding to the coordinates
1174 | coordinates : numpy.ndarray
1175 | The coordinates associated with the IDs, as a [size x dim] array
1176 | """
1177 | size = self.get_mesh_vertex_size(mesh_name)
1178 | dimensions = self.get_mesh_dimensions(mesh_name)
1179 |
1180 | cdef vector[int] cpp_ids = [-1 for _ in range(size)]  # placeholder buffers; overwritten in place below
1181 | cdef vector[double] cpp_coordinates = [-1 for _ in range(size * dimensions)]
1182 |
1183 | self.thisptr.getMeshVertexIDsAndCoordinates(convert(mesh_name), cpp_ids, cpp_coordinates)
1184 |
1185 | cdef np.ndarray[int, ndim=1] np_ids = np.array(cpp_ids, dtype=np.int32)
1186 | cdef np.ndarray[double, ndim=1] np_coordinates = np.array(cpp_coordinates, dtype=np.double)
1187 |
1188 | return np_ids, np_coordinates.reshape((size, dimensions))
1189 |
1190 | def start_profiling_section(self, event_name):
1191 | """
1192 | Starts a profiling section with the given event name.
1193 |
1194 | Parameters
1195 | ----------
1196 | event_name : str
1197 | Name of the event to profile.
1198 |
1199 | Examples
1200 | --------
1201 | Start a profiling section with the event name "EventOne":
1202 | >>> event_name = "EventOne"
1203 | >>> participant.start_profiling_section(event_name)
1204 | """
1205 | self.thisptr.startProfilingSection(convert(event_name))  # delegates to the C++ Participant::startProfilingSection
1206 |
1207 | def stop_last_profiling_section(self):
1208 | """
1209 | Stops the last profiling section.
1210 |
1211 | Examples
1212 | --------
1213 | Stop the last profiling section:
1214 | >>> participant.stop_last_profiling_section()
1215 | """
1216 | self.thisptr.stopLastProfilingSection()  # delegates to the C++ Participant::stopLastProfilingSection
1217 |
1218 | def get_version_information ():
1219 | """
1220 | Returns
1221 | -------
1222 | Current preCICE version information
1223 | """
1224 | return CppParticipant.getVersionInformation()  # static C++ call; takes no self — presumably a module-level function, confirm placement
1225 |
--------------------------------------------------------------------------------
/docs/MigrationGuide.md:
--------------------------------------------------------------------------------
1 | # Migration Guide for Python language bindings for preCICE version 2.0
2 |
3 | ## Steps to move from old Python API to the new API
4 |
5 | ### 1. Python language bindings moved to a new repository in the preCICE Project
6 |
7 | Previously, the Python language bindings were part of the repository [`precice/precice`](https://github.com/precice/precice).
8 | The bindings have now been moved to the independent repository [`precice/python-bindings`](https://github.com/precice/python-bindings).
9 |
10 | The installation procedure is the same as before. Please refer to the [README](https://github.com/precice/python-bindings/blob/develop/README.md).
11 |
12 | ### 2. New initialization of `Interface`
13 |
14 | The initialization of the `Interface` object now initializes the solver and also configures it using the configuration
15 | file provided by the user.
16 |
17 | **Old:** Before preCICE Version 2 you had to call:
18 |
19 | ```python
20 | interface = precice.Interface(solverName, processRank, processSize)
21 | interface.configure(configFileName)
22 | ```
23 |
24 | **New:** The two commands have now been combined into a single one:
25 |
26 | ```python
27 | interface = precice.Interface(solverName, configFileName, processRank, processSize)
28 | ```
29 |
30 | ### 3. Reduced number of inputs arguments for API calls
31 |
32 | Unlike the old bindings, API calls now do not need the array size to be passed as an argument anymore. The bindings directly take the size of the array that you are providing.
33 |
34 | For example let us consider the call `write_block_vector_data`:
35 |
36 | **Old:** The previous call was:
37 |
38 | ```python
39 | interface.write_block_vector_data(writeDataID, writeDataSize, vertexIDs, writeDataArray)
40 | ```
41 |
42 | **New:** The new function call is:
43 |
44 | ```python
45 | interface.write_block_vector_data(writeDataID, vertexIDs, writeDataArray)
46 | ```
47 |
48 | The same change is applied for all other calls which work with arrays of data.
49 |
50 | ### 4. API functions use a return value, if appropriate
51 |
52 | In older versions of the python bindings arrays were modified by the API in a call-by-reference fashion. This means a pointer to the array was passed to the API as a function argument. This approach was changed and the API functions now directly return an array.
53 |
54 | For example let us consider the interface function `set_mesh_vertices`. `set_mesh_vertices` is used to register vertices for a mesh and it returns an array of `vertexIDs`.
55 |
56 | **Old:** The old signature of this function was:
57 |
58 | ```python
59 | vertexIDs = np.zeros(numberofVertices)
60 | interface.set_mesh_vertices(meshID, numberofVertices, grid, vertexIDs)
61 | ```
62 |
63 | Note that `vertexIDs` is passed as an argument to the function.
64 |
65 | **New:** This has now been changed to:
66 |
67 | ```python
68 | vertexIDs = interface.set_mesh_vertices(meshID, grid)
69 | ```
70 |
71 | Here, `vertexIDs` is directly returned by `set_mesh_vertices`.
72 |
73 | The same change has been applied to the functions `read_block_scalar_data` and `read_block_vector_data`.
74 |
75 | ### 5. Consequently use numpy arrays as data structure
76 |
77 | We consequently use numpy arrays for storing array data (multidimensional lists are still accepted). As an example, the `N` coupling mesh vertices of a mesh in `D` dimensions are represented as `grid = np.zeros([N, D])`. Previous versions of the bindings used either `grid = np.zeros([N, D])` (transposed version) or `grid = np.zeros(N*D)`. The same rule applies for data written and read in `write_block_vector_data` and `read_block_vector_data`.
78 |
--------------------------------------------------------------------------------
/docs/ReleaseGuide.md:
--------------------------------------------------------------------------------
1 | # Guide to release new version of python-bindings
2 |
3 | The developer who is releasing a new version of the python-bindings is expected to follow this workflow:
4 |
5 | The release of the `python-bindings` repository is made directly from a release branch called `python-bindings-v2.1.1.1`. This branch is mainly needed to help other developers with testing.
6 |
7 | 1. Create a branch called `python-bindings-v2.1.1.1` from the latest commit of the `develop` branch.
8 |
9 | 2. [Open a Pull Request `master` <-- `python-bindings-v2.1.1.1`](https://github.com/precice/python-bindings/compare/master...master) named after the version (i.e. `Release v2.1.1.1`) and briefly describe the new features of the release in the PR description.
10 |
11 | 3. Bump the version in the following places:
12 |
13 | * `CHANGELOG.md` on `python-bindings-v2.1.1.1`.
14 | * There is no need to bump the version anywhere else, since we use the [python-versioneer](https://github.com/python-versioneer/python-versioneer/) for maintaining the version everywhere else.
15 |
16 | 4. *Optional* test the [py-pyprecice Spack package](https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/py-pyprecice/package.py) using `spack dev-build py-pyprecice@develop`.
17 |
18 | 5. [Draft a New Release](https://github.com/precice/python-bindings/releases/new) in the `Releases` section of the repository page in a web browser.
19 |
20 | * The release tag needs to be the exact version number (i.e.`v2.1.1.1` or `v2.1.1.1rc1`, compare to [existing tags](https://github.com/precice/python-bindings/tags)).
21 | * If this is a stable release, use `@target:master`. If this is a pre-release, use `@target:python-bindings-v2.1.1.1`. If you are making a pre-release, **directly skip to the [pre-release](#pre-release) section below**.
22 | * Release title is also the version number (i.e. `v2.1.1.1` or `v2.1.1.1rc1`, compare to [existing releases](https://github.com/precice/python-bindings/tags)).
23 |
24 | 6. As soon as one approving review is made, merge the release PR (from `python-bindings-v2.1.1.1`) into `master`.
25 |
26 | 7. Merge `master` into `develop` for synchronization of `develop`.
27 |
28 | 8. If everything is in order up to this point then the new version can be released by hitting the "Publish release" button in your release Draft. This will create the corresponding tag and trigger [publishing the release to PyPI](https://github.com/precice/python-bindings/actions?query=workflow%3A%22Upload+Python+Package%22).
29 |
30 | 9. There should now be a tag corresponding to the release on `master`. Re-run the [docker release workflow `build-docker.yml` via dispatch](https://github.com/precice/python-bindings/actions/workflows/build-docker.yml) such that the correct version is picked up by `versioneer`. Check the version in the container via `docker pull precice/python-bindings`, then `docker run -ti precice/python-bindings`, and inside the container `$ python3 -c "import precice; print(precice.__version__)"`. ⚠️ There is an open issue that needs fixing https://github.com/precice/python-bindings/issues/195 ⚠️
31 |
32 | 10. Add an empty commit (details https://github.com/precice/python-bindings/issues/109) on master by running the steps:
33 |
34 | ```bash
35 | git checkout master
36 | git commit --allow-empty -m "post-tag bump"
37 | git push
38 | ```
39 |
40 | Check that everything is in order via `git log`. Important: The `tag` and `origin/master` should not point to the same commit. For example:
41 |
42 | ```bash
43 | commit 44b715dde4e3194fa69e61045089ca4ec6925fe3 (HEAD -> master, origin/master)
44 | Author: Benjamin Rodenberg
45 | Date: Wed Oct 20 10:52:41 2021 +0200
46 |
47 | post-tag bump
48 |
49 | commit d2645cc51f84ad5eda43b9c673400aada8e1505a (tag: v2.3.0.1)
50 | Merge: 2039557 aca2354
51 | Author: Benjamin Rodenberg
52 | Date: Tue Oct 19 12:57:24 2021 +0200
53 |
54 | Merge pull request #132 from precice/python-bindings-v2.3.0.1
55 |
56 | Release v2.3.0.1
57 | ```
58 |
59 | For more details refer to https://github.com/precice/python-bindings/issues/109 and https://github.com/python-versioneer/python-versioneer/issues/217.
60 |
61 | 11. *Temporarily not maintained* Update the [py-pyprecice Spack package](https://github.com/spack/spack/blob/develop/var/spack/repos/builtin/packages/py-pyprecice/package.py).
62 |
63 | ## Pre-release
64 |
65 | After creating the branch and drafting a release, directly hit the "Publish release" button in your Release Draft. Please note that the release branch is not merged into the master branch during a pre-release. Merging is done only for the stable release. You can check the pre-release artifacts (e.g. release on [PyPI](https://pypi.org/project/pyprecice/#history)) of the release. No further action is required for a pre-release.
66 |
--------------------------------------------------------------------------------
/examples/solverdummy/.gitignore:
--------------------------------------------------------------------------------
1 | *events.json
2 | venv
3 | .idea
4 | *.log
5 | __pycache__
6 |
--------------------------------------------------------------------------------
/examples/solverdummy/README.md:
--------------------------------------------------------------------------------
1 | # Solverdummies
2 |
3 | ## Install Dependencies
4 |
5 | * [preCICE](https://github.com/precice/precice)
6 | * [python bindings](https://github.com/precice/python-bindings)
7 | * Run in this directory `pip3 install --user -r requirements.txt`
8 |
9 | ## Run
10 |
11 | You can test the dummy solver by coupling two instances with each other. Open two terminals and run
12 |
13 | * `python3 solverdummy.py precice-config.xml SolverOne`
14 | * `python3 solverdummy.py precice-config.xml SolverTwo`
15 |
16 | ## Next Steps
17 |
18 | If you want to couple any other solver against this dummy solver be sure to adjust the preCICE configuration (participant names, mesh names, data names etc.) to the needs of your solver, compare our [step-by-step guide for new adapters](https://github.com/precice/precice/wiki/Adapter-Example).
19 |
--------------------------------------------------------------------------------
/examples/solverdummy/precice-config.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
39 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/examples/solverdummy/requirements.txt:
--------------------------------------------------------------------------------
1 | pyprecice~=3.0
2 | argparse>=1.4
3 | numpy >1, <2
4 |
--------------------------------------------------------------------------------
/examples/solverdummy/solverdummy.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 |
3 | import argparse
4 | import numpy as np
5 | import precice
6 |
7 | parser = argparse.ArgumentParser()
8 | parser.add_argument("configurationFileName",
9 | help="Name of the xml config file.", type=str)
10 | parser.add_argument("participantName", help="Name of the solver.", type=str)
11 |
12 | try:
13 | args = parser.parse_args()
14 | except SystemExit:
15 | print("")
16 | print("Usage: python ./solverdummy precice-config participant-name")
17 | quit()
18 |
19 | configuration_file_name = args.configurationFileName
20 | participant_name = args.participantName
21 |
22 | if participant_name == 'SolverOne':
23 | write_data_name = 'Data-One'
24 | read_data_name = 'Data-Two'
25 | mesh_name = 'SolverOne-Mesh'
26 |
27 | if participant_name == 'SolverTwo':
28 | read_data_name = 'Data-One'
29 | write_data_name = 'Data-Two'
30 | mesh_name = 'SolverTwo-Mesh'
31 |
32 | num_vertices = 3 # Number of vertices
33 |
34 | solver_process_index = 0
35 | solver_process_size = 1
36 |
37 | participant = precice.Participant(participant_name, configuration_file_name,
38 | solver_process_index, solver_process_size)
39 |
40 | assert (participant.requires_mesh_connectivity_for(mesh_name) is False)
41 |
42 | vertices = np.zeros((num_vertices, participant.get_mesh_dimensions(mesh_name)))
43 | read_data = np.zeros((num_vertices, participant.get_data_dimensions(mesh_name, read_data_name)))
44 | write_data = np.zeros((num_vertices, participant.get_data_dimensions(mesh_name, write_data_name)))
45 |
46 | for x in range(num_vertices):
47 | for y in range(participant.get_mesh_dimensions(mesh_name)):
48 | vertices[x, y] = x
49 |
50 | for y in range(participant.get_data_dimensions(mesh_name, read_data_name)):
51 | read_data[x, y] = x
52 |
53 | for y in range(participant.get_data_dimensions(mesh_name, write_data_name)):
54 | write_data[x, y] = x
55 |
56 | vertex_ids = participant.set_mesh_vertices(mesh_name, vertices)
57 |
58 | participant.initialize()
59 |
60 | while participant.is_coupling_ongoing():
61 | if participant.requires_writing_checkpoint():
62 | print("DUMMY: Writing iteration checkpoint")
63 |
64 | dt = participant.get_max_time_step_size()
65 | read_data = participant.read_data(mesh_name, read_data_name, vertex_ids, dt)
66 |
67 | write_data = read_data + 1
68 |
69 | participant.write_data(mesh_name, write_data_name, vertex_ids, write_data)
70 |
71 | print("DUMMY: Advancing in time")
72 | participant.advance(dt)
73 |
74 | if participant.requires_reading_checkpoint():
75 | print("DUMMY: Reading iteration checkpoint")
76 |
77 | participant.finalize()
78 | print("DUMMY: Closing python solver dummy...")
79 |
--------------------------------------------------------------------------------
/precice/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | __init__.c
3 |
--------------------------------------------------------------------------------
/precice/__init__.py:
--------------------------------------------------------------------------------
"""Python bindings for preCICE (package init).

Re-exports the Cython extension's public API and sets ``__version__``
via versioneer.
"""
from ._version import get_versions
# Placeholder in case anything below fails before the real value is set.
__version__ = "unknown"

import warnings  # noqa: F401 -- kept: part of the public module namespace
from cyprecice import Participant, get_version_information  # noqa: F401

# Determine the version exactly once via the versioneer-generated module.
# (Previously the version was computed twice through two different imports;
# the first result was immediately overwritten, so the duplicate call has
# been removed.)
from . import _version
__version__ = _version.get_versions()['version']
del get_versions  # keep the namespace clean; use precice.__version__
13 |
--------------------------------------------------------------------------------
/precice/_version.py:
--------------------------------------------------------------------------------
1 |
2 | # This file helps to compute a version number in source trees obtained from
3 | # git-archive tarball (such as those provided by githubs download-from-tag
4 | # feature). Distribution tarballs (built by setup.py sdist) and build
5 | # directories (produced by setup.py build) will contain a much shorter file
6 | # that just contains the computed version number.
7 |
8 | # This file is released into the public domain.
9 | # Generated by versioneer-0.29
10 | # https://github.com/python-versioneer/python-versioneer
11 |
12 | """Git implementation of _version.py."""
13 |
14 | import errno
15 | import os
16 | import re
17 | import subprocess
18 | import sys
19 | from typing import Any, Callable, Dict, List, Optional, Tuple
20 | import functools
21 |
22 |
def get_keywords() -> Dict[str, str]:
    """Return the git-archive substitution keywords for version lookup."""
    # The string values below are substituted by git during 'git archive'.
    # setup.py/versioneer.py greps for the variable names, so each of the
    # three assignments must stay on a line of its own; _version.py itself
    # only calls get_keywords().
    git_refnames = " (HEAD -> develop)"
    git_full = "43a82f4a4c1d101331be2b17093653b0079a01d4"
    git_date = "2025-05-13 09:01:04 +0200"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
34 |
35 |
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""

    # Attributes are declared via annotations only and assigned by
    # get_config(); see there for the concrete values used by this project.
    VCS: str                 # version-control system, e.g. "git"
    style: str               # version rendering style, e.g. "pep440"
    tag_prefix: str          # prefix stripped from release tags, e.g. "v"
    parentdir_prefix: str    # dirname prefix used by versions_from_parentdir()
    versionfile_source: str  # path of this file relative to the project root
    verbose: bool            # emit diagnostic prints when True
45 |
46 |
def get_config() -> VersioneerConfig:
    """Create, populate and return the VersioneerConfig() object."""
    # The concrete values below were baked in when 'setup.py versioneer'
    # generated this _version.py.
    config = VersioneerConfig()
    config.VCS = "git"
    config.style = "pep440"
    config.tag_prefix = "v"
    config.parentdir_prefix = "precice-"
    config.versionfile_source = "precice/_version.py"
    config.verbose = False
    return config
59 |
60 |
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Used as control flow between the version-discovery strategies: each
    strategy raises this so get_versions() can fall through to the next.
    """
63 |
64 |
65 | LONG_VERSION_PY: Dict[str, str] = {}
66 | HANDLERS: Dict[str, Dict[str, Callable]] = {}
67 |
68 |
def register_vcs_handler(vcs: str, method: str) -> Callable:  # decorator
    """Create decorator to mark a method as the handler of a VCS."""
    def decorate(f: Callable) -> Callable:
        """Record f as HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
78 |
79 |
def run_command(
    commands: List[str],
    args: List[str],
    cwd: Optional[str] = None,
    verbose: bool = False,
    hide_stderr: bool = False,
    env: Optional[Dict[str, str]] = None,
) -> Tuple[Optional[str], Optional[int]]:
    """Call the given command(s).

    Each name in ``commands`` is tried in order with the same ``args``
    until one can be started (callers pass e.g. ["git.cmd", "git.exe"] on
    Windows).  Returns ``(stdout, returncode)`` with stdout decoded and
    stripped on success, ``(None, returncode)`` when the process exited
    non-zero, or ``(None, None)`` when no command could be run at all.
    """
    assert isinstance(commands, list)
    process = None

    popen_kwargs: Dict[str, Any] = {}
    if sys.platform == "win32":
        # This hides the console window if pythonw.exe is used
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs["startupinfo"] = startupinfo

    for command in commands:
        try:
            dispcmd = str([command] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None), **popen_kwargs)
            break
        except OSError as e:
            # ENOENT: this candidate executable does not exist; try the next.
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for/else: reached only when no candidate could be started at all.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
126 |
127 |
def versions_from_parentdir(
    parentdir_prefix: str,
    root: str,
    verbose: bool,
) -> Dict[str, Any]:
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  The given root and up to
    two parent levels are searched for a directory name carrying the
    prefix; the remainder of that name is taken as the version.
    """
    tried = []
    current = root
    for _ in range(3):
        basename = os.path.basename(current)
        if basename.startswith(parentdir_prefix):
            return {
                "version": basename[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(current)
        current = os.path.dirname(current)  # ascend one level

    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
154 |
155 |
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs: str) -> Dict[str, str]:
    """Extract version information from the given file.

    Scans a _version.py for the git_refnames/git_full/git_date assignments
    with a regexp and returns the quoted values keyed as
    "refnames"/"full"/"date"; keys are absent when not found or when the
    file is unreadable.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords: Dict[str, str] = {}
    try:
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except OSError:
        # Missing/unreadable file: fall through and return what was found.
        pass
    return keywords
182 |
183 |
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(
    keywords: Dict[str, str],
    tag_prefix: str,
    verbose: bool,
) -> Dict[str, Any]:
    """Get version information from git keywords.

    ``keywords`` are the git-archive substitutions from get_keywords().
    Raises NotThisMethod when they are absent or unexpanded (i.e. we are
    not inside a git-archive tarball); otherwise returns the standard
    version-info dict with the best matching tag, or "0+unknown" when no
    suitable tag is present in the refnames.
    """
    if "refnames" not in keywords:
        raise NotThisMethod("Short version file found")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line. Previous lines may contain GPG signature
        # information.
        date = date.splitlines()[-1]

        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {r for r in refs if re.search(r'\d', r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            # Filter out refs that exactly match prefix or that don't start
            # with a number once the prefix is stripped (mostly a concern
            # when prefix is '')
            if not re.match(r'\d', r):
                continue
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
250 |
251 |
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(
    tag_prefix: str,
    root: str,
    verbose: bool,
    runner: Callable = run_command
) -> Dict[str, Any]:
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, branch, closest-tag,
    distance, dirty, date, error.  Raises NotThisMethod when the basic
    git queries fail (e.g. not a git checkout).
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]

    # GIT_DIR can interfere with correct operation of Versioneer.
    # It may be intended to be passed to the Versioneer-versioned project,
    # but that should not change where we get our version from.
    env = os.environ.copy()
    env.pop("GIT_DIR", None)
    runner = functools.partial(runner, env=env)

    _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
                   hide_stderr=not verbose)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")

    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = runner(GITS, [
        "describe", "--tags", "--dirty", "--always", "--long",
        "--match", f"{tag_prefix}[[:digit:]]*"
    ], cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces: Dict[str, Any] = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
                             cwd=root)
    # --abbrev-ref was added in git-1.6.3
    if rc != 0 or branch_name is None:
        raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
    branch_name = branch_name.strip()

    if branch_name == "HEAD":
        # If we aren't exactly on a branch, pick a branch which represents
        # the current commit. If all else fails, we are on a branchless
        # commit.
        branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
        # --contains was added in git-1.5.4
        if rc != 0 or branches is None:
            raise NotThisMethod("'git branch --contains' returned error")
        branches = branches.split("\n")

        # Remove the first line if we're running detached
        if "(" in branches[0]:
            branches.pop(0)

        # Strip off the leading "* " from the list of branches.
        branches = [branch[2:] for branch in branches]
        if "master" in branches:
            branch_name = "master"
        elif not branches:
            branch_name = None
        else:
            # Pick the first branch that is returned. Good or bad.
            branch_name = branches[0]

    pieces["branch"] = branch_name

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root)
        pieces["distance"] = len(out.split())  # total number of commits

    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): runner() returns (None, rc) on failure, which would make
    # [0].strip() raise AttributeError -- this assumes git keeps working
    # since the earlier git calls above already succeeded.
    date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
    # Use only the last line. Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)

    return pieces
388 |
389 |
def plus_or_dot(pieces: Dict[str, Any]) -> str:
    """Return the separator for the next local-version segment.

    Returns "." when the closest tag already contains a "+" (a local
    segment was already started), else "+".
    """
    # 'closest-tag' may be present but None (no tag found, see
    # git_pieces_from_vcs); `or ""` guards the membership test against
    # None, which dict.get's default alone does not.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
395 |
396 |
def render_pep440(pieces: Dict[str, Any]) -> str:
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            # append the local version segment: +DISTANCE.gHEX[.dirty]
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
420 |
421 |
def render_pep440_branch(pieces: Dict[str, Any]) -> str:
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch. Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            # non-master branches are marked with a .dev0 pre-release tag
            if pieces["branch"] != "master":
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0"
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += "+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
450 |
451 |
def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]:
    """Split pep440 version string at the post-release segment.

    Returns the release segments before the post-release and the
    post-release version number (or -1 if no post-release segment is present).
    """
    parts = ver.split(".post")
    if len(parts) != 2:
        # no ".post" (or more than one): no well-formed post segment
        return parts[0], None
    # an empty trailing segment (bare ".post") counts as post-release 0
    return parts[0], int(parts[1] or 0)
460 |
461 |
def render_pep440_pre(pieces: Dict[str, Any]) -> str:
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    if pieces["closest-tag"]:
        if pieces["distance"]:
            # update the post release segment
            tag_version, post_version = pep440_split_post(pieces["closest-tag"])
            rendered = tag_version
            if post_version is not None:
                # tag already carried .postN: bump N and mark dev distance
                rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"])
            else:
                rendered += ".post0.dev%d" % (pieces["distance"])
        else:
            # no commits, use the tag as the version
            rendered = pieces["closest-tag"]
    else:
        # exception #1
        rendered = "0.post0.dev%d" % pieces["distance"]
    return rendered
484 |
485 |
def render_pep440_post(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            # local segment records the short commit hash
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
    return rendered
511 |
512 |
def render_pep440_post_branch(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            # unlike render_pep440_post, .dev0 here marks a non-master branch
            if pieces["branch"] != "master":
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["branch"] != "master":
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
540 |
541 |
def render_pep440_old(pieces: Dict[str, Any]) -> str:
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        parts = [tag]
        if pieces["distance"] or pieces["dirty"]:
            parts.append(".post%d" % pieces["distance"])
            if pieces["dirty"]:
                parts.append(".dev0")
    else:
        # exception #1: no tag at all, synthesize a "0.postN" version
        parts = ["0.post%d" % pieces["distance"]]
        if pieces["dirty"]:
            parts.append(".dev0")
    return "".join(parts)
562 |
563 |
def render_git_describe(pieces: Dict[str, Any]) -> str:
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag found, fall back to the bare short hash
        rendered = pieces["short"]
    elif pieces["distance"]:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        rendered = tag
    return rendered + ("-dirty" if pieces["dirty"] else "")
582 |
583 |
def render_git_describe_long(pieces: Dict[str, Any]) -> str:
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "-".join([tag, str(pieces["distance"]),
                             "g%s" % pieces["short"]])
    else:
        # exception #1: no tag found, fall back to the bare short hash
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
602 |
603 |
def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]:
    """Render the given version pieces into the requested style.

    Dispatches to one of the render_* helpers above and wraps the result
    in the standard version-info dict (version, full-revisionid, dirty,
    error, date).  Raises ValueError for an unknown style.
    """
    if pieces["error"]:
        # propagate parse errors from git_pieces_from_vcs unchanged
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}

    if not style or style == "default":
        style = "pep440"  # the default

    if style == "pep440":
        rendered = render_pep440(pieces)
    elif style == "pep440-branch":
        rendered = render_pep440_branch(pieces)
    elif style == "pep440-pre":
        rendered = render_pep440_pre(pieces)
    elif style == "pep440-post":
        rendered = render_pep440_post(pieces)
    elif style == "pep440-post-branch":
        rendered = render_pep440_post_branch(pieces)
    elif style == "pep440-old":
        rendered = render_pep440_old(pieces)
    elif style == "git-describe":
        rendered = render_git_describe(pieces)
    elif style == "git-describe-long":
        rendered = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
638 |
639 |
def get_versions() -> Dict[str, Any]:
    """Get version information or return default if unable to do so."""
    # This function lives in _version.py at ROOT/VERSIONFILE_SOURCE. When
    # __file__ is available we can walk back up to the project root; some
    # py2exe/bbfreeze/non-CPython interpreters lack __file__, in which case
    # only the expanded keywords can be used.
    cfg = get_config()
    verbose = cfg.verbose

    # Strategy 1: expanded VCS keywords (works in exported tarballs).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    # Locate the source-tree root by stripping one path component per
    # element of versionfile_source from this file's real path.
    try:
        root = os.path.realpath(__file__)
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}

    # Strategy 2: ask git directly.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose),
                      cfg.style)
    except NotThisMethod:
        pass

    # Strategy 3: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
684 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
[build-system]
# PEP 518 - minimum build system requirements
# NOTE(review): setuptools appears to be capped below 72 because setup.py
# still imports setuptools.command.test, which was removed in setuptools 72
# — confirm before lifting the pin.
requires = ["setuptools>=61,<72", "wheel", "Cython>=0.29", "packaging", "pip>=19.0.0", "numpy", "mpi4py", "pkgconfig"]
4 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
[metadata]
# This includes the license file(s) in the wheel.
# https://wheel.readthedocs.io/en/stable/user_guide.html#including-license-files-in-the-generated-wheel-file
license_files = LICENSE.txt

[bdist_wheel]
# This flag says to generate wheels that support both Python 2 and Python
# 3. If your code will not run unchanged on both Python 2 and 3, you will
# need to generate separate wheels for each Python version that you
# support. Removing this line (or setting universal to 0) will prevent
# bdist_wheel from trying to make a universal wheel. For more see:
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#wheels
# NOTE(review): this project builds a C++ extension and setup.py declares
# python_requires >= 3, so a universal (py2.py3) wheel tag looks unwanted
# here — confirm and consider removing this flag.
universal=1

[versioneer]
# Configuration consumed by versioneer.get_version()/get_cmdclass() in
# setup.py; versionfile_source/build point at precice/_version.py.
VCS = git
style = pep440
versionfile_source = precice/_version.py
versionfile_build = precice/_version.py
tag_prefix = v
parentdir_prefix = precice-
22 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
import os
import sys
import warnings
import versioneer

# Heuristic: when pip drives the build, setup.py runs from within pip's
# environment, so "pip" shows up in this file's path.
uses_pip = "pip" in __file__

if uses_pip:
    # If installed with pip we need to check its version
    try:
        import pip
    except ModuleNotFoundError:
        raise Exception(
            "It looks like you are trying to use pip for installation of the package, but pip is not "
            "installed on your system (or cannot be found). This can lead to problems with missing "
            "dependencies. Please make sure that pip is discoverable. Try python3 -c 'import pip'. "
            "Alternatively, you can also run python3 setup.py install --user.")
    try:
        from packaging import version
    except ModuleNotFoundError:
        # A missing "packaging" only disables the pip-version check below,
        # so warn and continue rather than aborting the installation.
        warnings.warn(
            "It looks like you are trying to use pip for installation of the package. Please install, "
            "the module packaging by running 'pip3 install --user packaging', since it is needed to perform "
            "additional security checks. You can continue installation. However, if you face problems when "
            "installing or running pyprecice, it might be a good idea to install packaging to enable "
            "additional checks.")
    # Only compare versions when both imports above actually succeeded.
    if "pip" in sys.modules and "packaging" in sys.modules:
        if version.parse(pip.__version__) < version.parse("19.0"):
            # version 19.0 is required, since we are using pyproject.toml for definition of build-time dependencies.
            # See https://pip.pypa.io/en/stable/news/#id209
            raise Exception(
                "You are using pip version {}. However, pip version >= 19.0 is required. Please upgrade "
                "your pip installation via 'pip3 install --upgrade pip'. You might have to add the --user"
                " flag.".format(pip.__version__))
35 |
36 | from setuptools import setup
37 | from setuptools.command.test import test
38 | from setuptools.command.install import install
39 | from Cython.Distutils.extension import Extension
40 | from Cython.Distutils.build_ext import new_build_ext as build_ext
41 | from Cython.Build import cythonize
42 | import numpy
43 | import pkgconfig
44 |
45 |
46 | # name of Interfacing API
47 | APPNAME = "pyprecice"
48 |
49 | PYTHON_BINDINGS_PATH = os.path.dirname(os.path.abspath(__file__))
50 |
51 |
def get_extensions(is_test):
    """Build the list of Cython Extension objects for the bindings.

    When ``is_test`` is true, the fake Participant from test/Participant.cpp
    is compiled into the extension instead of linking against the real
    preCICE library.
    """
    sources = [os.path.join(PYTHON_BINDINGS_PATH, "cyprecice",
                            "cyprecice" + ".pyx")]

    # Fail early with a helpful message when preCICE is not discoverable.
    if not pkgconfig.exists('libprecice'):
        raise Exception("\n".join([
            "pkg-config was unable to find libprecice.",
            "Please make sure that preCICE was installed correctly and pkg-config is able to find it.",
            "You may need to set PKG_CONFIG_PATH to include the location of the libprecice.pc file.",
            "Use \"pkg-config --modversion libprecice\" for debugging."]))

    print("Found preCICE version " + pkgconfig.modversion('libprecice'))

    compile_args = ["-std=c++17"] + pkgconfig.cflags('libprecice').split()
    # Test builds replace the real library with the fake Participant, so
    # they must not link against libprecice.
    link_args = [] if is_test else pkgconfig.libs('libprecice').split()
    if is_test:
        sources.append(os.path.join(PYTHON_BINDINGS_PATH, "test",
                                    "Participant.cpp"))

    return [
        Extension(
            "cyprecice",
            sources=sources,
            libraries=[],
            language="c++",
            include_dirs=[numpy.get_include()],
            extra_compile_args=compile_args,
            extra_link_args=link_args,
            define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")]
        )
    ]
90 |
91 |
class my_build_ext(build_ext, object):
    """build_ext that cythonizes the bindings on demand."""

    def finalize_options(self):
        # Default to a regular (non-test) build when the test command has
        # not marked the distribution beforehand.
        if not hasattr(self.distribution, "is_test"):
            self.distribution.is_test = False

        # Generate the extension modules lazily so pkg-config and numpy are
        # only consulted when a build actually happens.
        if not self.distribution.ext_modules:
            self.distribution.ext_modules = cythonize(
                get_extensions(self.distribution.is_test),
                compiler_directives={'language_level': "3"})

        super().finalize_options()
105 |
106 |
class my_install(install, object):
    """install command that cythonizes the bindings on demand."""

    def finalize_options(self):
        # Default to a regular (non-test) build when the test command has
        # not marked the distribution beforehand.
        if not hasattr(self.distribution, "is_test"):
            self.distribution.is_test = False

        # Mirror my_build_ext: generate the extension modules lazily.
        if not self.distribution.ext_modules:
            self.distribution.ext_modules = cythonize(
                get_extensions(self.distribution.is_test),
                compiler_directives={'language_level': "3"})

        super().finalize_options()
120 |
121 |
class my_test(test, object):
    # Marks the distribution so that my_build_ext/my_install compile the
    # bindings against the fake test Participant (see get_extensions)
    # instead of linking the real preCICE library.
    def initialize_options(self):
        self.distribution.is_test = True
        super().initialize_options()
126 |
127 |
# Use the README as the long package description shown on PyPI.
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md'), encoding='utf-8') as readme_file:
    long_description = readme_file.read()

# Command overrides: hook the lazy Cython build and the test mode into
# setuptools; versioneer wraps them for version handling below.
my_cmdclass = {
    'test': my_test,
    'build_ext': my_build_ext,
    'install': my_install,
}

# build precice.so python extension to be added to "PYTHONPATH" later
setup(
    name=APPNAME,
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(my_cmdclass),
    description='Python language bindings for the preCICE coupling library',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/precice/python-bindings',
    author='the preCICE developers',
    author_email='info@precice.org',
    license='LGPL-3.0',
    python_requires='>=3',
    install_requires=['numpy', 'mpi4py', 'Cython'],
    # mpi4py is only needed, if preCICE was compiled with MPI
    # see https://github.com/precice/python-bindings/issues/8
    packages=['precice'],
    zip_safe=False  # needed because setuptools are used
)
156 |
--------------------------------------------------------------------------------
/test/.gitignore:
--------------------------------------------------------------------------------
1 | *.json
2 | *.cpython*
3 | test_bindings_module.cpp
4 |
--------------------------------------------------------------------------------
/test/Participant.cpp:
--------------------------------------------------------------------------------
1 | #include "precice/Participant.hpp"
2 | #include "precice/Tooling.hpp"
3 | #include
4 | #include
5 | #include
6 | #include
7 |
8 | std::string fake_version;
9 | std::vector fake_read_write_buffer;
10 | int fake_mesh_dimensions;
11 | int fake_scalar_data_dimensions;
12 | int fake_vector_data_dimensions;
13 | std::vector fake_ids;
14 | int n_fake_vertices;
15 | std::string fake_mesh_name;
16 | std::string fake_scalar_data_name;
17 | std::string fake_vector_data_name;
18 | int fake_data_id;
19 | std::vector fake_bounding_box;
20 | std::vector fake_coordinates;
21 |
22 | namespace precice {
23 |
24 | namespace impl{
25 | class ParticipantImpl{};
26 | }
27 |
28 | Participant:: Participant
29 | (
30 | precice::string_view participantName,
31 | precice::string_view configurationFileName,
32 | int solverProcessIndex,
33 | int solverProcessSize )
34 | {
35 | fake_version = "dummy";
36 | fake_read_write_buffer = std::vector();
37 | fake_mesh_dimensions = 3;
38 | fake_scalar_data_dimensions = 1;
39 | fake_vector_data_dimensions = 3;
40 | fake_data_id = 15;
41 | fake_mesh_name = "FakeMesh";
42 | fake_scalar_data_name = "FakeScalarData";
43 | fake_vector_data_name = "FakeVectorData";
44 | n_fake_vertices = 3;
45 | fake_ids.resize(n_fake_vertices);
46 | std::iota(fake_ids.begin(), fake_ids.end(), 0);
47 | fake_bounding_box.resize(fake_mesh_dimensions*2);
48 | std::iota(fake_bounding_box.begin(), fake_bounding_box.end(), 0);
49 | fake_coordinates.resize(n_fake_vertices*fake_mesh_dimensions);
50 | std::iota(fake_coordinates.begin(), fake_coordinates.end(), 0);
51 | }
52 |
53 | Participant::Participant(
54 | precice::string_view participantName,
55 | precice::string_view configurationFileName,
56 | int solverProcessIndex,
57 | int solverProcessSize,
58 | void * communicator)
59 | {
60 | fake_version = "dummy";
61 | fake_read_write_buffer = std::vector();
62 | fake_mesh_dimensions = 3;
63 | fake_scalar_data_dimensions = 1;
64 | fake_vector_data_dimensions = 3;
65 | fake_data_id = 15;
66 | fake_mesh_name = "FakeMesh";
67 | fake_scalar_data_name = "FakeScalarData";
68 | fake_vector_data_name = "FakeVectorData";
69 | n_fake_vertices = 3;
70 | fake_ids.resize(n_fake_vertices);
71 | std::iota(fake_ids.begin(), fake_ids.end(), 0);
72 | fake_bounding_box.resize(fake_mesh_dimensions*2);
73 | std::iota(fake_bounding_box.begin(), fake_bounding_box.end(), 0);
74 | }
75 |
76 | Participant::~Participant() = default;
77 |
78 | void Participant:: initialize()
79 | {
80 | }
81 |
82 | void Participant:: advance
83 | (
84 | double computedTimestepLength)
85 | {
86 | }
87 |
88 | void Participant:: finalize()
89 | {}
90 |
91 | int Participant:: getMeshDimensions
92 | (
93 | precice::string_view meshName) const
94 | {
95 | return fake_mesh_dimensions;
96 | }
97 |
98 | int Participant:: getDataDimensions
99 | (
100 | precice::string_view meshName,
101 | precice::string_view dataName) const
102 | {
103 | if (dataName.data() == fake_scalar_data_name) {
104 | return fake_scalar_data_dimensions;
105 | } else if (dataName.data() == fake_vector_data_name) {
106 | return fake_vector_data_dimensions;
107 | } else {
108 | return -1;
109 | }
110 | }
111 |
112 | bool Participant:: isCouplingOngoing() const
113 | {
114 | return 0;
115 | }
116 |
117 | bool Participant:: isTimeWindowComplete() const
118 | {
119 | return 0;
120 | }
121 |
122 | double Participant:: getMaxTimeStepSize() const
123 | {
124 | return -1.0;
125 | }
126 |
127 | bool Participant:: requiresInitialData()
128 | {
129 | return 0;
130 | }
131 |
132 | bool Participant:: requiresReadingCheckpoint()
133 | {
134 | return 0;
135 | }
136 |
137 | bool Participant:: requiresWritingCheckpoint()
138 | {
139 | return 0;
140 | }
141 |
142 | bool Participant:: requiresMeshConnectivityFor
143 | (
144 | precice::string_view meshName) const
145 | {
146 | return 0;
147 | }
148 |
149 | bool Participant:: requiresGradientDataFor
150 | (
151 | precice::string_view meshName,
152 | precice::string_view dataName) const
153 | {
154 | return 0;
155 | }
156 |
157 | int Participant:: setMeshVertex
158 | (
159 | precice::string_view meshName,
160 | precice::span position )
161 | {
162 | return 0;
163 | }
164 |
165 | int Participant:: getMeshVertexSize
166 | (
167 | precice::string_view meshName) const
168 | {
169 | return n_fake_vertices;
170 | }
171 |
172 | void Participant:: setMeshVertices
173 | (
174 | precice::string_view meshName,
175 | precice::span positions,
176 | precice::span ids)
177 | {
178 | if(ids.size() > 0) {
179 | assert (ids.size() == fake_ids.size());
180 | std::copy(fake_ids.begin(), fake_ids.end(), ids.data());
181 | }
182 | }
183 |
184 | void Participant:: setMeshEdge
185 | (
186 | precice::string_view meshName,
187 | int firstVertexID,
188 | int secondVertexID)
189 | {}
190 |
191 | void Participant::setMeshEdges(
192 | precice::string_view meshName,
193 | precice::span vertices)
194 | {}
195 |
196 | void Participant:: setMeshTriangle
197 | (
198 | precice::string_view meshName,
199 | int firstVertexID,
200 | int secondVertexID,
201 | int thirdVertexID )
202 | {}
203 |
204 | void Participant:: setMeshTriangles
205 | (
206 | precice::string_view meshName,
207 | precice::span vertices )
208 | {}
209 |
210 | void Participant:: setMeshQuad
211 | (
212 | precice::string_view meshName,
213 | int firstVertexID,
214 | int secondVertexID,
215 | int thirdVertexID,
216 | int fourthVertexID )
217 | {}
218 |
219 | void Participant:: setMeshQuads
220 | (
221 | precice::string_view meshName,
222 | precice::span vertices)
223 | {}
224 |
225 | void Participant::setMeshTetrahedron
226 | (
227 | precice::string_view meshName,
228 | int firstVertexID,
229 | int secondVertexID,
230 | int thirdVertexID,
231 | int fourthVertexID)
232 | {}
233 |
234 | void Participant::setMeshTetrahedra
235 | (
236 | precice::string_view meshName,
237 | precice::span vertices)
238 | {}
239 |
240 | void Participant::resetMesh
241 | (
242 | precice::string_view meshName)
243 | {}
244 |
245 | void Participant:: writeData
246 | (
247 | precice::string_view meshName,
248 | precice::string_view dataName,
249 | precice::span vertices,
250 | precice::span values)
251 | {
252 | fake_read_write_buffer.clear();
253 |
254 | for(const double value: values) {
255 | fake_read_write_buffer.push_back(value);
256 | }
257 | }
258 |
259 | void Participant:: readData
260 | (
261 | precice::string_view meshName,
262 | precice::string_view dataName,
263 | precice::span vertices,
264 | double relativeReadTime,
265 | precice::span values) const
266 | {
267 | if (dataName.data() == fake_scalar_data_name) {
268 | for(const int id: vertices) {
269 | values[id] = fake_read_write_buffer[id];
270 | }
271 | } else if (dataName.data() == fake_vector_data_name) {
272 | for(const int id: vertices) {
273 | for(int d = 0; d < fake_vector_data_dimensions; d++) {
274 | const int linearized_id = fake_vector_data_dimensions * id + d;
275 | values[linearized_id] = fake_read_write_buffer[linearized_id];
276 | }
277 | }
278 | }
279 | }
280 |
281 | void Participant:: writeAndMapData
282 | (
283 | precice::string_view meshName,
284 | precice::string_view dataName,
285 | precice::span coordinates,
286 | precice::span values)
287 | {
288 | fake_read_write_buffer.clear();
289 |
290 | for(const double value: values) {
291 | fake_read_write_buffer.push_back(value);
292 | }
293 | }
294 |
295 | void Participant:: mapAndReadData
296 | (
297 | precice::string_view meshName,
298 | precice::string_view dataName,
299 | precice::span coordinates,
300 | double relativeReadTime,
301 | precice::span values) const
302 | {
303 | std::copy(fake_read_write_buffer.begin(), fake_read_write_buffer.end(), values.begin());
304 | }
305 |
306 | void Participant:: setMeshAccessRegion
307 | (
308 | precice::string_view meshName,
309 | precice::span boundingBox ) const
310 | {
311 | assert(meshName == fake_mesh_name);
312 |
313 | for(std::size_t i = 0; i < fake_bounding_box.size(); i++){
314 | assert(boundingBox[i] == fake_bounding_box[i]);
315 | }
316 | }
317 |
318 | void Participant:: getMeshVertexIDsAndCoordinates
319 | (
320 | precice::string_view meshName,
321 | precice::span valueIndices,
322 | precice::span coordinates ) const
323 | {
324 | assert(meshName == fake_mesh_name);
325 | assert(valueIndices.size() == fake_ids.size());
326 | assert(coordinates.size() == fake_coordinates.size());
327 |
328 | for(std::size_t i = 0; i < fake_ids.size(); i++){
329 | valueIndices[i] = fake_ids[i];
330 | }
331 | for(std::size_t i = 0; i < fake_coordinates.size(); i++){
332 | coordinates[i] = fake_coordinates[i];
333 | }
334 | }
335 |
336 | void Participant::writeGradientData(
337 | precice::string_view meshName,
338 | precice::string_view dataName,
339 | precice::span vertices,
340 | precice::span gradients)
341 | {
342 | fake_read_write_buffer.clear();
343 | for (const double gradient: gradients) {
344 | fake_read_write_buffer.push_back(gradient);
345 | }
346 | }
347 |
348 | void Participant::startProfilingSection(
349 | precice::string_view sectionName)
350 | {
351 | }
352 |
353 | void Participant::stopLastProfilingSection()
354 | {
355 | }
356 |
357 | std::string getVersionInformation()
358 | {
359 | return fake_version;
360 | }
361 |
362 | } // namespace precice
363 |
--------------------------------------------------------------------------------
/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/precice/python-bindings/43a82f4a4c1d101331be2b17093653b0079a01d4/test/__init__.py
--------------------------------------------------------------------------------
/test/test_bindings_module.py:
--------------------------------------------------------------------------------
1 | import precice
2 | from unittest import TestCase
3 | import numpy as np
4 | from mpi4py import MPI
5 |
6 |
7 | class TestBindings(TestCase):
8 | """
9 | Test suite to check correct behaviour of python bindings.
10 | """
11 |
    def test_constructor(self):
        # Constructing against the fake Participant must not raise.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        self.assertTrue(True)

    def test_constructor_custom_mpi_comm(self):
        # The bindings also accept an mpi4py communicator object.
        participant = precice.Participant(
            "test", "dummy.xml", 0, 1, MPI.COMM_WORLD)
        self.assertTrue(True)

    def test_version(self):
        # Accessing the version attribute must not raise.
        precice.__version__

    def test_get_mesh_dimensions(self):
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        # TODO: it would be nice to be able to mock the output of the interface
        # directly in the test, not in test/Participant.cpp
        fake_mesh_dimension = 3  # compare to test/Participant.cpp, fake_mesh_dimensions
        self.assertEqual(fake_mesh_dimension, participant.get_mesh_dimensions("dummy"))

    def test_get_data_dimensions(self):
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_scalar_data_dimension = 1  # compare to test/Participant.cpp, fake_scalar_data_dimensions
        self.assertEqual(fake_scalar_data_dimension, participant.get_data_dimensions("dummy", "FakeScalarData"))
        fake_vector_data_dimension = 3  # compare to test/Participant.cpp, fake_vector_data_dimensions
        self.assertEqual(fake_vector_data_dimension, participant.get_data_dimensions("dummy", "FakeVectorData"))
39 |
    def test_requires_mesh_connectivity_for(self):
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_bool = 0  # compare to output in test/Participant.cpp
        fake_mesh_name = "FakeMesh"
        self.assertEqual(fake_bool, participant.requires_mesh_connectivity_for(fake_mesh_name))

    def test_reset_mesh(self):
        # Resetting the fake mesh is a no-op and must not raise.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"
        participant.reset_mesh(fake_mesh_name)
50 |
    def test_set_mesh_vertices(self):
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        fake_dimension = 3  # compare to test/Participant.cpp, fake_mesh_dimensions
        n_fake_vertices = 3  # compare to test/Participant.cpp, n_fake_vertices
        positions = np.random.rand(n_fake_vertices, fake_dimension)
        expected_output = np.array(range(n_fake_vertices))
        actual_output = participant.set_mesh_vertices(fake_mesh_name, positions)
        self.assertTrue(np.array_equal(expected_output, actual_output))

    def test_set_mesh_vertices_empty(self):
        # An empty (0, dim) array of positions must yield an empty id array.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        fake_dimension = 3  # compare to test/Participant.cpp, fake_mesh_dimensions
        n_fake_vertices = 0  # deliberately empty input
        positions = np.zeros((n_fake_vertices, fake_dimension))
        expected_output = np.array(range(n_fake_vertices))
        actual_output = participant.set_mesh_vertices(fake_mesh_name, positions)
        self.assertTrue(np.array_equal(expected_output, actual_output))

    def test_set_mesh_vertices_list(self):
        # Positions given as a nested list instead of an ndarray.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        fake_dimension = 3  # compare to test/Participant.cpp, fake_mesh_dimensions
        n_fake_vertices = 3  # compare to test/Participant.cpp, n_fake_vertices
        positions = np.random.rand(n_fake_vertices, fake_dimension)
        positions = list(list(positions[i, j] for j in range(
            positions.shape[1])) for i in range(positions.shape[0]))
        expected_output = np.array(range(n_fake_vertices))
        actual_output = participant.set_mesh_vertices(fake_mesh_name, positions)
        self.assertTrue(np.array_equal(expected_output, actual_output))

    def test_set_mesh_vertices_empty_list(self):
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        positions = []
        n_fake_vertices = 0
        expected_output = np.array(range(n_fake_vertices))
        actual_output = participant.set_mesh_vertices(fake_mesh_name, positions)
        self.assertTrue(np.array_equal(expected_output, actual_output))

    def test_set_mesh_vertices_tuple(self):
        # Positions given as a nested tuple instead of an ndarray.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        fake_dimension = 3  # compare to test/Participant.cpp, fake_mesh_dimensions
        n_fake_vertices = 3  # compare to test/Participant.cpp, n_fake_vertices
        positions = np.random.rand(n_fake_vertices, fake_dimension)
        positions = tuple(tuple(positions[i, j] for j in range(
            positions.shape[1])) for i in range(positions.shape[0]))
        expected_output = np.array(range(n_fake_vertices))
        actual_output = participant.set_mesh_vertices(fake_mesh_name, positions)
        self.assertTrue(np.array_equal(expected_output, actual_output))

    def test_set_mesh_vertices_empty_tuple(self):
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        positions = ()
        n_fake_vertices = 0
        expected_output = np.array(range(n_fake_vertices))
        actual_output = participant.set_mesh_vertices(fake_mesh_name, positions)
        self.assertTrue(np.array_equal(expected_output, actual_output))

    def test_set_mesh_vertices_mixed(self):
        # Positions given as a list of tuples.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        fake_dimension = 3  # compare to test/Participant.cpp, fake_mesh_dimensions
        n_fake_vertices = 3  # compare to test/Participant.cpp, n_fake_vertices
        positions = np.random.rand(n_fake_vertices, fake_dimension)
        positions = list(tuple(positions[i, j] for j in range(
            positions.shape[1])) for i in range(positions.shape[0]))
        expected_output = np.array(range(n_fake_vertices))
        actual_output = participant.set_mesh_vertices(fake_mesh_name, positions)
        self.assertTrue(np.array_equal(expected_output, actual_output))
124 |
    def test_set_mesh_vertex(self):
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        fake_dimension = 3  # compare to test/Participant.cpp, fake_mesh_dimensions
        position = np.random.rand(fake_dimension)
        vertex_id = participant.set_mesh_vertex(fake_mesh_name, position)
        self.assertTrue(0 == vertex_id)

    def test_set_mesh_vertex_empty(self):
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        fake_dimension = 0  # deliberately empty position
        position = np.random.rand(fake_dimension)
        vertex_id = participant.set_mesh_vertex(fake_mesh_name, position)
        self.assertTrue(0 == vertex_id)

    def test_set_mesh_vertex_list(self):
        # Position given as a plain list instead of an ndarray.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        fake_dimension = 3  # compare to test/Participant.cpp, fake_mesh_dimensions
        position = list(np.random.rand(fake_dimension))
        vertex_id = participant.set_mesh_vertex(fake_mesh_name, position)
        self.assertTrue(0 == vertex_id)

    def test_set_mesh_vertex_empty_list(self):
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        position = []
        vertex_id = participant.set_mesh_vertex(fake_mesh_name, position)
        self.assertTrue(0 == vertex_id)

    def test_set_mesh_vertex_tuple(self):
        # Position given as a tuple instead of an ndarray.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        fake_dimension = 3  # compare to test/Participant.cpp, fake_mesh_dimensions
        position = tuple(np.random.rand(fake_dimension))
        vertex_id = participant.set_mesh_vertex(fake_mesh_name, position)
        self.assertTrue(0 == vertex_id)

    def test_set_mesh_vertex_empty_tuple(self):
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        position = ()
        vertex_id = participant.set_mesh_vertex(fake_mesh_name, position)
        self.assertTrue(0 == vertex_id)

    def test_get_mesh_vertex_size(self):
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        fake_mesh_name = "FakeMesh"  # compare to test/Participant.cpp, fake_mesh_name
        n_fake_vertices = 3  # compare to test/Participant.cpp, n_fake_vertices
        n_vertices = participant.get_mesh_vertex_size(fake_mesh_name)
        self.assertTrue(n_fake_vertices == n_vertices)
177 |
    def test_read_write_block_scalar_data(self):
        # The fake Participant echoes written data back on read.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        write_data = np.array([3, 7, 8], dtype=np.double)
        participant.write_data("FakeMesh", "FakeScalarData", np.array([0, 1, 2]), write_data)
        dt = 1
        read_data = participant.read_data("FakeMesh", "FakeScalarData", np.array([0, 1, 2]), dt)
        self.assertTrue(np.array_equal(write_data, read_data))

    def test_read_write_block_scalar_data_single_float(self):
        # Passing scalars where arrays are expected must raise TypeError.
        # NOTE(review): the read_data call below also omits the dt argument,
        # so the TypeError may come from the missing argument rather than
        # from the scalar vertex id — confirm which is intended.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        write_data = 8
        with self.assertRaises(TypeError):
            participant.write_data("FakeMesh", "FakeScalarData", 1, write_data)
        with self.assertRaises(TypeError):
            participant.read_data("FakeMesh", "FakeScalarData", 1)

    def test_read_write_block_scalar_data_empty(self):
        # Writing/reading zero vertices must round-trip to an empty result.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        write_data = np.array([])
        participant.write_data("FakeMesh", "FakeScalarData", [], write_data)
        dt = 1
        read_data = participant.read_data("FakeMesh", "FakeScalarData", [], dt)
        self.assertTrue(len(read_data) == 0)

    def test_read_write_block_scalar_data_non_contiguous(self):
        """
        Tests behaviour of the bindings if a non-contiguous array is passed to the interface.

        Note: Check whether np.ndarray is contiguous via np.ndarray.flags.
        """
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        dummy_array = np.random.rand(3, 3)
        write_data = dummy_array[:, 1]  # column slice -> not C-contiguous
        assert (write_data.flags["C_CONTIGUOUS"] is False)
        participant.write_data("FakeMesh", "FakeScalarData", np.array([0, 1, 2]), write_data)
        dt = 1
        read_data = participant.read_data("FakeMesh", "FakeScalarData", np.array([0, 1, 2]), dt)
        self.assertTrue(np.array_equal(write_data, read_data))

    def test_read_write_scalar_data(self):
        # Single-vertex scalar round trip.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        write_data = [3]
        participant.write_data("FakeMesh", "FakeScalarData", [0], write_data)
        dt = 1
        read_data = participant.read_data("FakeMesh", "FakeScalarData", [0], dt)
        self.assertTrue(np.array_equal(write_data, read_data))
224 |
    def test_read_write_block_vector_data(self):
        # Vector data round trip: one row per vertex, one column per component.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        write_data = np.array([[3, 7, 8],
                               [7, 6, 5]], dtype=np.double)
        participant.write_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data)
        dt = 1
        read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array([0, 1]), dt)
        self.assertTrue(np.array_equal(write_data, read_data))

    def test_read_write_block_vector_data_empty(self):
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        write_data = np.array([])
        participant.write_data("FakeMesh", "FakeVectorData", [], write_data)
        dt = 1
        read_data = participant.read_data("FakeMesh", "FakeVectorData", [], dt)
        self.assertTrue(len(read_data) == 0)

    def test_read_write_block_vector_data_list(self):
        # Vector data given as a nested list instead of an ndarray.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        write_data = [[3, 7, 8], [7, 6, 5]]
        participant.write_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data)
        dt = 1
        read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array([0, 1]), dt)
        self.assertTrue(np.array_equal(write_data, read_data))

    def test_read_write_block_vector_data_tuple(self):
        # Vector data given as a nested tuple instead of an ndarray.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        write_data = ((3, 7, 8), (7, 6, 5))
        participant.write_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data)
        dt = 1
        read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array([0, 1]), dt)
        self.assertTrue(np.array_equal(write_data, read_data))

    def test_read_write_block_vector_data_mixed(self):
        # Vector data given as a list of tuples.
        participant = precice.Participant("test", "dummy.xml", 0, 1)
        write_data = [(3, 7, 8), (7, 6, 5)]
        participant.write_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data)
        dt = 1
        read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array([0, 1]), dt)
        self.assertTrue(np.array_equal(write_data, read_data))
265 |
266 | def test_read_write_block_vector_data_non_contiguous(self):
267 | """
268 | Tests behaviour of solver interface, if a non contiguous array is passed to the interface.
269 |
270 | Note: Check whether np.ndarray is contiguous via np.ndarray.flags.
271 | """
272 | participant = precice.Participant("test", "dummy.xml", 0, 1)
273 | size = 6
274 | dummy_array = np.random.rand(size, 5)
275 | write_data = dummy_array[:, 1:4]
276 | assert (write_data.flags["C_CONTIGUOUS"] is False)
277 | vertex_ids = np.arange(size)
278 | participant.write_data("FakeMesh", "FakeVectorData", vertex_ids, write_data)
279 | dt = 1
280 | read_data = participant.read_data("FakeMesh", "FakeVectorData", vertex_ids, dt)
281 | self.assertTrue(np.array_equal(write_data, read_data))
282 |
283 | def test_read_write_vector_data(self):
284 | participant = precice.Participant("test", "dummy.xml", 0, 1)
285 | write_data = np.array([[0, 1, 2]], dtype=np.double)
286 | participant.write_data("FakeMesh", "FakeVectorData", [0], write_data)
287 | dt = 1
288 | read_data = participant.read_data("FakeMesh", "FakeVectorData", [0], dt)
289 | self.assertTrue(np.array_equal(write_data, read_data))
290 |
291 | def test_read_write_vector_data_list(self):
292 | participant = precice.Participant("test", "dummy.xml", 0, 1)
293 | write_data = [[0, 1, 2]]
294 | participant.write_data("FakeMesh", "FakeVectorData", [0], write_data)
295 | dt = 1
296 | read_data = participant.read_data("FakeMesh", "FakeVectorData", [0], dt)
297 | self.assertTrue(np.array_equal(write_data, read_data))
298 |
299 | def test_read_write_vector_data_tuple(self):
300 | participant = precice.Participant("test", "dummy.xml", 0, 1)
301 | write_data = [(1, 2, 3)]
302 | participant.write_data("FakeMesh", "FakeVectorData", [0], write_data)
303 | dt = 1
304 | read_data = participant.read_data("FakeMesh", "FakeVectorData", [0], dt)
305 | self.assertTrue(np.array_equal(write_data, read_data))
306 |
307 | def test_read_write_vector_data_non_contiguous(self):
308 | """
309 | Tests behaviour of solver interface, if a non contiguous array is passed to the interface.
310 |
311 | Note: Check whether np.ndarray is contiguous via np.ndarray.flags.
312 | """
313 | participant = precice.Participant("test", "dummy.xml", 0, 1)
314 | dummy_array = np.random.rand(3, 3)
315 | write_data = dummy_array[:, 1]
316 | assert (write_data.flags["C_CONTIGUOUS"] is False)
317 | write_data = [write_data]
318 | participant.write_data("FakeMesh", "FakeVectorData", [0], write_data)
319 | dt = 1
320 | read_data = participant.read_data("FakeMesh", "FakeVectorData", [0], dt)
321 | self.assertTrue(np.array_equal(write_data, read_data))
322 |
323 | def test_jit_mapping(self):
324 | participant = precice.Participant("test", "dummy.xml", 0, 1)
325 | write_data = [1, 2, 3]
326 | participant.write_and_map_data("FakeMesh", "FakeScalarData", [0, 1, 2], write_data)
327 | dt = 1
328 | read_data = participant.map_and_read_data("FakeMesh", "FakeScalarData", [0, 1, 2], dt)
329 | self.assertTrue(np.array_equal(write_data, read_data))
330 |
331 | def test_get_version_information(self):
332 | version_info = precice.get_version_information()
333 | fake_version_info = b"dummy" # compare to test/SolverInterface.cpp
334 | self.assertEqual(version_info, fake_version_info)
335 |
336 | def test_set_mesh_access_region(self):
337 | participant = precice.Participant("test", "dummy.xml", 0, 1)
338 | fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name
339 | fake_dimension = 3 # compare to test/SolverInterface.cpp, fake_dimensions
340 | fake_bounding_box = np.arange(fake_dimension * 2)
341 | participant.set_mesh_access_region(fake_mesh_name, fake_bounding_box)
342 |
343 | def test_get_mesh_vertex_ids_and_coordinates(self):
344 | participant = precice.Participant("test", "dummy.xml", 0, 1)
345 | fake_mesh_name = "FakeMesh" # compare to test/SolverInterface.cpp, fake_mesh_name
346 | n_fake_vertices = 3 # compare to test/SolverInterface.cpp, n_fake_vertices
347 | fake_dimension = 3 # compare to test/SolverInterface.cpp, fake_dimensions
348 | vertex_ids = np.arange(n_fake_vertices)
349 | coordinates = np.zeros((n_fake_vertices, fake_dimension))
350 | for i in range(n_fake_vertices):
351 | coordinates[i, 0] = i * fake_dimension
352 | coordinates[i, 1] = i * fake_dimension + 1
353 | coordinates[i, 2] = i * fake_dimension + 2
354 | fake_ids, fake_coordinates = participant.get_mesh_vertex_ids_and_coordinates(fake_mesh_name)
355 | self.assertTrue(np.array_equal(fake_ids, vertex_ids))
356 | self.assertTrue(np.array_equal(fake_coordinates, coordinates))
357 |
358 | def test_requires_gradient_data_for(self):
359 | participant = precice.Participant("test", "dummy.xml", 0, 1)
360 | fake_bool = 0 # compare to output in test/SolverInterface.cpp
361 | fake_mesh_name = "FakeMesh"
362 | fake_data_name = "FakeName"
363 | self.assertEqual(fake_bool, participant.requires_gradient_data_for(fake_mesh_name, fake_data_name))
364 |
365 | def test_write_block_scalar_gradient_data(self):
366 | participant = precice.Participant("test", "dummy.xml", 0, 1)
367 | write_data = np.array([[0, 1, 2], [6, 7, 8], [9, 10, 11]], dtype=np.double)
368 | participant.write_gradient_data("FakeMesh", "FakeScalarData", np.array([0, 1, 2]), write_data)
369 | dt = 1
370 | read_data = participant.read_data("FakeMesh", "FakeScalarData", np.array(range(9)), dt)
371 | self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten()))
372 |
373 | def test_write_block_scalar_gradient_data_single_float(self):
374 | participant = precice.Participant("test", "dummy.xml", 0, 1)
375 | fake_dimension = 3
376 | n_fake_vertices = 1
377 | vertex_ids = np.arange(n_fake_vertices)
378 | write_data = np.random.rand(n_fake_vertices, fake_dimension)
379 | participant.write_gradient_data("FakeMesh", "FakeScalarData", vertex_ids, write_data)
380 | dt = 1
381 | read_data = participant.read_data("FakeMesh", "FakeScalarData", np.arange(n_fake_vertices * fake_dimension), dt)
382 | self.assertTrue(np.array_equal(write_data.flatten(), read_data))
383 |
384 | def test_write_block_scalar_gradient_data_empty(self):
385 | participant = precice.Participant("test", "dummy.xml", 0, 1)
386 | write_data = np.array([])
387 | participant.write_gradient_data("FakeMesh", "FakeScalarData", [], write_data)
388 | dt = 1
389 | read_data = participant.read_data("FakeMesh", "FakeScalarData", [], dt)
390 | self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten()))
391 |
392 | def test_write_block_scalar_gradient_data_non_contiguous(self):
393 | """
394 | Tests behavior of solver interface, if a non contiguous array is passed to the interface.
395 | Note: Check whether np.ndarray is contiguous via np.ndarray.flags.
396 | """
397 | participant = precice.Participant("test", "dummy.xml", 0, 1)
398 | dummy_array = np.random.rand(3, 9)
399 | write_data = dummy_array[:, 3:6]
400 | assert write_data.flags["C_CONTIGUOUS"] is False
401 | participant.write_gradient_data("FakeMesh", "FakeScalarData", np.array([0, 1, 2]), write_data)
402 | dt = 1
403 | read_data = participant.read_data("FakeMesh", "FakeScalarData", np.array(range(9)), dt)
404 | self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten()))
405 |
406 | def test_write_scalar_gradient_data(self):
407 | participant = precice.Participant("test", "dummy.xml", 0, 1)
408 | fake_dimension = 3
409 | write_data = [np.random.rand(fake_dimension)]
410 | participant.write_gradient_data("FakeMesh", "FakeScalarData", [0], write_data)
411 | dt = 1
412 | # Gradient data is essential vector data, hence the appropriate data name is used here
413 | read_data = participant.read_data("FakeMesh", "FakeVectorData", [0], dt)
414 | self.assertTrue(np.array_equiv(write_data, read_data))
415 |
416 | def test_write_block_vector_gradient_data(self):
417 | participant = precice.Participant("test", "dummy.xml", 0, 1)
418 | fake_dimension = 3
419 | n_fake_vertices = 4
420 | vertex_ids = np.arange(n_fake_vertices)
421 | write_data = np.random.rand(n_fake_vertices, fake_dimension * fake_dimension)
422 | participant.write_gradient_data("FakeMesh", "FakeVectorData", vertex_ids, write_data)
423 | dt = 1
424 | read_data = participant.read_data(
425 | "FakeMesh", "FakeVectorData", np.array(range(n_fake_vertices * fake_dimension)), dt)
426 | self.assertTrue(np.array_equiv(write_data.flatten(), read_data.flatten()))
427 |
428 | def test_write_block_vector_gradient_data_empty(self):
429 | participant = precice.Participant("test", "dummy.xml", 0, 1)
430 | write_data = np.array([])
431 | participant.write_gradient_data("FakeMesh", "FakeVectorData", [], write_data)
432 | dt = 1
433 | read_data = participant.read_data("FakeMesh", "FakeVectorData", [], dt)
434 | self.assertTrue(len(read_data) == 0)
435 |
436 | def test_write_block_vector_gradient_data_list(self):
437 | participant = precice.Participant("test", "dummy.xml", 0, 1)
438 | write_data = [[3.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0]]
439 | participant.write_gradient_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data)
440 | dt = 1
441 | read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(6)), dt)
442 | self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten()))
443 |
444 | def test_write_block_vector_gradient_data_tuple(self):
445 | participant = precice.Participant("test", "dummy.xml", 0, 1)
446 | write_data = ((1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 3.0, 7.0, 8.0), (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 6.0, 5.0))
447 | participant.write_gradient_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data)
448 | dt = 1
449 | read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(6)), dt)
450 | self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten()))
451 |
452 | def test_write_block_vector_gradient_data_mixed(self):
453 | participant = precice.Participant("test", "dummy.xml", 0, 1)
454 | write_data = [(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 3.0, 7.0, 8.0), (4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 7.0, 6.0, 5.0)]
455 | participant.write_gradient_data("FakeMesh", "FakeVectorData", np.array([0, 1]), write_data)
456 | dt = 1
457 | read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(6)), dt)
458 | self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten()))
459 |
460 | def test_write_block_vector_gradient_data_non_contiguous(self):
461 | """
462 | Tests behavior of solver interface, if a non contiguous array is passed to the interface.
463 | Note: Check whether np.ndarray is contiguous via np.ndarray.flags.
464 | """
465 | participant = precice.Participant("test", "dummy.xml", 0, 1)
466 | dummy_array = np.random.rand(3, 15)
467 | write_data = dummy_array[:, 2:11]
468 | assert write_data.flags["C_CONTIGUOUS"] is False
469 | vertex_ids = np.arange(3)
470 | participant.write_gradient_data("FakeMesh", "FakeVectorData", vertex_ids, write_data)
471 | dt = 1
472 | read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(9)), dt)
473 | self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten()))
474 |
475 | def test_write_vector_gradient_data(self):
476 | participant = precice.Participant("test", "dummy.xml", 0, 1)
477 | write_data = [np.arange(0, 9, dtype=np.double)]
478 | participant.write_gradient_data("FakeMesh", "FakeVectorData", [0], write_data)
479 | dt = 1
480 | read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(3)), dt)
481 | self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten()))
482 |
483 | def test_write_vector_gradient_data_list(self):
484 | participant = precice.Participant("test", "dummy.xml", 0, 1)
485 | write_data = [[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]]
486 | participant.write_gradient_data("FakeMesh", "FakeVectorData", [0], write_data)
487 | dt = 1
488 | read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(3)), dt)
489 | self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten()))
490 |
491 | def test_write_vector_gradient_data_tuple(self):
492 | participant = precice.Participant("test", "dummy.xml", 0, 1)
493 | write_data = [(1.0, 2.0, 3.0, 9.0, 8.0, 7.0, 6.0, 5.0, 4.0)]
494 | participant.write_gradient_data("FakeMesh", "FakeVectorData", [0], write_data)
495 | dt = 1
496 | read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(3)), dt)
497 | self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten()))
498 |
499 | def test_write_vector_gradient_data_non_contiguous(self):
500 | """
501 | Tests behavior of solver interface, if a non contiguous array is passed to the interface.
502 | Note: Check whether np.ndarray is contiguous via np.ndarray.flags.
503 | """
504 | participant = precice.Participant("test", "dummy.xml", 0, 1)
505 | dummy_array = np.random.rand(9, 3)
506 | write_data = dummy_array[:, 1]
507 | assert write_data.flags["C_CONTIGUOUS"] is False
508 | write_data = [write_data]
509 | participant.write_gradient_data("FakeMesh", "FakeVectorData", [0], write_data)
510 | dt = 1
511 | read_data = participant.read_data("FakeMesh", "FakeVectorData", np.array(range(3)), dt)
512 | self.assertTrue(np.array_equiv(np.array(write_data).flatten(), read_data.flatten()))
513 |
--------------------------------------------------------------------------------