├── .clang-format
├── .cmake-format
├── .gitattributes
├── .github
│   ├── dependabot.yml
│   ├── pull_request_template.md
│   └── workflows
│       ├── build_and_test.yaml
│       ├── build_and_test_mac.yaml
│       ├── docker.yaml
│       ├── run-checks.yml
│       └── test_pyfans.yaml
├── .gitignore
├── .markdown-link-check-config.json
├── .markdownlint.json
├── .pre-commit-config.yaml
├── CHANGELOG.md
├── CITATION.cff
├── CMakeLists.txt
├── CONTRIBUTING.md
├── FANS_Dashboard
│   ├── FANS_Dashboard.ipynb
│   ├── README.md
│   ├── fans_dashboard
│   │   ├── __init__.py
│   │   ├── core
│   │   │   ├── __init__.py
│   │   │   ├── postprocessing.py
│   │   │   ├── tensortools.py
│   │   │   └── utils.py
│   │   └── plotting
│   │       ├── PlotYoungsModulus.py
│   │       ├── __init__.py
│   │       ├── h52xdmf.py
│   │       └── plotting.py
│   └── pyproject.toml
├── LICENSE
├── README.md
├── cmake
│   ├── FANSConfig.cmake.in
│   ├── modules
│   │   └── FindFFTW3.cmake
│   └── packaging
│       └── CMakeLists.txt
├── docker
│   ├── Dockerfile
│   ├── Dockerfile_user_env_entrypoint.sh
│   └── README.md
├── docs
│   ├── ReleaseGuide.md
│   └── images
│       └── FANS_example.png
├── include
│   ├── general.h
│   ├── json.hpp
│   ├── material_models
│   │   ├── GBDiffusion.h
│   │   ├── J2Plasticity.h
│   │   ├── LinearElastic.h
│   │   ├── LinearThermal.h
│   │   └── PseudoPlastic.h
│   ├── matmodel.h
│   ├── mixedBCs.h
│   ├── reader.h
│   ├── setup.h
│   ├── solver.h
│   ├── solverCG.h
│   ├── solverFP.h
│   └── version.h.in
├── pixi.lock
├── pixi.toml
├── pyfans
│   ├── CMakeLists.txt
│   ├── README.md
│   ├── micro.cpp
│   └── micro.hpp
├── src
│   ├── main.cpp
│   └── reader.cpp
└── test
    ├── CMakeLists.txt
    ├── README.md
    ├── input_files
    │   ├── test_J2Plasticity.json
    │   ├── test_LinearElastic.json
    │   ├── test_LinearThermal.json
    │   ├── test_MixedBCs.json
    │   └── test_PseudoPlastic.json
    ├── microstructures
    │   └── sphere32.h5
    ├── pytest
    │   ├── test_displacement_averaging.py
    │   ├── test_homogenization_consistency.py
    │   ├── test_homogenized_tangent_spd.py
    │   ├── test_homogenized_tangent_within_VRbounds.py
    │   ├── test_loading_to_strain_average.py
    │   ├── test_strain_stress_averaging.py
    │   └── test_tensortools.py
    ├── run_tests.sh
    └── test_pyfans
        ├── README.md
        ├── input.json
        ├── macro-cube.py
        ├── micro-manager-config.json
        └── precice-config.xml
/.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | Language: Cpp 3 | AccessModifierOffset: -2 4 | AlignAfterOpenBracket: Align 5 | AlignConsecutiveAssignments: true 6 | AlignConsecutiveDeclarations: true 7 | AlignEscapedNewlines: Right 8 | AlignOperands: true 9 | AlignTrailingComments: true 10 | AllowAllParametersOfDeclarationOnNextLine: true 11 | AllowShortBlocksOnASingleLine: false 12 | AllowShortCaseLabelsOnASingleLine: false 13 | AllowShortFunctionsOnASingleLine: Empty 14 | AllowShortIfStatementsOnASingleLine: false 15 | AllowShortLoopsOnASingleLine: false 16 | AlwaysBreakAfterDefinitionReturnType: None 17 | AlwaysBreakAfterReturnType: None 18 | AlwaysBreakBeforeMultilineStrings: false 19 | AlwaysBreakTemplateDeclarations: MultiLine 20 | BinPackArguments: true 21 | BinPackParameters: true 22 | BraceWrapping: 23 | AfterClass: false 24 | AfterControlStatement: false 25 | AfterEnum: false 26 | AfterFunction: true 27 | AfterNamespace: false 28 | AfterObjCDeclaration: false 29 | AfterStruct: false 30 | AfterUnion: false 31 | AfterExternBlock: false 32 | BeforeCatch: false 33 | BeforeElse: false 34 | IndentBraces: false 35 | SplitEmptyFunction: true 36 | SplitEmptyRecord: true 37 | SplitEmptyNamespace: true 38 | BreakBeforeBinaryOperators: None 39 | BreakBeforeBraces: Custom 40 | BreakBeforeInheritanceComma: false 41 | BreakInheritanceList: BeforeColon 42 | BreakBeforeTernaryOperators: true 43 |
BreakConstructorInitializersBeforeComma: false 44 | BreakConstructorInitializers: BeforeColon 45 | BreakAfterJavaFieldAnnotations: false 46 | BreakStringLiterals: true 47 | ColumnLimit: 0 48 | CommentPragmas: '^ IWYU pragma:' 49 | CompactNamespaces: false 50 | ConstructorInitializerAllOnOneLineOrOnePerLine: false 51 | ConstructorInitializerIndentWidth: 4 52 | ContinuationIndentWidth: 4 53 | Cpp11BracedListStyle: true 54 | DerivePointerAlignment: false 55 | DisableFormat: false 56 | ExperimentalAutoDetectBinPacking: false 57 | FixNamespaceComments: true 58 | ForEachMacros: 59 | - foreach 60 | - Q_FOREACH 61 | - BOOST_FOREACH 62 | IncludeBlocks: Preserve 63 | IncludeCategories: 64 | - Regex: '^(<|"(gtest|isl|json)/)' 65 | Priority: 1 66 | - Regex: '.*' 67 | Priority: 2 68 | - Regex: '.*' 69 | Priority: 1 70 | IncludeIsMainRegex: '$' 71 | IndentCaseLabels: false 72 | IndentPPDirectives: None 73 | IndentWidth: 4 74 | IndentWrappedFunctionNames: false 75 | JavaScriptQuotes: Leave 76 | JavaScriptWrapImports: true 77 | KeepEmptyLinesAtTheStartOfBlocks: true 78 | MacroBlockBegin: '' 79 | MacroBlockEnd: '' 80 | MaxEmptyLinesToKeep: 1 81 | NamespaceIndentation: None 82 | ObjCBinPackProtocolList: Auto 83 | ObjCBlockIndentWidth: 4 84 | ObjCSpaceAfterProperty: false 85 | ObjCSpaceBeforeProtocolList: true 86 | PenaltyBreakAssignment: 2 87 | PenaltyBreakBeforeFirstCallParameter: 19 88 | PenaltyBreakComment: 300 89 | PenaltyBreakFirstLessLess: 120 90 | PenaltyBreakString: 1000 91 | PenaltyBreakTemplateDeclaration: 10 92 | PenaltyExcessCharacter: 1000000 93 | PenaltyReturnTypeOnItsOwnLine: 100 94 | PointerAlignment: Right 95 | ReflowComments: true 96 | SortIncludes: false 97 | SortUsingDeclarations: true 98 | SpaceAfterCStyleCast: true 99 | SpaceAfterTemplateKeyword: true 100 | SpaceBeforeAssignmentOperators: true 101 | SpaceBeforeCpp11BracedList: false 102 | SpaceBeforeCtorInitializerColon: true 103 | SpaceBeforeInheritanceColon: true 104 | SpaceBeforeParens: ControlStatements 105 | SpaceBeforeRangeBasedForLoopColon: true 106 | SpaceInEmptyParentheses: false 107 | SpacesBeforeTrailingComments: 1 108 | SpacesInAngles: false 109 | SpacesInContainerLiterals: true 110 | SpacesInCStyleCastParentheses: false 111 | SpacesInParentheses: false 112 | SpacesInSquareBrackets: false 113 | Standard: Cpp11 114 | StatementMacros: 115 | - Q_UNUSED 116 | - QT_REQUIRE_VERSION 117 | TabWidth: 4 118 | UseTab: Never 119 | ... 120 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # SCM syntax highlighting & preventing 3-way merges 2 | pixi.lock merge=binary linguist-language=YAML linguist-generated=true 3 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "github-actions" 9 | directory: "/" 10 | schedule: 11 | # Check for updates to GitHub Actions every week 12 | interval: "weekly" 13 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | Checklist: 4 | 5 | - [ ] I made sure that the CI passes before asking for a review. 6 | - [ ] I added a summary of the changes (compared to the last release) in the `CHANGELOG.md`. 7 | - [ ] If necessary, I made changes to the documentation and/or added new content. 8 | - [ ] I will remember to squash-and-merge, providing a useful summary of the changes in this PR. 9 | -------------------------------------------------------------------------------- /.github/workflows/build_and_test.yaml: -------------------------------------------------------------------------------- 1 | name: Build and Test 2 | # Builds FANS inside various docker containers and runs the tests. 3 | 4 | on: 5 | push: 6 | branches: 7 | - main 8 | - develop 9 | pull_request: 10 | workflow_dispatch: 11 | 12 | concurrency: 13 | group: ${{ github.event_name }}-${{ github.workflow }}-${{ github.ref }} 14 | cancel-in-progress: ${{github.event_name == 'pull_request'}} 15 | 16 | jobs: 17 | build: 18 | name: ${{ format('Ubuntu {0}', matrix.UBUNTU_VERSION) }} 19 | runs-on: ubuntu-latest 20 | container: unistuttgartdae/fans-ci:${{ matrix.UBUNTU_VERSION }} 21 | defaults: 22 | run: 23 | shell: "bash --login -eo pipefail {0}" 24 | env: 25 | FANS_BUILD_DIR: build 26 | FANS_MPI_USER: fans 27 | strategy: 28 | fail-fast: false 29 | matrix: 30 | UBUNTU_VERSION: [noble, jammy] 31 | steps: 32 | - name: Checkout code 33 | uses: actions/checkout@v4 34 | 35 | - name: Set up pixi 36 | uses: prefix-dev/setup-pixi@v0.8.8 37 | 38 | - name: Generate build directory 39 | run: mkdir -p ${{ env.FANS_BUILD_DIR }} 40 | 41 | - name: Configure 42 | working-directory: ${{ env.FANS_BUILD_DIR }} 43 | run: | 44 | cmake --version 45 | cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_EXPORT_COMPILE_COMMANDS=ON .. 46 | 47 | - uses: actions/upload-artifact@v4 48 | if: failure() 49 | with: 50 | name: ${{ format('Ubuntu {0}', matrix.UBUNTU_VERSION) }} CMakeCache 51 | path: ${{ env.FANS_BUILD_DIR }}/CMakeCache.txt 52 | - uses: actions/upload-artifact@v4 53 | if: failure() 54 | with: 55 | name: ${{ format('Ubuntu {0}', matrix.UBUNTU_VERSION) }} CMakeLogs 56 | path: '${{ env.FANS_BUILD_DIR }}/CMakeFiles/*.log' 57 | - uses: actions/upload-artifact@v4 58 | if: failure() 59 | with: 60 | name: ${{ format('Ubuntu {0}', matrix.UBUNTU_VERSION) }} CompileCommands 61 | path: ${{ env.FANS_BUILD_DIR }}/compile_commands.json 62 | 63 | - name: Compile 64 | working-directory: ${{ env.FANS_BUILD_DIR }} 65 | run: 66 | cmake --build . -j $(nproc) || cmake --build .
-j1 67 | 68 | - name: Adjust user rights 69 | run: chown -R ${{ env.FANS_MPI_USER }} ${{ env.FANS_BUILD_DIR }} 70 | 71 | - name: Tests 72 | working-directory: ${{ env.FANS_BUILD_DIR }} 73 | run: | 74 | su -c "ctest" ${{ env.FANS_MPI_USER }} 75 | 76 | - uses: actions/upload-artifact@v4 77 | if: failure() 78 | with: 79 | name: ${{ format('Ubuntu {0}', matrix.UBUNTU_VERSION) }} CTest logs 80 | path: ${{ env.FANS_BUILD_DIR }}/Testing/Temporary/LastTest.log 81 | 82 | # ──────────────────────────────────────────────────────────────── 83 | # Pytest checks 84 | # ──────────────────────────────────────────────────────────────── 85 | - name: Install Pixi Python deps 86 | run: | 87 | pixi --version 88 | pixi install 89 | 90 | - name: Run pytest checks on HDF5 output 91 | run: pixi run test 92 | 93 | - uses: actions/upload-artifact@v4 94 | if: failure() 95 | with: 96 | name: ${{ format('Ubuntu {0}', matrix.UBUNTU_VERSION) }} PyTest logs 97 | path: | 98 | **/pytest*.xml 99 | **/.pytest_cache 100 | -------------------------------------------------------------------------------- /.github/workflows/build_and_test_mac.yaml: -------------------------------------------------------------------------------- 1 | name: Build and test macOS 15 2 | # Builds FANS for macOS 15 on Apple Silicon CPU and runs the tests. 3 | 4 | on: 5 | push: 6 | branches: 7 | - main 8 | - develop 9 | pull_request: 10 | workflow_dispatch: 11 | 12 | concurrency: 13 | group: ${{ github.event_name }}-${{ github.workflow }}-${{ github.ref }} 14 | cancel-in-progress: ${{github.event_name == 'pull_request'}} 15 | 16 | jobs: 17 | build-macos: 18 | name: macOS 15 19 | runs-on: macos-15 20 | env: 21 | FANS_BUILD_DIR: build 22 | strategy: 23 | fail-fast: false 24 | steps: 25 | - name: Checkout code 26 | uses: actions/checkout@v4 27 | 28 | - name: Install FANS dependencies 29 | run: | 30 | brew install gnu-time cmake gcc@14 31 | brew install open-mpi --build-from-source --cc=gcc-14 32 | brew install hdf5-mpi --build-from-source --cc=gcc-14 33 | brew install fftw eigen 34 | 35 | - name: Set up pixi 36 | uses: prefix-dev/setup-pixi@v0.8.8 37 | 38 | - name: Generate build directory 39 | run: mkdir -p ${{ env.FANS_BUILD_DIR }} 40 | 41 | - name: Configure 42 | working-directory: ${{ env.FANS_BUILD_DIR }} 43 | env: 44 | CC: gcc-14 45 | CXX: g++-14 46 | MPICC: mpicc 47 | MPICXX: mpicxx 48 | run: | 49 | cmake --version 50 | cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_EXPORT_COMPILE_COMMANDS=ON .. 51 | 52 | - uses: actions/upload-artifact@v4 53 | if: failure() 54 | with: 55 | name: macOS 15 CMakeCache 56 | path: ${{ env.FANS_BUILD_DIR }}/CMakeCache.txt 57 | - uses: actions/upload-artifact@v4 58 | if: failure() 59 | with: 60 | name: macOS 15 CMakeLogs 61 | path: '${{ env.FANS_BUILD_DIR }}/CMakeFiles/*.log' 62 | - uses: actions/upload-artifact@v4 63 | if: failure() 64 | with: 65 | name: macOS 15 CompileCommands 66 | path: ${{ env.FANS_BUILD_DIR }}/compile_commands.json 67 | 68 | - name: Compile 69 | working-directory: ${{ env.FANS_BUILD_DIR }} 70 | run: 71 | cmake --build . -j $(sysctl -n hw.ncpu) || cmake --build .
-j1 72 | 73 | - name: Tests 74 | working-directory: ${{ env.FANS_BUILD_DIR }} 75 | env: 76 | CTEST_OUTPUT_ON_FAILURE: 1 77 | run: ctest 78 | 79 | - uses: actions/upload-artifact@v4 80 | if: failure() 81 | with: 82 | name: macOS 15 CTest logs 83 | path: ${{ env.FANS_BUILD_DIR }}/Testing/Temporary/LastTest.log 84 | 85 | # ──────────────────────────────────────────────────────────────── 86 | # Pytest checks 87 | # ──────────────────────────────────────────────────────────────── 88 | - name: Install Pixi Python deps 89 | run: | 90 | pixi --version 91 | pixi install 92 | 93 | - name: Run pytest checks on HDF5 output 94 | run: pixi run test 95 | 96 | - uses: actions/upload-artifact@v4 97 | if: failure() 98 | with: 99 | name: macOS 15 PyTest logs 100 | path: | 101 | **/pytest*.xml 102 | **/.pytest_cache 103 | -------------------------------------------------------------------------------- /.github/workflows/docker.yaml: -------------------------------------------------------------------------------- 1 | name: Build and push docker images 2 | 3 | on: workflow_dispatch 4 | 5 | jobs: 6 | build-and-push: 7 | strategy: 8 | matrix: 9 | ubuntu-version: [noble, jammy] 10 | 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Checkout code 15 | uses: actions/checkout@v4 16 | 17 | - name: Set up QEMU 18 | uses: docker/setup-qemu-action@v3 19 | 20 | - name: Set up Docker Buildx 21 | uses: docker/setup-buildx-action@v3 22 | 23 | - name: Login to DockerHub 24 | uses: docker/login-action@v3 25 | with: 26 | username: ${{ secrets.DOCKERHUB_USERNAME }} 27 | password: ${{ secrets.DOCKERHUB_TOKEN }} 28 | 29 | - name: Build and push fans-ci image 30 | uses: docker/build-push-action@v6 31 | with: 32 | context: ${{ github.workspace }} 33 | file: docker/Dockerfile 34 | platforms: | 35 | linux/amd64 36 | linux/arm64 37 | push: true 38 | tags: | 39 | unistuttgartdae/fans-ci:${{ matrix.ubuntu-version }} 40 | ${{ matrix.ubuntu-version == 'noble' && format('unistuttgartdae/fans-ci:latest') || '' }} 41 | target: fans_ci 42 | build-args: UBUNTU_VERSION=${{ matrix.ubuntu-version }} 43 | 44 | - name: Build and push fans-dev image 45 | uses: docker/build-push-action@v6 46 | with: 47 | context: ${{ github.workspace }} 48 | file: docker/Dockerfile 49 | platforms: | 50 | linux/amd64 51 | linux/arm64 52 | push: true 53 | tags: | 54 | unistuttgartdae/fans-dev:${{ matrix.ubuntu-version }} 55 | ${{ matrix.ubuntu-version == 'noble' && format('unistuttgartdae/fans-dev:latest') || '' }} 56 | target: fans_dev 57 | build-args: UBUNTU_VERSION=${{ matrix.ubuntu-version }} 58 | -------------------------------------------------------------------------------- /.github/workflows/run-checks.yml: -------------------------------------------------------------------------------- 1 | name: Run checks for markdown, links, and pre-commit 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - develop 7 | pull_request: 8 | branches: 9 | - "*" 10 | jobs: 11 | check_md: 12 | name: Lint markdown files 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Check out repository 16 | uses: actions/checkout@v4 17 | - name: Lint markdown files (markdownlint) 18 | uses: articulate/actions-markdownlint@v1 19 | with: 20 | config: .markdownlint.json 21 | files: '.'
22 | 23 | check_links: 24 | name: Check links in markdown files 25 | runs-on: ubuntu-latest 26 | steps: 27 | - name: Check out repository 28 | uses: actions/checkout@v4 29 | - name: Check links in markdown files (markdown-link-check) 30 | uses: gaurav-nelson/github-action-markdown-link-check@v1 31 | with: 32 | use-quiet-mode: 'yes' 33 | use-verbose-mode: 'no' 34 | config-file: '.markdown-link-check-config.json' 35 | 36 | precommit: 37 | name: pre-commit checks 38 | runs-on: ubuntu-latest 39 | steps: 40 | - uses: actions/checkout@v4 41 | - name: Setup python 42 | uses: actions/setup-python@v5 43 | with: 44 | python-version: '3.10' 45 | check-latest: true 46 | - name: Install pre-commit 47 | run: pip install pre-commit 48 | - name: Run checks 49 | run: pre-commit run -a -v 50 | - name: Git status 51 | if: always() 52 | run: git status 53 | - name: Full diff 54 | if: always() 55 | run: git diff 56 | -------------------------------------------------------------------------------- /.github/workflows/test_pyfans.yaml: -------------------------------------------------------------------------------- 1 | name: Test PyFans 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - develop 8 | pull_request: 9 | branches: 10 | - "*" 11 | 12 | jobs: 13 | test-pyfans: 14 | runs-on: ubuntu-latest 15 | container: unistuttgartdae/fans-ci:noble 16 | defaults: 17 | run: 18 | shell: "bash --login -eo pipefail {0}" 19 | env: 20 | FANS_BUILD_DIR: build 21 | FANS_MPI_USER: fans 22 | steps: 23 | 24 | - name: Checkout repository 25 | uses: actions/checkout@v4 26 | 27 | - name: Generate build directory 28 | run: mkdir -p ${{ env.FANS_BUILD_DIR }} 29 | 30 | - name: Install dependencies 31 | run: | 32 | apt update 33 | apt install -y wget python3-venv 34 | 35 | - name: Install preCICE 36 | run: | 37 | wget https://github.com/precice/precice/releases/download/v3.2.0/libprecice3_3.2.0_noble.deb 38 | apt install -y ./libprecice3_3.2.0_noble.deb 39 | 40 | - name: Install the Micro Manager 41 | run: | 42 | python3 -m venv .venv 43 | . .venv/bin/activate 44 | pip install micro-manager-precice 45 | 46 | - name: Configure 47 | working-directory: ${{ env.FANS_BUILD_DIR }} 48 | run: | 49 | cmake .. -DFANS_LIBRARY_FOR_MICRO_MANAGER=ON 50 | make 51 | 52 | - name: Run a dummy macro-micro coupling test 53 | run: | 54 | . .venv/bin/activate 55 | cd test/test_pyfans 56 | python3 macro-cube.py & micro-manager-precice micro-manager-config.json 57 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore build directories 2 | build/ 3 | bin/ 4 | lib/ 5 | lib64/ 6 | */build*/ 7 | 8 | # Ignore CMake generated files 9 | CMakeFiles/ 10 | CMakeCache.txt 11 | cmake_install.cmake 12 | Makefile 13 | CMakeLists.txt.user* 14 | .cmake/ 15 | 16 | # Ignore compiled binaries and executables 17 | *.exe 18 | *.out 19 | *.app 20 | *.so 21 | *.dylib 22 | 23 | # Ignore object files and libraries 24 | *.o 25 | *.a 26 | *.lib 27 | 28 | # Ignore editor-specific files 29 | .vscode/ 30 | .idea/ 31 | *.vs/ 32 | *.suo 33 | *.ntvs* 34 | *.njsproj 35 | *.sln 36 | *.suo 37 | *.swp 38 | *.sln.docstates 39 | *.user 40 | *.userosscache 41 | *.suo 42 | *.tsserver.log.* 43 | *.dbmdl 44 | *.dbproj 45 | *.jfm 46 | *.pfx 47 | *.publishsettings 48 | node_modules/ 49 | bower_components/ 50 | 51 | # Ignore OS generated files 52 | .DS_Store 53 | .DS_Store? 
54 | ._* 55 | .Spotlight-V100 56 | .Trashes 57 | ehthumbs.db 58 | Thumbs.db 59 | 60 | # Ignore package manager directories 61 | pip-wheel-metadata/ 62 | 63 | !fans_input.grid.json 64 | !fans_input.slides.json 65 | 66 | # Byte-compiled / optimized / DLL files 67 | __pycache__/ 68 | *.py[cod] 69 | *$py.class 70 | 71 | # C extensions 72 | *.so 73 | 74 | # Distribution / packaging 75 | .Python 76 | build/ 77 | develop-eggs/ 78 | dist/ 79 | downloads/ 80 | eggs/ 81 | .eggs/ 82 | lib/ 83 | lib64/ 84 | parts/ 85 | sdist/ 86 | var/ 87 | wheels/ 88 | *.egg-info/ 89 | .installed.cfg 90 | *.egg 91 | 92 | # PyInstaller 93 | # Usually these files are written by a python script from a template 94 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 95 | *.manifest 96 | *.spec 97 | 98 | # Installer logs 99 | pip-log.txt 100 | pip-delete-this-directory.txt 101 | 102 | # Unit test / coverage reports 103 | htmlcov/ 104 | .tox/ 105 | .nox/ 106 | .coverage 107 | .cache 108 | nosetests.xml 109 | coverage.xml 110 | *.cover 111 | *.py,cover 112 | .hypothesis/ 113 | .pytest_cache/ 114 | cover/ 115 | 116 | # Translations 117 | *.mo 118 | *.pot 119 | 120 | # Django stuff: 121 | *.log 122 | local_settings.py 123 | db.sqlite3 124 | db.sqlite3-journal 125 | 126 | # Flask stuff: 127 | instance/ 128 | .webassets-cache 129 | 130 | # Scrapy stuff: 131 | .scrapy 132 | 133 | # Sphinx documentation 134 | docs/_build/ 135 | docs/_static/ 136 | docs/_autosummary/ 137 | 138 | # Jupyter Notebook 139 | .ipynb_checkpoints 140 | 141 | # IPython 142 | profile_default/ 143 | ipython_config.py 144 | 145 | # pyenv 146 | .python-version 147 | 148 | # celery beat schedule file 149 | celerybeat-schedule 150 | 151 | # SageMath parsed files 152 | *.sage.py 153 | 154 | # Environments 155 | .env 156 | .venv 157 | env/ 158 | venv/ 159 | ENV/ 160 | env.bak/ 161 | venv.bak/ 162 | 163 | # Spyder project settings 164 | .spyderproject 165 | .spyproject 166 | 167 | # Rope project settings 168 | .ropeproject 169 | 170 | # MkDocs documentation 171 | /site 172 | 173 | # mypy 174 | .mypy_cache/ 175 | .dmypy.json 176 | dmypy.json 177 | 178 | # Pyre type checker 179 | .pyre/ 180 | 181 | # pyright type checker 182 | .pyright/ 183 | 184 | # End of standard Python ignores 185 | 186 | 187 | # Extra 188 | *.xdmf 189 | FANS 190 | *.log 191 | run.sh 192 | *.h5 193 | data_gen/ 194 | test/input_files/*.json 195 | test/input_files/**/*.json 196 | 197 | # Dedicated folder for personal projects 198 | **/scratch/ 199 | 200 | # Test microstructure files 201 | !sphere32.h5 202 | 203 | # Test input files 204 | !test_LinearElastic.json 205 | !test_LinearThermal.json 206 | !test_PseudoPlastic.json 207 | !test_J2Plasticity.json 208 | !test_MixedBCs.json 209 | 210 | # pixi environments 211 | .pixi 212 | *.egg-info 213 | -------------------------------------------------------------------------------- /.markdown-link-check-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "aliveStatusCodes": [429, 200], 3 | "ignorePatterns": [ 4 | { 5 | "pattern": "*.html" 6 | } 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /.markdownlint.json: -------------------------------------------------------------------------------- 1 | { 2 | "MD013": false, 3 | "MD033": false, 4 | "MD034": false 5 | } 6 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: 
-------------------------------------------------------------------------------- 1 | repos: 2 | # Official repo for the clang-format hook 3 | - repo: https://github.com/pre-commit/pre-commit-hooks 4 | rev: v2.3.0 5 | hooks: 6 | - id: check-xml 7 | - id: check-merge-conflict 8 | - id: mixed-line-ending 9 | - id: end-of-file-fixer 10 | - id: trailing-whitespace 11 | # black repo for python formatting 12 | - repo: https://github.com/ambv/black 13 | rev: 22.12.0 14 | hooks: 15 | - id: black 16 | # clang-format for C/C++ formatting 17 | - repo: https://github.com/pre-commit/mirrors-clang-format 18 | rev: v19.1.2 19 | hooks: 20 | - id: clang-format 21 | args: ['--style=file'] 22 | exclude: "include/json.hpp" 23 | types_or: [c++] 24 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # FANS Changelog 2 | 3 | ## latest 4 | 5 | - Add pixi task `h52xdmf` to generate XDMF from H5 files directly as `pixi run h52xdmf {h5filepath}` 6 | 7 | ## v0.4.1 8 | 9 | - Remove std::sqrt from constexpr - failed on Clang https://github.com/DataAnalyticsEngineering/FANS/pull/64 10 | 11 | ## v0.4.0 12 | 13 | - Support compilation on macOS via conda-forge https://github.com/DataAnalyticsEngineering/FANS/pull/59 14 | - Add support for macroscale mixed stress-strain boundary conditions https://github.com/DataAnalyticsEngineering/FANS/pull/58 15 | - Add grain boundary diffusion material model for polycrystals https://github.com/DataAnalyticsEngineering/FANS/pull/52 16 | - Add a pixi environment for the FANS_dashboard and some tests https://github.com/DataAnalyticsEngineering/FANS/pull/55 17 | - Remove MPI initialization from pyFANS and add an integration test for it https://github.com/DataAnalyticsEngineering/FANS/pull/46 18 | - Native support for macOS https://github.com/DataAnalyticsEngineering/FANS/pull/25 19 | - Remove Ubuntu 20.04 from testing and Docker support https://github.com/DataAnalyticsEngineering/FANS/pull/51 20 | - Add support for `--version` command line argument for checking the version of FANS 21 | - Modify way to provide microstructure in JSON input https://github.com/DataAnalyticsEngineering/FANS/pull/43 22 | - Add conda package for FANS https://github.com/DataAnalyticsEngineering/FANS/pull/39 23 | - Introduce system for checking compiler flags: `avx2` & `fma` https://github.com/DataAnalyticsEngineering/FANS/pull/34 24 | - Add `results_prefix` field in the JSON input https://github.com/DataAnalyticsEngineering/FANS/pull/36 25 | - Build FANS as a library to be coupled to a macro-scale simulation via preCICE and the Micro Manager https://github.com/DataAnalyticsEngineering/FANS/pull/23 26 | 27 | ## v0.3.0 28 | 29 | - Added Linear thermal and mechanical triclinic material models https://github.com/DataAnalyticsEngineering/FANS/pull/32 30 | - Added API to get homogenized stress and homogenized tangent https://github.com/DataAnalyticsEngineering/FANS/pull/31 31 | 32 | ## v0.2.0 33 | 34 | - Add integration tests https://github.com/DataAnalyticsEngineering/FANS/pull/20 35 | - Add GitHub Action workflow to build and test FANS https://github.com/DataAnalyticsEngineering/FANS/pull/19 36 | 37 | ## v0.1.2 38 | 39 | - Update TIK GitHub links in the documentation to public GitHub links https://github.com/DataAnalyticsEngineering/FANS/pull/13 40 | 41 | ## v0.1.1 42 | 43 | - Disable sorting of includes in clang-format https://github.com/DataAnalyticsEngineering/FANS/pull/7 44
| 45 | ## v0.1.0 46 | 47 | - Add release guide and a Changelog file https://github.com/DataAnalyticsEngineering/FANS/pull/4 48 | - Add clang-format check and format all relevant files https://github.com/DataAnalyticsEngineering/FANS/pull/1 49 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: "1.2.0" 2 | authors: 3 | - family-names: Leuschner 4 | given-names: Matthias 5 | orcid: "https://orcid.org/0000-0003-0477-3441" 6 | - family-names: Fritzen 7 | given-names: Felix 8 | orcid: "https://orcid.org/0000-0003-4926-0068" 9 | preferred-citation: 10 | authors: 11 | - family-names: Leuschner 12 | given-names: Matthias 13 | orcid: "https://orcid.org/0000-0003-0477-3441" 14 | - family-names: Fritzen 15 | given-names: Felix 16 | orcid: "https://orcid.org/0000-0003-4926-0068" 17 | date-published: 2017-11-30 18 | doi: 10.1007/s00466-017-1501-5 19 | issn: 1432-0924 20 | issue: 3 21 | journal: Computational Mechanics 22 | publisher: 23 | name: Springer 24 | title: "Fourier-Accelerated Nodal Solvers (FANS) for homogenization problems" 25 | type: article 26 | url: "https://link.springer.com/article/10.1007/s00466-017-1501-5" 27 | volume: 62 28 | title: "Fourier-Accelerated Nodal Solvers (FANS) for homogenization problems" 29 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to FANS 2 | 3 | Contributions to FANS are most welcome! Please refer to the steps below for more details. 4 | 5 | ## Changelog 6 | 7 | We maintain a `CHANGELOG.md` where all major changes and contributions are entered. 8 | 9 | ## How to contribute 10 | 11 | 1. **Fork and Clone**: Fork the repository on GitHub and clone your fork locally. 12 | 13 | ```bash 14 | git clone https://github.com/your-username/FANS.git 15 | cd FANS 16 | ``` 17 | 18 | 2. **Create a Branch**: Create a branch for your work, using a descriptive name. 19 | 20 | ```bash 21 | git checkout -b feature/my-feature 22 | ``` 23 | 24 | 3. **Make Changes**: Implement your changes, adhering to the [Code Style Guidelines](#code-style-guidelines). 25 | 26 | 4. **Write Tests**: Ensure new features or bug fixes are covered by tests. 27 | 28 | 5. **Commit and Push**: Commit your changes with a clear message, then push to your fork. 29 | 30 | ```bash 31 | git add . 32 | git commit -m "Describe your changes" 33 | git push origin feature/my-feature 34 | ``` 35 | 36 | 6. **Create a Pull Request**: Open a pull request to the `develop` branch. Include relevant details, such as the issue being fixed or the feature being added. 37 | 38 | ### Code Style Guidelines 39 | 40 | - **C++ Standard**: Use C++17 or later. 41 | - **Indentation**: 4 spaces, no tabs. 42 | - **Naming**: 43 | - Functions: `camelCase` 44 | - Classes: `PascalCase` 45 | - Variables: `snake_case` 46 | - Constants: `ALL_CAPS` 47 | - **Documentation**: Use Doxygen-style comments (a short example illustrating these conventions follows at the end of this document). 48 | 49 | ### Branching and Merging 50 | 51 | - **`main`**: Latest stable release. 52 | - **`develop`**: Active development. Base your feature branches off `develop`. 53 | - **Feature branches**: Branch off `develop` and submit pull requests back to `develop`. 54 | - **Release branches**: Merged into `main` for new releases.
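### Code Style Example

A minimal sketch illustrating the conventions above; the class, function, and variable names are placeholders, not part of the FANS API.

```cpp
/** @brief Illustrative material wrapper following the FANS naming conventions. */
class ExampleMaterial {
  public:
    static constexpr double DEFAULT_TOLERANCE = 1e-10; // constants: ALL_CAPS

    /** @brief Scales a strain increment by the stored modulus. */
    double computeStressNorm(double strain_increment) const // functions: camelCase
    {
        double stress_norm = elastic_modulus * strain_increment; // variables: snake_case
        return stress_norm;
    }

  private:
    double elastic_modulus = 1.0;
};
```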
55 | -------------------------------------------------------------------------------- /FANS_Dashboard/README.md: -------------------------------------------------------------------------------- 1 | # FANS Dashboard 2 | 3 | The FANS Dashboard is a comprehensive tool designed to streamline the post-processing, interpretation, and visualization of results generated by the FANS solver. This Jupyter notebook provides a user-friendly environment for working with complex simulation data stored in HDF5 format, offering a step-by-step workflow that covers data extraction, postprocessing, visualization, and preparation for 3D visualization in tools like ParaView. 4 | 5 | For further details, follow along [`FANS_Dashboard.ipynb`](FANS_Dashboard.ipynb)
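The `fans_dashboard` helpers can also be used directly from Python. A minimal sketch, assuming the `fans-dashboard` package from this directory is installed in the active environment; the stress values below are made-up placeholders:

```python
import numpy as np

from fans_dashboard.core.postprocessing import compute_rank2tensor_measures

# A single stress state in Mandel notation: [s11, s22, s33, s12, s13, s23]
stress = np.array([[100.0, 0.0, 0.0, 0.0, 0.0, 0.0]])

measures = compute_rank2tensor_measures(stress, ["von_mises", "hydrostatic"])
print(measures["von_mises"])    # [100.]  (uniaxial stress: von Mises equals s11)
print(measures["hydrostatic"])  # [33.33333333]
```
6 | -------------------------------------------------------------------------------- /FANS_Dashboard/fans_dashboard/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataAnalyticsEngineering/FANS/3c681507289fe30d459fe6b378b2ccb6687414ab/FANS_Dashboard/fans_dashboard/__init__.py -------------------------------------------------------------------------------- /FANS_Dashboard/fans_dashboard/core/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataAnalyticsEngineering/FANS/3c681507289fe30d459fe6b378b2ccb6687414ab/FANS_Dashboard/fans_dashboard/core/__init__.py -------------------------------------------------------------------------------- /FANS_Dashboard/fans_dashboard/core/postprocessing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def compute_rank2tensor_measures(tensor_matrix, measures_to_compute=None): 5 | """ 6 | Computes various tensor measures from a given stress or strain tensor in Mandel notation. 7 | The user can specify which measures to compute. This function supports input tensors with arbitrary leading dimensions, 8 | as long as the last dimension is 6 (Mandel notation). 9 | 10 | Based on : https://doc.comsol.com/5.5/doc/com.comsol.help.sme/sme_ug_theory.06.16.html 11 | 12 | Parameters: 13 | - tensor_matrix: numpy array, tensor (stress or strain) in Mandel notation with shape (..., 6). 14 | The tensor should be organized as follows: 15 | [s11, s22, s33, s12, s13, s23] for stress or 16 | [e11, e22, e33, e12, e13, e23] for strain. 17 | - measures_to_compute: list of strings, optional, specifying which measures to compute. 18 | If not provided, default measures ['von_mises', 'hydrostatic', 'deviatoric'] will be computed. 19 | Available options include: 20 | - 'von_mises': Computes the von Mises stress/strain. 21 | - 'hydrostatic': Computes the hydrostatic stress/strain. 22 | - 'deviatoric': Computes the deviatoric stress/strain. 23 | - 'principal': Computes the principal stresses/strains (eigenvalues). 24 | - 'max_shear': Computes the maximum shear stress/strain. 25 | - 'I_invariants': Computes the I1, I2, I3 invariants. 26 | - 'J_invariants': Computes the J1, J2, J3 invariants of the deviatoric tensor. 27 | - 'eigenvalues': Computes the eigenvalues of the stress/strain tensor. 28 | - 'eigenvectors': Computes the eigenvectors of the stress/strain tensor. 29 | - 'lode_angle': Computes the Lode angle, useful in advanced plasticity models. 30 | 31 | Returns: 32 | - result: dictionary, keys are the requested measure names and values are the computed measures.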
33 | Each returned measure will have the same leading dimensions as the input tensor_matrix, 34 | with the last dimension adjusted based on the measure (e.g., eigenvalues will have an extra dimension for the 3 components). 35 | """ 36 | if measures_to_compute is None: 37 | measures_to_compute = ["von_mises", "hydrostatic", "deviatoric"] 38 | 39 | original_shape = tensor_matrix.shape[:-1] # All dimensions except the last one 40 | tensor_matrix = tensor_matrix.reshape(-1, 6) # Flatten to (N, 6) for processing 41 | 42 | result = {} 43 | 44 | # Hydrostatic stress/strain (mean of the diagonal components) 45 | hydrostatic = np.mean(tensor_matrix[:, :3], axis=1) 46 | if "hydrostatic" in measures_to_compute: 47 | result["hydrostatic"] = hydrostatic.reshape(original_shape) 48 | 49 | # Deviatoric stress/strain and von Mises stress/strain 50 | deviatoric = tensor_matrix[:, :3] - hydrostatic[:, np.newaxis] 51 | deviatoric_shear = tensor_matrix[:, 3:6] 52 | deviatoric_tensor = np.hstack([deviatoric, deviatoric_shear]) 53 | if "deviatoric" in measures_to_compute: 54 | result["deviatoric"] = deviatoric_tensor.reshape(original_shape + (6,)) 55 | 56 | if "von_mises" in measures_to_compute: 57 | von_mises = np.sqrt( 58 | 0.5 59 | * ( 60 | (deviatoric[:, 0] - deviatoric[:, 1]) ** 2 61 | + (deviatoric[:, 1] - deviatoric[:, 2]) ** 2 62 | + (deviatoric[:, 2] - deviatoric[:, 0]) ** 2 63 | + 6 64 | * ( 65 | deviatoric_shear[:, 0] ** 2 66 | + deviatoric_shear[:, 1] ** 2 67 | + deviatoric_shear[:, 2] ** 2 68 | ) 69 | ) 70 | ) 71 | result["von_mises"] = von_mises.reshape(original_shape) 72 | 73 | # Compute I1, I2, I3 invariants if requested 74 | if "I_invariants" in measures_to_compute: 75 | I1 = np.sum(tensor_matrix[:, :3], axis=1) 76 | I2 = ( 77 | tensor_matrix[:, 0] * tensor_matrix[:, 1] 78 | + tensor_matrix[:, 1] * tensor_matrix[:, 2] 79 | + tensor_matrix[:, 2] * tensor_matrix[:, 0] 80 | - tensor_matrix[:, 3] ** 2 81 | - tensor_matrix[:, 4] ** 2 82 | - tensor_matrix[:, 5] ** 2 83 | ) 84 | if "full_tensor" not in locals(): 85 | full_tensor = mandel_to_matrix(tensor_matrix) 86 | I3 = np.linalg.det(full_tensor) 87 | result["I_invariants"] = np.stack([I1, I2, I3], axis=-1).reshape( 88 | original_shape + (3,) 89 | ) 90 | 91 | # Compute J1, J2, J3 invariants if requested 92 | if "J_invariants" in measures_to_compute or "lode_angle" in measures_to_compute: 93 | J1 = np.sum(deviatoric_tensor[:, :3], axis=1) 94 | J2 = 0.5 * np.sum(deviatoric**2 + 2 * deviatoric_shear**2, axis=1) 95 | full_deviatoric_tensor = mandel_to_matrix(deviatoric_tensor) 96 | J3 = np.linalg.det(full_deviatoric_tensor) 97 | result["J_invariants"] = np.stack([J1, J2, J3], axis=-1).reshape( 98 | original_shape + (3,) 99 | ) 100 | 101 | # Principal stresses/strains, maximum shear, eigenvalues, and eigenvectors 102 | if any( 103 | measure in measures_to_compute 104 | for measure in ["principal", "max_shear", "eigenvalues", "eigenvectors"] 105 | ): 106 | full_tensor = mandel_to_matrix(tensor_matrix) 107 | eigenvalues, eigenvectors = np.linalg.eigh(full_tensor) 108 | if "principal" in measures_to_compute: 109 | result["principal"] = eigenvalues.reshape(original_shape + (3,)) 110 | if "max_shear" in measures_to_compute: 111 | max_shear = 0.5 * (eigenvalues[:, 2] - eigenvalues[:, 0]) 112 | result["max_shear"] = max_shear.reshape(original_shape) 113 | if "eigenvalues" in measures_to_compute: 114 | result["eigenvalues"] = eigenvalues.reshape(original_shape + (3,)) 115 | if "eigenvectors" in measures_to_compute: 116 | result["eigenvectors"] = 
eigenvectors.reshape(original_shape + (3, 3)) 117 | 118 | # Lode angle calculation 119 | if "lode_angle" in measures_to_compute: 120 | if "J2" not in locals(): # Compute J2 if not already computed 121 | J2 = 0.5 * np.sum(deviatoric**2 + 2 * deviatoric_shear**2, axis=1) 122 | if "J3" not in locals(): # Compute J3 if not already computed 123 | full_deviatoric_tensor = mandel_to_matrix(deviatoric_tensor) 124 | J3 = np.linalg.det(full_deviatoric_tensor) 125 | # Handle very small J2 values to prevent division by zero 126 | safe_J2 = np.where(J2 > 1e-12, J2, 1e-12) 127 | sqrt_3_3 = (3 * np.sqrt(3)) / 2 128 | cos_3theta = np.clip(sqrt_3_3 * (J3 / safe_J2 ** (3 / 2)), -1, 1) 129 | lode_angle = (1.0 / 3.0) * np.arccos(cos_3theta) 130 | result["lode_angle"] = lode_angle.reshape(original_shape) 131 | 132 | return result 133 | 134 | 135 | def mandel_to_matrix(mandel_tensor): 136 | """ 137 | Convert a tensor from Mandel notation to a full 3x3 matrix. 138 | 139 | Parameters: 140 | - mandel_tensor: numpy array, tensor in Mandel notation with shape (n_steps, 6). 141 | The tensor should be organized as follows: 142 | [s11, s22, s33, s12, s13, s23] for stress or 143 | [e11, e22, e33, e12, e13, e23] for strain. 144 | 145 | Returns: 146 | - full_tensor: numpy array, tensor in full 3x3 matrix form with shape (n_steps, 3, 3). 147 | """ 148 | full_tensor = np.zeros((mandel_tensor.shape[0], 3, 3)) 149 | full_tensor[:, 0, 0] = mandel_tensor[:, 0] # s11 or e11 150 | full_tensor[:, 1, 1] = mandel_tensor[:, 1] # s22 or e22 151 | full_tensor[:, 2, 2] = mandel_tensor[:, 2] # s33 or e33 152 | full_tensor[:, 0, 1] = full_tensor[:, 1, 0] = mandel_tensor[:, 3] / np.sqrt( 153 | 2 154 | ) # s12 or e12 155 | full_tensor[:, 0, 2] = full_tensor[:, 2, 0] = mandel_tensor[:, 4] / np.sqrt( 156 | 2 157 | ) # s13 or e13 158 | full_tensor[:, 1, 2] = full_tensor[:, 2, 1] = mandel_tensor[:, 5] / np.sqrt( 159 | 2 160 | ) # s23 or e23 161 | return full_tensor 162 | 163 | 164 | def matrix_to_mandel(full_tensor, tolerance=1e-8): 165 | """ 166 | Convert a full 3x3 symmetric tensor to Mandel notation in a vectorized and efficient way. 167 | 168 | Parameters: 169 | - full_tensor: numpy array, tensor in full 3x3 matrix form with shape (n_steps, 3, 3). 170 | - tolerance: float, optional, tolerance for checking symmetry. Default is 1e-8. 171 | 172 | Returns: 173 | - mandel_tensor: numpy array, tensor in Mandel notation with shape (n_steps, 6). 174 | The tensor will be organized as follows: 175 | [s11, s22, s33, s12, s13, s23] for stress or 176 | [e11, e22, e33, e12, e13, e23] for strain. 177 | 178 | Raises: 179 | - ValueError: if any of the tensors in the batch are not symmetric within the specified tolerance. 180 | """ 181 | # Check if the tensors are symmetric within the given tolerance 182 | if not np.allclose(full_tensor, full_tensor.transpose(0, 2, 1), atol=tolerance): 183 | raise ValueError( 184 | "One or more tensors are not symmetric within the specified tolerance." 
185 | ) 186 | 187 | # Efficiently extract and scale the relevant components 188 | mandel_tensor = np.zeros((full_tensor.shape[0], 6)) 189 | mandel_tensor[:, 0] = full_tensor[:, 0, 0] # s11 or e11 190 | mandel_tensor[:, 1] = full_tensor[:, 1, 1] # s22 or e22 191 | mandel_tensor[:, 2] = full_tensor[:, 2, 2] # s33 or e33 192 | mandel_tensor[:, 3] = np.sqrt(2) * full_tensor[:, 0, 1] # s12 or e12 193 | mandel_tensor[:, 4] = np.sqrt(2) * full_tensor[:, 0, 2] # s13 or e13 194 | mandel_tensor[:, 5] = np.sqrt(2) * full_tensor[:, 1, 2] # s23 or e23 195 | 196 | return mandel_tensor 197 | -------------------------------------------------------------------------------- /FANS_Dashboard/fans_dashboard/plotting/PlotYoungsModulus.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import plotly.graph_objs as go 3 | import meshio 4 | 5 | 6 | def compute_YoungsModulus3D(C_batch): 7 | """ 8 | Compute Young's modulus for all directions in 3D for a batch of stiffness tensors. 9 | 10 | Args: 11 | C_batch (ndarray): Batch of stiffness tensors in Mandel notation, shape (n, 6, 6). 12 | 13 | Returns: 14 | tuple: A tuple containing: 15 | - X_batch (ndarray): X-coordinates for plotting the modulus surface, shape (n, n_theta, n_phi). 16 | - Y_batch (ndarray): Y-coordinates for plotting the modulus surface, shape (n, n_theta, n_phi). 17 | - Z_batch (ndarray): Z-coordinates for plotting the modulus surface, shape (n, n_theta, n_phi). 18 | - E_batch (ndarray): Young's modulus in all directions, shape (n, n_theta, n_phi). 19 | """ 20 | n = C_batch.shape[0] 21 | n_theta = 180 22 | n_phi = 360 23 | 24 | theta = np.linspace(0, np.pi, n_theta) 25 | phi = np.linspace(0, 2 * np.pi, n_phi) 26 | theta_grid, phi_grid = np.meshgrid(theta, phi, indexing="ij") 27 | 28 | d_x = np.sin(theta_grid) * np.cos(phi_grid) # Shape (n_theta, n_phi) 29 | d_y = np.sin(theta_grid) * np.sin(phi_grid) 30 | d_z = np.cos(theta_grid) 31 | 32 | N = np.stack( 33 | ( 34 | d_x**2, 35 | d_y**2, 36 | d_z**2, 37 | np.sqrt(2) * d_x * d_y, 38 | np.sqrt(2) * d_x * d_z, 39 | np.sqrt(2) * d_y * d_z, 40 | ), 41 | axis=-1, 42 | ) # Shape (n_theta, n_phi, 6) 43 | 44 | N_flat = N.reshape(-1, 6) # Shape (n_points, 6) 45 | 46 | # Invert stiffness tensors to get compliance tensors 47 | S_batch = np.linalg.inv(C_batch) # Shape (n, 6, 6) 48 | 49 | # Compute E for each tensor in the batch 50 | NSN = np.einsum("pi,nij,pj->np", N_flat, S_batch, N_flat) # Shape (n, n_points) 51 | E_batch = 1.0 / NSN # Shape (n, n_points) 52 | 53 | # Reshape E_batch back to (n, n_theta, n_phi) 54 | E_batch = E_batch.reshape(n, *d_x.shape) 55 | 56 | X_batch = E_batch * d_x # Shape (n, n_theta, n_phi) 57 | Y_batch = E_batch * d_y 58 | Z_batch = E_batch * d_z 59 | 60 | return X_batch, Y_batch, Z_batch, E_batch 61 | 62 | 63 | def plot_YoungsModulus3D(C, title="Young's Modulus Surface"): 64 | """ 65 | Plot a 3D surface of Young's modulus. 66 | 67 | Args: 68 | C (ndarray): Stiffness tensor in Mandel notation. Can be a single tensor of shape (6,6) or a batch of tensors of shape (n,6,6). 69 | title (str): Title of the plot. 70 | 71 | Raises: 72 | ValueError: If C is not of shape (6,6) or (1,6,6). 73 | """ 74 | if C.shape == (6, 6): 75 | C_batch = C[np.newaxis, :, :] 76 | elif C.shape == (1, 6, 6): 77 | C_batch = C 78 | else: 79 | raise ValueError( 80 | "C must be either a (6,6) tensor or a batch with one tensor of shape (1,6,6)." 
81 | ) 82 | 83 | X_batch, Y_batch, Z_batch, E_batch = compute_YoungsModulus3D(C_batch) 84 | X, Y, Z, E = X_batch[0], Y_batch[0], Z_batch[0], E_batch[0] 85 | 86 | surface = go.Surface(x=X, y=Y, z=Z, surfacecolor=E, colorscale="Viridis") 87 | layout = go.Layout( 88 | title=title, 89 | scene=dict( 90 | xaxis=dict(title="X"), 91 | yaxis=dict(title="Y"), 92 | zaxis=dict(title="Z"), 93 | aspectmode="auto", 94 | ), 95 | ) 96 | 97 | fig = go.Figure(data=[surface], layout=layout) 98 | fig.show() 99 | 100 | 101 | def export_YoungsModulus3D_to_vtk(C, prefix="youngs_modulus_surface"): 102 | """ 103 | Export the computed Young's modulus surfaces to VTK files for Paraview visualization. 104 | 105 | Args: 106 | C (ndarray): Stiffness tensor in Mandel notation. Can be a single tensor of shape (6,6) or a batch of tensors of shape (n,6,6). 107 | prefix (str): Prefix for the output files. 108 | 109 | Returns: 110 | None 111 | """ 112 | X_batch, Y_batch, Z_batch, E_batch = compute_YoungsModulus3D(C) 113 | n, n_theta, n_phi = X_batch.shape 114 | 115 | for k in range(n): 116 | points = np.vstack( 117 | (X_batch[k].ravel(), Y_batch[k].ravel(), Z_batch[k].ravel()) 118 | ).T 119 | cells = [ 120 | ( 121 | "quad", 122 | np.array( 123 | [ 124 | [ 125 | i * n_phi + j, 126 | (i + 1) * n_phi + j, 127 | (i + 1) * n_phi + (j + 1), 128 | i * n_phi + (j + 1), 129 | ] 130 | for i in range(n_theta - 1) 131 | for j in range(n_phi - 1) 132 | ], 133 | dtype=np.int32, 134 | ), 135 | ) 136 | ] 137 | mesh = meshio.Mesh( 138 | points=points, 139 | cells=cells, 140 | point_data={"Youngs_Modulus": E_batch[k].ravel()}, 141 | ) 142 | filename = f"{prefix}_{k}.vtk" 143 | meshio.write(filename, mesh) 144 | print(f"Exported {filename}") 145 | 146 | 147 | def demoCubic(): 148 | """ 149 | Demonstrates the Young's modulus surface plotting routine for a cubic material (Copper). 150 | 151 | This function generates the stiffness tensor for a cubic material, specifically copper, 152 | and then plots the 3D Young's modulus surface using the generated tensor. 
153 | 154 | Args: 155 | None 156 | 157 | Returns: 158 | None 159 | """ 160 | P1 = np.zeros((6, 6)) 161 | P1[:3, :3] = 1.0 / 3.0 162 | D = np.diag([1, 1, 1, 0, 0, 0]) 163 | P2 = D - P1 164 | P3 = np.eye(6) - D 165 | 166 | # generate stiffness for a cubic material: copper 167 | l1, l2, l3 = 136.67, 46, 150 168 | C = 3 * l1 * P1 + l2 * P2 + l3 * P3 169 | 170 | # show the 3D Young's modulus plot for copper 171 | plot_YoungsModulus3D(C, title="Young's Modulus Surface for Copper") 172 | -------------------------------------------------------------------------------- /FANS_Dashboard/fans_dashboard/plotting/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataAnalyticsEngineering/FANS/3c681507289fe30d459fe6b378b2ccb6687414ab/FANS_Dashboard/fans_dashboard/plotting/__init__.py -------------------------------------------------------------------------------- /FANS_Dashboard/fans_dashboard/plotting/plotting.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import plotly.graph_objects as go 3 | from plotly.subplots import make_subplots 4 | 5 | 6 | def plot_subplots( 7 | data1, 8 | data2, 9 | labels_x=None, 10 | labels_y=None, 11 | subplot_titles=None, 12 | title="", 13 | nrows=None, 14 | ncols=None, 15 | linewidth=1, 16 | markersize=4, 17 | linecolor=None, 18 | markercolor=None, 19 | fontsize=12, 20 | fig=None, 21 | ): 22 | """ 23 | Plot a grid of subplots using Plotly, handling both single-component (scalar vs scalar) and multi-component data. 24 | 25 | Parameters: 26 | - data1: numpy array, first set of data to plot (e.g., strain, time) with shape (n_datapoints, n_plots) 27 | - data2: numpy array, second set of data to plot (e.g., stress) with shape (n_datapoints, n_plots) 28 | - labels_x: list of strings, labels for the x axes of each subplot (optional, default=None) 29 | - labels_y: list of strings, labels for the y axes of each subplot (optional, default=None) 30 | - subplot_titles: list of strings, titles for each subplot (optional, default=None) 31 | - title: string, title of the overall plot 32 | - nrows: int, number of rows in the subplot grid (optional) 33 | - ncols: int, number of columns in the subplot grid (optional) 34 | - linewidth: int, line width for the plots (optional, default=1) 35 | - markersize: int, size of the markers (optional, default=4) 36 | - linecolor: list of strings, colors of the lines for each subplot (optional, default=None, all blue) 37 | - markercolor: list of strings, colors of the markers for each subplot (optional, default=None, all blue) 38 | - fontsize: int, font size for axis labels, subplot titles, and tick labels (optional, default=12) 39 | - fig: existing Plotly figure to overlay the new subplots (optional, default=None, creates a new figure) 40 | """ 41 | # Validate data shapes 42 | if not isinstance(data1, np.ndarray) or not isinstance(data2, np.ndarray): 43 | raise ValueError("data1 and data2 must be numpy arrays.") 44 | 45 | if data1.shape[0] != data2.shape[0]: 46 | raise ValueError( 47 | "data1 and data2 must have the same number of data points (rows)." 48 | ) 49 | 50 | if data1.shape[1] != data2.shape[1]: 51 | raise ValueError( 52 | "data1 and data2 must have the same number of components (columns)." 
53 | ) 54 | 55 | # Set the number of components based on data shape 56 | n_components = data1.shape[1] 57 | 58 | # Initialize linecolor and markercolor lists if not provided 59 | if linecolor is None: 60 | linecolor = ["blue"] * n_components 61 | elif len(linecolor) != n_components: 62 | raise ValueError( 63 | f"The length of linecolor must match the number of components ({n_components})." 64 | ) 65 | 66 | if markercolor is None: 67 | markercolor = ["blue"] * n_components 68 | elif len(markercolor) != n_components: 69 | raise ValueError( 70 | f"The length of markercolor must match the number of components ({n_components})." 71 | ) 72 | 73 | # If nrows or ncols is not specified, determine an optimal grid layout 74 | if nrows is None or ncols is None: 75 | nrows = int(np.ceil(np.sqrt(n_components))) 76 | ncols = int(np.ceil(n_components / nrows)) 77 | 78 | # Handle subplot titles 79 | if subplot_titles is None: 80 | subplot_titles = [f"Component {i+1}" for i in range(n_components)] 81 | elif len(subplot_titles) != n_components: 82 | raise ValueError( 83 | f"The length of subplot_titles must match the number of components ({n_components})." 84 | ) 85 | 86 | # Handle labels_x and labels_y 87 | if labels_x is None: 88 | labels_x = [""] * n_components 89 | elif len(labels_x) != n_components: 90 | raise ValueError( 91 | f"The length of labels_x must match the number of components ({n_components})." 92 | ) 93 | 94 | if labels_y is None: 95 | labels_y = [""] * n_components 96 | elif len(labels_y) != n_components: 97 | raise ValueError( 98 | f"The length of labels_y must match the number of components ({n_components})." 99 | ) 100 | 101 | # Create the subplot figure if not provided 102 | if fig is None: 103 | fig = make_subplots(rows=nrows, cols=ncols, subplot_titles=subplot_titles) 104 | 105 | # Add traces for each component 106 | for i in range(n_components): 107 | row = i // ncols + 1 108 | col = i % ncols + 1 109 | fig.add_trace( 110 | go.Scatter( 111 | x=data1[:, i], 112 | y=data2[:, i], 113 | mode="lines+markers", 114 | marker=dict(symbol="x", size=markersize, color=markercolor[i]), 115 | line=dict(width=linewidth, color=linecolor[i]), 116 | name=f"Component {i+1}", 117 | ), 118 | row=row, 119 | col=col, 120 | ) 121 | 122 | # Update axes with text labels 123 | fig.update_xaxes( 124 | title_text=labels_x[i], 125 | row=row, 126 | col=col, 127 | showgrid=True, 128 | mirror=True, 129 | ticks="inside", 130 | tickwidth=2, 131 | ticklen=6, 132 | title_font=dict(size=fontsize), 133 | tickfont=dict(size=fontsize), 134 | automargin=True, 135 | ) 136 | fig.update_yaxes( 137 | title_text=labels_y[i], 138 | row=row, 139 | col=col, 140 | showgrid=True, 141 | mirror=True, 142 | ticks="inside", 143 | tickwidth=2, 144 | ticklen=6, 145 | title_font=dict(size=fontsize), 146 | tickfont=dict(size=fontsize), 147 | automargin=True, 148 | ) 149 | 150 | # Update layout with the overall plot title and styling 151 | fig.update_layout( 152 | height=1000, 153 | width=1600, 154 | title_text=title, 155 | title_font=dict(size=fontsize), 156 | showlegend=False, # Legends removed 157 | template="plotly_white", 158 | margin=dict(l=50, r=50, t=50, b=50), # Adjust margins to prevent overlap 159 | title_x=0.5, 160 | autosize=False, 161 | ) 162 | 163 | # Add a box outline around all subplots 164 | for i in range(1, nrows * ncols + 1): 165 | fig.update_xaxes( 166 | showline=True, 167 | linewidth=2, 168 | linecolor="black", 169 | row=(i - 1) // ncols + 1, 170 | col=(i - 1) % ncols + 1, 171 | ) 172 | fig.update_yaxes( 173 | 
showline=True, 174 | linewidth=2, 175 | linecolor="black", 176 | row=(i - 1) // ncols + 1, 177 | col=(i - 1) % ncols + 1, 178 | ) 179 | 180 | # Update subplot titles with the specified fontsize 181 | for annotation in fig["layout"]["annotations"]: 182 | annotation["font"] = dict(size=fontsize) 183 | 184 | # Return the figure for further customization or overlaying 185 | return fig 186 | -------------------------------------------------------------------------------- /FANS_Dashboard/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "fans-dashboard" 7 | version = "0.4.1" 8 | requires-python = ">=3.13" 9 | dependencies = [ 10 | "numpy>=2.2.5,<3", 11 | "h5py>=3.13.0,<4", 12 | "plotly>=6.0.1,<7", 13 | "lxml>=5.4.0,<6", 14 | "nbformat>=5.10.4,<6", 15 | "matplotlib>=3.10.1,<4", 16 | "scipy>=1.15.2,<2", 17 | "meshio>=5.3.5,<6", 18 | "ipykernel>=6.29.5,<7", 19 | "nbclient>=0.10.2,<0.11", 20 | ] 21 | 22 | [tool.hatch.build.targets.wheel] 23 | packages = ["fans_dashboard"] 24 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 
48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 
115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 166 | -------------------------------------------------------------------------------- /cmake/FANSConfig.cmake.in: -------------------------------------------------------------------------------- 1 | @PACKAGE_INIT@ 2 | 3 | set(CMAKE_MODULE_PATH_save "${CMAKE_MODULE_PATH}") 4 | list(INSERT CMAKE_MODULE_PATH 0 "${CMAKE_CURRENT_LIST_DIR}/modules") 5 | 6 | if ("$ENV{SETVARS_COMPLETED}" STREQUAL "1") 7 | message( 8 | WARNING 9 | "Intel OneAPI environment is active, which might lead to issues with MPI discovery." 
10 | ) 11 | endif () 12 | 13 | include(CMakeFindDependencyMacro) 14 | set(HDF5_ENABLE_PARALLEL ON) 15 | set(HDF5_PREFER_PARALLEL ON) 16 | find_dependency(HDF5 REQUIRED COMPONENTS C CXX) 17 | if (NOT HDF5_C_IS_PARALLEL) 18 | message(FATAL_ERROR "Parallel HDF5 implementation (mpi) required but not found!") 19 | endif() 20 | find_dependency(Eigen3) 21 | find_dependency(MPI) 22 | find_dependency(FFTW3 COMPONENTS DOUBLE MPI) 23 | 24 | set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH_save}") 25 | unset(CMAKE_MODULE_PATH_save) 26 | 27 | include(${CMAKE_CURRENT_LIST_DIR}/FANSTargets.cmake) 28 | 29 | get_target_property(FANS_LOCATION FANS::FANS LOCATION) 30 | message(STATUS "Found FANS: ${FANS_LOCATION} (found version \"@PROJECT_VERSION@\")") 31 | -------------------------------------------------------------------------------- /cmake/modules/FindFFTW3.cmake: -------------------------------------------------------------------------------- 1 | # ########################################################################################### 2 | # copied from: https://github.com/UCL/GreatCMakeCookOff/blob/master/modules/FindFFTW3.cmake # 3 | # ########################################################################################### 4 | 5 | # - Try to find FFTW3 6 | # 7 | # By default, it will look only for the serial libraries with single, double, 8 | # and long double precision. Any combination of precision (SINGLE, DOUBLE, 9 | # LONGDOUBLE) and library type (SERIAL, [THREADS|OPENMP], MPI) is possible by 10 | # using the COMPONENTS keyword. For example, 11 | # 12 | # find_package(FFTW3 COMPONENTS SINGLE DOUBLE OPENMP MPI) 13 | # 14 | # Once done this will define 15 | # FFTW3_FOUND - System has FFTW3 16 | # FFTW3_INCLUDE_DIRS - The FFTW3 include directories 17 | # FFTW3_LIBRARIES - The libraries needed to use FFTW3 18 | # FFTW3_DEFINITIONS - Compiler switches required for using FFTW3 19 | # FFTW3_$KIND_$PARALLEL_FOUND - Set if FFTW3 exists in KIND precision format for PARALLEL mode. 20 | # where KIND can be: SINGLE, DOUBLE, LONGDOUBLE 21 | # and PARALLEL: SERIAL, OPENMP, MPI, THREADS. 22 | # FFTW3_$KIND_$PARALLEL_LIBRARY - The libraries needed to use it. 23 | # FFTW3_INCLUDE_DIR_PARALLEL - The FFTW3 include directories for parallel mode.
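#
# Example usage in a consumer CMakeLists.txt (a sketch; `my_solver` is a
# hypothetical target name, and REQUIRED is deliberately omitted, see the
# FIXME at the bottom of this module):
#
#   find_package(FFTW3 COMPONENTS DOUBLE MPI)
#   if(FFTW3_DOUBLE_MPI_FOUND)
#       target_link_libraries(my_solver PRIVATE fftw3::double::mpi)
#   endif()
#
# The imported targets fftw3::<kind>::<parallel> are created by the
# find_specific_libraries macro below.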
24 | 25 | cmake_policy(SET CMP0054 NEW) 26 | 27 | if(FFTW3_FOUND) 28 | return() 29 | endif() 30 | 31 | if(FFTW3_INCLUDE_DIR AND FFTW3_LIBRARIES) 32 | set(FFTW3_FOUND TRUE) 33 | foreach(component ${FFTW3_FIND_COMPONENTS}) 34 | if("${FFTW3_${component}_LIBRARY}" STREQUAL "") 35 | set(FFTW3_${component}_LIBRARY "${FFTW3_LIBRARIES}") 36 | endif() 37 | endforeach() 38 | return() 39 | endif() 40 | 41 | macro(find_specific_libraries KIND PARALLEL) 42 | list(APPEND FFTW3_FIND_COMPONENTS ${KIND}_${PARALLEL}) 43 | if(NOT (${PARALLEL} STREQUAL "SERIAL") AND NOT ${PARALLEL}_FOUND) 44 | message(FATAL_ERROR "Please find ${PARALLEL} libraries before FFTW") 45 | endif() 46 | 47 | find_library(FFTW3_${KIND}_${PARALLEL}_LIBRARY NAMES 48 | fftw3${SUFFIX_${KIND}}${SUFFIX_${PARALLEL}}${SUFFIX_FINAL} HINTS ${HINT_DIRS}) 49 | if(FFTW3_${KIND}_${PARALLEL}_LIBRARY MATCHES fftw3) 50 | list(APPEND FFTW3_LIBRARIES ${FFTW3_${KIND}_${PARALLEL}_LIBRARY}) 51 | set(FFTW3_${KIND}_${PARALLEL}_FOUND TRUE) 52 | 53 | STRING(TOLOWER "${KIND}" kind) 54 | STRING(TOLOWER "${PARALLEL}" parallel) 55 | if(FFTW3_${KIND}_${PARALLEL}_LIBRARY MATCHES "\\.a$") 56 | add_library(fftw3::${kind}::${parallel} STATIC IMPORTED GLOBAL) 57 | else() 58 | add_library(fftw3::${kind}::${parallel} SHARED IMPORTED GLOBAL) 59 | endif() 60 | 61 | # MPI has a different include directory than the others; 62 | # FFTW3_INCLUDE_DIR_PARALLEL will change depending on which one is used. 63 | set(FFTW3_INCLUDE_DIR_PARALLEL ${FFTW3_INCLUDE_DIR} ) 64 | if(PARALLEL STREQUAL "MPI") 65 | set(FFTW3_INCLUDE_DIR_PARALLEL ${FFTW3_${PARALLEL}_INCLUDE_DIR}) 66 | endif() 67 | 68 | set_target_properties(fftw3::${kind}::${parallel} PROPERTIES 69 | IMPORTED_LOCATION "${FFTW3_${KIND}_${PARALLEL}_LIBRARY}" 70 | INTERFACE_INCLUDE_DIRECTORIES "${FFTW3_INCLUDE_DIR_PARALLEL}") 71 | 72 | # add target properties for the different cases 73 | ## MPI 74 | if(PARALLEL STREQUAL "MPI") 75 | if(MPI_C_LIBRARIES) 76 | set_target_properties(fftw3::${kind}::mpi PROPERTIES 77 | IMPORTED_LOCATION "${FFTW3_${KIND}_${PARALLEL}_LIBRARY}" 78 | INTERFACE_INCLUDE_DIRECTORIES "${FFTW3_INCLUDE_DIR_PARALLEL}" 79 | IMPORTED_LINK_INTERFACE_LIBRARIES ${MPI_C_LIBRARIES}) 80 | endif() 81 | endif() 82 | ## OpenMP 83 | if(PARALLEL STREQUAL "OPENMP") 84 | if(OPENMP_C_FLAGS) 85 | set_target_properties(fftw3::${kind}::${parallel} PROPERTIES 86 | IMPORTED_LOCATION "${FFTW3_${KIND}_${PARALLEL}_LIBRARY}" 87 | INTERFACE_INCLUDE_DIRECTORIES "${FFTW3_INCLUDE_DIR_PARALLEL}" 88 | INTERFACE_COMPILE_OPTIONS "${OPENMP_C_FLAGS}") 89 | endif() 90 | endif() 91 | ## THREADS 92 | if(PARALLEL STREQUAL "THREADS") 93 | if(CMAKE_THREAD_LIBS_INIT) # TODO: this is not running 94 | set_target_properties(fftw3::${kind}::${parallel} PROPERTIES 95 | IMPORTED_LOCATION "${FFTW3_${KIND}_${PARALLEL}_LIBRARY}" 96 | INTERFACE_INCLUDE_DIRECTORIES "${FFTW3_INCLUDE_DIR_PARALLEL}" 97 | INTERFACE_COMPILE_OPTIONS "${CMAKE_THREAD_LIBS_INIT}") 98 | endif() 99 | endif() 100 | endif() 101 | endmacro() 102 | 103 | 104 | 105 | 106 | if(NOT FFTW3_FIND_COMPONENTS) 107 | set(FFTW3_FIND_COMPONENTS SINGLE DOUBLE LONGDOUBLE SERIAL) 108 | endif() 109 | 110 | string(TOUPPER "${FFTW3_FIND_COMPONENTS}" FFTW3_FIND_COMPONENTS) 111 | 112 | list(FIND FFTW3_FIND_COMPONENTS SINGLE LOOK_FOR_SINGLE) 113 | list(FIND FFTW3_FIND_COMPONENTS DOUBLE LOOK_FOR_DOUBLE) 114 | list(FIND FFTW3_FIND_COMPONENTS LONGDOUBLE LOOK_FOR_LONGDOUBLE) 115 | list(FIND FFTW3_FIND_COMPONENTS THREADS LOOK_FOR_THREADS) 116 | list(FIND FFTW3_FIND_COMPONENTS OPENMP LOOK_FOR_OPENMP) 117 | list(FIND
FFTW3_FIND_COMPONENTS MPI LOOK_FOR_MPI) 118 | list(FIND FFTW3_FIND_COMPONENTS SERIAL LOOK_FOR_SERIAL) 119 | 120 | # FIXME - This may fail on computers without serial 121 | # Default to serial to obtain the version number 122 | set(LOOK_FOR_SERIAL 1) 123 | 124 | # set serial as default if no parallel component has been set 125 | if((LOOK_FOR_THREADS LESS 0) AND (LOOK_FOR_MPI LESS 0) AND 126 | (LOOK_FOR_OPENMP LESS 0)) 127 | set(LOOK_FOR_SERIAL 1) 128 | endif() 129 | 130 | if(MPI_C_FOUND) 131 | set(MPI_FOUND ${MPI_C_FOUND}) 132 | endif() 133 | unset(FFTW3_FIND_COMPONENTS) 134 | 135 | 136 | 137 | 138 | if(WIN32) 139 | set(HINT_DIRS ${FFTW3_DIRECTORY} $ENV{FFTW3_DIRECTORY}) 140 | else() 141 | find_package(PkgConfig) 142 | if(PKG_CONFIG_FOUND) 143 | pkg_check_modules(PC_FFTW QUIET fftw3) 144 | set(FFTW3_DEFINITIONS ${PC_FFTW3_CFLAGS_OTHER}) 145 | endif() 146 | set(HINT_DIRS ${PC_FFTW3_INCLUDEDIR} ${PC_FFTW3_INCLUDE_DIRS} 147 | ${FFTW3_INCLUDE_DIR} $ENV{FFTW3_INCLUDE_DIR} ) 148 | endif() 149 | 150 | find_path(FFTW3_INCLUDE_DIR NAMES fftw3.h HINTS ${HINT_DIRS}) 151 | if (LOOK_FOR_MPI) # Probably going to be the same directory as for fftw3.h 152 | find_path(FFTW3_MPI_INCLUDE_DIR NAMES fftw3-mpi.h HINTS ${HINT_DIRS}) 153 | endif() 154 | 155 | function(find_version OUTVAR LIBRARY SUFFIX) 156 | file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/fftw${SUFFIX}/main.c 157 | # TODO: do we need to add include for mpi headers? 158 | "#include <stdio.h> 159 | #include <fftw3.h> 160 | int main(int nargs, char const *argv[]) { 161 | printf(\"%s\", fftw${SUFFIX}_version); 162 | return 0; 163 | }" 164 | ) 165 | if(NOT CMAKE_CROSSCOMPILING) 166 | try_run(RUN_RESULT COMPILE_RESULT 167 | "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/fftw${SUFFIX}/" 168 | "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/fftw${SUFFIX}/main.c" 169 | CMAKE_FLAGS 170 | -DLINK_LIBRARIES=${LIBRARY} 171 | -DINCLUDE_DIRECTORIES=${FFTW3_INCLUDE_DIR} 172 | RUN_OUTPUT_VARIABLE OUTPUT 173 | COMPILE_OUTPUT_VARIABLE COUTPUT 174 | ) 175 | endif() 176 | if(RUN_RESULT EQUAL 0) 177 | string(REGEX REPLACE 178 | ".*([0-9]+\\.[0-9]+\\.[0-9]+).*" 179 | "\\1" VERSION_STRING "${OUTPUT}" 180 | ) 181 | set(${OUTVAR} ${VERSION_STRING} PARENT_SCOPE) 182 | endif() 183 | endfunction() 184 | 185 | set(SUFFIX_DOUBLE "") 186 | set(SUFFIX_SINGLE "f") 187 | set(SUFFIX_LONGDOUBLE "l") 188 | set(SUFFIX_SERIAL "") 189 | set(SUFFIX_OPENMP "_omp") 190 | set(SUFFIX_MPI "_mpi") 191 | set(SUFFIX_THREADS "_threads") 192 | set(SUFFIX_FINAL "") 193 | 194 | if(WIN32) 195 | set(SUFFIX_FINAL "-3") 196 | else() 197 | set(HINT_DIRS ${PC_FFTW3_LIBDIR} ${PC_FFTW3_LIBRARY_DIRS} 198 | $ENV{FFTW3_LIBRARY_DIR} ${FFTW3_LIBRARY_DIR} ) 199 | endif(WIN32) 200 | 201 | unset(FFTW3_LIBRARIES) 202 | set(FFTW3_INCLUDE_DIRS ${FFTW3_INCLUDE_DIR} ) # TODO: what is this for?
203 | set(FFTW3_FLAGS_C "") 204 | foreach(KIND SINGLE DOUBLE LONGDOUBLE) 205 | if(LOOK_FOR_${KIND} LESS 0) 206 | continue() 207 | endif() 208 | foreach(PARALLEL SERIAL MPI OPENMP THREADS) 209 | if(LOOK_FOR_${PARALLEL} LESS 0) 210 | continue() 211 | endif() 212 | find_specific_libraries(${KIND} ${PARALLEL}) 213 | endforeach() 214 | endforeach() 215 | 216 | if(FFTW3_INCLUDE_DIR) 217 | list(GET FFTW3_FIND_COMPONENTS 0 smallerrun) 218 | string(REPLACE "_" ";" RUNLIST ${smallerrun}) 219 | list(GET RUNLIST 0 KIND) 220 | list(GET RUNLIST 1 PARALLEL) 221 | unset(smallerrun) 222 | unset(RUNLIST) 223 | # the suffix is quoted so it passes through empty for DOUBLE, whose suffix is empty 224 | find_version(FFTW3_VERSION_STRING ${FFTW3_${KIND}_${PARALLEL}_LIBRARY} 225 | "${SUFFIX_${KIND}}") 226 | endif() 227 | 228 | # FIXME: fails if REQUIRED is used. 229 | include(FindPackageHandleStandardArgs) 230 | # handle the QUIETLY and REQUIRED arguments and set FFTW3_FOUND to TRUE 231 | # if all listed variables are TRUE 232 | find_package_handle_standard_args(FFTW3 233 | REQUIRED_VARS FFTW3_LIBRARIES FFTW3_INCLUDE_DIR 234 | VERSION_VAR FFTW3_VERSION_STRING 235 | HANDLE_COMPONENTS 236 | ) 237 | -------------------------------------------------------------------------------- /cmake/packaging/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # ############################################################################## 2 | # PACKAGING 3 | # ############################################################################## 4 | 5 | set(CPACK_OUTPUT_FILE_PREFIX "${CMAKE_BINARY_DIR}/packages") 6 | 7 | set(CPACK_PACKAGE_NAME "${PROJECT_NAME}") 8 | set(CPACK_PACKAGE_VENDOR "MIB DAE Stuttgart") 9 | set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "FANS - Fourier Accelerated Nodal Solver" CACHE STRING "Extended summary.") 10 | set(CPACK_PACKAGE_HOMEPAGE_URL "https://github.com/DataAnalyticsEngineering/FANS") 11 | set(CPACK_DEBIAN_PACKAGE_MAINTAINER "MIB DAE Stuttgart") 12 | 13 | set(CPACK_PACKAGE_INSTALL_DIRECTORY ${CPACK_PACKAGE_NAME}) 14 | set(CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR}) 15 | set(CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR}) 16 | set(CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH}) 17 | set(CPACK_VERBATIM_VARIABLES YES) 18 | set(CPACK_DEBIAN_FILE_NAME DEB-DEFAULT) 19 | 20 | # set(CPACK_PACKAGE_DESCRIPTION_FILE ${CMAKE_CURRENT_LIST_DIR}/Description.txt) 21 | # set(CPACK_RESOURCE_FILE_WELCOME ${CMAKE_CURRENT_LIST_DIR}/Welcome.txt) 22 | # set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_CURRENT_LIST_DIR}/License.txt) 23 | # set(CPACK_RESOURCE_FILE_README ${CMAKE_CURRENT_LIST_DIR}/Readme.txt) 24 | 25 | set(CPACK_DEB_COMPONENT_INSTALL ON) 26 | set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON) 27 | 28 | set(CPACK_DEBIAN_FANS_RUNTIME_PACKAGE_NAME "fans") 29 | set(CPACK_DEBIAN_FANS_DEVELOPMENT_PACKAGE_NAME "fans-dev") 30 | 31 | # this option automatically computes the dependencies of shared libraries (by looking at the libs they are themselves 32 | # linked to). Requires 'dpkg-shlibdeps' to be available. 33 | set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON) 34 | 35 | # this package is not autodetected but is required for MPI to function properly 36 | set(CPACK_DEBIAN_FANS_RUNTIME_PACKAGE_DEPENDS "openmpi-bin") 37 | 38 | # add header packages of dependencies as recommended (they are required to build against the FANS headers).
39 | set(CPACK_DEBIAN_FANS_DEVELOPMENT_PACKAGE_DEPENDS "libhdf5-dev, libopenmpi-dev, libeigen3-dev, libfftw3-dev, libfftw3-mpi-dev") 40 | 41 | include(CPack) 42 | 43 | cpack_add_component(FANS_Runtime 44 | DISPLAY_NAME "FANS Runtime" 45 | DESCRIPTION "FANS shared library and executable" 46 | REQUIRED 47 | INSTALL_TYPES Full Developer Minimal 48 | ) 49 | cpack_add_component(FANS_Development 50 | DISPLAY_NAME "FANS Development" 51 | DESCRIPTION "FANS headers and CMake files" 52 | DEPENDS FANS_Runtime 53 | INSTALL_TYPES Full Developer 54 | ) 55 | cpack_add_install_type(Full) 56 | cpack_add_install_type(Minimal) 57 | cpack_add_install_type(Developer) 58 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # During build time, don't ask for user input (has to be included in every stage 2 | # to take effect) 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | ARG UBUNTU_VERSION=noble 5 | ARG USER=fans 6 | 7 | ################################################################################ 8 | 9 | FROM ubuntu:${UBUNTU_VERSION} AS fans_base 10 | ARG DEBIAN_FRONTEND 11 | ARG USER 12 | 13 | # Context: https://askubuntu.com/questions/1513927/ubuntu-24-04-docker-images-now-includes-user-ubuntu-with-uid-gid-1000 14 | RUN bash -c 'if id "ubuntu" &>/dev/null; then \ 15 | touch /var/mail/ubuntu && \ 16 | chown ubuntu /var/mail/ubuntu && \ 17 | userdel -r ubuntu && \ 18 | echo "Deleted user ubuntu."; \ 19 | fi' 20 | 21 | # Create a non-root user 22 | RUN useradd -m -s /bin/bash ${USER} 23 | 24 | ################################################################################ 25 | 26 | FROM fans_base AS fans_ci 27 | ARG DEBIAN_FRONTEND 28 | 29 | RUN apt-get update -qq && apt-get install -y --no-install-recommends \ 30 | # Build basics 31 | software-properties-common \ 32 | build-essential \ 33 | # CMake + git for FetchContent + file for CPack 34 | cmake \ 35 | git \ 36 | file \ 37 | # FANS dependencies \ 38 | libhdf5-dev \ 39 | libopenmpi-dev \ 40 | libeigen3-dev \ 41 | libfftw3-dev \ 42 | libfftw3-mpi-dev \ 43 | # Required for preCICE Micro Manager Python bindings 44 | python3-dev \ 45 | # Clean up 46 | && apt-get clean \ 47 | && apt-get autoremove --purge -y \ 48 | && rm -rf /var/lib/apt/lists/* 49 | 50 | ################################################################################ 51 | 52 | FROM fans_ci AS fans_dev 53 | ARG DEBIAN_FRONTEND 54 | ARG USER 55 | ARG FANS_venv=FANS_venv 56 | 57 | RUN apt-get update -qq && apt-get install -y --no-install-recommends \ 58 | # Packages required for setting up the non-root user 59 | sudo \ 60 | gosu \ 61 | # Some additional packages for convenience 62 | time \ 63 | htop \ 64 | vim \ 65 | python3-pip \ 66 | python3-venv \ 67 | python-is-python3 \ 68 | # Clean up 69 | && apt-get clean \ 70 | && apt-get autoremove --purge -y \ 71 | && rm -rf /var/lib/apt/lists/* 72 | 73 | # Create a python venv for test/h52xdmf.py script 74 | USER ${USER} 75 | 76 | RUN python -m venv /home/${USER}/venvs/${FANS_venv} && \ 77 | echo "\nsource /home/${USER}/venvs/${FANS_venv}/bin/activate\n" >> /home/${USER}/.bashrc && \ 78 | . 
/home/${USER}/venvs/${FANS_venv}/bin/activate && \ 79 | python -m pip install --no-cache-dir h5py lxml 80 | 81 | USER root 82 | 83 | # Add fans user to sudoers 84 | RUN echo ${USER} ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/${USER} \ 85 | && chmod 440 /etc/sudoers.d/${USER} 86 | 87 | # Entrypoint script changes UID and GID to match given host UID and GID 88 | COPY --chmod=755 docker/Dockerfile_user_env_entrypoint.sh /entrypoint.sh 89 | ENTRYPOINT ["/entrypoint.sh"] 90 | 91 | CMD ["bash"] 92 | -------------------------------------------------------------------------------- /docker/Dockerfile_user_env_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash --login 2 | 3 | # Abort script at first error, when a command exits with non-zero status (except in until or while loops, if-tests, list constructs) 4 | set -e 5 | 6 | ### workaround to fix permissions in mounted volumes ### 7 | # This is necessary because the user in the container has a different UID and GID than the user on the host. 8 | # USAGE: docker run -e HOST_UID=$(id -u) -e HOST_GID=$(id -g) ... 9 | # open issue on this topic: https://github.com/docker/roadmap/issues/398 10 | hostgroup="hostgroup" 11 | container_user="fans" 12 | 13 | if [ "$(id -u -n)" = "root" ]; then 14 | if [ -n "$HOST_UID" ] && [ -n "$HOST_GID" ]; then 15 | echo "Setting UID and GID to match provided host UID and GID..." 16 | # echo "'id' before changes: $(id $container_user)" 17 | 18 | if ! getent group $hostgroup >/dev/null; then 19 | groupadd -o -g $HOST_GID $hostgroup 20 | fi 21 | 22 | old_group=$(id -g -n $container_user) 23 | 24 | if ! id -nG $container_user | grep -qw $hostgroup; then 25 | usermod -g $hostgroup $container_user 26 | fi 27 | 28 | if ! id -nG $container_user | grep -qw $old_group; then 29 | usermod -a -G $old_group $container_user 30 | fi 31 | 32 | if [ "$(id -u $container_user)" != "$HOST_UID" ]; then 33 | usermod -u $HOST_UID $container_user 34 | fi 35 | 36 | # echo "'id' after changes: $(id $container_user)" 37 | else 38 | echo "WARNING: Please provide HOST_UID and HOST_GID as environment variables (docker run -e)! UID and GID will not be changed. This will probably lead to permission issues with mounted volumes." 39 | fi 40 | else 41 | echo "WARNING: Can't change UID and GID to given host UID and GID. entrypoint.sh must run as root! UID and GID will not be changed. This will probably lead to permission issues with mounted volumes." 42 | fi 43 | 44 | # drop privileges and execute given commands as the user $container_user 45 | exec gosu $container_user "$@" 46 | -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | # Docker 2 | 3 | We provide a set of Docker images for different use cases on our [Dockerhub profile](https://hub.docker.com/u/unistuttgartdae): 4 | 5 | - **fans-ci**: Contains the minimum tools to build FANS (including dev packages of dependencies with the required headers), but does not include FANS itself. Meant for a CI workflow. 6 | - **fans-dev**: Based upon fans-ci, but offers a non-root user (`fans`) and UID/GID handling to avoid messing up permissions when volume-mounting into the container. Meant as a quick-to-set-up build environment for FANS. 7 | 8 | Both images are built for linux/amd64 and linux/arm64 as well as for the two most recent Ubuntu LTS versions (jammy and noble).
The Ubuntu version can be selected through tags, e.g. `fans-dev:jammy`; `noble` is equivalent to the `latest` tag. The architecture is selected automatically depending on your host platform. 9 | 10 | ## Set up a Container 11 | 12 | Set up a development container with your current working directory mounted into it (in there, use `git clone` to obtain the latest FANS version). You need to have [Docker Desktop](https://www.docker.com/products/docker-desktop/) installed on your machine. 13 | 14 | First, clone FANS: 15 | 16 | ```bash 17 | git clone https://github.com/DataAnalyticsEngineering/FANS.git 18 | cd FANS 19 | ``` 20 | 21 | Then we create the container using our `fans-dev` image. 22 | 23 | ### In a Linux, macOS or Windows Subsystem for Linux (WSL) Shell 24 | 25 | ```bash 26 | docker create --name fans-dev -it \ 27 | -e HOST_UID=$(id -u) \ 28 | -e HOST_GID=$(id -g) \ 29 | -v /etc/localtime:/etc/localtime:ro \ 30 | -v /etc/timezone:/etc/timezone:ro \ 31 | -v $PWD/:/FANS/ \ 32 | unistuttgartdae/fans-dev:latest 33 | ``` 34 | 35 | The `-e` options provide the entrypoint script of the container with your host user ID and GID, such that the user ID and GID inside the container can be adapted to match yours. This is done to not mess up file permissions in the mounted volumes. The two volume mounts of `/etc/localtime` and `/etc/timezone` are required to have the host date and time inside the container. 36 | 37 | ### In Windows PowerShell 38 | 39 | Using PowerShell is not recommended since it only has limited support for file permissions and completely ignores file ownership in the WSL->Container direction. 40 | 41 | ```bash 42 | docker create --name fans-dev -it ` 43 | --env HOST_UID=1000 ` 44 | --env HOST_GID=1000 ` 45 | --env TZ=Europe/Berlin ` 46 | --volume ${PWD}:/FANS/ ` 47 | unistuttgartdae/fans-dev 48 | ``` 49 | 50 | ## Working with the container 51 | 52 | The following workflow is suggested: work on the code as usual on your host, and attach to the container only to build and run FANS: 53 | 54 | ```bash 55 | docker start fans-dev 56 | docker attach fans-dev 57 | 58 | cd /FANS 59 | mkdir build 60 | cd build 61 | cmake .. 62 | cmake --build . -j 63 | 64 | cd ../test 65 | ./FANS 66 | ./run_tests.sh 67 | cat nohup_test_*.log 68 | ``` 69 | 70 | For convenience, we added some basic utilities to our `fans-dev` image, including `htop`, `vim` and `python`. 71 | 72 | ### Attaching Visual Studio Code 73 | 74 | You can attach VS Code to the newly created container in order to work directly inside the container. This has the benefit that IntelliSense and other static analysis tools have access to all the headers of FANS' dependencies, which would not be possible when developing on the host and only using the container for building FANS. 75 | 76 | To attach VS Code you need to install the `Remote Development Extension Pack` and the `Docker` Extension. Then open the Docker menu, right-click our newly created `fans-dev` container and select "Start" (if not running already) and then "Attach Visual Studio Code". 77 | 78 | After attaching VS Code you unfortunately are user `root` in VS Code due to the way the UID and GID mapping is implemented: the container starts as root, executes the entrypoint script which changes UID and GID, and only then drops privileges using `gosu`. VS Code, however, skips the entrypoint script and thus doesn't switch to the non-root user `fans`. You can do so manually by typing `gosu fans bash` in your terminal sessions inside VS Code, as shown below.
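For example, to switch and verify the user in a VS Code terminal (a minimal sketch; `whoami` and `id` are standard tools):

```bash
gosu fans bash   # drop from root to the fans user
whoami           # -> fans
id               # UID and GID should now match your host user
```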
79 | 80 | For further reading and alternative approaches, like a full DevContainer setup, have a look at: 81 | 82 | - [Developing inside a Container](https://code.visualstudio.com/docs/devcontainers/containers) 83 | - [Attach to a running Container](https://code.visualstudio.com/docs/devcontainers/attach-container) 84 | - [Specifying the default container user](https://code.visualstudio.com/remote/advancedcontainers/add-nonroot-user#_specifying-the-default-container-user) 85 | 86 | ### Calling Containerized FANS from the Host 87 | 88 | By building inside the container, FANS is linked against the container's libs and therefore must run inside the container. After attaching to the container you can then continue to use FANS as described in the main [README](../README.md#usage). Just remember that any input and output files need to be visible to the container and thus must lie somewhere inside the mounted volumes. 89 | 90 | Special care has to be taken if you need to use FANS within scripts on the host, as Docker's interactive mode (`-i`) is not suitable in this case. Instead, you need to use `docker exec`. You basically replace the original `FANS` call with `docker exec -u fans -w /FANS/test fans-dev [original call]`. For example, in conjunction with `nohup`: 91 | 92 | ```bash 93 | docker start fans-dev 94 | nohup /usr/bin/time -v docker exec -u fans -w /FANS/test fans-dev [original call] & 95 | docker stop fans-dev 96 | ``` 97 | -------------------------------------------------------------------------------- /docs/ReleaseGuide.md: -------------------------------------------------------------------------------- 1 | # Guide to release a new version of FANS 2 | 3 | The developer who is releasing a new version of FANS is expected to follow this workflow: 4 | 5 | The release of the `FANS` repository is made directly from a release branch called `FANS-v1.2.3`. This branch is mainly needed to help other developers with testing. 6 | 7 | 1. Create a branch called `FANS-v1.2.3` from the latest commit of the `develop` branch. 8 | 9 | 2. Bump the version in the `CHANGELOG.md`, the base `CMakeLists.txt`, and in the file `FANS_Dashboard/pyproject.toml` on the branch `FANS-v1.2.3`. 10 | 11 | 3. Assuming you have pixi installed, run the command `pixi lock` in the base directory to update the version of the FANS dashboard in the pixi lock file. 12 | 13 | 4. If it is a real release, [open a Pull Request `main` <-- `FANS-v1.2.3`](https://github.com/DataAnalyticsEngineering/FANS/compare/main...main) named after the version (i.e. `Release v1.2.3`) and briefly describe the new features of the release in the PR description. 14 | 15 | 5. [Draft a new release](https://github.com/DataAnalyticsEngineering/FANS/releases/new) in the `Releases` section of the repository page in a web browser. The release tag needs to be the exact version number (i.e. `v1.2.3` or `v1.2.3rc1`, compare to [existing tags](https://github.com/DataAnalyticsEngineering/FANS/tags)). Use `@target:main`. Release title is also the version number (i.e. `v1.2.3` or `v1.2.3rc1`, compare to [existing releases](https://github.com/DataAnalyticsEngineering/FANS/tags)). 16 | 17 | * *Note:* If it is a pre-release then the option *This is a pre-release* needs to be selected at the bottom of the page. Use `@target:FANS-v1.2.3` for a pre-release, since we will never merge a pre-release into `main`. 18 | * Use the `Auto-generate release notes` feature. 19 | 20 | a) If a pre-release is made: Directly hit the "Publish release" button in your Release Draft.
21 | 22 | b) If this is a "real" release: As soon as one approving review is made, merge the release PR (from `FANS-v1.2.3`) into `main`. 23 | 24 | 6. Merge `main` into `develop` to synchronize `develop`. 25 | 26 | 7. If everything is in order up to this point then the new version can be released by hitting the "Publish release" button in your Release Draft. This will create the corresponding tag. 27 | -------------------------------------------------------------------------------- /docs/images/FANS_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataAnalyticsEngineering/FANS/3c681507289fe30d459fe6b378b2ccb6687414ab/docs/images/FANS_example.png -------------------------------------------------------------------------------- /include/general.h: -------------------------------------------------------------------------------- 1 | 2 | 3 | #ifndef GENERAL_H_ 4 | #define GENERAL_H_ 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | using namespace std; 18 | 19 | // JSON 20 | #include 21 | using nlohmann::json; 22 | using namespace nlohmann; 23 | 24 | // Packages 25 | #include "fftw3-mpi.h" 26 | #include "fftw3.h" // this includes the serial fftw as well as mpi header files! See https://fftw.org/doc/MPI-Files-and-Data-Types.html 27 | 28 | #include "H5Cpp.h" 29 | 30 | #include "reader.h" 31 | 32 | #include 33 | using namespace Eigen; 34 | 35 | #include "mpi.h" 36 | #include "sys/stat.h" 37 | 38 | #endif 39 | 40 | #ifndef FANS_MALLOC_H 41 | #define FANS_MALLOC_H 42 | 43 | /* Usage: V *data = FANS_malloc<V>(n); */ 44 | template <typename V> 45 | inline V *FANS_malloc(std::size_t n) 46 | { 47 | if (n == 0) 48 | throw std::invalid_argument("FANS_malloc: zero-byte request"); 49 | void *p = fftw_malloc(n * sizeof(V)); // SIMD-friendly alignment 50 | if (!p) 51 | throw std::bad_alloc(); 52 | return static_cast<V *>(p); 53 | } 54 | template <typename V> 55 | inline void FANS_free(V *p) 56 | { 57 | fftw_free(p); 58 | } 59 | #endif // FANS_MALLOC_H 60 | 61 | #define VERBOSITY 0 62 | 63 | // #define EIGEN_RUNTIME_NO_MALLOC 64 | -------------------------------------------------------------------------------- /include/material_models/GBDiffusion.h: -------------------------------------------------------------------------------- 1 | #ifndef GBDIFFUSION_H 2 | #define GBDIFFUSION_H 3 | 4 | #include "matmodel.h" 5 | #include // For Eigen's aligned_allocator 6 | 7 | /** 8 | * @class GBDiffusion 9 | * @brief Material model for grain boundary diffusion in polycrystals 10 | * 11 | * This model implements diffusion in a polycrystalline material, differentiating between 12 | * bulk crystal diffusion (isotropic) and grain boundary diffusion (transversely isotropic). 13 | * The grain boundaries are characterized by their normal vectors and can have different 14 | * diffusion properties parallel and perpendicular to the boundary plane. 15 | * 16 | * The model extends both ThermalModel and LinearModel<1> to provide a linear diffusion 17 | * formulation that can be used in a thermal-like solver in FANS.
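 *
 * The grain-boundary diffusivity tensor assembled below is
 *     D_GB = D_par * (I - N N^T) + D_perp * N N^T,
 * where N is the unit normal of the grain boundary plane (see the constructor
 * and get_sigma()).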
18 | * 19 | * @details The model: 20 | * - Reads microstructure data containing grain boundaries from HDF5 files 21 | * - Supports uniform or material-specific diffusivity values 22 | * - Handles bulk regions with isotropic diffusion (D_bulk) 23 | * - Handles grain boundaries with transversely isotropic diffusion (D_par, D_perp) 24 | * - Provides visualization of grain boundary normals in post-processing 25 | * 26 | * Required material parameters in JSON format: 27 | * - GB_unformity: Boolean flag for uniform GB properties 28 | * 29 | * When GB_unformity is true (uniform properties): 30 | * { 31 | * "GB_unformity": true, 32 | * "D_bulk": 1.0, // Isotropic diffusion coefficient for all crystals 33 | * "D_par": 2.0, // Diffusion coefficient parallel to the grain boundary for all GBs 34 | * "D_perp": 0.5 // Diffusion coefficient perpendicular to the grain boundary for all GBs 35 | * } 36 | * 37 | * When GB_unformity is false (tag-specific properties): 38 | * { 39 | * "GB_unformity": false, 40 | * "D_bulk": [...], // Array of length (num_crystals + num_GB elements), but D_bulk is only used for crystals (0 to num_crystals) 41 | * "D_par": [...], // Array of length (num_crystals + num_GB elements), but D_par is only used for GBs (num_crystals to num_crystals + num_GB) 42 | * "D_perp": [...] // Array of length (num_crystals + num_GB elements), but D_perp is only used for GBs (num_crystals to num_crystals + num_GB) 43 | * } 44 | */ 45 | class GBDiffusion : public ThermalModel, public LinearModel<1> { 46 | public: 47 | GBDiffusion(Reader &reader) 48 | : ThermalModel(reader.l_e) 49 | { 50 | try { 51 | // Read num_crystals, num_GB and GBVoxelInfo from the microstructure dataset attributes 52 | H5::H5File file(reader.ms_filename, H5F_ACC_RDONLY); 53 | H5::DataSet ds = file.openDataSet(reader.ms_datasetname); 54 | ds.openAttribute("num_crystals").read(H5::PredType::NATIVE_INT64, &num_crystals); 55 | ds.openAttribute("num_GB").read(H5::PredType::NATIVE_INT64, &num_GB); 56 | std::string json_text; 57 | H5::Attribute attr = ds.openAttribute("GBVoxelInfo"); 58 | H5::StrType strType = attr.getStrType(); 59 | attr.read(strType, json_text); 60 | 61 | n_mat = num_crystals + num_GB; 62 | GBnormals = FANS_malloc(n_mat * 3); 63 | auto gbInfo = json::parse(json_text); 64 | for (auto &kv : gbInfo.items()) { 65 | int tag = kv.value().at("GB_tag").get(); 66 | auto &normal = kv.value()["GB_normal"]; 67 | 68 | GBnormals[(tag) * 3] = normal[0].get(); 69 | GBnormals[(tag) * 3 + 1] = normal[1].get(); 70 | GBnormals[(tag) * 3 + 2] = normal[2].get(); 71 | } 72 | GB_unformity = reader.materialProperties["GB_unformity"].get(); 73 | 74 | D_bulk.resize(n_mat, 0.0); 75 | D_par.resize(n_mat, 0.0); 76 | D_perp.resize(n_mat, 0.0); 77 | 78 | if (GB_unformity) { 79 | double bulk_val = reader.materialProperties["D_bulk"].get(); 80 | double par_val = reader.materialProperties["D_par"].get(); 81 | double perp_val = reader.materialProperties["D_perp"].get(); 82 | 83 | fill_n(D_bulk.begin(), num_crystals, bulk_val); 84 | fill_n(D_par.begin() + num_crystals, num_GB, par_val); 85 | fill_n(D_perp.begin() + num_crystals, num_GB, perp_val); 86 | } else { 87 | for (int i = 0; i < n_mat; ++i) { 88 | D_bulk[i] = reader.materialProperties["D_bulk"][i].get(); 89 | D_par[i] = reader.materialProperties["D_par"][i].get(); 90 | D_perp[i] = reader.materialProperties["D_perp"][i].get(); 91 | } 92 | } 93 | 94 | } catch (const std::exception &e) { 95 | throw std::runtime_error("Error in GBDiffusion initialization: " + std::string(e.what())); 96 | } 97 | 98 
| kapparef_mat = Matrix3d::Zero(3, 3); 99 | Matrix3d phase_kappa; 100 | phase_stiffness = new Matrix[n_mat]; 101 | 102 | for (size_t i = 0; i < n_mat; ++i) { 103 | phase_stiffness[i] = Matrix::Zero(); 104 | if (i < num_crystals) { 105 | // Bulk is isotropic 106 | phase_kappa = D_bulk[i] * Matrix3d::Identity(); 107 | } else if (i < n_mat) { 108 | // Grain boundary is transversely isotropic 109 | N = Vector3d(GBnormals[3 * i + 0], GBnormals[3 * i + 1], GBnormals[3 * i + 2]); 110 | N = N.normalized(); 111 | phase_kappa = D_par[i] * (Matrix3d::Identity() - N * N.transpose()) + D_perp[i] * N * N.transpose(); 112 | } else { 113 | throw std::runtime_error("GBDiffusion: Unknown material index"); 114 | } 115 | kapparef_mat += phase_kappa; 116 | for (int p = 0; p < 8; ++p) { 117 | phase_stiffness[i] += B_int[p].transpose() * phase_kappa * B_int[p] * v_e * 0.1250; 118 | } 119 | } 120 | kapparef_mat /= n_mat; 121 | } 122 | ~GBDiffusion() 123 | { 124 | FANS_free(GBnormals); 125 | delete[] phase_stiffness; 126 | phase_stiffness = nullptr; 127 | } 128 | 129 | void get_sigma(int i, int mat_index, ptrdiff_t element_idx) override 130 | { 131 | if (mat_index < num_crystals) { 132 | sigma.block<3, 1>(i, 0) = D_bulk[mat_index] * eps.block<3, 1>(i, 0); 133 | } else if (mat_index < n_mat) { 134 | const ptrdiff_t base_idx = 3 * mat_index; 135 | double nx = GBnormals[base_idx]; 136 | double ny = GBnormals[base_idx + 1]; 137 | double nz = GBnormals[base_idx + 2]; 138 | 139 | // Pre-compute products for the projector matrix (N⊗N) 140 | double nxnx = nx * nx; 141 | double nxny = nx * ny; 142 | double nxnz = nx * nz; 143 | double nyny = ny * ny; 144 | double nynz = ny * nz; 145 | double nznz = nz * nz; 146 | 147 | // Pre-compute coefficients 148 | double d_diff = D_par[mat_index] - D_perp[mat_index]; 149 | 150 | // Cache epsilon values to avoid repeated memory access 151 | double ex = eps(i, 0); 152 | double ey = eps(i + 1, 0); 153 | double ez = eps(i + 2, 0); 154 | 155 | // Calculate directly without constructing full matrices 156 | sigma(i, 0) = D_par[mat_index] * ex - d_diff * (nxnx * ex + nxny * ey + nxnz * ez); 157 | sigma(i + 1, 0) = D_par[mat_index] * ey - d_diff * (nxny * ex + nyny * ey + nynz * ez); 158 | sigma(i + 2, 0) = D_par[mat_index] * ez - d_diff * (nxnz * ex + nynz * ey + nznz * ez); 159 | } else { 160 | throw std::runtime_error("GBDiffusion: Unknown material index"); 161 | } 162 | } 163 | 164 | void postprocess(Solver<1> &solver, Reader &reader, const char *resultsFileName, int load_idx, int time_idx) override 165 | { 166 | // Write GBnormals to HDF5 file if requested 167 | if (find(reader.resultsToWrite.begin(), reader.resultsToWrite.end(), "GBnormals") != reader.resultsToWrite.end()) { 168 | double *GBnormals_field = FANS_malloc(solver.local_n0 * solver.n_y * solver.n_z * 3); 169 | for (ptrdiff_t element_idx = 0; element_idx < solver.local_n0 * solver.n_y * solver.n_z; ++element_idx) { 170 | int mat_index = solver.ms[element_idx]; 171 | if (mat_index >= num_crystals) { 172 | GBnormals_field[element_idx * 3] = GBnormals[3 * mat_index]; 173 | GBnormals_field[element_idx * 3 + 1] = GBnormals[3 * mat_index + 1]; 174 | GBnormals_field[element_idx * 3 + 2] = GBnormals[3 * mat_index + 2]; 175 | } 176 | } 177 | for (int i = 0; i < solver.world_size; ++i) { 178 | if (i == solver.world_rank) { 179 | char name[5096]; 180 | sprintf(name, "%s/load%i/time_step%i/GBnormals", reader.ms_datasetname, load_idx, time_idx); 181 | reader.WriteSlab(GBnormals_field, 3, resultsFileName, name); 182 | } 183 | 
MPI_Barrier(MPI_COMM_WORLD); 184 | } 185 | FANS_free(GBnormals_field); 186 | } 187 | } 188 | 189 | private: 190 | int num_crystals = 0; 191 | int num_GB = 0; 192 | bool GB_unformity; 193 | 194 | vector D_bulk; 195 | vector D_par; 196 | vector D_perp; 197 | 198 | double *GBnormals = nullptr; 199 | Vector3d N; 200 | }; 201 | 202 | #endif // GBDIFFUSION_H 203 | -------------------------------------------------------------------------------- /include/material_models/LinearElastic.h: -------------------------------------------------------------------------------- 1 | #ifndef LINEARELASTIC_H 2 | #define LINEARELASTIC_H 3 | 4 | #include "matmodel.h" 5 | #include // For Eigen's aligned_allocator 6 | 7 | class LinearElasticIsotropic : public MechModel, public LinearModel<3> { 8 | public: 9 | LinearElasticIsotropic(vector l_e, json materialProperties) 10 | : MechModel(l_e) 11 | { 12 | try { 13 | bulk_modulus = materialProperties["bulk_modulus"].get>(); 14 | mu = materialProperties["shear_modulus"].get>(); 15 | } catch (const std::exception &e) { 16 | throw std::runtime_error("Missing material properties for the requested material model."); 17 | } 18 | 19 | n_mat = bulk_modulus.size(); 20 | lambda.resize(n_mat); 21 | mu.resize(n_mat); 22 | 23 | for (int i = 0; i < n_mat; ++i) { 24 | lambda[i] = bulk_modulus[i] - (2.0 / 3.0) * mu[i]; 25 | } 26 | 27 | double lambda_ref = (*max_element(lambda.begin(), lambda.end()) + 28 | *min_element(lambda.begin(), lambda.end())) / 29 | 2; 30 | double mu_ref = (*max_element(mu.begin(), mu.end()) + 31 | *min_element(mu.begin(), mu.end())) / 32 | 2; 33 | 34 | kapparef_mat = Matrix::Zero(); 35 | kapparef_mat.topLeftCorner(3, 3).setConstant(lambda_ref); 36 | kapparef_mat += 2 * mu_ref * Matrix::Identity(); 37 | 38 | phase_stiffness = new Matrix[n_mat]; 39 | Matrix phase_kappa; 40 | 41 | for (int i = 0; i < n_mat; i++) { 42 | phase_kappa.setZero(); 43 | phase_stiffness[i] = Matrix::Zero(); 44 | 45 | phase_kappa.topLeftCorner(3, 3).setConstant(lambda[i]); 46 | phase_kappa += 2 * mu[i] * Matrix::Identity(); 47 | 48 | for (int p = 0; p < 8; ++p) { 49 | phase_stiffness[i] += B_int[p].transpose() * phase_kappa * B_int[p] * v_e * 0.1250; 50 | } 51 | } 52 | } 53 | 54 | void get_sigma(int i, int mat_index, ptrdiff_t element_idx) override 55 | { 56 | double buf1 = lambda[mat_index] * (eps(i, 0) + eps(i + 1, 0) + eps(i + 2, 0)); 57 | double buf2 = 2 * mu[mat_index]; 58 | sigma(i + 0, 0) = buf1 + buf2 * eps(i + 0, 0); 59 | sigma(i + 1, 0) = buf1 + buf2 * eps(i + 1, 0); 60 | sigma(i + 2, 0) = buf1 + buf2 * eps(i + 2, 0); 61 | sigma(i + 3, 0) = buf2 * eps(i + 3, 0); 62 | sigma(i + 4, 0) = buf2 * eps(i + 4, 0); 63 | sigma(i + 5, 0) = buf2 * eps(i + 5, 0); 64 | } 65 | 66 | private: 67 | vector bulk_modulus; 68 | vector lambda; 69 | vector mu; 70 | }; 71 | 72 | class LinearElasticTriclinic : public MechModel, public LinearModel<3> { 73 | public: 74 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW // Ensure proper alignment for Eigen structures 75 | 76 | LinearElasticTriclinic(vector l_e, json materialProperties) 77 | : MechModel(l_e) 78 | { 79 | vector C_keys = { 80 | "C_11", "C_12", "C_13", "C_14", "C_15", "C_16", 81 | "C_22", "C_23", "C_24", "C_25", "C_26", 82 | "C_33", "C_34", "C_35", "C_36", 83 | "C_44", "C_45", "C_46", 84 | "C_55", "C_56", 85 | "C_66"}; 86 | 87 | try { 88 | n_mat = materialProperties.at("C_11").get>().size(); 89 | size_t num_constants = C_keys.size(); 90 | 91 | // Initialize matrix to hold all constants 92 | C_constants.resize(num_constants, n_mat); 93 | 94 | // Load material 
constants into matrix 95 | for (size_t k = 0; k < num_constants; ++k) { 96 | const auto &values = materialProperties.at(C_keys[k]).get>(); 97 | if (values.size() != n_mat) { 98 | throw std::runtime_error("Inconsistent size for material property: " + C_keys[k]); 99 | } 100 | C_constants.row(k) = Eigen::Map(values.data(), values.size()); 101 | } 102 | } catch (const std::exception &e) { 103 | throw std::runtime_error("Missing or inconsistent material properties for the requested material model."); 104 | } 105 | 106 | // Assemble stiffness matrices for each material 107 | C_mats.resize(n_mat); 108 | kapparef_mat = Matrix::Zero(); 109 | 110 | for (size_t i = 0; i < n_mat; ++i) { 111 | Matrix C_i = Matrix::Zero(); 112 | int k = 0; // Index for C_constants 113 | 114 | // Assign constants to the upper triangular part 115 | for (int row = 0; row < 6; ++row) { 116 | for (int col = row; col < 6; ++col) { 117 | C_i(row, col) = C_constants(k++, i); 118 | } 119 | } 120 | 121 | // Symmetrize the matrix 122 | C_i = C_i.selfadjointView(); 123 | 124 | C_mats[i] = C_i; 125 | kapparef_mat += C_i; 126 | } 127 | 128 | kapparef_mat /= n_mat; 129 | 130 | // Compute phase stiffness matrices 131 | phase_stiffness = new Matrix[n_mat]; 132 | for (size_t i = 0; i < n_mat; ++i) { 133 | phase_stiffness[i] = Matrix::Zero(); 134 | for (int p = 0; p < 8; ++p) { 135 | phase_stiffness[i] += B_int[p].transpose() * C_mats[i] * B_int[p] * v_e * 0.1250; 136 | } 137 | } 138 | } 139 | 140 | void get_sigma(int i, int mat_index, ptrdiff_t element_idx) override 141 | { 142 | sigma.segment<6>(i) = C_mats[mat_index] * eps.segment<6>(i); 143 | } 144 | 145 | private: 146 | std::vector, Eigen::aligned_allocator>> C_mats; 147 | MatrixXd C_constants; 148 | }; 149 | 150 | #endif // LINEARELASTIC_H 151 | -------------------------------------------------------------------------------- /include/material_models/LinearThermal.h: -------------------------------------------------------------------------------- 1 | #ifndef LINEARTHERMAL_H 2 | #define LINEARTHERMAL_H 3 | 4 | #include "matmodel.h" 5 | #include // For Eigen's aligned_allocator 6 | 7 | class LinearThermalIsotropic : public ThermalModel, public LinearModel<1> { 8 | public: 9 | LinearThermalIsotropic(vector l_e, json materialProperties) 10 | : ThermalModel(l_e) 11 | { 12 | try { 13 | conductivity = materialProperties["conductivity"].get>(); 14 | } catch (const std::exception &e) { 15 | throw std::runtime_error("Missing material properties for the requested material model."); 16 | } 17 | n_mat = conductivity.size(); 18 | 19 | double kappa_ref = (*max_element(conductivity.begin(), conductivity.end()) + 20 | *min_element(conductivity.begin(), conductivity.end())) / 21 | 2; 22 | kapparef_mat = kappa_ref * Matrix3d::Identity(); 23 | 24 | Matrix3d phase_kappa; 25 | phase_stiffness = new Matrix[n_mat]; 26 | 27 | for (size_t i = 0; i < n_mat; ++i) { 28 | phase_stiffness[i] = Matrix::Zero(); 29 | phase_kappa = conductivity[i] * Matrix3d::Identity(); 30 | 31 | for (int p = 0; p < 8; ++p) { 32 | phase_stiffness[i] += B_int[p].transpose() * phase_kappa * B_int[p] * v_e * 0.1250; 33 | } 34 | } 35 | } 36 | 37 | void get_sigma(int i, int mat_index, ptrdiff_t element_idx) override 38 | { 39 | sigma(i + 0, 0) = conductivity[mat_index] * eps(i + 0, 0); 40 | sigma(i + 1, 0) = conductivity[mat_index] * eps(i + 1, 0); 41 | sigma(i + 2, 0) = conductivity[mat_index] * eps(i + 2, 0); 42 | } 43 | 44 | private: 45 | vector conductivity; 46 | }; 47 | 48 | class LinearThermalTriclinic : public ThermalModel, public 
LinearModel<1> { 49 | public: 50 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW // Ensure proper alignment for Eigen structures 51 | 52 | LinearThermalTriclinic(vector l_e, json materialProperties) 53 | : ThermalModel(l_e) 54 | { 55 | vector K_keys = { 56 | "K_11", "K_12", "K_13", 57 | "K_22", "K_23", 58 | "K_33"}; 59 | 60 | try { 61 | n_mat = materialProperties.at("K_11").get>().size(); 62 | size_t num_constants = K_keys.size(); 63 | 64 | // Initialize matrix to hold all constants 65 | K_constants.resize(num_constants, n_mat); 66 | 67 | // Load material constants into matrix 68 | for (size_t k = 0; k < num_constants; ++k) { 69 | const auto &values = materialProperties.at(K_keys[k]).get>(); 70 | if (values.size() != n_mat) { 71 | throw std::runtime_error("Inconsistent size for material property: " + K_keys[k]); 72 | } 73 | K_constants.row(k) = Eigen::Map(values.data(), values.size()); 74 | } 75 | } catch (const std::exception &e) { 76 | throw std::runtime_error("Missing or inconsistent material properties for the requested material model."); 77 | } 78 | 79 | // Assemble conductivity matrices for each material 80 | K_mats.resize(n_mat); 81 | kapparef_mat = Matrix3d::Zero(); 82 | 83 | for (size_t i = 0; i < n_mat; ++i) { 84 | Matrix3d K_i; 85 | K_i << K_constants(0, i), K_constants(1, i), K_constants(2, i), 86 | K_constants(1, i), K_constants(3, i), K_constants(4, i), 87 | K_constants(2, i), K_constants(4, i), K_constants(5, i); 88 | 89 | K_mats[i] = K_i; 90 | kapparef_mat += K_i; 91 | } 92 | 93 | kapparef_mat /= n_mat; 94 | 95 | // Compute phase stiffness matrices 96 | phase_stiffness = new Matrix[n_mat]; 97 | for (size_t i = 0; i < n_mat; ++i) { 98 | phase_stiffness[i] = Matrix::Zero(); 99 | for (int p = 0; p < 8; ++p) { 100 | phase_stiffness[i] += B_int[p].transpose() * K_mats[i] * B_int[p] * v_e * 0.1250; 101 | } 102 | } 103 | } 104 | 105 | void get_sigma(int i, int mat_index, ptrdiff_t element_idx) override 106 | { 107 | sigma.segment<3>(i) = K_mats[mat_index] * eps.segment<3>(i); 108 | } 109 | 110 | private: 111 | std::vector> K_mats; 112 | MatrixXd K_constants; 113 | }; 114 | 115 | #endif // LINEARTHERMAL_H 116 | -------------------------------------------------------------------------------- /include/material_models/PseudoPlastic.h: -------------------------------------------------------------------------------- 1 | /** 2 | * @file PseudoPlastic.h 3 | * @brief This file contains the declaration of the PseudoPlastic class and its derived classes. 4 | * 5 | * The PseudoPlastic class is a base class that represents a pseudo-plastic material model. 6 | * The models implemented are described in https://doi.org/10.1016/j.euromechsol.2017.11.007 -> Appendix A.1 and A.2. 7 | * It contains common properties and methods for all pseudo-plastic material models. The derived classes, 8 | * PseudoPlasticLinearHardening and PseudoPlasticNonLinearHardening, implement specific variations 9 | * of the pseudo-plastic material model with linear and nonlinear hardening, respectively. 10 | * 11 | * The PseudoPlastic class and its derived classes are used in the FANS for simulating 12 | * mechanical behavior of materials. 
13 | */ 14 | 15 | #ifndef PSEUDOPLASTIC_H 16 | #define PSEUDOPLASTIC_H 17 | 18 | #include "matmodel.h" 19 | #include "solver.h" 20 | 21 | class PseudoPlastic : public MechModel { 22 | public: 23 | PseudoPlastic(vector l_e, json materialProperties) 24 | : MechModel(l_e) 25 | { 26 | try { 27 | bulk_modulus = materialProperties["bulk_modulus"].get>(); 28 | shear_modulus = materialProperties["shear_modulus"].get>(); 29 | yield_stress = materialProperties["yield_stress"].get>(); 30 | } catch (const std::exception &e) { 31 | throw std::runtime_error("Missing material properties for the requested material model."); 32 | } 33 | n_mat = bulk_modulus.size(); 34 | 35 | // Initialize stiffness matrix (assuming for two materials, otherwise needs extension) 36 | Matrix *Ce = new Matrix[n_mat]; 37 | Matrix topLeft = Matrix::Zero(); 38 | topLeft.topLeftCorner(3, 3).setConstant(1); 39 | 40 | kapparef_mat = Matrix::Zero(); 41 | for (int i = 0; i < n_mat; ++i) { 42 | Ce[i] = 3 * bulk_modulus[i] * topLeft + 43 | 2 * shear_modulus[i] * (-1.0 / 3.0 * topLeft + Matrix::Identity()); 44 | kapparef_mat += Ce[i]; 45 | } 46 | kapparef_mat /= n_mat; 47 | } 48 | 49 | void initializeInternalVariables(ptrdiff_t num_elements, int num_gauss_points) override 50 | { 51 | plastic_flag.resize(num_elements, VectorXi::Zero(num_gauss_points)); 52 | } 53 | 54 | virtual void get_sigma(int i, int mat_index, ptrdiff_t element_idx) override = 0; // Pure virtual method 55 | 56 | void postprocess(Solver<3> &solver, Reader &reader, const char *resultsFileName, int load_idx, int time_idx) override 57 | { 58 | VectorXf element_plastic_flag = VectorXf::Zero(solver.local_n0 * solver.n_y * solver.n_z); 59 | for (ptrdiff_t elem_idx = 0; elem_idx < solver.local_n0 * solver.n_y * solver.n_z; ++elem_idx) { 60 | element_plastic_flag(elem_idx) = plastic_flag[elem_idx].cast().mean(); 61 | } 62 | 63 | if (find(reader.resultsToWrite.begin(), reader.resultsToWrite.end(), "plastic_flag") != reader.resultsToWrite.end()) { 64 | for (int i = 0; i < solver.world_size; ++i) { 65 | if (i == solver.world_rank) { 66 | char name[5096]; 67 | sprintf(name, "%s/load%i/time_step%i/plastic_flag", reader.ms_datasetname, load_idx, time_idx); 68 | reader.WriteSlab(element_plastic_flag.data(), 1, resultsFileName, name); 69 | } 70 | MPI_Barrier(MPI_COMM_WORLD); 71 | } 72 | } 73 | } 74 | 75 | protected: 76 | vector bulk_modulus; 77 | vector shear_modulus; 78 | vector yield_stress; 79 | vector eps_crit; 80 | vector plastic_flag; 81 | Matrix dev_eps; 82 | double treps, norm_dev_eps, buf1, buf2; 83 | }; 84 | 85 | class PseudoPlasticLinearHardening : public PseudoPlastic { 86 | public: 87 | PseudoPlasticLinearHardening(vector l_e, json materialProperties) 88 | : PseudoPlastic(l_e, materialProperties) 89 | { 90 | try { 91 | hardening_parameter = materialProperties["hardening_parameter"].get>(); 92 | } catch (const std::exception &e) { 93 | throw std::runtime_error("Missing material properties for the requested material model."); 94 | } 95 | 96 | E_s.resize(n_mat); 97 | eps_crit.resize(n_mat); 98 | 99 | for (int i = 0; i < n_mat; ++i) { 100 | eps_crit[i] = sqrt(2. / 3.) * yield_stress[i] / (2. * shear_modulus[i]); 101 | E_s[i] = (3. * shear_modulus[i]) / (3. 
* shear_modulus[i] + hardening_parameter[i]); 102 | } 103 | } 104 | 105 | void get_sigma(int i, int mat_index, ptrdiff_t element_idx) override 106 | { 107 | treps = eps.block<3, 1>(i, 0).sum(); 108 | dev_eps.head<3>() = eps.block<3, 1>(i, 0) - (1.0 / 3.0) * treps * Matrix<double, 3, 1>::Ones(); 109 | dev_eps.tail<3>() = eps.block<3, 1>(i + 3, 0); 110 | 111 | norm_dev_eps = dev_eps.norm(); 112 | buf1 = bulk_modulus[mat_index] * treps; 113 | 114 | if (norm_dev_eps <= eps_crit[mat_index]) { 115 | buf2 = 2.0 * shear_modulus[mat_index]; 116 | plastic_flag[element_idx](i / n_str) = mat_index; 117 | } else { 118 | buf2 = (b * yield_stress[mat_index] + a * E_s[mat_index] * hardening_parameter[mat_index] * 119 | (norm_dev_eps - eps_crit[mat_index])) / 120 | norm_dev_eps; 121 | plastic_flag[element_idx](i / n_str) = this->n_mat + mat_index; 122 | } 123 | sigma.block<3, 1>(i, 0).setConstant(buf1); 124 | sigma.block<3, 1>(i, 0) += buf2 * dev_eps.head<3>(); 125 | sigma.block<3, 1>(i + 3, 0) = buf2 * dev_eps.tail<3>(); 126 | } 127 | 128 | private: 129 | vector<double> hardening_parameter; 130 | vector<double> E_s; 131 | double a = 2. / 3; 132 | double b = sqrt(a); 133 | }; 134 | 135 | class PseudoPlasticNonLinearHardening : public PseudoPlastic { 136 | public: 137 | PseudoPlasticNonLinearHardening(vector<double> l_e, json materialProperties) 138 | : PseudoPlastic(l_e, materialProperties) 139 | { 140 | try { 141 | hardening_exponent = materialProperties["hardening_exponent"].get<vector<double>>(); 142 | eps_0 = materialProperties["eps_0"].get<vector<double>>(); // ε0 parameter 143 | } catch (const std::exception &e) { 144 | throw std::runtime_error("Missing material properties for the requested material model."); 145 | } 146 | 147 | eps_crit.resize(n_mat); 148 | for (int i = 0; i < n_mat; ++i) { 149 | eps_crit[i] = eps_0[i] * pow(yield_stress[i] / (3.0 * shear_modulus[i] * eps_0[i]), 1.0 / (1.0 - hardening_exponent[i])); 150 | } 151 | } 152 | 153 | void get_sigma(int i, int mat_index, ptrdiff_t element_idx) override 154 | { 155 | treps = eps.block<3, 1>(i, 0).sum(); 156 | dev_eps.head<3>() = eps.block<3, 1>(i, 0) - (1.0 / 3.0) * treps * Matrix<double, 3, 1>::Ones(); 157 | dev_eps.tail<3>() = eps.block<3, 1>(i + 3, 0); 158 | 159 | norm_dev_eps = sqrt(2.0 / 3.0) * dev_eps.norm(); // ε_eq 160 | 161 | buf1 = bulk_modulus[mat_index] * treps; 162 | sigma.block<6, 1>(i, 0).setConstant(0); 163 | if (norm_dev_eps <= eps_crit[mat_index]) { 164 | buf2 = 2.0 * shear_modulus[mat_index]; 165 | sigma.block<3, 1>(i, 0).setConstant(buf1); 166 | sigma.block<3, 1>(i, 0) += buf2 * dev_eps.head<3>(); 167 | sigma.block<3, 1>(i + 3, 0) = buf2 * dev_eps.tail<3>(); 168 | 169 | plastic_flag[element_idx](i / n_str) = mat_index; 170 | } else { 171 | buf2 = sqrt(2.0 / 3.0) * yield_stress[mat_index] * 172 | pow(norm_dev_eps / eps_0[mat_index], hardening_exponent[mat_index]); 173 | sigma.block<3, 1>(i, 0).setConstant(buf1); 174 | sigma.block<6, 1>(i, 0) += buf2 * dev_eps / dev_eps.norm(); 175 | 176 | plastic_flag[element_idx](i / n_str) = this->n_mat + mat_index; 177 | } 178 | } 179 | 180 | private: 181 | vector<double> hardening_exponent; 182 | vector<double> eps_0; 183 | }; 184 | 185 | #endif // PSEUDOPLASTIC_H 186 | -------------------------------------------------------------------------------- /include/matmodel.h: -------------------------------------------------------------------------------- 1 | 2 | #ifndef MATMODEL_H 3 | #define MATMODEL_H 4 | 5 | #include "general.h" 6 | 7 | constexpr int get_n_str(int h) 8 | { 9 | switch (h) { 10 | case 1: 11 | return 3; 12 | case 3: 13 | return 6; 14 | } return 0; // unreachable: only h == 1 and h == 3 are used 15 | } 16 | template <int howmany> 17 | class Solver;
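// The template parameter `howmany` is the number of unknowns per node: 1 for
// scalar (thermal/diffusion) problems and 3 for mechanics. get_n_str() above
// maps this to the length of the strain/stress vectors: 3 gradient/flux
// components for howmany == 1, and 6 Voigt-type strain components for
// howmany == 3 (see ThermalModel and MechModel at the end of this header).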
18 | 19 | template <int howmany> 20 | class Matmodel { 21 | public: 22 | EIGEN_MAKE_ALIGNED_OPERATOR_NEW // see http://eigen.tuxfamily.org/dox-devel/group__TopicStructHavingEigenMembers.html 23 | 24 | const static int n_str = get_n_str(howmany); // length of strain and stress 25 | 26 | int verbosity; //!< output verbosity 27 | int n_mat; // Number of Materials 28 | 29 | double *strain; //!< Gradient 30 | double *stress; //!< Flux 31 | 32 | Matmodel(vector<double> l_e); 33 | 34 | Matrix<double, howmany * 8, howmany * 8> Compute_Reference_ElementStiffness(); 35 | Matrix<double, howmany * 8, 1> &element_residual(Matrix<double, howmany * 8, 1> &ue, int mat_index, ptrdiff_t element_idx); 36 | void getStrainStress(double *strain, double *stress, Matrix<double, howmany * 8, 1> &ue, int mat_index, ptrdiff_t element_idx); 37 | void setGradient(vector<double> _g0); 38 | 39 | virtual void postprocess(Solver<howmany> &solver, Reader &reader, const char *resultsFileName, int load_idx, int time_idx) {} 40 | 41 | virtual void initializeInternalVariables(ptrdiff_t num_elements, int num_gauss_points) {} 42 | virtual void updateInternalVariables() {} 43 | 44 | vector<double> macroscale_loading; 45 | Matrix<double, n_str, n_str> kapparef_mat; // Reference conductivity matrix 46 | 47 | virtual ~Matmodel() = default; 48 | 49 | protected: 50 | double l_e_x; 51 | double l_e_y; 52 | double l_e_z; 53 | double v_e; 54 | 55 | Matrix<double, n_str, howmany * 8> B_el_mean; //!< precomputed mean B matrix over the element 56 | Matrix<double, n_str, howmany * 8> B_int[8]; //!< precomputed B matrix at all integration points 57 | Matrix<double, n_str * 8, howmany * 8> B; 58 | 59 | Matrix<double, n_str * 8, 1> eps; 60 | Matrix<double, n_str * 8, 1> g0; //!< Macro-scale Gradient 61 | Matrix<double, n_str * 8, 1> sigma; 62 | Matrix<double, howmany * 8, 1> res_e; 63 | 64 | Matrix<double, 3, 8> Compute_basic_B(const double x, const double y, const double z) const; 65 | virtual Matrix<double, n_str, howmany * 8> Compute_B(const double x, const double y, const double z) = 0; 66 | void Construct_B(); 67 | 68 | virtual void get_sigma(int i, int mat_index, ptrdiff_t element_idx) = 0; 69 | }; 70 | 71 | template <int howmany> 72 | Matmodel<howmany>::Matmodel(vector<double> l_e) 73 | { 74 | l_e_x = l_e[0]; 75 | l_e_y = l_e[1]; 76 | l_e_z = l_e[2]; 77 | 78 | v_e = l_e_x * l_e_y * l_e_z; 79 | } 80 | 81 | template <int howmany> 82 | void Matmodel<howmany>::Construct_B() 83 | { 84 | const double xi_p = 0.5 + sqrt(3.) / 6.; 85 | const double xi_m = 0.5 - sqrt(3.) / 6.; 86 | const double xi[8][3] = {{xi_m, xi_m, xi_m}, {xi_p, xi_m, xi_m}, {xi_m, xi_p, xi_m}, {xi_p, xi_p, xi_m}, {xi_m, xi_m, xi_p}, {xi_p, xi_m, xi_p}, {xi_m, xi_p, xi_p}, {xi_p, xi_p, xi_p}}; 87 | 88 | B_el_mean = Compute_B(0.5, 0.5, 0.5); 89 | 90 | // fetch B at the integration sites 91 | for (int p = 0; p < 8; p++) { 92 | B_int[p] = Compute_B(xi[p][0], xi[p][1], xi[p][2]); 93 | B.block(n_str * p, 0, n_str, howmany * 8) = B_int[p]; 94 | } 95 | } 96 | 97 | template <int howmany> 98 | Matrix<double, 3, 8> Matmodel<howmany>::Compute_basic_B(const double x, const double y, const double z) const 99 | { 100 | Matrix<double, 3, 8> out; 101 | out(0, 0) = -(1. - y) * (1. - z) / l_e_x; 102 | out(0, 1) = (1. - y) * (1. - z) / l_e_x; 103 | out(0, 2) = -y * (1. - z) / l_e_x; 104 | out(0, 3) = y * (1. - z) / l_e_x; 105 | out(0, 4) = -(1. - y) * z / l_e_x; 106 | out(0, 5) = (1. - y) * z / l_e_x; 107 | out(0, 6) = -y * z / l_e_x; 108 | out(0, 7) = y * z / l_e_x; 109 | 110 | out(1, 0) = -(1. - x) * (1. - z) / l_e_y; 111 | out(1, 1) = -x * (1. - z) / l_e_y; 112 | out(1, 2) = (1. - x) * (1. - z) / l_e_y; 113 | out(1, 3) = x * (1. - z) / l_e_y; 114 | out(1, 4) = -(1. - x) * z / l_e_y; 115 | out(1, 5) = -x * z / l_e_y; 116 | out(1, 6) = (1. - x) * z / l_e_y; 117 | out(1, 7) = x * z / l_e_y; 118 | 119 | out(2, 0) = -(1. - x) * (1. - y) / l_e_z; 120 | out(2, 1) = -x * (1. - y) / l_e_z; 121 | out(2, 2) = -(1. - x) * y / l_e_z; 122 | out(2, 3) = -x * y / l_e_z; 123 | out(2, 4) = (1. - x) * (1. - y) / l_e_z;
- x) * (1. - y) / l_e_z; 124 | out(2, 5) = x * (1. - y) / l_e_z; 125 | out(2, 6) = (1. - x) * y / l_e_z; 126 | out(2, 7) = x * y / l_e_z; 127 | return out; 128 | } 129 | 130 | template 131 | Matrix &Matmodel::element_residual(Matrix &ue, int mat_index, ptrdiff_t element_idx) 132 | { 133 | 134 | eps.noalias() = B * ue + g0; 135 | 136 | for (int i = 0; i < 8; ++i) { 137 | get_sigma(n_str * i, mat_index, element_idx); 138 | } 139 | res_e.noalias() = B.transpose() * sigma * v_e * 0.125; 140 | return res_e; 141 | } 142 | template 143 | void Matmodel::getStrainStress(double *strain, double *stress, Matrix &ue, int mat_index, ptrdiff_t element_idx) 144 | { 145 | eps.noalias() = B * ue + g0; 146 | sigma.setZero(); 147 | for (int i = 0; i < 8; ++i) { 148 | get_sigma(n_str * i, mat_index, element_idx); 149 | } 150 | 151 | Matrix avg_strain = Matrix::Zero(); 152 | Matrix avg_stress = Matrix::Zero(); 153 | 154 | for (int i = 0; i < 8; ++i) { 155 | for (int j = 0; j < n_str; ++j) { 156 | avg_strain(j) += eps(i * n_str + j) * 0.125; 157 | avg_stress(j) += sigma(i * n_str + j) * 0.125; 158 | } 159 | } 160 | 161 | for (int i = 0; i < n_str; ++i) { 162 | strain[i] = avg_strain(i); 163 | stress[i] = avg_stress(i); 164 | } 165 | } 166 | 167 | template 168 | void Matmodel::setGradient(vector _g0) 169 | { 170 | macroscale_loading = _g0; 171 | for (int i = 0; i < n_str; i++) { 172 | for (int j = 0; j < 8; ++j) { 173 | g0(n_str * j + i, 0) = _g0[i]; 174 | } 175 | } 176 | } 177 | template 178 | Matrix Matmodel::Compute_Reference_ElementStiffness() 179 | { 180 | Matrix Reference_ElementStiffness = Matrix::Zero(); 181 | Matrix tmp = Matrix::Zero(); 182 | 183 | for (int p = 0; p < 8; ++p) { 184 | tmp += B_int[p].transpose() * kapparef_mat * B_int[p] * v_e * 0.1250; 185 | } 186 | // before: 8 groups of howmany after: howmany groups of 8 187 | for (int i = 0; i < howmany * 8; ++i) { 188 | for (int j = 0; j < howmany * 8; ++j) { 189 | Reference_ElementStiffness((i % howmany) * 8 + i / howmany, (j % howmany) * 8 + j / howmany) = tmp(i, j); 190 | } 191 | } 192 | return Reference_ElementStiffness; 193 | } 194 | 195 | class ThermalModel : public Matmodel<1> { 196 | public: 197 | ThermalModel(vector l_e) 198 | : Matmodel(l_e) 199 | { 200 | Construct_B(); 201 | }; 202 | 203 | protected: 204 | Matrix Compute_B(const double x, const double y, const double z); 205 | }; 206 | 207 | inline Matrix ThermalModel::Compute_B(const double x, const double y, const double z) 208 | { 209 | return Matmodel<1>::Compute_basic_B(x, y, z); 210 | } 211 | 212 | class MechModel : public Matmodel<3> { 213 | public: 214 | MechModel(vector l_e) 215 | : Matmodel(l_e) 216 | { 217 | Construct_B(); 218 | }; 219 | 220 | protected: 221 | Matrix Compute_B(const double x, const double y, const double z); 222 | }; 223 | 224 | inline Matrix MechModel::Compute_B(const double x, const double y, const double z) 225 | { 226 | Matrix out = Matrix::Zero(); 227 | Matrix B_tmp = Matmodel<3>::Compute_basic_B(x, y, z); 228 | const double sqrt_half = 7.071067811865476e-01; 229 | 230 | for (int q = 0; q < 8; q++) { 231 | out(0, 3 * q + 0) = B_tmp(0, q); 232 | out(1, 3 * q + 1) = B_tmp(1, q); 233 | out(2, 3 * q + 2) = B_tmp(2, q); 234 | 235 | out(3, 3 * q + 0) = sqrt_half * B_tmp(1, q); 236 | out(4, 3 * q + 0) = sqrt_half * B_tmp(2, q); 237 | out(5, 3 * q + 1) = sqrt_half * B_tmp(2, q); 238 | 239 | out(3, 3 * q + 1) = sqrt_half * B_tmp(0, q); 240 | out(4, 3 * q + 2) = sqrt_half * B_tmp(0, q); 241 | out(5, 3 * q + 2) = sqrt_half * B_tmp(1, q); 242 | } 243 | return out; 244 
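Editorial note: the element kinematics above can be sanity-checked with a short NumPy sketch (not repository code). It re-implements `Compute_basic_B` for a voxel of edge lengths `l_e` and verifies two properties that `Construct_B` relies on: a constant nodal field has zero gradient, and because B is multilinear in the local coordinates, its mean over the eight Gauss points equals its value at the element center.

```python
import numpy as np

def basic_B(x, y, z, l_e=(1.0, 1.0, 1.0)):
    """Gradients of the eight trilinear shape functions of a voxel of size
    l_e, at local coordinates (x, y, z) in [0, 1]^3 (mirrors Compute_basic_B)."""
    lx, ly, lz = l_e
    return np.array([
        [-(1-y)*(1-z), (1-y)*(1-z), -y*(1-z), y*(1-z), -(1-y)*z, (1-y)*z, -y*z, y*z],
        [-(1-x)*(1-z), -x*(1-z), (1-x)*(1-z), x*(1-z), -(1-x)*z, -x*z, (1-x)*z, x*z],
        [-(1-x)*(1-y), -x*(1-y), -(1-x)*y, -x*y, (1-x)*(1-y), x*(1-y), (1-x)*y, x*y],
    ]) / np.array([[lx], [ly], [lz]])

# the 2x2x2 Gauss points used in Construct_B, in the same ordering
xi_m, xi_p = 0.5 - np.sqrt(3.0) / 6.0, 0.5 + np.sqrt(3.0) / 6.0
pts = [(x, y, z) for z in (xi_m, xi_p) for y in (xi_m, xi_p) for x in (xi_m, xi_p)]

for p in pts:  # a constant nodal field must produce a zero gradient
    assert np.allclose(basic_B(*p) @ np.ones(8), 0.0)

# multilinearity: Gauss-point mean equals the value at the element center
assert np.allclose(sum(basic_B(*p) for p in pts) / 8, basic_B(0.5, 0.5, 0.5))
print("basic B sanity checks passed")
```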
253 | -------------------------------------------------------------------------------- /include/mixedBCs.h: --------------------------------------------------------------------------------
1 | #ifndef MIXED_BC_H
2 | #define MIXED_BC_H
3 |
4 | // ============================================================================
5 | // mixedBCs.h
6 | // --------------------------------------------------------------------------
7 | // • Holds MixedBC + LoadCase structs
8 | // • Provides: MixedBC::from_json(), finalize()
9 | // ============================================================================
10 |
11 | #include <Eigen/Dense>
12 | using namespace Eigen;
13 | #include <json.hpp>
14 | using nlohmann::json;
15 |
16 | // ---------------------------------------------------------------------------
17 | struct MixedBC {
18 | /* Index sets (0‑based) */
19 | VectorXi idx_E; // strain‑controlled components
20 | VectorXi idx_F; // stress‑controlled components
21 |
22 | /* Time paths */
23 | MatrixXd F_E_path; // (#steps × |E|)
24 | MatrixXd P_F_path; // (#steps × |F|)
25 |
26 | /* Projectors & auxiliary matrix */
27 | MatrixXd Q_E, Q_F, M; // M = (Q_Fᵀ C0 Q_F)⁻¹
28 |
29 | // ------------------------------------------------------------
30 | // build Q_E, Q_F, M
31 | // ------------------------------------------------------------
32 | void finalize(const MatrixXd &C0)
33 | {
34 | const int n_str = static_cast<int>(C0.rows());
35 |
36 | Q_E = MatrixXd::Zero(n_str, idx_E.size());
37 | for (int c = 0; c < idx_E.size(); ++c)
38 | Q_E(idx_E(c), c) = 1.0;
39 |
40 | Q_F = MatrixXd::Zero(n_str, idx_F.size());
41 | for (int c = 0; c < idx_F.size(); ++c)
42 | Q_F(idx_F(c), c) = 1.0;
43 |
44 | if (idx_F.size() > 0)
45 | M = (Q_F.transpose() * C0 * Q_F).inverse();
46 | else
47 | M.resize(0, 0); // pure‑strain case
48 | }
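Editorial note: the projector construction in `finalize()` is plain column selection. A NumPy mirror (not repository code; `C0` below is a placeholder, since the real reference stiffness comes from the material model) makes this concrete:

```python
import numpy as np

def finalize(C0, idx_E, idx_F):
    """Mirror of MixedBC::finalize(): Q_E/Q_F select the strain- and
    stress-controlled components, M = (Q_F^T C0 Q_F)^-1."""
    n_str = C0.shape[0]
    Q_E = np.zeros((n_str, len(idx_E))); Q_E[idx_E, np.arange(len(idx_E))] = 1.0
    Q_F = np.zeros((n_str, len(idx_F))); Q_F[idx_F, np.arange(len(idx_F))] = 1.0
    M = np.linalg.inv(Q_F.T @ C0 @ Q_F) if len(idx_F) else np.zeros((0, 0))
    return Q_E, Q_F, M

# components 2..5 strain-controlled, 0..1 stress-controlled (as in test_MixedBCs.json)
C0 = np.diag([3.0, 3.0, 3.0, 1.0, 1.0, 1.0])   # placeholder reference stiffness
Q_E, Q_F, M = finalize(C0, idx_E=[2, 3, 4, 5], idx_F=[0, 1])
assert np.allclose(Q_E.T @ Q_F, 0.0)           # disjoint index sets
print(M)                                       # inverse of the selected C0 block
```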
49 |
50 | // ------------------------------------------------------------
51 | // Factory: parse MixedBC from a JSON object
52 | // ------------------------------------------------------------
53 | static MixedBC from_json(const json &jc, int n_str)
54 | {
55 | auto toEigen = [](const vector<int> &v) {
56 | VectorXi e(v.size());
57 | for (size_t i = 0; i < v.size(); ++i)
58 | e(static_cast<int>(i)) = v[i];
59 | return e;
60 | };
61 |
62 | MixedBC bc;
63 |
64 | if (!jc.contains("strain_indices") || !jc.contains("stress_indices"))
65 | throw runtime_error("mixed BC: strain_indices or stress_indices missing");
66 |
67 | bc.idx_E = toEigen(jc["strain_indices"].get<vector<int>>());
68 | bc.idx_F = toEigen(jc["stress_indices"].get<vector<int>>());
69 |
70 | // ---- sanity: disjoint + complementary -----
71 | vector<int> present(n_str, 0);
72 | for (int i = 0; i < bc.idx_E.size(); ++i) {
73 | int k = bc.idx_E(i);
74 | if (k < 0 || k >= n_str)
75 | throw runtime_error("strain index out of range");
76 | present[k] = 1;
77 | }
78 | for (int i = 0; i < bc.idx_F.size(); ++i) {
79 | int k = bc.idx_F(i);
80 | if (k < 0 || k >= n_str)
81 | throw runtime_error("stress index out of range");
82 | if (present[k])
83 | throw runtime_error("index appears in both strain_indices and stress_indices");
84 | present[k] = 1;
85 | }
86 | for (int k = 0; k < n_str; ++k)
87 | if (!present[k])
88 | throw runtime_error("each component must be either strain‑ or stress‑controlled");
89 |
90 | // ---- parse 2‑D arrays (allow empty for |E|==0 etc.) -----
91 | auto get2D = [](const json &arr) {
92 | return arr.get<vector<vector<double>>>();
93 | };
94 |
95 | vector<vector<double>> strain_raw, stress_raw;
96 | size_t n_steps = 0;
97 |
98 | if (bc.idx_E.size() > 0) {
99 | if (!jc.contains("strain"))
100 | throw runtime_error("strain array missing");
101 | strain_raw = get2D(jc["strain"]);
102 | n_steps = strain_raw.size();
103 | }
104 | if (bc.idx_F.size() > 0) {
105 | if (!jc.contains("stress"))
106 | throw runtime_error("stress array missing");
107 | stress_raw = get2D(jc["stress"]);
108 | n_steps = max(n_steps, stress_raw.size());
109 | }
110 | if (n_steps == 0)
111 | throw runtime_error("mixed BC: at least one of strain/stress must have timesteps");
112 |
113 | // default‑fill for missing part (constant 0)
114 | if (strain_raw.empty())
115 | strain_raw.resize(n_steps, vector<double>(0));
116 | if (stress_raw.empty())
117 | stress_raw.resize(n_steps, vector<double>(0));
118 |
119 | // consistency checks & build Eigen matrices
120 | bc.F_E_path = MatrixXd::Zero(n_steps, bc.idx_E.size());
121 | bc.P_F_path = MatrixXd::Zero(n_steps, bc.idx_F.size());
122 |
123 | for (size_t t = 0; t < n_steps; ++t) {
124 | if (strain_raw[t].size() != static_cast<size_t>(bc.idx_E.size()))
125 | throw runtime_error("strain row length mismatch");
126 | for (int c = 0; c < bc.idx_E.size(); ++c)
127 | bc.F_E_path(static_cast<int>(t), c) = (bc.idx_E.size() ? strain_raw[t][c] : 0.0);
128 |
129 | if (stress_raw[t].size() != static_cast<size_t>(bc.idx_F.size()))
130 | throw runtime_error("stress row length mismatch");
131 | for (int c = 0; c < bc.idx_F.size(); ++c)
132 | bc.P_F_path(static_cast<int>(t), c) = (bc.idx_F.size() ? stress_raw[t][c] : 0.0);
133 | }
134 | return bc;
135 | }
136 | };
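Editorial note: for reference, here is the shape of the data `from_json()` expects, expressed as a Python dict together with the same disjoint/complementary checks; the example values mirror the mixed load case in `test_MixedBCs.json` shown further below.

```python
import numpy as np

def parse_mixed_bc(jc, n_str=6):
    """Sketch of the validation in MixedBC::from_json(): index sets must be
    disjoint and together cover all n_str components; the path arrays carry
    one row per time step."""
    idx_E, idx_F = list(jc["strain_indices"]), list(jc["stress_indices"])
    if set(idx_E) & set(idx_F):
        raise ValueError("index appears in both strain_indices and stress_indices")
    if sorted(idx_E + idx_F) != list(range(n_str)):
        raise ValueError("each component must be either strain- or stress-controlled")
    F_E = np.array(jc["strain"], dtype=float).reshape(-1, len(idx_E))
    P_F = np.array(jc["stress"], dtype=float).reshape(-1, len(idx_F))
    return idx_E, idx_F, F_E, P_F

case = {"strain_indices": [2, 3, 4, 5], "stress_indices": [0, 1],
        "strain": [[0.0005, 0.0, 0.0, 0.0], [0.001, 0.0, 0.0, 0.0]],
        "stress": [[0.0, 0.0], [0.0, 0.0]]}
print(parse_mixed_bc(case))
```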
137 |
138 | // ---------------------------------------------------------------------------
139 | // LoadCase : covers both legacy and mixed formats (used by Reader)
140 | // ---------------------------------------------------------------------------
141 | struct LoadCase {
142 | bool mixed = false;
143 | vector<vector<double>> g0_path; // legacy pure‑strain
144 | MixedBC mbc; // mixed BC data
145 | size_t n_steps = 0; // number of time steps
146 | };
147 |
148 | // ---------------------------------------------------------------------------
149 | // Helper mix‑in with enable/update for Solver (header‑only)
150 | // ---------------------------------------------------------------------------
151 | template <int howmany>
152 | struct MixedBCController {
153 | bool mixed_active = false;
154 |
155 | protected:
156 | MixedBC mbc_local;
157 | size_t step_idx = 0;
158 | VectorXd g0_vec; // current macro strain (size n_str)
159 |
160 | // call from user code after v_u update each iteration
161 | template <typename SolverType>
162 | void update(SolverType &solver)
163 | {
164 | if (!mixed_active)
165 | return;
166 |
167 | VectorXd Pbar = solver.get_homogenized_stress();
168 |
169 | VectorXd PF = (mbc_local.idx_F.size() ? mbc_local.P_F_path.row(step_idx).transpose() : VectorXd());
170 |
171 | if (mbc_local.idx_F.size()) {
172 | VectorXd rhs = PF - mbc_local.Q_F.transpose() * Pbar;
173 | VectorXd delta_E = mbc_local.M * rhs;
174 | g0_vec += mbc_local.Q_F * delta_E;
175 | }
176 |
177 | vector<double> gvec(g0_vec.data(), g0_vec.data() + g0_vec.size());
178 | solver.matmodel->setGradient(gvec);
179 | }
180 |
181 | template <typename SolverType>
182 | void activate(SolverType &solver, const MixedBC &mbc_in, size_t t)
183 | {
184 | mixed_active = true;
185 | mbc_local = mbc_in;
186 | step_idx = t;
187 | mbc_local.finalize(solver.matmodel->kapparef_mat);
188 |
189 | int n_str = solver.matmodel->n_str;
190 | g0_vec = VectorXd::Zero(n_str);
191 | if (mbc_local.idx_E.size())
192 | g0_vec += mbc_local.Q_E * mbc_local.F_E_path.row(t).transpose();
193 |
194 | vector<double> gvec(g0_vec.data(), g0_vec.data() + n_str);
195 | solver.matmodel->setGradient(gvec);
196 |
197 | // Do one update to set the initial strain
198 | solver.updateMixedBC();
199 | }
200 | };
201 |
202 | #endif // MIXED_BC_H
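Editorial note: the correction applied in `update()` is a single fixed-point step on the stress-controlled strain components. A NumPy sketch with illustrative numbers (placeholder `C0` and stress targets, not repository output) makes the algebra explicit:

```python
import numpy as np

# One update step: correct the macro strain until the homogenized stress on
# the stress-controlled components idx_F matches the prescribed values.
n_str, idx_F = 6, [0, 1]
Q_F = np.zeros((n_str, len(idx_F))); Q_F[idx_F, np.arange(len(idx_F))] = 1.0

C0 = np.diag([3.0, 3.0, 3.0, 1.0, 1.0, 1.0])   # placeholder reference tangent
M = np.linalg.inv(Q_F.T @ C0 @ Q_F)

g0 = np.zeros(n_str)                           # current macro strain g0_vec
P_target = np.array([-0.05, -0.05])            # prescribed stresses on idx_F
P_bar = C0 @ g0                                # stand-in for get_homogenized_stress()

delta = M @ (P_target - Q_F.T @ P_bar)         # rhs and correction, as in update()
g0 += Q_F @ delta
print(g0)                                      # only components 0 and 1 changed
```

For this linear stand-in a single step already satisfies the stress targets; in the nonlinear case the solver repeats this correction every iteration.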
203 | -------------------------------------------------------------------------------- /include/setup.h: --------------------------------------------------------------------------------
1 | #include "solverCG.h"
2 | #include "solverFP.h"
3 |
4 | // Thermal models
5 | #include "material_models/LinearThermal.h"
6 | #include "material_models/GBDiffusion.h"
7 |
8 | // Mechanical models
9 | #include "material_models/LinearElastic.h"
10 | #include "material_models/PseudoPlastic.h"
11 | #include "material_models/J2Plasticity.h"
12 |
13 | template <int howmany>
14 | Matmodel<howmany> *createMatmodel(const Reader &reader);
15 |
16 | template <>
17 | Matmodel<1> *createMatmodel(const Reader &reader)
18 | {
19 | if (reader.matmodel == "LinearThermalIsotropic") {
20 | return new LinearThermalIsotropic(reader.l_e, reader.materialProperties);
21 | } else if (reader.matmodel == "LinearThermalTriclinic") {
22 | return new LinearThermalTriclinic(reader.l_e, reader.materialProperties);
23 | } else if (reader.matmodel == "GBDiffusion") {
24 | return new GBDiffusion(const_cast<Reader &>(reader));
25 | } else {
26 | throw std::invalid_argument(reader.matmodel + " is not a valid matmodel for thermal problem");
27 | }
28 | }
29 |
30 | template <>
31 | Matmodel<3> *createMatmodel(const Reader &reader)
32 | {
33 | // Linear Elastic models
34 | if (reader.matmodel == "LinearElasticIsotropic") {
35 | return new LinearElasticIsotropic(reader.l_e, reader.materialProperties);
36 | } else if (reader.matmodel == "LinearElasticTriclinic") {
37 | return new LinearElasticTriclinic(reader.l_e, reader.materialProperties);
38 |
39 | // Pseudo Plastic models
40 | } else if (reader.matmodel == "PseudoPlasticLinearHardening") {
41 | return new PseudoPlasticLinearHardening(reader.l_e, reader.materialProperties);
42 | } else if (reader.matmodel == "PseudoPlasticNonLinearHardening") {
43 | return new PseudoPlasticNonLinearHardening(reader.l_e, reader.materialProperties);
44 |
45 | // J2 Plastic models
46 | } else if (reader.matmodel == "J2ViscoPlastic_LinearIsotropicHardening") {
47 | return new J2ViscoPlastic_LinearIsotropicHardening(reader.l_e, reader.materialProperties);
48 | } else if (reader.matmodel == "J2ViscoPlastic_NonLinearIsotropicHardening") {
49 | return new J2ViscoPlastic_NonLinearIsotropicHardening(reader.l_e, reader.materialProperties);
50 |
51 | } else {
52 | throw std::invalid_argument(reader.matmodel + " is not a valid matmodel for mechanical problem");
53 | }
54 | }
55 |
56 | template <int howmany>
57 | Solver<howmany> *createSolver(const Reader &reader, Matmodel<howmany> *matmodel)
58 | {
59 | if (reader.method == "fp") {
60 | return new SolverFP<howmany>(reader, matmodel);
61 | } else if (reader.method == "cg") {
62 | return new SolverCG<howmany>(reader, matmodel);
63 | } else {
64 | throw std::invalid_argument(reader.method + " is not a valid method");
65 | }
66 | }
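Editorial note: the strings dispatched on by `createMatmodel<howmany>()` and `createSolver<howmany>()` come straight from the input file. A minimal Python script that writes such an input (mirroring `test_LinearThermal.json` from the test suite; the output file name is arbitrary) might look like:

```python
import json

# "matmodel" selects the material model class, "method" the solver ("cg"/"fp")
config = {
    "microstructure": {"filepath": "microstructures/sphere32.h5",
                       "datasetname": "/sphere/32x32x32/ms", "L": [1.0, 1.0, 1.0]},
    "problem_type": "thermal",
    "matmodel": "LinearThermalIsotropic",   # -> new LinearThermalIsotropic(...)
    "material_properties": {"conductivity": [1, 10]},
    "method": "cg",                         # -> new SolverCG<1>(...)
    "error_parameters": {"measure": "Linfinity", "type": "absolute", "tolerance": 1e-10},
    "n_it": 100,
    "macroscale_loading": [[[0.01, 0.02, -0.01]]],
    "results": ["stress_average", "strain_average"],
}
with open("my_thermal_input.json", "w") as f:
    json.dump(config, f, indent=2)
```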
67 | -------------------------------------------------------------------------------- /include/solverCG.h: --------------------------------------------------------------------------------
1 | #ifndef SOLVER_CG_H
2 | #define SOLVER_CG_H
3 |
4 | #include "solver.h"
5 |
6 | template <int howmany>
7 | class SolverCG : public Solver<howmany> {
8 | public:
9 | using Solver<howmany>::n_x;
10 | using Solver<howmany>::n_y;
11 | using Solver<howmany>::n_z;
12 | using Solver<howmany>::local_n0;
13 | using Solver<howmany>::local_n1;
14 | using Solver<howmany>::v_u_real;
15 | using Solver<howmany>::v_r_real;
16 |
17 | SolverCG(Reader reader, Matmodel<howmany> *matmodel);
18 |
19 | double *s;
20 | double *d;
21 | double *rnew;
22 | RealArray s_real;
23 | RealArray d_real;
24 | RealArray rnew_real;
25 |
26 | void internalSolve();
27 | void LineSearchSecant();
28 | double dotProduct(RealArray &a, RealArray &b);
29 |
30 | protected:
31 | using Solver<howmany>::iter;
32 | };
33 |
34 | template <int howmany>
35 | SolverCG<howmany>::SolverCG(Reader reader, Matmodel<howmany> *mat)
36 | : Solver<howmany>(reader, mat),
37 |
38 | s(fftw_alloc_real(reader.alloc_local * 2)),
39 | s_real(s, n_z * howmany, local_n0 * n_y, OuterStride<>((n_z + 2) * howmany)),
40 |
41 | rnew(fftw_alloc_real((local_n0 + 1) * n_y * n_z * howmany)),
42 | rnew_real(rnew, n_z * howmany, local_n0 * n_y, OuterStride<>(n_z * howmany)),
43 |
44 | d(fftw_alloc_real((local_n0 + 1) * n_y * n_z * howmany)),
45 | d_real(d, n_z * howmany, local_n0 * n_y, OuterStride<>(n_z * howmany))
46 | {
47 | this->CreateFFTWPlans(this->v_r, (fftw_complex *) s, s);
48 | }
49 |
50 | template <int howmany>
51 | double SolverCG<howmany>::dotProduct(RealArray &a, RealArray &b)
52 | {
53 | double local_value = (a * b).sum();
54 | double result;
55 | MPI_Allreduce(&local_value, &result, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
56 | return result;
57 | }
58 |
59 | template <int howmany>
60 | void SolverCG<howmany>::internalSolve()
61 | {
62 | if (this->world_rank == 0)
63 | printf("\n# Start FANS - Conjugate Gradient Solver \n");
64 |
65 | LinearModel<howmany> *linearModel = dynamic_cast<LinearModel<howmany> *>(this->matmodel);
66 | bool islinear = (linearModel == NULL) ? false : true;
67 |
68 | s_real.setZero();
69 | d_real.setZero();
70 | for (ptrdiff_t i = local_n0 * n_y * n_z * howmany; i < (local_n0 + 1) * n_y * n_z * howmany; i++) {
71 | d[i] = 0;
72 | }
73 |
74 | this->template compute_residual<2>(v_r_real, v_u_real);
75 |
76 | iter = 0;
77 | double err_rel = this->compute_error(v_r_real);
78 |
79 | double delta, delta0, deltamid;
80 | delta = 1.0L;
81 |
82 | while ((iter < this->n_it) && (err_rel > this->TOL)) {
83 |
84 | deltamid = dotProduct(v_r_real, s_real);
85 |
86 | this->convolution();
87 |
88 | s_real *= -1;
89 | delta0 = delta;
90 | delta = dotProduct(v_r_real, s_real);
91 |
92 | d_real = s_real + fmax(0, (delta - deltamid) / delta0) * d_real;
93 |
94 | if (islinear && !this->isMixedBCActive()) {
95 | Matrix<double, howmany * 8, 1> res_e;
96 | this->template compute_residual_basic<0>(rnew_real, d_real,
97 | [&](Matrix<double, howmany * 8, 1> &ue, int mat_index, ptrdiff_t element_idx) -> Matrix<double, howmany * 8, 1> & {
98 | res_e.noalias() = linearModel->phase_stiffness[mat_index] * ue;
99 | return res_e;
100 | });
101 |
102 | double alpha = delta / dotProduct(d_real, rnew_real);
103 | v_r_real -= alpha * rnew_real;
104 | v_u_real -= alpha * d_real;
105 | } else {
106 | LineSearchSecant();
107 | }
108 |
109 | iter++;
110 | err_rel = this->compute_error(v_r_real);
111 | }
112 | if (this->world_rank == 0)
113 | printf("# Complete FANS - Conjugate Gradient Solver \n");
114 | }
115 |
116 | template <int howmany>
117 | void SolverCG<howmany>::LineSearchSecant()
118 | {
119 | double err = 10.0;
120 | int MaxIter = 5;
121 | double tol = 1e-2;
122 | int _iter = 0;
123 | double alpha_new = 0.0001;
124 | double alpha_old = 0;
125 |
126 | double r1pd;
127 | double rpd = dotProduct(v_r_real, d_real);
128 |
129 | while (((_iter < MaxIter) && (err > tol))) {
130 |
131 | v_u_real += d_real * (alpha_new - alpha_old);
132 | this->updateMixedBC();
133 | this->template compute_residual<0>(rnew_real, v_u_real);
134 | r1pd = dotProduct(rnew_real, d_real);
135 |
136 | alpha_old = alpha_new;
137 | alpha_new *= rpd / (rpd - r1pd);
138 |
139 | err = fabs(alpha_new - alpha_old);
140 | _iter++;
141 | }
142 | v_u_real += d_real * (alpha_new - alpha_old);
143 | v_r_real = rnew_real;
144 | if (this->world_rank == 0)
145 | printf("line search iter %i, alpha %f - error %e - ", _iter, alpha_new, err);
146 | }
147 | #endif
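Editorial note: `LineSearchSecant()` solves the scalar equation r(u + αd)·d = 0 along the search direction d. A self-contained NumPy sketch of the same iteration (with a quadratic toy residual, not FANS data) is:

```python
import numpy as np

def secant_line_search(r0, d, residual, u, tol=1e-2, max_iter=5, alpha0=1e-4):
    """Sketch of SolverCG::LineSearchSecant(): secant iterations on
    f(alpha) = residual(u + alpha*d) . d, starting from a small trial step."""
    rpd = r0 @ d                       # fixed reference value, as in the C++ code
    alpha_old, alpha_new = 0.0, alpha0
    for _ in range(max_iter):
        u = u + d * (alpha_new - alpha_old)      # move to the current trial point
        r1pd = residual(u) @ d
        alpha_old = alpha_new
        alpha_new *= rpd / (rpd - r1pd)          # secant update
        if abs(alpha_new - alpha_old) <= tol:
            break
    return u + d * (alpha_new - alpha_old), alpha_new

# toy problem: residual(u) = A u - b; the exact step along d is recovered
A = np.diag([2.0, 5.0]); b = np.array([1.0, 1.0])
u0 = np.zeros(2); d = -(A @ u0 - b)
u1, alpha = secant_line_search(A @ u0 - b, d, lambda u: A @ u - b, u0)
print(alpha, u1)   # alpha = (r.d)/(d.Ad) = 2/7 for this linear residual
```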
148 | -------------------------------------------------------------------------------- /include/solverFP.h: --------------------------------------------------------------------------------
1 | #ifndef SOLVER_FP_H
2 | #define SOLVER_FP_H
3 |
4 | #include "solver.h"
5 |
6 | template <int howmany>
7 | class SolverFP : public Solver<howmany> {
8 | public:
9 | using Solver<howmany>::n_x;
10 | using Solver<howmany>::n_y;
11 | using Solver<howmany>::n_z;
12 | using Solver<howmany>::local_n0;
13 | using Solver<howmany>::local_n1;
14 | using Solver<howmany>::v_u_real;
15 | using Solver<howmany>::v_r_real;
16 |
17 | SolverFP(Reader reader, Matmodel<howmany> *matmodel);
18 |
19 | void internalSolve();
20 |
21 | protected:
22 | using Solver<howmany>::iter;
23 | };
24 |
25 | template <int howmany>
26 | SolverFP<howmany>::SolverFP(Reader reader, Matmodel<howmany> *matmodel)
27 | : Solver<howmany>(reader, matmodel)
28 | {
29 | this->CreateFFTWPlans(this->v_r, (fftw_complex *) this->v_r, this->v_r);
30 | }
31 |
32 | template <int howmany>
33 | void SolverFP<howmany>::internalSolve()
34 | {
35 | if (this->world_rank == 0)
36 | printf("\n# Start FANS - Fixed Point Solver \n");
37 |
38 | this->template compute_residual<2>(v_r_real, v_u_real);
39 |
40 | iter = 0;
41 | double err_rel = this->compute_error(v_r_real);
42 |
43 | while ((iter < this->n_it) && (err_rel > this->TOL)) {
44 |
45 | this->convolution();
46 | v_u_real -= v_r_real;
47 | this->updateMixedBC();
48 | this->template compute_residual<2>(v_r_real, v_u_real);
49 |
50 | iter++;
51 | err_rel = this->compute_error(v_r_real);
52 | }
53 | if (this->world_rank == 0)
54 | printf("# Complete FANS - Fixed Point Solver \n");
55 | }
56 | #endif
57 | -------------------------------------------------------------------------------- /include/version.h.in: --------------------------------------------------------------------------------
1 | #ifndef VERSION_H
2 | #define VERSION_H
3 |
4 | #define PROJECT_VERSION_MAJOR @PROJECT_VERSION_MAJOR@
5 | #define PROJECT_VERSION_MINOR @PROJECT_VERSION_MINOR@
6 | #define PROJECT_VERSION_PATCH @PROJECT_VERSION_PATCH@
7 | #define PROJECT_VERSION "@PROJECT_VERSION@"
8 |
9 | #endif // VERSION_H
10 | -------------------------------------------------------------------------------- /pixi.toml: --------------------------------------------------------------------------------
1 | [workspace]
2 | channels = ["conda-forge"]
3 | name = "FANS"
4 | platforms = ["linux-64", "win-64", "osx-64", "osx-arm64"]
5 |
6 | [dependencies]
7 | python = ">=3.13.3,<3.14"
8 | pytest = ">=8.3.5,<9"
9 | pre-commit = ">=4.2.0,<5"
10 | sympy = ">=1.14.0,<2"
11 | quaternion = ">=2024.0.8,<2025"
12 | beartype = ">=0.20.2,<0.21"
13 |
14 |
15 | [pypi-dependencies]
16 | fans-dashboard = { path = "FANS_Dashboard", editable = true }
17 | msutils = {git = "https://github.com/DataAnalyticsEngineering/MSUtils.git"}
18 | pyrecest = {git = "https://github.com/FlorianPfaff/pyRecEst.git"}
19 |
20 |
21 | [tasks]
22 | test = "pytest -v -s"
23 | precommit = "pre-commit run --all-files"
24 | h52xdmf = { args = ["file"], cmd = "cd \"$INIT_CWD\" && python -m fans_dashboard.plotting.h52xdmf -t -v {{file}}" }
25 | -------------------------------------------------------------------------------- /pyfans/CMakeLists.txt: --------------------------------------------------------------------------------
1 | pybind11_add_module(PyFANS micro.hpp micro.cpp)
2 | target_link_libraries(PyFANS PRIVATE FANS::FANS)
3 |
4 | add_custom_command(
5 | TARGET PyFANS
6 | POST_BUILD
7 | COMMAND ${CMAKE_COMMAND} -E create_symlink
8 | $<TARGET_FILE:PyFANS>
9 | ${CMAKE_CURRENT_SOURCE_DIR}/../test/test_pyfans/$<TARGET_FILE_NAME:PyFANS>
10 | COMMENT "Create a symlink for FANS python bindings to ${CMAKE_CURRENT_SOURCE_DIR}/../test/"
11 | )
12 | -------------------------------------------------------------------------------- /pyfans/README.md: --------------------------------------------------------------------------------
1 | # pyFANS
2 |
3 | pyFANS is a Python-wrapped library to control FANS via the [Micro Manager](https://precice.org/tooling-micro-manager-overview.html). The main idea is to create a large number of FANS simulations and couple them to one macro-scale simulation, typically in Abaqus, CalculiX, etc. The library follows the [API of the Micro Manager](https://precice.org/tooling-micro-manager-prepare-micro-simulation.html).
4 |
5 | ## Dependencies
6 |
7 | - [pybind11](https://pybind11.readthedocs.io/en/stable/index.html)
8 |
9 | ## Building
10 |
11 | To build FANS as a Micro Manager-compatible Python library, set the CMake variable `FANS_LIBRARY_FOR_MICRO_MANAGER` to `ON`. The CMake command to compile FANS would then be `cmake .. -DFANS_LIBRARY_FOR_MICRO_MANAGER=ON`.
12 |
13 | ## Usage
14 |
15 | pyFANS is intended to be used with the Micro Manager and preCICE for two-scale coupled simulations. However, standalone use of the library is not restricted per se. Look at the [test_pyfans](../test/test_pyfans/) example to see how the library is used in a Python script.
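Editorial note: a minimal standalone driver for the resulting module might look as follows. The dict keys match `micro.cpp` below; the input file name, working directory, and the need to initialize MPI via mpi4py are assumptions based on `test/test_pyfans/`.

```python
import numpy as np
from mpi4py import MPI   # ensures MPI is initialized before constructing the simulation
import PyFANS

# run from test/test_pyfans/ so that input.json and the microstructure path resolve
sim = PyFANS.MicroSimulation(0, "input.json")   # (sim_id, FANS input file)

macro_data = {
    "strains1to3": np.array([0.001, -0.002, 0.003]),
    "strains4to6": np.array([0.0015, -0.0025, 0.001]),
}
micro_data = sim.solve(macro_data, 0.1)         # second argument: time step dt (unused)

print(micro_data["stresses1to3"], micro_data["stresses4to6"])
print([micro_data[f"cmat{i}"] for i in range(1, 8)])   # packed 6x6 tangent
```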
16 | -------------------------------------------------------------------------------- /pyfans/micro.cpp: --------------------------------------------------------------------------------
1 | // Micro simulation for mechanical problems
2 | // In this file we solve a micro problem with FANS which is controlled by the Micro Manager
3 | // This file is compiled with pybind11 to be available as a python module
4 | //
5 |
6 | #include "micro.hpp"
7 | #include "setup.h"
8 | #include "matmodel.h"
9 |
10 | py::array_t<double> merge_arrays(py::array_t<double> array1, py::array_t<double> array2)
11 | {
12 | // Ensure arrays are contiguous for efficient merging
13 | array1 = array1.attr("copy")();
14 | array2 = array2.attr("copy")();
15 |
16 | // Get numpy concatenate function
17 | py::object np = py::module::import("numpy");
18 | py::object concatenate = np.attr("concatenate");
19 |
20 | // Concatenate the two arrays
21 | py::tuple arrays = py::make_tuple(array1, array2);
22 | py::array_t<double> result = concatenate(arrays, py::int_(0)).cast<py::array_t<double>>();
23 |
24 | return result;
25 | }
26 |
27 | MicroSimulation::MicroSimulation(int sim_id, char *input_file)
28 | {
29 | // initialize fftw mpi
30 | fftw_mpi_init();
31 |
32 | // Input file name is hardcoded. TODO: Make it configurable
33 | reader.ReadInputFile(input_file);
34 |
35 | reader.ReadMS(3);
36 | matmodel = createMatmodel<3>(reader);
37 | solver = createSolver<3>(reader, matmodel);
38 | }
39 |
40 | py::dict MicroSimulation::solve(py::dict macro_data, double dt)
41 | {
42 | // Time step value dt is not used currently, but is available for future use
43 |
44 | // Create pybind-style NumPy arrays from the macro-scale strain data
45 | py::array_t<double> strain1 = macro_data["strains1to3"].cast<py::array_t<double>>();
46 | py::array_t<double> strain2 = macro_data["strains4to6"].cast<py::array_t<double>>();
47 |
48 | py::array_t<double> strain = merge_arrays(strain1, strain2);
49 | std::vector<double> g0 = std::vector<double>(strain.data(), strain.data() + strain.size()); // convert numpy array to std::vector.
50 |
51 | VectorXd homogenized_stress;
52 |
53 | matmodel->setGradient(g0);
54 |
55 | solver->solve();
56 |
57 | homogenized_stress = solver->get_homogenized_stress();
58 |
59 | auto C = solver->get_homogenized_tangent(pert_param);
60 |
61 | // Convert data to a py::dict again to send it back to the Micro Manager
62 | py::dict micro_write_data;
63 |
64 | // Add stress and stiffness matrix data to Python dict to be returned
65 | std::vector<double> stress13 = {homogenized_stress[0], homogenized_stress[1], homogenized_stress[2]};
66 | micro_write_data["stresses1to3"] = stress13;
67 | std::vector<double> stress46 = {homogenized_stress[3], homogenized_stress[4], homogenized_stress[5]};
68 | micro_write_data["stresses4to6"] = stress46;
69 | std::vector<double> C_1 = {C(0, 0), C(0, 1), C(0, 2)};
70 | micro_write_data["cmat1"] = C_1;
71 | std::vector<double> C_2 = {C(0, 3), C(0, 4), C(0, 5)};
72 | micro_write_data["cmat2"] = C_2;
73 | std::vector<double> C_3 = {C(1, 1), C(1, 2), C(1, 3)};
74 | micro_write_data["cmat3"] = C_3;
75 | std::vector<double> C_4 = {C(1, 4), C(1, 5), C(2, 2)};
76 | micro_write_data["cmat4"] = C_4;
77 | std::vector<double> C_5 = {C(2, 3), C(2, 4), C(2, 5)};
78 | micro_write_data["cmat5"] = C_5;
79 | std::vector<double> C_6 = {C(3, 3), C(3, 4), C(3, 5)};
80 | micro_write_data["cmat6"] = C_6;
81 | std::vector<double> C_7 = {C(4, 4), C(4, 5), C(5, 5)};
82 | micro_write_data["cmat7"] = C_7;
83 |
84 | return micro_write_data;
85 | }
86 |
87 | PYBIND11_MODULE(PyFANS, m)
88 | {
89 | // optional docstring
90 | m.doc() = "FANS for Micro Manager";
91 |
92 | py::class_<MicroSimulation>(m, "MicroSimulation")
93 | .def(py::init<int, char *>())
94 | .def("solve", &MicroSimulation::solve);
95 | }
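Editorial note: the seven `cmat*` vectors written by `solve()` are the 21 entries of the upper triangle of the symmetric 6×6 tangent, in row-major order, chunked into threes. A NumPy helper (editorial sketch) that rebuilds the full matrix on the receiving side:

```python
import numpy as np

def unpack_tangent(md):
    """Rebuild the symmetric 6x6 tangent from the seven 3-vectors
    cmat1..cmat7 produced by MicroSimulation::solve()."""
    flat = np.concatenate([np.asarray(md[f"cmat{i}"]) for i in range(1, 8)])
    C = np.zeros((6, 6))
    iu = np.triu_indices(6)          # row-major upper triangle, 21 entries
    C[iu] = flat
    C[(iu[1], iu[0])] = flat         # symmetrize
    return C

# round-trip check with a synthetic symmetric matrix
C_in = np.arange(36, dtype=float).reshape(6, 6); C_in = (C_in + C_in.T) / 2
chunks = C_in[np.triu_indices(6)].reshape(7, 3)
md = {f"cmat{i + 1}": chunks[i] for i in range(7)}
assert np.allclose(unpack_tangent(md), C_in)
```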
96 | -------------------------------------------------------------------------------- /pyfans/micro.hpp: --------------------------------------------------------------------------------
1 | // This is the header file for the micro simulation class.
2 | // It is included in micro.cpp, which is compiled with pybind11 to create a python module.
3 | // The python module is then imported in the Micro Manager.
4 |
5 | #pragma once
6 | #include <iostream>
7 | #include <vector>
8 |
9 | #include "pybind11/pybind11.h"
10 | #include "pybind11/numpy.h" // numpy arrays
11 | #include "pybind11/stl.h" // std::vector conversion
12 |
13 | #include "general.h"
14 | #include "matmodel.h"
15 | #include "solver.h"
16 |
17 | namespace py = pybind11;
18 |
19 | class MicroSimulation {
20 | public:
21 | MicroSimulation(int sim_id, char *input_file = "input.json");
22 | py::dict solve(py::dict macro_write_data, double dt);
23 |
24 | private:
25 | int _sim_id;
26 | Reader reader;
27 | // Hardcoding mechanical models because these definitions need information from the input file.
28 | Matmodel<3> *matmodel;
29 | Solver<3> *solver;
30 | double pert_param = 1e-6; // scalar strain perturbation parameter
31 | };
32 | -------------------------------------------------------------------------------- /src/main.cpp: --------------------------------------------------------------------------------
1 | #include "general.h"
2 | #include "matmodel.h"
3 | #include "setup.h"
4 | #include "solver.h"
5 |
6 | // Version
7 | #include "version.h"
8 |
9 | template <int howmany>
10 | void runSolver(Reader &reader, const char *output_file_basename)
11 | {
12 | reader.ReadMS(howmany);
13 |
14 | for (size_t load_path_idx = 0; load_path_idx < reader.load_cases.size(); ++load_path_idx) {
15 | Matmodel<howmany> *matmodel = createMatmodel<howmany>(reader);
16 | Solver<howmany> *solver = createSolver<howmany>(reader, matmodel);
17 |
18 | for (size_t time_step_idx = 0; time_step_idx < reader.load_cases[load_path_idx].n_steps; ++time_step_idx) {
19 | if (reader.load_cases[load_path_idx].mixed) {
20 | solver->enableMixedBC(reader.load_cases[load_path_idx].mbc, time_step_idx);
21 | } else {
22 | const auto &g0 = reader.load_cases[load_path_idx].g0_path[time_step_idx];
23 | matmodel->setGradient(g0);
24 | }
25 | solver->solve();
26 | solver->postprocess(reader, output_file_basename, load_path_idx, time_step_idx);
27 | }
28 | delete solver;
29 | delete matmodel;
30 | }
31 | }
32 |
33 | int main(int argc, char *argv[])
34 | {
35 | if (argc > 1 && string(argv[1]) == "--version") {
36 | cout << "FANS version " << PROJECT_VERSION << endl;
37 | return 0;
38 | }
39 |
40 | if (argc != 3) {
41 | fprintf(stderr, "USAGE: %s [input file basename] [output file basename]\n", argv[0]);
42 | return 10;
43 | }
44 |
45 | MPI_Init(NULL, NULL);
46 | fftw_mpi_init();
47 |
48 | Reader reader;
49 | reader.ReadInputFile(argv[1]);
50 |
51 | if (reader.problemType == "thermal") {
52 | runSolver<1>(reader, argv[2]);
53 | } else if (reader.problemType == "mechanical") {
54 | runSolver<3>(reader, argv[2]);
55 | } else {
56 | throw std::invalid_argument(reader.problemType + " is not a valid problem type");
57 | }
58 |
59 | MPI_Finalize();
60 | return 0;
61 | }
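Editorial note: given the command-line interface in `main()`, a test case can also be launched from a Python script; a sketch, assuming a local build with the `FANS` binary on hand and the test inputs shipped in `test/`:

```python
import subprocess

# print the version, as handled by the --version branch in main()
subprocess.run(["./FANS", "--version"], check=True)

# run one of the shipped test cases on 2 MPI ranks (paths assume the
# working directory is test/ and the binary was copied or linked there)
subprocess.run(
    ["mpiexec", "-n", "2", "./FANS",
     "input_files/test_LinearElastic.json", "test_results.h5"],
    check=True,
)
```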
62 | -------------------------------------------------------------------------------- /test/CMakeLists.txt: --------------------------------------------------------------------------------
1 | set(FANS_TEST_INPUT_DIR ${CMAKE_CURRENT_SOURCE_DIR})
2 | set(FANS_TEST_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR})
3 | set(FANS_EXECUTABLE $<TARGET_FILE:FANS>)
4 |
5 | # determine MPI process count. The discretization of the test geometry allows for max. 8 processes.
6 | set(FANS_N_MPI_PROCESSES_MAX 8)
7 | cmake_host_system_information(RESULT FANS_CORES_AVAILABLE QUERY NUMBER_OF_PHYSICAL_CORES)
8 | if (FANS_N_MPI_PROCESSES_MAX LESS FANS_CORES_AVAILABLE)
9 | set(FANS_N_MPI_PROCESSES ${FANS_N_MPI_PROCESSES_MAX})
10 | else()
11 | set(FANS_N_MPI_PROCESSES ${FANS_CORES_AVAILABLE})
12 | endif()
13 | message(STATUS "Will use ${FANS_N_MPI_PROCESSES} processes for MPI test cases.")
14 |
15 | set(FANS_TEST_CASES
16 | J2Plasticity
17 | LinearElastic
18 | LinearThermal
19 | PseudoPlastic
20 | )
21 |
22 | list(LENGTH FANS_TEST_CASES N_TESTS)
23 | math(EXPR N_TESTS "${N_TESTS} - 1")
24 |
25 | foreach(N RANGE ${N_TESTS})
26 | list(GET FANS_TEST_CASES ${N} FANS_TEST_CASE)
27 |
28 | add_test(
29 | NAME ${FANS_TEST_CASE}
30 | COMMAND mpiexec -n ${FANS_N_MPI_PROCESSES} ${FANS_EXECUTABLE} input_files/test_${FANS_TEST_CASE}.json ${FANS_TEST_OUTPUT_DIR}/test_${FANS_TEST_CASE}.h5
31 | WORKING_DIRECTORY ${FANS_TEST_INPUT_DIR}
32 | )
33 | endforeach()
34 | -------------------------------------------------------------------------------- /test/README.md: --------------------------------------------------------------------------------
1 | # Tests
2 |
3 | This directory contains tests for FANS. Tests serve two purposes:
4 |
5 | 1. Verify the correct functioning of the code.
6 | 2. Provide example problems that demonstrate how FANS can be used.
7 |
8 | ## Test Structure
9 |
10 | The test directory includes:
11 |
12 | - `CMakeLists.txt` - Configures tests for CTest integration
13 | - `input_files/` - Example input JSON files for each test case
14 | - Python-based validation tests in the `pytest/` directory
15 |
16 | ## Running Tests
17 |
18 | ### Using CTest
19 |
20 | Tests are configured by CMake when FANS is built. If your CMake build folder is named `build`, run the FANS tests in the following way:
21 |
22 | ```bash
23 | cd ../build
24 | ctest
25 | ```
26 |
27 | This will run all test cases and generate result files in `build/test/`.
28 |
29 | ## Result Validation
30 |
31 | After running the tests, the results are verified using pytest. We recommend running pytest via a pre-configured Pixi task,
32 |
33 | ```bash
34 | pixi run test
35 | ```
36 |
37 | Note: The validation tests expect result files to be in the `build/test/` directory, so make sure to run the tests first.
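Editorial note: once the result files exist, they can also be inspected directly; a short h5py sketch (the file path assumes the CTest setup above, and the group layout follows the microstructure/load_case/time_step hierarchy used by the pytest suite):

```python
import h5py

# list every group and dataset in a results file produced by CTest
with h5py.File("../build/test/test_LinearElastic.h5", "r") as f:
    f.visit(print)
```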
38 |
39 | ## Available Test Cases
40 |
41 | For a 3D microstructure image of resolution `32 x 32 x 32` with a single spherical inclusion, the following test cases are available:
42 |
43 | - Linear thermal homogenization problem with isotropic heat conductivity - `test_LinearThermal.json`
44 | - Small strain mechanical homogenization problem with linear elasticity - `test_LinearElastic.json`
45 | - Small strain mechanical homogenization problem with nonlinear pseudoplasticity - `test_PseudoPlastic.json`
46 | - Small strain mechanical homogenization problem with Von-Mises plasticity - `test_J2Plasticity.json`
47 | - Small strain mechanical homogenization problem with linear pseudoplasticity and mixed stress-strain control boundary conditions - `test_MixedBCs.json`
48 |
49 | Each test case has corresponding input JSON files in the `input_files/` directory. Tests can be run individually as example problems. For instance,
50 |
51 | ```bash
52 | mpiexec -n 2 ./FANS input_files/test_LinearElastic.json test_results.h5
53 | ```
54 |
55 | To quickly visualize the test results, an accompanying XDMF file for the HDF5 output can be generated, which can be opened directly in ParaView for 3D visualization and analysis:
56 |
57 | ```bash
58 | pixi run h52xdmf test_results.h5
59 | ```
60 | -------------------------------------------------------------------------------- /test/input_files/test_LinearElastic.json: --------------------------------------------------------------------------------
1 | {
2 | "microstructure": {
3 | "filepath": "microstructures/sphere32.h5",
4 | "datasetname": "/sphere/32x32x32/ms",
5 | "L": [1.0, 1.0, 1.0]
6 | },
7 |
8 | "problem_type": "mechanical",
9 | "matmodel": "LinearElasticIsotropic",
10 | "material_properties":{
11 | "bulk_modulus": [62.5000, 222.222],
12 | "shear_modulus": [28.8462, 166.6667]
13 | },
14 |
15 | "method": "cg",
16 | "error_parameters":{
17 | "measure": "Linfinity",
18 | "type": "absolute",
19 | "tolerance": 1e-10
20 | },
21 | "n_it": 100,
22 | "macroscale_loading": [
23 | [[0.001, -0.002, 0.003, 0.0015, -0.0025, 0.001]]
24 | ],
25 |
26 | "results": ["homogenized_tangent", "stress_average", "strain_average", "absolute_error",
27 | "microstructure", "displacement", "displacement_fluctuation", "stress", "strain"]
28 | }
29 | -------------------------------------------------------------------------------- /test/input_files/test_LinearThermal.json: --------------------------------------------------------------------------------
1 | {
2 | "microstructure": {
3 | "filepath": "microstructures/sphere32.h5",
4 | "datasetname": "/sphere/32x32x32/ms",
5 | "L": [1.0, 1.0, 1.0]
6 | },
7 |
8 | "problem_type": "thermal",
9 | "matmodel": "LinearThermalIsotropic",
10 | "material_properties":{
11 | "conductivity": [1, 10]
12 | },
13 |
14 | "method": "cg",
15 | "error_parameters":{
16 | "measure": "Linfinity",
17 | "type": "absolute",
18 | "tolerance": 1e-10
19 | },
20 | "n_it": 100,
21 | "macroscale_loading": [
22 | [[0.01, 0.02, -0.01]]
23 | ],
24 |
25 | "results": ["homogenized_tangent", "stress_average", "strain_average", "absolute_error",
26 | "microstructure", "displacement", "displacement_fluctuation", "stress", "strain"]
27 | }
28 | -------------------------------------------------------------------------------- /test/input_files/test_MixedBCs.json: --------------------------------------------------------------------------------
1 | {
2 | "microstructure": {
3 | "filepath": "microstructures/sphere32.h5",
4 | "datasetname": "/sphere/32x32x32/ms",
5 | "L": [1.0, 1.0, 1.0]
6 | },
7 |
8 | "problem_type": "mechanical",
9 | "matmodel": "PseudoPlasticLinearHardening",
10 | "material_properties":{
11 | "bulk_modulus": [62.5000, 222.222],
12 | "shear_modulus": [28.8462, 166.6667],
13 | "yield_stress": [0.1, 100000],
14 | "hardening_parameter": [0.0, 0.0]
15 | },
16 |
17 | "method": "cg",
18 | "error_parameters":{
19 | "measure": "Linfinity",
20 | "type": "absolute",
21 | "tolerance": 1e-10
22 | },
23 | "n_it": 1000,
24 | "macroscale_loading": [ {
25 | "strain_indices" : [2,3,4,5],
26 | "stress_indices" : [0,1],
27 | "strain" : [[0.0005, 0.0, 0.0, 0.0],
28 | [0.001 , 0.0, 0.0, 0.0],
29 | [0.0015, 0.0, 0.0, 0.0],
30 | [0.002 , 0.0, 0.0, 0.0],
31 | [0.0025, 0.0, 0.0, 0.0],
32 | [0.003 , 0.0, 0.0, 0.0],
33 | [0.0035, 0.0, 0.0, 0.0],
34 | [0.004 , 0.0, 0.0, 0.0],
35 | [0.0045, 0.0, 0.0, 0.0],
36 | [0.005 , 0.0, 0.0, 0.0],
37 | [0.0055, 0.0, 0.0, 0.0],
38 | [0.006 , 0.0, 0.0, 0.0],
39 |
[0.0065, 0.0, 0.0, 0.0], 40 | [0.007 , 0.0, 0.0, 0.0], 41 | [0.0075, 0.0, 0.0, 0.0], 42 | [0.008 , 0.0, 0.0, 0.0], 43 | [0.0085, 0.0, 0.0, 0.0], 44 | [0.009 , 0.0, 0.0, 0.0], 45 | [0.0095, 0.0, 0.0, 0.0], 46 | [0.010 , 0.0, 0.0, 0.0], 47 | [0.0105, 0.0, 0.0, 0.0], 48 | [0.011 , 0.0, 0.0, 0.0], 49 | [0.0115, 0.0, 0.0, 0.0], 50 | [0.012 , 0.0, 0.0, 0.0]], 51 | "stress" : [[0.0, 0.0], 52 | [0.0, 0.0], 53 | [0.0, 0.0], 54 | [0.0, 0.0], 55 | [0.0, 0.0], 56 | [0.0, 0.0], 57 | [0.0, 0.0], 58 | [0.0, 0.0], 59 | [0.0, 0.0], 60 | [0.0, 0.0], 61 | [0.0, 0.0], 62 | [0.0, 0.0], 63 | [0.0, 0.0], 64 | [0.0, 0.0], 65 | [0.0, 0.0], 66 | [0.0, 0.0], 67 | [0.0, 0.0], 68 | [0.0, 0.0], 69 | [0.0, 0.0], 70 | [0.0, 0.0], 71 | [0.0, 0.0], 72 | [0.0, 0.0], 73 | [0.0, 0.0], 74 | [0.0, 0.0]] 75 | }, 76 | { 77 | "strain_indices" : [], 78 | "stress_indices" : [0,1,2,3,4,5], 79 | "strain" : [[],[]], 80 | "stress" : [[-0.05, -0.05, -0.05, 0.0, 0.0, 0.0], 81 | [-0.1 , -0.1 , -0.1 , 0.0, 0.0, 0.0]] 82 | }, 83 | [[-0.000201177817616389, -0.00020117781761638944, -0.0002011778176163894, 4.374921101288884e-22, 1.0822171975093186e-22, 1.424850916865268e-22], 84 | [-0.00040235563523281944, -0.0004023556352328199, -0.0004023556352328192, -2.5780780156705695e-21, 1.5365687671739435e-22, -4.551108956175545e-22]] 85 | ], 86 | 87 | "results": ["stress_average", "strain_average", "absolute_error", "phase_stress_average", "phase_strain_average", 88 | "microstructure", "displacement", "displacement_fluctuation", "stress", "strain"] 89 | } 90 | -------------------------------------------------------------------------------- /test/input_files/test_PseudoPlastic.json: -------------------------------------------------------------------------------- 1 | { 2 | "microstructure": { 3 | "filepath": "microstructures/sphere32.h5", 4 | "datasetname": "/sphere/32x32x32/ms", 5 | "L": [1.0, 1.0, 1.0] 6 | }, 7 | 8 | "problem_type": "mechanical", 9 | "matmodel": "PseudoPlasticNonLinearHardening", 10 | "material_properties":{ 11 | "bulk_modulus": [62.5000, 222.222], 12 | "shear_modulus": [28.8462, 166.6667], 13 | "yield_stress": [0.1, 10000], 14 | "hardening_parameter": [0.0, 0.0], 15 | "hardening_exponent": [0.2, 0.2], 16 | "eps_0": [0.01, 0.01] 17 | }, 18 | 19 | "method": "cg", 20 | "error_parameters":{ 21 | "measure": "Linfinity", 22 | "type": "absolute", 23 | "tolerance": 1e-10 24 | }, 25 | "n_it": 100, 26 | "macroscale_loading": [ 27 | [ 28 | [0.0000, -0.0000, -0.0000, 0, 0, 0], 29 | [0.0001, -5e-05, -5e-05, 0, 0, 0], 30 | [0.0002, -0.0001, -0.0001, 0, 0, 0], 31 | [0.0003, -0.00015, -0.00015, 0, 0, 0], 32 | [0.0004, -0.0002, -0.0002, 0, 0, 0], 33 | [0.0005, -0.00025, -0.00025, 0, 0, 0], 34 | [0.0006, -0.0003, -0.0003, 0, 0, 0], 35 | [0.0007, -0.00035, -0.00035, 0, 0, 0], 36 | [0.0008, -0.0004, -0.0004, 0, 0, 0], 37 | [0.0009, -0.00045, -0.00045, 0, 0, 0], 38 | [0.001, -0.0005, -0.0005, 0, 0, 0], 39 | [0.0011, -0.00055, -0.00055, 0, 0, 0], 40 | [0.0012, -0.0006, -0.0006, 0, 0, 0], 41 | [0.0013, -0.00065, -0.00065, 0, 0, 0], 42 | [0.0014, -0.0007, -0.0007, 0, 0, 0], 43 | [0.0015, -0.00075, -0.00075, 0, 0, 0], 44 | [0.0016, -0.0008, -0.0008, 0, 0, 0], 45 | [0.0017, -0.00085, -0.00085, 0, 0, 0], 46 | [0.0018, -0.0009, -0.0009, 0, 0, 0], 47 | [0.0019, -0.00095, -0.00095, 0, 0, 0], 48 | [0.002, -0.001, -0.001, 0, 0, 0], 49 | [0.0021, -0.00105, -0.00105, 0, 0, 0], 50 | [0.0022, -0.0011, -0.0011, 0, 0, 0], 51 | [0.0023, -0.00115, -0.00115, 0, 0, 0], 52 | [0.0024, -0.0012, -0.0012, 0, 0, 0], 53 | [0.0025, -0.00125, -0.00125, 0, 0, 0], 54 | [0.0026, -0.0013, 
-0.0013, 0, 0, 0], 55 | [0.0027, -0.00135, -0.00135, 0, 0, 0], 56 | [0.0028, -0.0014, -0.0014, 0, 0, 0], 57 | [0.0029, -0.00145, -0.00145, 0, 0, 0], 58 | [0.003, -0.0015, -0.0015, 0, 0, 0], 59 | [0.0031, -0.00155, -0.00155, 0, 0, 0], 60 | [0.0032, -0.0016, -0.0016, 0, 0, 0], 61 | [0.0033, -0.00165, -0.00165, 0, 0, 0], 62 | [0.0034, -0.0017, -0.0017, 0, 0, 0], 63 | [0.0035, -0.00175, -0.00175, 0, 0, 0], 64 | [0.0036, -0.0018, -0.0018, 0, 0, 0], 65 | [0.0037, -0.00185, -0.00185, 0, 0, 0], 66 | [0.0038, -0.0019, -0.0019, 0, 0, 0], 67 | [0.0039, -0.00195, -0.00195, 0, 0, 0], 68 | [0.004, -0.002, -0.002, 0, 0, 0], 69 | [0.0041, -0.00205, -0.00205, 0, 0, 0], 70 | [0.0042, -0.0021, -0.0021, 0, 0, 0], 71 | [0.0043, -0.00215, -0.00215, 0, 0, 0], 72 | [0.0044, -0.0022, -0.0022, 0, 0, 0], 73 | [0.0045, -0.00225, -0.00225, 0, 0, 0], 74 | [0.0046, -0.0023, -0.0023, 0, 0, 0], 75 | [0.0047, -0.00235, -0.00235, 0, 0, 0], 76 | [0.0048, -0.0024, -0.0024, 0, 0, 0], 77 | [0.0049, -0.00245, -0.00245, 0, 0, 0], 78 | [0.005, -0.0025, -0.0025, 0, 0, 0] 79 | ] 80 | ], 81 | 82 | "results": ["stress_average", "strain_average", "absolute_error", 83 | "microstructure", "displacement", "displacement_fluctuation", "stress", "strain", 84 | "plastic_flag"] 85 | } 86 | -------------------------------------------------------------------------------- /test/microstructures/sphere32.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataAnalyticsEngineering/FANS/3c681507289fe30d459fe6b378b2ccb6687414ab/test/microstructures/sphere32.h5 -------------------------------------------------------------------------------- /test/pytest/test_displacement_averaging.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import json 4 | import pytest 5 | from fans_dashboard.core.utils import identify_hierarchy, extract_and_organize_data 6 | 7 | 8 | @pytest.fixture( 9 | params=[ 10 | "test_J2Plasticity", 11 | "test_LinearElastic", 12 | "test_LinearThermal", 13 | "test_PseudoPlastic", 14 | ] 15 | ) 16 | def test_files(request): 17 | json_base_dir = os.path.join( 18 | os.path.dirname(os.path.abspath(__file__)), "../input_files/" 19 | ) 20 | h5_base_dir = os.path.join( 21 | os.path.dirname(os.path.abspath(__file__)), "../../build/test/" 22 | ) 23 | 24 | json_path = os.path.join(json_base_dir, f"{request.param}.json") 25 | h5_path = os.path.join(h5_base_dir, f"{request.param}.h5") 26 | 27 | if os.path.exists(json_path) and os.path.exists(h5_path): 28 | return json_path, h5_path 29 | pytest.skip(f"Required test files not found: {json_path} or {h5_path}") 30 | 31 | 32 | def test_displacement_averaging(test_files): 33 | """ 34 | This test verifies that the average of displacement fluctuations is zero for all 35 | microstructures and load cases. 36 | 37 | Parameters 38 | ---------- 39 | test_files : tuple 40 | A tuple containing (input_json_file, results_h5_file) paths. 
41 | - input_json_file: Path to the JSON file containing configuration data 42 | - results_h5_file: Path to the HDF5 file containing simulation results 43 | """ 44 | input_json_file, results_h5_file = test_files 45 | 46 | # Load the input json file to check which fields are requested 47 | with open(input_json_file, "r") as f: 48 | input_data = json.load(f) 49 | 50 | # Check which fields are available in the results 51 | results = input_data.get("results", []) 52 | 53 | # Check if displacement_fluctuation field is available 54 | if "displacement_fluctuation" not in results: 55 | pytest.skip( 56 | f"Skipping test: No displacement_fluctuation field found in {input_json_file}" 57 | ) 58 | return 59 | 60 | # Extract hierarchy information from the h5 file 61 | hierarchy = identify_hierarchy(results_h5_file) 62 | 63 | # Load the data from the HDF5 file 64 | microstructures_to_load = list(hierarchy.keys()) 65 | 66 | quantities_to_load = ["displacement_fluctuation"] 67 | 68 | time_steps_to_load = [] 69 | load_cases_to_load = [] 70 | 71 | # Get all unique load cases across all microstructures 72 | for microstructure in microstructures_to_load: 73 | for load_case in hierarchy[microstructure].keys(): 74 | if load_case not in load_cases_to_load: 75 | load_cases_to_load.append(load_case) 76 | 77 | # Extract the specified data, organized and sorted by time steps 78 | data = extract_and_organize_data( 79 | results_h5_file, 80 | hierarchy, 81 | quantities_to_load, 82 | microstructures_to_load, 83 | load_cases_to_load, 84 | time_steps_to_load, 85 | ) 86 | 87 | print(f"\nVerifying displacement fluctuation averages are zero...") 88 | 89 | for microstructure in microstructures_to_load: 90 | for load_case in load_cases_to_load: 91 | if load_case in hierarchy[microstructure]: 92 | if "displacement_fluctuation" not in data[microstructure][load_case]: 93 | print( 94 | f"Skipping {microstructure}/{load_case}: Missing displacement_fluctuation field" 95 | ) 96 | continue 97 | 98 | displacement_data = data[microstructure][load_case][ 99 | "displacement_fluctuation" 100 | ] 101 | 102 | # Compute average manually by averaging over spatial dimensions (1, 2, 3) 103 | # displacement_data shape: time_steps x Nx x Ny x Nz x components 104 | computed_average = np.mean(displacement_data, axis=(1, 2, 3)) 105 | 106 | # Check if the computed average is approximately zero 107 | zero_array = np.zeros_like(computed_average) 108 | 109 | assert np.allclose( 110 | computed_average, zero_array, rtol=1e-5, atol=1e-8 111 | ), ( 112 | f"For microstructure {microstructure}, load case {load_case}: " 113 | f"Average of displacement fluctuations is not zero." 
114 | f"\nComputed average shape: {computed_average.shape}, Value: {computed_average}" 115 | ) 116 | 117 | print( 118 | f"Verified: {microstructure}, load case {load_case} - displacement fluctuation average is zero" 119 | ) 120 | 121 | 122 | if __name__ == "__main__": 123 | pytest.main(["-v", "-s", __file__]) 124 | -------------------------------------------------------------------------------- /test/pytest/test_homogenization_consistency.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import json 4 | import pytest 5 | from fans_dashboard.core.utils import identify_hierarchy, extract_and_organize_data 6 | 7 | 8 | @pytest.fixture( 9 | params=[ 10 | "test_J2Plasticity", 11 | "test_LinearElastic", 12 | "test_LinearThermal", 13 | "test_PseudoPlastic", 14 | ] 15 | ) 16 | def test_files(request): 17 | json_base_dir = os.path.join( 18 | os.path.dirname(os.path.abspath(__file__)), "../input_files/" 19 | ) 20 | h5_base_dir = os.path.join( 21 | os.path.dirname(os.path.abspath(__file__)), "../../build/test/" 22 | ) 23 | 24 | json_path = os.path.join(json_base_dir, f"{request.param}.json") 25 | h5_path = os.path.join(h5_base_dir, f"{request.param}.h5") 26 | 27 | if os.path.exists(json_path) and os.path.exists(h5_path): 28 | return json_path, h5_path 29 | pytest.skip(f"Required test files not found: {json_path} or {h5_path}") 30 | 31 | 32 | def test_homogenization_consistency(test_files): 33 | """ 34 | This test verifies that the relationship stress_average = homogenized_tangent * strain_average 35 | holds for all microstructures and load cases. 36 | 37 | Parameters 38 | ---------- 39 | test_files : tuple 40 | A tuple containing (input_json_file, results_h5_file) paths. 41 | - input_json_file: Path to the JSON file containing configuration data 42 | - results_h5_file: Path to the HDF5 file containing simulation results 43 | """ 44 | input_json_file, results_h5_file = test_files 45 | 46 | # Load the input json file to check which fields are requested 47 | with open(input_json_file, "r") as f: 48 | input_data = json.load(f) 49 | 50 | # Check which fields are available in the results 51 | results = input_data.get("results", []) 52 | 53 | # Check if required fields are available 54 | required_fields = ["strain_average", "stress_average", "homogenized_tangent"] 55 | missing_fields = [field for field in required_fields if field not in results] 56 | 57 | if missing_fields: 58 | pytest.skip( 59 | f"Skipping test: Missing required fields {', '.join(missing_fields)} in {input_json_file}" 60 | ) 61 | 62 | # Extract hierarchy information from the h5 file 63 | hierarchy = identify_hierarchy(results_h5_file) 64 | 65 | # Load the data from the HDF5 file 66 | microstructures_to_load = list(hierarchy.keys()) 67 | 68 | quantities_to_load = ["strain_average", "stress_average", "homogenized_tangent"] 69 | 70 | time_steps_to_load = [] 71 | load_cases_to_load = [] 72 | 73 | # Get all unique load cases across all microstructures 74 | for microstructure in microstructures_to_load: 75 | for load_case in hierarchy[microstructure].keys(): 76 | if load_case not in load_cases_to_load: 77 | load_cases_to_load.append(load_case) 78 | 79 | # Extract the specified data, organized and sorted by time steps 80 | data = extract_and_organize_data( 81 | results_h5_file, 82 | hierarchy, 83 | quantities_to_load, 84 | microstructures_to_load, 85 | load_cases_to_load, 86 | time_steps_to_load, 87 | ) 88 | 89 | print(f"\nVerifying stress_average = homogenized_tangent * 
strain_average...") 90 | 91 | for microstructure in microstructures_to_load: 92 | for load_case in load_cases_to_load: 93 | if load_case in hierarchy[microstructure]: 94 | # Check if all required fields exist for this microstructure and load case 95 | required_data = { 96 | field: field in data[microstructure][load_case] 97 | for field in quantities_to_load 98 | } 99 | 100 | if not all(required_data.values()): 101 | missing = [ 102 | field for field, exists in required_data.items() if not exists 103 | ] 104 | print( 105 | f"Skipping {microstructure}/{load_case}: Missing fields: {', '.join(missing)}" 106 | ) 107 | continue 108 | 109 | strain_avg = data[microstructure][load_case]["strain_average"] 110 | stress_avg = data[microstructure][load_case]["stress_average"] 111 | h_tangent = data[microstructure][load_case]["homogenized_tangent"] 112 | 113 | # Loop through time steps 114 | for t in range(len(strain_avg)): 115 | # Compute stress using homogenized tangent and strain 116 | computed_stress = np.matmul(h_tangent[t], strain_avg[t]) 117 | 118 | # Check if computed stress matches the reported average stress 119 | assert np.allclose( 120 | computed_stress, stress_avg[t], rtol=1e-4, atol=1e-1 121 | ), ( 122 | f"For microstructure {microstructure}, load case {load_case}, time step {t}: " 123 | f"stress_average != homogenized_tangent * strain_average" 124 | f"\nComputed stress: {computed_stress}" 125 | f"\nReported stress: {stress_avg[t]}" 126 | ) 127 | 128 | print( 129 | f"Verified: {microstructure}, load case {load_case} - " 130 | f"stress_average = homogenized_tangent * strain_average for all time steps" 131 | ) 132 | 133 | 134 | if __name__ == "__main__": 135 | pytest.main(["-v", "-s", __file__]) 136 | -------------------------------------------------------------------------------- /test/pytest/test_homogenized_tangent_spd.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import json 4 | import pytest 5 | from fans_dashboard.core.utils import identify_hierarchy, extract_and_organize_data 6 | from scipy.linalg import eigvalsh 7 | 8 | 9 | @pytest.fixture( 10 | params=[ 11 | "test_J2Plasticity", 12 | "test_LinearElastic", 13 | "test_LinearThermal", 14 | "test_PseudoPlastic", 15 | ] 16 | ) 17 | def test_files(request): 18 | json_base_dir = os.path.join( 19 | os.path.dirname(os.path.abspath(__file__)), "../input_files/" 20 | ) 21 | h5_base_dir = os.path.join( 22 | os.path.dirname(os.path.abspath(__file__)), "../../build/test/" 23 | ) 24 | 25 | json_path = os.path.join(json_base_dir, f"{request.param}.json") 26 | h5_path = os.path.join(h5_base_dir, f"{request.param}.h5") 27 | 28 | if os.path.exists(json_path) and os.path.exists(h5_path): 29 | return json_path, h5_path 30 | pytest.skip(f"Required test files not found: {json_path} or {h5_path}") 31 | 32 | 33 | def test_homogenized_tangent_spd(test_files): 34 | """ 35 | This test verifies that the homogenized tangent is strictly Symmetric Positive Definite (SPD) 36 | for all microstructures and load cases. 37 | 38 | Parameters 39 | ---------- 40 | test_files : tuple 41 | A tuple containing (input_json_file, results_h5_file) paths. 
42 | - input_json_file: Path to the JSON file containing configuration data 43 | - results_h5_file: Path to the HDF5 file containing simulation results 44 | """ 45 | input_json_file, results_h5_file = test_files 46 | 47 | # Load the input json file to check which fields are requested 48 | with open(input_json_file, "r") as f: 49 | input_data = json.load(f) 50 | 51 | # Check which fields are available in the results 52 | results = input_data.get("results", []) 53 | 54 | # Check if homogenized_tangent field is available 55 | if "homogenized_tangent" not in results: 56 | pytest.skip( 57 | f"Skipping test: No homogenized_tangent field found in {input_json_file}" 58 | ) 59 | return 60 | 61 | # Extract hierarchy information from the h5 file 62 | hierarchy = identify_hierarchy(results_h5_file) 63 | 64 | # Load the data from the HDF5 file 65 | microstructures_to_load = list(hierarchy.keys()) 66 | 67 | quantities_to_load = ["homogenized_tangent"] 68 | 69 | time_steps_to_load = [] 70 | load_cases_to_load = [] 71 | 72 | # Get all unique load cases across all microstructures 73 | for microstructure in microstructures_to_load: 74 | for load_case in hierarchy[microstructure].keys(): 75 | if load_case not in load_cases_to_load: 76 | load_cases_to_load.append(load_case) 77 | 78 | # Extract the specified data, organized and sorted by time steps 79 | data = extract_and_organize_data( 80 | results_h5_file, 81 | hierarchy, 82 | quantities_to_load, 83 | microstructures_to_load, 84 | load_cases_to_load, 85 | time_steps_to_load, 86 | ) 87 | 88 | print(f"\nVerifying homogenized tangent is strictly SPD...") 89 | 90 | for microstructure in microstructures_to_load: 91 | for load_case in load_cases_to_load: 92 | if load_case in hierarchy[microstructure]: 93 | if "homogenized_tangent" not in data[microstructure][load_case]: 94 | print( 95 | f"Skipping {microstructure}/{load_case}: Missing homogenized_tangent field" 96 | ) 97 | continue 98 | 99 | tangent_data = data[microstructure][load_case]["homogenized_tangent"] 100 | 101 | # Check each time step 102 | for time_idx, tangent in enumerate(tangent_data): 103 | # Check symmetry 104 | is_symmetric = np.allclose(tangent, tangent.T, rtol=1e-5, atol=1e-8) 105 | 106 | # Check positive definiteness by computing eigenvalues 107 | eigenvalues = eigvalsh(tangent) 108 | is_positive_definite = np.all(eigenvalues > 0) 109 | 110 | assert is_symmetric, ( 111 | f"For microstructure {microstructure}, load case {load_case}, time step {time_idx}: " 112 | f"Homogenized tangent is not symmetric." 113 | f"\nTangent shape: {tangent.shape}, Max asymmetry: {np.max(np.abs(tangent - tangent.T))}" 114 | ) 115 | 116 | assert is_positive_definite, ( 117 | f"For microstructure {microstructure}, load case {load_case}, time step {time_idx}: " 118 | f"Homogenized tangent is not positive definite." 
119 | f"\nEigenvalues: {eigenvalues}, Min eigenvalue: {np.min(eigenvalues)}" 120 | ) 121 | 122 | print( 123 | f"Verified: {microstructure}, load case {load_case} - homogenized tangent is strictly SPD " 124 | f"({tangent_data.shape[0]} time steps)" 125 | ) 126 | 127 | 128 | if __name__ == "__main__": 129 | pytest.main(["-v", "-s", __file__]) 130 | -------------------------------------------------------------------------------- /test/pytest/test_homogenized_tangent_within_VRbounds.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import json 4 | import pytest 5 | from fans_dashboard.core.utils import identify_hierarchy, extract_and_organize_data 6 | from fans_dashboard.core.tensortools import ( 7 | Ciso, 8 | compute_VoigtReuss_bounds, 9 | is_spd, 10 | compute_volume_fractions, 11 | ) 12 | 13 | 14 | @pytest.fixture( 15 | params=[ 16 | "test_J2Plasticity", 17 | "test_LinearElastic", 18 | "test_LinearThermal", 19 | "test_PseudoPlastic", 20 | ] 21 | ) 22 | def test_files(request): 23 | json_base_dir = os.path.join( 24 | os.path.dirname(os.path.abspath(__file__)), "../input_files/" 25 | ) 26 | h5_base_dir = os.path.join( 27 | os.path.dirname(os.path.abspath(__file__)), "../../build/test/" 28 | ) 29 | 30 | json_path = os.path.join(json_base_dir, f"{request.param}.json") 31 | h5_path = os.path.join(h5_base_dir, f"{request.param}.h5") 32 | 33 | if os.path.exists(json_path) and os.path.exists(h5_path): 34 | return json_path, h5_path 35 | pytest.skip(f"Required test files not found: {json_path} or {h5_path}") 36 | 37 | 38 | def test_homogenized_tangent_within_VRbounds(test_files): 39 | """ 40 | This test verifies that the homogenized tangent is within Voigt and Reuss bounds 41 | for all microstructures and load cases for linear elastic and thermal problems. 42 | 43 | Parameters 44 | ---------- 45 | test_files : tuple 46 | A tuple containing (input_json_file, results_h5_file) paths. 
47 | - input_json_file: Path to the JSON file containing configuration data 48 | - results_h5_file: Path to the HDF5 file containing simulation results 49 | """ 50 | input_json_file, results_h5_file = test_files 51 | print(f"Running test on: {input_json_file} and {results_h5_file}") 52 | 53 | # Load the input json file to check material model and properties 54 | with open(input_json_file, "r") as f: 55 | input_data = json.load(f) 56 | 57 | # Debugging aid: if the 'results' field is missing, list the available top-level keys 58 | if "results" not in input_data: 59 | print("ERROR: 'results' field not found in input_data") 60 | for key in input_data: 61 | print(f"- {key}: {type(input_data[key])}") 62 | results = input_data.get("results", []) 63 | 64 | # Get material model 65 | mat_model = input_data.get("matmodel") 66 | 67 | # The Voigt/Reuss bounds check only applies to isotropic linear models 68 | if mat_model not in ["LinearElasticIsotropic", "LinearThermalIsotropic"]: 69 | pytest.skip( 70 | f"Skipping test: Material model {mat_model} is not supported for bounds check" 71 | ) 72 | return 73 | 74 | # Check if homogenized_tangent field is available 75 | if "homogenized_tangent" not in results: 76 | pytest.skip( 77 | f"Skipping test: No homogenized_tangent field found in {input_json_file}" 78 | ) 79 | return 80 | 81 | # Check if microstructure field is available 82 | if "microstructure" not in results: 83 | pytest.skip( 84 | f"Skipping test: No microstructure field found in {input_json_file}" 85 | ) 86 | return 87 | 88 | # Extract hierarchy information from the h5 file 89 | hierarchy = identify_hierarchy(results_h5_file) 90 | 91 | # Load the data from the HDF5 file 92 | microstructures_to_load = list(hierarchy.keys()) 93 | 94 | quantities_to_load = ["homogenized_tangent", "microstructure"] 95 | 96 | time_steps_to_load = [] 97 | load_cases_to_load = [] 98 | 99 | # Get all unique load cases across all microstructures 100 | for microstructure in microstructures_to_load: 101 | for load_case in hierarchy[microstructure].keys(): 102 | if load_case not in load_cases_to_load: 103 | load_cases_to_load.append(load_case) 104 | 105 | # Extract the specified data, organized and sorted by time steps 106 | data = extract_and_organize_data( 107 | results_h5_file, 108 | hierarchy, 109 | quantities_to_load, 110 | microstructures_to_load, 111 | load_cases_to_load, 112 | time_steps_to_load, 113 | ) 114 | 115 | print("\nVerifying homogenized tangent is within Voigt and Reuss bounds...") 116 | 117 | # Get material properties based on material model 118 | material_properties = input_data.get("material_properties", {}) 119 | 120 | for microstructure_name in microstructures_to_load: 121 | # Get the microstructure data and compute volume fractions 122 | if ( 123 | "microstructure" 124 | not in data[microstructure_name][list(data[microstructure_name].keys())[0]] 125 | ): 126 | print(f"Skipping {microstructure_name}: Missing microstructure data") 127 | continue 128 | 129 | microstructure_data = data[microstructure_name][ 130 | list(data[microstructure_name].keys())[0] 131 | ]["microstructure"] 132 | volume_fractions = compute_volume_fractions(microstructure_data) 133 | 134 | # Create phase tensors based on material model 135 | phase_tensors = [] 136 | if mat_model == "LinearThermalIsotropic": 137 | conductivities = material_properties.get("conductivity") 138 | for k in conductivities: 139 | # Create conductivity tensor (diagonal matrix with conductivity values) 140 | phase_tensors.append(k * np.eye(3)) 141 | elif mat_model == "LinearElasticIsotropic": 142 | bulk_moduli =
material_properties.get("bulk_modulus") 143 | shear_moduli = material_properties.get("shear_modulus") 144 | for k, g in zip(bulk_moduli, shear_moduli): 145 | phase_tensors.append(Ciso(k, g)) 146 | 147 | # Compute Voigt and Reuss bounds 148 | voigt, reuss = compute_VoigtReuss_bounds(phase_tensors, volume_fractions) 149 | 150 | for load_case in load_cases_to_load: 151 | if load_case in hierarchy[microstructure_name]: 152 | if "homogenized_tangent" not in data[microstructure_name][load_case]: 153 | print( 154 | f"Skipping {microstructure_name}/{load_case}: Missing homogenized_tangent field" 155 | ) 156 | continue 157 | 158 | tangent_data = data[microstructure_name][load_case][ 159 | "homogenized_tangent" 160 | ] 161 | 162 | # Check each time step 163 | for time_idx, homogenized_tangent in enumerate(tangent_data): 164 | # Check if homogenized_tangent is within bounds 165 | # Voigt - homogenized_tangent should be SPD (positive eigenvalues) 166 | voigt_minus_hom, voigt_eigs = is_spd(voigt - homogenized_tangent) 167 | 168 | # homogenized_tangent - Reuss should be SPD (positive eigenvalues) 169 | hom_minus_reuss, reuss_eigs = is_spd(homogenized_tangent - reuss) 170 | 171 | assert voigt_minus_hom, ( 172 | f"For microstructure {microstructure_name}, load case {load_case}, time step {time_idx}: " 173 | f"Homogenized tangent exceeds Voigt bound." 174 | f"\nMin eigenvalue of (Voigt - Homogenized): {min(voigt_eigs)}" 175 | ) 176 | 177 | assert hom_minus_reuss, ( 178 | f"For microstructure {microstructure_name}, load case {load_case}, time step {time_idx}: " 179 | f"Homogenized tangent is below Reuss bound." 180 | f"\nMin eigenvalue of (Homogenized - Reuss): {min(reuss_eigs)}" 181 | ) 182 | 183 | print( 184 | f"Verified: {microstructure_name}, load case {load_case} - homogenized tangent is within bounds " 185 | f"({tangent_data.shape[0]} time steps)" 186 | ) 187 | 188 | 189 | if __name__ == "__main__": 190 | pytest.main(["-v", "-s", __file__]) 191 | -------------------------------------------------------------------------------- /test/pytest/test_loading_to_strain_average.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import json 4 | import pytest 5 | from fans_dashboard.core.utils import identify_hierarchy, extract_and_organize_data 6 | 7 | 8 | @pytest.fixture( 9 | params=[ 10 | "test_J2Plasticity", 11 | "test_LinearElastic", 12 | "test_LinearThermal", 13 | "test_PseudoPlastic", 14 | ] 15 | ) 16 | def test_files(request): 17 | json_base_dir = os.path.join( 18 | os.path.dirname(os.path.abspath(__file__)), "../input_files/" 19 | ) 20 | h5_base_dir = os.path.join( 21 | os.path.dirname(os.path.abspath(__file__)), "../../build/test/" 22 | ) 23 | 24 | json_path = os.path.join(json_base_dir, f"{request.param}.json") 25 | h5_path = os.path.join(h5_base_dir, f"{request.param}.h5") 26 | 27 | if os.path.exists(json_path) and os.path.exists(h5_path): 28 | return json_path, h5_path 29 | pytest.skip(f"Required test files not found: {json_path} or {h5_path}") 30 | 31 | 32 | def test_loading_to_strain_average(test_files): 33 | """ 34 | This test verifies that the strain_average field in the results matches the macroscale_loading 35 | specified in the input JSON file for all microstructures and load cases. 36 | 37 | Parameters 38 | ---------- 39 | test_files : tuple 40 | A tuple containing (input_json_file, results_h5_file) paths. 
41 | - input_json_file: Path to the JSON file containing macroscale_loading data 42 | - results_h5_file: Path to the HDF5 file containing simulation results 43 | """ 44 | input_json_file, results_h5_file = test_files 45 | 46 | # Load the macroscale_loading field from the input json file 47 | with open(input_json_file, "r") as f: 48 | input_data = json.load(f) 49 | 50 | # Check if 'strain_average' exists in the results field 51 | results = input_data.get("results", []) 52 | if "strain_average" not in results: 53 | pytest.skip( 54 | f"Skipping test: 'strain_average' not requested in {input_json_file}" 55 | ) 56 | return 57 | 58 | macroscale_loading = np.array(input_data.get("macroscale_loading", [])) 59 | 60 | # Extract hierarchy information from the h5 file 61 | hierarchy = identify_hierarchy(results_h5_file) 62 | 63 | # Load the data from the HDF5 file 64 | microstructures_to_load = list(hierarchy.keys()) 65 | quantities_to_load = ["strain_average"] 66 | time_steps_to_load = [] 67 | 68 | load_cases_to_load = [] 69 | # Get all unique load cases across all microstructures 70 | for microstructure in microstructures_to_load: 71 | for load_case in hierarchy[microstructure].keys(): 72 | if load_case not in load_cases_to_load: 73 | load_cases_to_load.append(load_case) 74 | 75 | # Extract the specified data, organized and sorted by time steps 76 | data = extract_and_organize_data( 77 | results_h5_file, 78 | hierarchy, 79 | quantities_to_load, 80 | microstructures_to_load, 81 | load_cases_to_load, 82 | time_steps_to_load, 83 | ) 84 | 85 | # Comprehensive check for all microstructures and load cases 86 | for microstructure in microstructures_to_load: 87 | for j, load_case in enumerate(load_cases_to_load): 88 | if load_case in hierarchy[microstructure]: 89 | strain_average = data[microstructure][load_case]["strain_average"] 90 | 91 | # Get the corresponding macroscale loading 92 | # Assuming macroscale_loading is organized to match load cases 93 | current_loading = ( 94 | macroscale_loading[j] 95 | if j < len(macroscale_loading) 96 | else None 97 | ) 98 | 99 | if current_loading is not None: 100 | assert np.allclose(strain_average, current_loading), ( 101 | f"For microstructure {microstructure}, load case {load_case}: " 102 | f"strain_average and macroscale_loading are not close."
103 | ) 104 | print( 105 | f"Verified: microstructure {microstructure}, load case {load_case}" 106 | ) 107 | 108 | 109 | if __name__ == "__main__": 110 | 111 | pytest.main(["-v", "-s", __file__]) 112 | -------------------------------------------------------------------------------- /test/pytest/test_strain_stress_averaging.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import json 4 | import pytest 5 | from fans_dashboard.core.utils import identify_hierarchy, extract_and_organize_data 6 | 7 | 8 | @pytest.fixture( 9 | params=[ 10 | "test_J2Plasticity", 11 | "test_LinearElastic", 12 | "test_LinearThermal", 13 | "test_PseudoPlastic", 14 | ] 15 | ) 16 | def test_files(request): 17 | json_base_dir = os.path.join( 18 | os.path.dirname(os.path.abspath(__file__)), "../input_files/" 19 | ) 20 | h5_base_dir = os.path.join( 21 | os.path.dirname(os.path.abspath(__file__)), "../../build/test/" 22 | ) 23 | 24 | json_path = os.path.join(json_base_dir, f"{request.param}.json") 25 | h5_path = os.path.join(h5_base_dir, f"{request.param}.h5") 26 | 27 | if os.path.exists(json_path) and os.path.exists(h5_path): 28 | return json_path, h5_path 29 | pytest.skip(f"Required test files not found: {json_path} or {h5_path}") 30 | 31 | 32 | def test_strain_stress_averaging(test_files): 33 | """ 34 | This test verifies that the average of strain/stress fields matches the strain_average/stress_average 35 | fields in the results for all microstructures and load cases. 36 | 37 | Parameters 38 | ---------- 39 | test_files : tuple 40 | A tuple containing (input_json_file, results_h5_file) paths. 41 | - input_json_file: Path to the JSON file containing configuration data 42 | - results_h5_file: Path to the HDF5 file containing simulation results 43 | """ 44 | input_json_file, results_h5_file = test_files 45 | 46 | # Load the input json file to check which fields are requested 47 | with open(input_json_file, "r") as f: 48 | input_data = json.load(f) 49 | 50 | # Check which fields are available in the results 51 | results = input_data.get("results", []) 52 | fields_to_check = [] 53 | 54 | # Check pairs of fields to compare (field and its average) 55 | if "strain" in results and "strain_average" in results: 56 | fields_to_check.append(("strain", "strain_average")) 57 | if "stress" in results and "stress_average" in results: 58 | fields_to_check.append(("stress", "stress_average")) 59 | 60 | if not fields_to_check: 61 | pytest.skip( 62 | f"Skipping test: No compatible strain/stress and average pairs found in {input_json_file}" 63 | ) 64 | return 65 | 66 | # Extract hierarchy information from the h5 file 67 | hierarchy = identify_hierarchy(results_h5_file) 68 | 69 | # Load the data from the HDF5 file 70 | microstructures_to_load = list(hierarchy.keys()) 71 | 72 | quantities_to_load = [] 73 | for field, avg_field in fields_to_check: 74 | quantities_to_load.extend([field, avg_field]) 75 | 76 | time_steps_to_load = [] 77 | load_cases_to_load = [] 78 | 79 | # Get all unique load cases across all microstructures 80 | for microstructure in microstructures_to_load: 81 | for load_case in hierarchy[microstructure].keys(): 82 | if load_case not in load_cases_to_load: 83 | load_cases_to_load.append(load_case) 84 | 85 | # Extract the specified data, organized and sorted by time steps 86 | data = extract_and_organize_data( 87 | results_h5_file, 88 | hierarchy, 89 | quantities_to_load, 90 | microstructures_to_load, 91 | load_cases_to_load, 92 | time_steps_to_load, 93 
| ) 94 | 95 | # Check each field pair (field and its average) 96 | for field, avg_field in fields_to_check: 97 | print(f"\nVerifying {field} averages match {avg_field}...") 98 | 99 | for microstructure in microstructures_to_load: 100 | for load_case in load_cases_to_load: 101 | if load_case in hierarchy[microstructure]: 102 | if ( 103 | field not in data[microstructure][load_case] 104 | or avg_field not in data[microstructure][load_case] 105 | ): 106 | print( 107 | f"Skipping {microstructure}/{load_case}: Missing {field} or {avg_field}" 108 | ) 109 | continue 110 | 111 | field_data = data[microstructure][load_case][field] 112 | avg_field_data = data[microstructure][load_case][avg_field] 113 | 114 | # Compute average manually by averaging over spatial dimensions (1, 2, 3) 115 | # field_data shape: time_steps x Nx x Ny x Nz x components 116 | computed_average = np.mean(field_data, axis=(1, 2, 3)) 117 | 118 | # Check if the computed average matches the stored average 119 | assert np.allclose( 120 | computed_average, avg_field_data, rtol=1e-5, atol=1e-8 121 | ), ( 122 | f"For microstructure {microstructure}, load case {load_case}: " 123 | f"Computed {field} average and stored {avg_field} do not match." 124 | f"\nComputed shape: {computed_average.shape}, Stored shape: {avg_field_data.shape}" 125 | ) 126 | 127 | print( 128 | f"Verified: {microstructure}, load case {load_case} - {field} average matches {avg_field}" 129 | ) 130 | 131 | 132 | if __name__ == "__main__": 133 | 134 | pytest.main(["-v", "-s", __file__]) 135 | -------------------------------------------------------------------------------- /test/pytest/test_tensortools.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | from fans_dashboard.core.tensortools import Full2Mandel, Mandel2Full 4 | from fans_dashboard.core.tensortools import VoigtStrain2Mandel, VoigtStress2Mandel 5 | from fans_dashboard.core.tensortools import Mandel2VoigtStrain, Mandel2VoigtStress 6 | from fans_dashboard.core.tensortools import Ciso, IsoProjectionC, IsoProjectionKappa 7 | 8 | 9 | def test_Conversion(): 10 | """Test various conversions, e.g., Full tensor <-> Mandel, Mandel <-> Voigt (both orderings)""" 11 | n_test = 10 12 | for i in range(n_test): 13 | # generate random tensor 14 | A = np.random.uniform(-1, 1, size=(3, 3)) 15 | A = A + A.T 16 | A_m = Full2Mandel(A) 17 | assert ( 18 | np.linalg.norm(A - Mandel2Full(A_m)) < 1.0e-10 19 | ), "Full->Mandel->Full failed for " + str(A) 20 | A_m = np.random.uniform(-1, 1, size=(6,)) 21 | assert ( 22 | np.linalg.norm(A_m - Full2Mandel(Mandel2Full(A_m))) < 1.0e-10 23 | ), "Mandel->Full->Mandel failed for A_m=" + str(A_m) 24 | 25 | assert ( 26 | np.linalg.norm(A_m - VoigtStrain2Mandel(Mandel2VoigtStrain(A_m))) < 1.0e-10 27 | ), "Mandel->Voigt_eps->Mandel failed for A_m=" + str(A_m) 28 | assert ( 29 | np.linalg.norm(A_m - VoigtStress2Mandel(Mandel2VoigtStress(A_m))) < 1.0e-10 30 | ), "Mandel->Voigt_sig->Mandel failed for A_m=" + str(A_m) 31 | 32 | assert ( 33 | np.linalg.norm( 34 | A_m 35 | - VoigtStrain2Mandel(Mandel2VoigtStrain(A_m, order="abq"), order="abq") 36 | ) 37 | < 1.0e-10 38 | ), "Mandel->Voigt_eps,ABQ->Mandel failed for A_m=" + str(A_m) 39 | assert ( 40 | np.linalg.norm( 41 | A_m 42 | - VoigtStress2Mandel(Mandel2VoigtStress(A_m, order="abq"), order="abq") 43 | ) 44 | < 1.0e-10 45 | ), "Mandel->Voigt_sig,ABQ->Mandel failed for A_m=" + str(A_m) 46 | 47 | 48 | def test_Ciso(): 49 | """test the isotropic projection for 4-tensors using 
randomized data""" 50 | n_test = 10 51 | G = np.random.uniform(0, 10, size=n_test) 52 | K = np.random.uniform(0, 10, size=n_test) 53 | for k, g in zip(K, G): 54 | C = Ciso(k, g) 55 | k_fit, g_fit = IsoProjectionC(C) 56 | assert ( 57 | np.abs(k - k_fit) + np.abs(g - g_fit) < 1.0e-10 58 | ), f"Error in isotropic projection (4-tensor): K, G true = [{k}, {g}]; K, G projected = [{k_fit}, {g_fit}]" 59 | 60 | 61 | def test_kappaiso(): 62 | """test the isotropic projection for 2-tensors using randomized data""" 63 | n_test = 10 64 | Id_m = np.array((1.0, 1.0, 1.0, 0.0, 0.0, 0.0)) 65 | for kappa in np.random.uniform(0, 10, size=n_test): 66 | kappa_fit = IsoProjectionKappa(kappa * Id_m) 67 | assert ( 68 | np.abs(kappa - kappa_fit) < 1.0e-10 69 | ), f"Error in isotropic projection (2-tensor): kappa = {kappa}; kappa projected = {kappa_fit}" 70 | 71 | 72 | if __name__ == "__main__": 73 | pytest.main(["-v", "-s", __file__]) 74 | -------------------------------------------------------------------------------- /test/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the number of processes is provided as a command line argument 4 | if [ $# -ne 2 ] || [ "$1" != "-n" ]; then 5 | echo "Usage: $0 -n " 6 | exit 1 7 | fi 8 | 9 | num_processes=$2 10 | 11 | TIME_CMD="command time -v" 12 | [[ "$OSTYPE" == "darwin"* ]] && TIME_CMD="command gtime -v" 13 | 14 | # Run the jobs serially 15 | $TIME_CMD mpiexec -n $num_processes ./FANS input_files/test_LinearThermal.json test_LinearThermal.h5 > test_LinearThermal.log 2>&1 16 | 17 | $TIME_CMD mpiexec -n $num_processes ./FANS input_files/test_LinearElastic.json test_LinearElastic.h5 > test_LinearElastic.log 2>&1 18 | 19 | $TIME_CMD mpiexec -n $num_processes ./FANS input_files/test_PseudoPlastic.json test_PseudoPlastic.h5 > test_PseudoPlastic.log 2>&1 20 | 21 | $TIME_CMD mpiexec -n $num_processes ./FANS input_files/test_J2Plasticity.json test_J2Plasticity.h5 > test_J2Plasticity.log 2>&1 22 | 23 | $TIME_CMD mpiexec -n $num_processes ./FANS input_files/test_MixedBCs.json test_MixedBCs.h5 > test_MixedBCs.log 2>&1 24 | -------------------------------------------------------------------------------- /test/test_pyfans/README.md: -------------------------------------------------------------------------------- 1 | # Test pyFANS 2 | 3 | Test pyFANS as standalone library called from a Python script. 4 | 5 | ## Build pyFANS 6 | 7 | Configure the FANS CMake build with the variable `FANS_LIB` set to `ON`. 8 | 9 | ## Dependencies 10 | 11 | Install the following dependencies 12 | 13 | ## Run the test 14 | 15 | The test runs a dummy macro problem (unit cube) which is coupled via preCICE to the Micro Manager. The Micro Manager controls micro simulations created using pyFANS. 
Run the test by running 16 | 17 | ```bash 18 | python macro-cube.py & micro-manager-precice micro-manager-config.json 19 | ``` 20 | -------------------------------------------------------------------------------- /test/test_pyfans/input.json: -------------------------------------------------------------------------------- 1 | { 2 | "microstructure": { 3 | "filepath": "../microstructures/sphere32.h5", 4 | "datasetname": "/sphere/32x32x32/ms", 5 | "L": [1.0, 1.0, 1.0] 6 | }, 7 | 8 | "problem_type": "mechanical", 9 | "matmodel": "LinearElasticIsotropic", 10 | "material_properties":{ 11 | "bulk_modulus": [62.5000, 222.222], 12 | "shear_modulus": [28.8462, 166.6667] 13 | }, 14 | 15 | "method": "cg", 16 | "error_parameters":{ 17 | "measure": "Linfinity", 18 | "type": "absolute", 19 | "tolerance": 1e-10 20 | }, 21 | "n_it": 100, 22 | "macroscale_loading": [ 23 | [[0,0,0,0,0,0]] 24 | ], 25 | 26 | "results": [] 27 | } 28 | -------------------------------------------------------------------------------- /test/test_pyfans/macro-cube.py: -------------------------------------------------------------------------------- 1 | """ 2 | Run FANS as a Python callable library. 3 | """ 4 | import numpy as np 5 | import precice 6 | 7 | 8 | def main(): 9 | np_axis = 2 10 | 11 | # preCICE setup 12 | participant = precice.Participant("macro-cube", "precice-config.xml", 0, 1) 13 | mesh_name = "cube" 14 | 15 | # Coupling mesh - unit cube 16 | x_coords, y_coords, z_coords = np.meshgrid( 17 | np.linspace(0, 1, np_axis), 18 | np.linspace(0, 1, np_axis), 19 | np.linspace(0, 1, np_axis), 20 | ) 21 | 22 | nv = np_axis ** participant.get_mesh_dimensions(mesh_name) 23 | coords = np.zeros((nv, participant.get_mesh_dimensions(mesh_name))) 24 | 25 | # Define unit cube coordinates 26 | for z in range(np_axis): 27 | for y in range(np_axis): 28 | for x in range(np_axis): 29 | n = x + y * np_axis + z * np_axis * np_axis 30 | coords[n, 0] = x_coords[x, y, z] 31 | coords[n, 1] = y_coords[x, y, z] 32 | coords[n, 2] = z_coords[x, y, z] 33 | 34 | vertex_ids = participant.set_mesh_vertices(mesh_name, coords) 35 | 36 | participant.initialize() 37 | 38 | dt = participant.get_max_time_step_size() 39 | 40 | strains1to3 = np.full((nv, 3), 0.005) 41 | strains4to6 = np.full((nv, 3), 0.0005) 42 | 43 | # time loop 44 | while participant.is_coupling_ongoing(): 45 | stress1to3 = participant.read_data(mesh_name, "stresses1to3", vertex_ids, dt) 46 | stress4to6 = participant.read_data(mesh_name, "stresses4to6", vertex_ids, dt) 47 | cmat1 = participant.read_data(mesh_name, "cmat1", vertex_ids, dt) 48 | cmat2 = participant.read_data(mesh_name, "cmat2", vertex_ids, dt) 49 | cmat3 = participant.read_data(mesh_name, "cmat3", vertex_ids, dt) 50 | cmat4 = participant.read_data(mesh_name, "cmat4", vertex_ids, dt) 51 | cmat5 = participant.read_data(mesh_name, "cmat5", vertex_ids, dt) 52 | cmat6 = participant.read_data(mesh_name, "cmat6", vertex_ids, dt) 53 | cmat7 = participant.read_data(mesh_name, "cmat7", vertex_ids, dt) 54 | 55 | participant.write_data(mesh_name, "strains1to3", vertex_ids, strains1to3) 56 | participant.write_data(mesh_name, "strains4to6", vertex_ids, strains4to6) 57 | 58 | participant.advance(dt) 59 | dt = participant.get_max_time_step_size() 60 | 61 | participant.finalize() 62 | 63 | 64 | if __name__ == "__main__": 65 | main() 66 | -------------------------------------------------------------------------------- /test/test_pyfans/micro-manager-config.json: -------------------------------------------------------------------------------- 1 | { 
2 | "micro_file_name": "PyFANS", 3 | "coupling_params": { 4 | "precice_config_file_name": "precice-config.xml", 5 | "macro_mesh_name": "cube", 6 | "read_data_names": {"strains1to3": "vector", 7 | "strains4to6": "vector" 8 | }, 9 | "write_data_names": {"stresses1to3": "vector", 10 | "stresses4to6": "vector", 11 | "cmat1":"vector", 12 | "cmat2":"vector", 13 | "cmat3":"vector", 14 | "cmat4":"vector", 15 | "cmat5":"vector", 16 | "cmat6":"vector", 17 | "cmat7":"vector" 18 | } 19 | }, 20 | "simulation_params": { 21 | "micro_dt": 1e-1, 22 | "macro_domain_bounds": [0, 1, 0, 1, 0, 1] 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /test/test_pyfans/precice-config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | --------------------------------------------------------------------------------