├── ffthompy ├── general │ ├── __init__.py │ ├── solver_pp.py │ ├── unittest_solver.py │ └── base.py ├── mechanics │ ├── __init__.py │ └── unittest_matcoef.py ├── tensorsLowRank │ ├── __init__.py │ ├── objects │ │ ├── __init__.py │ │ ├── tensors.py │ │ └── sparseTensorWrapper.py │ ├── projection.py │ ├── fft1.py │ ├── materials.py │ ├── decompositions.py │ └── solver.py ├── matvecs │ ├── __init__.py │ ├── unittest_matvec.py │ └── applications.py ├── __init__.py ├── tensors │ ├── __init__.py │ ├── fft.py │ ├── unittest_tensors.py │ ├── projection.py │ └── unittest_operators.py ├── postprocess.py ├── unittest_materials.py ├── problem.py ├── applications.py └── trigpol.py ├── examples ├── FFTHvsFEM │ ├── __init__.py │ ├── README.md │ ├── FFTH_Ga.py │ ├── FEM.py │ ├── FFTH_GaNi.py │ └── functions.py ├── lowRankTensorApproximations │ ├── __init__.py │ ├── diffusion.py │ ├── fig_pars.py │ ├── diffusion_comp_error.py │ ├── README.md │ ├── diffusion_comp_residua.py │ ├── diffusion_comp_time.py │ ├── diffusion_comp_time_stochastic_material.py │ └── setting.py ├── scalar │ ├── topology.txt │ ├── from_file.py │ ├── scalar_3d.py │ └── scalar_2d.py └── elasticity │ └── linelas_3d.py ├── doc ├── figures │ └── fig_grid.png ├── index.rst ├── Makefile └── users_guide.rst ├── test_results └── python3 │ ├── from_file_prob1 │ ├── from_file_prob2 │ ├── linelas_3d_prob1 │ ├── linelas_3d_prob2 │ ├── scalar_2d_prob1 │ ├── scalar_2d_prob2 │ ├── scalar_2d_prob3 │ ├── scalar_2d_prob4 │ ├── scalar_2d_prob5 │ ├── scalar_3d_prob1 │ ├── scalar_3d_prob2 │ └── scalar_3d_prob3 ├── contributors.txt ├── .gitignore ├── main.py ├── LICENSE ├── README.md ├── run_unittests.py └── tutorials ├── 04_exact_integration_fast.py ├── 03_exact_integration_simple.py └── 01_trig_pol.py /ffthompy/general/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/FFTHvsFEM/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ffthompy/mechanics/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ffthompy/tensorsLowRank/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /examples/lowRankTensorApproximations/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ffthompy/matvecs/__init__.py: -------------------------------------------------------------------------------- 1 | from .objects import Matrix, VecTri, Scalar, DFT, LinOper 2 | -------------------------------------------------------------------------------- /examples/scalar/topology.txt: -------------------------------------------------------------------------------- 1 | 0 0 0 0 0 2 | 0 1 1 1 0 3 | 0 1 1 1 0 4 | 0 1 1 1 0 5 | 0 0 0 0 0 6 | -------------------------------------------------------------------------------- /doc/figures/fig_grid.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/doc/figures/fig_grid.png 
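The 0/1 grid in examples/scalar/topology.txt above defines the pixel-wise geometry of the scalar example (1 = square inclusion, 0 = matrix). Below is a minimal sketch, not part of the repository, of how such a file can be turned into a material-coefficient field; it mirrors get_mat() in examples/scalar/from_file.py further below and assumes only NumPy.

```python
# Sketch (not part of the repository): load the 0/1 topology and assign
# isotropic coefficients per phase, as get_mat() in from_file.py does.
import numpy as np

topo = np.loadtxt('examples/scalar/topology.txt')   # shape (5, 5), values 0/1
dim = topo.ndim                                     # 2 for this file
matrix_phase = np.eye(dim)                          # coefficient of phase 0
inclusion = 11.*np.eye(dim)                         # coefficient of phase 1
A = (np.einsum('ij,...->ij...', matrix_phase, (topo == 0).astype(float))
     + np.einsum('ij,...->ij...', inclusion, (topo == 1).astype(float)))
print(A.shape)                                      # (2, 2, 5, 5)
```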
-------------------------------------------------------------------------------- /test_results/python3/from_file_prob1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/test_results/python3/from_file_prob1 -------------------------------------------------------------------------------- /test_results/python3/from_file_prob2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/test_results/python3/from_file_prob2 -------------------------------------------------------------------------------- /test_results/python3/linelas_3d_prob1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/test_results/python3/linelas_3d_prob1 -------------------------------------------------------------------------------- /test_results/python3/linelas_3d_prob2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/test_results/python3/linelas_3d_prob2 -------------------------------------------------------------------------------- /test_results/python3/scalar_2d_prob1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/test_results/python3/scalar_2d_prob1 -------------------------------------------------------------------------------- /test_results/python3/scalar_2d_prob2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/test_results/python3/scalar_2d_prob2 -------------------------------------------------------------------------------- /test_results/python3/scalar_2d_prob3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/test_results/python3/scalar_2d_prob3 -------------------------------------------------------------------------------- /test_results/python3/scalar_2d_prob4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/test_results/python3/scalar_2d_prob4 -------------------------------------------------------------------------------- /test_results/python3/scalar_2d_prob5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/test_results/python3/scalar_2d_prob5 -------------------------------------------------------------------------------- /test_results/python3/scalar_3d_prob1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/test_results/python3/scalar_3d_prob1 -------------------------------------------------------------------------------- /test_results/python3/scalar_3d_prob2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/test_results/python3/scalar_3d_prob2 -------------------------------------------------------------------------------- /test_results/python3/scalar_3d_prob3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vondrejc/FFTHomPy/HEAD/test_results/python3/scalar_3d_prob3 
-------------------------------------------------------------------------------- /contributors.txt: -------------------------------------------------------------------------------- 1 | Dishi Liu 2 | Jaroslav Vondrejc 3 | Martin Ladecky 4 | Nachiketa Mishra 5 | -------------------------------------------------------------------------------- /ffthompy/__init__.py: -------------------------------------------------------------------------------- 1 | from ffthompy.general.base import * 2 | 3 | __author__ = "Jaroslav Vondrejc" 4 | __copyright__ = """Copyright 2016, Jaroslav Vondrejc""" 5 | __email__ = "vondrejc@gmail.com" 6 | -------------------------------------------------------------------------------- /ffthompy/tensorsLowRank/objects/__init__.py: -------------------------------------------------------------------------------- 1 | from .sparseTensorWrapper import SparseTensor 2 | from .canoTensor import CanoTensor 3 | from .tucker import Tucker 4 | from .tensorTrain import TensorTrain 5 | -------------------------------------------------------------------------------- /ffthompy/tensors/__init__.py: -------------------------------------------------------------------------------- 1 | from .objects import Tensor 2 | from .operators import (DFT, grad, div, symgrad, potential, Operator, matrix2tensor, 3 | grad_div_tensor, grad_tensor, div_tensor, outer) 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Eclipse 2 | .project 3 | .pydevproject 4 | 5 | # Sphinx documentation 6 | docs/_build/ 7 | 8 | # auxiliary 9 | *test* 10 | *.eps 11 | *.pdf 12 | *.pkl 13 | *.txt 14 | .settings/* 15 | output/* 16 | *temp* 17 | *~ 18 | _* 19 | !__* 20 | !unittest* 21 | *.py[cod] 22 | data/* 23 | 24 | examples/lowRankTensorApproximations/data_for_plot/ 25 | 26 | \.idea/ 27 | 28 | *.png 29 | 30 | examples/lowRankTensorApproximations/Dishis\.py 31 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | from ffthompy.problem import Problem, import_file 4 | from optparse import OptionParser 5 | 6 | parser = OptionParser() 7 | _, args = parser.parse_args() 8 | 9 | print('###################################################') 10 | print('## FFT-based homogenization in Python (FFTHomPy) ##') 11 | print('###################################################') 12 | 13 | if (len(args) == 1): 14 | input_file = args[0] 15 | elif (len(args) == 0): 16 | raise ValueError("The input argument (input file name) is missing.") 17 | else: 18 | raise ValueError("Too many input arguments") 19 | 20 | conf = import_file(input_file) 21 | 22 | for conf_problem in conf.problems: 23 | prob = Problem(conf_problem, conf) 24 | prob.calculate() 25 | prob.postprocessing() 26 | 27 | print('The calculation is finished!') 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Jaroslav Vondrejc 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, 
and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /ffthompy/tensorsLowRank/projection.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ffthompy.trigpol import Grid 3 | from ffthompy.tensorsLowRank.objects import SparseTensor 4 | 5 | def grad_tensor(N, Y, kind='TensorTrain'): 6 | assert(kind.lower() in ['cano','canotensor','tucker','tt','tensortrain']) 7 | 8 | dim=Y.size 9 | freq=Grid.get_xil(N, Y, fft_form='c') 10 | hGrad_s=[] 11 | 12 | for ii in range(dim): 13 | basis=[] 14 | for jj in range(dim): 15 | if ii==jj: 16 | basis.append(np.atleast_2d(freq[jj]*2*np.pi*1j)) 17 | else: 18 | basis.append(np.atleast_2d(np.ones(N[jj]))) 19 | 20 | if kind.lower() in ['cano', 'canotensor','tucker']: 21 | hGrad_s.append(SparseTensor(kind=kind, name='hGrad({})'.format(ii), core=np.array([1.]), 22 | basis=basis, Fourier=True, fft_form='c').set_fft_form()) 23 | elif kind.lower() in ['tt','tensortrain']: 24 | cl = [bas.reshape((1,-1,1)) for bas in basis] 25 | hGrad_s.append(SparseTensor(kind=kind, core=cl, name='hGrad({})'.format(ii), 26 | Fourier=True, fft_form='c').set_fft_form()) 27 | 28 | return hGrad_s 29 | -------------------------------------------------------------------------------- /ffthompy/tensors/fft.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numpy.fft as fft 3 | 4 | def cfftnc(x, N): 5 | """ 6 | real and Fourier centered n-dimensional FFT algorithm 7 | """ 8 | ax=tuple(np.setdiff1d(list(range(x.ndim)), list(range(x.ndim-N.__len__())), assume_unique=True)) 9 | return 1./np.prod(N)*fft.fftshift(fft.fftn(fft.ifftshift(x, ax), N), ax) 10 | 11 | def icfftnc(Fx, N): 12 | """ 13 | real and Fourier centered n-dimensional inverse FFT algorithm 14 | """ 15 | ax=tuple(np.setdiff1d(list(range(Fx.ndim)), list(range(Fx.ndim-N.__len__())), assume_unique=True)) 16 | return fft.fftshift(fft.ifftn(fft.ifftshift(Fx, ax), N), ax).real*np.prod(N) 17 | 18 | def fftnc(x, N): 19 | """ 20 | Fourier centered n-dimensional FFT algorithm 21 | """ 22 | ax=tuple(np.setdiff1d(list(range(x.ndim)), list(range(x.ndim-N.__len__())), assume_unique=True)) 23 | return 1./np.prod(N)*fft.fftshift(fft.fftn(x, N), ax) 24 | 25 | def icfftn(Fx, N): 26 | """ 27 | Fourier centered n-dimensional inverse FFT algorithm 28 | """ 29 | ax=tuple(np.setdiff1d(list(range(Fx.ndim)), list(range(Fx.ndim-N.__len__())), assume_unique=True)) 30 | return fft.ifftn(fft.ifftshift(Fx, ax), N).real*np.prod(N) 31 | 32 | 33 | def fftn(x, N): # normalised FFT 34 | return 1./np.prod(N)*fft.fftn(x, N) 35 | 36 | def ifftn(x, N): # normalised FFT 37 | return fft.ifftn(x, N).real*np.prod(N) 38 | 39 | def rfftn(x, 
N): # real-valued FFT 40 | return fft.rfftn(x, N) 41 | 42 | def irfftn(x, N): # real-valued inverse FFT 43 | return fft.irfftn(x, N) 44 | -------------------------------------------------------------------------------- /ffthompy/matvecs/unittest_matvec.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | from ffthompy.matvecs import DFT, VecTri 4 | 5 | 6 | class Test_matvec(unittest.TestCase): 7 | 8 | def setUp(self): 9 | pass 10 | 11 | def tearDown(self): 12 | pass 13 | 14 | def test_matrix_versions(self): 15 | print('\nChecking Matrices...') 16 | for dim in [2, 3]: 17 | for n in [4, 5]: 18 | N = n*np.ones(dim, dtype=np.int) 19 | ur = VecTri(name='rand', dim=2, N=N, valtype='rand') 20 | FN = DFT(name='FN', inverse=False, N=N, d=dim) 21 | FiN = DFT(name='FiN', inverse=True, N=N, d=dim) 22 | msg = 'Operations .matrix() .vec() do not work properly!' 23 | Fur = FN(ur) 24 | val = np.linalg.norm(Fur.vec()-FN.matrix().dot(ur.vec())) 25 | self.assertAlmostEqual(0, val, msg=msg, delta=1e-13) 26 | val = np.linalg.norm(ur.vec()-FiN.matrix().dot(Fur.vec())) 27 | self.assertAlmostEqual(0, val, msg=msg, delta=1e-13) 28 | print('...ok') 29 | 30 | def test_projection(self): 31 | print('\nChecking projections...') 32 | for dim in [2, 3]: 33 | for n in [5]: 34 | N = n*np.ones(dim, dtype=np.int) 35 | uN = VecTri(name='rand', dim=dim, N=N, valtype='rand') 36 | for i in range(2): 37 | msg='dim={0}, n={1}, i={2}'.format(dim, n, i) 38 | self.assertAlmostEqual(0, uN==uN.project(2*N-i).project(N), 39 | msg=msg, delta=1e-13) 40 | print('...ok') 41 | 42 | if __name__ == "__main__": 43 | unittest.main() 44 | -------------------------------------------------------------------------------- /examples/FFTHvsFEM/README.md: -------------------------------------------------------------------------------- 1 | Energy-based comparison between the Fourier-Galerkin method and the finite element method 2 | ========================================================================================== 3 | 4 | This folder [examples/FFTHvsFEM](#examples/FFTHvsFEM) contains an implementation comparing the FFT-based homogenisation 5 | with the finite element method (FEM). It is based on the following publication: 6 | 7 | - Vondřejc, J., & de Geus, T. W. J. (2019). Energy-based comparison between the Fourier--Galerkin method and the finite element method. Journal of Computational and Applied Mathematics. https://doi.org/10.1016/j.cam.2019.112585 8 | 9 | 10 | This file explains the basic usage of the attached code, which is written in Python 3 11 | (https://www.python.org/ using version 3.6). 12 | Each script can be run with the command 'python' followed by the script name, e.g. 'python FEM.py' (see the runner sketch below). 13 | The code depends on the numerical libraries 14 | - NumPy (http://www.numpy.org/ using version 1.17.2), 15 | - SciPy (https://www.scipy.org/ using version 1.3.1), and 16 | - finite element software FEniCS (https://fenicsproject.org/ using version 2019.1.0). 17 | 18 | 19 | file: FEM.py 20 | ------------ 21 | Solves the homogenisation problem using the finite element method. 22 | 23 | 24 | file: FFTH_GaNi.py 25 | ------------------ 26 | Solves the homogenisation problem using the Fourier-Galerkin method with numerical integration, which is fully equivalent to the Moulinec-Suquet scheme. 27 | 28 | 29 | file: FFTH_Ga.py 30 | ---------------- 31 | Solves the homogenisation problem using the Fourier-Galerkin method with exact integration. It provides the best approximation using trigonometric polynomials.
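The three solver scripts are independent and each prints its homogenised coefficient. A hypothetical runner (not part of this folder) that executes them and collects the reported values could look as follows, assuming the dependencies listed above are installed:

```python
# Hypothetical runner (not part of the repository): execute the three scripts
# and print their reported homogenised coefficients side by side.
import subprocess

for script in ('FEM.py', 'FFTH_GaNi.py', 'FFTH_Ga.py'):
    out = subprocess.run(['python', script], stdout=subprocess.PIPE,
                         universal_newlines=True).stdout
    for line in out.splitlines():
        if line.startswith('homogenised component'):
            print('{}: {}'.format(script, line))
```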
32 | 33 | 34 | file: functions.py 35 | ------------------ 36 | This file contains auxiliary functions. -------------------------------------------------------------------------------- /ffthompy/tensorsLowRank/fft1.py: -------------------------------------------------------------------------------- 1 | """ 2 | collection of 1-D FFTs for Fourier transform of basis, all done on the 2nd dimension of the basis. 3 | """ 4 | 5 | from scipy import fftpack 6 | import numpy.fft as npfft 7 | 8 | 9 | def cfftc(x, N): 10 | """ 11 | centered 1-dimensional FFT algorithm 12 | """ 13 | return npfft.fftshift(npfft.fft(npfft.ifftshift(x, axes=1), axis=1), axes=1)/N 14 | 15 | def icfftc(Fx, N,real_output=False): 16 | """ 17 | centered 1-dimensional inverse FFT algorithm 18 | """ 19 | if real_output: 20 | return npfft.fftshift(npfft.ifft(npfft.ifftshift(Fx, axes=1), axis=1), axes=1).real*N 21 | else: 22 | return npfft.fftshift(npfft.ifft(npfft.ifftshift(Fx, axes=1), axis=1), axes=1)*N 23 | 24 | def fftc(x, N): 25 | """ 26 | centered 1-dimensional FFT algorithm 27 | """ 28 | return npfft.fftshift(npfft.fft(x, axis=1),axes=1)/N 29 | 30 | def icfft(Fx, N,real_output=False): 31 | """ 32 | centered 1-dimensional inverse FFT algorithm 33 | """ 34 | if real_output: 35 | return npfft.ifft(npfft.ifftshift(Fx, axes=1), axis=1).real*N 36 | else: 37 | return npfft.ifft(npfft.ifftshift(Fx, axes=1), axis=1)*N 38 | 39 | def fft(x, N): 40 | return npfft.fft(x, axis=1)/N # numpy.fft.fft 41 | 42 | def ifft(x, N,real_output=False): 43 | if real_output: 44 | return npfft.ifft(x, axis=1).real*N # numpy.fft.fft 45 | else: 46 | return npfft.ifft(x, axis=1)*N # numpy.fft.fft 47 | 48 | def rfft(x, N): 49 | return npfft.rfft(x.real, axis=1)/N # real version of numpy.fft.fft 50 | 51 | def irfft(x, N, real_output=True): 52 | return npfft.irfft(x, axis=1)*N # real version of numpy.fft.fft 53 | 54 | def srfft(x, N): 55 | return fftpack.rfft(x.real, axis=1)/N # 1-D real fft from scipy.fftpack.rfft 56 | 57 | def sirfft(x, N, real_output=True): 58 | return fftpack.irfft(x.real, axis=1)*N # 1-D real inverse fft from scipy.fftpack.irfft 59 | -------------------------------------------------------------------------------- /ffthompy/mechanics/unittest_matcoef.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | from numpy.linalg import norm 4 | from ffthompy.mechanics.matcoef import ElasticTensor as ET 5 | 6 | 7 | class Test_matcoef(unittest.TestCase): 8 | 9 | def setUp(self): 10 | pass 11 | 12 | def tearDown(self): 13 | pass 14 | 15 | @staticmethod 16 | def get_rand_sym(dim, ndim): 17 | A = np.random.random(ndim*(dim,)) 18 | if ndim == 2: 19 | A = A+A.T 20 | else: 21 | A = 0.5*(A+np.einsum('ijkl->klij',A)) 22 | A = 0.5*(A+np.einsum('ijkl->ijlk',A)) 23 | A = 0.5*(A+np.einsum('ijkl->jikl',A)) 24 | return A 25 | 26 | def test_mechanics(self): 27 | print('\nChecking mechanics...') 28 | self._mandel() 29 | self._plane() 30 | print('...ok') 31 | 32 | def _mandel(self): 33 | print(" Mandel's notation") 34 | for dim in [2, 3]: 35 | for ndim in [2, 4]: 36 | A = self.get_rand_sym(dim=dim, ndim=ndim) 37 | 38 | Am = ET.create_mandel(A, ndim=None) 39 | self.assertAlmostEqual(0, norm(Am-Am.T)) 40 | A2 = ET.dispose_mandel(Am, ndim=None) 41 | Am2 = ET.create_mandel(A2, ndim=None) 42 | self.assertAlmostEqual(0, norm(Am-Am2), 43 | msg='mandel in dim={0} and ndim={1}'.format(dim, ndim), 44 | delta=1e-14) 45 | self.assertAlmostEqual(0, norm(A-A2), 46 | msg='mandel in dim={0} and 
ndim={1}'.format(dim, ndim), 47 | delta=1e-14) 48 | 49 | def _plane(self): 50 | print(' plane strain and stress') 51 | A = self.get_rand_sym(dim=3, ndim=4) 52 | Am = ET.create_mandel(A) 53 | Amplane = ET.get_plane_in_engineering(Am).squeeze() 54 | 55 | Aplane = ET.get_plane_in_tensor(A) 56 | Aplanem = ET.create_mandel(Aplane).squeeze() 57 | self.assertAlmostEqual(0, norm(Aplanem - Amplane)) 58 | 59 | 60 | if __name__ == "__main__": 61 | unittest.main() 62 | -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | .. FFTHomPy documentation master file, created by 2 | sphinx-quickstart on Fri Jan 23 17:52:11 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to FFTHomPy's documentation! 7 | ==================================== 8 | 9 | *FFTHomPy* is a Python implementation of FFT-based homogenization based on the following papers: 10 | 11 | * \J. Zeman, T. W. J. de Geus, J. Vondřejc, R. H. J. Peerlings, and M. G. D. Geers: A finite element perspective on non-linear FFT-based micromechanical simulations, *International Journal for Numerical Methods in Engineering*, 111 (10), pp. 903-926, 2017. arXiv:1601.05970 12 | 13 | * \N. Mishra, J. Vondřejc, J. Zeman: A comparative study on low-memory iterative solvers for FFT-based homogenization of periodic media, *Journal of Computational Physics*, 321, pp. 151-168, 2016. arXiv:1508.02045 14 | 15 | * \J. Vondřejc: Improved guaranteed computable bounds on homogenized properties of periodic media by Fourier-Galerkin Method with exact integration, *International Journal for Numerical Methods in Engineering*, 107 (13), pp. 1106-1135, 2016. arXiv:1412.2033 16 | 17 | * \J. Vondřejc, J. Zeman, I. Marek: Guaranteed upper-lower bounds on homogenized properties by FFT-based Galerkin method, *Computer Methods in Applied Mechanics and Engineering*, 297, pp. 258-291, 2015. arXiv:1404.3614 18 | 19 | * \J. Vondřejc, J. Zeman, I. Marek: An FFT-based Galerkin method for homogenization of periodic media, *Computers and Mathematics with Applications*, 68, pp. 156-173, 2014. arXiv:1311.0089 20 | 21 | * \J. Zeman, J. Vondřejc, J. Novák and I. Marek: Accelerating a FFT-based solver for numerical homogenization of periodic media by conjugate gradients, *Journal of Computational Physics*, 229 (21), pp. 8065-8071, 2010. arXiv:1004.1122. 22 | 23 | License: `MIT `_ 24 | 25 | News: 26 | ----- 27 | The software now contains tutorials in the folder '/tutorials', which also contains an implementation of the method based on exact integration. 28 | 29 | Links: 30 | ------ 31 | 32 | * Source code - git repository: https://github.com/vondrejc/FFTHomPy.git 33 | 34 | Contents: 35 | --------- 36 | ..
toctree:: 37 | :maxdepth: 3 38 | 39 | users_guide 40 | 41 | -------------------------------------------------------------------------------- /ffthompy/tensors/unittest_tensors.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from ffthompy.tensors import Tensor 3 | import numpy as np 4 | import itertools 5 | 6 | fft_forms=['r',0,'c'] 7 | 8 | 9 | class Test_tensors(unittest.TestCase): 10 | 11 | def test_even(self): 12 | print('\nChecking Tensors with even grid points...') 13 | 14 | for dim, n, fft_form in itertools.product([2,3], [4,5], fft_forms): 15 | msg='Tensors with: dim={}, n={}, fft_form={}'.format(dim, n, fft_form) 16 | 17 | N=dim*(n,) 18 | M=tuple(2*np.array(N)) 19 | 20 | u=Tensor(name='test', shape=(), N=N, Fourier=False, fft_form=fft_form) 21 | u.randomize() 22 | Fu=u.fourier(copy=True) 23 | FuM=Fu.project(M) 24 | uM=FuM.fourier() 25 | 26 | if n%2 == 0: 27 | self.assertGreaterEqual(u.norm(), FuM.norm(), msg=msg) 28 | self.assertGreaterEqual(u.norm(componentwise=True), FuM.norm(componentwise=True), 29 | msg=msg) 30 | self.assertGreaterEqual(u.norm(), uM.norm(), msg=msg) 31 | self.assertGreaterEqual(u.norm(componentwise=True), uM.norm(componentwise=True), 32 | msg=msg) 33 | else: 34 | self.assertAlmostEqual(u.norm(), FuM.norm(), msg=msg) 35 | self.assertAlmostEqual(u.norm(componentwise=True), FuM.norm(componentwise=True), 36 | msg=msg) 37 | self.assertAlmostEqual(u.norm(), uM.norm(), msg=msg) 38 | self.assertAlmostEqual(u.norm(componentwise=True), uM.norm(componentwise=True), 39 | msg=msg) 40 | 41 | self.assertAlmostEqual(0, u.mean()-FuM.mean(), msg=msg) 42 | self.assertAlmostEqual(u.mean(), uM.mean(), msg=msg) 43 | 44 | # testing that that interpolation on double grid have exactly the same values 45 | slc=tuple(u.order*[slice(None),]+[slice(0,M[i],2) for i in range(dim)]) 46 | self.assertAlmostEqual(0, np.linalg.norm(u.val-uM.val[slc]), msg=msg) 47 | print('...ok') 48 | 49 | 50 | if __name__ == "__main__": 51 | unittest.main() 52 | -------------------------------------------------------------------------------- /examples/elasticity/linelas_3d.py: -------------------------------------------------------------------------------- 1 | """ 2 | Input file for a scalar linear elliptic problems. 
3 | """ 4 | 5 | import numpy as np 6 | from ffthompy.mechanics.matcoef import ElasticTensor 7 | import os 8 | from ffthompy.general.base import get_base_dir 9 | 10 | base_dir = get_base_dir() 11 | 12 | dim = 3 13 | N = 5*np.ones(dim, dtype=np.int32) 14 | 15 | matcoefM = ElasticTensor(bulk=1, mu=1) 16 | matcoefI = ElasticTensor(bulk=10, mu=5) 17 | 18 | materials = {'square': {'inclusions': ['square', 'otherwise'], 19 | 'positions': [np.zeros(dim), ''], 20 | 'params': [0.6*np.ones(dim), ''], # size of sides 21 | 'vals': [matcoefI.mandel, matcoefM.mandel], 22 | 'Y': np.ones(dim), 23 | 'order': None, 24 | 'P': N, 25 | }, 26 | } 27 | 28 | 29 | problems = [ 30 | {'name': 'prob1', 31 | 'physics': 'elasticity', 32 | 'material': 'square', 33 | 'solve': {'kind': 'GaNi', 34 | 'N': N, 35 | 'primaldual': ['primal', 'dual']}, 36 | 'postprocess': [{'kind': 'GaNi'}, 37 | {'kind': 'Ga', 38 | 'order': None}, 39 | {'kind': 'Ga', 40 | 'order': 0, 41 | 'P': N}, 42 | {'kind': 'Ga', 43 | 'order': 1, 44 | 'P': N}], 45 | 'solver': {'kind': 'CG', 46 | 'tol': 1e-6, 47 | 'maxiter': 1e3}, 48 | 'save': {'filename': os.path.join(base_dir, 'temp/linelas_3d_prob1'), 49 | 'data': 'all'}, 50 | }, 51 | {'name': 'prob2', 52 | 'physics': 'elasticity', 53 | 'material': 'square', 54 | 'solve': {'kind': 'Ga', 55 | 'N': N, 56 | 'primaldual': ['primal', 'dual']}, 57 | 'postprocess': [{'kind': 'Ga', 58 | 'order': None}], 59 | 'solver': {'kind': 'CG', 60 | 'tol': 1e-6, 61 | 'maxiter': 1e3}, 62 | 'save': {'filename': os.path.join(base_dir, 'temp/linelas_3d_prob2'), 63 | 'data': 'all'}, 64 | }, 65 | ] 66 | 67 | if __name__=='__main__': 68 | import subprocess 69 | subprocess.call(['../../main.py', __file__]) 70 | -------------------------------------------------------------------------------- /examples/scalar/from_file.py: -------------------------------------------------------------------------------- 1 | """ 2 | Input file for a scalar linear elliptic problems. 
3 | """ 4 | 5 | import numpy as np 6 | import os 7 | from ffthompy.general.base import get_base_dir 8 | 9 | base_dir = get_base_dir() 10 | input_dir = os.path.dirname(os.path.abspath(__file__)) 11 | file_name = os.path.join(input_dir, 'topology.txt') 12 | 13 | 14 | def get_topo(): 15 | topo = np.loadtxt(file_name) 16 | return topo 17 | 18 | topo = get_topo() 19 | P = np.array(topo.shape) # image resolution 20 | dim = P.size 21 | 22 | 23 | def get_mat(coord=None): 24 | topo = get_topo() 25 | if coord is not None and not (topo.shape == coord.shape[1:]): 26 | raise ValueError() 27 | matrix_phase = np.eye(dim) 28 | inclusion = 11.*np.eye(dim) 29 | mat_vals = np.einsum('ij...,k...->ijk...', matrix_phase, topo == 0) 30 | mat_vals += np.einsum('ij...,k...->ijk...', inclusion, topo == 1) 31 | return mat_vals 32 | 33 | materials = {'file': {'fun': get_mat, 34 | 'Y': np.ones(dim), 35 | 'order': 0, 36 | 'P': P}} 37 | 38 | maxiter = 1e3 39 | tol = 1e-6 40 | 41 | problems = [ 42 | {'name': 'prob1', 43 | 'physics': 'scalar', 44 | 'material': 'file', 45 | 'solve': {'kind': 'GaNi', 46 | 'N': P, 47 | 'primaldual': ['primal', 'dual']}, 48 | 'postprocess': [{'kind': 'GaNi'}, 49 | {'kind': 'Ga', 50 | 'order': 0, 51 | 'P': P}, 52 | {'kind': 'Ga', 53 | 'order': 1, 54 | 'P': P}], 55 | 'solver': {'kind': 'CG', 56 | 'tol': tol, 57 | 'maxiter': maxiter}, 58 | 'save': {'filename': os.path.join(base_dir, 'temp/from_file_prob1'), 59 | 'data': 'all'}}, 60 | {'name': 'prob2', 61 | 'physics': 'scalar', 62 | 'material': 'file', 63 | 'solve': {'kind': 'Ga', 64 | 'N': P, 65 | 'primaldual': ['primal', 'dual']}, 66 | 'postprocess': [{'kind': 'Ga'}], 67 | 'solver': {'kind': 'CG', 68 | 'tol': tol, 69 | 'maxiter': maxiter}, 70 | 'save': {'filename': os.path.join(base_dir, 'temp/from_file_prob2'), 71 | 'data': 'all'}} 72 | ] 73 | 74 | if __name__=='__main__': 75 | import subprocess 76 | subprocess.call(['../../main.py', __file__]) 77 | -------------------------------------------------------------------------------- /ffthompy/general/solver_pp.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ffthompy.matvecs import VecTri 3 | from ffthompy.tensors import Tensor 4 | 5 | 6 | class CallBack(): 7 | def __init__(self, **kwargs): 8 | self.__dict__.update(kwargs) 9 | self.iter = -1 10 | self.res_norm = [] 11 | self.energy_norm = [] 12 | 13 | def __call__(self, x): 14 | self.iter += 1 15 | if isinstance(x, np.ndarray): 16 | if isinstance(self.B, VecTri): 17 | X = VecTri(val=np.reshape(x, self.B.dN())) 18 | elif isinstance(self.B, VecTri): 19 | X = Tensor(val=np.reshape(x, self.B.dN()), shape=self.B.shape) 20 | else: 21 | X = x 22 | res = self.B - self.A(X) 23 | self.res_norm.append(res.norm()) 24 | return 25 | 26 | def __repr__(self): 27 | try: 28 | ss = '' 29 | ss += ' iterations : %d\n' % self.iter 30 | ss += ' res_norm : %g' % self.res_norm[-1] 31 | ss += '\n' 32 | except: 33 | ss = 'the results are not initialized yet' 34 | return ss 35 | 36 | 37 | class CallBack_GA(): 38 | def __init__(self, **kwargs): 39 | self.__dict__.update(kwargs) 40 | self.iter = -1 41 | self.res_norm = [] 42 | self.bound = [] 43 | self.nonconformity = [] 44 | 45 | def __call__(self, x): 46 | self.iter += 1 47 | if not isinstance(x, VecTri): 48 | X = VecTri(val=np.reshape(x, self.E2N.dN())) 49 | else: 50 | X = x 51 | 52 | if np.linalg.norm(X.mean() - self.E2N.mean()) < 1e-8: 53 | res = self.A(X) 54 | eN = X 55 | else: 56 | res = self.B-self.A(X) 57 | eN = X + self.E2N 58 | 59 | 
self.res_norm.append(res.norm()) 60 | GeN = self.GN*eN + self.E2N 61 | GeN_E = GeN + self.E2N 62 | self.bound.append(self.Aex*GeN_E*GeN_E) 63 | self.nonconformity.append((GeN-eN).norm()) 64 | return 65 | 66 | def __repr__(self): 67 | try: 68 | ss = '' 69 | ss += ' iterations : %d\n' % self.iter 70 | ss += ' res_norm : %g\n' % self.res_norm[-1] 71 | ss += ' bound : %g\n' % self.bound[-1] 72 | ss += ' nonconformity : %g' % self.bound[-1] 73 | except: 74 | ss = 'no output' 75 | return ss 76 | -------------------------------------------------------------------------------- /examples/FFTHvsFEM/FFTH_Ga.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import itertools 3 | from scipy.sparse.linalg import cg, LinearOperator 4 | from functions import material_coef_at_grid_points, get_matinc, square_weights 5 | 6 | # PARAMETERS 7 | dim = 2 # dimension (works for 2D and 3D) 8 | N = 5*np.ones(dim, dtype=np.int) # number of grid points 9 | phase = 10. # material contrast 10 | assert(np.array_equal(N % 2, np.ones(dim, dtype=np.int))) 11 | 12 | dN = 2*N-1 # grid value 13 | vec_shape=(dim,)+tuple(dN) # shape of the vector for storing DOFs 14 | 15 | # OPERATORS 16 | Agani = material_coef_at_grid_points(N, phase) 17 | dot = lambda A, B: np.einsum('ij...,j...->i...', A, B) 18 | fft = lambda x, N: np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(x), N)) / np.prod(N) 19 | ifft = lambda x, N: np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(x), N)) * np.prod(N) 20 | freq = [np.arange(np.fix(-n/2.), np.fix(n/2.+0.5)) for n in dN] 21 | 22 | # SYSTEM MATRIX for Galerkin approximation with exact integration (FFTH-Ga) 23 | mat, inc = get_matinc(dim, phase) 24 | h = 0.6*np.ones(dim) # size of square (rectangle) / cube 25 | char_square = ifft(square_weights(h, dN, freq), dN).real 26 | Aga = np.einsum('ij...,...->ij...', mat+inc, char_square) \ 27 | + np.einsum('ij...,...->ij...', mat, 1.-char_square) 28 | 29 | # PROJECTION 30 | Ghat = np.zeros((dim,dim)+ tuple(dN)) # zero initialize 31 | indices = [range(int((dN[k]-N[k])/2), int((dN[k]-N[k])/2+N[k])) for k in range(dim)] 32 | for i,j in itertools.product(range(dim),repeat=2): 33 | for ind in itertools.product(*indices): 34 | q = np.array([freq[ii][ind[ii]] for ii in range(dim)]) # frequency vector 35 | if not q.dot(q) == 0: # zero freq. -> mean 36 | Ghat[(i,j)+ind] = -(q[i]*q[j])/(q.dot(q)) 37 | 38 | # OPERATORS 39 | G_fun = lambda X: np.real(ifft(dot(Ghat, fft(X, dN)), dN)).reshape(-1) 40 | A_fun = lambda x: dot(Aga, x.reshape(vec_shape)) 41 | GA_fun = lambda x: G_fun(A_fun(x)) 42 | 43 | # CONJUGATE GRADIENT SOLVER 44 | X = np.zeros((dim,) + tuple(dN), dtype=np.float) 45 | E = np.zeros(vec_shape); E[0] = 1. 
# macroscopic value 46 | b = -GA_fun(E.reshape(-1)) 47 | 48 | Alinoper = LinearOperator(shape=(X.size, X.size), matvec=GA_fun, dtype=np.float) 49 | eE, info = cg(A=Alinoper, b=b, x0=X.reshape(-1)) # conjugate gradients 50 | aux = eE.reshape(vec_shape) + E 51 | 52 | # POSTPROCESSING to calculate guaranteed bound 53 | AH_11 = np.sum(dot(Aga, aux)*aux)/np.prod(dN) 54 | print('homogenised component AH11 = {} (FFTH-Ga)'.format(AH_11)) 55 | 56 | print('END') 57 | -------------------------------------------------------------------------------- /examples/FFTHvsFEM/FEM.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from functions import get_matinc, PeriodicBoundary 3 | from fenics import (Constant, FunctionSpace, UnitSquareMesh, UnitCubeMesh, inner, 4 | MeshFunction, SubDomain, between, Measure, DirichletBC, 5 | TrialFunction, TestFunction, Function, grad, assemble, solve) 6 | 7 | # PARAMETERS 8 | dim = 2 # dimension (works for 2D and 3D) 9 | N = 5*np.ones(dim, dtype=np.int) # number of voxels (assumed equal for all directions) 10 | phase = 10. # material contrast 11 | order = 1 # polynomial order in FE space 12 | 13 | # auxiliary values 14 | prodN = np.prod(np.array(N)) # number of grid points 15 | vec_shape=(dim,)+tuple(N) # shape of the vector for storing DOFs 16 | 17 | # PROBLEM DEFINITION 18 | _mat, _inc = get_matinc(dim, phase) # material coef. for matrix (mat) and inclusion (inc) 19 | mat=Constant(_mat) 20 | inc=Constant(_mat+_inc) 21 | 22 | class Inclusion_2d(SubDomain): # square inclusion 23 | def inside(self, x, on_boundary): 24 | return (between(x[1], (0.2, 0.8)) and between(x[0], (0.2, 0.8))) 25 | 26 | class Inclusion_3d(SubDomain): # cube inclusion 27 | def inside(self, x, on_boundary): 28 | return (between(x[2], (0.2, 0.8)) and between(x[1], (0.2, 0.8)) and between(x[0], (0.2, 0.8))) 29 | 30 | E=np.zeros(dim); E[0]=1. 
# macroscopic value 31 | E=Constant(E) 32 | 33 | if dim==2: 34 | mesh=UnitSquareMesh(*N) # generation of mesh 35 | inclusion=Inclusion_2d() 36 | point0="near(x[0], 0) && near(x[1], 0)" 37 | elif dim==3: 38 | mesh=UnitCubeMesh(*N) # generation of mesh 39 | inclusion=Inclusion_3d() 40 | point0="near(x[0], 0) && near(x[1], 0) && near(x[2], 0)" 41 | 42 | V=FunctionSpace(mesh, "CG", order, constrained_domain=PeriodicBoundary(dim)) 43 | 44 | # setting the elements that lies in inclusion and in matrix phase 45 | domains=MeshFunction("size_t", mesh, dim) 46 | domains.set_all(0) 47 | inclusion.mark(domains, 1) 48 | dx=Measure('dx', subdomain_data=domains) 49 | 50 | def bilf(up, vp): # bilinear form 51 | return inner(mat*up, vp)*dx(0)+inner(inc*up, vp)*dx(1) 52 | 53 | bc0=DirichletBC(V, Constant(0.), point0, method='pointwise') 54 | 55 | # SOLVER 56 | u=TrialFunction(V) 57 | v=TestFunction(V) 58 | uE = Function(V) 59 | 60 | solve(bilf(grad(u), grad(v)) == -bilf(E, grad(v)), uE, bcs=[bc0]) 61 | 62 | # POSTPROCESSING evaluation of guaranteed bound 63 | AH11 = assemble(bilf(grad(uE)+E, grad(uE)+E)) 64 | print('homogenised component A11 = {} (FEM)'.format(AH11)) 65 | 66 | print('END') 67 | -------------------------------------------------------------------------------- /ffthompy/general/unittest_solver.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | from numpy.linalg import norm 4 | from ffthompy import PrintControl 5 | from ffthompy.tensors import Tensor, DFT, Operator 6 | from ffthompy.tensors.projection import scalar as scalar_tensor 7 | from ffthompy.projections import scalar 8 | from ffthompy.general.solver import linear_solver 9 | 10 | prt=PrintControl() 11 | 12 | 13 | class Test_solvers(unittest.TestCase): 14 | 15 | def setUp(self): 16 | pass 17 | 18 | def tearDown(self): 19 | pass 20 | 21 | def test_projections(self): 22 | print('\nChecking projections...') 23 | dim=2 24 | n=5 25 | N = n*np.ones(dim, dtype=np.int) 26 | 27 | hG0N, hG1N, hG2N = scalar(N, Y=np.ones(dim)) 28 | hG0Nt, hG1Nt, hG2Nt = scalar_tensor(N, Y=np.ones(dim)) 29 | 30 | self.assertAlmostEqual(0, norm(hG0N.val-hG0Nt.val), delta=1e-13) 31 | self.assertAlmostEqual(0, norm(hG1N.val-hG1Nt.val), delta=1e-13) 32 | self.assertAlmostEqual(0, norm(hG2N.val-hG2Nt.val), delta=1e-13) 33 | print('...ok') 34 | 35 | def test_solvers(self): 36 | print('\nChecking solvers...') 37 | dim=2 38 | n=5 39 | N = n*np.ones(dim, dtype=np.int) 40 | 41 | _, hG1Nt, _ = scalar_tensor(N, Y=np.ones(dim)) 42 | 43 | FN=DFT(name='FN', inverse=False, N=N) 44 | FiN=DFT(name='FiN', inverse=True, N=N) 45 | 46 | G1N=Operator(name='G1', mat=[[FiN, hG1Nt, FN]]) 47 | 48 | A=Tensor(name='A', val=np.einsum('ij,...->ij...', np.eye(dim), 1.+10.*np.random.random(N)), 49 | order=2, N=N, multype=21) 50 | 51 | E=np.zeros((dim,)+dim*(n,)); E[0] = 1. 
# set macroscopic loading 52 | E=Tensor(name='E', val=E, order=1, N=N) 53 | 54 | GAfun=Operator(name='GA', mat=[[G1N, A]]) 55 | GAfun.define_operand(E) 56 | 57 | B=GAfun(-E) 58 | x0=E.copy(name='x0') 59 | x0.val[:]=0 60 | 61 | par={'tol': 1e-10, 62 | 'maxiter': int(1e3), 63 | 'alpha': 0.5*(1.+10.), 64 | 'eigrange':[1., 10.]} 65 | 66 | # reference solution 67 | X,_=linear_solver(Afun=GAfun, B=B, x0=x0, par=par, solver='CG') 68 | 69 | prt.disable() 70 | for solver in ['CG', 'scipy_cg', 'richardson', 'chebyshev']: 71 | x,_=linear_solver(Afun=GAfun, B=B, x0=x0, par=par, solver=solver) 72 | self.assertAlmostEqual(0, norm(X.val-x.val), delta=1e-8, msg=solver) 73 | prt.enable() 74 | 75 | print('...ok') 76 | 77 | 78 | if __name__ == "__main__": 79 | unittest.main() 80 | -------------------------------------------------------------------------------- /examples/lowRankTensorApproximations/diffusion.py: -------------------------------------------------------------------------------- 1 | from __future__ import division, print_function 2 | 3 | import numpy as np 4 | from ffthompy.tensorsLowRank.homogenisation import (homog_Ga_full_potential, homog_GaNi_full_potential, 5 | homog_Ga_sparse, homog_GaNi_sparse) 6 | 7 | from examples.lowRankTensorApproximations.setting import get_material_coef, get_default_parameters 8 | 9 | 10 | # PARAMETERS ############################################################## 11 | dim=2 12 | N=5*3**2 13 | material=0 14 | kind=0 # from kind_list=['cano','tucker','tt'] 15 | 16 | pars, pars_sparse=get_default_parameters(dim, N, material, kind) 17 | pars_sparse.debug=True 18 | pars_sparse.solver.update(dict(rank=10)) 19 | 20 | print('== format={}, N={}, dim={}, material={}, rank={} ===='.format(pars_sparse.kind, N, dim, 21 | material, 22 | pars_sparse.solver['rank'])) 23 | print('dofs = {}'.format(N**dim)) 24 | 25 | # get material coefficients 26 | Aga, Agani, Agas, Aganis=get_material_coef(material, pars, pars_sparse) 27 | 28 | print('\n== Full solution with potential by CG (GaNi)===========') 29 | resP_GaNi=homog_GaNi_full_potential(Agani, Aga, pars) 30 | print('mean of solution={}'.format(resP_GaNi.Fu.mean())) 31 | print('homogenised properties (component 11) = {}'.format(resP_GaNi.AH)) 32 | 33 | print('\n== Full solution with potential by CG (Ga) ===========') 34 | resP_Ga=homog_Ga_full_potential(Aga, pars) 35 | print('mean of solution={}'.format(resP_Ga.Fu.mean())) 36 | print('homogenised properties (component 11) = {}'.format(resP_Ga.AH)) 37 | 38 | print('\n== SPARSE solver with preconditioner (Ga) =======================') 39 | resS_Ga=homog_Ga_sparse(Agas, pars_sparse) 40 | print('mean of solution={}'.format(resS_Ga.Fu.mean())) 41 | print('homogenised properties (component 11) = {}'.format(resS_Ga.AH)) 42 | print('iterations={}'.format(resS_Ga.solver['kit'])) 43 | print('norm(resP)={}'.format(resS_Ga.solver['norm_res'])) 44 | 45 | print('\n== SPARSE solver with preconditioner (GaNi) =======================') 46 | resS_GaNi=homog_GaNi_sparse(Aganis, Agas, pars_sparse) 47 | print('mean of solution={}'.format(resS_GaNi.Fu.mean())) 48 | print('homogenised properties (component 11) = {}'.format(resS_GaNi.AH)) 49 | print('iterations={}'.format(resS_GaNi.solver['kit'])) 50 | if np.array_equal(pars.N, pars_sparse.N): 51 | print('norm(dif)={}'.format(np.linalg.norm(resP_GaNi.Fu.fourier(Fourier=False).val-resS_GaNi.Fu.fourier().full().val))) 52 | print('norm(resP)={}'.format(resS_GaNi.solver['norm_res'])) 53 | 54 | print('END') 55 | 
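The script above fixes the solution rank at 10. Below is a minimal sketch, not part of the repository, of a small rank study built from the same calls used in diffusion.py, in the spirit of the diffusion_comp_error.py script listed in the tree:

```python
# Sketch (not part of the repository): vary the low-rank solution rank and
# compare the homogenised coefficient with the full Ga solution from above.
from ffthompy.tensorsLowRank.homogenisation import (homog_Ga_full_potential,
                                                    homog_Ga_sparse)
from examples.lowRankTensorApproximations.setting import (get_material_coef,
                                                          get_default_parameters)

dim, N, material, kind = 2, 5*3**2, 0, 0             # same setting as diffusion.py
pars, pars_sparse = get_default_parameters(dim, N, material, kind)
Aga, Agani, Agas, Aganis = get_material_coef(material, pars, pars_sparse)

AH_full = homog_Ga_full_potential(Aga, pars).AH       # full reference value
for rank in [5, 10, 20]:
    pars_sparse.solver.update(dict(rank=rank))
    AH = homog_Ga_sparse(Agas, pars_sparse).AH
    print('rank={}: AH={} (difference to full: {})'.format(rank, AH, abs(AH-AH_full)))
```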
-------------------------------------------------------------------------------- /examples/FFTHvsFEM/FFTH_GaNi.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.sparse.linalg as sp 3 | import itertools 4 | from functions import get_matinc, material_coef_at_grid_points, enlarge, square_weights 5 | 6 | # PARAMETERS 7 | dim = 2 # dimension (works for 2D and 3D) 8 | N = 5*np.ones(dim, dtype=np.int) # number of grid points 9 | phase = 10. # material contrast 10 | assert(np.array_equal(N % 2, np.ones(dim, dtype=np.int))) 11 | 12 | # auxiliary values 13 | ndof = dim*np.prod(N) # number of degrees-of-freedom 14 | vec_shape=(dim,)+tuple(N) # shape of the vector for storing DOFs 15 | 16 | # PROJECTION IN FOURIER SPACE 17 | Ghat = np.zeros((dim,dim)+ tuple(N)) # zero initialize 18 | freq = [np.arange(-(N[ii]-1)/2.,+(N[ii]+1)/2.) for ii in range(dim)] 19 | for i,j in itertools.product(range(dim),repeat=2): 20 | for ind in itertools.product(*[range(n) for n in N]): 21 | q = np.array([freq[ii][ind[ii]] for ii in range(dim)]) # frequency vector 22 | if not q.dot(q) == 0: # zero freq. -> mean 23 | Ghat[i,j][ind] = -(q[i]*q[j])/(q.dot(q)) 24 | 25 | # OPERATORS 26 | Agani = material_coef_at_grid_points(N, phase) 27 | dot = lambda A,v: np.einsum('ij...,j... ->i...',A,v) 28 | fft = lambda V, N: np.fft.fftshift(np.fft.fftn (np.fft.ifftshift(V),N)) / np.prod(N) 29 | ifft = lambda V, N: np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(V),N)) * np.prod(N) 30 | G_fun = lambda V: np.real(ifft(dot(Ghat,fft(V, N)), N)).reshape(-1) 31 | A_fun = lambda v: dot(Agani,v.reshape(vec_shape)) 32 | GA_fun = lambda v: G_fun(A_fun(v)) 33 | 34 | # CONJUGATE GRADIENT SOLVER 35 | E = np.zeros(vec_shape); E[0] = 1. # macroscopic value 36 | b = -GA_fun(E) # right-hand side 37 | e, _= sp.cg(A=sp.LinearOperator(shape=(ndof, ndof), matvec=GA_fun, dtype='float'), b=b) 38 | 39 | # POSTPROCESSING to calculate energetic value influenced by numerical integration 40 | aux = e+E.reshape(-1) 41 | AH11 = np.inner(A_fun(aux).reshape(-1), aux)/np.prod(N) 42 | print('homogenised component AH11 = {} (FFTH-GaNi - nonconforming)'.format(AH11)) 43 | 44 | # POSTPROCESSING to calculate guaranteed bound 45 | dN = 2*N-1 46 | freq = [np.arange(-(dN[ii]-1)/2.,+(dN[ii]+1)/2.) for ii in range(dim)] 47 | mat, inc = get_matinc(dim, phase) 48 | h = 0.6*np.ones(dim) # size of square (rectangle) / cube 49 | char_square = ifft(square_weights(h, dN, freq), dN).real 50 | Aga = np.einsum('ij...,...->ij...', mat+inc, char_square) \ 51 | + np.einsum('ij...,...->ij...', mat, 1.-char_square) 52 | 53 | # interpolation/projection of microscopic field on double grid 54 | Fe = fft(np.reshape(e, vec_shape), N) 55 | Fe2 = np.zeros((dim,)+tuple(dN), dtype=np.complex) 56 | for di in range(dim): 57 | Fe2[di]=enlarge(Fe[di], dN) 58 | e2 = ifft(Fe2, dN).real 59 | 60 | # evaluation of homogenised property 61 | E2 = np.zeros((dim,)+tuple(dN)); E2[0] = 1. 
# macroscopic value 62 | aux2 = e2 + E2 63 | AH11 = np.sum(dot(Aga, aux2)*aux2)/np.prod(dN) 64 | print('homogenised component AH11 = {} (FFTH-GaNi - conforming - upper bound on homogenised properties)'.format(AH11)) 65 | print('END') 66 | -------------------------------------------------------------------------------- /ffthompy/postprocess.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ffthompy.general.base import Timer 3 | import itertools 4 | 5 | 6 | def postprocess(pb, A, mat, solutions, results, primaldual): 7 | """ 8 | The function post-process the results. 9 | """ 10 | tim = Timer(name='postprocessing') 11 | print('\npostprocessing') 12 | matrices = {} 13 | for pp in pb.postprocess: 14 | if pp['kind'] in ['GaNi', 'gani']: 15 | order_name = '' 16 | Nname = '' 17 | if A.name is not 'A_GaNi': 18 | A = mat.get_A_GaNi(pb.solve['N'], primaldual) 19 | 20 | elif pp['kind'] in ['Ga', 'ga']: 21 | if 'order' in pp: 22 | Nbarpp = tuple(2*np.array(pb.solve['N']) - 1) 23 | if pp['order'] is None: 24 | Nname = '' 25 | order_name = '' 26 | A = mat.get_A_Ga(Nbar=Nbarpp, primaldual=primaldual, order=pp['order']) 27 | else: 28 | order_name = '_o' + str(pp['order']) 29 | Nname = '_P%d' % np.mean(pp['P']) 30 | A = mat.get_A_Ga(Nbar=Nbarpp, primaldual=primaldual, 31 | order=pp['order'], P=pp['P']) 32 | else: 33 | order_name = '' 34 | Nname = '' 35 | else: 36 | ValueError() 37 | 38 | name = 'AH_%s%s%s_%s' % (pp['kind'], order_name, Nname, primaldual) 39 | print(('calculating: ' + name)) 40 | 41 | AH = assembly_matrix(A, solutions) 42 | 43 | if primaldual is 'primal': 44 | matrices[name] = AH 45 | else: 46 | matrices[name] = np.linalg.inv(AH) 47 | tim.measure() 48 | 49 | pb.output.update({'sol_' + primaldual: solutions, 50 | 'res_' + primaldual: results, 51 | 'mat_' + primaldual: matrices}) 52 | 53 | def assembly_matrix(Afun, solutions): 54 | """ 55 | The function assembles the homogenized matrix from minimizers (corrector 56 | functions). 57 | """ 58 | dim = len(solutions) 59 | if not np.allclose(Afun.N, solutions[0].N): 60 | Nbar = Afun.N 61 | sol = [] 62 | for ii in np.arange(dim): 63 | sol.append(solutions[ii].project(Nbar)) 64 | else: 65 | sol = solutions 66 | 67 | AH = np.zeros([dim, dim]) 68 | for ii, jj in itertools.product(list(range(dim)), repeat=2): 69 | AH[ii, jj] = Afun(sol[ii]) * sol[jj] 70 | return AH 71 | 72 | 73 | def add_macro2minimizer(X, E): 74 | """ 75 | The function takes the minimizers (corrector function with zero-mean 76 | property or equaling to macroscopic value) and returns a corrector function 77 | with mean that equals to macroscopic value E. 
78 | """ 79 | if np.allclose(X.mean(), E): 80 | return X 81 | elif np.allclose(X.mean(), np.zeros_like(E)): 82 | EN = X.zeros_like(name='EN') 83 | EN.set_mean(E) 84 | return X + EN 85 | else: 86 | raise ValueError("Field is neither zero-mean nor E-mean.") 87 | -------------------------------------------------------------------------------- /ffthompy/tensors/projection.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ffthompy.trigpol import Grid, get_Nodd, mean_index, fft_form_default 3 | from .objects import Tensor 4 | import itertools 5 | 6 | def scalar(N, Y, fft_form=fft_form_default): 7 | dim = np.size(N) 8 | N = np.array(N, dtype=np.int) 9 | 10 | xi = Grid.get_freq(N, Y, fft_form=fft_form) 11 | N_fft=tuple(xi[i].size for i in range(dim)) 12 | hGrad = np.zeros((dim,)+N_fft) # zero initialize 13 | for ind in itertools.product(*[list(range(n)) for n in N_fft]): 14 | for i in range(dim): 15 | hGrad[i][ind] = xi[i][ind[i]] 16 | 17 | kok= np.einsum('i...,j...->ij...', hGrad, hGrad).real 18 | k2 = np.einsum('i...,i...', hGrad, hGrad).real 19 | ind_center=mean_index(N, fft_form=fft_form) 20 | k2[ind_center]=1. 21 | 22 | G0lval=np.zeros_like(kok) 23 | Ival=np.zeros_like(kok) 24 | for ii in range(dim): # diagonal components 25 | G0lval[ii, ii][ind_center] = 1 26 | Ival[ii, ii] = 1 27 | G1l=Tensor(name='G1', val=kok/k2, order=2, N=N, Y=Y, multype=21, Fourier=True, fft_form=fft_form) 28 | G0l=Tensor(name='G1', val=G0lval, order=2, N=N, Y=Y, multype=21, Fourier=True, fft_form=fft_form) 29 | I = Tensor(name='I', val=Ival, order=2, N=N, Y=Y, multype=21, Fourier=True, fft_form=fft_form) 30 | G2l=I-G1l-G0l 31 | return G0l, G1l, G2l 32 | 33 | def elasticity_small_strain(N, Y, fft_form=fft_form_default): 34 | N = np.array(N, dtype=np.int) 35 | dim = N.size 36 | assert(dim==3) 37 | freq = Grid.get_freq(N, Y, fft_form=fft_form) 38 | N_fft=tuple(freq[i].size for i in range(dim)) 39 | Ghat = np.zeros(np.hstack([dim*np.ones(4, dtype=np.int), N_fft])) 40 | delta = lambda i,j: np.float(i==j) 41 | 42 | for i, j, k, l in itertools.product(list(range(dim)), repeat=4): 43 | for x, y, z in np.ndindex(*N_fft): 44 | q = np.array([freq[0][x], freq[1][y], freq[2][z]]) 45 | if not q.dot(q) == 0: 46 | Ghat[i,j,k,l,x,y,z] = -q[i]*q[j]*q[k]*q[l]/(q.dot(q))**2 + \ 47 | .5*(delta(i, k)*q[j]*q[l]+delta(i, l)*q[j]*q[k]+ 48 | delta(j, k)*q[i]*q[l]+delta(j, l)*q[i]*q[k])/(q.dot(q)) 49 | 50 | Ghat_tensor = Tensor(name='Ghat', val=Ghat, N=N, order=4, multype=42, Fourier=True, fft_form=fft_form) 51 | return Ghat_tensor 52 | 53 | def elasticity_large_deformation(N, Y, fft_form=fft_form_default): 54 | N = np.array(N, dtype=np.int) 55 | dim = N.size 56 | assert(dim==3) 57 | freq = Grid.get_freq(N, Y, fft_form=fft_form) 58 | N_fft=tuple(freq[i].size for i in range(dim)) 59 | Ghat = np.zeros(np.hstack([dim*np.ones(4, dtype=np.int), N_fft])) 60 | delta = lambda i,j: np.float(i==j) 61 | 62 | for i, j, k, l in itertools.product(list(range(dim)), repeat=4): 63 | for x, y, z in np.ndindex(*N_fft): 64 | q = np.array([freq[0][x], freq[1][y], freq[2][z]]) 65 | if not q.dot(q) == 0: 66 | Ghat[i,j,k,l,x,y,z] = delta(i,k)*q[j]*q[l] / (q.dot(q)) 67 | 68 | Ghat_tensor = Tensor(name='Ghat', val=Ghat, order=4, N=N, multype=42, 69 | Fourier=True, fft_form=fft_form) 70 | return Ghat_tensor 71 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | FFTHomPy 2 | 
======== 3 | 4 | FFT-based homogenization in Python is numerical software for evaluating guaranteed upper-lower bounds on homogenized properties. The algorithms implemented here are based on the papers listed in [references](#references). 5 | 6 | ## News 7 | 8 | - The code now contains modelling using tensors with a low-rank tensor approximation. 9 | 10 | ## Manual 11 | 12 | The basic manual can be found at 13 | - http://FFTHomPy.bitbucket.io 14 | 15 | or downloaded at 16 | - http://FFTHomPy.bitbucket.io/FFTHomPy.pdf 17 | 18 | Tutorials can be found in the folder '/tutorials'. 19 | 20 | ## Requirements and installation 21 | 22 | No special installation is required. However, the folder with the code has to be in the Python path. 23 | 24 | The code is optimised for [Python](https://www.python.org) (version 3.6) and 25 | depends on the following numerical libraries: 26 | - [NumPy](http://www.numpy.org) (version 1.16) and 27 | - [SciPy](https://www.scipy.org) (version 1.3) for scientific computing, 28 | - [Matplotlib](https://matplotlib.org/) (version 3.1) for plotting, 29 | - [StoPy](https://github.com/vondrejc/StoPy) for uncertainty quantification, and 30 | - [ttpy](https://github.com/oseledets/ttpy), a Python implementation of the Tensor Train (TT) Toolbox. 31 | 32 | ## References 33 | 34 | The code is based on the following papers, where you can find more theoretical information. 35 | 36 | - J. Vondřejc, D. Liu, M. Ladecký, and H.G. Matthies: *FFT-Based Homogenisation Accelerated by Low-Rank Tensor Approximations.* Computer Methods in Applied Mechanics and Engineering, 364, pp. 112890, 2020. https://doi.org/10.1016/j.cma.2020.112890 37 | - J. Vondřejc, T.W.J. de Geus: *Energy-based comparison between the Fourier--Galerkin method and the finite element method.* Journal of Computational and Applied Mathematics, 374, pp. 112585, 2020. https://doi.org/10.1016/j.cam.2019.112585 38 | - J. Zeman, T. W. J. de Geus, J. Vondřejc, R. H. J. Peerlings, and M. G. D. Geers: *A finite element perspective on non-linear FFT-based micromechanical simulations.* International Journal for Numerical Methods in Engineering, 111 (10), pp. 903-926, 2017. arXiv:1601.05970 39 | - N. Mishra, J. Vondřejc, J. Zeman: *A comparative study on low-memory iterative solvers for FFT-based homogenization of periodic media.* Journal of Computational Physics, 321, pp. 151-168, 2016. arXiv:1508.02045 40 | - J. Vondřejc: *Improved guaranteed computable bounds on homogenized properties of periodic media by Fourier-Galerkin method with exact integration.* International Journal for Numerical Methods in Engineering, 107 (13), pp. 1106-1135, 2016. arXiv:1412.2033 41 | - J. Vondřejc, J. Zeman, I. Marek: *Guaranteed upper-lower bounds on homogenized properties by FFT-based Galerkin method.* Computer Methods in Applied Mechanics and Engineering, 297, pp. 258–291, 2015. arXiv:1404.3614 42 | - J. Vondřejc, J. Zeman, I. Marek: *An FFT-based Galerkin method for homogenization of periodic media.* Computers and Mathematics with Applications, 68, pp. 156-173, 2014. arXiv:1311.0089 43 | - J. Zeman, J. Vondřejc, J. Novák and I. Marek: *Accelerating a FFT-based solver for numerical homogenization of periodic media by conjugate gradients.* Journal of Computational Physics, 229 (21), pp. 8065-8071, 2010.
arXiv:1004.1122 44 | 45 | -------------------------------------------------------------------------------- /examples/lowRankTensorApproximations/fig_pars.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | 3 | def set_pars(mpl): 4 | mpl.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath,bm,amsfonts}"] 5 | params={'text.usetex': True, 6 | 'font.family': 'serif', 7 | 'font.size': 12, 8 | 'legend.fontsize': 10, 9 | } 10 | mpl.rcParams.update(params) 11 | fig_par={'dpi': 1000, 12 | 'facecolor': 'w', 13 | 'edgecolor': 'k', 14 | 'figsize': (4, 3), 15 | 'figsize3D': (4, 4), 16 | 'pad_inches': 0.02, 17 | } 18 | 19 | return fig_par 20 | 21 | def set_labels(): 22 | lines={'Gafull': 'bo-', 23 | 'GaNifull': '--', 24 | 'Gacano': 'bx-', 25 | 'Gatucker': 'ro-', 26 | 'Gatt': 'kv-', 27 | 28 | 'GaSparse': 'rx-', 29 | 'GaSparse_2': 'ro-', 30 | 'GaSparse_3': 'r<-', 31 | 'GaSparse_4': 'r*-', 32 | 'GaSparse_5': 'rd-', 33 | 'GaSparse_6': 'rv-', 34 | 'GaSparse_7': 'r^-', 35 | 36 | 'full': '--', 37 | 'mem_cano': ['bx--','bo--','bx--','bv--','bo--','bx--','b<--'], 38 | 'mem_tucker': ['rx--', 'ro--', 'rx--', 'rv--', 'ro--', 'rx--', 'r<--'], 39 | 'mem_tt': ['kx-', 'ko-', 'kx--', 'kv--', 'ko--', 'kx--', 'k<--'], 40 | 41 | 'Ga_cano': ['bx-', 'bo-', 'bx-', 'bv-', 'bo-', 'bx-', 'b<-'], 42 | 'Ga_tucker': ['rx-', 'ro-', 'rx-', 'rv-', 'ro-', 'rx-', 'r<-'], 43 | 'Ga_tt': ['kx-', 'ko-','kx--', 'kv-', 'ko-', 'kx-', 'k<-'], 44 | 45 | 'GaNi_cano': ['bx--', 'bo--', 'bx--', 'bv--', 'bo--', 'bx--', 'b<--'], 46 | 'GaNi_tucker': ['rx--', 'ro--', 'rx--', 'rv--', 'ro--', 'rx--', 'r<--'], 47 | 'GaNi_tt': ['kx--', 'ko--', 'kx--', 'kv--', 'ko--', 'kx--', 'k<--'], 48 | 49 | 50 | 'Ga': ['-','x-','<-','|-','^-','x-','o-', '<-', 'v-','^-','d-',], 51 | 'GaNi': ['--', 'x--', '<--', '|--', '^--', 'x--', 'o--', '<--', 'v--', '^--', 'd--'], 52 | 53 | } 54 | 55 | labels = {'full': 'Full', 56 | 'tensorsLowRank': 'Sparse', 57 | 'Gafull': 'Ga Full', 58 | 'Gacano': 'Ga CP', 59 | 'Gatucker': 'Ga Tucker', 60 | 'Gatt': 'Ga TT', 61 | 62 | 'Garank': 'Solution rank', 63 | 'GaNirank': 'Solution rank', 64 | 65 | 'GaNifull': 'GaNi Full', 66 | 67 | 'GaNicano': 'GaNi CP', 68 | 'GaNicanoN': 'Cano N=', 69 | 70 | 'GaNitucker': 'GaNi Tucker', 71 | 'GaNituckerN': 'Tucker N=', 72 | 73 | 'GaNitt': 'GaNi TT', 74 | 'GaNittN': 'TT N=', 75 | } 76 | return lines, labels 77 | 78 | def copy_files(src, dest, files='all'): 79 | import os 80 | from shutil import copy 81 | src_files=os.listdir(src) 82 | for file_name in src_files: 83 | if files=='all' or file_name in files: 84 | full_file_name=os.path.join(src, file_name) 85 | if (os.path.isfile(full_file_name)): 86 | copy(full_file_name, dest) 87 | else: 88 | continue 89 | print('copy of files is finished') 90 | return 91 | print((plt.style.available)) 92 | -------------------------------------------------------------------------------- /ffthompy/tensorsLowRank/objects/tensors.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ffthompy.tensors.objects import TensorFuns 3 | import itertools 4 | from copy import deepcopy as copy 5 | from ffthompy.tensorsLowRank.fft1 import fft, ifft, fftc, icfft, cfftc, icfftc, srfft,sirfft 6 | 7 | fft_form_default='sr' # scipy rfft 8 | 9 | def multiply(A, B, *args, **kwargs): 10 | """element-wise (Hadamard) product of A and B""" 11 | dim=A.__len__() 12 | assert(dim==B.__len__()) 13 | C=[] 14 | for ii in range(dim): 15 | 
shape=(A[ii].shape[0]*B[ii].shape[0], A[ii].shape[1]) 16 | val=np.empty(shape) 17 | for iimn, (mm, nn) in enumerate(itertools.product(list(range(A[ii].shape[0])), list(range(B[ii].shape[0])))): 18 | val[iimn] = A[ii][mm]*B[ii][nn] 19 | C.append(val) 20 | return C 21 | 22 | 23 | class LowRankTensorFuns(TensorFuns): 24 | 25 | def mean_index(self): 26 | if self.fft_form in [0, 'sr']: 27 | return tuple(np.zeros_like(self.N, dtype=np.int)) 28 | elif self.fft_form in ['c', 'cc']: 29 | return tuple(np.array(np.fix(np.array(self.N)/2), dtype=np.int)) 30 | 31 | def _set_fft(self, fft_form): 32 | assert(fft_form in ['cc', 'c', 'sr', 0]) # 'sr' for scipy.fftpack.rfft 33 | if fft_form in [0]: 34 | self.N_fft=self.N 35 | self.fft=fft 36 | self.ifft=ifft 37 | elif fft_form in ['c']: 38 | self.N_fft=self.N 39 | self.fft=fftc 40 | self.ifft=icfft 41 | elif fft_form in ['cc']: 42 | self.N_fft=self.N 43 | self.fft=cfftc 44 | self.ifft=icfftc 45 | elif fft_form in ['sr']: 46 | self.N_fft=self.N 47 | self.fft=srfft 48 | self.ifft=sirfft 49 | self.fft_form=fft_form 50 | return self 51 | 52 | def fourier(self, real_output=False, copy=True): 53 | "(inverse) discrete Fourier transform" 54 | 55 | if self.Fourier: 56 | fftfun=lambda Fx, N, real_output: self.ifft(Fx, N, real_output) 57 | name='Fi({})'.format(self.name) 58 | else: 59 | fftfun=lambda x, N, real_output: self.fft(x, N) 60 | name='F({})'.format(self.name) 61 | 62 | basis=[] 63 | for ii in range(self.order): 64 | basis.append(fftfun(self.basis[ii], self.N[ii], real_output)) 65 | 66 | if copy: 67 | return self.copy(name=name, basis=basis, Fourier=not self.Fourier, orthogonal=False) 68 | else: 69 | self.basis=basis 70 | self.Fourier=not self.Fourier 71 | self.orthogonal=False 72 | return self 73 | 74 | def __radd__(self, other): 75 | if other is None: 76 | return self 77 | return other + self 78 | 79 | if __name__=='__main__': 80 | # check multiplication 81 | r=2 82 | n=50 83 | m=55 84 | As = [np.random.random([r,n]), np.random.random([r, m])] 85 | A = np.einsum('ij,ik->jk', As[0], As[1]) 86 | Bs = [np.random.random([r,n]), np.random.random([r, m])] 87 | B = np.einsum('ij,ik->jk', Bs[0], Bs[1]) 88 | C0 = A*B 89 | C1s = multiply(As, Bs) 90 | C1 = np.einsum('ij,ik->jk', C1s[0], C1s[1]) 91 | print((np.linalg.norm(C0-C1))) 92 | print('END') 93 | -------------------------------------------------------------------------------- /examples/FFTHvsFEM/functions.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import itertools 3 | from fenics import SubDomain, near 4 | 5 | 6 | def get_matinc(dim, phase): 7 | # set up the material coefficients for matrix phase (mat) and inclusion (inc) 8 | Rfun=lambda alp: np.array([[np.cos(alp), np.sin(alp)], [-np.sin(alp), np.cos(alp)]]) 9 | if dim==2: 10 | R=Rfun(np.pi/3) 11 | elif dim==3: 12 | RA=np.eye(dim) 13 | RA[:2, :2]=Rfun(np.pi/3.) 14 | RB=np.eye(dim) 15 | RB[1:, 1:]=Rfun(np.pi/6.) 16 | R=RA.dot(RB) 17 | mat=R.dot(np.diag(np.arange(1, dim+1)).dot(R.T)) 18 | inc=R.dot(phase*np.eye(dim).dot(R.T)) 19 | return mat, inc 20 | 21 | def material_coef_at_grid_points(N, phase): 22 | # calculates the material coefficients at grid points for a square inclusion 23 | dim = N.__len__() 24 | assert(np.array_equal(N % 5, np.zeros(dim))) 25 | mat, inc = get_matinc(dim, phase) # material coef. 
for matrix (mat) and inclusion (inc) 26 | topology = np.zeros(N) 27 | subindices=[slice(int(N[i]/5*1),int(N[i]/5*4)) for i in range(dim)] 28 | topology[tuple(subindices)]=1 29 | A = np.einsum('ij,...->ij...',mat,1.-topology) \ 30 | + np.einsum('ij,...->ij...',mat+inc,topology) # material coefficients 31 | return A 32 | 33 | def enlarge(xN, M): 34 | """ 35 | Enlarge an array of Fourier coefficients by zeros. 36 | 37 | Parameters 38 | ---------- 39 | xN : numpy.ndarray of shape = N 40 | input array that is to be enlarged 41 | 42 | Returns 43 | ------- 44 | xM : numpy.ndarray of shape = M 45 | output array that is enlarged 46 | M : array like 47 | number of grid points 48 | """ 49 | xM = np.zeros(M, dtype=xN.dtype) 50 | M = np.array(M, dtype=np.float) 51 | N = np.array(xN.shape, dtype=np.float) 52 | if np.allclose(M, N): 53 | return xN 54 | dim = N.size 55 | ibeg = np.ceil((M-N)/2).astype(dtype=np.int) 56 | iend = np.ceil((M+N)/2).astype(dtype=np.int) 57 | if dim == 3: 58 | xM[ibeg[0]:iend[0], ibeg[1]:iend[1], ibeg[2]:iend[2]] = xN 59 | elif dim == 2: 60 | xM[ibeg[0]:iend[0], ibeg[1]:iend[1]] = xN 61 | elif dim == 1: 62 | xM[ibeg[0]:iend[0]] = xN 63 | return xM 64 | 65 | def square_weights(h, dN, freq): 66 | # calculation of integral weights of rectangular function for FFTH-Ga 67 | dim = h.size 68 | Wphi = np.zeros(dN) # integral weights 69 | for ind in itertools.product(*[range(n) for n in dN]): 70 | Wphi[ind] = np.prod(h) 71 | for ii in range(dim): 72 | Wphi[ind] *= np.sinc(h[ii]*freq[ii][ind[ii]]) 73 | return Wphi 74 | 75 | class PeriodicBoundary(SubDomain): 76 | # periodic boundary conditions for FEM 77 | def __init__(self, dim=2): 78 | SubDomain.__init__(self) 79 | self.dim = dim 80 | 81 | def inside(self, x, on_boundary): 82 | """ return True if on left or bottom boundary AND NOT on one of the 83 | two corners (0, 1) and (1, 0) """ 84 | zero_boundary = False 85 | one_boundary = False 86 | corner = True 87 | for ii in range(self.dim): 88 | zero_boundary = zero_boundary or near(x[ii], 0.) 89 | one_boundary = one_boundary or near(x[ii], 1.) 90 | corner = corner and (near(x[ii], 0.) or near(x[ii], 1.)) 91 | return bool(on_boundary and zero_boundary and not one_boundary) 92 | 93 | def map(self, x, y): 94 | for ii in range(self.dim): 95 | if near(x[ii], 1.): 96 | y[ii] = x[ii] - 1. 
97 | else: 98 | y[ii] = x[ii] 99 | -------------------------------------------------------------------------------- /examples/lowRankTensorApproximations/diffusion_comp_error.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | 4 | from ffthompy.tensorsLowRank.homogenisation import (homog_Ga_full_potential, homog_GaNi_full_potential, 5 | homog_Ga_sparse, homog_GaNi_sparse) 6 | from examples.lowRankTensorApproximations.setting import get_material_coef, kind_list, get_default_parameters 7 | from examples.lowRankTensorApproximations.plots import plot_error, save_experiment_settings 8 | import itertools 9 | 10 | os.nice(19) 11 | 12 | print('running comparison residual...') 13 | ####################################################### 14 | 15 | Ns = {'2': [1215], 16 | '3': [135]} 17 | 18 | kinds = {'2': [0], 19 | '3': [1,2]} 20 | 21 | material_list = [0,2] 22 | 23 | sol_rank_range_set={'2': [2,5,10,20,30], 24 | '3': [2,5,10,20]} 25 | 26 | data_folder = 'data_for_plot/error' 27 | save_experiment_settings(kind_list,Ns,kinds,sol_rank_range_set,material_list,data_folder=data_folder) 28 | 29 | for dim, material in itertools.product([2, 3], material_list): 30 | if not os.path.exists('{}/dim_{}/mat_{}/'.format(data_folder,dim, material)): 31 | os.makedirs('{}/dim_{}/mat_{}/'.format(data_folder,dim, material)) 32 | 33 | for N, kind in itertools.product(Ns['{}'.format(dim)], kinds['{}'.format(dim)]): 34 | ################ MATERAL DATA AND SETTINGS ################ 35 | ## parameters 36 | pars, pars_sparse=get_default_parameters(dim, N, material, kind) 37 | pars_sparse.solver.update(dict(rank=1, # rank of solution vector 38 | )) 39 | 40 | print('== format={}, N={}, dim={}, material={} ===='.format(pars_sparse.kind, 41 | N, dim, material)) 42 | 43 | # get material settings for experiment 44 | Aga, Agani, Agas, Aganis=get_material_coef(material, pars, pars_sparse) 45 | 46 | ####################################################################### 47 | 48 | ### COMPUTING FULL SOLUTION ### 49 | sols_Ga = list() 50 | sols_GaNi = list() 51 | 52 | ## Compute Full solutions 53 | resP_Ga = homog_Ga_full_potential(Aga, pars) 54 | resP_GaNi = homog_GaNi_full_potential(Agani, Aga, pars) 55 | 56 | ############ SPARSE SOLUTIONS ############### 57 | sols_Ga_Spar = list() 58 | sols_GaNi_Spar = list() 59 | 60 | for sol_rank in sol_rank_range_set['{}'.format(dim)]: # rank of solution vector 61 | print('solution rank={}'.format(sol_rank)) 62 | pars_sparse.solver.update(dict(rank=sol_rank)) 63 | 64 | sols_Ga.append(resP_Ga.AH) 65 | sols_GaNi.append(resP_GaNi.AH) 66 | 67 | resS_Ga = homog_Ga_sparse(Agas, pars_sparse) 68 | sols_Ga_Spar.append(resS_Ga.AH) 69 | 70 | resS_GaNi = homog_GaNi_sparse(Aganis, Agas, pars_sparse) 71 | sols_GaNi_Spar.append(resS_GaNi.AH) 72 | 73 | pickle.dump(sols_Ga_Spar, open("{}/dim_{}/mat_{}/sols_Ga_Spar_{}_{}_{}.p" 74 | .format(data_folder, dim, material, kind, N, 75 | pars_sparse.solver['method']), "wb")) 76 | pickle.dump(sols_GaNi_Spar, open("{}/dim_{}/mat_{}/sols_GaNi_Spar_{}_{}_{}.p" 77 | .format(data_folder, dim, material, kind, N, 78 | pars_sparse.solver['method']), "wb")) 79 | 80 | pickle.dump(sols_Ga, open("{}/dim_{}/mat_{}/sols_Ga_{}.p" 81 | .format(data_folder, dim, material, N), "wb")) 82 | pickle.dump(sols_GaNi, open("{}/dim_{}/mat_{}/sols_GaNi_{}.p" 83 | .format(data_folder, dim, material, N), "wb")) 84 | 85 | plot_error() 86 | 87 | print('END') 88 | 
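# --- illustrative sketch (added for clarity; not part of the original experiment) ---
# The lists pickled above store homogenised coefficients A_H: sols_Ga/sols_GaNi
# repeat the full-tensor reference value, while sols_Ga_Spar/sols_GaNi_Spar hold
# one low-rank value per rank in sol_rank_range_set. The relative errors that
# plot_error() visualises can, in principle, be obtained as follows (the helper
# below is hypothetical and only illustrates this post-processing step; the
# actual file layout and plotting are defined in plots.py):

def relative_errors(sparse_AH, full_AH):
    """Relative error of each low-rank homogenised coefficient w.r.t. the full one."""
    return [abs(s-f)/abs(f) for s, f in zip(sparse_AH, full_AH)]

# e.g. relative_errors(sols_Ga_Spar, sols_Ga) gives one error per solution rank,
# which is expected to decrease as the rank grows.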
-------------------------------------------------------------------------------- /examples/lowRankTensorApproximations/README.md: -------------------------------------------------------------------------------- 1 | Numerical experiments 2 | ======== 3 | The folder [examples/lowRankTensorApproximations](#examples/lowRankTensorApproximations) contains scripts that use the low-rank tensor implementation of the scalar homogenisation problem described in the paper: 4 | 5 | - J. Vondřejc, D. Liu, M. Ladecký, and H. G. Matthies: *FFT-Based Homogenisation Accelerated by Low-Rank Tensor Approximations.* 2019. arXiv:1902.07455 6 | 7 | ## Model problem 8 | The file [diffusion.py](#diffusion.py) implements the model scalar elliptic homogenisation problem described in section 2.1 of the paper. 9 | For a predefined: 10 | 11 | - dimension (dim = 2 or 3), 12 | - grid size (N = odd number), 13 | - material (material = 0 - square inclusion, 1 - pyramid inclusion, 2 - stochastic material, 3 - square inclusion with anisotropic material, 14 | 4 - stochastic material with anisotropy), 15 | - low-rank tensor format (kind = 0 - canonical, 1 - Tucker, 2 - Tensor-Train), 16 | 17 | [diffusion.py](#diffusion.py) computes one component of the homogenised material property (see section 2.2 of the paper). 18 | The solution is computed by two different approaches: 19 | 20 | - (Ga) Galerkin approximation, 21 | - (GaNi) Galerkin approximation with Numerical Integration, 22 | 23 | and in two different formats: 24 | 25 | - full tensor format, 26 | - low-rank format (canonical, Tucker, or Tensor-Train). 27 | 28 | Materials and solver settings are predefined in [setting.py](#setting.py). 29 | 30 | ## Generate results 31 | 32 | The results shown in section 4 of the paper are computed with the files [diffusion_comp_error.py](#diffusion_comp_error.py), 33 | [diffusion_comp_residua.py](#diffusion_comp_residua.py), [diffusion_comp_time.py](#diffusion_comp_time.py) and [diffusion_comp_time_stochastic_material.py](#diffusion_comp_time_stochastic_material.py). 34 | 35 | The file [diffusion_comp_error.py](#diffusion_comp_error.py) solves the problem defined 36 | in [diffusion.py](#diffusion.py) for different solution ranks r and compares the relative errors of the low-rank approximations with the full tensor approach. For more details see section 4.3 of the paper. 37 | 38 | The file [diffusion_comp_residua.py](#diffusion_comp_residua.py) solves the problem defined 39 | in [diffusion.py](#diffusion.py) for different solution ranks r and shows the evolution of the residual norm during the minimal residual iteration. For more details see section 4.2 of the paper. 40 | 41 | The file [diffusion_comp_time.py](#diffusion_comp_time.py) uses the problem defined in [diffusion.py](#diffusion.py) 42 | with material 0 (square inclusion) and material 3 (square inclusion with anisotropic material). 43 | It compares the computational time of the full and low-rank solvers at the same level of accuracy for the scheme with exact integration (Ga). 44 | The full solution is calculated on a grid of size (N,...,N), while the low-rank solution is calculated on the grid (3N,...,3N) 45 | with a solution rank chosen to achieve the same level of accuracy as the full scheme. For more details see section 4.4 of the paper. 46 | 47 | The file [diffusion_comp_time_stochastic_material.py](#diffusion_comp_time_stochastic_material.py) uses the problem defined 48 | in [diffusion.py](#diffusion.py) with material 2 49 | (stochastic material) and material 4 (stochastic material with anisotropy). 50 | Both the full and the low-rank solutions are calculated on a grid of size (N,...,N).
The ranks of the low-rank solutions are 51 | chosen such that they achieve a relative error below $10^{-3}$ or $10^{-6}$. For more details see section 4.4 of the paper. 52 | 53 | ## Plot results 54 | 55 | The file [plots.py](#plots.py) contains the procedures that create .pdf figures with the results. 56 | The procedure plot_error() uses data generated by [diffusion_comp_error.py](#diffusion_comp_error.py), plot_residuals() uses data generated by [diffusion_comp_residua.py](#diffusion_comp_residua.py), 57 | and plot_time() uses data generated by [diffusion_comp_time.py](#diffusion_comp_time.py) and [diffusion_comp_time_stochastic_material.py](#diffusion_comp_time_stochastic_material.py). 58 | The visual style, line styles, and labels are defined in [fig_pars.py](#fig_pars.py). 59 | 60 | 61 | -------------------------------------------------------------------------------- /ffthompy/tensorsLowRank/objects/sparseTensorWrapper.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from tt.core.vector import vector 3 | from ffthompy.tensorsLowRank.objects.tucker import Tucker 4 | from ffthompy.tensorsLowRank.objects.canoTensor import CanoTensor 5 | from ffthompy.tensorsLowRank.objects.tensorTrain import TensorTrain 6 | from ffthompy.tensorsLowRank.objects.tensors import fft_form_default 7 | 8 | 9 | def SparseTensor(kind='tt', val=None, core=None, basis=None, eps=None, rank=None, 10 | Fourier=False, name='unnamed', vectorObj=None, fft_form=fft_form_default): 11 | """ 12 | A uniform wrapper for the different low-rank tensor formats. 13 | 14 | :param kind: type of the low-rank tensor; can be 'cano', 'tucker' or 'tt', or further variants (see the code). 15 | :type kind: string 16 | 17 | :param val: a full tensor to be approximated 18 | :type val: n-D array 19 | 20 | :param core: core for the canonical, Tucker or TT low-rank tensor 21 | :type core: 1-D array for a canonical tensor, n-D array for Tucker, list of arrays for TT. 22 | 23 | :param basis: basis for the canonical or Tucker low-rank tensor. 24 | :type basis: list of arrays. 25 | 26 | :param eps: approximation accuracy. 27 | :type eps: float. 28 | 29 | :param rank: rank of the canonical and Tucker low-rank tensor, maximum rank of the TT low-rank tensor. 30 | :type rank: int for cano and TT, list of int for tucker. 31 | 32 | :param vectorObj: a TTPY vector class object, to be cast into a tensorTrain object. 33 | :type vectorObj: TTPY vector 34 | 35 | :returns: a low-rank tensor object.
36 | """ 37 | if type(rank) is list or type(rank) is np.ndarray: 38 | rmax=max(rank) 39 | r=min(rank) 40 | else: 41 | rmax=r=rank 42 | 43 | if kind.lower() in ['cano', 'canotensor']: 44 | return CanoTensor(name=name, val=val, core=core,basis=basis,Fourier=Fourier,fft_form=fft_form).truncate(rank=r, tol=eps) 45 | elif kind.lower() in ['tucker']: 46 | return Tucker(name=name, val=val, core=core, basis=basis,Fourier=Fourier,fft_form=fft_form).truncate(rank=rank, tol=eps) 47 | elif kind.lower() in ['tt', 'tensortrain']: 48 | return TensorTrain(val=val, core=core, eps=eps, rmax=rmax, name=name, 49 | Fourier=Fourier, vectorObj=vectorObj,fft_form=fft_form) 50 | else: 51 | raise ValueError("Unexpected argument value: '" + kind +"'") 52 | 53 | 54 | if __name__=='__main__': 55 | 56 | print() 57 | print('----testing "Repeat" function ----') 58 | print() 59 | 60 | v1 = np.random.rand(3, 3,3) 61 | 62 | tt = SparseTensor(kind='tt', val=v1) 63 | # tt.fourier() 64 | print((tt.full())) 65 | tt.repeat(6) 66 | 67 | print((tt.full())) 68 | 69 | print('\n----testing wrapper function ----\n') 70 | 71 | v1=np.random.rand(20, 30) 72 | 73 | cano=SparseTensor(kind='cano', val=v1) 74 | print(cano) 75 | 76 | cano2=SparseTensor(kind='cano', val=v1, rank=10) 77 | print(cano2) 78 | 79 | cano3=SparseTensor(kind='cano', core=np.array([1.]), basis=[np.atleast_2d(np.ones(5)) for ii in range(2)], Fourier=False) 80 | print(cano3) 81 | 82 | v1=np.random.rand(20, 30, 40) 83 | 84 | tucker1=SparseTensor(kind='tucker', val=v1) 85 | print(tucker1) 86 | 87 | tucker2=SparseTensor(kind='tucker', val=v1, rank=[10, 20, 35]) 88 | print(tucker2) 89 | 90 | tucker3=SparseTensor(kind='tucker', core=np.array([1.]), basis=[np.atleast_2d(np.ones(5)) for ii in range(3)]) 91 | print(tucker3) 92 | 93 | tt1=SparseTensor(kind='tt', val=v1) 94 | print(tt1) 95 | 96 | tt2=SparseTensor(kind='tt', val=v1, eps=2e-1) 97 | print(tt2) 98 | 99 | tt_vec=vector(v1) 100 | tt3=SparseTensor(kind='TT', vectorObj=tt_vec) 101 | print(tt3) 102 | 103 | tt4=SparseTensor() 104 | print (tt4) 105 | 106 | v1=np.random.rand(20, 30) 107 | cano=SparseTensor(kind='CAno', val=v1) 108 | print(cano) 109 | 110 | print('END') 111 | -------------------------------------------------------------------------------- /run_unittests.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import unittest 4 | import numpy as np 5 | from ffthompy import PrintControl 6 | from ffthompy.problem import Problem, import_file 7 | import pickle as Pickle 8 | import os 9 | import sys 10 | 11 | prt=PrintControl() 12 | 13 | 14 | class Test_main(unittest.TestCase): 15 | 16 | def setUp(self): 17 | self.input_files = ['examples/scalar/scalar_2d.py', 18 | 'examples/scalar/scalar_3d.py', 19 | 'examples/scalar/from_file.py', 20 | 'examples/elasticity/linelas_3d.py'] 21 | self.tutorial_files = ['tutorials/01_trig_pol.py', 22 | 'tutorials/02_homogenisation.py', 23 | 'tutorials/03_exact_integration_simple.py', 24 | 'tutorials/04_exact_integration_fast.py'] 25 | 26 | def tearDown(self): 27 | pass 28 | 29 | def test_examples(self): # testing example files 30 | print('\nControling input files...') 31 | for input_file in self.input_files: 32 | print(' control of file: {}'.format(input_file)) 33 | self.examples(input_file) 34 | print('...ok') 35 | 36 | def examples(self, input_file): # test a particular example file 37 | basen = os.path.basename(input_file) 38 | conf = import_file(input_file) 39 | 40 | for conf_problem in conf.problems: 41 | prob = 
Problem(conf_problem, conf) 42 | prt.disable() 43 | prob.calculate() 44 | prt.enable() 45 | py_version = sys.version_info[0] 46 | file_res = 'test_results/python%d/%s_%s' \ 47 | % (py_version, basen.split('.')[0], prob.name) 48 | if py_version == 2: 49 | with open(file_res, 'r') as frs: 50 | res = Pickle.load(frs) 51 | elif py_version == 3: 52 | with open(file_res, 'rb') as frs: 53 | res = Pickle.load(frs) 54 | else: 55 | raise NotImplementedError('Python version!') 56 | 57 | # check the homogenized matrices 58 | for primdual in prob.solve['primaldual']: 59 | kwpd = 'mat_'+primdual 60 | for kw in prob.output[kwpd]: 61 | dif = prob.output[kwpd][kw]-res[kwpd][kw] 62 | val = np.linalg.norm(dif.ravel(), np.inf) 63 | msg = 'Incorrect ({}) in problem ({})'.format(kw, prob.name) 64 | self.assertAlmostEqual(0, val, msg=msg, delta=1e-9) 65 | prt.disable() 66 | prob.postprocessing() 67 | prt.enable() 68 | 69 | def test_tutorials(self): # test tutorials 70 | print('\nControling tutorials...') 71 | for filen in self.tutorial_files: 72 | print(' control of file: {}'.format(filen)) 73 | prt.disable() 74 | exec(compile(open(filen).read(), filen, 'exec'), {'__name__': 'test'}) 75 | prt.enable() 76 | print('...ok') 77 | 78 | if __name__ == "__main__": 79 | from ffthompy.matvecs.unittest_matvec import Test_matvec 80 | from ffthompy.tensors.unittest_operators import Test_operators 81 | from ffthompy.tensors.unittest_tensors import Test_tensors 82 | from ffthompy.mechanics.unittest_matcoef import Test_matcoef 83 | from ffthompy.general.unittest_solver import Test_solvers 84 | from ffthompy.unittest_materials import Test_materials 85 | from ffthompy.tensorsLowRank.unittest_sparse import Test_tensorsLowRank 86 | 87 | suite = unittest.TestSuite() 88 | suite.addTest(unittest.makeSuite(Test_main)) 89 | suite.addTest(unittest.makeSuite(Test_matvec)) 90 | suite.addTest(unittest.makeSuite(Test_operators)) 91 | suite.addTest(unittest.makeSuite(Test_tensors)) 92 | suite.addTest(unittest.makeSuite(Test_matcoef)) 93 | suite.addTest(unittest.makeSuite(Test_solvers)) 94 | suite.addTest(unittest.makeSuite(Test_materials)) 95 | suite.addTest(unittest.makeSuite(Test_tensorsLowRank)) 96 | 97 | runner=unittest.TextTestRunner() 98 | runner.run(suite) 99 | -------------------------------------------------------------------------------- /tutorials/04_exact_integration_fast.py: -------------------------------------------------------------------------------- 1 | from __future__ import division, print_function 2 | 3 | print(""" 4 | This example shows how to interactively solve a problem of 5 | FFT-based homogenization with exact integration, which is described in 6 | J. Vondrejc, Improved guaranteed computable bounds on homogenized properties 7 | of periodic media by FourierGalerkin method with exact integration, 8 | Int. J. Numer. Methods Eng., 2016. 9 | This publication will be referred as IJNME2016. 10 | """) 11 | 12 | print("""Problem is defined via problem definition with instance 'pb' 13 | with a following keys: 14 | 15 | material : dictionary 16 | there are two possibilities to define material: 17 | a) with a function that returns a material coefficients at grid points 18 | b) using 'inclusions' (square or circle) with given 19 | 'positions', 20 | 'params' (defining a size of inclusion), 21 | 'vals' (defining the material parameters across inclusion) 22 | 23 | solve : dictionary 24 | it defines what to solve with following keys: 25 | 'kind' (stores the method of numerical integration) 26 | 'N' (no. 
of grid points) 27 | 'primaldual' (defines if it is to solve primal or dual formulation) 28 | 29 | postprocess : list 30 | the way for evaluation of homogenized properties 31 | 32 | solver : dictionary 33 | stores the parameters for linear solver 34 | """) 35 | 36 | import os 37 | import sys 38 | sys.path.insert(0, os.path.normpath(os.path.join(sys.path[0], '..'))) 39 | 40 | import numpy as np 41 | from ffthompy.materials import Material 42 | import ffthompy.projections as proj 43 | from ffthompy.tensors import DFT, Tensor, Operator, matrix2tensor 44 | from ffthompy.general.solver import linear_solver 45 | 46 | 47 | dim = 2 # topological dimension of a problem 48 | N = 25*np.ones(dim, dtype=np.int32) # no. of discretisation points 49 | P = 5*np.ones(dim, dtype=np.int32) # resolution of material 50 | 51 | pb = {'name': 'prob1', 52 | 'physics': 'elasticity', 53 | 'material': {'Y': np.ones(dim), # size of cell 54 | 'inclusions': ['square', 'otherwise'], # types of inclusions 55 | 'positions': [np.zeros(dim), ''], # position of inclusions 56 | 'params': [0.6*np.ones(dim), ''], # sizes of inclusions 57 | 'vals': [11*np.eye(dim), np.eye(dim)], # material coef. 58 | 'order': 1, # approximation order of material coef. 59 | 'P': P, # resolution of material 60 | }, 61 | 'solve': {'kind': 'Ga', # defines a way of numerical integration 62 | 'N': N, # no. of discretisation points (order of trig. polynomials) 63 | 'primaldual': ['primal']}, # distiguish primal and dual formul. 64 | 'postprocess': [{'kind': 'Ga'}], 65 | 'solver': {'kind': 'CG', 66 | 'tol': 1e-8, 67 | 'maxiter': 1e3}} 68 | 69 | 70 | # definition of material coefficients based on grid-based composite 71 | mat = Material(pb['material']) 72 | Nbar = 2*pb['solve']['N'] - 1 73 | A = mat.get_A_Ga(Nbar=Nbar, primaldual=pb['solve']['primaldual'][0]) 74 | 75 | # projections in Fourier space 76 | _, hG1N, _ = proj.scalar(pb['solve']['N'], pb['material']['Y'], NyqNul=True, tensor=True) 77 | # increasing the projection with zeros to comply with a projection 78 | # on double grid, see Definition 24 in IJNME2016 79 | hG1N = hG1N.enlarge(Nbar) 80 | 81 | FN = DFT(name='FN', inverse=False, N=Nbar) # discrete Fourier transform (DFT) 82 | FiN = DFT(name='FiN', inverse=True, N=Nbar) # inverse DFT 83 | 84 | G1N = Operator(name='G1', mat=[[FiN, hG1N, FN]]) # projection in original space 85 | Afun = Operator(name='FiGFA', mat=[[G1N, A]]) # lin. operator for a linear system 86 | 87 | E = np.zeros(dim); E[0] = 1 # macroscopic load 88 | EN = Tensor(name='EN', N=Nbar, shape=(dim,), Fourier=False) # constant trig. pol. 
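# (added comment) EN represents the constant macroscopic field: after
# EN.set_mean(E) below it equals E at every grid point, and the solver then
# looks for the fluctuation X such that the local field is X + EN, which is
# finally used to evaluate the homogenised coefficient via A(X + EN)*(X + EN).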
89 | EN.set_mean(E) 90 | 91 | x0 = Tensor(N=Nbar, shape=(dim,), Fourier=False) # initial approximation to solvers 92 | B = Afun(-EN) # right-hand side of linear system 93 | 94 | X, info = linear_solver(solver='CG', Afun=Afun, B=B, 95 | x0=x0, par=pb['solver'], callback=None) 96 | 97 | print('homogenised properties (component 11) =', A(X + EN)*(X + EN)) 98 | 99 | if __name__ == "__main__": 100 | ## plotting of local fields ################## 101 | X.plot(ind=0, N=N) 102 | 103 | print('END') 104 | -------------------------------------------------------------------------------- /ffthompy/unittest_materials.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | from ffthompy import PrintControl 4 | from ffthompy.materials import Material 5 | from ffthompy.mechanics.matcoef import ElasticTensor 6 | 7 | prt=PrintControl() 8 | 9 | class Test_materials(unittest.TestCase): 10 | 11 | def setUp(self): 12 | pass 13 | 14 | def tearDown(self): 15 | pass 16 | 17 | def test_operators(self): 18 | print('\nChecking materials...') 19 | for dim in [2,3]: #[2, 3]: 20 | for mat in ['square','pyramid']: #,'square' 21 | N = dim*(5,) 22 | print(('...checking dim={}; material="{}"'.format(dim,mat))) 23 | materials=dict( 24 | square0={'inclusions': ['square', 'otherwise'], 25 | 'positions': [np.zeros(dim), ''], 26 | 'params': [0.6*np.ones(dim), ''], 27 | 'vals': [10*np.eye(dim), 1.*np.eye(dim)], 28 | 'Y': np.ones(dim), 29 | 'order': None, }, 30 | square1={'inclusions': ['square', 'otherwise'], 31 | 'positions': [np.zeros(dim), ''], 32 | 'params': [0.6*np.ones(dim), ''], 33 | 'vals': [10*np.eye(dim), 1.*np.eye(dim)], 34 | 'Y': np.ones(dim), 35 | 'P': np.array(N), 36 | 'order': 0, }, 37 | pyramid0={'inclusions': ['pyramid', 'all'], 38 | 'positions': [np.zeros(dim), ''], 39 | 'params': [0.8*np.ones(dim), ''], 40 | 'vals': [10*np.eye(dim), 1.*np.eye(dim)], 41 | 'Y': np.ones(dim), 42 | 'order': None, }, 43 | pyramid1={'inclusions': ['pyramid', 'all'], 44 | 'positions': [np.zeros(dim), ''], 45 | 'params': [0.8*np.ones(dim), ''], 46 | 'vals': [10*np.eye(dim), 1.*np.eye(dim)], 47 | 'Y': np.ones(dim), 48 | 'P': np.array(N), 49 | 'order': 1, },) 50 | 51 | mat0=Material(materials[mat+'0']) 52 | Aga0=mat0.get_A_Ga(N, primaldual='primal') 53 | mat1=Material(materials[mat+'1']) 54 | Aga1=mat1.get_A_Ga(N, primaldual='primal') 55 | 56 | msg='dim={}; material={}'.format(dim,mat) 57 | self.assertAlmostEqual(0, np.linalg.norm(Aga0.val[0, 0]-Aga1.val[0, 0]), 58 | msg=msg, delta=1e-13) 59 | print('...ok') 60 | 61 | def test_eigenvalues(self): 62 | print('\nEigenvalues of material tensors...') 63 | 64 | # SCALAR PROBLEM 65 | dim=2 66 | scalar={'inclusions': ['square', 'otherwise'], 67 | 'positions': [np.zeros(dim), ''], 68 | 'params': [0.6*np.ones(dim), ''], 69 | 'vals': [10*np.eye(dim), 1.*np.eye(dim)], 70 | 'N': dim*(5,), 71 | 'Y': np.ones(dim), 72 | 'order': None,} 73 | 74 | # ELASTICITY 75 | dim = 3 76 | matcoefM=ElasticTensor(bulk=1, mu=1) 77 | matcoefI=ElasticTensor(bulk=10, mu=5) 78 | 79 | elastic={'inclusions': ['square', 'otherwise'], 80 | 'positions': [np.zeros(dim), ''], 81 | 'params': [0.6*np.ones(dim), ''], # size of sides 82 | 'vals': [matcoefI.mandel, matcoefM.mandel], 83 | 'Y': np.ones(dim), 84 | 'order': None, 85 | 'N': dim*(5,), 86 | 'P': dim*(5,), } 87 | 88 | elastic2={'inclusions': ['square', 'otherwise'], 89 | 'positions': [np.zeros(dim), ''], 90 | 'params': [0.6*np.ones(dim), ''], # size of sides 91 | 'vals': [matcoefI.val, matcoefM.val], 
92 | 'Y': np.ones(dim), 93 | 'order': None, 94 | 'N': dim*(5,), 95 | 'P': dim*(5,), } 96 | 97 | for mat_conf in [scalar, elastic, elastic2]: 98 | mat=Material(mat_conf) 99 | A=mat.get_A_GaNi(mat_conf['N'], primaldual='primal') 100 | _=A.calc_eigs(sort=True, symmetric=True) 101 | 102 | print('...ok') 103 | 104 | 105 | if __name__ == "__main__": 106 | unittest.main() 107 | -------------------------------------------------------------------------------- /examples/lowRankTensorApproximations/diffusion_comp_residua.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | 4 | from ffthompy.tensorsLowRank.homogenisation import (homog_Ga_full_potential, homog_GaNi_full_potential, 5 | homog_Ga_sparse, homog_GaNi_sparse) 6 | from examples.lowRankTensorApproximations.setting import get_material_coef, kind_list, get_default_parameters 7 | from examples.lowRankTensorApproximations.plots import plot_residuals, save_experiment_settings 8 | import itertools 9 | 10 | os.nice(19) 11 | 12 | print('running comparison residual...') 13 | ####################################################### 14 | 15 | Ns = {'2': [1215], 16 | '3': [135]} 17 | 18 | kinds = {'2': [0], 19 | '3': [1,2]} 20 | 21 | material_list = [0,2] 22 | 23 | sol_rank_range_set={'2': [2,5,10,20,30], 24 | '3': [2,5,10,20]} 25 | 26 | data_folder = "data_for_plot/residua" 27 | 28 | save_experiment_settings(kind_list, Ns, kinds, sol_rank_range_set, material_list, 29 | data_folder=data_folder) 30 | 31 | for dim, material in itertools.product([2,3], material_list): 32 | if not os.path.exists('{}/dim_{}/mat_{}/'.format(data_folder,dim, material)): 33 | os.makedirs('{}/dim_{}/mat_{}/'.format(data_folder,dim, material)) 34 | 35 | for N, kind in itertools.product(Ns['{}'.format(dim)], kinds['{}'.format(dim)]): 36 | ################ MATERAL DATA AND SETTINGS ################ 37 | ## parameters 38 | pars, pars_sparse=get_default_parameters(dim, N, material, kind) 39 | pars_sparse.solver.update(dict(rank=1, # rank of solution vector 40 | minres_fails=int(1e6), # obtain info from all iters 41 | )) 42 | 43 | print('== format={}, N={}, dim={}, material={} ===='.format(pars_sparse.kind, 44 | N, dim, material)) 45 | 46 | # get material settings for experiment 47 | Aga, Agani, Agas, Aganis=get_material_coef(material, pars, pars_sparse) 48 | 49 | ####################################################################### 50 | 51 | ### COMPUTING FULL SOLUTION ### 52 | iter_Ga = list() 53 | iter_GaNi = list() 54 | 55 | ## Compute Full solutions 56 | resP_Ga = homog_Ga_full_potential(Aga, pars) 57 | resP_GaNi = homog_GaNi_full_potential(Agani, Aga, pars) 58 | 59 | ############ SPARSE SOLUTIONS ############### 60 | iter_Ga_Spar = list() 61 | res_Ga_Spar = list() 62 | 63 | iter_GaNi_Spar = list() 64 | res_GaNi_Spar = list() 65 | 66 | for sol_rank in sol_rank_range_set['{}'.format(dim)]: # rank of solution vector 67 | print('solution rank={}'.format(sol_rank)) 68 | pars_sparse.solver.update(dict(rank=sol_rank)) 69 | 70 | iter_Ga.append(resP_Ga.info['kit']) 71 | iter_GaNi.append(resP_GaNi.info['kit']) 72 | 73 | resS_Ga = homog_Ga_sparse(Agas, pars_sparse) 74 | res_Ga_Spar.append(resS_Ga.solver['norm_res']) 75 | iter_Ga_Spar.append(resS_Ga.solver['kit']) 76 | 77 | resS_GaNi = homog_GaNi_sparse(Aganis, Agas, pars_sparse) 78 | res_GaNi_Spar.append(resS_GaNi.solver['norm_res']) 79 | iter_GaNi_Spar.append(resS_GaNi.solver['kit']) 80 | 81 | pickle.dump(res_Ga_Spar, open("{}/dim_{}/mat_{}/res_Ga_Spar_{}_{}_{}.p" 82 | 
.format(data_folder, dim, material, kind, N, 83 | pars_sparse.solver['method']), "wb")) 84 | pickle.dump(iter_Ga_Spar, open("{}/dim_{}/mat_{}/iter_Ga_Spar_{}_{}_{}.p" 85 | .format(data_folder, dim, material, kind, N, 86 | pars_sparse.solver['method']), "wb")) 87 | 88 | pickle.dump(res_GaNi_Spar, open("{}/dim_{}/mat_{}/res_GaNi_Spar_{}_{}_{}.p" 89 | .format(data_folder, dim, material, kind, N, 90 | pars_sparse.solver['method']), "wb")) 91 | pickle.dump(iter_GaNi_Spar, open("{}/dim_{}/mat_{}/iter_GaNi_Spar_{}_{}_{}.p" 92 | .format(data_folder, dim, material, kind, N, 93 | pars_sparse.solver['method']), "wb")) 94 | 95 | pickle.dump(iter_Ga, open("{}/dim_{}/mat_{}/iter_Ga_{}.p" 96 | .format(data_folder, dim, material, N), "wb")) 97 | pickle.dump(iter_GaNi, open("{}/dim_{}/mat_{}/iter_GaNi_{}.p" 98 | .format(data_folder, dim, material, N), "wb")) 99 | 100 | plot_residuals() 101 | 102 | print('END') 103 | -------------------------------------------------------------------------------- /examples/lowRankTensorApproximations/diffusion_comp_time.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | 4 | from ffthompy import Struct 5 | from ffthompy.tensorsLowRank.homogenisation import (homog_Ga_full_potential, 6 | homog_Ga_sparse, ) 7 | from examples.lowRankTensorApproximations.setting import get_material_coef, getMat_conf, get_default_parameters 8 | 9 | from ffthompy.tensorsLowRank.materials import LowRankMaterial 10 | import itertools 11 | 12 | 13 | print('running time-comparison for material with square inclusion...') 14 | 15 | kinds = {'2': 0, 16 | '3': 2,} 17 | 18 | N_lists = {'2': [45,135,405,1215], 19 | '3': [5,15,45,135,175]} 20 | 21 | kind_list=['cano','tucker','tt'] 22 | 23 | data_folder = "data_for_plot/time" 24 | 25 | for dim, material in itertools.product([2,3], [0,3]): 26 | if not os.path.exists('{}/dim_{}/mat_{}/'.format(data_folder,dim, material)): 27 | os.makedirs('{}/dim_{}/mat_{}/'.format(data_folder,dim, material)) 28 | 29 | N_list = N_lists['{}'.format(dim)] 30 | 31 | kind =kinds['{}'.format(dim)] 32 | 33 | full_time_list = [None]*len(N_list) 34 | sparse_time_list = [None]*len(N_list) 35 | rank_list = [None]*len(N_list) 36 | memory_list = [None]*len(N_list) 37 | 38 | for i, N in enumerate(N_list): 39 | # PARAMETERS ############################################################## 40 | pars, pars_sparse=get_default_parameters(dim, N, material, kind) 41 | pars.solver.update(dict(tol=1e-6)) 42 | 43 | # generating material coefficients 44 | Aga, Agani, Agas, Aganis=get_material_coef(material, pars, pars_sparse) 45 | 46 | print('\n== Full solution with potential by CG (Ga) ===========') 47 | resP_Ga=homog_Ga_full_potential(Aga, pars) 48 | print('mean of solution={}'.format(resP_Ga.Fu.mean())) 49 | print('homogenised properties (component 11) = {}'.format(resP_Ga.AH)) 50 | full_time_list[i]=resP_Ga.time 51 | 52 | # PARAMETERS FOR SPARSE SOLVER s######################### 53 | alp=3 # multiplier to increase the discretisation grid for tensorsLowRank solver, 54 | # which enables to achieve the same level of accuracy as the full solver. 
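# (added note) e.g. for N=45 and alp=3 the low-rank problem is discretised on a
# (135,)*dim grid, whereas the full reference solution above used (45,)*dim;
# the rank loop below then stops at the first rank whose homogenised
# coefficient does not exceed the full-grid reference (resS_Ga.AH <= resP_Ga.AH).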
55 | pars.update(Struct(N=dim*(alp*N,),)) # number of voxels (assumed equal for all directions) 56 | # ---------------------------- 57 | 58 | for r in range(4, N+1, 2): 59 | pars_sparse.solver.update(dict(rank=r)) # rank of solution vector 60 | 61 | print('== format={}, N={}, dim={}, material={} ===='.format(pars_sparse.kind, 62 | N, dim, material)) 63 | 64 | # PROBLEM DEFINITION ###################################################### 65 | # generating material coefficients 66 | pars, pars_sparse, mat_conf=getMat_conf(material, pars, pars_sparse) 67 | mats=LowRankMaterial(mat_conf, pars_sparse.kind) 68 | Agas=mats.get_A_Ga(pars_sparse.Nbar(pars_sparse.N), primaldual='primal', 69 | k=pars_sparse.matrank) 70 | 71 | print('\n== SPARSE solver with preconditioner (Ga) =======================') 72 | resS_Ga=homog_Ga_sparse(Agas, pars_sparse) 73 | print('mean of solution={}'.format(resS_Ga.Fu.mean())) 74 | print('homogenised properties (component 11) = {}'.format(resS_Ga.AH)) 75 | print('norm(resP)={}'.format(resS_Ga.solver['norm_res'])) 76 | print('memory efficiency = {0}/{1} = {2}'.format(resS_Ga.Fu.memory, resP_Ga.Fu.val.size, resS_Ga.Fu.memory/resP_Ga.Fu.val.size)) 77 | print("solution discrepancy", resS_Ga.AH - resP_Ga.AH) 78 | 79 | if resS_Ga.AH - resP_Ga.AH <= 0: 80 | rank_list[i]=r 81 | sparse_time_list[i]=resS_Ga.time 82 | memory_list[i]=resS_Ga.Fu.memory/resP_Ga.Fu.val.size # memory efficiency 83 | print("tensorsLowRank solver time:",sparse_time_list) 84 | print("full solver time:",full_time_list) 85 | print("rank:",rank_list) 86 | break 87 | 88 | print("tensorsLowRank solver time:",sparse_time_list) 89 | print("full solver time:",full_time_list) 90 | print("rank:",rank_list) 91 | 92 | pickle.dump(N_list, open("{}/dim_{}/mat_{}/N_list_{}.p" 93 | .format(data_folder,dim, material,kind_list[kind]), "wb")) 94 | pickle.dump(full_time_list, open("{}/dim_{}/mat_{}/full_time_list_{}.p" 95 | .format(data_folder,dim, material,kind_list[kind]), "wb")) 96 | pickle.dump(sparse_time_list, open("{}/dim_{}/mat_{}/sparse_time_list_{}.p" 97 | .format(data_folder,dim, material,kind_list[kind]), "wb")) 98 | 99 | print('END') 100 | -------------------------------------------------------------------------------- /tutorials/03_exact_integration_simple.py: -------------------------------------------------------------------------------- 1 | from __future__ import division, print_function 2 | 3 | print(""" 4 | Numerical homogenisation based on exact integration, which is described in 5 | J. Vondrejc, Improved guaranteed computable bounds on homogenized properties 6 | of periodic media by FourierGalerkin method with exact integration, 7 | Int. J. Numer. Methods Eng., 2016. 8 | 9 | This is a self-contained tutorial implementing scalar problem in dim=2 or dim=3 10 | on a unit periodic cell Y=(-0.5,0.5)**dim 11 | with a square (2D) or cube (3D) inclusion of size 0.6 (side). 12 | The material is identity I in matrix phase and 11*I in inclusion phase. 
13 | """) 14 | 15 | import numpy as np 16 | import itertools 17 | from scipy.sparse.linalg import cg, LinearOperator 18 | 19 | dim = 3 # number of spatial dimensions 20 | N = dim*(5,) # number of discretization points 21 | dN = tuple(2*np.array(N)-1) # double grid value 22 | vec_shape=(dim,)+dN 23 | 24 | # indicator function indicating the phase per grid point (square inclusion) 25 | P = dim*(5,) # material resolution in each spatial dimension 26 | phi = np.zeros(P, dtype='float') 27 | if dim==2: 28 | phi[1:4, 1:4] = 1 29 | elif dim==3: 30 | phi[1:4, 1:4, 1:4] = 1 31 | 32 | # material coefficients at grid points 33 | C = np.einsum('ij,...->ij...', 11*np.eye(dim), phi) 34 | C += np.einsum('ij,...->ij...', 1*np.eye(dim), 1-phi) 35 | 36 | # tensor products / (inverse) Fourier transform / frequencies 37 | dot = lambda A, B: np.einsum('ij...,j...->i...', A, B) 38 | fft = lambda x, N: np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(x), N))/np.prod(np.array(N)) 39 | ifft = lambda x, N: np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(x), N))*np.prod(np.array(N)) 40 | freq_fun = lambda N: np.arange(np.fix(-N/2.), np.fix(N/2.+0.5)) 41 | freq = [freq_fun(n) for n in dN] 42 | 43 | def get_weights(h): # calculation of integral weights of rectangular function 44 | Wphi = np.zeros(dN) # integral weights 45 | for ind in itertools.product(*[range(n) for n in dN]): 46 | Wphi[ind] = np.prod(h) 47 | for ii in range(dim): 48 | Wphi[ind] *= np.sinc(h[ii]*freq[ii][ind[ii]]) 49 | return Wphi 50 | 51 | def decrease(val, dN): # auxiliary function to remove unnecesary Fourier freq. 52 | dN=np.array(dN) 53 | N=np.array(val.shape[-dN.size:]) 54 | ibeg = np.array(np.fix((N-dN+(dN % 2))/2), dtype=np.int) 55 | iend = np.array(np.fix((N+dN+(dN % 2))/2), dtype=np.int) 56 | if dN.size==2: 57 | return val[:,:,ibeg[0]:iend[0],ibeg[1]:iend[1]] 58 | elif dN.size==3: 59 | return val[:,:,ibeg[0]:iend[0],ibeg[1]:iend[1],ibeg[2]:iend[2]] 60 | 61 | ## GRID-BASED COMPOSITE ######### evaluate the matrix of Galerkin approximation 62 | hC0 = np.prod(np.array(P))*fft(C, P) 63 | if P == dN: 64 | hCex = hC0 65 | elif P > dN: 66 | hCex = decrease(hC0, dN) 67 | elif P < dN: 68 | factor = np.max(np.ceil(np.array(dN) / np.array(P))) 69 | hCper = np.tile(hC0, int(2*factor-1)*np.ones(dim, dtype=np.int)) 70 | hCex = decrease(hCper, dN) 71 | Cex = ifft(np.einsum('ij...,...->ij...', hCex, get_weights(1./np.array(P))), dN).real 72 | 73 | ## INCLUSION-BASED COMPOSITE #### another expression of Cex 74 | Wraw = get_weights(0.6*np.ones(dim)) 75 | """HINT: the size 0.6 corresponds to the size of square inclusion; it is exactly 76 | the size of topology generated by phi, i.e. 3x3 pixels in 5x5 image of PUC with 77 | PUC size 1; then 0.6 = 3./5. 78 | """ 79 | char_square = ifft(Wraw, dN).real 80 | Cex2 = np.einsum('ij...,...->ij...', 11*np.eye(dim), char_square) 81 | Cex2 += np.einsum('ij...,...->ij...', 1*np.eye(dim), 1.-char_square) 82 | 83 | ## checking that the Cex2 is the same 84 | print('zero check:', np.linalg.norm(Cex-Cex2)) 85 | 86 | Gamma = np.zeros((dim,dim)+ tuple(dN)) # zero initialize 87 | for i,j in itertools.product(range(dim),repeat=2): 88 | for ind in itertools.product(*[range(int((dN[k]-N[k])/2), int((dN[k]-N[k])/2+N[k])) for k in range(dim)]): 89 | q = np.array([freq[ii][ind[ii]] for ii in range(dim)]) # frequency vector 90 | if not q.dot(q) == 0: # zero freq. 
-> mean 91 | Gamma[(i,j)+ind] = -(q[i]*q[j])/(q.dot(q)) 92 | 93 | # - convert to operators 94 | G = lambda X: np.real(ifft(dot(Gamma, fft(X, dN)), dN)).reshape(-1) 95 | A = lambda x: dot(Cex, x.reshape(vec_shape)) 96 | GA = lambda x: G(A(x)) 97 | 98 | # initiate strain/stress (2nd order tensor for each grid point) 99 | X = np.zeros(vec_shape, dtype=np.float) 100 | x = X.reshape(-1) 101 | # macroscopic value 102 | E = np.zeros_like(X); E[0] = 1. 103 | b = -GA(E.reshape(-1)) 104 | 105 | # iterative solution of the linear system 106 | Alinoper = LinearOperator(shape=(x.size, x.size), matvec=GA, dtype=np.float) 107 | x, info = cg(A=Alinoper, b=b, x0=X.reshape(-1)) # conjugate gradients 108 | state = x.reshape(vec_shape) + E 109 | flux = dot(Cex, state) 110 | 111 | AH_11 = np.sum(flux*state)/np.prod(np.array(dN)) # homogenised properties 112 | print('homogenised coefficient (component 11) =', AH_11) 113 | 114 | print('END') 115 | -------------------------------------------------------------------------------- /examples/scalar/scalar_3d.py: -------------------------------------------------------------------------------- 1 | """ 2 | Input file for a scalar linear elliptic problems. 3 | """ 4 | 5 | import numpy as np 6 | import os 7 | from ffthompy.general.base import get_base_dir 8 | 9 | base_dir = get_base_dir() 10 | 11 | dim = 3 12 | N = 5*np.ones(dim, dtype=np.int32) 13 | 14 | 15 | materials = {'cube': {'inclusions': ['cube', 'otherwise'], 16 | 'positions': [np.zeros(dim), ''], 17 | 'params': [0.7*np.ones(dim), ''], # size of sides 18 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 19 | 'Y': np.ones(dim), 20 | 'order': None, 21 | }, 22 | 'cube_Ga': {'inclusions': ['cube', 'otherwise'], 23 | 'positions': [np.zeros(dim), ''], 24 | 'params': [1.4*np.ones(dim), ''], # size of sides 25 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 26 | 'Y': 2.*np.ones(dim), 27 | 'order': 0, 28 | 'P': 2*N, 29 | }, 30 | 'ball': {'inclusions': ['ball', 'otherwise'], 31 | 'positions': [np.zeros(dim), ''], 32 | 'params': [1., ''], # diamater 33 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 34 | 'Y': np.ones(dim), 35 | }, 36 | 'ball2': {'inclusions': ['ball', 'otherwise'], 37 | 'positions': [np.zeros(dim), ''], 38 | 'params': [2., ''], # diamater 39 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 40 | 'Y': 2.*np.ones(dim), 41 | }, 42 | 'laminate': {'inclusions': ['cube', 'otherwise'], 43 | 'positions': [np.zeros(dim), ''], 44 | 'params': [np.array([1., 1., 0.5]), ''], 45 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 46 | 'Y': np.ones(dim), 47 | }, 48 | 'laminate2': {'inclusions': ['cube', 'otherwise'], 49 | 'positions': [np.zeros(dim), ''], 50 | 'params': [np.array([2., 2., 1.]), ''], 51 | 'vals': [11.*np.eye(dim), 1.*np.eye(dim)], 52 | 'Y': 2.*np.ones(dim), 53 | }, 54 | 'prism': {'inclusions': ['cube', 'otherwise'], 55 | 'positions': [np.zeros(dim), ''], 56 | 'params': [np.array([1., 0.7, 0.7]), ''], 57 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 58 | 'Y': np.ones(dim), 59 | }, 60 | 'prism2': {'inclusions': ['cube', 'otherwise'], 61 | 'positions': [np.zeros(dim), ''], 62 | 'params': [np.array([2., 1.4, 1.4]), ''], 63 | 'vals': [11.*np.eye(dim), 1.*np.eye(dim)], 64 | 'Y': 2.*np.ones(dim), 65 | }, 66 | } 67 | 68 | 69 | 70 | problems = [ 71 | {'name': 'prob1', 72 | 'physics': 'scalar', 73 | 'material': 'cube', 74 | 'solve': {'kind': 'GaNi', 75 | 'N': N, 76 | 'primaldual': ['primal', 'dual']}, 77 | 'postprocess': [{'kind': 'GaNi'}, 78 | {'kind': 'Ga', 79 | 'order': None}, 80 | {'kind': 'Ga', 81 | 'order': 0, 82 | 'P': 3*N}, 83 | {'kind': 
'Ga', 84 | 'order': 1, 85 | 'P': 3*N}], 86 | 'solver': {'kind': 'CG', 87 | 'tol': 1e-6, 88 | 'maxiter': 1e3}, 89 | 'save': {'filename': os.path.join(base_dir, 'temp/scalar_3d_prob1'), 90 | 'data': 'all'}, 91 | }, 92 | {'name': 'prob2', 93 | 'physics': 'scalar', 94 | 'material': 'cube', 95 | 'solve': {'kind': 'Ga', 96 | 'N': N, 97 | 'primaldual': ['primal', 'dual']}, 98 | 'postprocess': [{'kind': 'Ga', 99 | }], 100 | 'solver': {'kind': 'CG', 101 | 'tol': 1e-6, 102 | 'maxiter': 1e3}, 103 | 'save': {'filename': os.path.join(base_dir, 'temp/scalar_3d_prob2'), 104 | 'data': 'all'}, 105 | }, 106 | {'name': 'prob3', 107 | 'physics': 'scalar', 108 | 'material': 'cube_Ga', 109 | 'solve': {'kind': 'Ga', 110 | 'N': N, 111 | 'primaldual': ['primal', 'dual']}, 112 | 'postprocess': [{'kind': 'Ga', 113 | },], 114 | 'solver': {'kind': 'CG', 115 | 'tol': 1e-6, 116 | 'maxiter': 1e3}, 117 | 'save': {'filename': os.path.join(base_dir, 'temp/scalar_3d_prob3'), 118 | 'data': 'all'}, 119 | }, 120 | ] 121 | 122 | if __name__=='__main__': 123 | import subprocess 124 | subprocess.call(['../../main.py', __file__]) 125 | -------------------------------------------------------------------------------- /ffthompy/tensorsLowRank/materials.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ffthompy.materials import Material 3 | from ffthompy.tensorsLowRank.objects import SparseTensor 4 | from ffthompy.trigpol import Grid 5 | 6 | 7 | class LowRankMaterial(Material): 8 | 9 | def __init__(self, material_conf, kind='tt'): 10 | Material.__init__(self, material_conf) 11 | self.mat=Material(material_conf) 12 | self.kind=kind 13 | 14 | def get_A_GaNi(self, N, P=None, primaldual='primal', k=None, tol=None): 15 | if P is None: 16 | P=self.conf['P'] 17 | 18 | A_GaNi=self.mat.get_A_GaNi(P, primaldual='primal') 19 | A_GaNis=SparseTensor(kind=self.kind, val=A_GaNi.val[0, 0], rank=k, name='A_GaNi') 20 | 21 | return A_GaNis.repeat(N) 22 | 23 | def get_A_Ga(self, Nbar, primaldual='primal', P=None, tol=None, k=None): 24 | if P is None and 'P' in self.conf: 25 | P=self.conf['P'] 26 | As=self.get_A_GaNi(N=P, primaldual=primaldual, k=k, tol=tol) 27 | As.set_fft_form(fft_form='c') 28 | FAs=As.fourier() 29 | 30 | h=self.Y/P 31 | if self.conf['order'] in [0, 'constant']: 32 | Wraw=get_weights_con(h, Nbar, self.Y, self.kind) 33 | elif self.conf['order'] in [1, 'bilinear']: 34 | Wraw=get_weights_lin(h, Nbar, self.Y, self.kind) 35 | else: 36 | raise ValueError() 37 | 38 | FAs*=np.prod(np.array(P)) 39 | if np.allclose(P, Nbar): 40 | hAM=FAs 41 | elif np.all(np.greater_equal(P, Nbar)): 42 | hAM=FAs.decrease(Nbar) 43 | elif np.all(np.less(P, Nbar)): 44 | factor=np.ceil(np.array(Nbar, dtype=np.float64)/P) 45 | hAM0per=tile(FAs, 2*np.array(factor, dtype=np.int)-1) 46 | hAM=hAM0per.decrease(Nbar) 47 | 48 | WFAs=(Wraw*hAM).fourier(real_output=True) 49 | WFAs.name='Agas' 50 | return WFAs.set_fft_form() 51 | 52 | 53 | def tile(FAs, N): 54 | assert(FAs.Fourier is True) 55 | 56 | kind=FAs.__class__.__name__ 57 | 58 | if kind.lower() in ['cano', 'canotensor', 'tucker']: 59 | basis=[] 60 | for ii, n in enumerate(N): 61 | basis.append(np.tile(FAs.basis[ii], (1, n))) 62 | return SparseTensor(kind=kind, name=FAs.name+'_tiled', core=FAs.core, basis=basis, 63 | Fourier=FAs.Fourier, fft_form=FAs.fft_form) 64 | elif kind.lower() in ['tt', 'tensortrain']: 65 | cl=FAs.to_list(FAs) 66 | cl_new=[None]*FAs.d 67 | for i in range(FAs.d): 68 | cl_new[i]=np.tile(cl[i], (1, N[i], 1)) 69 | 70 | return 
SparseTensor(kind=kind, core=cl_new, name=FAs.name+'_tiled', 71 | Fourier=FAs.Fourier, fft_form=FAs.fft_form) 72 | 73 | 74 | def get_weights_con(h, Nbar, Y, kind): 75 | """ 76 | it evaluates integral weights, 77 | which are used for upper-lower bounds calculation, 78 | with constant rectangular inclusion 79 | 80 | Parameters 81 | ---------- 82 | h - the parameter determining the size of inclusion 83 | Nbar - no. of points of regular grid where the weights are evaluated 84 | Y - the size of periodic unit cell 85 | Returns 86 | ------- 87 | Wphi - integral weights at regular grid sizing Nbar 88 | """ 89 | dim=np.size(Y) 90 | meas_puc=np.prod(Y) 91 | ZN2l=Grid.get_ZNl(Nbar, fft_form='c') 92 | Wphi=[] 93 | for ii in np.arange(dim): 94 | Nshape=np.ones(dim, dtype=np.int) 95 | Nshape[ii]=Nbar[ii] 96 | Nrep=np.copy(Nbar) 97 | Nrep[ii]=1 98 | # Wphi.append(np.reshape(h[ii]/meas_puc*np.sinc(h[ii]*ZN2l[ii]/Y[ii]), (1,-1, 1))) # since it is rank 1 99 | Wphi.append(np.atleast_2d(h[ii]/meas_puc*np.sinc(h[ii]*ZN2l[ii]/Y[ii]))) 100 | 101 | if kind.lower() in ['cano', 'canotensor', 'tucker']: 102 | return SparseTensor(kind=kind, core=np.array([1.]), basis=Wphi, Fourier=True, fft_form='c') 103 | elif kind.lower() in ['tt', 'tensortrain']: 104 | cl=[cr.reshape((1,-1, 1)) for cr in Wphi] 105 | return SparseTensor(kind=kind, core=cl, Fourier=True, fft_form='c') 106 | 107 | 108 | def get_weights_lin(h, Nbar, Y, kind): 109 | """ 110 | it evaluates integral weights, 111 | which are used for upper-lower bounds calculation, 112 | with bilinear inclusion at rectangular area 113 | 114 | Parameters 115 | ---------- 116 | h - the parameter determining the size of inclusion (half-size of support) 117 | Nbar - no. of points of regular grid where the weights are evaluated 118 | Y - the size of periodic unit cell 119 | 120 | Returns 121 | ------- 122 | Wphi - integral weights at regular grid sizing Nbar 123 | """ 124 | d=np.size(Y) 125 | meas_puc=np.prod(Y) 126 | ZN2l=Grid.get_ZNl(Nbar, fft_form='c') 127 | Wphi=[] 128 | for ii in np.arange(d): 129 | Nshape=np.ones(d, dtype=np.int) 130 | Nshape[ii]=Nbar[ii] 131 | Nrep=np.copy(Nbar) 132 | Nrep[ii]=1 133 | Wphi.append(np.atleast_2d(h[ii]/meas_puc*np.sinc(h[ii]*ZN2l[ii]/Y[ii])**2)) 134 | 135 | if kind.lower() in ['cano', 'canotensor', 'tucker']: 136 | return SparseTensor(kind=kind, core=np.array([1.]), basis=Wphi, Fourier=True, fft_form='c') 137 | elif kind.lower() in ['tt', 'tensortrain']: 138 | cl=[cr.reshape((1,-1, 1)) for cr in Wphi] 139 | return SparseTensor(kind=kind, core=cl, Fourier=True, fft_form='c') 140 | -------------------------------------------------------------------------------- /ffthompy/tensorsLowRank/decompositions.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from numpy.linalg import svd, norm, qr 3 | from numpy import tensordot 4 | 5 | def unfold(T, dim): 6 | """ 7 | Unfolds a tensor T into a matrix, taking the dimension "dim" of T as the first dimension of the matrix, 8 | and flattening all the other dimensions into the other one dimension of the matrix. 9 | 10 | dim starts from 0. 11 | 12 | :param T: a tensor . 13 | :type T: numpy.ndarray 14 | :param dim: the dimension based on which the unfolding is made 15 | :type dim: int 16 | :returns: 2D numpy.array -- a matricisation of T. 
17 | """ 18 | Tm=np.moveaxis(T, dim, 0) 19 | return Tm.reshape(T.shape[dim],-1) 20 | 21 | def nModeProduct(T, M, n): 22 | """ 23 | n-Mode product of a tensor T and a matrix M, the summation is made along the nth dim. 24 | definition in paper "A MULTILINEAR SINGULAR VALUE DECOMPOSITION" by LIEVEN DE LATHAUWER , BART DE MOOR , AND JOOS VANDEWALLE 25 | 26 | For example, n with value 0, 1, or 2, would specify the 1st, 2nd or 3rd dim of the tensor T. 27 | For the matrix M, this function always take the second dimension, as if to multiply T by M on the left side. 28 | 29 | :param T: a tensor . 30 | :type T: numpy.ndarray 31 | :param M: a matrix 32 | :type M: numpy.array 33 | :param n: serial number of a dimension of T along which the summation is made. 34 | :type n: int 35 | :returns: numpy.ndarray -- a result tensor . 36 | """ 37 | 38 | P=tensordot(T, M, axes=([n], [1])) 39 | return np.rollaxis(P, len(T.shape)-1, n) 40 | 41 | def subTensor(T, k=None, index=None): 42 | """ 43 | extract a sub-tensor t = T[:k,:k,:k,...] or t = T[index]. 44 | 45 | :param T: a tensor . 46 | :type T: numpy.ndarray 47 | :param k: a list of integer k, indices larger than k is to be truncated. if "index" presents, this argument is overridden. 48 | :type k: list 49 | :param index: a list of list of integer indicating extracted indices on every dimension. 50 | :type index: list 51 | :returns: numpy.ndarray -- a sub-tensor . 52 | """ 53 | if k.any() is None and index.any() is None: 54 | return T 55 | elif index is not None: 56 | return T[np.ix_(*index)] 57 | else: 58 | if isinstance(k, int): # if only one integer is assigned to k 59 | k=k*np.ones((len(T.shape),), dtype=int) 60 | 61 | index=[None]*len(k) 62 | for i in range(len(k)): 63 | index[i]=list(range(k[i])) 64 | 65 | return T[np.ix_(*index)] 66 | 67 | 68 | def HOSVD(A, k=None, tol=None): 69 | """ 70 | High order svd of d-dim tensor A. so that A = S (*1) u1 (*2) u2 (*3) u3 ... (*d) ud, 71 | "(*n)" means n-mode product. S is the core, u1,u2,u3, ... are orthogonal basis. 72 | definition in paper "A MULTILINEAR SINGULAR VALUE DECOMPOSITION" 73 | by LIEVEN DE LATHAUWER , BART DE MOOR , AND JOOS VANDEWALLE 74 | 75 | :param A: a full tensor . 76 | :type A: numpy.ndarray 77 | 78 | :param k: rank for the truncation. 79 | :type k: numpy.list of integer or a single integer 80 | 81 | :param tol: error torlerance of the truncation 82 | :type tol: numpy.float 83 | 84 | :returns: numpy.ndarray -- the core tensor S, 85 | numpy.list -- a list of array containing basis 86 | """ 87 | 88 | d=len(A.shape) 89 | 90 | if d==2: 91 | u, s, vt=svd(A, full_matrices=False) 92 | U=[u, vt.T] 93 | S=np.diag(s) 94 | else: 95 | U=[None]*d 96 | for j in range(0, d): 97 | U[j], s, vt=svd(unfold(A, j), full_matrices=False) 98 | 99 | S=A.copy() 100 | for i in range(0, d): 101 | S=nModeProduct(S, U[i].T, i) 102 | 103 | if k is not None: 104 | if isinstance(k, int): # if only one integer is assigned to k 105 | k=k*np.ones((len(A.shape),), dtype=int) 106 | 107 | S=subTensor(S, k=k) 108 | for j in range(0, d): 109 | U[j]=U[j][:, :k[j]] 110 | 111 | return S, U 112 | 113 | def fast_qr(A): 114 | """ 115 | This is a recursive partioned QR. 
about two times faster than QR for tall matrices 116 | """ 117 | N,M=A.shape 118 | if N < M: 119 | Q, R =qr(A) # not tall matrices, use normal qr 120 | 121 | elif M>16: # the minimal partition size is 32 122 | R=np.zeros((M,M)) 123 | 124 | k= np.ceil(M/2).astype(int) 125 | qa, R[:k, :k]=fast_qr(A[:,:k]) 126 | 127 | R[:k,k:] = np.dot(qa.T, A[:,k:]) 128 | 129 | qb, R[k:, k:]=fast_qr(A[:,k:]-np.dot(qa, R[:k,k:])) 130 | 131 | Q = np.hstack((qa, qb)) 132 | else: # reach the smallest partition size, no more partitions, use normal qr 133 | Q, R =qr(A) 134 | 135 | return Q, R 136 | 137 | def new_expand_dims(a, axes): 138 | """ 139 | This is a new version of extend_dims() function of numpy, it extend multiple dims rather than one. 140 | """ 141 | # if int is passed, retain the same behaviour 142 | if type(axes) == int: 143 | return np.expand_dims(a, axes) 144 | # insert axes to given indices 145 | for ax in sorted(axes): 146 | a = np.expand_dims(a, ax) 147 | return a 148 | 149 | if __name__=='__main__': 150 | 151 | # A = np.random.rand(7,7) 152 | A=np.arange(49).reshape((7, 7)) 153 | print(A) 154 | 155 | S, U=HOSVD(A) 156 | print(S) 157 | print(U) 158 | print(norm(A-np.dot(U[0], np.dot(S, U[1].T)))) 159 | 160 | S2, U2=HOSVD(A, 5) 161 | print(S2) 162 | print(U2) 163 | print(norm(A-np.dot(U2[0], np.dot(S2, U2[1].T)))) 164 | # S,U=HOSVD2(A) 165 | # print S 166 | # print U 167 | # print norm(A-np.dot(U[0], np.dot(S, U[1].T))) 168 | 169 | print('END') 170 | -------------------------------------------------------------------------------- /tutorials/01_trig_pol.py: -------------------------------------------------------------------------------- 1 | from __future__ import division, print_function 2 | 3 | print(""" 4 | This tutorial explains the usage of trigonometric polynomials and relating 5 | operators for the use in FFT-based homogenization. 6 | 7 | The basic classes, which are implemented in module "homogenize.matvec", 8 | are listed here along with their important characteristics: 9 | Grid : contains method "Grid.get_grid_coordinates", which returns coordinates 10 | of grid points 11 | Tensor : this class represents a tensor-valued trigonometric polynomial and is 12 | thus the most important part of FFT-based homogenization 13 | DFT : this class represents matrices of Discrete Fourier Transform, which is 14 | implemented via central version of FFT algorithm 15 | ----""") 16 | 17 | import os 18 | import sys 19 | sys.path.insert(0, os.path.normpath(os.path.join(sys.path[0], '..'))) 20 | import numpy as np 21 | from ffthompy.trigpol import Grid 22 | from ffthompy.tensors import Tensor, DFT 23 | 24 | print(""" 25 | The work with trigonometric polynomials is shown for""") 26 | d = 2 27 | N = 5*np.ones(d, dtype=np.int32) 28 | print('dimension d =', d) 29 | print('number of grid points N =', N) 30 | print('which is implemented as a numpy.ndarray.') 31 | 32 | print(""" 33 | Particularly, the vector-valued trigonometric polynomial is created as an instance 'xN' of class 34 | 'Tensor' and the random values are assigned. 35 | """) 36 | 37 | xN = Tensor(name='trigpol_rand', shape=(d,), N=N) 38 | xN.randomize() 39 | 40 | print(""" 41 | Basic properties of a trigonometric polynomials can be printed with a norm 42 | corresponding to L2 norm of trigonometric polynomial, i.e. 43 | xN =""") 44 | print(xN) 45 | 46 | print(""" 47 | The values of trigonometric polynomials are stored in atribute val of type 48 | numpy.ndarray with shape = (self.d,) + tuple(self.N), i.e. 
49 | xN.val.shape =""") 50 | print(xN.val.shape) 51 | print("xN.val = xN[:] =") 52 | print(xN.val) 53 | 54 | print(""" 55 | In order to calculate Fourier coefficients of trigonometric polynomial, 56 | we define DFT operators that are provided in class 'DFT'. The operation 57 | is provided by central version of FFT algorithm and is implemented in method 58 | 'DFT.__call__' and/or 'DFT.__mul__'. 59 | """) 60 | 61 | FN = DFT(name='forward DFT', N=N, inverese=False) 62 | FiN = DFT(name='inverse DFT', N=N, inverse=True) 63 | print("FN = ") 64 | print(FN) 65 | print("FiN = ") 66 | print(FiN) 67 | print(""" 68 | The result of DFT is again the same trigonometric polynomial 69 | with representation in Fourier domain (with Fourier coefficients); 70 | FxN = FN*xN = FN(xN) =""") 71 | FxN = FN*xN # Fourier coefficients of xN 72 | print(FxN) 73 | 74 | print(""" 75 | The forward and inverse DFT are mutually inverse operations that can 76 | be observed by calculation of variable 'xN2': 77 | xN2 = FiN(FxN) = FiN(FN(xN)) =""") 78 | xN2 = FiN(FxN) # values of trigonometric polynomial at grid points 79 | print(xN2) 80 | print("and its comparison with initial trigonometric polynomial 'xN2'") 81 | print("(xN == xN2) = ") 82 | print(xN == xN2) 83 | 84 | print(""" 85 | The norm of trigonometric polynomial calculated from Fourier 86 | coefficients corresponds to L^2 norm and is the same like for values at grid 87 | points, which is a consequence of Parseval's identity: 88 | xN.norm() = np.linalg.norm(xN.val)/np.prod(xN.N)**0.5 = 89 | = (np.sum(xN.val*xN.val)/np.prod(xN.N))**0.5 = """) 90 | print(xN.norm()) 91 | print("""FxN.norm() = np.linalg.norm(FxN.val) = 92 | = np.sum(FxN.val*np.conj(FxN.val)).real**0.5 =""") 93 | print(FxN.norm()) 94 | 95 | print(""" 96 | The trigonometric polynomials can be also multiplied. The standard 97 | multiplication with '*' operations corresponds to scalar product 98 | leading to a square of norm, i.e. 99 | FxN.norm() = xN.norm() = (xN*xN)**0.5 = (FxN*FxN)**0.5 =""") 100 | print((xN*xN)**0.5) 101 | print((FxN*FxN)**0.5) 102 | 103 | 104 | print(""" 105 | The mean value of trigonometric polynomial is calculated independently for 106 | each component of vector-field of trigonometric polynomial. In the real space, 107 | it can be calculated as a mean of trigonometric polynomial at grid points, 108 | while in Fourier space, it corresponds to zero frequency placed in the 109 | center of grid, i.e. 110 | xN.mean()[0] = xN[0].mean() = xN.val[0].mean() = FxN[0, 2, 2].real =""") 111 | print(xN.mean()[0]) 112 | print(xN[0].mean()) 113 | print(xN.val[0].mean()) 114 | print(FxN[0, 2, 2].real) 115 | 116 | 117 | print("""======================== 118 | Finally, we will plot the fundamental trigonometric polynomial, which 119 | satisfies dirac-delta property at grid points and which 120 | plays a major way in a theory of FFT-based homogenization. 
121 | phi =""") 122 | phi = Tensor(name='phi_N,k', N=N, shape=()) 123 | phi.val[2, 2] = 1 124 | print(phi) 125 | print("phi.val =") 126 | print(phi.val) 127 | 128 | print(""" 129 | Fourier coefficients of phi 130 | Fphi = FN*phi = FN(phi) =""") 131 | Fphi = FN*phi 132 | print(Fphi) 133 | print("Fphi.val =") 134 | print(Fphi.val) 135 | 136 | print(""" 137 | In order to create a plot of this polynomial, it is 138 | evaluated on a fine grid sizing 139 | M = 16*N =""") 140 | M = 16*N 141 | print(M) 142 | 143 | print("phi_fine = phi.project(M) =") 144 | phi_fine = phi.project(M) 145 | print(phi_fine) 146 | print("""The procedure is provided by VecTri.enlarge(M) function, which consists of 147 | a calculation of Fourier coefficients, putting zeros to Fourier coefficients 148 | with high frequencies, and inverse FFT that evaluates the polynomial on 149 | a fine grid. 150 | """) 151 | 152 | 153 | print("""In order to plot this polynomial, we also set a size of a cell 154 | Y =""") 155 | Y = np.ones(d) # size of a cell 156 | print(Y) 157 | print(""" and evaluate the coordinates of grid points, which are stored in 158 | numpy.ndarray of following shape: 159 | coord.shape =""") 160 | coord = Grid.get_coordinates(M, Y) 161 | print(coord.shape) 162 | 163 | if __name__ == "__main__": 164 | print(""" 165 | Now, the plot of fundamental trigonometric polynomial is shown:""") 166 | from mpl_toolkits.mplot3d import axes3d 167 | import matplotlib.pyplot as plt 168 | fig = plt.figure() 169 | ax = fig.add_subplot(111, projection='3d') 170 | ax.plot_wireframe(coord[0], coord[1], phi_fine.val) 171 | plt.show() 172 | 173 | print('END') 174 | -------------------------------------------------------------------------------- /examples/lowRankTensorApproximations/diffusion_comp_time_stochastic_material.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pickle 3 | 4 | from ffthompy.tensorsLowRank.homogenisation import (homog_Ga_full_potential, 5 | homog_GaNi_full_potential, 6 | homog_Ga_sparse, 7 | homog_GaNi_sparse) 8 | from examples.lowRankTensorApproximations.setting import get_material_coef, kind_list, get_default_parameters 9 | import itertools 10 | 11 | 12 | print('running time-comparison for stochastic material...') 13 | 14 | kinds = {'2': 0, 15 | '3': 2,} 16 | 17 | N_lists = {'2': [45, 135, 320, 405, 640, 1215, 2560,3645,5120], 18 | '3': [5, 15, 45, 80, 135, 175, 225, 305, 375]} 19 | 20 | err_tol_list=[1e-4, 1e-6] 21 | method=1 # 0-Ga, 1-GaNi 22 | 23 | data_folder = "data_for_plot/time" 24 | 25 | for dim, material in itertools.product([2, 3], [2, 4]): 26 | if not os.path.exists('{}/dim_{}/mat_{}/'.format(data_folder,dim, material)): 27 | os.makedirs('{}/dim_{}/mat_{}/'.format(data_folder,dim, material)) 28 | 29 | N_list = N_lists['{}'.format(dim)] 30 | kind=kinds['{}'.format(dim)] 31 | 32 | full_time_list = [None]*len(N_list) 33 | sparse_time_list = [[None]*len(N_list), [None]*len(N_list)] 34 | rank_list = [[None]*len(N_list), [None]*len(N_list)] 35 | memory_list = [[None]*len(N_list), [None]*len(N_list)] 36 | 37 | for i, N in enumerate(N_list): 38 | # PARAMETERS ############################################################## 39 | pars, pars_sparse=get_default_parameters(dim, N, material, kind) 40 | pars.solver.update(dict(tol=1e-6)) 41 | 42 | # generating material coefficients 43 | if method in ['Ga',0]: 44 | Aga, Agani, Agas, Aganis=get_material_coef(material, pars, pars_sparse) 45 | print('\n== Full solution with potential by CG (Ga) ===========') 46 | 
resP_Ga=homog_Ga_full_potential(Aga, pars) 47 | print('mean of solution={}'.format(resP_Ga.Fu.mean())) 48 | print('homogenised properties (component 11) = {}'.format(resP_Ga.AH)) 49 | full_time_list[i]=resP_Ga.time 50 | elif method in ['GaNi',1]: 51 | Aga, Agani, Agas, Aganis=get_material_coef(material, pars, pars_sparse, ga=False) 52 | print('\n== Full solution with potential by CG (GaNi)===========') 53 | resP=homog_GaNi_full_potential(Agani, Aga, pars) 54 | print('mean of solution={}'.format(resP.Fu.mean())) 55 | print('homogenised properties (component 11) = {}'.format(resP.AH)) 56 | else: 57 | raise ValueError() 58 | 59 | full_time_list[i]=resP.time 60 | 61 | for counter, err_tol in enumerate(err_tol_list): 62 | 63 | for r in range(4, N+1, 2): 64 | pars_sparse.solver.update(dict(rank=r)) # rank of solution vector 65 | 66 | print('\n== format={}, N={}, dim={}, material={}, rank={}, err_tol={} ===='.format(pars_sparse.kind, 67 | N, dim, material, pars_sparse.solver['rank'], err_tol)) 68 | 69 | # PROBLEM DEFINITION ###################################################### 70 | if method in ['Ga',0]: 71 | print('\n== SPARSE solver with preconditioner (Ga) =======================') 72 | resS=homog_Ga_sparse(Agas, pars_sparse) 73 | print('mean of solution={}'.format(resS.Fu.mean())) 74 | print('homogenised properties (component 11) = {}'.format(resS.AH)) 75 | print('norm(resP)={}'.format(resS.solver['norm_res'])) 76 | elif method in ['GaNi',1]: 77 | print('\n== SPARSE solver with preconditioner (GaNi) =======================') 78 | resS=homog_GaNi_sparse(Aganis, Agas, pars_sparse) 79 | print('mean of solution={}'.format(resS.Fu.mean())) 80 | print('homogenised properties (component 11) = {}'.format(resS.AH)) 81 | print('iterations={}'.format(resS.solver['kit'])) 82 | print('norm(resP)={}'.format(resS.solver['norm_res'])) 83 | print('memory efficiency = {0}/{1} = {2}'.format(resS.Fu.memory, resP.Fu.val.size, resS.Fu.memory/resP.Fu.val.size)) 84 | print("solution discrepancy", (resS.AH - resP.AH)/resP.AH) 85 | 86 | if (resS.AH - resP.AH)/resP.AH <= err_tol: 87 | rank_list[counter][i]=r 88 | sparse_time_list[counter][i]=resS.time 89 | memory_list[counter][i]=resS.Fu.memory/resP.Fu.val.size # memory efficiency 90 | print("tensorsLowRank solver time:",sparse_time_list[counter]) 91 | print("full solver time:",full_time_list) 92 | print("rank:",rank_list[counter]) 93 | break 94 | 95 | print("tensorsLowRank solver time:",sparse_time_list) 96 | print("full solver time:",full_time_list) 97 | print("rank:",rank_list) 98 | 99 | pickle.dump(N_list, open("{}/dim_{}/mat_{}/N_list_{}.p" 100 | .format(data_folder,dim, material,kind_list[kind]), "wb")) 101 | pickle.dump(full_time_list, open("{}/dim_{}/mat_{}/full_time_list_{}.p" 102 | .format(data_folder,dim, material,kind_list[kind]), "wb")) 103 | pickle.dump(sparse_time_list[0], open(("{}/dim_{}/mat_{}/sparse_time_list_{}_"+"{:.0e}".format(err_tol_list[0])+'.p') 104 | .format(data_folder,dim, material,kind_list[kind]), "wb")) 105 | pickle.dump(sparse_time_list[1], open(("{}/dim_{}/mat_{}/sparse_time_list_{}_"+"{:.0e}".format(err_tol_list[1])+'.p') 106 | .format(data_folder,dim, material,kind_list[kind]), "wb")) 107 | pickle.dump(rank_list[0], open(("{}/dim_{}/mat_{}/rank_list_{}_"+"{:.0e}".format(err_tol_list[0])+'.p') 108 | .format(data_folder,dim, material,kind_list[kind]), "wb")) 109 | pickle.dump(rank_list[1], open(("{}/dim_{}/mat_{}/rank_list_{}_"+"{:.0e}".format(err_tol_list[1])+'.p') 110 | .format(data_folder,dim, material,kind_list[kind]), "wb")) 111 
| 112 | print('END') 113 | -------------------------------------------------------------------------------- /ffthompy/problem.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import ffthompy.applications 3 | from ffthompy.general.base import get_base_dir, Timer 4 | import os 5 | import sys 6 | from copy import deepcopy 7 | 8 | 9 | class Problem(object): 10 | """ 11 | Class that parse input file, calculates the physical problem, 12 | and post-process the results (calculates the homogenized properties.) 13 | """ 14 | def __init__(self, conf_problem=None, conf=None): 15 | """ 16 | Parameters 17 | ---------- 18 | conf_problem : dictionary 19 | particular problem from problems in input file; dictionary that 20 | usually contains following keywords: 'name', 'material', 'solve', 21 | 'solver', 'postprocess' 22 | conf : module 23 | configuration data from input file 24 | """ 25 | self.__dict__.update(conf_problem) 26 | if isinstance(self.material, str): 27 | conf_material = conf.materials[self.material] 28 | else: 29 | conf_material = self.material 30 | 31 | self.material = self.parse_material(conf_material) 32 | 33 | self.Y = np.array(self.material['Y'], dtype=np.float64) 34 | self.dim = self.Y.size 35 | 36 | if self.physics == 'scalar': 37 | self.shape = (self.dim,) 38 | elif self.physics == 'elasticity': 39 | self.shape = (int(self.dim*(self.dim+1)/2),) 40 | 41 | self.output = {} 42 | 43 | @staticmethod 44 | def parse_material(conf_material): 45 | """ 46 | Parse material from input file. 47 | """ 48 | if 'fun' in conf_material: 49 | material = conf_material 50 | else: 51 | material = conf_material 52 | for incl in material['inclusions']: 53 | if incl in ['all', 'otherwise']: 54 | n_incl = material['inclusions'].count(incl) 55 | if n_incl == 0: 56 | continue 57 | elif n_incl == 1: 58 | ind = material['inclusions'].index(incl) 59 | if ind == material['inclusions'].__len__() - 1: 60 | continue 61 | else: 62 | msg='inclusion ("all" or "otherwise") has to be at the end of the list' 63 | raise ValueError(msg) 64 | else: 65 | msg = "Maximal one occurrence of inclusion \ 66 | 'otherwise' or 'all' is allowed!" 67 | raise ValueError(msg) 68 | return material 69 | 70 | def calculate(self): 71 | """ 72 | Calculates the problem according to physical model. 73 | """ 74 | print('\n==============================') 75 | tim = Timer(name='application') 76 | if hasattr(ffthompy.applications, self.physics): 77 | eval('ffthompy.applications.{}(self)'.format(self.physics)) 78 | else: 79 | msg = 'Not implemented physics ({0}).\n' \ 80 | 'Hint: Implement function ({1}) into module' \ 81 | ' ffthompy!'.format(self.physics, self.physics) 82 | raise NotImplementedError(msg) 83 | tim.measure() 84 | 85 | def postprocessing(self): 86 | """ 87 | Post-process the results. Usually consists of plotting of homogenized 88 | properties. 
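If the problem definition contains a 'save' entry, the collected output is additionally pickled to the file given in save['filename'] (with save['data'] == 'all', metadata such as the material, solver and postprocess settings is stored alongside the results).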
89 | """ 90 | output = self.output 91 | if self.physics in ['scalar', 'elasticity']: 92 | print('\nHomogenized matrices') 93 | for primaldual in self.solve['primaldual']: 94 | for key, val in list(output['mat_'+primaldual].items()): 95 | print(key) 96 | print(val) 97 | 98 | if hasattr(self, 'save'): 99 | if 'data' not in self.save: 100 | self.save['data'] = 'all' 101 | 102 | if self.save['data'] == 'all': 103 | data = self.output 104 | data.update({'physics': self.physics, 105 | 'solve': self.solve, 106 | 'solver': self.solver, 107 | 'postprocess': self.postprocess, 108 | 'save': self.save, 109 | 'material': self.material}) 110 | 111 | filename = self.save['filename'] 112 | dirs = os.path.dirname(filename) 113 | if not os.path.exists(dirs) and dirs != '': 114 | os.makedirs(dirs) 115 | 116 | import pickle 117 | py_version = sys.version_info[0] 118 | if py_version == 2: 119 | with open(filename, 'w') as fop: 120 | data = deepcopy(self.output) 121 | pickle.dump(data, fop, protocol=None) 122 | elif py_version == 3: 123 | with open(filename, 'wb') as fop: 124 | pickle.dump(self.output, fop, protocol=3) 125 | else: 126 | raise NotImplementedError('Python version!') 127 | 128 | def __repr__(self): 129 | ss = "Class : {}\n".format(self.__class__.__name__) 130 | ss += ' name : {}\n'.format(self.name) 131 | ss += ' physics = {}\n'.format(self.physics) 132 | ss += ' dim = {} (dimension)\n'.format(self.dim) 133 | ss += ' Y = {} (PUC size)\n'.format(self.Y) 134 | ss += ' material:\n' 135 | for key, val in list(self.material.items()): 136 | ss += ' {0} : {1}\n'.format(key, str(val)) 137 | ss += ' solve:\n' 138 | for key, val in list(self.solve.items()): 139 | ss += ' {0} : {1}\n'.format(key, str(val)) 140 | return ss 141 | 142 | 143 | def import_file(file_name): 144 | base_dir = get_base_dir() 145 | module_path = os.path.dirname(os.path.join(base_dir, file_name)) 146 | 147 | if module_path not in sys.path: 148 | sys.path.insert(0, module_path) 149 | remove_path = True 150 | else: 151 | remove_path = False 152 | 153 | module_name = os.path.splitext(os.path.basename(file_name))[0] 154 | 155 | conf = __import__(module_name) 156 | 157 | if remove_path: 158 | sys.path.pop(0) 159 | 160 | return conf 161 | 162 | if __name__ == '__main__': 163 | exec(compile(open('../main_test.py').read(), '../main_test.py', 'exec')) 164 | -------------------------------------------------------------------------------- /ffthompy/applications.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import ffthompy.projections as proj 3 | from ffthompy.materials import Material 4 | from ffthompy.postprocess import postprocess, add_macro2minimizer 5 | from ffthompy.general.solver import linear_solver 6 | from ffthompy.general.solver_pp import CallBack, CallBack_GA 7 | from ffthompy.general.base import Timer 8 | from ffthompy.tensors import Tensor, DFT, Operator 9 | 10 | 11 | def scalar(problem): 12 | """ 13 | Homogenization of scalar elliptic problem. 
14 | 15 | Parameters 16 | ---------- 17 | problem : object 18 | """ 19 | print(' ') 20 | pb = problem 21 | print(pb) 22 | 23 | # Fourier projections 24 | _, hG1N, hG2N = proj.scalar(pb.solve['N'], pb.Y, NyqNul=True, tensor=True) 25 | 26 | if pb.solve['kind'] is 'GaNi': 27 | Nbar = pb.solve['N'] 28 | elif pb.solve['kind'] is 'Ga': 29 | Nbar = 2*pb.solve['N'] - 1 30 | hG1N = hG1N.enlarge(Nbar) 31 | hG2N = hG2N.enlarge(Nbar) 32 | 33 | FN = DFT(name='FN', inverse=False, N=Nbar) 34 | FiN = DFT(name='FiN', inverse=True, N=Nbar) 35 | 36 | G1N = Operator(name='G1', mat=[[FiN, hG1N, FN]]) 37 | G2N = Operator(name='G2', mat=[[FiN, hG2N, FN]]) 38 | 39 | for primaldual in pb.solve['primaldual']: 40 | tim = Timer(name='primal-dual') 41 | print(('\nproblem: ' + primaldual)) 42 | solutions = np.zeros(pb.shape).tolist() 43 | results = np.zeros(pb.shape).tolist() 44 | 45 | # material coefficients 46 | mat = Material(pb.material) 47 | 48 | if pb.solve['kind'] is 'GaNi': 49 | A = mat.get_A_GaNi(pb.solve['N'], primaldual) 50 | elif pb.solve['kind'] is 'Ga': 51 | A = mat.get_A_Ga(Nbar=Nbar, primaldual=primaldual) 52 | 53 | if primaldual is 'primal': 54 | GN = G1N 55 | else: 56 | GN = G2N 57 | 58 | Afun = Operator(name='FiGFA', mat=[[GN, A]]) 59 | 60 | for iL in np.arange(pb.dim): # iteration over unitary loads 61 | E = np.zeros(pb.dim) 62 | E[iL] = 1 63 | print(('macroscopic load E = ' + str(E))) 64 | EN = Tensor(name='EN', N=Nbar, shape=(pb.dim,), Fourier=False) 65 | EN.set_mean(E) 66 | # initial approximation for solvers 67 | x0 = Tensor(name='x0', N=Nbar, shape=(pb.dim,), Fourier=False) 68 | 69 | B = Afun(-EN) # RHS 70 | 71 | if not hasattr(pb.solver, 'callback'): 72 | cb = CallBack(A=Afun, B=B) 73 | elif pb.solver['callback'] == 'detailed': 74 | cb = CallBack_GA(A=Afun, B=B, EN=EN, A_Ga=A, GN=GN) 75 | else: 76 | raise NotImplementedError("The solver callback (%s) is not \ 77 | implemented" % (pb.solver['callback'])) 78 | 79 | print(('solver : {}'.format(pb.solver['kind']))) 80 | X, info = linear_solver(solver=pb.solver['kind'], Afun=Afun, B=B, 81 | x0=x0, par=pb.solver, callback=cb) 82 | 83 | solutions[iL] = add_macro2minimizer(X, E) 84 | results[iL] = {'cb': cb, 'info': info} 85 | print(cb) 86 | tim.measure() 87 | 88 | # POSTPROCESSING 89 | del Afun, B, E, EN, GN, X 90 | postprocess(pb, A, mat, solutions, results, primaldual) 91 | 92 | 93 | def elasticity(problem): 94 | """ 95 | Homogenization of linear elasticity. 
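The driver mirrors scalar(), except that two pairs of projection operators (hG1hN + hG1sN and hG2hN + hG2sN) are combined and the D = dim*(dim+1)/2 independent unitary strains serve as macroscopic loads.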
96 | 97 | Parameters 98 | ---------- 99 | problem : object 100 | """ 101 | print(' ') 102 | pb = problem 103 | print(pb) 104 | 105 | # Fourier projections 106 | _, hG1hN, hG1sN, hG2hN, hG2sN = proj.elasticity(pb.solve['N'], pb.Y, NyqNul=True) 107 | del _ 108 | 109 | if pb.solve['kind'] is 'GaNi': 110 | Nbar = pb.solve['N'] 111 | elif pb.solve['kind'] is 'Ga': 112 | Nbar = 2*pb.solve['N'] - 1 113 | hG1hN = hG1hN.enlarge(Nbar) 114 | hG1sN = hG1sN.enlarge(Nbar) 115 | hG2hN = hG2hN.enlarge(Nbar) 116 | hG2sN = hG2sN.enlarge(Nbar) 117 | 118 | FN = DFT(name='FN', inverse=False, N=Nbar) 119 | FiN = DFT(name='FiN', inverse=True, N=Nbar) 120 | 121 | G1N = Operator(name='G1', mat=[[FiN, hG1hN + hG1sN, FN]]) 122 | G2N = Operator(name='G2', mat=[[FiN, hG2hN + hG2sN, FN]]) 123 | 124 | for primaldual in pb.solve['primaldual']: 125 | tim = Timer(name='primal-dual') 126 | print(('\nproblem: ' + primaldual)) 127 | solutions = np.zeros(pb.shape).tolist() 128 | results = np.zeros(pb.shape).tolist() 129 | 130 | # material coefficients 131 | mat = Material(pb.material) 132 | 133 | if pb.solve['kind'] is 'GaNi': 134 | A = mat.get_A_GaNi(pb.solve['N'], primaldual) 135 | elif pb.solve['kind'] is 'Ga': 136 | A = mat.get_A_Ga(Nbar=Nbar, primaldual=primaldual) 137 | 138 | if primaldual is 'primal': 139 | GN = G1N 140 | else: 141 | GN = G2N 142 | 143 | Afun = Operator(name='FiGFA', mat=[[GN, A]]) 144 | 145 | D = int(pb.dim*(pb.dim+1)/2) 146 | for iL in range(D): # iteration over unitary loads 147 | E = np.zeros(D) 148 | E[iL] = 1 149 | print(('macroscopic load E = ' + str(E))) 150 | EN = Tensor(name='EN', N=Nbar, shape=(D,), Fourier=False) 151 | EN.set_mean(E) 152 | # initial approximation for solvers 153 | x0 = EN.zeros_like(name='x0') 154 | 155 | B = Afun(-EN) # RHS 156 | 157 | if not hasattr(pb.solver, 'callback'): 158 | cb = CallBack(A=Afun, B=B) 159 | elif pb.solver['callback'] == 'detailed': 160 | cb = CallBack_GA(A=Afun, B=B, EN=EN, A_Ga=A, GN=GN) 161 | else: 162 | raise NotImplementedError("The solver callback (%s) is not \ 163 | implemented" % (pb.solver['callback'])) 164 | 165 | print(('solver : %s' % pb.solver['kind'])) 166 | X, info = linear_solver(solver=pb.solver['kind'], Afun=Afun, B=B, 167 | x0=x0, par=pb.solver, callback=cb) 168 | 169 | solutions[iL] = add_macro2minimizer(X, E) 170 | results[iL] = {'cb': cb, 'info': info} 171 | print(cb) 172 | tim.measure() 173 | 174 | # POSTPROCESSING 175 | del Afun, B, E, EN, GN, X 176 | postprocess(pb, A, mat, solutions, results, primaldual) 177 | 178 | 179 | if __name__ == '__main__': 180 | exec(compile(open('../main_test.py').read(), '../main_test.py', 'exec')) 181 | -------------------------------------------------------------------------------- /ffthompy/matvecs/applications.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import ffthompy.projections as proj 3 | from ffthompy.matvecs import VecTri, DFT, LinOper 4 | from ffthompy.materials import Material 5 | from ffthompy.postprocess import postprocess, add_macro2minimizer 6 | from ffthompy.general.solver import linear_solver 7 | from ffthompy.general.solver_pp import CallBack, CallBack_GA 8 | from ffthompy.general.base import Timer 9 | 10 | 11 | def scalar(problem): 12 | """ 13 | Homogenization of scalar elliptic problem. 
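This variant is built on the matvecs classes (VecTri, DFT, LinOper) and follows the same Ga/GaNi workflow as ffthompy.applications.scalar.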
14 | 15 | Parameters 16 | ---------- 17 | problem : object 18 | """ 19 | print(' ') 20 | pb = problem 21 | print(pb) 22 | 23 | # Fourier projections 24 | _, hG1N, hG2N = proj.scalar(pb.solve['N'], pb.Y, centered=True, 25 | NyqNul=True) 26 | 27 | if pb.solve['kind'] is 'GaNi': 28 | Nbar = pb.solve['N'] 29 | elif pb.solve['kind'] is 'Ga': 30 | Nbar = 2*pb.solve['N'] - 1 31 | hG1N = hG1N.enlarge(Nbar) 32 | hG2N = hG2N.enlarge(Nbar) 33 | 34 | FN = DFT(name='FN', inverse=False, N=Nbar) 35 | FiN = DFT(name='FiN', inverse=True, N=Nbar) 36 | 37 | G1N = LinOper(name='G1', mat=[[FiN, hG1N, FN]]) 38 | G2N = LinOper(name='G2', mat=[[FiN, hG2N, FN]]) 39 | 40 | for primaldual in pb.solve['primaldual']: 41 | tim = Timer(name='primal-dual') 42 | print(('\nproblem: ' + primaldual)) 43 | solutions = np.zeros(pb.shape).tolist() 44 | results = np.zeros(pb.shape).tolist() 45 | 46 | # material coefficients 47 | mat = Material(pb.material) 48 | 49 | if pb.solve['kind'] is 'GaNi': 50 | A = mat.get_A_GaNi(pb.solve['N'], primaldual) 51 | elif pb.solve['kind'] is 'Ga': 52 | A = mat.get_A_Ga(Nbar=Nbar, primaldual=primaldual) 53 | 54 | if primaldual is 'primal': 55 | GN = G1N 56 | else: 57 | GN = G2N 58 | 59 | Afun = LinOper(name='FiGFA', mat=[[GN, A]]) 60 | 61 | for iL in np.arange(pb.dim): # iteration over unitary loads 62 | E = np.zeros(pb.dim) 63 | E[iL] = 1 64 | print(('macroscopic load E = ' + str(E))) 65 | EN = VecTri(name='EN', macroval=E, N=Nbar, Fourier=False) 66 | # initial approximation for solvers 67 | x0 = VecTri(name='x0', N=Nbar, Fourier=False) 68 | 69 | B = Afun(-EN) # RHS 70 | 71 | if not hasattr(pb.solver, 'callback'): 72 | cb = CallBack(A=Afun, B=B) 73 | elif pb.solver['callback'] == 'detailed': 74 | cb = CallBack_GA(A=Afun, B=B, EN=EN, A_Ga=A, GN=GN) 75 | else: 76 | raise NotImplementedError("The solver callback (%s) is not \ 77 | implemented" % (pb.solver['callback'])) 78 | 79 | print(('solver : %s' % pb.solver['kind'])) 80 | X, info = linear_solver(solver=pb.solver['kind'], Afun=Afun, B=B, 81 | x0=x0, par=pb.solver, callback=cb) 82 | 83 | solutions[iL] = add_macro2minimizer(X, E) 84 | results[iL] = {'cb': cb, 'info': info} 85 | print(cb) 86 | tim.measure() 87 | 88 | # POSTPROCESSING 89 | del Afun, B, E, EN, GN, X 90 | postprocess(pb, A, mat, solutions, results, primaldual) 91 | 92 | 93 | def elasticity(problem): 94 | """ 95 | Homogenization of linear elasticity. 
96 | 97 | Parameters 98 | ---------- 99 | problem : object 100 | """ 101 | print(' ') 102 | pb = problem 103 | print(pb) 104 | 105 | # Fourier projections 106 | _, hG1hN, hG1sN, hG2hN, hG2sN = proj.elasticity(pb.solve['N'], pb.Y, 107 | centered=True, NyqNul=True) 108 | del _ 109 | 110 | if pb.solve['kind'] is 'GaNi': 111 | Nbar = pb.solve['N'] 112 | elif pb.solve['kind'] is 'Ga': 113 | Nbar = 2*pb.solve['N'] - 1 114 | hG1hN = hG1hN.enlarge(Nbar) 115 | hG1sN = hG1sN.enlarge(Nbar) 116 | hG2hN = hG2hN.enlarge(Nbar) 117 | hG2sN = hG2sN.enlarge(Nbar) 118 | 119 | FN = DFT(name='FN', inverse=False, N=Nbar) 120 | FiN = DFT(name='FiN', inverse=True, N=Nbar) 121 | 122 | G1N = LinOper(name='G1', mat=[[FiN, hG1hN + hG1sN, FN]]) 123 | G2N = LinOper(name='G2', mat=[[FiN, hG2hN + hG2sN, FN]]) 124 | 125 | for primaldual in pb.solve['primaldual']: 126 | tim = Timer(name='primal-dual') 127 | print(('\nproblem: ' + primaldual)) 128 | solutions = np.zeros(pb.shape).tolist() 129 | results = np.zeros(pb.shape).tolist() 130 | 131 | # material coefficients 132 | mat = Material(pb.material) 133 | 134 | if pb.solve['kind'] is 'GaNi': 135 | A = mat.get_A_GaNi(pb.solve['N'], primaldual) 136 | elif pb.solve['kind'] is 'Ga': 137 | A = mat.get_A_Ga(Nbar=Nbar, primaldual=primaldual) 138 | 139 | if primaldual is 'primal': 140 | GN = G1N 141 | else: 142 | GN = G2N 143 | 144 | Afun = LinOper(name='FiGFA', mat=[[GN, A]]) 145 | 146 | D = int(pb.dim*(pb.dim+1)/2) 147 | for iL in range(D): # iteration over unitary loads 148 | E = np.zeros(D) 149 | E[iL] = 1 150 | print(('macroscopic load E = ' + str(E))) 151 | EN = VecTri(name='EN', macroval=E, N=Nbar, Fourier=False) 152 | # initial approximation for solvers 153 | x0 = VecTri(N=Nbar, d=D, Fourier=False) 154 | 155 | B = Afun(-EN) # RHS 156 | 157 | if not hasattr(pb.solver, 'callback'): 158 | cb = CallBack(A=Afun, B=B) 159 | elif pb.solver['callback'] == 'detailed': 160 | cb = CallBack_GA(A=Afun, B=B, EN=EN, A_Ga=A, GN=GN) 161 | else: 162 | raise NotImplementedError("The solver callback (%s) is not \ 163 | implemented" % (pb.solver['callback'])) 164 | 165 | print(('solver : %s' % pb.solver['kind'])) 166 | X, info = linear_solver(solver=pb.solver['kind'], Afun=Afun, B=B, 167 | x0=x0, par=pb.solver, callback=cb) 168 | 169 | solutions[iL] = add_macro2minimizer(X, E) 170 | results[iL] = {'cb': cb, 'info': info} 171 | print(cb) 172 | tim.measure() 173 | 174 | # POSTPROCESSING 175 | del Afun, B, E, EN, GN, X 176 | postprocess(pb, A, mat, solutions, results, primaldual) 177 | 178 | 179 | if __name__ == '__main__': 180 | exec(compile(open('../main_test.py').read(), '../main_test.py', 'exec')) 181 | -------------------------------------------------------------------------------- /examples/scalar/scalar_2d.py: -------------------------------------------------------------------------------- 1 | """ 2 | Input file for a scalar linear elliptic problems. 
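It defines a dictionary of materials and a list of problem definitions; run it from the repository root as ./main.py examples/scalar/scalar_2d.py, or execute this file directly to invoke main.py via the __main__ block below.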
3 | """ 4 | 5 | import numpy as np 6 | import os 7 | from ffthompy.general.base import get_base_dir 8 | 9 | base_dir = get_base_dir() 10 | 11 | dim = 2 12 | N = 5*np.ones(dim, dtype=np.int32) 13 | tol = 1e-8 14 | 15 | materials = {'square': {'inclusions': ['square', 'otherwise'], 16 | 'positions': [np.zeros(dim), ''], 17 | 'params': [0.6*np.ones(dim), ''], # size of sides 18 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 19 | 'Y': np.ones(dim), 20 | 'order': None, 21 | }, 22 | 'square_Ga': {'inclusions': ['square', 'otherwise'], 23 | 'positions': [np.zeros(dim), ''], 24 | 'params': [0.6*np.ones(dim), ''], # size of sides 25 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 26 | 'Y': np.ones(dim), 27 | 'order': 0, 28 | 'P': N, 29 | }, 30 | 'square2': {'inclusions': ['square', 'otherwise'], 31 | 'positions': [np.zeros(dim), ''], 32 | 'params': [1.2*np.ones(dim), ''], # size of sides 33 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 34 | 'Y': 2.*np.ones(dim), 35 | 'order': None 36 | }, 37 | 'ball': {'inclusions': ['ball', 'otherwise'], 38 | 'positions': [np.zeros(dim), ''], 39 | 'params': [1., ''], # diamater 40 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 41 | 'Y': 1.*np.ones(dim), 42 | 'order': None 43 | }, 44 | 'ball2': {'inclusions': ['ball', 'otherwise'], 45 | 'positions': [np.zeros(dim), ''], 46 | 'params': [2., ''], # diamater 47 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 48 | 'Y': 2.*np.ones(dim), 49 | 'order': None 50 | }, 51 | 'laminate': {'inclusions': ['square', 'otherwise'], 52 | 'positions': [np.zeros(dim), ''], 53 | 'params': [np.array([1., 0.5]), ''], 54 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 55 | 'Y': 1.*np.ones(dim), 56 | 'order': None 57 | }, 58 | 'laminate2': {'inclusions': ['square', 'otherwise'], 59 | 'positions': [np.zeros(dim), ''], 60 | 'params': [np.array([2., 1.0]), ''], 61 | 'vals': [11.*np.eye(dim), 1.*np.eye(dim)], 62 | 'Y': 2.*np.ones(dim), 63 | 'order': None 64 | }, 65 | 'pyramid': {'inclusions': ['pyramid', 'all'], 66 | 'positions': [np.zeros(dim), ''], 67 | 'params': [1.*np.ones(dim), ''], # size of sides 68 | 'vals': [10.*np.eye(dim), 1.*np.eye(dim)], 69 | 'Y': np.ones(dim), 70 | 'order': None, 71 | }, 72 | } 73 | 74 | 75 | problems = [ 76 | {'name': 'prob1', 77 | 'physics': 'scalar', 78 | 'material': 'square', 79 | 'solve': {'kind': 'GaNi', 80 | 'N': N, 81 | 'primaldual': ['primal', 'dual']}, 82 | 'postprocess': [{'kind': 'GaNi'}, 83 | {'kind': 'Ga', 84 | 'order': None}, 85 | {'kind': 'Ga', 86 | 'order': 0, 87 | 'P': N}, 88 | {'kind': 'Ga', 89 | 'order': 1, 90 | 'P': 27*N}], 91 | 'solver': {'kind': 'CG', 92 | 'tol': tol, 93 | 'maxiter': 1e3}, 94 | 'save': {'filename': os.path.join(base_dir, 'temp/scalar_2d_prob1'), 95 | 'data': 'all'}, 96 | }, 97 | {'name': 'prob2', 98 | 'physics': 'scalar', 99 | 'material': 'square', 100 | 'solve': {'kind': 'Ga', 101 | 'N': N, 102 | 'primaldual': ['primal', 'dual']}, 103 | 'postprocess': [{'kind': 'Ga', 104 | }], 105 | 'solver': {'kind': 'CG', 106 | 'tol': tol, 107 | 'maxiter': 1e3}, 108 | 'save': {'filename': os.path.join(base_dir, 'temp/scalar_2d_prob2'), 109 | 'data': 'all'}, 110 | }, 111 | {'name': 'prob3', 112 | 'physics': 'scalar', 113 | 'material': 'square_Ga', 114 | 'solve': {'kind': 'Ga', 115 | 'N': N, 116 | 'primaldual': ['primal', 'dual']}, 117 | 'postprocess': [{'kind': 'Ga', 118 | },], 119 | 'solver': {'kind': 'CG', 120 | 'tol': tol, 121 | 'maxiter': 1e3}, 122 | 'save': {'filename': os.path.join(base_dir, 'temp/scalar_2d_prob3'), 123 | 'data': 'all'}, 124 | }, 125 | {'name': 'prob4', 126 | 'physics': 'scalar', 127 | 
'material': 'pyramid', 128 | 'solve': {'kind': 'Ga', 129 | 'N': N, 130 | 'primaldual': ['primal']}, 131 | 'postprocess': [{'kind': 'Ga', 132 | 'order': None}, 133 | {'kind': 'Ga', 134 | 'order': 0, 135 | 'P': N}, 136 | {'kind': 'Ga', 137 | 'order': 1, 138 | 'P': N}], 139 | 'solver': {'kind': 'CG', 140 | 'tol': tol, 141 | 'maxiter': 1e3}, 142 | 'save': {'filename': os.path.join(base_dir, 'temp/scalar_2d_prob4'), 143 | 'data': 'all'}, 144 | }, 145 | {'name': 'prob5', 146 | 'physics': 'scalar', 147 | 'material': 'ball', 148 | 'solve': {'kind': 'Ga', 149 | 'N': N, 150 | 'primaldual': ['primal', 'dual']}, 151 | 'postprocess': [{'kind': 'Ga', 152 | 'order': None}, 153 | {'kind': 'Ga', 154 | 'order': 0, 155 | 'P': N}], 156 | 'solver': {'kind': 'CG', 157 | 'tol': tol, 158 | 'maxiter': 1e3}, 159 | 'save': {'filename': os.path.join(base_dir, 'temp/scalar_2d_prob5'), 160 | 'data': 'all'}, 161 | },] 162 | 163 | 164 | if __name__=='__main__': 165 | import subprocess 166 | subprocess.call(['../../main.py', __file__]) 167 | -------------------------------------------------------------------------------- /ffthompy/trigpol.py: -------------------------------------------------------------------------------- 1 | """Basic methods for trigonometric polynomials.""" 2 | 3 | 4 | import numpy as np 5 | 6 | fft_form_default='r' # real input data 7 | 8 | 9 | class Grid(): 10 | @staticmethod 11 | def get_ZNl(N, fft_form=fft_form_default): 12 | r""" 13 | it produces index set ZNl=\underline{\set{Z}}^d_N : 14 | ZNl[i][j]\in\set{Z} : -N[i]/2 <= ZNl[i] < N[i]/2 15 | """ 16 | ZNl = [] 17 | N = np.atleast_1d(np.array(N, dtype=np.int)) 18 | for m in range(N.size): 19 | ZNl.append(np.arange(np.fix(-N[m]/2.), np.fix(N[m]/2.+0.5), 20 | dtype=np.int)) 21 | if fft_form in ['r',0]: 22 | return [np.fft.ifftshift(val) for val in ZNl] 23 | else: 24 | return ZNl 25 | 26 | @staticmethod 27 | def get_xil(N, Y, fft_form=fft_form_default): 28 | """ 29 | it produces discrete frequencies of Fourier series 30 | xil[i] = ZNl[i]/Y[i] 31 | """ 32 | xil = [] 33 | for m in np.arange(np.size(N)): 34 | xil.append(np.arange(np.fix(-N[m]/2.), np.fix(N[m]/2.+0.5))/Y[m]) 35 | if fft_form in ['r',]: 36 | xil=[np.fft.ifftshift(xi) for xi in xil] 37 | xil[-1] = xil[-1][:int(np.fix(N[-1]/2)+1)] 38 | elif fft_form in [0]: 39 | xil = [np.fft.ifftshift(xi) for xi in xil] 40 | return xil 41 | 42 | @staticmethod 43 | def get_freq(N, Y, fft_form=fft_form_default): 44 | return Grid.get_xil(N, Y, fft_form=fft_form) 45 | 46 | @staticmethod 47 | def get_product(xi): 48 | xis = np.atleast_2d(xi[0]) 49 | for ii in range(1, len(xi)): 50 | xis_new = np.tile(xi[ii], xis.shape[1]) 51 | xis_old = np.repeat(xis, xi[ii].size, axis=1) 52 | xis = np.vstack([xis_old, xis_new]) 53 | return xis 54 | 55 | @staticmethod 56 | def get_coordinates(N, Y): 57 | """ 58 | It produces coordinates of the set of nodal points 59 | Coord[i][j] = x_N^{(i,j)} 60 | """ 61 | d = np.size(N) 62 | ZNl = Grid.get_ZNl(N, fft_form='c') 63 | coord = np.zeros(np.hstack([d, N])) 64 | for ii in np.arange(d): 65 | x = Y[ii]*ZNl[ii]/N[ii] 66 | Nshape = np.ones(d, dtype=np.int) 67 | Nshape[ii] = N[ii] 68 | Nrep = np.copy(N) 69 | Nrep[ii] = 1 70 | coord[ii] = np.tile(np.reshape(x, Nshape), Nrep) 71 | return coord 72 | 73 | 74 | class TrigPolBasis(Grid): 75 | """ 76 | This represents a basis functions of trigonometric polynomials. 
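Depending on whether the number of grid points N is supplied, a basis function of order k is evaluated either as a shape (interpolation) function on the grid of N points (eval_phi_k_N) or as the Fourier basis function exp(2*pi*1j*k*x/Y) (eval_phi_k).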
77 | """ 78 | def eval_phi_k_N(self, x): 79 | val = np.zeros_like(x, dtype=np.complex128) 80 | coef = 1./np.prod(self.N) 81 | for ii in self.get_ZNl(self.N)[0]: 82 | val += coef*np.exp(2*1j*np.pi*ii*(x/self.Y - self.order/self.N)) 83 | return val 84 | 85 | def eval_phi_k(self, x): 86 | val = np.exp(2*np.pi*1j*x*self.order/self.Y) 87 | return val 88 | 89 | def get_nodes(self): 90 | ZNl = self.get_ZNl(self.N)[0] 91 | x_nodes = ZNl*self.Y/self.N 92 | vals = np.zeros_like(x_nodes) 93 | ind = self.order + np.fix(self.N/2) 94 | vals[ind] = 1 95 | return x_nodes, vals 96 | 97 | def __init__(self, order, N=None, Y=None): 98 | self.order = order 99 | self.dim = np.size(order) 100 | self.N = np.array(N) 101 | 102 | if Y is None: 103 | self.Y = np.ones(self.dim) 104 | 105 | if N is None: 106 | self.Fourier = True 107 | self.eval = self.eval_phi_k 108 | else: 109 | self.Fourier = False 110 | self.eval = self.eval_phi_k_N 111 | 112 | def __repr__(self): 113 | if self.Fourier: 114 | ss = "Fourier basis function for k = %d" % (self.order,) 115 | else: 116 | ss = "Shape basis function for k = %d and N = %s" \ 117 | % (self.order, str(self.N)) 118 | return ss 119 | 120 | def get_inverse(A): 121 | """ 122 | It calculates the inverse of conductivity coefficients at grid points, 123 | i.e. of matrix A_GaNi 124 | 125 | Parameters 126 | ---------- 127 | A : numpy.ndarray 128 | 129 | Returns 130 | ------- 131 | invA : numpy.ndarray 132 | """ 133 | B = np.copy(A) 134 | N = np.array(A.shape[2:]) 135 | d = A.shape[0] 136 | if A.shape[0] != A.shape[1]: 137 | raise NotImplementedError("Non-square matrix!") 138 | 139 | invA = np.eye(d).tolist() 140 | for m in np.arange(d): 141 | Bdiag = np.copy(B[m][m]) 142 | B[m][m] = np.ones(N) 143 | for n in np.arange(m+1, d): 144 | B[m][n] = B[m][n]/Bdiag 145 | for n in np.arange(d): 146 | invA[m][n] = invA[m][n]/Bdiag 147 | for k in np.arange(m+1, d): 148 | Bnull = np.copy(B[k][m]) 149 | for l in np.arange(d): 150 | B[k][l] = B[k][l] - B[m][l]*Bnull 151 | invA[k][l] = invA[k][l] - invA[m][l]*Bnull 152 | for m in np.arange(d-1, -1, -1): 153 | for k in np.arange(m-1, -1, -1): 154 | Bnull = np.copy(B[k][m]) 155 | for l in np.arange(d): 156 | B[k][l] = B[k][l] - B[m][l]*Bnull 157 | invA[k][l] = invA[k][l] - invA[m][l]*Bnull 158 | invA = np.array(invA) 159 | return invA 160 | 161 | 162 | def enlarge(xN, M): 163 | """ 164 | Enlarge an array of Fourier coefficients by zeros. 165 | 166 | Parameters 167 | ---------- 168 | xN : numpy.ndarray of shape = N 169 | input array that is to be enlarged 170 | 171 | Returns 172 | ------- 173 | xM : numpy.ndarray of shape = M 174 | output array that is enlarged 175 | M : array like 176 | number of grid points 177 | """ 178 | M = np.array(M, dtype=np.float) 179 | N = np.array(xN.shape, dtype=np.float) 180 | if np.allclose(M, N): 181 | return xN 182 | 183 | ibeg = np.ceil((M-N)/2).astype(dtype=np.int) 184 | iend = np.ceil((M+N)/2).astype(dtype=np.int) 185 | 186 | slc=[slice(ibeg[i],iend[i],1) for i in range(N.size)] 187 | xM = np.zeros(M.astype(dtype=np.int), dtype=xN.dtype) 188 | xM[tuple(slc)]=xN 189 | return xM 190 | 191 | def decrease(xN, M): 192 | """ 193 | Decreases an array of Fourier coefficients by omitting the highest 194 | frequencies. 
195 | 196 | Parameters 197 | ---------- 198 | xN : numpy.ndarray of shape = N 199 | input array that is to be enlarged 200 | 201 | Returns 202 | ------- 203 | xM : numpy.ndarray of shape = M 204 | output array that is enlarged 205 | M : array like 206 | number of grid points 207 | """ 208 | M = np.array(M, dtype=np.float) 209 | N = np.array(xN.shape, dtype=np.float) 210 | ibeg = np.fix((N-M+(M % 2))/2).astype(dtype=np.int) 211 | iend = np.fix((N+M+(M % 2))/2).astype(dtype=np.int) 212 | 213 | slc=[slice(ibeg[i],iend[i],1) for i in range(N.size)] 214 | return xN[tuple(slc)] 215 | 216 | def get_Nodd(N): 217 | Nodd = N - ((N + 1) % 2) 218 | return Nodd 219 | 220 | def mean_index(N, fft_form=fft_form_default): 221 | if fft_form in [0, 'r']: 222 | return tuple(np.zeros_like(N, dtype=np.int)) 223 | elif fft_form in ['c']: 224 | return tuple(np.array(np.fix(np.array(N)/2), dtype=np.int)) 225 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/FFTHomPy.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/FFTHomPy.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/FFTHomPy" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/FFTHomPy" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
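# Typical usage (assuming Sphinx is installed): running `make html` in this directory builds the HTML documentation into _build/html.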
178 | -------------------------------------------------------------------------------- /ffthompy/tensorsLowRank/solver.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ffthompy import Timer 3 | from ffthompy.tensorsLowRank.objects import SparseTensor 4 | 5 | def linear_solver(method, Afun, B, par): 6 | if method in ['Richardson','richardson','r','R']: 7 | Fu, ress=richardson(Afun=Afun, B=B, par=par) 8 | elif method in ['Chebyshev','chebyshev','c','C']: 9 | Fu, ress=cheby2TERM(Afun=Afun, B=B, par=par) 10 | elif method in ['minimal_residual','mr','m','M']: 11 | Fu, ress=minimal_residual(Afun=Afun, B=B, par=par) 12 | elif method in ['minimal_residual_debug','mrd']: 13 | Fu, ress=minimal_residual_debug(Afun=Afun, B=B, par=par) 14 | return Fu, ress 15 | 16 | def cheby2TERM(Afun, B, x0=None, par={}, callback=None): 17 | """ 18 | Chebyshev two-term iterative solver 19 | 20 | Parameters 21 | ---------- 22 | Afun : a function, represnting linear function A in the system Ax =B 23 | B : tensorsLowRank tensor representing vector B in the right-hand side of linear system 24 | x0 : tensorsLowRank tensor representing initial approximation of solution of linear system 25 | par : dict 26 | parameters of the method 27 | callback : 28 | 29 | Returns 30 | ------- 31 | x : resulting unknown vector 32 | res : dict 33 | results 34 | """ 35 | 36 | if 'tol' not in par: 37 | par['tol'] = 1e-06 38 | if 'maxiter' not in par: 39 | par['maxiter'] = 1e7 40 | if 'eigrange' not in par: 41 | raise NotImplementedError("It is necessary to calculate eigenvalues.") 42 | else: 43 | Egv = par['eigrange'] 44 | 45 | res={'norm_res': [], 46 | 'kit': 0} 47 | 48 | bnrm2 = B.norm() 49 | Ib = 1.0/bnrm2 50 | if bnrm2 == 0: 51 | bnrm2 = 1.0 52 | 53 | if x0 is None: 54 | x=B 55 | else: 56 | x=x0 57 | 58 | r = B - Afun(x) 59 | r0=r.norm() 60 | res['norm_res'].append(Ib*r0)# For Normal Residue 61 | 62 | if res['norm_res'][-1] < par['tol']: # if errnorm is less than tol 63 | return x, res 64 | 65 | M=SparseTensor(kind=x.kind, val=np.ones(x.N.size*[3,]), rank=1) # constant field 66 | FM=M.fourier().enlarge(x.N) 67 | 68 | d = (Egv[1]+Egv[0])/2.0 # np.mean(par['eigrange']) 69 | c = (Egv[1]-Egv[0])/2.0 # par['eigrange'][1] - d 70 | v = x*0.0 71 | while (res['norm_res'][-1] > par['tol']) and (res['kit'] < par['maxiter']): 72 | res['kit'] += 1 73 | x_prev = x 74 | if res['kit'] == 1: 75 | p = 0 76 | w = 1/d 77 | elif res['kit'] == 2: 78 | p = -(1/2)*(c/d)*(c/d) 79 | w = 1/(d-c*c/2/d) 80 | else: 81 | p = -(c*c/4)*w*w 82 | w = 1/(d-c*c*w/4) 83 | v = (r - p*v).truncate(rank=par['rank'], tol=par['tol_truncate']) 84 | x = (x_prev + w*v) 85 | x=(-FM*x.mean()+x).truncate(rank=par['tol'], tol=par['tol_truncate']) # setting correct mean 86 | r = B - Afun(x) 87 | 88 | res['norm_res'].append((1.0/r0)*r.norm()) 89 | 90 | if callback is not None: 91 | callback(x) 92 | 93 | if par['tol'] < res['norm_res']: # if tolerance is less than error norm 94 | print("Chebyshev solver does not converges!") 95 | else: 96 | print("Chebyshev solver converges.") 97 | 98 | if res['kit'] == 0: 99 | res['norm_res'] = 0 100 | return x, res 101 | 102 | def minimal_residual(Afun, B, x0=None, par=None): 103 | fast=par.get('fast') 104 | 105 | res={'norm_res': [], 106 | 'kit': 0} 107 | if x0 is None: 108 | x=B*(1./par['alpha']) 109 | else: 110 | x=x0 111 | x_sol=x # solution with minimal residuum 112 | 113 | if 'norm' not in par: 114 | norm=lambda X: X.norm(normal_domain=False) 115 | 116 | residuum=B-Afun(x) 117 | 
res['norm_res'].append(norm(residuum)) 118 | beta=Afun(residuum.truncate(tol=par['tol_truncate'], fast=fast)) 119 | 120 | M=SparseTensor(kind=x.kind, val=np.ones(x.N.size*[3,]), rank=1) # constant field 121 | FM=M.fourier().enlarge(x.N) 122 | minres_fail_counter=0 123 | 124 | while (res['norm_res'][-1] > par['tol'] and res['kit'] < par['maxiter']): 125 | res['kit']+=1 126 | 127 | if par['approx_omega']: 128 | omega=res['norm_res'][-1] / norm(beta) # approximate omega 129 | else: 130 | omega= beta.inner(residuum)/norm(beta)**2 # exact formula 131 | 132 | x=(x+residuum*omega) 133 | 134 | # setting correct mean 135 | x=(-FM*x.mean()+x).truncate(rank=par['rank'], tol=par['tol_truncate'], fast=fast) 136 | 137 | residuum=B-Afun(x) 138 | 139 | res['norm_res'].append(norm(residuum)) 140 | 141 | if res['norm_res'][-1] <= np.min(res['norm_res'][:-1]): 142 | x_sol=x 143 | else: 144 | minres_fail_counter+=1 145 | if minres_fail_counter>=par['minres_fails']: 146 | print('Residuum has risen up {} times -> ending solver.'.format(par['minres_fails'])) 147 | break 148 | 149 | beta=Afun(residuum.truncate(tol=min([res['norm_res'][-1]/1e1, par['tol']]), fast=fast)) 150 | 151 | return x_sol, res 152 | 153 | 154 | def minimal_residual_debug(Afun, B, x0=None, par=None): 155 | fast=par.get('fast') 156 | 157 | M=SparseTensor(kind=B.kind, val=np.ones(B.N.size*[3, ]), rank=1) # constant field 158 | FM=M.fourier().enlarge(B.N) 159 | 160 | res={'norm_res': [], 161 | 'kit': 0} 162 | if x0 is None: 163 | x=B*(1./par['alpha']) 164 | else: 165 | x=x0 166 | 167 | if 'norm' not in par: 168 | norm=lambda X: X.norm(normal_domain=False) 169 | 170 | residuum=(B-Afun(x)).truncate(rank=None, tol=par['tol'], fast=fast) 171 | res['norm_res'].append(norm(residuum)) 172 | beta=Afun(residuum) 173 | 174 | norm_res=res['norm_res'][res['kit']] 175 | 176 | while (norm_res>par['tol'] and res['kit']res['norm_res'][-1]: 198 | break 199 | res['norm_res'].append(norm_res) 200 | 201 | tic=Timer('truncate residuum') 202 | # residuum_for_beta=residuum.truncate(rank=rank, tol=tol) 203 | # residuum_for_beta=residuum.truncate(rank=None, tol=1-4) 204 | tol=min([norm_res/1e1, par['tol']]) 205 | residuum_for_beta=residuum.truncate(rank=None, tol=tol, fast=fast) 206 | tic.measure() 207 | print('tolerance={}, rank={}'.format(tol, residuum_for_beta.r)) 208 | print('residuum_for_beta.r={}'.format(residuum_for_beta.r)) 209 | tic=Timer('compute beta') 210 | beta=Afun(residuum_for_beta) 211 | tic.measure() 212 | pass 213 | return x, res 214 | 215 | def richardson(Afun, B, x0=None, rank=None, tol=None, par=None, norm=None): 216 | if isinstance(par['alpha'], float): 217 | omega=1./par['alpha'] 218 | else: 219 | raise ValueError() 220 | res={'norm_res': [], 221 | 'kit': 0} 222 | if x0 is None: 223 | x=B*omega 224 | else: 225 | x=x0 226 | 227 | if norm is None: 228 | norm=lambda X: X.norm() 229 | 230 | res['norm_res'].append(norm(B)) 231 | 232 | M=SparseTensor(kind=x.kind, val=np.ones(x.N.size*[3,]), rank=1) # constant field 233 | FM=M.fourier().enlarge(x.N) 234 | 235 | norm_res=1e15 236 | while (norm_res>par['tol'] and res['kit']res['norm_res'][res['kit']-1]: 241 | break 242 | 243 | x=(x+residuum*omega) 244 | x=(-FM*x.mean()+x).truncate(rank=rank, tol=tol, fast=True) # setting correct mean 245 | 246 | res['norm_res'].append(norm_res) 247 | 248 | return x, res 249 | -------------------------------------------------------------------------------- /ffthompy/tensors/unittest_operators.py: 
-------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | from numpy.linalg import norm 4 | from ffthompy import PrintControl 5 | import ffthompy.projections as proj 6 | from ffthompy.tensors import Tensor, DFT, grad, div, symgrad, potential, Operator, grad_div_tensor 7 | from ffthompy.tensors.projection import scalar, elasticity_small_strain, elasticity_large_deformation 8 | import itertools 9 | from copy import copy 10 | 11 | prt=PrintControl() 12 | fft_forms=[0, 'r', 'c'] 13 | 14 | 15 | class Test_operators(unittest.TestCase): 16 | 17 | def setUp(self): 18 | pass 19 | 20 | def tearDown(self): 21 | pass 22 | 23 | def test_operators(self): 24 | print('\nChecking operators...') 25 | for dim, fft_form in itertools.product([2, 3], fft_forms): 26 | N=5*np.ones(dim, dtype=np.int) 27 | F=DFT(N=N, inverse=False, fft_form=fft_form) 28 | iF=DFT(N=N, inverse=True, fft_form=fft_form) 29 | 30 | # Fourier transform 31 | prt.disable() 32 | print(F) # checking representation 33 | prt.enable() 34 | 35 | u=Tensor(name='u', shape=(), N=N, Fourier=False, 36 | fft_form=fft_form).randomize() 37 | Fu=F(u) 38 | u2=iF(Fu) 39 | self.assertAlmostEqual(0, (u==u2)[1], delta=1e-13, msg='Fourier transform') 40 | 41 | fft_formsC=copy(fft_forms) 42 | fft_formsC.remove(fft_form) 43 | for fft_formc in fft_formsC: 44 | FuC=Fu.set_fft_form(fft_formc, copy=True) 45 | Fu2=FuC.set_fft_form(fft_form, copy=True) 46 | msg='Tensor.set_fft_form()' 47 | self.assertAlmostEqual(0, Fu.norm()-FuC.norm(), delta=1e-13, msg=msg) 48 | self.assertAlmostEqual(0, norm(Fu.mean()-FuC.mean()), delta=1e-13, msg=msg) 49 | self.assertAlmostEqual(0, (Fu==Fu2)[1], delta=1e-13, msg=msg) 50 | 51 | # scalar problem 52 | u=Tensor(name='u', shape=(1,), N=N, Fourier=False, 53 | fft_form=fft_form).randomize() 54 | u.val-=np.mean(u.val) 55 | Fu=F(u) 56 | Fu2=potential(grad(Fu)) 57 | self.assertAlmostEqual(0, (Fu==Fu2)[1], delta=1e-13, 58 | msg='scalar problem, Fourier=True') 59 | 60 | u2=potential(grad(u)) 61 | self.assertAlmostEqual(0, (u==u2)[1], delta=1e-13, 62 | msg='scalar problem, Fourier=False') 63 | 64 | hG, hD=grad_div_tensor(N, fft_form=fft_form) 65 | self.assertAlmostEqual(0, (hD(hG(Fu))==div(grad(Fu)))[1], delta=1e-13, 66 | msg='scalar problem, Fourier=True') 67 | 68 | # vectorial problem 69 | u=Tensor(name='u', shape=(dim,), N=N, Fourier=False, fft_form=fft_form) 70 | u.randomize() 71 | u.add_mean(-u.mean()) 72 | 73 | Fu=F(u) 74 | Fu2=potential(grad(Fu)) 75 | self.assertAlmostEqual(0, (Fu==Fu2)[1], delta=1e-13, 76 | msg='vectorial problem, Fourier=True') 77 | 78 | u2=potential(grad(u)) 79 | self.assertAlmostEqual(0, (u==u2)[1], delta=1e-13, 80 | msg='vectorial problem, Fourier=False') 81 | 82 | # 'vectorial problem - symetric gradient 83 | Fu2=potential(symgrad(Fu), small_strain=True) 84 | self.assertAlmostEqual(0, (Fu==Fu2)[1], delta=1e-13, 85 | msg='vectorial - sym, Fourier=True') 86 | 87 | u2=potential(symgrad(u), small_strain=True) 88 | self.assertAlmostEqual(0, (u==u2)[1], delta=1e-13, 89 | msg='vectorial - sym, Fourier=False') 90 | 91 | # matrix version of DFT 92 | u=Tensor(name='u', shape=(1,), N=N, Fourier=False, 93 | fft_form='c').randomize() 94 | F=DFT(N=N, inverse=False, fft_form='c') 95 | Fu=F(u) 96 | dft=F.matrix(shape=u.shape) 97 | Fu2=dft.dot(u.val.ravel()) 98 | self.assertAlmostEqual(0, norm(Fu.val.ravel()-Fu2), delta=1e-13) 99 | 100 | print('...ok') 101 | 102 | def test_compatibility(self): 103 | print('\nChecking compatibility...') 104 | for dim, fft_form in 
itertools.product([3], fft_forms): 105 | N=5*np.ones(dim, dtype=np.int) 106 | F=DFT(inverse=False, N=N, fft_form=fft_form) 107 | iF=DFT(inverse=True, N=N, fft_form=fft_form) 108 | 109 | # scalar problem 110 | _, G1l, G2l=scalar(N, Y=np.ones(dim), fft_form=fft_form) 111 | P1=Operator(name='P1', mat=[[iF, G1l, F]]) 112 | P2=Operator(name='P2', mat=[[iF, G2l, F]]) 113 | u=Tensor(name='u', shape=(1,), N=N, Fourier=False, fft_form=fft_form) 114 | u.randomize() 115 | 116 | grad_u=grad(u) 117 | self.assertAlmostEqual(0, (P1(grad_u)-grad_u).norm(), delta=1e-13) 118 | self.assertAlmostEqual(0, P2(grad_u).norm(), delta=1e-13) 119 | 120 | e=P1(Tensor(name='u', shape=(dim,), N=N, 121 | Fourier=False, fft_form=fft_form).randomize()) 122 | e2=grad(potential(e)) 123 | self.assertAlmostEqual(0, (e-e2).norm(), delta=1e-13) 124 | 125 | # vectorial problem 126 | hG=elasticity_large_deformation(N=N, Y=np.ones(dim), fft_form=fft_form) 127 | P1=Operator(name='P', mat=[[iF, hG, F]]) 128 | u=Tensor(name='u', shape=(dim,), N=N, Fourier=False, fft_form=fft_form) 129 | u.randomize() 130 | grad_u=grad(u) 131 | val=(P1(grad_u)-grad_u).norm() 132 | self.assertAlmostEqual(0, val, delta=1e-13) 133 | 134 | e=Tensor(name='F', shape=(dim, dim), N=N, Fourier=False, fft_form=fft_form) 135 | e=P1(e.randomize()) 136 | e2=grad(potential(e)) 137 | self.assertAlmostEqual(0, (e-e2).norm(), delta=1e-13) 138 | 139 | # transpose 140 | P1TT=P1.transpose().transpose() 141 | self.assertTrue(P1(grad_u)==P1TT(grad_u)) 142 | 143 | self.assertTrue(hG==(hG.transpose_left().transpose_left())) 144 | self.assertTrue(hG==(hG.transpose_right().transpose_right())) 145 | 146 | # vectorial problem - symetric gradient 147 | hG=elasticity_small_strain(N=N, Y=np.ones(dim), fft_form=fft_form) 148 | P1=Operator(name='P', mat=[[iF, hG, F]]) 149 | u=Tensor(name='u', shape=(dim,), N=N, Fourier=False, fft_form=fft_form) 150 | u.randomize() 151 | grad_u=symgrad(u) 152 | val=(P1(grad_u)-grad_u).norm() 153 | self.assertAlmostEqual(0, val, delta=1e-13) 154 | 155 | e=Tensor(name='strain', shape=(dim, dim), N=N, 156 | Fourier=False, fft_form=fft_form) 157 | e=P1(e.randomize()) 158 | e2=symgrad(potential(e, small_strain=True)) 159 | self.assertAlmostEqual(0, (e-e2).norm(), delta=1e-13) 160 | 161 | # means 162 | Fu=F(u) 163 | E=np.random.random(u.shape) 164 | u.set_mean(E) 165 | self.assertAlmostEqual(0, norm(u.mean()-E), delta=1e-13) 166 | Fu.set_mean(E) 167 | self.assertAlmostEqual(0, norm(Fu.mean()-E), delta=1e-13) 168 | 169 | # __repr__ 170 | prt.disable() 171 | print(P1) 172 | print(u) 173 | prt.enable() 174 | self.assertAlmostEqual(0, (P1==P1.transpose()), delta=1e-13) 175 | print('...ok') 176 | 177 | def test_projections(self): 178 | print('\nChecking projections...') 179 | 180 | for dim, fft_form in itertools.product([2, 3], fft_forms): 181 | N=dim*(5,) 182 | Y=np.ones(dim) 183 | 184 | # scalar projections 185 | projections = proj.scalar(N, Y, tensor=True, fft_form=fft_form) 186 | 187 | for P,Q in itertools.product(projections, repeat=2): 188 | if (P==Q)[0]: 189 | self.assertAlmostEqual(0, (P*P-P).norm(), delta=1e-13) # idempotent 190 | else: 191 | self.assertAlmostEqual(0, (P*Q).norm(), delta=1e-13) # orthgonality 192 | 193 | # linear elasticity projections 194 | projections = proj.elasticity(N, Y, tensor=True, fft_form=fft_form) 195 | 196 | # checking idempotent property 197 | for P,Q in itertools.product(projections, repeat=2): 198 | if (P==Q)[0]: 199 | self.assertAlmostEqual(0, (P*P-P).norm(), delta=1e-13) # idempotent 200 | else: 201 | 
self.assertAlmostEqual(0, (P*Q).norm(), delta=1e-13) # orthogonality
202 |
203 | print('...ok')
204 |
205 | if __name__=="__main__":
206 | unittest.main()
207 |
-------------------------------------------------------------------------------- /doc/users_guide.rst: --------------------------------------------------------------------------------
1 | Users guide
2 | ===========
3 |
4 | Installation
5 | ------------
6 |
7 | There is no special installation required. The code can be downloaded from https://github.com/vondrejc/FFTHomPy.git or cloned using Git by::
8 |
9 | git clone https://github.com/vondrejc/FFTHomPy.git
10 |
11 | The code is implemented in `python <https://www.python.org>`_ and supports versions 2 and 3.
12 |
13 | The software also requires the following numerical libraries:
14 | * `numpy <https://numpy.org>`_
15 | * `scipy <https://scipy.org>`_
16 |
17 |
18 | Running the program
19 | -------------------
20 |
21 | Command line usage::
22 |
23 | $ python main.py examples/scalar/scalar_2d.py
24 |
25 | or simply as::
26 |
27 | $ ./main.py examples/scalar/scalar_2d.py
28 |
29 | where ``examples/scalar/scalar_2d.py`` is an input file for *FFTHomPy*.
30 |
31 | Definition of input file
32 | ------------------------
33 |
34 | An input file for *FFTHomPy* consists of a material definition and a problem definition.
35 |
36 | Material definition
37 | ^^^^^^^^^^^^^^^^^^^
38 | Material coefficients can be defined as matrix-inclusion composites or grid-based composites.
39 |
40 | .. _matrix-inclusions:
41 |
42 | Matrix-inclusion composites
43 | """"""""""""""""""""""""""""
44 |
45 | In this case, the material is expressed at points :math:`\Vx` of the periodic cell :math:`\puc=\prod_{i=1}^d (-\frac{Y_i}{2},\frac{Y_i}{2})` as
46 |
47 | .. math::
48 | \TA(\Vx) &= \sum_{i=1}^n f\incl{i} (\Vx-\Vx\incl{i}) \TA\incl{i}
49 |
50 | where the functions :math:`f\incl{i}` describe the inclusion topologies located at :math:`\Vx\incl{i}` with material coefficients :math:`\TA\incl{i}\in\xRdd`.
51 | An example of material coefficients, named ``'square'``, reads
52 | ::
53 |
54 | import numpy as np
55 |
56 | materials = {'square': {'Y': np.ones(dim),
57 | 'inclusions': ['square', 'otherwise'],
58 | 'positions': [np.zeros(dim), ''],
59 | 'params': [0.6*np.ones(dim), ''],
60 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)]}}
61 |
62 | and the keywords have the following meanings:
63 |
64 | - ``'Y'``: ``numpy.array`` of shape ``(dim,)`` describing the size of the periodic cell :math:`\puc` in dimension ``dim``
65 | - ``'inclusions'``: list of inclusions :math:`f\incl{i}` of the following types
66 | * ``'square'``, ``'circle'``, and ``'otherwise'`` in two-dimensional settings
67 | * ``'cube'``, ``'ball'``, and ``'otherwise'`` in three-dimensional settings
68 | - ``'positions'``: list of positions :math:`\Vx\incl{i}` corresponding to the individual inclusions
69 | * the position corresponds to the center of gravity with respect to the coordinate system; the inclusion ``'otherwise'`` has no position because it represents the part of the periodic cell not occupied by the other inclusions
70 | - ``'params'``: list of parameters determining the inclusions
71 | * for ``'square'`` and ``'cube'``, it corresponds to the sizes of the individual sides
72 | * for ``'circle'`` and ``'ball'``, it corresponds to the diameter
73 | - ``'vals'``: list of material coefficients for the individual inclusions; the coefficients are represented as a ``numpy.array`` whose shape corresponds to the physical problem in the problem definition; for the scalar elliptic problem the shape is ``(dim, dim)``, while for linearized elasticity it is ``(D, D)`` with ``D = dim*(dim+1)/2``.
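
As a further illustration of these keywords (the dictionary name ``'circle_example'`` and the particular numbers below are chosen for this guide only and are not taken from the distributed input files), a two-dimensional composite with a single circular inclusion centred in the cell could be defined as
::

    import numpy as np

    dim = 2  # two-dimensional setting, so the inclusion type 'circle' is available
    materials = {'circle_example': {'Y': np.ones(dim),                      # unit cell of size (1, 1)
                                    'inclusions': ['circle', 'otherwise'],  # one circle + the remaining matrix
                                    'positions': [np.zeros(dim), ''],       # circle centred at the origin
                                    'params': [0.4, ''],                    # diameter of the circle (assumed here to be a single number)
                                    'vals': [11*np.eye(dim), 1.*np.eye(dim)]}}  # (dim, dim) coefficients for a scalar problem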
74 | 75 | 76 | Grid-based composites 77 | """"""""""""""""""""" 78 | Contrary to :ref:`matrix-inclusions`, grid-based composites are defined on grid points: 79 | 80 | .. math:: 81 | \xPk 82 | = 83 | \sum_{\alp} \frac{Y_{\alp}k_{\alp}}{P_{\alp}} 84 | \cb{\alp}\quad 85 | \text{for } 86 | \Vk \in \ZPd = 87 | \biggl\{ \Vk \in \set{Z}^d : 88 | -\frac{P_\alpha}{2} \leq k_\alpha < \frac{P_\alpha}{2} \biggr\} 89 | 90 | for some number of points :math:`\VP\in\xNd` and the size :math:`\VY\in\xRd` of periodic cell :math:`\puc=\prod_{i=1}^d (-\frac{Y_i}{2},\frac{Y_i}{2})\subset\xRd`; examples for odd and even grids are depicted in following figure 91 | 92 | .. image:: figures/fig_grid.png 93 | :scale: 100 % 94 | 95 | for periodic cell :math:`\puc=\prod_{i=1}^d (-1,1)` with the cell size :math:`\VY=(2,2)`. 96 | 97 | The material is then approximated with the following formula 98 | 99 | .. math:: 100 | \TA(\Vx) &= 101 | \sum_{\Vk\in \set{Z}^d_{\VP}} \psi(\Vx-\Vx^{\Vk}_{\VP}) \TA(\Vx_{\VP}^{\Vk}) 102 | \quad\text{for }\VP\in\xNd \text{ and } \Vx\in\puc 103 | :label: grid-based_composite 104 | 105 | where function :math:`\psi:\puc\rightarrow\xRd` is taken either by 106 | 107 | .. math:: 108 | \rect_{\Vh}(\Vx) &= 109 | \begin{cases} 110 | 1 111 | & 112 | \text{if } 113 | |x_\alp| < \frac{h_\alp}{2}\text{ for all }\alp 114 | \\ 115 | 0 116 | & 117 | \text{otherwise} 118 | \end{cases} 119 | \quad\text{for }\Vh=\left(\frac{Y_\alp}{P_\alp}\right)_{\alp=1}^d 120 | :label: constant_approx 121 | 122 | leading to piece-wise constant approximation of material coefficients, or by 123 | 124 | .. math:: 125 | \tri_{\Vh}(\Vx) &= \prod_{\alp}\max\{1-|\frac{x_\alp}{h_\alp}|,0\} 126 | \quad\text{for }\Vh=\left(\frac{Y_\alp}{P_\alp}\right)_{\alp=1}^d 127 | :label: bilinear_approx 128 | 129 | leading to piece-wise bilinear approximation of material coefficients. 130 | 131 | In comparison to :ref:`matrix-inclusions`, the material coefficients definition 132 | :: 133 | 134 | materials.update({'square_Ga': {'Y': np.ones(dim), 135 | 'inclusions': ['square', 'otherwise'], 136 | 'positions': [np.zeros(dim), ''], 137 | 'params': [0.6*np.ones(dim), ''], 138 | 'vals': [11*np.eye(dim), 1.*np.eye(dim)], 139 | 'order': 0, 140 | 'P': 5*np.array(dim)}}) 141 | 142 | contains two additional parameters: 143 | - ``'P'``: ``numpy.array`` of shape ``(dim,)`` describes the resolution of approximation in :eq:`grid-based_composite` 144 | - ``'order'``: define approximation order: 145 | * ``0``: constant approximation according to :eq:`constant_approx` 146 | * ``1``: bilinear approximation according to :eq:`bilinear_approx`. 
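
To make the grid definition above concrete, the grid points :math:`\Vx_{\VP}^{\Vk}` can be generated with a few lines of NumPy. The snippet below is only a sketch of the formula for :math:`\ZPd` (the helper name ``grid_points`` is not part of *FFTHomPy*); it reproduces the odd and even grids from the figure above for the cell size :math:`\VY=(2,2)`
::

    import numpy as np

    def grid_points(P, Y):
        """Grid x^k_a = Y_a*k_a/P_a with integers -P_a/2 <= k_a < P_a/2 (sketch only)."""
        axes = [Y[a]*(np.arange(P[a]) - P[a]//2)/P[a] for a in range(len(P))]
        return np.meshgrid(*axes, indexing='ij')  # one coordinate array per dimension

    X_odd = grid_points(P=[5, 5], Y=[2., 2.])   # odd grid:  -0.8, -0.4, 0.0, 0.4, 0.8
    X_even = grid_points(P=[4, 4], Y=[2., 2.])  # even grid: -1.0, -0.5, 0.0, 0.5
    print(X_odd[0][:, 0], X_even[0][:, 0])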
147 |
148 | Problem definition
149 | ^^^^^^^^^^^^^^^^^^
150 | Here, an example of a problem definition is stated:
151 | ::
152 |
153 | problems = [{'name': 'prob1',
154 | 'physics': 'scalar',
155 | 'material': 'square',
156 | 'solve': {'kind': 'GaNi',
157 | 'N': N,
158 | 'primaldual': ['primal', 'dual']},
159 | 'postprocess': [{'kind': 'GaNi'},
160 | {'kind': 'Ga',
161 | 'order': None},
162 | {'kind': 'Ga',
163 | 'order': 0,
164 | 'P': N},
165 | {'kind': 'Ga',
166 | 'order': 1,
167 | 'P': 27*N}],
168 | 'solver': {'kind': 'CG',
169 | 'tol': 1e-6,
170 | 'maxiter': 1e3}}]
171 |
172 | The individual keywords are explained as follows:
173 | - ``'name'``: the name of the problem
174 | - ``'physics'``: defines the physical problem to be solved; the following alternatives are implemented:
175 | * ``'scalar'``: scalar linear elliptic problem (diffusion, stationary heat transfer, or electric conductivity)
176 | * ``'elasticity'``: linearized elasticity (small strain)
177 | - ``'material'``: a keyword referring to the dictionary ``materials``, or directly a dictionary defining the material coefficients
178 | - ``'solve'``: defines the problem discretization, i.e. the way the minimizers (corrector functions) are computed
179 | * ``'kind'``: is either ``'Ga'`` (Galerkin approximation) or ``'GaNi'`` (Galerkin approximation with numerical integration); it thus corresponds to the discretization approach
180 | * ``'N'``: a ``numpy.array`` defining the approximation order of the trigonometric polynomials; the higher the value, the better the approximation
181 | * ``'primaldual'``: determines whether the primal, the dual, or both formulations are calculated
182 | - ``'solver'``: defines the linear solver and its parameters
183 | * ``'kind'``: the linear solver, one of ``'CG'`` for Conjugate gradients, ``'BiCG'`` for Biconjugate gradients, ``'richardson'`` for Richardson's iterative solution, ``'scipy_cg'`` for ``scipy.sparse.linalg.cg``, and ``'scipy_bicg'`` for ``scipy.sparse.linalg.bicg``
184 | * ``'tol'``: the required tolerance (float) for the convergence of the linear solver
185 | * ``'maxiter'``: the maximal number of iterations
186 | - ``'postprocess'``: defines the way of calculating the homogenized material coefficients from the minimizers obtained with the solver defined in ``'solver'``
187 | * ``'kind'``: is either ``'Ga'`` (Galerkin approximation) or ``'GaNi'`` (Galerkin approximation with numerical integration); it thus corresponds to the discretization approach
188 | * ``'order'``: applicable only for ``'Ga'``; it defines the approximation order according to :eq:`constant_approx` or :eq:`bilinear_approx`
189 | * ``'P'``: applicable only for ``'Ga'``; this ``numpy.array`` of shape ``(dim,)`` describes the resolution of the approximation in :eq:`grid-based_composite`
190 |
191 |
-------------------------------------------------------------------------------- /ffthompy/general/base.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | import sys
4 | import time
5 | from copy import copy, deepcopy
6 | import collections
7 |
8 |
9 | def get_base_dir():
10 | module_dir = os.path.dirname(os.path.abspath(__file__))
11 | base_dir = os.path.normpath(os.path.join(module_dir, '../..'))
12 | return base_dir
13 |
14 | def run_file(filen=''):
15 | base_dir = get_base_dir()
16 | main = base_dir + '/main.py'
17 | sys.argv = [main, filen]
18 | exec(compile(open(main).read(), main, 'exec'))
19 |
20 | def print_dict(d):
21 | print('-- print dictionary -------------------')
22 | for key, vals in
list(d.items()): 23 | print((key + ' =', vals)) 24 | 25 | def end(): 26 | print('end') 27 | sys.exit() 28 | 29 | class Representation(): 30 | def _repr(self, keys, skip=4*' '): 31 | ss="Class : {0} \n".format(self.__class__.__name__) 32 | nstr=np.array([key.__len__() for key in keys]).max() 33 | 34 | for key in keys: 35 | attr=getattr(self, key) 36 | if isinstance(attr, collections.Callable): 37 | ss+='{0}{1}{3} = {2}\n'.format(skip, key, str(attr()), (nstr-key.__len__())*' ') 38 | else: 39 | ss+='{0}{1}{3} = {2}\n'.format(skip, key, str(attr), (nstr-key.__len__())*' ') 40 | return ss 41 | 42 | class PrintControl(): 43 | flag=True 44 | 45 | def __init__(self, flag=True): 46 | self.flag=True 47 | 48 | def activate(self): 49 | self.flag=True 50 | 51 | def deactivate(self): 52 | self.flag=False 53 | 54 | def disable(self): 55 | if self.flag: 56 | sys.stdout = open(os.devnull, 'w') 57 | 58 | def enable(self): 59 | if self.flag: 60 | sys.stdout.close() 61 | sys.stdout = sys.__stdout__ 62 | 63 | class Timer(): 64 | def __init__(self, name='time', start=True): 65 | self.name = name 66 | if start: 67 | self.start() 68 | 69 | def start(self): 70 | self.vals = [] 71 | self.ttin = [time.process_time(), time.clock(), time.time()] 72 | 73 | def measure(self, print_time=True): 74 | self.vals.append([time.process_time()-self.ttin[0], 75 | time.clock()-self.ttin[1], 76 | time.time()-self.ttin[2]]) 77 | if print_time: 78 | print(self) 79 | 80 | def __repr__(self): 81 | return 'time (%s): %s' % (self.name, str(self.vals)) 82 | 83 | 84 | class Struct(object): 85 | def __init__(self, **kwargs): 86 | if kwargs: 87 | self.__dict__.update(kwargs) 88 | 89 | def _format_sequence(self, seq, threshold): 90 | threshold_half = threshold / 2 91 | 92 | if len(seq) > threshold: 93 | out = ', '.join(str(ii) for ii in seq[:threshold_half]) \ 94 | + ', ..., ' \ 95 | + ', '.join(str(ii) for ii in seq[-threshold_half:]) 96 | 97 | else: 98 | out = str(seq) 99 | 100 | return out 101 | 102 | def __str__(self): 103 | """Print instance class, name and items in alphabetical order. 104 | 105 | If the class instance has '_str_attrs' attribute, only the attributes 106 | listed there are taken into account. Other attributes are provided only 107 | as a list of attribute names (no values). 108 | 109 | For attributes that are Struct instances, if 110 | the listed attribute name ends with '.', the attribute is printed fully 111 | by calling str(). Otherwise only its class name/name are printed. 112 | 113 | Attributes that are NumPy arrays or SciPy sparse matrices are 114 | printed in a brief form. 115 | 116 | Only keys of dict attributes are printed. For the dict keys as 117 | well as list or tuple attributes only several edge items are 118 | printed if their length is greater than the threshold value 20. 
119 | """ 120 | return self._str() 121 | 122 | def _str(self, keys=None, threshold=20): 123 | ss = '%s' % self.__class__.__name__ 124 | if hasattr(self, 'name'): 125 | ss += ':%s' % self.name 126 | ss += '\n' 127 | 128 | if keys is None: 129 | keys = list(self.__dict__.keys()) 130 | 131 | str_attrs = sorted(Struct.get(self, '_str_attrs', keys)) 132 | printed_keys = [] 133 | for key in str_attrs: 134 | if key[-1] == '.': 135 | key = key[:-1] 136 | full_print = True 137 | else: 138 | full_print = False 139 | 140 | printed_keys.append(key) 141 | 142 | try: 143 | val = getattr(self, key) 144 | 145 | except AttributeError: 146 | continue 147 | 148 | if isinstance(val, Struct): 149 | if not full_print: 150 | ss += ' %s:\n %s' % (key, val.__class__.__name__) 151 | if hasattr(val, 'name'): 152 | ss += ':%s' % val.name 153 | ss += '\n' 154 | 155 | else: 156 | aux = '\n' + str(val) 157 | aux = aux.replace('\n', '\n ') 158 | ss += ' %s:\n%s\n' % (key, aux[1:]) 159 | 160 | elif isinstance(val, dict): 161 | sval = self._format_sequence(list(val.keys()), threshold) 162 | sval = sval.replace('\n', '\n ') 163 | ss += ' %s:\n dict with keys: %s\n' % (key, sval) 164 | 165 | elif isinstance(val, list): 166 | sval = self._format_sequence(val, threshold) 167 | sval = sval.replace('\n', '\n ') 168 | ss += ' %s:\n list: %s\n' % (key, sval) 169 | 170 | elif isinstance(val, tuple): 171 | sval = self._format_sequence(val, threshold) 172 | sval = sval.replace('\n', '\n ') 173 | ss += ' %s:\n tuple: %s\n' % (key, sval) 174 | 175 | elif isinstance(val, np.ndarray): 176 | ss += ' %s:\n %s array of %s\n' \ 177 | % (key, val.shape, val.dtype) 178 | 179 | else: 180 | aux = '\n' + str(val) 181 | aux = aux.replace('\n', '\n ') 182 | ss += ' %s:\n%s\n' % (key, aux[1:]) 183 | 184 | other_keys = sorted(set(keys).difference(set(printed_keys))) 185 | if len(other_keys): 186 | ss += ' other attributes:\n %s\n' \ 187 | % '\n '.join(key for key in other_keys) 188 | 189 | return ss.rstrip() 190 | 191 | def __repr__(self): 192 | ss = "%s" % self.__class__.__name__ 193 | if hasattr(self, 'name'): 194 | ss += ":%s" % self.name 195 | return ss 196 | 197 | def __add__(self, other): 198 | new = copy(self) 199 | for key, val in list(other.__dict__.items()): 200 | if hasattr(new, key): 201 | sval = getattr(self, key) 202 | if issubclass(sval.__class__, Struct) and \ 203 | issubclass(val.__class__, Struct): 204 | setattr(new, key, sval + val) 205 | else: 206 | setattr(new, key, val) 207 | else: 208 | setattr(new, key, val) 209 | return new 210 | 211 | def str_class(self): 212 | return self._str(list(self.__class__.__dict__.keys())) 213 | 214 | def str_all(self): 215 | ss = "%s\n" % self.__class__ 216 | for key, val in list(self.__dict__.items()): 217 | if issubclass(self.__dict__[key].__class__, Struct): 218 | ss += " %s:\n" % key 219 | aux = "\n" + self.__dict__[key].str_all() 220 | aux = aux.replace("\n", "\n ") 221 | ss += aux[1:] + "\n" 222 | else: 223 | aux = "\n" + str(val) 224 | aux = aux.replace("\n", "\n ") 225 | ss += " %s:\n%s\n" % (key, aux[1:]) 226 | return(ss.rstrip()) 227 | 228 | def to_dict(self): 229 | return copy(self.__dict__) 230 | 231 | def get(self, key, default=None, msg_if_none=None): 232 | out = getattr(self, key, default) 233 | 234 | if (out is None) and (msg_if_none is not None): 235 | raise ValueError(msg_if_none) 236 | 237 | return out 238 | 239 | def keys(self): 240 | return list(self.__dict__.keys()) 241 | 242 | def values(self): 243 | return list(self.__dict__.values()) 244 | 245 | def update(self, other, 
**kwargs): 246 | if other is None: return 247 | 248 | if not isinstance(other, dict): 249 | other = other.to_dict() 250 | self.__dict__.update(other, **kwargs) 251 | 252 | def set_default(self, key, default=None): 253 | return self.__dict__.setdefault(key, default) 254 | 255 | def copy(self, deep=False, name=None): 256 | if deep: 257 | other = deepcopy(self) 258 | else: 259 | other = copy(self) 260 | 261 | if hasattr(self, 'name') and name is not None: 262 | other.name = self.name + '_copy' 263 | 264 | return other 265 | 266 | def to_array(self): 267 | log = deepcopy(self) 268 | for key, val in list(log.__dict__.items()): 269 | try: 270 | log.__dict__.update({key: np.array(val)}) 271 | except: 272 | pass 273 | return log 274 | 275 | if __name__ == '__main__': 276 | run_file() 277 | -------------------------------------------------------------------------------- /examples/lowRankTensorApproximations/setting.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ffthompy import Struct, Timer 3 | from ffthompy.materials import Material 4 | from ffthompy.tensorsLowRank.materials import LowRankMaterial 5 | 6 | try: 7 | from uq.decomposition import KL_Fourier 8 | except: 9 | import warnings 10 | warnings.warn('Package StoPy (https://github.com/vondrejc/StoPy) is not available; required for Karhunen-Loeve decomposition.') 11 | 12 | 13 | kind_list=['cano','tucker','tt'] 14 | 15 | def get_default_parameters(dim, N, material, kind): 16 | pars=Struct(dim=dim, # number of dimensions (works for 2D and 3D) 17 | N=dim*(N,), # number of voxels (assumed equal for all directions) 18 | Y=np.ones(dim), # size of periodic cell 19 | recover_sparse=1, # recalculate full material coefficients from tensorsLowRank one 20 | solver=dict(tol=1e-8, 21 | maxiter=50), 22 | ) 23 | pars_sparse=pars.copy() 24 | pars_sparse.update(Struct(debug=False, 25 | kind=kind_list[kind], # type of tensorsLowRank tensor: 'cano', 'tucker', or 'tt' 26 | precond_rank=10, 27 | N=dim*(N,), 28 | rhs_tol=1e-8, 29 | solver=dict(method='mr', # method could be 'Richardson'(r),'minimal_residual'(mr), or 'Chebyshev'(c) 30 | rank=10, # rank of solution vector 31 | tol_truncate=None, 32 | tol=1e-8, 33 | maxiter=30, # no. of iterations for a solver 34 | minres_fails=6, # stop the solver when the residuum fails to decrease n times 35 | fast=True, # fast truncation 36 | approx_omega=False, # inner product of tuckers could be so slow 37 | # that using an approximate omega could gain. 
38 | ), 39 | )) 40 | return pars, pars_sparse 41 | 42 | def get_material_coef(material, pars, pars_sparse, ga=True): 43 | # get configuration settings 44 | pars, pars_sparse, mat_conf = getMat_conf(material, pars, pars_sparse) 45 | 46 | # generating material coefficients 47 | mat=Material(mat_conf) 48 | mats=LowRankMaterial(mat_conf, pars_sparse.kind) 49 | 50 | Agani=mat.get_A_GaNi(pars.N, primaldual='primal') 51 | Aganis=mats.get_A_GaNi(pars_sparse.N, primaldual='primal', k=pars_sparse.matrank) 52 | Agani.val=recover_Agani(Agani, Aganis) 53 | 54 | if ga: 55 | Aga=mat.get_A_Ga(pars.Nbar(pars.N), primaldual='primal') 56 | Agas=mats.get_A_Ga(pars_sparse.Nbar(pars_sparse.N), primaldual='primal', k=pars_sparse.matrank) 57 | Aga.val=recover_Aga(Aga, Agas) 58 | else: 59 | Aga=None 60 | Agas=None 61 | 62 | if 'Aniso' in mat_conf: # workaround for anisotropic material 63 | Aniso=mat_conf['Aniso'] 64 | Agani.add_mean(Aniso) 65 | 66 | if ga: 67 | Aga.add_mean(Aniso) 68 | 69 | pars_sparse.update(Struct(Aniso=Aniso)) 70 | 71 | tic=Timer('calc_eig') 72 | eigs=Agani.calc_eigs(symmetric=True) 73 | tic.measure() 74 | 75 | pars_sparse.solver['alpha']=0.5*(eigs.min()+eigs.max()) 76 | else: 77 | pars_sparse.solver['alpha']=0.5*(Agani[0, 0].min()+Agani[0, 0].max()) 78 | 79 | return Aga, Agani, Agas, Aganis 80 | 81 | def getMat_conf(material, pars, pars_sparse): 82 | 83 | dim=pars.dim 84 | N=pars.N[0] 85 | 86 | if dim==2: 87 | pars_sparse.update(Struct(N=dim*(1*N,),)) 88 | elif dim==3: 89 | pars_sparse.update(Struct(N=dim*(1*N,),)) 90 | 91 | # ## auxiliary operator 92 | Nbar=lambda N: 2*np.array(N)-1 93 | pars.update(Struct(Nbar=lambda N: 2*np.array(N)-1)) 94 | pars_sparse.update(Struct(Nbar=lambda N: 2*np.array(N)-1)) 95 | 96 | # PROBLEM DEFINITION ###################################################### 97 | if material in [0,3]: # square inclusion 98 | mat_conf={'inclusions': ['square', 'otherwise'], 99 | 'positions': [0. * np.ones(dim), ''], 100 | 'params': [0.6*np.ones(dim), ''], # size of sides 101 | 'vals': [10*np.eye(dim), 1. * np.eye(dim)], 102 | 'Y': np.ones(dim), 103 | 'P': dim*(5,), 104 | 'order': 0, } 105 | pars_sparse.update(Struct(matrank=2)) 106 | 107 | elif material in [1]: # pyramid-like inclusion 108 | mat_conf={'inclusions': ['pyramid', 'all'], 109 | 'positions': [0. * np.ones(dim), ''], 110 | 'params': [0.8*np.ones(dim), ''], # size of sides 111 | 'vals': [10*np.eye(dim), 1. 
* np.eye(dim)], 112 | 'Y': np.ones(dim), 113 | 'P': pars.N, 114 | 'order': 1, } 115 | pars_sparse.update(Struct(matrank=2)) 116 | 117 | elif material in [2,4]: # stochastic material 118 | pars_sparse.update(Struct(matrank=10)) 119 | 120 | kl=KL_Fourier(covfun=2, cov_pars={'rho': 0.15, 'sigma': 1.}, N=pars.N, puc_size=pars.Y, 121 | transform=lambda x: np.exp(x)) 122 | if dim==2: 123 | kl.calc_modes(relerr=0.1) 124 | elif dim==3: 125 | kl.calc_modes(relerr=0.4) 126 | ip=np.random.random(kl.modes.n_kl)-0.5 127 | np.set_printoptions(precision=8) 128 | print('ip={}\n'.format(ip.__repr__())) 129 | if dim==2: 130 | ip=np.array( 131 | [0.24995, 0.009014,-0.004228, 0.266437, 0.345009,-0.29721,-0.291875,-0.125469, 132 | 0.495526, 133 | -0.452405,-0.333025, 0.208331, 0.045902,-0.441424,-0.274428,-0.243702,-0.146728, 134 | 0.239476, 135 | 0.404311, 0.214929]) 136 | if dim==3: 137 | ip=np.array( 138 | [-0.39561222,-0.37849801, 0.46069148,-0.0354164, 0.04269214,-0.00624889, 0.18498634, 139 | 0.31043535,-0.14730729,-0.39756328, 0.48918557, 0.15098372,-0.11217825,-0.26506403, 140 | 0.2006125,-0.2596631,-0.16854476,-0.44617782,-0.19412459, 0.32968464,-0.18441118, 141 | -0.15455307, 0.1779399,-0.21214177, 0.18394519,-0.24561992]) 142 | 143 | def mat_fun(coor, contrast=10): 144 | val=np.zeros_like(coor[0]) 145 | for ii in range(kl.modes.n_kl): 146 | val+=ip[ii]*kl.mode_fun(ii, coor) 147 | val=(val-val.min())/(val.max()-val.min())*np.log(contrast) 148 | return np.einsum('ij,...->ij...', np.eye(dim), kl.transform(val)) 149 | 150 | mat_conf={'fun': mat_fun, 151 | 'Y': np.ones(dim), 152 | 'P': pars.N, 153 | 'order': 1, } 154 | 155 | else: 156 | raise ValueError() 157 | 158 | if material in [3,4]: # adding anisotropic material 159 | if dim==2: 160 | Aniso=np.array([[ 5.5,-4.5], 161 | [-4.5, 5.5]]) 162 | elif dim==3: 163 | Aniso=np.array([[ 4.25, -3.25, -1.76776695], 164 | [-3.25, 4.25, 1.76776695], 165 | [-1.76776695, 1.76776695, 7.5 ]]) 166 | 167 | mat_conf.update({'Aniso': Aniso}) 168 | 169 | return pars, pars_sparse, mat_conf 170 | 171 | 172 | def recover_Aga(Aga, Agas): 173 | 174 | print('recovering full material tensors for Ga...') 175 | Aga.val=np.einsum('ij,...->ij...', np.eye(Aga.dim), Agas.full().val) 176 | 177 | print('Norm of difference in mat properties: {}'.format(np.linalg.norm(Aga.val[0, 0]-Agas.full().val))) 178 | return Aga.val 179 | 180 | 181 | def recover_Agani(Agani, Aganis): 182 | 183 | print('recovering full material tensors for GaNi...') 184 | Agani.val=np.einsum('ij,...->ij...', np.eye(Agani.dim), Aganis.full().val) 185 | 186 | print('Norm of difference in mat properties: {}'.format(np.linalg.norm(Agani.val[0, 0]-Aganis.full().val))) 187 | return Agani.val 188 | 189 | 190 | def getGaData(mat, mats, pars, pars_sparse): 191 | 192 | Aga=mat.get_A_Ga(pars.Nbar(pars.N), primaldual='primal') 193 | Agas=mats.get_A_Ga(pars_sparse.Nbar(pars_sparse.N), primaldual='primal', k=pars_sparse.matrank).set_fft_form() 194 | 195 | # 196 | if np.array_equal(pars.N, pars_sparse.N): 197 | print(np.linalg.norm(Aga.val[0, 0]-Agas.full().val)) 198 | 199 | if pars.recover_sparse: 200 | print('recovering full material tensors...') 201 | Aga.val=np.einsum('ij,...->ij...', np.eye(pars.dim), Agas.full().val) 202 | 203 | if np.array_equal(pars.N, pars_sparse.N): 204 | print(np.linalg.norm(Aga.val[0, 0]-Agas.full().val)) 205 | 206 | return Aga, Agas 207 | 208 | 209 | def getGaNiData(mat, mats, pars, pars_sparse): 210 | Agani=mat.get_A_GaNi(pars.N, primaldual='primal') 211 | Aganis=mats.get_A_GaNi(pars_sparse.N, 
primaldual='primal', k=pars_sparse.matrank) 212 | 213 | if np.array_equal(pars.N, pars_sparse.N): 214 | print(np.linalg.norm(Agani.val[0, 0]-Aganis.full().val)) 215 | 216 | if pars.recover_sparse: 217 | print('recovering full material tensors...') 218 | Agani.val=np.einsum('ij,...->ij...', np.eye(pars.dim), Aganis.full().val) 219 | 220 | if np.array_equal(pars.N, pars_sparse.N): 221 | print(np.linalg.norm(Agani.val[0, 0]-Aganis.full().val)) 222 | 223 | return Agani, Aganis 224 | --------------------------------------------------------------------------------
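
The helpers ``recover_Aga``, ``recover_Agani``, ``getGaData``, and ``getGaNiData`` in ``setting.py`` above all rely on the same ``np.einsum('ij,...->ij...', ...)`` contraction to turn a scalar coefficient field into an isotropic ``dim x dim`` tensor field. The standalone sketch below (with a made-up random field ``a``; it is not part of the package) shows what that contraction produces:

    import numpy as np

    dim = 2
    a = np.random.random((5, 5))                    # scalar coefficient field A(x) on a 5x5 grid (made-up data)
    A = np.einsum('ij,...->ij...', np.eye(dim), a)  # isotropic tensor field: A_ij(x) = delta_ij * a(x)

    print(A.shape)                                            # (2, 2, 5, 5)
    print(np.allclose(A[0, 0], a), np.allclose(A[0, 1], 0.))  # diagonal carries a(x), off-diagonals vanish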