├── .gitignore ├── 1D_karhunen_loeve_identification_example.py ├── 1D_karhunen_loeve_simulation_example.py ├── 2D_karhunen_loeve_identification_example.py ├── 2D_karhunen_loeve_simulation_example.py ├── README.md ├── randomfields ├── __init__.py ├── graphs.py ├── simulation.py └── utils.py └── setup.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.txt 3 | *.png 4 | -------------------------------------------------------------------------------- /1D_karhunen_loeve_identification_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf8 -*- 3 | 4 | """ 5 | ================================================================== 6 | Identification of a 1D random field using Karhunen-Loeve expansion 7 | ================================================================== 8 | 9 | This scripts shows how to identify the parameters characterizing a random 10 | field H(x) from a set of sample paths using the Karhunen-Loeve expansion 11 | method. 12 | 13 | The set of sample paths is loaded from an ascii file assuming the first line 14 | contains the indexing variable values of the 1D random field and the subsequent 15 | lines contain the corresponding sample paths values. 16 | These assumptions hold for this script only, note that it is possible to use 17 | the KarhunenLoeveExpansion in other contexts such as different unstructured 18 | grid formats for each sample path by first building a callable version of each 19 | sample paths using ANY suitable interpolation technique (interp1d is used 20 | here). 21 | 22 | From `randomfields`, it uses `KarhunenLoeveExpansion` for: 23 | 24 | - solving the spectral decomposition problem of the estimated covariance 25 | function of the field H. 
26 | 27 | - calculating the coefficients of the Karhunen-Loeve expansion truncated 28 | to a given order depending on the decrease of the formerly computed 29 | eigenvalues. These coefficients are uncorrelated, zero mean and unit 30 | variance random variables. 31 | 32 | Their joint distribution is then estimated (here, using kernel smoothing) and 33 | new sample paths are eventually generated using the discretized random field 34 | computed by the instanciated KarhunenLoeveExpansion. 35 | 36 | The identification procedure consists in the following steps: 37 | 38 | 1. Modelling of the sample paths by some interpolation technique. 39 | 40 | 2. Estimation of the mean and covariance functions using the usual statistical 41 | estimators on the interpolated sample paths. 42 | 43 | 3. Resolution of the integral eigenvalue problem of the covariance function 44 | (i.e. discretization of the random field into a Karhunen-Loeve expansion). 45 | 46 | 4. Computation of the coefficients of the Karhunen-Loeve expansion by 47 | functional projection using Gauss-Legendre quadrature. 48 | 49 | 5. Identification of the joint distribution of these coefficients. 50 | Here, this resorts to a kernel smoothing non-parametric technique for the 51 | marginal distributions and a Gaussian copula is assumed for illustration. 52 | 53 | 6. Generation of new sample paths. 
54 | """ 55 | 56 | # Author: Marine Marcilhac 57 | # Vincent Dubourg 58 | # License: BSD 59 | 60 | import numpy as np 61 | import pylab as pl 62 | from scipy.interpolate import interp1d 63 | import openturns as ot 64 | from randomfields import KarhunenLoeveExpansion, \ 65 | matrix_plot 66 | 67 | # Name of the input file that contains the indexing variable and sample paths 68 | # values 69 | input_file_name = '1D_sample_paths.txt' 70 | 71 | # Parameters (see KarhunenLoeveExpansion's docstring) 72 | verbose = True 73 | truncation_order = 10 74 | galerkin_scheme = 'legendre' 75 | legendre_galerkin_order = 20 76 | legendre_quadrature_order = 41 77 | 78 | # Load input data from txt file 79 | x = np.loadtxt(input_file_name)[0] 80 | sample_paths_values = np.loadtxt(input_file_name)[1:] 81 | n_sample_paths, res = sample_paths_values.shape 82 | 83 | # Rectangular domain definition 84 | lower_bound = np.array([0.] * 1) 85 | upper_bound = np.array([10.] * 1) 86 | 87 | # Interpolation of the sample paths 88 | sample_paths = [interp1d(x, sample_paths_values[i], kind='linear') 89 | for i in xrange(n_sample_paths)] 90 | 91 | 92 | def estimated_mean(x): 93 | y = np.vstack([sample_paths[i](x) for i in xrange(n_sample_paths)]) 94 | return np.mean(y, axis=0) 95 | 96 | 97 | def estimated_covariance(xx): 98 | xx = np.atleast_2d(xx) 99 | y1 = np.vstack([sample_paths[i](xx[:, 0]) 100 | for i in xrange(n_sample_paths)]) 101 | y2 = np.vstack([sample_paths[i](xx[:, 1]) 102 | for i in xrange(n_sample_paths)]) 103 | cov = np.sum((y1 - y1.mean(axis=0)) * (y2 - y2.mean(axis=0)), 104 | axis=0) / (n_sample_paths - 1.) 
105 | return cov 106 | 107 | # Discretization of the random field using Karhunen-Loeve expansion 108 | # from its estimated theoretical moments 109 | estimated_random_field = KarhunenLoeveExpansion( 110 | estimated_mean, 111 | estimated_covariance, 112 | truncation_order, 113 | [lower_bound, upper_bound], 114 | domain_expand_factor=1., 115 | verbose=verbose, 116 | galerkin_scheme=galerkin_scheme, 117 | legendre_galerkin_order=legendre_galerkin_order, 118 | legendre_quadrature_order=legendre_quadrature_order) 119 | truncation_order = estimated_random_field._truncation_order 120 | 121 | # Plot eigenvalues and eigenfunctions 122 | pl.figure() 123 | pl.title('Eigensolutions') 124 | for i in xrange(truncation_order): 125 | pl.plot(x, estimated_random_field._eigenfunctions[i](x), 126 | label='$\lambda_{%d} = %.2f$' % (i, 127 | estimated_random_field._eigenvalues[i])) 128 | pl.xlabel('$x$') 129 | pl.ylabel('$\\varphi_i(x)$') 130 | pl.grid() 131 | pl.legend(loc='lower center', ncol=3) 132 | pl.savefig('1D_identification_eigensolutions.png') 133 | pl.close() 134 | 135 | # Calculation of the KL coefficients by functional projection using 136 | # Gauss-Legendre quadrature 137 | xi = estimated_random_field.compute_coefficients(sample_paths) 138 | 139 | # Statistical inference of the KL coefficients' distribution 140 | kernel_smoothing = ot.KernelSmoothing(ot.Normal()) 141 | xi_marginal_distributions = ot.DistributionCollection( 142 | [kernel_smoothing.build(xi[:, i][:, np.newaxis]) 143 | for i in xrange(truncation_order)]) 144 | try: 145 | xi_copula = ot.NormalCopulaFactory().build(xi) 146 | except RuntimeError: 147 | print('ERR: The normal copula correlation matrix built from the given\n' 148 | + 'Spearman correlation matrix is not definite positive.\n' 149 | + 'This would require expert judgement on the correlation\n' 150 | + 'coefficients significance (using e.g. 
Spearman test).\n' 151 | + 'Assuming an independent copula in the sequel...') 152 | xi_copula = ot.IndependentCopula(truncation_order) 153 | xi_estimated_distribution = ot.ComposedDistribution(xi_marginal_distributions, 154 | xi_copula) 155 | 156 | # Matrix plot of the empirical KL coefficients & their estimated distribution 157 | matrix_plot(xi, ot_distribution=xi_estimated_distribution, 158 | labels=[('$\\xi_{%d}$' % i) for i in xrange(truncation_order)]) 159 | pl.suptitle('Karhunen-Loeve coefficients ' 160 | + '(observations and estimated distribution)') 161 | pl.savefig('1D_identification_KL_coefficients_joint_distribution.png') 162 | pl.close() 163 | 164 | # Plot the ten first observed sample paths reconstructed from the estimated 165 | # random field and an adequation plot with respect to the original observed 166 | # sample paths 167 | reconstructed_sample_paths_values = estimated_random_field(x[:, np.newaxis], 168 | xi[:10]) 169 | pl.figure() 170 | pl.title('Adequation: observation vs. 
model') 171 | for i in xrange(10): 172 | pl.plot(sample_paths_values[i], reconstructed_sample_paths_values[i], '.', 173 | label='$h^{(%d)}(x)$' % i) 174 | pl.plot([reconstructed_sample_paths_values.min(), 175 | reconstructed_sample_paths_values.max()], 176 | [reconstructed_sample_paths_values.min(), 177 | reconstructed_sample_paths_values.max()], 'k--', lw=1.5) 178 | for lim in [pl.xlim, pl.ylim]: 179 | lim(reconstructed_sample_paths_values.min(), 180 | reconstructed_sample_paths_values.max()) 181 | pl.axis('equal') 182 | pl.xlabel('$H(x)$') 183 | pl.ylabel('$\hat{H}(x)$') 184 | pl.grid() 185 | pl.legend(loc='lower right', ncol=2) 186 | pl.savefig('1D_identification_adequation_plot.png') 187 | pl.close() 188 | -------------------------------------------------------------------------------- /1D_karhunen_loeve_simulation_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf8 -*- 3 | 4 | """ 5 | ============================================================== 6 | Simulation of a 1D random field using Karhunen-Loeve expansion 7 | ============================================================== 8 | 9 | This script simulates a Gaussian random field using Karhunen-Loeve expansion. 10 | The randomfield is defined over the interval [0; 10]. It is supposed to have a 11 | zero mean, unit variance and a squared exponential covariance function with 12 | correlation length 2. 13 | """ 14 | 15 | # Author: Marine Marcilhac 16 | # Vincent Dubourg 17 | # License: BSD 18 | 19 | import numpy as np 20 | import pylab as pl 21 | import openturns as ot 22 | from randomfields import KarhunenLoeveExpansion 23 | 24 | # Parameters (see KarhunenLoeveExpansion's docstring) 25 | integral_range = 2. 
26 | domain_expand_factor = 1.1 27 | verbose = True 28 | truncation_order = 10 29 | galerkin_scheme = 'legendre' 30 | legendre_galerkin_order = 20 31 | legendre_quadrature_order = 41 32 | n_sample_paths = 1000 33 | n_index_values = 100 34 | 35 | # Rectangular domain definition 36 | lower_bound = np.array([0.] * 1) 37 | upper_bound = np.array([10.] * 1) 38 | 39 | 40 | def mean(x): 41 | x = np.asanyarray(x) 42 | if x.ndim <= 1: 43 | x = np.atleast_2d(x).T 44 | x = np.atleast_2d(x) 45 | return np.zeros(x.shape[0]) 46 | 47 | 48 | def covariance(xx): 49 | xx = np.atleast_2d(xx) 50 | dd = xx[:, :1] - xx[:, 1:] 51 | ll = np.atleast_2d(integral_range) 52 | return np.exp(- np.sum((dd / ll) ** 2., axis=1)) 53 | 54 | # Discretization of the random field using Karhunen-Loeve expansion 55 | # from its given theoretical moments 56 | random_field = KarhunenLoeveExpansion( 57 | mean, 58 | covariance, 59 | truncation_order, 60 | [lower_bound, upper_bound], 61 | domain_expand_factor=domain_expand_factor, 62 | verbose=verbose, 63 | galerkin_scheme=galerkin_scheme, 64 | legendre_galerkin_order=legendre_galerkin_order, 65 | legendre_quadrature_order=legendre_quadrature_order) 66 | truncation_order = random_field._truncation_order 67 | 68 | # Plot the relative covariance discretization error 69 | res = 50 70 | x1, x2 = np.meshgrid(np.linspace(lower_bound, upper_bound, res), 71 | np.linspace(lower_bound, upper_bound, res)) 72 | xx = np.vstack([x1.ravel(), x2.ravel()]).T 73 | approximated_covariance = \ 74 | random_field.compute_approximated_covariance(xx) 75 | true_covariance = covariance(xx) 76 | covariance_error = true_covariance - approximated_covariance 77 | covariance_relative_error = 100. 
* covariance_error / true_covariance 78 | pl.figure() 79 | im = pl.imshow(np.flipud(covariance_relative_error.reshape((res, res))), 80 | extent=(lower_bound[0], upper_bound[0], ) * 2, 81 | cmap=pl.matplotlib.cm.jet) 82 | cb = pl.colorbar(im) 83 | cb.set_label('Covariance relative error (%)') 84 | pl.title('min = %.2f %%, max = %.2f %%, avg = %.2f %%' 85 | % (covariance_relative_error.min(), 86 | covariance_relative_error.max(), 87 | covariance_relative_error.mean())) 88 | pl.xlabel("$x$") 89 | pl.ylabel("$x'$") 90 | pl.grid() 91 | pl.savefig('1D_simulation_covariance_discretization_error.png') 92 | pl.close() 93 | 94 | # Plot the relative variance discretization error 95 | res = 1000 96 | x = np.linspace(lower_bound[0], upper_bound[0], res) 97 | xx = np.vstack([x, x]).T 98 | approximated_variance = \ 99 | random_field.compute_approximated_covariance(xx) 100 | true_variance = covariance(xx) 101 | variance_error = true_variance - approximated_variance 102 | variance_relative_error = 100. * variance_error / true_variance 103 | pl.figure() 104 | pl.plot(x, variance_relative_error) 105 | pl.title('min = %.2f %%, max = %.2f %%, avg = %.2f %%' 106 | % (variance_relative_error.min(), 107 | variance_relative_error.max(), 108 | variance_relative_error.mean())) 109 | pl.xlabel("$x$") 110 | pl.ylabel('Variance relative error (%)') 111 | pl.grid() 112 | pl.savefig('1D_simulation_variance_discretization_error.png') 113 | pl.close() 114 | 115 | # Simulation of the (Gaussian) random field 116 | res = n_index_values 117 | x = np.linspace(lower_bound[0], upper_bound[0], res) 118 | xi_theoretical_distribution = ot.Normal(truncation_order) 119 | xi = xi_theoretical_distribution.getSample(n_sample_paths) 120 | sample_paths_values = random_field(x[:, np.newaxis], xi) 121 | 122 | # Plot a few sample paths 123 | pl.figure() 124 | pl.title('A few sample paths') 125 | for i in range(min(n_sample_paths, 10)): 126 | pl.plot(x, sample_paths_values[i], label='$h^{(%d)}$' % i) 127 | 
pl.xlabel('$x$') 128 | pl.ylabel('$H(x)$') 129 | pl.grid() 130 | pl.legend(loc='lower center', ncol=5) 131 | pl.savefig('1D_simulation_sample_paths.png') 132 | pl.close() 133 | 134 | # Save the sample paths to ascii file 135 | np.savetxt('1D_sample_paths.txt', 136 | np.vstack([x, sample_paths_values])) 137 | -------------------------------------------------------------------------------- /2D_karhunen_loeve_identification_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf8 -*- 3 | 4 | """ 5 | ================================================================== 6 | Identification of a 2D random field using Karhunen-Loeve expansion 7 | ================================================================== 8 | 9 | This scripts shows how to identify the parameters characterizing a random 10 | field H(x) from a set of sample paths using the Karhunen-Loeve expansion 11 | method. 12 | 13 | The set of sample paths is loaded from an ascii file assuming the first two 14 | lines contain the indexing variables values of the 2D random field and the 15 | subsequent lines contain the corresponding sample paths values. The indexing 16 | variables values are additionally assumed to form a regular grid. 17 | These assumptions hold for this script only, note that it is possible to use 18 | the KarhunenLoeveExpansion in other contexts such as different unstructured 19 | grid formats for each sample path by first building a callable version of each 20 | sample paths using ANY suitable interpolation technique (LinearNDInterpolator 21 | is used here). 22 | 23 | From randomfields, it uses `KarhunenLoeveExpansion` for: 24 | 25 | - solving the spectral decomposition problem of the estimated covariance 26 | function of the field H. 27 | 28 | - calculating the coefficients of the Karhunen-Loeve expansion truncated 29 | to a given order depending on the decrease of the formerly computed 30 | eigenvalues. 
These coefficients are uncorrelated, zero mean and unit 31 | variance random variables. 32 | 33 | Their joint distribution is then estimated (here, using kernel smoothing) and 34 | new sample paths are eventually generated using the discretized random field 35 | computed by the instanciated KarhunenLoeveExpansion. 36 | 37 | The identification procedure consists in the following steps: 38 | 39 | 1. Modelling of the sample paths by some interpolation technique. 40 | 41 | 2. Estimation of the mean and covariance functions using the usual statistical 42 | estimators on the interpolated sample paths. 43 | 44 | 3. Resolution of the integral eigenvalue problem of the covariance function 45 | (i.e. discretization of the random field into a Karhunen-Loeve expansion). 46 | 47 | 4. Computation of the coefficients of the Karhunen-Loeve expansion by 48 | functional projection using Gauss-Legendre quadrature. 49 | 50 | 5. Identification of the joint distribution of these coefficients. 51 | Here, this resorts to a kernel smoothing non-parametric technique for the 52 | marginal distributions and a Gaussian copula is assumed for illustration. 53 | 54 | 6. Generation of new sample paths. 
55 | """ 56 | 57 | # Author: Marine Marcilhac 58 | # Vincent Dubourg 59 | # License: BSD 60 | 61 | import numpy as np 62 | import pylab as pl 63 | from mpl_toolkits.mplot3d import Axes3D 64 | from scipy.interpolate import LinearNDInterpolator 65 | import openturns as ot 66 | from randomfields import KarhunenLoeveExpansion, \ 67 | matrix_plot 68 | 69 | # Name of the input file that contains the indexing variable and sample paths 70 | # values 71 | input_file_name = '2D_sample_paths.txt' 72 | 73 | # Parameters (see KarhunenLoeveExpansion's docstring) 74 | verbose = True 75 | truncation_order = 30 76 | galerkin_scheme = 'legendre' 77 | legendre_galerkin_order = 8 78 | legendre_quadrature_order = 17 79 | 80 | # Load input data from txt file 81 | xx = np.loadtxt(input_file_name)[:2].T 82 | sample_paths_values = np.loadtxt(input_file_name)[2:] 83 | n_sample_paths, res2 = sample_paths_values.shape 84 | res = np.sqrt(res2) 85 | x1, x2 = xx[:, 0].reshape((res, res)), xx[:, 1].reshape((res, res)) 86 | 87 | # Rectangular domain definition 88 | lower_bound = np.array([0.] * 2) 89 | upper_bound = np.array([10.] * 2) 90 | 91 | # Interpolation of the sample paths 92 | sample_paths = [LinearNDInterpolator(xx, sample_paths_values[i]) 93 | for i in xrange(n_sample_paths)] 94 | 95 | 96 | def estimated_mean(x): 97 | y = np.vstack([sample_paths[i](x) for i in xrange(n_sample_paths)]) 98 | return np.mean(y, axis=0) 99 | 100 | 101 | def estimated_covariance(xx): 102 | xx = np.atleast_2d(xx) 103 | y1 = np.vstack([sample_paths[i](xx[:, :2]) 104 | for i in xrange(n_sample_paths)]) 105 | y2 = np.vstack([sample_paths[i](xx[:, 2:]) 106 | for i in xrange(n_sample_paths)]) 107 | cov = np.sum((y1 - y1.mean(axis=0)) * (y2 - y2.mean(axis=0)), 108 | axis=0) / (n_sample_paths - 1.) 
109 | return cov 110 | 111 | # Discretization of the random field using Karhunen-Loeve expansion 112 | # from its estimated theoretical moments 113 | estimated_random_field = KarhunenLoeveExpansion( 114 | estimated_mean, 115 | estimated_covariance, 116 | truncation_order, 117 | [lower_bound, upper_bound], 118 | domain_expand_factor=1., 119 | verbose=verbose, 120 | galerkin_scheme=galerkin_scheme, 121 | legendre_galerkin_order=legendre_galerkin_order, 122 | legendre_quadrature_order=legendre_quadrature_order) 123 | truncation_order = estimated_random_field._truncation_order 124 | 125 | # Plot eigenvalues and eigenfunctions 126 | for i in xrange(truncation_order): 127 | fig = pl.figure() 128 | ax = Axes3D(fig) 129 | pl.title('Eigensolution \#%d ($\lambda_{%d} = %.2f$)' 130 | % (i, i, estimated_random_field._eigenvalues[i])) 131 | ax.plot_surface(x1, x2, 132 | np.reshape(estimated_random_field._eigenfunctions[i](xx), 133 | (res, res)), 134 | rstride=1, cstride=1, cmap=pl.matplotlib.cm.jet) 135 | ax.set_xlabel('$x_1$') 136 | ax.set_ylabel('$x_2$') 137 | ax.set_zlabel('$\\varphi_i(\mathbf{x})$') 138 | pl.savefig('2D_identification_eigensolution_%d.png' % i) 139 | pl.close() 140 | 141 | # Calculation of the KL coefficients by functional projection using 142 | # Gauss-Legendre quadrature 143 | xi = estimated_random_field.compute_coefficients(sample_paths) 144 | 145 | # Statistical inference of the KL coefficients' distribution 146 | kernel_smoothing = ot.KernelSmoothing(ot.Normal()) 147 | xi_marginal_distributions = ot.DistributionCollection( 148 | [kernel_smoothing.build(xi[:, i][:, np.newaxis]) 149 | for i in xrange(truncation_order)]) 150 | try: 151 | xi_copula = ot.NormalCopulaFactory().build(xi) 152 | except RuntimeError: 153 | print('ERR: The normal copula correlation matrix built from the given\n' 154 | + 'Spearman correlation matrix is not definite positive.\n' 155 | + 'This would require expert judgement on the correlation\n' 156 | + 'coefficients significance 
(using e.g. Spearman test).\n' 157 | + 'Assuming an independent copula in the sequel...') 158 | xi_copula = ot.IndependentCopula(truncation_order) 159 | xi_estimated_distribution = ot.ComposedDistribution(xi_marginal_distributions, 160 | xi_copula) 161 | 162 | # Matrix plot of the empirical KL coefficients & their estimated distribution 163 | matrix_plot(xi, ot_distribution=xi_estimated_distribution, 164 | labels=[('$\\xi_{%d}$' % i) for i in xrange(truncation_order)]) 165 | pl.suptitle('Karhunen-Loeve coefficients ' 166 | + '(observations and estimated distribution)') 167 | pl.savefig('2D_identification_KL_coefficients_joint_distribution.png') 168 | pl.close() 169 | 170 | # Plot the ten first observed sample paths reconstructed from the estimated 171 | # random field and an adequation plot with respect to the original observed 172 | # sample paths 173 | reconstructed_sample_paths_values = estimated_random_field(xx, xi[:10]) 174 | pl.figure() 175 | pl.title('Adequation: observation vs. model') 176 | for i in xrange(10): 177 | pl.plot(sample_paths_values[i], reconstructed_sample_paths_values[i], '.', 178 | label='$h^{(%d)}(\mathbf{x})$' % i) 179 | pl.plot([reconstructed_sample_paths_values.min(), 180 | reconstructed_sample_paths_values.max()], 181 | [reconstructed_sample_paths_values.min(), 182 | reconstructed_sample_paths_values.max()], 'k--', lw=1.5) 183 | for lim in [pl.xlim, pl.ylim]: 184 | lim(reconstructed_sample_paths_values.min(), 185 | reconstructed_sample_paths_values.max()) 186 | pl.axis('equal') 187 | pl.xlabel('$H(\mathbf{x})$') 188 | pl.ylabel('$\hat{H}(\mathbf{x})$') 189 | pl.grid() 190 | pl.legend(loc='lower right', ncol=2) 191 | pl.savefig('2D_identification_adequation_plot.png') 192 | pl.close() 193 | -------------------------------------------------------------------------------- /2D_karhunen_loeve_simulation_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf8 
-*- 3 | 4 | """ 5 | ============================================================== 6 | Simulation of a 2D random field using Karhunen-Loeve expansion 7 | ============================================================== 8 | 9 | This script simulates a Gaussian random field using Karhunen-Loeve expansion. 10 | The randomfield is defined over the square domain [0; 10]^2. It is supposed to 11 | have a zero mean and unit variance and a stationary squared exponential 12 | covariance function whose correlation lengths are 3 and 2. 13 | """ 14 | 15 | # Author: Marine Marcilhac 16 | # Vincent Dubourg 17 | # License: BSD 18 | 19 | import numpy as np 20 | import pylab as pl 21 | from mpl_toolkits.mplot3d import Axes3D 22 | import openturns as ot 23 | from randomfields import KarhunenLoeveExpansion 24 | 25 | # Parameters (see KarhunenLoeveExpansion's docstring) 26 | integral_range = np.array([3., 2.]) 27 | domain_expand_factor = 1.1 28 | verbose = True 29 | truncation_order = 30 30 | galerkin_scheme = 'legendre' 31 | legendre_galerkin_order = 8 32 | legendre_quadrature_order = 17 33 | n_sample_paths = 1000 34 | n_index_values = 50 35 | 36 | # Rectangular domain definition 37 | lower_bound = np.array([0.] * 2) 38 | upper_bound = np.array([10.] 
* 2) 39 | 40 | 41 | def mean(x): 42 | x = np.atleast_2d(x) 43 | return np.zeros(x.shape[0]) 44 | 45 | 46 | def covariance(xx): 47 | xx = np.atleast_2d(xx) 48 | dd = np.abs(xx[:, :2] - xx[:, 2:]) 49 | ll = np.atleast_2d(integral_range) 50 | return np.exp(- np.sum((dd / ll) ** 2., axis=1)) 51 | 52 | # Discretization of the random field using Karhunen-Loeve expansion 53 | random_field = KarhunenLoeveExpansion( 54 | mean, 55 | covariance, 56 | truncation_order, 57 | [lower_bound, upper_bound], 58 | domain_expand_factor=domain_expand_factor, 59 | verbose=verbose, 60 | galerkin_scheme=galerkin_scheme, 61 | legendre_galerkin_order=legendre_galerkin_order, 62 | legendre_quadrature_order=legendre_quadrature_order) 63 | truncation_order = random_field._truncation_order 64 | 65 | # Plot the relative variance discretization error 66 | res = 50 67 | x1, x2 = np.meshgrid(np.linspace(lower_bound[0], upper_bound[0], res), 68 | np.linspace(lower_bound[1], upper_bound[1], res)) 69 | xx = np.vstack([x1.ravel(), x2.ravel()]).T 70 | xxxx = np.hstack([xx, xx]) 71 | approximated_variance = random_field.compute_approximated_covariance(xxxx) 72 | variance = covariance(xxxx) 73 | variance_error = variance - approximated_variance 74 | variance_relative_error = 100. 
* variance_error / variance 75 | pl.figure() 76 | im = pl.imshow(np.flipud(variance_relative_error.reshape((res, res))), 77 | extent=(lower_bound[0], upper_bound[0], 78 | lower_bound[1], upper_bound[1]), 79 | cmap=pl.matplotlib.cm.jet) 80 | cb = pl.colorbar(im) 81 | cb.set_label('Relative variance discretization error (%)') 82 | pl.title('min = %.2f %%, max = %.2f %%, avg = %.2f %%' 83 | % (variance_relative_error.min(), 84 | variance_relative_error.max(), 85 | variance_relative_error.mean())) 86 | pl.xlabel("$x$") 87 | pl.ylabel("$x'$") 88 | pl.grid() 89 | pl.savefig('2D_simulation_discretization_error.png') 90 | pl.close() 91 | 92 | # Simulation of the (Gaussian) random field 93 | res = n_index_values 94 | x1, x2 = np.meshgrid(np.linspace(lower_bound[0], upper_bound[0], res), 95 | np.linspace(lower_bound[0], upper_bound[0], res)) 96 | x = np.vstack([x1.ravel(), x2.ravel()]).T 97 | xi_distribution = ot.Normal(truncation_order) 98 | xi = xi_distribution.getSample(n_sample_paths) 99 | sample_paths_values = random_field(x, xi) 100 | 101 | # Plot a sample path 102 | fig = pl.figure() 103 | ax = Axes3D(fig) 104 | ax.plot_surface(x1, x2, sample_paths_values[0].reshape((res, res)), 105 | rstride=1, cstride=1, cmap=pl.matplotlib.cm.jet) 106 | ax.set_xlabel('$x_1$') 107 | ax.set_ylabel('$x_2$') 108 | ax.set_zlabel('$h(\mathbf{x})$') 109 | pl.savefig('2D_simulation_sample_path.png') 110 | pl.close() 111 | 112 | # Save the sample paths to ascii file 113 | np.savetxt('2D_sample_paths.txt', 114 | np.vstack([x1.ravel(), x2.ravel(), sample_paths_values])) 115 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | python-randomfields 2 | =================== 3 | 4 | A Python module that implements tools for the simulation and identification of 5 | random fields using the Karhunen-Loeve expansion representation. 
6 | 7 | Folder description 8 | ------------------ 9 | 10 | This folder contains: 11 | 12 | * a Python module named randomfields, 13 | 14 | * 4 Python scripts implementing basic examples, showing the ways the module 15 | functionalities can be used. 16 | NB: *_simulation_* scripts must be run before their corresponding 17 | *_identification_* counterpart because the simulation scripts generate 18 | input randomfield data (dumped to ascii file) for identification. 19 | 20 | Requirements 21 | ------------ 22 | 23 | The present Python module and its examples rely on: 24 | 25 | * OpenTURNS (>= 1.4) 26 | 27 | * Numpy (>= 1.6) 28 | 29 | * Scipy (>= 0.9) 30 | 31 | * Matplotlib (>= 1.0) 32 | 33 | Installation 34 | ------------ 35 | 36 | The example scripts can be run from this folder for testing. They'll import the 37 | randomfields module from the local folder. 38 | 39 | In order to make the randomfields module installation systemwide, you may 40 | either: 41 | 42 | * copy the randomfields module (directory) in the "site-package" directory 43 | of your Python distribution (e.g. /usr/local/lib/python2.7/site-package). 44 | NB: You might need admin rights to do so. 45 | 46 | * append the parent directory of the randomfields module (directory) to 47 | your PYTHONPATH environment variable. 48 | 49 | Documentation 50 | ------------- 51 | 52 | The randomfields module uses Python docstrings. Use either "help(object)" in a 53 | classic Python shell or "object?" in an improved Python (IPython) shell. 54 | 55 | Authors and terms of use 56 | ------------------------ 57 | 58 | This module was implemented by Phimeca Engineering SA, EdF and Institut Navier 59 | (ENPC). It is shipped as is without any warranty of any kind. 60 | 61 | Todo 62 | ----- 63 | 64 | Contributions are welcome. 65 | 66 | * Implement other Galerkin schemes such as the Haar wavelet Galerkin scheme 67 | proposed by Phoon et al. (2002). More advanced (smoother) wavelets could 68 | also be used. 
69 | 70 | * Call for data: if you have any, please contribute, possibly along with 71 | an identification example. 72 | 73 | * Any other idea within the scope of the module is welcome! 74 | 75 | References 76 | ----------- 77 | 78 | * Phoon, K.; Huang, S. & Quek, S. 79 | [Implementation of Karhunen-Loeve expansion for simulation using a 80 | wavelet-Galerkin scheme]( 81 | http://www.eng.nus.edu.sg/civil/people/cvepkk/JPaper_2002_vol17.pdf) 82 | Prob. Eng. Mech., 2002, 17, 293-303 83 | 84 | * Ghanem, R. & Spanos, P. 85 | Stochastic Finite Elements: A Spectral Approach (Revised edition) 86 | Dover Publications Inc., 2003, 224 87 | 88 | * Sudret, B. & Der Kiureghian, A. 89 | [Stochastic Finite Element Methods and Reliability, A State-of-the-Art 90 | report](http://www.ibk.ethz.ch/su/publications/Reports/SFE-report-Sudret.pdf>) 91 | University of California, Berkeley, 2000 92 | 93 | * Desceliers, C.; Soize, C. & Ghanem, R. 94 | [Identification of chaos representations of elastic properties of random media 95 | using experimental vibration tests]( 96 | http://hal.archives-ouvertes.fr/docs/00/68/61/50/PDF/publi-2007-CM-39_6_831-838-desceliers-soize-ghanem-preprint.pdf) 97 | Comput. Mech., 2007, 39, 831-838 98 | 99 | -------------------------------------------------------------------------------- /randomfields/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | ============ 6 | Randomfields 7 | ============ 8 | 9 | A module that implements tools for the simulation and identification of 10 | random fields using the Karhunen-Loeve expansion representation. 11 | """ 12 | 13 | # Author: Marine Marcilhac 14 | # Vincent Dubourg 15 | # License: BSD 16 | # This module was implemented by Phimeca Engineering SA, EdF and Institut 17 | # Navier (ENPC). It is shipped as is without any warranty of any kind. 
18 | 19 | from .simulation import KarhunenLoeveExpansion 20 | from .graphs import matrix_plot 21 | 22 | __version__ = 0 23 | -------------------------------------------------------------------------------- /randomfields/graphs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #-*- coding: utf-8 -*- 3 | 4 | import pylab as pl 5 | import numpy as np 6 | import openturns as ot 7 | 8 | 9 | def matrix_plot(X, ot_distribution=None, ot_kernel=None, 10 | labels=None, res=1000, grid=False): 11 | """ 12 | Return a handle to a matplotlib figure containing a 'matrix plot' 13 | representation of the sample in X. It plots: 14 | - the marginal distributions on the diagonal terms, 15 | - the dependograms on the lower terms, 16 | - scatter plots on the upper terms. 17 | One may also add representation of the original distribution provided it 18 | is known, and/or a kernel smoothing (based on OpenTURNS). 19 | 20 | Parameters 21 | ---------- 22 | X: array_like 23 | The sample to plot with shape (n_samples, n_features). 24 | ot_distribution: OpenTURNS Distribution of dimension n_features, optional. 25 | The underlying multivariate distribution if known. 26 | Default is set to None. 27 | ot_kernel: A list of n_features OpenTURNS KernelSmoothing's ready for 28 | build, optional. 29 | Kernel smoothing for the margins. 30 | Default is set to None. 31 | labels: A list of n_features strings, optional. 32 | Variates' names for labelling X & Y axes. 33 | Default is set to None. 34 | res: int, optional. 35 | Number of points used for plotting the marginal PDFs. 36 | Default is set to 1000. 37 | grid: bool, optional. 38 | Whether a grid should be added or not. 39 | Default is set to False (no grid). 40 | 41 | Returns 42 | ------- 43 | ax: matplotlib.Axes instance. 44 | A handle to the matplotlib figure. 
45 | 46 | Example 47 | ------- 48 | >>> import pylab as pl 49 | >>> from phimeca.graphs import plot_matrix 50 | >>> import openturns as ot 51 | >>> probabilistic_model = ot.Normal(3) 52 | >>> sample = probabilistic_model.getSample(100) 53 | >>> ax = plot_matrix(sample, 54 | ot_distribution=X, 55 | ot_kernel=[ot.KernelSmoothing(ot.Epanechnikov())] * 3, 56 | labels=[('$X_%d$' % i) for i in xrange(3)], 57 | grid=True) 58 | >>> pl.show() 59 | """ 60 | 61 | X = np.array(X) 62 | n_samples, n_features = X.shape 63 | if ot_distribution is None: 64 | ranks = np.array(ot.NumericalSample(X).rank()) 65 | else: 66 | ranks = np.zeros_like(X) 67 | for i in xrange(n_features): 68 | ranks[:, i] = np.ravel(ot_distribution.getMarginal(i).computeCDF( 69 | np.atleast_2d(X[:, i]).T)) 70 | ranks[:, i] *= n_samples 71 | 72 | pl.figure(figsize=(8, 8)) 73 | n = 0 74 | for i in xrange(n_features): 75 | for j in xrange(n_features): 76 | n += 1 77 | pl.subplot(n_features, n_features, n) 78 | if i == j: 79 | n_bins = int(1 + np.log2(n_samples)) + 1 80 | pl.hist(X[:, j], bins=n_bins, normed=True, 81 | cumulative=False, bottom=None, 82 | edgecolor='grey', color='grey', alpha=.25) 83 | if ot_distribution is not None: 84 | Xi = ot_distribution.getMarginal(i) 85 | a = Xi.getRange().getLowerBound()[0] 86 | b = Xi.getRange().getUpperBound()[0] 87 | middle = (a + b) / 2. 88 | width = b - a 89 | if Xi.computePDF(a - .1 * width / 2.) == 0.: 90 | a = middle - 1.1 * width / 2. 91 | if Xi.computePDF(b + .1 * width / 2.) == 0.: 92 | b = middle + 1.1 * width / 2. 
93 | support = np.linspace(a, b, res) 94 | pdf = Xi.computePDF(np.atleast_2d(support).T) 95 | pl.plot(support, pdf, color='b', alpha=.5, lw=1.5) 96 | if ot_kernel is not None: 97 | Xi = ot_kernel[i].build(np.atleast_2d(X[:, i]).T) 98 | if ot_distribution is None: 99 | a = Xi.getRange().getLowerBound()[0] 100 | b = Xi.getRange().getUpperBound()[0] 101 | support = np.linspace(a, b, res) 102 | pdf = Xi.computePDF(np.atleast_2d(support).T) 103 | pl.plot(support, pdf, color='r', alpha=.5, lw=1.5) 104 | pl.xticks([pl.xlim()[0], np.mean(pl.xlim()), pl.xlim()[1]]) 105 | pl.yticks([]) 106 | elif i < j: 107 | pl.plot(X[:, j], X[:, i], 108 | 'o', color='grey', alpha=0.25) 109 | pl.xticks([pl.xlim()[0], np.mean(pl.xlim()), pl.xlim()[1]], 110 | ('', ) * 3) 111 | pl.yticks([pl.ylim()[0], np.mean(pl.ylim()), pl.ylim()[1]], 112 | ('', ) * 3) 113 | else: 114 | pl.plot(ranks[:, j].astype(float) / n_samples, 115 | ranks[:, i].astype(float) / n_samples, 116 | 'o', color='grey', alpha=0.25) 117 | pl.xticks([0., 1.]) 118 | pl.yticks([0., 1.]) 119 | 120 | if j == 0 and labels is not None: 121 | pl.ylabel(labels[i]) 122 | 123 | if i == n_features - 1 and labels is not None: 124 | pl.xlabel(labels[j]) 125 | 126 | if grid: 127 | pl.grid() 128 | 129 | return pl.gcf() 130 | -------------------------------------------------------------------------------- /randomfields/simulation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | 4 | import numpy as np 5 | from numpy.core.umath_tests import inner1d 6 | from .utils import get_available_memory 7 | from scipy import linalg 8 | import openturns as ot 9 | 10 | 11 | class KarhunenLoeveExpansion: 12 | 13 | """ 14 | A class that implements the Karhunen-Loeve expansion (KLE) representation 15 | of second-order random fields with arbitrary mean and covariance functions 16 | over a rectangular domain. 
17 | 18 | The Karhunen-Loeve expansion of a random field H reads: 19 | 20 | .. math:: 21 | 22 | \hat{H}(x) = \mu(x) + \sum_i {\sqrt(\lambda_i) \Xi_i \phi_i(x)} 23 | 24 | where: 25 | - x is the indexing variable which spans a given rectangular domain. 26 | - \mu is the given mean of the random field. 27 | - \lambda_i and \phi_i are solutions of the spectral decomposition of 28 | the given autocovariance function C of the random field. 29 | - \Xi_i are uncorrelated, zero-mean and unit-variance random variables 30 | whose distribution depends on the desired random field distribution. 31 | 32 | NB: If the random field is assumed to be Gaussian, then the \Xi 33 | random vector is a standard Gaussian random vector. Otherwise, the 34 | user may use any distribution matching the requirements enunced above. 35 | 36 | Parameters 37 | ---------- 38 | 39 | mean : callable 40 | A mean function for the random field. The function is assumed to take a 41 | unique input X in array_like format with shape (size, dimension). 42 | 43 | covariance : callable 44 | An autocovariance function for the random field. The function is 45 | assumed to take an input XX in array_like format with shape 46 | (size, 2 * dimension). 47 | 48 | truncation_order : integer 49 | The number of terms to keep in the truncated KL expansion. 50 | 51 | domain : array_like with shape (2, dimension) 52 | The boundaries of the rectangular domain on which the random field is 53 | defined [lower_bound, upper_bound]. 54 | 55 | domain_expand_factor : double >= 1., optional 56 | A factor used for expanding the domain at the discretization step. 57 | Indeed, the relative mean squared error usually gets large at the 58 | border. Augmenting the size of the domain prevents this error from 59 | being large in the domain of interest. 60 | Default does not alter the domain (domain_expand_factor = 1.). 61 | 62 | verbose : boolean, optional 63 | A boolean specifying the verbose level. 64 | Default assumes verbose = False. 
65 | 66 | galerkin_scheme : string, optional 67 | A string specifying the basis that should be used in the Galerkin 68 | discretization scheme of the Fredholm integral equation. 69 | Default uses Legendre polynomial basis (galerkin_scheme='legendre'). 70 | TODO: Implement 'haar_wavelet' based Galekin scheme. 71 | 72 | Depending on the chosen Galerkin scheme, other optional parameters might be 73 | specified: 74 | 75 | - For the Legendre Galerkin scheme (galerkin_scheme='legendre'): 76 | 77 | legendre_galerkin_order : integer, optional 78 | An integer specifying the maximum order of the tensorized Legendre 79 | polynomials used for approximating eigenfunctions. 80 | Default uses tensorized Legendre polynoms of maximal order 10. 81 | 82 | legendre_quadrature_order : integer, optional 83 | An integer specifying the required quadrature order for estimating 84 | the integrals using Gauss-Legendre quadrature. 85 | Default uses 2 * legendre_galerkin_order + 1. 86 | 87 | Returns 88 | ------- 89 | 90 | discretized_random_field : callable 91 | The discretized random field taking two inputs X and xi: 92 | - x : array_like with shape (n_points, dimension) 93 | The points on which the random field should be evaluated. 94 | 95 | - xi : array_like with shape (n_samples, truncation_order) 96 | The Karhunen-Loeve coefficients. The truncation_order must 97 | match the one specified at instanciation. 98 | The sample paths are returned as an array with shape 99 | (n_samples, n_points). 100 | 101 | See also 102 | -------- 103 | 104 | self.__call__? 105 | self.compute_coefficients? 106 | self.compute_approximated_covariance? 
107 | """ 108 | 109 | _implemented_galerkin_schemes = ['legendre'] 110 | 111 | def __init__(self, mean, covariance, truncation_order, domain, 112 | domain_expand_factor=1., verbose=False, 113 | galerkin_scheme='legendre', **kwargs): 114 | 115 | # Input checks and storage 116 | if not callable(mean): 117 | raise ValueError('mean must be a callable function.') 118 | else: 119 | self._mean = mean 120 | 121 | if not callable(covariance): 122 | raise ValueError('covariance must be a callable function.') 123 | else: 124 | self._covariance = covariance 125 | 126 | domain = np.atleast_2d(domain) 127 | if domain.shape[0] != 2: 128 | raise ValueError('domain must contain exactly 2 rows') 129 | else: 130 | self._lower_bound = domain[0] 131 | self._upper_bound = domain[1] 132 | 133 | if truncation_order <= 0: 134 | raise ValueError('truncation_order must be a positive integer.') 135 | else: 136 | self._truncation_order = truncation_order 137 | 138 | if type(verbose) is not bool: 139 | raise ValueError('verbose should be of type bool.') 140 | else: 141 | self.verbose = verbose 142 | 143 | if galerkin_scheme not in self._implemented_galerkin_schemes: 144 | raise ValueError('The Galerkin scheme should be selected amongst' 145 | + ' the implemented Galerkin schemes: %s.' 146 | % self._implemented_galerkin_schemes 147 | + ' Got %s instead.' % galerkin_scheme) 148 | else: 149 | self._galerkin_scheme = galerkin_scheme 150 | 151 | # Expand the domain for reducing the discretization error at the 152 | # borders 153 | center = (self._lower_bound + self._upper_bound) / 2. 154 | width = self._upper_bound - self._lower_bound 155 | self._lower_bound = center - width * domain_expand_factor / 2. 156 | self._upper_bound = center + width * domain_expand_factor / 2. 
157 | 158 | # Discretize the random field using the chosen Galerkin scheme 159 | if self._galerkin_scheme == 'legendre': 160 | self._legendre_galerkin_scheme(**kwargs) 161 | 162 | def __call__(self, x, xi): 163 | """ 164 | Calculates sample paths values of the discretized random field. 165 | 166 | Parameters 167 | ---------- 168 | 169 | x: array_like with shape (n_index_values, dimension) 170 | The index values at which the sample path(s) should be calculated. 171 | 172 | xi: array_like with shape (n_sample_paths, truncation_order) 173 | The sample of Karhunen-Loeve coefficients values whose distribution 174 | depends on that of the random field (e.g. for a Gaussian random 175 | field, Xi is a standard Gaussian random vector). 176 | 177 | Returns 178 | ------- 179 | 180 | sample_paths_values: array with shape (n_sample_paths, n_index_values) 181 | The required sample paths values. 182 | """ 183 | 184 | # Input checks 185 | dimension = self._lower_bound.size 186 | truncation_order = self._truncation_order 187 | x = np.atleast_2d(x) 188 | xi = np.atleast_2d(xi) 189 | if x.shape[1] != dimension: 190 | raise ValueError('The number of columns in x must equal the ' 191 | + 'dimension of the random field which is %d.' 192 | % dimension) 193 | if xi.shape[1] != truncation_order: 194 | raise ValueError('The number of columns in xi must equal the ' 195 | + 'truncation order of the random field which ' 196 | + 'is %d.' % truncation_order) 197 | 198 | PHI = np.vstack([np.sqrt(self._eigenvalues[k]) 199 | * self._eigenfunctions[k](x) 200 | for k in range(self._truncation_order)]) 201 | sample_paths_values = np.ravel(self._mean(x)) + np.dot(xi, PHI) 202 | 203 | return sample_paths_values 204 | 205 | def compute_approximated_covariance(self, xx): 206 | """ 207 | Calculates the values of the covariance function of the discretized 208 | random field. 
209 | 210 | Parameters 211 | ---------- 212 | 213 | xx: array_like with shape (n_index_values, 2 * dimension) 214 | The couples of index values at which the covariance should be 215 | calculated. 216 | 217 | Returns 218 | ------- 219 | 220 | covariance_values: array with shape (n_index_values, ) 221 | The required sample paths values. 222 | """ 223 | 224 | # Input checks 225 | dimension = self._lower_bound.size 226 | xx = np.atleast_2d(xx) 227 | if xx.shape[1] != 2 * dimension: 228 | raise ValueError('The number of columns in xx must be %d.' 229 | % (2 * dimension)) 230 | 231 | x1, x2 = xx[:, :dimension], xx[:, dimension:] 232 | PHI1 = np.vstack([np.sqrt(self._eigenvalues[k]) 233 | * self._eigenfunctions[k](x1) 234 | for k in range(self._truncation_order)]) 235 | PHI2 = np.vstack([np.sqrt(self._eigenvalues[k]) 236 | * self._eigenfunctions[k](x2) 237 | for k in range(self._truncation_order)]) 238 | covariance_values = inner1d(PHI1.T, PHI2.T) 239 | 240 | return covariance_values 241 | 242 | def compute_coefficients(self, sample_paths, **kwargs): 243 | """ 244 | Calculates the coefficients in the Karhunen-Loeve expansion by 245 | projection of the observed sample path(s) onto the eigenfunctions. 246 | This uses the appropriate method depending on the chosen Galerkin 247 | scheme. 248 | 249 | Parameters 250 | ---------- 251 | 252 | sample_paths: callable or collection of callables 253 | The observed sample path(s). 254 | 255 | Depending on the chosen Galerkin scheme, other optional parameters 256 | might be specified: 257 | 258 | - For the Legendre Galerkin scheme (galerkin_scheme='legendre'): 259 | 260 | legendre_quadrature_order : integer, optional 261 | An integer specifying the required quadrature order for 262 | estimating the integrals using Gauss-Legendre quadrature. 263 | Default uses the same order as the one used for discretizing 264 | the random field. 
265 | 266 | Returns 267 | ------- 268 | 269 | xi : array with shape (n_sample_paths, truncation_order) 270 | The Karhunen-Loeve coefficients associated to the given 271 | sample path(s). 272 | """ 273 | 274 | # Input checks 275 | if not(callable(sample_paths) or 276 | hasattr(sample_paths, '__getitem__')): 277 | raise ValueError('sample_paths must be callable or a ' 278 | + 'collection of callables.') 279 | 280 | if hasattr(sample_paths, '__getitem__'): 281 | if not callable(sample_paths[0]): 282 | raise ValueError('sample_paths must be callable or a ' 283 | + 'collection of callables.') 284 | else: 285 | sample_paths = [sample_paths] 286 | 287 | # Compute the coefficients using the chosen Galerkin scheme 288 | if self._galerkin_scheme == 'legendre': 289 | return self._compute_coefficients_legendre(sample_paths, **kwargs) 290 | 291 | def _legendre_galerkin_scheme(self, 292 | legendre_galerkin_order=10, 293 | legendre_quadrature_order=None): 294 | 295 | # Input checks 296 | if legendre_galerkin_order <= 0: 297 | raise ValueError('legendre_galerkin_order must be a positive ' 298 | + 'integer!') 299 | 300 | if legendre_quadrature_order is not None: 301 | if legendre_quadrature_order <= 0: 302 | raise ValueError('legendre_quadrature_order must be a ' 303 | + 'positive integer!') 304 | 305 | # Settings 306 | dimension = self._lower_bound.size 307 | truncation_order = self._truncation_order 308 | galerkin_size = ot.EnumerateFunction( 309 | dimension).getStrataCumulatedCardinal(legendre_galerkin_order) 310 | if legendre_quadrature_order is None: 311 | legendre_quadrature_order = 2 * legendre_galerkin_order + 1 312 | 313 | # Check if the current settings are compatible 314 | if truncation_order > galerkin_size: 315 | raise ValueError('The truncation order must be less than or ' 316 | + 317 | 'equal to the size of the functional basis in the chosen ' 318 | + 319 | 'Legendre Galerkin scheme. 
Current size of the galerkin basis ' 320 | + 321 | 'only allows to get %d terms in the KL expansion.' 322 | % galerkin_size) 323 | 324 | # Construction of the Galerkin basis: tensorized Legendre polynomials 325 | tensorized_legendre_polynomial_factory = \ 326 | ot.PolynomialFamilyCollection([ot.LegendreFactory()] * dimension) 327 | tensorized_legendre_polynomial_factory = \ 328 | ot.OrthogonalProductPolynomialFactory( 329 | tensorized_legendre_polynomial_factory) 330 | tensorized_legendre_polynomials = \ 331 | [tensorized_legendre_polynomial_factory.build(i) 332 | for i in range(galerkin_size)] 333 | 334 | # Compute matrix C coefficients using Gauss-Legendre quadrature 335 | polyColl = ot.PolynomialFamilyCollection( 336 | [ot.LegendreFactory()] * dimension * 2) 337 | polynoms = ot.OrthogonalProductPolynomialFactory(polyColl) 338 | U, W = polynoms.getNodesAndWeights( 339 | ot.Indices([legendre_quadrature_order] * dimension * 2)) 340 | W = np.ravel(W) 341 | scale = (self._upper_bound - self._lower_bound) / 2. 342 | shift = (self._upper_bound + self._lower_bound) / 2. 343 | U = np.array(U) 344 | X = np.repeat(scale, 2) * U + np.repeat(shift, 2) 345 | 346 | if self.verbose: 347 | print('Computing matrix C...') 348 | 349 | try: 350 | available_memory = int(.9 * get_available_memory()) 351 | except: 352 | if self.verbose: 353 | print('WRN: Available memory estimation failed! 
' 354 | 'Assuming 1Gb is available (first guess).') 355 | available_memory = 1024 ** 3 356 | max_size = int(available_memory / 8 / galerkin_size ** 2) 357 | batch_size = min(W.size, max_size) 358 | if self.verbose and batch_size < W.size: 359 | print('RAM: %d Mb available' % (available_memory / 1024 ** 2)) 360 | print('RAM: %d allocable terms / %d total terms' % (max_size, 361 | W.size)) 362 | print('RAM: %d loops required' % np.ceil(float(W.size) / max_size)) 363 | while True: 364 | C = np.zeros((galerkin_size, galerkin_size)) 365 | try: 366 | n_done = 0 367 | while n_done < W.size: 368 | covariance_at_X = self._covariance( 369 | X[n_done:(n_done + batch_size)]) 370 | H1 = np.vstack([np.ravel( 371 | tensorized_legendre_polynomials[i]( 372 | U[n_done:(n_done + batch_size), :dimension])) 373 | for i in range(galerkin_size)]) 374 | H2 = np.vstack([np.ravel( 375 | tensorized_legendre_polynomials[i]( 376 | U[n_done:(n_done + batch_size), dimension:])) 377 | for i in range(galerkin_size)]) 378 | C += np.sum(W[np.newaxis, np.newaxis, 379 | n_done:(n_done + batch_size)] 380 | * covariance_at_X[np.newaxis, np.newaxis, :] 381 | * H1[np.newaxis, :, :] 382 | * H2[:, np.newaxis, :], axis=-1) 383 | del covariance_at_X, H1, H2 384 | n_done += batch_size 385 | break 386 | except MemoryError: 387 | batch_size /= 2 388 | C *= np.prod(self._upper_bound - self._lower_bound) ** 2. 
389 | 390 | # Matrix B is orthonormal up to some constant 391 | B = np.diag(np.repeat(np.prod(self._upper_bound - self._lower_bound), 392 | galerkin_size)) 393 | 394 | # Solve the generalized eigenvalue problem C D = L B D in L, D 395 | if self.verbose: 396 | print('Solving generalized eigenvalue problem...') 397 | eigenvalues, eigenvectors = linalg.eigh(C, b=B, lower=True) 398 | eigenvalues, eigenvectors = eigenvalues.real, eigenvectors.real 399 | 400 | # Sort the eigensolutions in the descending order of eigenvalues 401 | order = eigenvalues.argsort()[::-1] 402 | eigenvalues = eigenvalues[order] 403 | eigenvectors = eigenvectors[:, order] 404 | 405 | # Truncate the expansion 406 | eigenvalues = eigenvalues[:truncation_order] 407 | eigenvectors = eigenvectors[:, :truncation_order] 408 | 409 | # Eliminate unsignificant negative eigenvalues 410 | if eigenvalues.min() <= 0.: 411 | if eigenvalues.min() > .01 * eigenvalues.max(): 412 | raise Exception('The smallest significant eigenvalue seems ' 413 | + 414 | 'to be negative... 
Check the positive definiteness of the ' 415 | + 'covariance function.') 416 | else: 417 | truncation_order = np.nonzero(eigenvalues <= 0)[0][0] 418 | eigenvalues = eigenvalues[:truncation_order] 419 | eigenvectors = eigenvectors[:, :truncation_order] 420 | self._truncation_order = truncation_order 421 | print('WRN: truncation_order was too large.') 422 | print('It has been reset to: %d' % truncation_order) 423 | 424 | # Define eigenfunctions 425 | class LegendrePolynomialsBasedEigenFunction(): 426 | 427 | def __init__(self, vector): 428 | self._vector = vector 429 | 430 | def __call__(self, x): 431 | x = np.asanyarray(x) 432 | if x.ndim <= 1: 433 | x = np.atleast_2d(x).T 434 | u = (x - shift) / scale 435 | return np.sum([np.ravel(tensorized_legendre_polynomials[i](u)) 436 | * self._vector[i] 437 | for i in range(truncation_order)], axis=0) 438 | 439 | # Set attributes 440 | self._eigenvalues = eigenvalues 441 | self._eigenfunctions = [LegendrePolynomialsBasedEigenFunction(vector) 442 | for vector in eigenvectors.T] 443 | self._legendre_galerkin_order = legendre_galerkin_order 444 | self._legendre_quadrature_order = legendre_quadrature_order 445 | 446 | def _compute_coefficients_legendre(self, sample_paths, 447 | legendre_quadrature_order=None): 448 | 449 | dimension = self._lower_bound.size 450 | truncation_order = self._truncation_order 451 | if legendre_quadrature_order is None: 452 | legendre_quadrature_order = self._legendre_quadrature_order 453 | elif type(legendre_quadrature_order) is not int \ 454 | or legendre_quadrature_order <= 0: 455 | raise ValueError('legendre_quadrature_order must be a positive ' 456 | + 'integer.') 457 | n_sample_paths = len(sample_paths) 458 | 459 | # Gauss-Legendre quadrature nodes and weights 460 | polyColl = ot.PolynomialFamilyCollection( 461 | [ot.LegendreFactory()] * dimension) 462 | polynoms = ot.OrthogonalProductPolynomialFactory(polyColl) 463 | U, W = polynoms.getNodesAndWeights( 464 | ot.Indices([legendre_quadrature_order] 
* dimension)) 465 | W = np.ravel(W) 466 | U = np.array(U) 467 | scale = (self._upper_bound - self._lower_bound) / 2. 468 | shift = (self._upper_bound + self._lower_bound) / 2. 469 | X = scale * U + shift 470 | 471 | # Compute coefficients 472 | try: 473 | available_memory = int(.9 * get_available_memory()) 474 | except: 475 | if self.verbose: 476 | print('WRN: Available memory estimation failed! ' 477 | 'Assuming 1Gb is available (first guess).') 478 | available_memory = 1024 ** 3 479 | max_size = int(available_memory 480 | / 8 / truncation_order / n_sample_paths) 481 | batch_size = min(W.size, max_size) 482 | if self.verbose and batch_size < W.size: 483 | print('RAM: %d Mb available' % (available_memory / 1024 ** 2)) 484 | print('RAM: %d allocable terms / %d total terms' % (max_size, 485 | W.size)) 486 | print('RAM: %d loops required' % np.ceil(float(W.size) / max_size)) 487 | while True: 488 | coefficients = np.zeros((n_sample_paths, truncation_order)) 489 | try: 490 | n_done = 0 491 | while n_done < W.size: 492 | sample_paths_values = np.vstack([np.ravel(sample_paths[i]( 493 | X[n_done:(n_done + batch_size)])) 494 | for i in range(n_sample_paths)]) 495 | mean_values = np.ravel(self._mean( 496 | X[n_done:(n_done + batch_size)]))[np.newaxis, :] 497 | centered_sample_paths_values = \ 498 | sample_paths_values - mean_values 499 | del sample_paths_values, mean_values 500 | eigenelements_values = np.vstack([self._eigenfunctions[k]( 501 | X[n_done:(n_done + batch_size)]) 502 | / np.sqrt(self._eigenvalues[k]) 503 | for k in range(truncation_order)]) 504 | coefficients += np.sum( 505 | W[np.newaxis, np.newaxis, n_done:(n_done + batch_size)] 506 | * centered_sample_paths_values[:, np.newaxis, :] 507 | * eigenelements_values[np.newaxis, :, :], axis=-1) 508 | del centered_sample_paths_values, eigenelements_values 509 | n_done += batch_size 510 | break 511 | except MemoryError: 512 | batch_size /= 2 513 | coefficients *= np.prod(self._upper_bound - self._lower_bound) 514 | 
def get_available_memory():
    """Return an estimate of the available physical memory, in bytes.

    Raises
    ------
    Exception
        If the amount of available memory cannot be determined on this
        platform.
    """

    if os.name == "nt":
        # Use GlobalMemoryStatusEx: its 64-bit ("ull*") fields do not
        # overflow on machines with more than 4 Gb of RAM, unlike the
        # legacy GlobalMemoryStatus/MEMORYSTATUS pair with c_ulong fields.
        class MEMORYSTATUSEX(ctypes.Structure):
            _fields_ = [
                ("dwLength", ctypes.c_ulong),
                ("dwMemoryLoad", ctypes.c_ulong),
                ("ullTotalPhys", ctypes.c_ulonglong),
                ("ullAvailPhys", ctypes.c_ulonglong),
                ("ullTotalPageFile", ctypes.c_ulonglong),
                ("ullAvailPageFile", ctypes.c_ulonglong),
                ("ullTotalVirtual", ctypes.c_ulonglong),
                ("ullAvailVirtual", ctypes.c_ulonglong),
                ("ullAvailExtendedVirtual", ctypes.c_ulonglong),
            ]

        memory_status = MEMORYSTATUSEX()
        memory_status.dwLength = ctypes.sizeof(MEMORYSTATUSEX)
        ctypes.windll.kernel32.GlobalMemoryStatusEx(
            ctypes.byref(memory_status))
        # Report the *available* physical memory in *bytes*. The previous
        # implementation returned the total physical memory divided by
        # 1024 ** 2 (i.e. expressed in Mb), despite the function's name
        # and docstring.
        memory_in_bytes = memory_status.ullAvailPhys
    elif os.name == "posix":
        # os.sysconf is more robust than parsing the output of `free`,
        # which is Linux-specific and whose column layout varies across
        # procps versions.
        try:
            memory_in_bytes = (os.sysconf("SC_AVPHYS_PAGES")
                               * os.sysconf("SC_PAGE_SIZE"))
        except (AttributeError, ValueError, OSError):
            raise Exception("Can't find out how much memory is available!")
    else:
        raise Exception("Can't find out how much memory is available!")

    return int(memory_in_bytes)
import re


def _read_version():
    """Read __version__ from randomfields/__init__.py without importing it.

    Importing the package at build time would require its runtime
    dependencies (e.g. openturns) to be installed, which is undesirable
    for a mere `setup.py sdist`.
    """
    init_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'randomfields', '__init__.py')
    with open(init_path) as init_file:
        match = re.search(r"^__version__\s*=\s*(.+?)\s*$",
                          init_file.read(), re.M)
    # Fall back to '0' (the current value of __version__) if the
    # declaration cannot be located.
    return match.group(1).strip("'\"") if match else '0'


setup(name='randomfields',
      # The original passed the *literal string* 'randomfields.__version__'
      # as the version; resolve the actual value instead.
      version=_read_version(),
      packages=['randomfields'],
      url="https://github.com/dubourg/python-randomfields",
      description=("A Python module that implements tools for the simulation and identification of random fields using \
the Karhunen-Loeve expansion representation.")
      )