├── .coveragerc ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── pull_request_template.md ├── .gitignore ├── .pylintrc ├── .readthedocs.yaml ├── CITATION.cff ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── GitVersion.yml ├── LICENSE ├── README.rst ├── azure-pipelines.yml ├── binder └── requirements.txt ├── docs ├── Makefile ├── code │ ├── RunModel │ │ ├── Abaqus_Example │ │ │ ├── abaqus_fire_analysis.py │ │ │ ├── abaqus_input.py │ │ │ ├── abaqus_output_script.py │ │ │ ├── extract_abaqus_output.py │ │ │ └── run_sfe_example.sh │ │ ├── ClusterScript_Example │ │ │ ├── add_numbers.py │ │ │ ├── addition_run.py │ │ │ ├── inputRealization.json │ │ │ ├── process_addition_output.py │ │ │ └── run_script.sh │ │ ├── LS-Dyna_Examples │ │ │ ├── multi_job │ │ │ │ ├── dyna_input.k │ │ │ │ ├── dyna_script.py │ │ │ │ └── run_LSDyna.sh │ │ │ └── single_job │ │ │ │ ├── dyna_input.k │ │ │ │ ├── dyna_script.py │ │ │ │ └── run_LSDyna.sh │ │ ├── Matlab_Example │ │ │ ├── matlab_model.py │ │ │ ├── matlab_model_det.py │ │ │ ├── matlab_model_det_index.py │ │ │ ├── matlab_model_det_partial.py │ │ │ ├── matlab_model_sum_scalar.py │ │ │ ├── matlab_model_sum_vector.py │ │ │ ├── matlab_model_sum_vector_indexed.py │ │ │ ├── process_matlab_output.py │ │ │ ├── prod_determinant.m │ │ │ ├── prod_determinant_index.m │ │ │ ├── prod_determinant_partial.m │ │ │ ├── sum_scalar.m │ │ │ ├── sum_vector.m │ │ │ └── sum_vector_indexed.m │ │ ├── OpenSees_Example │ │ │ ├── RCsection.tcl │ │ │ ├── columnsdimensions.tcl │ │ │ ├── import_variables.tcl │ │ │ ├── opensees_model.py │ │ │ ├── process_opensees_output.py │ │ │ ├── run_OpenSees.sh │ │ │ └── test.tcl │ │ ├── Python_Example │ │ │ ├── Python_Model_with_Heterogeneous_Data.ipynb │ │ │ └── python_model.py │ │ ├── README.rst │ │ ├── abaqus_example.py │ │ ├── cluster_script_example.py │ │ ├── ls_dyna_example_multijob.py │ │ ├── ls_dyna_example_singlejob.py │ │ ├── matlab_example.py │ │ ├── opensees_example.py │ │ └── python_example.py │ ├── dimension_reduction │ │ ├── diffusion_maps │ │ │ ├── README.rst │ │ │ ├── diffusion_maps_circle.py │ │ │ ├── dmaps_swiss_role.py │ │ │ └── grassmann_dmaps.py │ │ ├── grassmann │ │ │ ├── README.rst │ │ │ ├── plot_grassmann_distances.py │ │ │ ├── plot_grassmann_karcher.py │ │ │ ├── plot_grassmann_kernel.py │ │ │ └── plot_grassmann_log_exp.py │ │ └── pod │ │ │ ├── DiffusionEquation.py │ │ │ ├── README.rst │ │ │ ├── pod_1.py │ │ │ └── pod_diffusion.py │ ├── distributions │ │ ├── continuous_1d │ │ │ ├── README.rst │ │ │ ├── plot_distribution_continuous_1D.py │ │ │ └── plot_distribution_normal_fitting.py │ │ ├── discrete_1d │ │ │ ├── README.rst │ │ │ └── plot_distribution_discrete_1D.py │ │ ├── multivariate │ │ │ ├── README.rst │ │ │ ├── plot_joint_independent.py │ │ │ ├── plot_multivariate_copulas.py │ │ │ └── plot_multivariate_normal.py │ │ └── user_defined │ │ │ ├── README.rst │ │ │ └── plot_user_defined.py │ ├── inference │ │ ├── bayes_model_selection │ │ │ ├── README.rst │ │ │ ├── bayes_model_selection.py │ │ │ └── local_pfn_models.py │ │ ├── bayes_parameter_estimation │ │ │ ├── README.rst │ │ │ ├── bayes_parameter_IS_regression.py │ │ │ ├── bayes_parameter_MCMC_regression.py │ │ │ ├── local_pfn_models.py │ │ │ ├── plot_bayes_parameter_IS_probability.py │ │ │ └── plot_bayes_parameter_MCMC_probability.py │ │ ├── info_model_selection │ │ │ ├── README.rst │ │ │ ├── pfn_models.py │ │ │ ├── plot_selection_distributions.py │ │ │ └── selection_regression_model.py │ │ └── mle │ │ │ ├── README.rst │ │ │ ├── 
local_pfn_models.py │ │ │ ├── plot_complex_probability_model.py │ │ │ ├── plot_learn_distribution_model.py │ │ │ └── regression_model.py │ ├── reliability │ │ ├── form │ │ │ ├── FORM_linear_function_2d.py │ │ │ ├── FORM_linear_function_3d.py │ │ │ ├── FORM_structural_reliability.py │ │ │ ├── README.rst │ │ │ └── local_pfn.py │ │ ├── inverse_form │ │ │ ├── README.rst │ │ │ ├── inverse_form_cantilever.py │ │ │ └── local_pfn.py │ │ ├── sorm │ │ │ ├── README.rst │ │ │ ├── SORM_nonlinear_function.py │ │ │ ├── local_model4.py │ │ │ └── local_pfn.py │ │ └── subset_simulation │ │ │ ├── README.rst │ │ │ ├── local_Resonance_pfn.py │ │ │ ├── local_Rosenbrock.py │ │ │ ├── local_Rosenbrock_pfn.py │ │ │ ├── plot_subset_rosenbrock.py │ │ │ └── subset_resonance.py │ ├── sampling │ │ ├── adaptive_kriging │ │ │ ├── README.rst │ │ │ ├── adaptive_kriging_branin_hoo.py │ │ │ ├── adaptive_kriging_normal.py │ │ │ ├── branin.png │ │ │ ├── local_BraninHoo.py │ │ │ └── local_series.py │ │ ├── importance_sampling │ │ │ ├── README.rst │ │ │ ├── plot_importance_sampling_diagnostics.py │ │ │ └── plot_importance_sampling_rosenbrock.py │ │ ├── latin_hypercube │ │ │ ├── README.rst │ │ │ ├── plot_latin_hypercube_simple.py │ │ │ └── plot_latin_hypercube_user_criterion.py │ │ ├── mcmc │ │ │ ├── README.rst │ │ │ ├── mcmc_algorithm_comparison.py │ │ │ ├── mcmc_diagnostics.py │ │ │ └── mcmc_metropolis_hastings.py │ │ ├── monte_carlo │ │ │ ├── README.rst │ │ │ └── monte_carlo.py │ │ ├── refined_stratified_sampling │ │ │ ├── README.rst │ │ │ ├── local_python_model_function.py │ │ │ ├── refined_stratified_rectangular_gradient.py │ │ │ ├── refined_stratified_rectangular_random.py │ │ │ ├── refined_stratified_voronoi_gradient.py │ │ │ └── refined_stratified_voronoi_random.py │ │ ├── simplex │ │ │ ├── README.rst │ │ │ └── plot_simplex.py │ │ ├── tempering │ │ │ ├── README.rst │ │ │ ├── local_reliability_funcs.py │ │ │ ├── parallel_tempering.py │ │ │ └── sequential_tempering.py │ │ ├── theta_criterion │ │ │ ├── README.rst │ │ │ └── pce_theta_criterion.py │ │ └── true_stratified_sampling │ │ │ ├── README.rst │ │ │ ├── plot_true_stratified_delaunay.py │ │ │ ├── plot_true_stratified_rectangular.py │ │ │ ├── strata.txt │ │ │ └── true_stratified_voronoi.py │ ├── scientific_machine_learning │ │ ├── README.rst │ │ ├── bayesian_quickstart │ │ │ ├── README.rst │ │ │ ├── bayesian_quickstart_testing.py │ │ │ └── bayesian_quickstart_training.py │ │ ├── bbb_trainer │ │ │ ├── README.rst │ │ │ ├── bbbtrainer_quadratic.py │ │ │ └── bbbtrainer_trig.py │ │ ├── deep_operator_network │ │ │ ├── README.rst │ │ │ ├── bayesian_linear_elastic.py │ │ │ ├── integral_1d.py │ │ │ ├── linear_elastic.py │ │ │ ├── linear_elastic_data.mat │ │ │ └── local_integral_data.py │ │ ├── fourier_neural_operator │ │ │ ├── README.rst │ │ │ ├── burgers.py │ │ │ ├── burgers_solutions_test.pt │ │ │ ├── burgers_solutions_train.pt │ │ │ ├── fno_state_dict.pt │ │ │ ├── initial_conditions_test.pt │ │ │ └── initial_conditions_train.pt │ │ ├── hmc_trainer │ │ │ ├── README.rst │ │ │ ├── burgers_fno.py │ │ │ ├── burgers_solutions_test.pt │ │ │ ├── burgers_solutions_train.pt │ │ │ ├── initial_conditions_test.pt │ │ │ └── initial_conditions_train.pt │ │ ├── mcd_trainer │ │ │ ├── NeuralNetwork_MCD.py │ │ │ └── README.rst │ │ ├── trainer │ │ │ ├── README.rst │ │ │ └── trainer_trig.py │ │ └── unet │ │ │ ├── README.rst │ │ │ ├── data │ │ │ ├── X_tr.npy │ │ │ ├── X_ts.npy │ │ │ ├── X_val.npy │ │ │ ├── Y_tr.npy │ │ │ ├── Y_ts.npy │ │ │ └── Y_val.npy │ │ │ ├── figures │ │ │ ├── 
test_set_predictions.png │ │ │ ├── test_set_predictions_with_mae.png │ │ │ ├── training_validation_loss.png │ │ │ └── unet_weights.pth │ │ │ ├── unet_example.py │ │ │ ├── unet_example_plot.ipynb │ │ │ └── unet_example_plot.py │ ├── sensitivity │ │ ├── chatterjee │ │ │ ├── README.rst │ │ │ ├── chatterjee_exponential.py │ │ │ ├── chatterjee_ishigami.py │ │ │ ├── chatterjee_sobol_func.py │ │ │ ├── local_exponential.py │ │ │ ├── local_ishigami.py │ │ │ └── local_sobol_func.py │ │ ├── comparison │ │ │ ├── README.rst │ │ │ ├── additive.py │ │ │ ├── ishigami.py │ │ │ ├── local_additive.py │ │ │ └── local_ishigami.py │ │ ├── cramer_von_mises │ │ │ ├── README.rst │ │ │ ├── cvm_exponential.py │ │ │ ├── cvm_sobol_func.py │ │ │ ├── local_exponential.py │ │ │ └── local_sobol_func.py │ │ ├── generalised_sobol │ │ │ ├── README.rst │ │ │ ├── generalised_sobol_mechanical_oscillator_ODE.py │ │ │ ├── generalised_sobol_multioutput.py │ │ │ ├── local_mechanical_oscillator_ODE.py │ │ │ └── local_multioutput.py │ │ ├── morris │ │ │ ├── README.rst │ │ │ ├── local_pfn.py │ │ │ ├── plot_12_dimensional_gfunction.py │ │ │ ├── plot_morris_2d_gfunction.py │ │ │ └── plot_morris_nonlinearities.py │ │ └── sobol │ │ │ ├── README.rst │ │ │ ├── local_additive.py │ │ │ ├── local_ishigami.py │ │ │ ├── local_mechanical_oscillator_ODE.py │ │ │ ├── local_sobol_func.py │ │ │ ├── mechanical_oscillator_ODE.py │ │ │ ├── sobol_additive.py │ │ │ ├── sobol_func.py │ │ │ └── sobol_ishigami.py │ ├── stochastic_processes │ │ ├── bispectral │ │ │ ├── README.rst │ │ │ ├── bispectral_1d.py │ │ │ └── bispectral_nd.py │ │ ├── karhunen_loeve_1d │ │ │ ├── README.rst │ │ │ └── plot_karhunen_loeve_1d.py │ │ ├── karhunen_loeve_2d │ │ │ ├── README.rst │ │ │ └── plot_karhunen_loeve_2d.py │ │ ├── spectral │ │ │ ├── README.rst │ │ │ ├── spectral_1d_1v.py │ │ │ ├── spectral_1d_mv.py │ │ │ ├── spectral_nd_1d.py │ │ │ └── spectral_nd_mv.py │ │ └── translation │ │ │ ├── README.rst │ │ │ └── translation.py │ ├── surrogates │ │ ├── gpr │ │ │ ├── README.rst │ │ │ ├── gpr_constraints.py │ │ │ ├── gpr_custom2D.py │ │ │ ├── local_python_model_1Dfunction.py │ │ │ ├── local_python_model_function.py │ │ │ ├── plot_gpr_no_noise.py │ │ │ ├── plot_gpr_noisy.py │ │ │ └── plot_gpr_sine.py │ │ ├── pce │ │ │ ├── Example_Camel_function.png │ │ │ ├── Example_RobotArm_function.png │ │ │ ├── Example_Sphere_function.png │ │ │ ├── README.rst │ │ │ ├── pce_euler_UQ.py │ │ │ ├── pce_ishigami.py │ │ │ ├── pce_robot_arm.py │ │ │ ├── pce_sparsity_lars.py │ │ │ ├── plot_pce_camel.py │ │ │ ├── plot_pce_exponential.py │ │ │ ├── plot_pce_friedman.py │ │ │ ├── plot_pce_helmholtz.py │ │ │ ├── plot_pce_oakley.py │ │ │ ├── plot_pce_sinusoidal.py │ │ │ ├── plot_pce_sphere.py │ │ │ └── plot_pce_wave.py │ │ └── srom │ │ │ ├── README.rst │ │ │ ├── local_eigenvalue_model.py │ │ │ ├── plot_srom_eigenvalues.py │ │ │ ├── plot_srom_gamma.py │ │ │ └── plot_srom_gamma_2.py │ └── transformations │ │ └── nataf │ │ ├── README.rst │ │ └── nataf.py ├── doc.sh ├── make.bat ├── requirements.txt └── source │ ├── _static │ ├── Inference_models.png │ ├── Inference_schematic.png │ ├── JHU_logo.jpg │ ├── Reliability_FORM.png │ ├── Reliability_example_form.png │ ├── Runmodel_directory_1.png │ ├── Runmodel_directory_2.png │ ├── Runmodel_directory_3.png │ ├── Runmodel_workflow.png │ ├── SampleMethods_IS_samples.png │ ├── SampleMethods_MCMC_samples.png │ ├── SampleMethods_Simplex.png │ ├── Transformations_correlate.png │ ├── Transformations_uncorrelate.png │ ├── architecture │ │ ├── adaptive_kriging_functions.png │ │ 
├── dimension_reduction.png │ │ ├── distributions.png │ │ ├── inference.png │ │ ├── mcmc.png │ │ ├── reliability.png │ │ ├── run_model.png │ │ ├── sensitivity.png │ │ ├── stochastic_process.png │ │ ├── stratified_sampling.png │ │ ├── surrogates.png │ │ └── transformations.png │ ├── logo.jpg │ ├── logo.png │ ├── logo2.jpg │ └── morris_indices.png │ ├── architecture.rst │ ├── bibliography.bib │ ├── bibliography.rst │ ├── binder │ └── requirements.txt │ ├── conf.py │ ├── dimension_reduction │ ├── direct_pod.rst │ ├── dmaps.rst │ ├── grassmann │ │ ├── grassmann_interpolation.rst │ │ ├── grassmann_operations.rst │ │ ├── index.rst │ │ └── manifold_projections.rst │ ├── hosvd.rst │ ├── index.rst │ ├── pod.rst │ └── snapshot_pod.rst │ ├── distributions │ ├── continuous_1d_collection.rst │ ├── copulas.rst │ ├── discrete_1d_collection.rst │ ├── distribution_parent.rst │ ├── distributions_continuous_1d.rst │ ├── distributions_discrete_1d.rst │ ├── distributions_multivariate.rst │ ├── index.rst │ ├── joint_from_independent.rst │ ├── joint_from_marginals_copula.rst │ ├── multivariate_distributions.rst │ └── user_defined_distributions.rst │ ├── index.rst │ ├── inference │ ├── bayes_model_selection.rst │ ├── bayes_parameter_estimation.rst │ ├── index.rst │ ├── inference_models.rst │ ├── info_model_selection.rst │ └── mle.rst │ ├── news_doc.rst │ ├── paper.rst │ ├── reliability │ ├── form.rst │ ├── index.rst │ ├── inverse_form.rst │ ├── sorm.rst │ ├── subset.rst │ └── taylor_series.rst │ ├── runmodel_doc.rst │ ├── sampling │ ├── akmcs.rst │ ├── importance_sampling.rst │ ├── index.rst │ ├── latin_hypercube.rst │ ├── latin_hypercube │ │ ├── lhs_class.rst │ │ ├── lhs_criteria.rst │ │ └── lhs_user_criterion.rst │ ├── mcmc │ │ ├── dram.rst │ │ ├── dream.rst │ │ ├── index.rst │ │ ├── mh.rst │ │ ├── mmh.rst │ │ ├── stretch.rst │ │ └── tempering.rst │ ├── monte_carlo.rst │ ├── refined_stratified_sampling.rst │ ├── simplex.rst │ ├── strata │ │ ├── adding_new_strata.rst │ │ ├── delaunay_strata.rst │ │ ├── rectangular_strata.rst │ │ ├── strata_class.rst │ │ └── voronoi_strata.rst │ ├── stratified_sampling.rst │ └── theta_criterion.rst │ ├── scientific_machine_learning │ ├── figures │ │ └── uq4ml.png │ ├── functional │ │ ├── index.rst │ │ ├── losses.rst │ │ └── spectral_conv.rst │ ├── index.rst │ ├── layers │ │ ├── bayesian_baseclass.rst │ │ ├── bayesian_layers.rst │ │ ├── dropout_baseclass.rst │ │ ├── dropout_layers.rst │ │ ├── fourier_layers.rst │ │ ├── index.rst │ │ └── normalizers.rst │ ├── losses.rst │ ├── neural_networks │ │ ├── deep_operator_network.rst │ │ ├── feed_forward_example.txt │ │ ├── feed_forward_neural_network.rst │ │ ├── figures │ │ │ ├── Unet_schematic.pdf │ │ │ ├── approximating_functions.png │ │ │ ├── approximating_operators.png │ │ │ ├── deep_operator_network_diagram.png │ │ │ ├── deep_operator_network_shapes.png │ │ │ └── fourier_network_diagram.pdf │ │ ├── fourier_neural_operator.rst │ │ ├── index.rst │ │ ├── neural_network_parent.rst │ │ └── unet_neural_network.rst │ └── trainers │ │ ├── bbb_trainer.rst │ │ ├── hmc_trainer.rst │ │ ├── index.rst │ │ └── trainer.rst │ ├── sensitivity │ ├── chatterjee.rst │ ├── cramer_von_mises.rst │ ├── generalised_sobol.rst │ ├── index.rst │ ├── morris.rst │ ├── pce.rst │ └── sobol.rst │ ├── sg_execution_times.rst │ ├── stochastic_process │ ├── bispectral_representation.rst │ ├── index.rst │ ├── karhunen_loeve_1d.rst │ ├── karhunen_loeve_2d.rst │ ├── spectral_representation.rst │ └── translation.rst │ ├── surrogates │ ├── gpr.rst │ ├── index.rst │ ├── pce │ │ 
├── pce.rst │ │ ├── physics_informed.rst │ │ ├── polynomial_bases.rst │ │ ├── polynomials.rst │ │ └── regressions.rst │ ├── polynomial_chaos.rst │ └── srom.rst │ ├── transformations │ ├── correlate.rst │ ├── decorrelate.rst │ ├── index.rst │ └── nataf.rst │ └── utilities │ ├── distances │ ├── euclidean_distances.rst │ ├── grassmann_distances.rst │ └── index.rst │ ├── grassmann_point.rst │ ├── index.rst │ └── kernels │ ├── euclidean_kernels.rst │ ├── grassmann_kernels.rst │ ├── index.rst │ └── sum_product_kernels.rst ├── logo.jpg ├── meta.yaml ├── pytest.ini ├── requirements.txt ├── setup.py ├── src └── UQpy │ ├── __init__.py │ ├── dimension_reduction │ ├── __init__.py │ ├── diffusion_maps │ │ ├── DiffusionMaps.py │ │ └── __init__.py │ ├── grassmann_manifold │ │ ├── GrassmannInterpolation.py │ │ ├── GrassmannOperations.py │ │ ├── __init__.py │ │ └── projections │ │ │ ├── SVDProjection.py │ │ │ ├── __init__.py │ │ │ └── baseclass │ │ │ ├── GrassmannProjection.py │ │ │ └── __init__.py │ ├── hosvd │ │ ├── HigherOrderSVD.py │ │ └── __init__.py │ └── pod │ │ ├── DirectPOD.py │ │ ├── SnapshotPOD.py │ │ ├── __init__.py │ │ └── baseclass │ │ ├── POD.py │ │ └── __init__.py │ ├── distributions │ ├── __init__.py │ ├── baseclass │ │ ├── Copula.py │ │ ├── Distribution.py │ │ ├── Distribution1D.py │ │ ├── DistributionContinuous1D.py │ │ ├── DistributionDiscrete1D.py │ │ ├── DistributionND.py │ │ └── __init__.py │ ├── collection │ │ ├── Beta.py │ │ ├── Binomial.py │ │ ├── Cauchy.py │ │ ├── ChiSquare.py │ │ ├── Exponential.py │ │ ├── Gamma.py │ │ ├── GeneralizedExtreme.py │ │ ├── InverseGaussian.py │ │ ├── JointCopula.py │ │ ├── JointIndependent.py │ │ ├── Laplace.py │ │ ├── Levy.py │ │ ├── Logistic.py │ │ ├── Lognormal.py │ │ ├── Maxwell.py │ │ ├── Multinomial.py │ │ ├── MultivariateNormal.py │ │ ├── Normal.py │ │ ├── Pareto.py │ │ ├── Poisson.py │ │ ├── Rayleigh.py │ │ ├── TruncatedNormal.py │ │ ├── Uniform.py │ │ └── __init__.py │ └── copulas │ │ ├── Clayton.py │ │ ├── Frank.py │ │ ├── Gumbel.py │ │ └── __init__.py │ ├── inference │ ├── BayesModelSelection.py │ ├── BayesParameterEstimation.py │ ├── InformationModelSelection.py │ ├── MLE.py │ ├── __init__.py │ ├── evidence_methods │ │ ├── HarmonicMean.py │ │ ├── __init__.py │ │ └── baseclass │ │ │ ├── EvidenceMethod.py │ │ │ └── __init__.py │ ├── inference_models │ │ ├── ComputationalModel.py │ │ ├── DistributionModel.py │ │ ├── LogLikelihoodModel.py │ │ ├── __init__.py │ │ └── baseclass │ │ │ ├── InferenceModel.py │ │ │ └── __init__.py │ └── information_criteria │ │ ├── AIC.py │ │ ├── AICc.py │ │ ├── BIC.py │ │ ├── __init__.py │ │ └── baseclass │ │ ├── InformationCriterion.py │ │ └── __init__.py │ ├── reliability │ ├── SubsetSimulation.py │ ├── __init__.py │ └── taylor_series │ │ ├── FORM.py │ │ ├── InverseFORM.py │ │ ├── SORM.py │ │ ├── __init__.py │ │ └── baseclass │ │ ├── TaylorSeries.py │ │ └── __init__.py │ ├── run_model │ ├── RunModel.py │ ├── __init__.py │ └── model_execution │ │ ├── ClusterExecution.py │ │ ├── ParallelExecution.py │ │ ├── PythonModel.py │ │ ├── SerialExecution.py │ │ ├── ThirdPartyModel.py │ │ └── __init__.py │ ├── sampling │ ├── AdaptiveKriging.py │ ├── ImportanceSampling.py │ ├── MonteCarloSampling.py │ ├── SimplexSampling.py │ ├── ThetaCriterionPCE.py │ ├── __init__.py │ ├── adaptive_kriging_functions │ │ ├── ExpectedFeasibility.py │ │ ├── ExpectedImprovement.py │ │ ├── ExpectedImprovementGlobalFit.py │ │ ├── UFunction.py │ │ ├── WeightedUFunction.py │ │ ├── __init__.py │ │ └── baseclass │ │ │ ├── LearningFunction.py │ │ │ 
└── __init__.py │ ├── mcmc │ │ ├── DRAM.py │ │ ├── DREAM.py │ │ ├── MetropolisHastings.py │ │ ├── ModifiedMetropolisHastings.py │ │ ├── Stretch.py │ │ ├── __init__.py │ │ ├── baseclass │ │ │ ├── MCMC.py │ │ │ └── __init__.py │ │ └── tempering_mcmc │ │ │ ├── ParallelTemperingMCMC.py │ │ │ ├── SequentialTemperingMCMC.py │ │ │ ├── __init__.py │ │ │ └── baseclass │ │ │ ├── TemperingMCMC.py │ │ │ └── __init__.py │ └── stratified_sampling │ │ ├── LatinHypercubeSampling.py │ │ ├── RefinedStratifiedSampling.py │ │ ├── TrueStratifiedSampling.py │ │ ├── __init__.py │ │ ├── baseclass │ │ ├── StratifiedSampling.py │ │ └── __init__.py │ │ ├── latin_hypercube_criteria │ │ ├── Centered.py │ │ ├── MaxiMin.py │ │ ├── MinCorrelation.py │ │ ├── Random.py │ │ ├── __init__.py │ │ └── baseclass │ │ │ ├── Criterion.py │ │ │ └── __init__.py │ │ ├── refinement │ │ ├── GradientEnhancedRefinement.py │ │ ├── RandomRefinement.py │ │ ├── __init__.py │ │ └── baseclass │ │ │ ├── Refinement.py │ │ │ └── __init__.py │ │ └── strata │ │ ├── DelaunayStrata.py │ │ ├── RectangularStrata.py │ │ ├── SamplingCriterion.py │ │ ├── VoronoiStrata.py │ │ ├── __init__.py │ │ └── baseclass │ │ ├── Strata.py │ │ └── __init__.py │ ├── scientific_machine_learning │ ├── __init__.py │ ├── baseclass │ │ ├── Layer.py │ │ ├── Loss.py │ │ ├── NeuralNetwork.py │ │ ├── NormalBayesianLayer.py │ │ ├── ProbabilisticDropoutLayer.py │ │ └── __init__.py │ ├── functional │ │ ├── __init__.py │ │ ├── gaussian_kullback_leibler_divergence.py │ │ ├── generalized_jensen_shannon_divergence.py │ │ ├── geometric_jensen_shannon_divergence.py │ │ ├── mc_kullback_leibler_divergence.py │ │ ├── spectral_conv1d.py │ │ ├── spectral_conv2d.py │ │ └── spectral_conv3d.py │ ├── layers │ │ ├── BayesianConv1d.py │ │ ├── BayesianConv2d.py │ │ ├── BayesianConv3d.py │ │ ├── BayesianFourier1d.py │ │ ├── BayesianFourier2d.py │ │ ├── BayesianFourier3d.py │ │ ├── BayesianLinear.py │ │ ├── Fourier1d.py │ │ ├── Fourier2d.py │ │ ├── Fourier3d.py │ │ ├── GaussianNormalizer.py │ │ ├── Permutation.py │ │ ├── ProbabilisticDropout.py │ │ ├── ProbabilisticDropout1d.py │ │ ├── ProbabilisticDropout2d.py │ │ ├── ProbabilisticDropout3d.py │ │ ├── RangeNormalizer.py │ │ └── __init__.py │ ├── losses │ │ ├── GaussianKullbackLeiblerDivergence.py │ │ ├── GeneralizedJensenShannonDivergence.py │ │ ├── GeometricJensenShannonDivergence.py │ │ ├── LpLoss.py │ │ ├── MCKullbackLeiblerDivergence.py │ │ └── __init__.py │ ├── neural_networks │ │ ├── DeepOperatorNetwork.py │ │ ├── FeedForwardNeuralNetwork.py │ │ ├── Unet.py │ │ └── __init__.py │ └── trainers │ │ ├── BBBTrainer.py │ │ ├── Trainer.py │ │ └── __init__.py │ ├── sensitivity │ ├── ChatterjeeSensitivity.py │ ├── CramerVonMisesSensitivity.py │ ├── GeneralisedSobolSensitivity.py │ ├── MorrisSensitivity.py │ ├── PceSensitivity.py │ ├── PostProcess.py │ ├── SobolSensitivity.py │ ├── __init__.py │ └── baseclass │ │ ├── PickFreeze.py │ │ ├── Sensitivity.py │ │ └── __init__.py │ ├── stochastic_process │ ├── BispectralRepresentation.py │ ├── InverseTranslation.py │ ├── KarhunenLoeveExpansion.py │ ├── KarhunenLoeveExpansion2D.py │ ├── SpectralRepresentation.py │ ├── Translation.py │ ├── __init__.py │ └── supportive │ │ ├── __init__.py │ │ ├── inverse_wiener_khinchin_transform.py │ │ ├── scaling_correlation_function.py │ │ └── wiener_khinchin_transform.py │ ├── surrogates │ ├── __init__.py │ ├── baseclass │ │ ├── Surrogate.py │ │ └── __init__.py │ ├── gaussian_process │ │ ├── GaussianProcessRegression.py │ │ ├── __init__.py │ │ ├── constraints │ │ │ ├── 
NonNegative.py │ │ │ ├── __init__.py │ │ │ └── baseclass │ │ │ │ ├── Constraints.py │ │ │ │ └── __init__.py │ │ └── regression_models │ │ │ ├── ConstantRegression.py │ │ │ ├── LinearRegression.py │ │ │ ├── QuadraticRegression.py │ │ │ ├── __init__.py │ │ │ └── baseclass │ │ │ ├── Regression.py │ │ │ └── __init__.py │ ├── polynomial_chaos │ │ ├── PolynomialChaosExpansion.py │ │ ├── __init__.py │ │ ├── physics_informed │ │ │ ├── ConstrainedPCE.py │ │ │ ├── PdeData.py │ │ │ ├── PdePCE.py │ │ │ ├── ReducedPCE.py │ │ │ ├── Utilities.py │ │ │ └── __init__.py │ │ ├── polynomials │ │ │ ├── Hermite.py │ │ │ ├── HyperbolicBasis.py │ │ │ ├── Legendre.py │ │ │ ├── PolynomialsND.py │ │ │ ├── TensorProductBasis.py │ │ │ ├── TotalDegreeBasis.py │ │ │ ├── __init__.py │ │ │ └── baseclass │ │ │ │ ├── PolynomialBasis.py │ │ │ │ ├── Polynomials.py │ │ │ │ └── __init__.py │ │ └── regressions │ │ │ ├── LassoRegression.py │ │ │ ├── LeastAngleRegression.py │ │ │ ├── LeastSquareRegression.py │ │ │ ├── RidgeRegression.py │ │ │ ├── __init__.py │ │ │ └── baseclass │ │ │ ├── Regression.py │ │ │ └── __init__.py │ └── stochastic_reduced_order_models │ │ ├── SROM.py │ │ └── __init__.py │ ├── transformations │ ├── Correlate.py │ ├── Decorrelate.py │ ├── Nataf.py │ └── __init__.py │ └── utilities │ ├── Constants.py │ ├── DistanceMetric.py │ ├── FminCobyla.py │ ├── GrassmannPoint.py │ ├── MinimizeOptimizer.py │ ├── NoPublicConstructor.py │ ├── UQpyLoggingFormatter.py │ ├── Utilities.py │ ├── ValidationTypes.py │ ├── __init__.py │ ├── distances │ ├── __init__.py │ ├── baseclass │ │ ├── Distance.py │ │ ├── EuclideanDistance.py │ │ ├── GrassmannianDistance.py │ │ └── __init__.py │ ├── euclidean_distances │ │ ├── BrayCurtisDistance.py │ │ ├── CanberraDistance.py │ │ ├── ChebyshevDistance.py │ │ ├── CityBlockDistance.py │ │ ├── CorrelationDistance.py │ │ ├── CosineDistance.py │ │ ├── L2Distance.py │ │ ├── MinkowskiDistance.py │ │ └── __init__.py │ └── grassmannian_distances │ │ ├── AsimovDistance.py │ │ ├── BinetCauchyDistance.py │ │ ├── FubiniStudyDistance.py │ │ ├── GeodesicDistance.py │ │ ├── MartinDistance.py │ │ ├── ProcrustesDistance.py │ │ ├── ProjectionDistance.py │ │ ├── SpectralDistance.py │ │ └── __init__.py │ └── kernels │ ├── GaussianKernel.py │ ├── __init__.py │ ├── baseclass │ ├── EuclideanKernel.py │ ├── GrassmannianKernel.py │ ├── Kernel.py │ └── __init__.py │ ├── euclidean_kernels │ ├── Matern.py │ ├── RBF.py │ └── __init__.py │ └── grassmannian_kernels │ ├── BinetCauchyKernel.py │ ├── ProjectionKernel.py │ └── __init__.py └── tests ├── integration_tests └── scientific_machine_learning │ ├── probabilistic_dropout │ ├── test_dropout1d_integration.py │ ├── test_dropout2d_integration.py │ ├── test_dropout3d_integration.py │ └── test_dropout_integration.py │ ├── test_feed_forward_methods.py │ └── trainers │ ├── test_bbbtrainer_integration.py │ └── test_trainer_integration.py └── unit_tests ├── dimension_reduction ├── test_POD.py ├── test_distances.py ├── test_dmaps.py ├── test_grassman.py ├── test_karcher.py ├── test_kernel.py └── test_log_exp_maps.py ├── distributions ├── test__independent_distributions.py └── test_distribution_methods.py ├── eigenvalue_model.py ├── inference ├── data_ex1a.txt ├── pfn_cubic.py ├── pfn_linear.py ├── pfn_models.py ├── pfn_quadratic.py ├── test_bayes_model_selection.py ├── test_bayes_parameter_estimation.py ├── test_inference_distribution.py ├── test_inference_runmodel.py ├── test_info_model_selection.py └── test_mle.py ├── pfn.py ├── pfn_models.py ├── python_model_1Dfunction.py ├── 
python_model_function.py ├── reliability ├── Resonance_pfn.py ├── Rosenbrock.py ├── Rosenbrock_pfn.py ├── example_7_2.py ├── pfn.py ├── pfn1.py ├── pfn2.py ├── pfn3.py ├── pfn4.py ├── pfn5.py ├── test_form.py ├── test_inverse_form.py ├── test_sorm.py ├── test_subset.py └── test_taylor_series.py ├── run_model ├── process_third_party_output.py ├── process_third_party_output_blank.py ├── process_third_party_output_class.py ├── python_model.py ├── python_model_blank.py ├── python_model_class.py ├── python_model_function.py ├── python_model_sum_scalar.py ├── python_model_sum_scalar_default.py ├── sum_scalar.py ├── sum_scalar_default.py └── test_RunModel.py ├── sampling ├── BraninHoo.py ├── MCMC │ └── test_mcmc_algorithms.py ├── python_model_1Dfunction.py ├── python_model_function.py ├── series.py ├── test_adaptive_kriging.py ├── test_importance_sampling.py ├── test_latin_hypercube.py ├── test_monte_carlo.py ├── test_refined_stratified.py ├── test_tempering.py └── test_true_stratified.py ├── scientific_machine_learning ├── functional │ ├── test_functional_gaussian_kl_divergence.py │ ├── test_functional_generalized_js_divergence.py │ ├── test_functional_geometric_js_divergence.py │ ├── test_functional_mc_kl_divergence.py │ ├── test_spectral_conv1d.py │ ├── test_spectral_conv2d.py │ └── test_spectral_conv3d.py ├── layers │ ├── test_bayesian_conv1d.py │ ├── test_bayesian_conv2d.py │ ├── test_bayesian_conv3d.py │ ├── test_bayesian_fourier1d.py │ ├── test_bayesian_fourier2d.py │ ├── test_bayesian_fourier3d.py │ ├── test_bayesian_linear.py │ ├── test_dropout.py │ ├── test_dropout1d.py │ ├── test_dropout2d.py │ ├── test_dropout3d.py │ ├── test_fourier1d.py │ ├── test_fourier2d.py │ ├── test_fourier3d.py │ ├── test_gaussian_normalizer.py │ ├── test_permutation.py │ └── test_range_normalizer.py ├── losses │ ├── test_gaussian_kl_divergence.py │ ├── test_generalized_js_divergence.py │ ├── test_geometric_js_divergence.py │ ├── test_lp_loss.py │ └── test_mc_kl_divergence.py └── neural_networks │ ├── test_bayesian_neural_network.py │ ├── test_deep_operator_network.py │ ├── test_feed_forward_neural_network.py │ └── test_u_net.py ├── sensitivity ├── exponential.py ├── ishigami.py ├── multioutput.py ├── pfn.py ├── sobol_func.py ├── test_baseclass.py ├── test_chatterjee.py ├── test_cramer_von_mises.py ├── test_generalised_sobol.py ├── test_morris.py └── test_sobol.py ├── series.py ├── stochastic_process ├── test_bispectral_1d.py ├── test_inverse_translation.py ├── test_karhunen_loeve_1d.py ├── test_karhunen_loeve_2d.py ├── test_spectral_1d_1v.py ├── test_spectral_1d_mv.py ├── test_spectral_nd_1v.py ├── test_spectral_nd_mv.py └── test_translation.py ├── strata.txt ├── surrogates ├── python_model_1Dfunction.py ├── python_model_function.py ├── test_gpr.py └── test_pce.py └── transformations ├── test_correlate.py ├── test_decorrelate.py └── test_nataf.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | omit = tests/* 3 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. 
Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Additional context** 32 | Add any other context about the problem here. -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | sphinx: 3 | builder: html 4 | configuration: docs/source/conf.py 5 | fail_on_warning: False 6 | build: 7 | os: ubuntu-20.04 8 | tools: 9 | python: "3.9" 10 | python: 11 | install: 12 | - requirements: docs/requirements.txt 13 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you use this software, please cite it as below." 3 | title: "UQpy" 4 | version: 3.0.0 5 | date-released: 2021-10-23 6 | url: "https://github.com/SURGroup/UQpy" 7 | preferred-citation: 8 | type: article 9 | authors: 10 | - family-names: "Olivier" 11 | given-names: "Audrey" 12 | - family-names: "Giovanis" 13 | given-names: "Dimitris" 14 | - family-names: "B.S." 
15 | given-names: "Aakash" 16 | - family-names: "Chauhan" 17 | given-names: "Mohit" 18 | - family-names: "Vandanapu" 19 | given-names: "Lohit" 20 | - family-names: "Shields" 21 | given-names: "Michael" 22 | doi: "https://doi.org/10.1016/j.jocs.2020.101204" 23 | journal: "Journal of Computational Science" 24 | month: 9 25 | start: 101204 26 | title: "UQpy: A general purpose Python package and development environment for uncertainty quantification" 27 | volume: 47 28 | year: 2020 29 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | #RUN curl -fsSL https://get.docker.com -o get-docker.sh 2 | #RUN chmod +x get-docker.sh 3 | #RUN sh get-docker.sh 4 | 5 | # Build the image based on the official Python version 3.9 image 6 | FROM python:3.9 7 | 8 | # Use RUN to install the UQpy package (and its dependencies) via pip, Python's package manager 9 | RUN pip3 install UQpy -------------------------------------------------------------------------------- /GitVersion.yml: -------------------------------------------------------------------------------- 1 | mode: Mainline 2 | major-version-bump-message: '\+semver:\s?(breaking|major)' 3 | minor-version-bump-message: '\+semver:\s?(feature|minor)' 4 | patch-version-bump-message: '\+semver:\s?(fix|patch)' 5 | commit-message-incrementing: Enabled 6 | branches: {} 7 | ignore: 8 | sha: [] 9 | merge-message-formats: {} 10 | 11 | -------------------------------------------------------------------------------- /binder/requirements.txt: -------------------------------------------------------------------------------- 1 | UQpy -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/code/RunModel/Abaqus_Example/abaqus_fire_analysis.py: -------------------------------------------------------------------------------- 1 | import fire 2 | import os 3 | 4 | 5 | def run_fire_analysis(index): 6 | index = int(index) 7 | print('Example: Started analysis for sample %d' % index) 8 | abaqus_script_path = os.path.join(os.getcwd(), 'InputFiles', 'abaqus_input_' + str(index) + ".py") 9 | command = "abaqus cae nogui=" + abaqus_script_path 10 | try: 11 | o = os.system(command) 12 | if o == 0: 13 | print('Example: Ran successfully.') 14 | except Exception as err: 15 | print(err) 16 | 17 | 18 | if __name__ == '__main__': 19 | fire.Fire(run_fire_analysis) 20 | -------------------------------------------------------------------------------- /docs/code/RunModel/ClusterScript_Example/add_numbers.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import json 4 | import numpy as np 5 | 6 | 7 | def addNumbers(): 8 | inputPath = sys.argv[1] 9 | outputPath = sys.argv[2] 10 | 11 | # Open JSON file 12 | with open(inputPath, "r") as jsonFile: 13 | data = json.load(jsonFile) 14 | 15 | # Read generated numbers 16 | number1 = data["number1"] 17 | number2 = data["number2"] 18 | 19 | randomAddition = number1 + number2 20 | 21 | # Write addition to file 22 | with open(outputPath, 'w') as outputFile: 23 | outputFile.write('{}\n'.format(randomAddition)) 24 | 25 | 26 | if __name__ == '__main__': 27 | addNumbers() 28 | -------------------------------------------------------------------------------- /docs/code/RunModel/ClusterScript_Example/addition_run.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import fire 4 | 5 | 6 | def runAddition(index): 7 | index = int(index) 8 | 9 | inputRealizationPath = os.path.join(os.getcwd(), 'run_' + str(index), 'InputFiles', 'inputRealization_' \ 10 | + str(index) + ".json") 11 | outputPath = os.path.join(os.getcwd(), 'OutputFiles') 12 | 13 | # This is where pre-processing commands would be executed prior to running the cluster script. 
14 | command1 = ("echo \"This is where pre-processing would be happening\"") 15 | 16 | os.system(command1) 17 | 18 | 19 | if __name__ == '__main__': 20 | fire.Fire(runAddition) 21 | -------------------------------------------------------------------------------- /docs/code/RunModel/ClusterScript_Example/inputRealization.json: -------------------------------------------------------------------------------- 1 | { 2 | "number1" : , 3 | "number2" : 4 | } 5 | -------------------------------------------------------------------------------- /docs/code/RunModel/ClusterScript_Example/process_addition_output.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from pathlib import Path 3 | 4 | 5 | class OutputProcessor: 6 | 7 | def __init__(self, index): 8 | filePath = Path("./OutputFiles/qoiFile_" + str(index) + ".txt") 9 | self.numberOfColumns = 0 10 | self.numberOfLines = 0 11 | addedNumbers = [] 12 | 13 | # Check if file exists 14 | if filePath.is_file(): 15 | # Now, open and read data 16 | with open(filePath) as f: 17 | for line in f: 18 | currentLine = line.split() 19 | 20 | if len(currentLine) != 0: 21 | addedNumbers.append(currentLine[:]) 22 | 23 | if not addedNumbers: 24 | self.qoi = np.empty(shape=(0, 0)) 25 | else: 26 | self.qoi = np.vstack(addedNumbers) 27 | -------------------------------------------------------------------------------- /docs/code/RunModel/LS-Dyna_Examples/multi_job/dyna_script.py: -------------------------------------------------------------------------------- 1 | import os 2 | import fire 3 | import numpy as np 4 | import shutil 5 | 6 | 7 | def run_dyna_model(index): 8 | index = int(index) 9 | input_file_name = 'dyna_input_' + str(index) + '.k' 10 | input_file_path = os.path.join(os.getcwd(), 'InputFiles', input_file_name) 11 | print(input_file_path) 12 | 13 | command = 'ls-dyna i=' + input_file_path + ' memory=300000000' 14 | command1 = 'rm d3* adptmp *.inc *.tmp scr* disk* mes* kill* bg*' 15 | 16 | print(command) 17 | os.system(command) 18 | os.system(command1) 19 | 20 | 21 | if __name__ == '__main__': 22 | fire.Fire(run_dyna_model) 23 | -------------------------------------------------------------------------------- /docs/code/RunModel/LS-Dyna_Examples/multi_job/run_LSDyna.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #SBATCH 4 | #SBATCH --job-name=UQpy_LSDyna_Test_Parallel 5 | #SBATCH --time=00-2:00:00 6 | #SBATCH --nodes=3 7 | #SBATCH --ntasks-per-node=24 8 | #SBATCH --partition=parallel 9 | #SBATCH --mail-type=end 10 | #SBATCH --mail-user=michael.shields@jhu.edu 11 | 12 | module load ls-dyna/10.1.0 13 | module load python 14 | module load parallel 15 | 16 | #python run_LSDyna_python.py 17 | python dyna_model.py 18 | 19 | 20 | 21 | #echo ">>>Begin LS-Dyna test Shields..." 22 | #ls-dyna i=Shields_5.k memory=300000000 23 | #echo ">>>Finish LS-Dyna test!" 
24 | -------------------------------------------------------------------------------- /docs/code/RunModel/LS-Dyna_Examples/single_job/dyna_script.py: -------------------------------------------------------------------------------- 1 | import os 2 | import fire 3 | import numpy as np 4 | import shutil 5 | 6 | 7 | def run_dyna_model(index): 8 | index = int(index) 9 | print(os.getcwd()) 10 | input_file_name = 'dyna_input_' + str(index) + '.k' 11 | input_file_path = os.path.join(os.getcwd(), 'InputFiles', input_file_name) 12 | print(input_file_path) 13 | 14 | # command = 'mkdir junk' 15 | command = 'ls-dyna i=' + input_file_path + ' memory=300000000' 16 | command1 = 'rm d3* adptmp *.inc *.tmp scr* disk* mes* kill* bg*' 17 | 18 | print(command) 19 | os.system(command) 20 | os.system(command1) 21 | 22 | 23 | if __name__ == '__main__': 24 | fire.Fire(run_dyna_model) 25 | -------------------------------------------------------------------------------- /docs/code/RunModel/LS-Dyna_Examples/single_job/run_LSDyna.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #SBATCH 4 | #SBATCH --job-name=UQpy_LSDyna_Test_Parallel 5 | #SBATCH --time=00-2:00:00 6 | #SBATCH --nodes=2 7 | #SBATCH --ntasks-per-node=24 8 | #SBATCH --partition=parallel 9 | #SBATCH --mail-type=end 10 | #SBATCH --mail-user=michael.shields@jhu.edu 11 | 12 | module load ls-dyna/10.1.0 13 | module load python 14 | module load parallel 15 | 16 | #python run_LSDyna_python.py 17 | python dyna_model.py 18 | 19 | 20 | 21 | #echo ">>>Begin LS-Dyna test Shields..." 22 | #ls-dyna i=Shields_5.k memory=300000000 23 | #echo ">>>Finish LS-Dyna test!" 24 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/matlab_model.py: -------------------------------------------------------------------------------- 1 | import fire 2 | import os 3 | 4 | 5 | def matlab(index): 6 | command1 = "cp ./InputFiles/dummy_model_" + str(index) + ".m ." 7 | command2 = "/Applications/MATLAB_R2018a.app/bin/matlab " \ 8 | "-nosplash -nojvm -nodisplay -nodesktop -r 'run dummy_model_" + str(index) + ".m; exit'" 9 | command3 = "mv ./OutputFiles/oupt.out ./OutputFiles/oupt_" + str(index) + ".out" 10 | command4 = "rm dummy_model_" + str(index) + ".m" 11 | os.system(command1) 12 | os.system(command2) 13 | os.system(command3) 14 | os.system(command4) 15 | 16 | 17 | if __name__ == '__main__': 18 | fire.Fire(matlab) 19 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/matlab_model_det.py: -------------------------------------------------------------------------------- 1 | import fire 2 | import os 3 | 4 | 5 | def matlab(index): 6 | command1 = "cp ./InputFiles/prod_determinant_" + str(index) + ".m ." 7 | # The user will need to modify command2 to point to the Matlab application on their system. 
8 | command2 = "/Applications/MATLAB_R2018a.app/bin/matlab " \ 9 | "-nosplash -nojvm -nodisplay -nodesktop -r 'run prod_determinant_" + str(index) + ".m; exit'" 10 | command3 = "mv ./OutputFiles/oupt.out ./OutputFiles/oupt_" + str(index) + ".out" 11 | command4 = "rm prod_determinant_" + str(index) + ".m" 12 | os.system(command1) 13 | os.system(command2) 14 | os.system(command3) 15 | os.system(command4) 16 | 17 | 18 | if __name__ == '__main__': 19 | fire.Fire(matlab) 20 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/matlab_model_det_index.py: -------------------------------------------------------------------------------- 1 | import fire 2 | import os 3 | 4 | 5 | def matlab(index): 6 | command1 = "cp ./InputFiles/prod_determinant_index_" + str(index) + ".m ." 7 | # The user will need to modify command2 to point to the Matlab application on their system. 8 | command2 = "/Applications/MATLAB_R2018a.app/bin/matlab " \ 9 | "-nosplash -nojvm -nodisplay -nodesktop -r 'run prod_determinant_index_" + str(index) + ".m; exit'" 10 | command3 = "mv ./OutputFiles/oupt.out ./OutputFiles/oupt_" + str(index) + ".out" 11 | command4 = "rm prod_determinant_index_" + str(index) + ".m" 12 | os.system(command1) 13 | os.system(command2) 14 | os.system(command3) 15 | os.system(command4) 16 | 17 | 18 | if __name__ == '__main__': 19 | fire.Fire(matlab) 20 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/matlab_model_det_partial.py: -------------------------------------------------------------------------------- 1 | import fire 2 | import os 3 | 4 | 5 | def matlab(index): 6 | command1 = "cp ./InputFiles/prod_determinant_partial_" + str(index) + ".m ." 7 | # The user will need to modify command2 to point to the Matlab application on their system. 8 | command2 = "/Applications/MATLAB_R2018a.app/bin/matlab " \ 9 | "-nosplash -nojvm -nodisplay -nodesktop -r 'run prod_determinant_partial_" + str(index) + ".m; exit'" 10 | command3 = "mv ./OutputFiles/oupt.out ./OutputFiles/oupt_" + str(index) + ".out" 11 | command4 = "rm prod_determinant_partial_" + str(index) + ".m" 12 | os.system(command1) 13 | os.system(command2) 14 | os.system(command3) 15 | os.system(command4) 16 | 17 | 18 | if __name__ == '__main__': 19 | fire.Fire(matlab) 20 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/matlab_model_sum_scalar.py: -------------------------------------------------------------------------------- 1 | import fire 2 | import os 3 | 4 | 5 | def matlab(index): 6 | command1 = "cp ./InputFiles/sum_scalar_" + str(index) + ".m ." 7 | # The user will need to modify command2 to point to the Matlab application on their system. 
8 | command2 = "/Applications/MATLAB_R2018a.app/bin/matlab " \ 9 | "-nosplash -nojvm -nodisplay -nodesktop -r 'run sum_scalar_" + str(index) + ".m; exit'" 10 | command3 = "mv ./OutputFiles/oupt.out ./OutputFiles/oupt_" + str(index) + ".out" 11 | command4 = "rm sum_scalar_" + str(index) + ".m" 12 | os.system(command1) 13 | os.system(command2) 14 | os.system(command3) 15 | os.system(command4) 16 | 17 | 18 | if __name__ == '__main__': 19 | fire.Fire(matlab) 20 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/matlab_model_sum_vector.py: -------------------------------------------------------------------------------- 1 | import fire 2 | import os 3 | 4 | 5 | def matlab(index): 6 | command1 = "cp ./InputFiles/sum_vector_" + str(index) + ".m ." 7 | # The user will need to modify command2 to point to the Matlab application on their system. 8 | command2 = "/Applications/MATLAB_R2018a.app/bin/matlab " \ 9 | "-nosplash -nojvm -nodisplay -nodesktop -r 'run sum_vector_" + str(index) + ".m; exit'" 10 | command3 = "mv ./OutputFiles/oupt.out ./OutputFiles/oupt_" + str(index) + ".out" 11 | command4 = "rm sum_vector_" + str(index) + ".m" 12 | os.system(command1) 13 | os.system(command2) 14 | os.system(command3) 15 | os.system(command4) 16 | 17 | 18 | if __name__ == '__main__': 19 | fire.Fire(matlab) 20 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/matlab_model_sum_vector_indexed.py: -------------------------------------------------------------------------------- 1 | import fire 2 | import os 3 | 4 | 5 | def matlab(index): 6 | command1 = "cp ./InputFiles/sum_vector_indexed_" + str(index) + ".m ." 7 | # The user will need to modify command2 to point to the Matlab application on their system. 
8 | command2 = "/Applications/MATLAB_R2018a.app/bin/matlab " \ 9 | "-nosplash -nojvm -nodisplay -nodesktop -r 'run sum_vector_indexed_" + str(index) + ".m; exit'" 10 | command3 = "mv ./OutputFiles/oupt.out ./OutputFiles/oupt_" + str(index) + ".out" 11 | command4 = "rm sum_vector_indexed_" + str(index) + ".m" 12 | os.system(command1) 13 | os.system(command2) 14 | os.system(command3) 15 | os.system(command4) 16 | 17 | 18 | if __name__ == '__main__': 19 | fire.Fire(matlab) 20 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/process_matlab_output.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def read_output(index): 5 | x = np.loadtxt("./OutputFiles/oupt_%d.out" % index) 6 | return x 7 | 8 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/prod_determinant.m: -------------------------------------------------------------------------------- 1 | x = ; 2 | y = []; 3 | y = reshape(y,3,3)'; 4 | output = x*det(y); 5 | if ~ exist('OutputFiles', 'dir') 6 | status = mkdir('OutputFiles'); 7 | end 8 | csvwrite(sprintf('OutputFiles/oupt.out'),output); 9 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/prod_determinant_index.m: -------------------------------------------------------------------------------- 1 | x = ; 2 | y = [, , ; , , ; , , ]; 3 | output = x*det(y); 4 | if ~ exist('OutputFiles', 'dir') 5 | status = mkdir('OutputFiles'); 6 | end 7 | csvwrite(sprintf('OutputFiles/oupt.out'),output); 8 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/prod_determinant_partial.m: -------------------------------------------------------------------------------- 1 | x = ; 2 | y = [; ; ]; 3 | output = x*det(y); 4 | if ~ exist('OutputFiles', 'dir') 5 | status = mkdir('OutputFiles'); 6 | end 7 | csvwrite(sprintf('OutputFiles/oupt.out'),output); 8 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/sum_scalar.m: -------------------------------------------------------------------------------- 1 | x = zeros(3,1); 2 | x(1) = ; 3 | x(2) = ; 4 | x(3) = ; 5 | output = sum(x); 6 | if ~ exist('OutputFiles', 'dir') 7 | status = mkdir('OutputFiles'); 8 | end 9 | csvwrite(sprintf('OutputFiles/oupt.out'),output); 10 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/sum_vector.m: -------------------------------------------------------------------------------- 1 | x = []; 2 | output = sum(x); 3 | if ~ exist('OutputFiles', 'dir') 4 | status = mkdir('OutputFiles'); 5 | end 6 | csvwrite(sprintf('OutputFiles/oupt.out'),output); 7 | -------------------------------------------------------------------------------- /docs/code/RunModel/Matlab_Example/sum_vector_indexed.m: -------------------------------------------------------------------------------- 1 | x = zeros(3,1); 2 | x(1) = ; 3 | x(2) = ; 4 | x(3) = ; 5 | output = sum(x); 6 | if ~ exist('OutputFiles', 'dir') 7 | status = mkdir('OutputFiles'); 8 | end 9 | csvwrite(sprintf('OutputFiles/oupt.out'),output); 10 | -------------------------------------------------------------------------------- /docs/code/RunModel/OpenSees_Example/columnsdimensions.tcl: 
-------------------------------------------------------------------------------- 1 | set h1 0.850000000000000 ; 2 | set b1 0.850000000000000 ; 3 | set reinf1 2.940530710749999E-002 ; 4 | set h2 0.750000000000000 ; 5 | set b2 0.800000000000000 ; 6 | set reinf2 2.261946720000000E-002 ; 7 | set h3 0.700000000000000 ; 8 | set b3 0.550000000000000 ; 9 | set reinf3 1.357168043000000E-002 ; 10 | set h4 0.700000000000000 ; 11 | set b4 0.550000000000000 ; 12 | set reinf4 1.357168043000000E-002 ; 13 | set h5 0.550000000000000 ; 14 | set b5 0.550000000000000 ; 15 | set reinf5 1.176212292750000E-002 ; 16 | set h6 0.600000000000000 ; 17 | set b6 0.550000000000000 ; 18 | set reinf6 1.266705000000000E-002 ; 19 | set hx 0.600000000000000 ; 20 | set bx 0.550000000000000 ; 21 | set reinf7 1.316955651000000E-002 ; 22 | set hy 0.600000000000000 ; 23 | set by 0.550000000000000 ; 24 | set reinf8 1.099557426000000E-002 ; 25 | -------------------------------------------------------------------------------- /docs/code/RunModel/OpenSees_Example/import_variables.tcl: -------------------------------------------------------------------------------- 1 | set fc1 ; 2 | set fy1 ; 3 | set Es1 ; 4 | set fc2 ; 5 | set fy2 ; 6 | set Es2 ; 7 | set fc3 ; 8 | set fy3 ; 9 | set Es3 ; 10 | set fc4 ; 11 | set fy4 ; 12 | set Es4 ; 13 | set fc5 ; 14 | set fy5 ; 15 | set Es5 ; 16 | set fc6 ; 17 | set fy6 ; 18 | set Es6 ; 19 | -------------------------------------------------------------------------------- /docs/code/RunModel/OpenSees_Example/opensees_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import fire 4 | 5 | 6 | def opensees_run(index): 7 | name_before = "import_variables.tcl" 8 | name_ = "import_variables_" + str(index) + ".tcl" 9 | 10 | command0 = "cp ./InputFiles/import_variables_" + str(index) + ".tcl ./import_variables.tcl" 11 | command1 = "module load opensees && OpenSees test.tcl" 12 | 13 | os.system(command0) 14 | os.system(command1) 15 | current_dir_problem = os.getcwd() 16 | path_data = os.path.join(os.sep, current_dir_problem, 'OutputFiles') 17 | print(path_data) 18 | os.makedirs(path_data, exist_ok=True) 19 | command3 = "cp ./node20001.out ./OutputFiles/node20001_" + str(index) + ".out " 20 | os.system(command3) 21 | 22 | 23 | if __name__ == '__main__': 24 | fire.Fire(opensees_run) 25 | 26 | -------------------------------------------------------------------------------- /docs/code/RunModel/OpenSees_Example/process_opensees_output.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def read_output(index): 5 | x = np.loadtxt("./OutputFiles/node20001_%d.out" % index) 6 | return x[-1, 1] 7 | 8 | -------------------------------------------------------------------------------- /docs/code/RunModel/OpenSees_Example/run_OpenSees.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #SBATCH 4 | #SBATCH --job-name=Opensees 5 | #SBATCH --time=00-00:05:00 6 | #SBATCH --nodes=1 7 | #SBATCH --ntasks-per-node=5 8 | #SBATCH --partition=express 9 | #SBATCH -o out_file.txt 10 | #SBATCH -e err_file.txt 11 | #SBATCH --mail-type=end 12 | #SBATCH --mail-user=michael.shields@jhu.edu 13 | 14 | module load python 15 | module load opensees/3.2.0 16 | module load parallel 17 | 18 | python run_opensees_UQpy.py 19 | 20 | -------------------------------------------------------------------------------- /docs/code/RunModel/OpenSees_Example/test.tcl: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/RunModel/OpenSees_Example/test.tcl -------------------------------------------------------------------------------- /docs/code/RunModel/Python_Example/python_model.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def sum_rvs(samples=None): 5 | x = np.sum(samples, axis=1) 6 | return x 7 | 8 | 9 | def sum_rvs_vec(samples=None): 10 | x = np.sum(samples, axis=2) 11 | return x 12 | 13 | 14 | class SumRVs: 15 | def __init__(self, samples=None): 16 | 17 | self.qoi = np.sum(samples, axis=1) 18 | 19 | class SumRVsVec: 20 | def __init__(self, samples=None): 21 | 22 | self.qoi = np.sum(samples, axis=2) 23 | 24 | 25 | def det_rvs(samples=None): 26 | 27 | x = samples[:][0] * np.linalg.det(samples[:][1]) 28 | return x 29 | 30 | 31 | def det_rvs_par(samples=None): 32 | x = samples[0][0] * np.linalg.det(samples[0][1]) 33 | return x 34 | 35 | 36 | class DetRVs: 37 | def __init__(self, samples=None): 38 | 39 | self.qoi = samples[0][0] * np.linalg.det(samples[0][1]) 40 | 41 | 42 | def det_rvs_fixed(samples=None, coeff=None): 43 | 44 | x = coeff * np.linalg.det(samples[:]) 45 | return x 46 | -------------------------------------------------------------------------------- /docs/code/RunModel/README.rst: -------------------------------------------------------------------------------- 1 | RunModel Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/dimension_reduction/diffusion_maps/README.rst: -------------------------------------------------------------------------------- 1 | Diffusion Maps Examples 2 | ============================ 3 | -------------------------------------------------------------------------------- /docs/code/dimension_reduction/grassmann/README.rst: -------------------------------------------------------------------------------- 1 | Grassmann Manifold Examples 2 | ============================ 3 | -------------------------------------------------------------------------------- /docs/code/dimension_reduction/pod/README.rst: -------------------------------------------------------------------------------- 1 | Proper Orthogonal Decomposition Examples 2 | ========================================= 3 | -------------------------------------------------------------------------------- /docs/code/distributions/continuous_1d/README.rst: -------------------------------------------------------------------------------- 1 | Distribution Continuous 1D Examples 2 | =================================== 3 | -------------------------------------------------------------------------------- /docs/code/distributions/continuous_1d/plot_distribution_normal_fitting.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Distribution fitting 4 | ================================== 5 | 6 | This example showcases the calculation of a distribution's parameters by fitting available data 7 | """ 8 | 9 | #%% md 10 | # 11 | # Initially we have to import the necessary modules. 12 | 13 | #%% 14 | 15 | from UQpy.distributions.collection.Normal import Normal 16 | 17 | #%% md 18 | # 19 | # Define a Normal distribution and use the fit method. 
20 | # ------------------------------------------------------ 21 | # 22 | # Parameters to be learnt should be instantiated as None. 23 | # Note that the fit method of each distribution returns a dictionary whose keys are the parameter names 24 | # and whose values are the fitted parameter values. 25 | 26 | #%% 27 | 28 | normal1 = Normal(loc=None, scale=None) 29 | fitted_parameters1 = normal1.fit(data=[-4, 2, 2, 1]) 30 | print(fitted_parameters1) 31 | 32 | normal2 = Normal(loc=0., scale=None) 33 | fitted_parameters2 = normal2.fit(data=[-4, 2, 2, 1]) 34 | print(fitted_parameters2) 35 | 36 | -------------------------------------------------------------------------------- /docs/code/distributions/discrete_1d/README.rst: -------------------------------------------------------------------------------- 1 | Distribution Discrete 1D Examples 2 | =================================== 3 | -------------------------------------------------------------------------------- /docs/code/distributions/multivariate/README.rst: -------------------------------------------------------------------------------- 1 | Distributions ND Examples 2 | =================================== 3 | -------------------------------------------------------------------------------- /docs/code/distributions/user_defined/README.rst: -------------------------------------------------------------------------------- 1 | Examples 2 | =================================== 3 | -------------------------------------------------------------------------------- /docs/code/inference/bayes_model_selection/local_pfn_models.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | import numpy as np 8 | 9 | domain = np.linspace(0, 10, 50) 10 | 11 | 12 | def model_quadratic(theta): 13 | # this one takes one parameter vector theta and return one qoi 14 | inpt = np.array(theta).reshape((-1,)) 15 | return inpt[0] * domain + inpt[1] * domain ** 2 16 | 17 | 18 | def model_linear(theta): 19 | # this one takes one parameter vector theta and return one qoi 20 | inpt = np.array(theta).reshape((-1,)) 21 | return inpt[0] * domain 22 | 23 | 24 | def model_cubic(theta): 25 | # this one takes one parameter vector theta and return one qoi 26 | inpt = np.array(theta).reshape((-1,)) 27 | return inpt[0] * domain + inpt[1] * domain ** 2 + inpt[2] * domain ** 3 -------------------------------------------------------------------------------- /docs/code/inference/bayes_parameter_estimation/README.rst: -------------------------------------------------------------------------------- 1 | Bayesian Parameter Estimation Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | -------------------------------------------------------------------------------- /docs/code/inference/bayes_parameter_estimation/local_pfn_models.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | import numpy as np 8 | 9 | 10 | 11 | def model_quadratic(theta): 12 | domain = np.linspace(0, 10, 50) 13 | # this one takes one parameter vector theta and return one qoi 14 | inpt = np.array(theta).reshape((-1,)) 15 | return inpt[0] * domain + inpt[1] * domain ** 2 -------------------------------------------------------------------------------- /docs/code/inference/info_model_selection/pfn_models.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | import numpy as np 8 | 9 | domain = np.linspace(0, 10, 50) 10 | 11 | 12 | def model_quadratic(theta): 13 | # this one takes one parameter vector theta and return one qoi 14 | inpt = np.array(theta).reshape((-1,)) 15 | return inpt[0] * domain + inpt[1] * domain ** 2 16 | 17 | 18 | def model_linear(theta): 19 | # this one takes one parameter vector theta and return one qoi 20 | inpt = np.array(theta).reshape((-1,)) 21 | return inpt[0] * domain 22 | 23 | 24 | def model_cubic(theta): 25 | # this one takes one parameter vector theta and return one qoi 26 | inpt = np.array(theta).reshape((-1,)) 27 | return inpt[0] * domain + inpt[1] * domain ** 2 + inpt[2] * domain ** 3 -------------------------------------------------------------------------------- /docs/code/inference/mle/README.rst: -------------------------------------------------------------------------------- 1 | Maximum Likelihood Estimation Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | These notebooks illustrate the use of the Inference Model alternatives to create a model for inference, and the MLE class to perform maximum likelihood estimation of the parameters of that model. Recall that a maximum likelihood estimate is simply the parameter vector that maximizes the likelihood: 5 | 6 | .. math:: \theta_{ML} = argmax_{\theta} \quad p(data \vert \theta) 7 | -------------------------------------------------------------------------------- /docs/code/inference/mle/local_pfn_models.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | import numpy as np 8 | 9 | domain = np.linspace(0, 10, 50) 10 | 11 | 12 | def model_quadratic(theta): 13 | # this one takes one parameter vector theta and return one qoi 14 | inpt = np.array(theta).reshape((-1,)) 15 | return inpt[0] * domain + inpt[1] * domain ** 2 -------------------------------------------------------------------------------- /docs/code/reliability/form/README.rst: -------------------------------------------------------------------------------- 1 | FORM Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/reliability/form/local_pfn.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | import numpy as np 8 | 9 | 10 | def example1(samples=None): 11 | g = np.zeros(samples.shape[0]) 12 | for i in range(samples.shape[0]): 13 | R = samples[i, 0] 14 | S = samples[i, 1] 15 | g[i] = R - S 16 | return g 17 | 18 | 19 | def example2(samples=None): 20 | d = 2 21 | beta = 3.0902 22 | g = np.zeros(samples.shape[0]) 23 | for i in range(samples.shape[0]): 24 | g[i] = -1/np.sqrt(d) * (samples[i, 0] + samples[i, 1]) + beta 25 | return g 26 | 27 | 28 | def example3(samples=None): 29 | g = np.zeros(samples.shape[0]) 30 | for i in range(samples.shape[0]): 31 | g[i] = 6.2*samples[i, 0] - samples[i, 1]*samples[i, 2]**2 32 | return g 33 | 34 | 35 | def example4(samples=None): 36 | g = np.zeros(samples.shape[0]) 37 | for i in range(samples.shape[0]): 38 | g[i] = samples[i, 0]*samples[i, 1] - 80 39 | return g 40 | 
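
The MLE README above defines the maximum likelihood estimate as the parameter vector maximizing p(data | theta). As a minimal illustration of that formula, independent of UQpy's MLE class, the sketch below fits the two-parameter quadratic model from the auxiliary pfn files (model_quadratic) to synthetic data under an assumed Gaussian error model using plain SciPy; the true parameters, noise level, and optimizer choice are illustrative assumptions, not part of the original examples.

import numpy as np
from scipy.optimize import minimize

domain = np.linspace(0, 10, 50)


def model_quadratic(theta):
    # Same quadratic model as in the auxiliary files above: one parameter vector in, one qoi out.
    inpt = np.array(theta).reshape((-1,))
    return inpt[0] * domain + inpt[1] * domain ** 2


# Synthetic data: true parameters and Gaussian noise are assumed purely for illustration.
rng = np.random.default_rng(0)
theta_true = np.array([1.0, 2.0])
sigma = 1.0  # assumed known noise standard deviation
data = model_quadratic(theta_true) + rng.normal(scale=sigma, size=domain.size)


def neg_log_likelihood(theta):
    # Gaussian log-likelihood up to an additive constant.
    residuals = data - model_quadratic(theta)
    return 0.5 * np.sum((residuals / sigma) ** 2)


# theta_ML = argmax_theta p(data | theta), i.e. the minimizer of the negative log-likelihood.
result = minimize(neg_log_likelihood, x0=np.zeros(2))
print(result.x)  # should be close to theta_true

UQpy's MLE class wraps this same maximization for any InferenceModel; the sketch only makes the underlying formula concrete.
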
-------------------------------------------------------------------------------- /docs/code/reliability/inverse_form/README.rst: -------------------------------------------------------------------------------- 1 | Inverse FORM Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/reliability/inverse_form/local_pfn.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | import numpy as np 8 | 9 | 10 | def cantilever_beam(samples=None): 11 | """Performance function from Chapter 7 Example 7.2 from Du 2005""" 12 | elastic_modulus = 30e6 13 | length = 100 14 | width = 2 15 | height = 4 16 | d_0 = 3 17 | 18 | g = np.zeros(samples.shape[0]) 19 | for i in range(samples.shape[0]): 20 | x = (samples[i, 0] / width**2) ** 2 21 | y = (samples[i, 1] / height**2) ** 2 22 | d = ((4 * length**3) / (elastic_modulus * width * height)) * np.sqrt(x + y) 23 | g[i] = d_0 - d 24 | return g 25 | -------------------------------------------------------------------------------- /docs/code/reliability/sorm/README.rst: -------------------------------------------------------------------------------- 1 | SORM Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/reliability/sorm/local_model4.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def example4(samples=None): 5 | g = np.zeros(samples.shape[0]) 6 | for i in range(samples.shape[0]): 7 | g[i] = samples[i, 0] * samples[i, 1] - 80 8 | return g -------------------------------------------------------------------------------- /docs/code/reliability/sorm/local_pfn.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | import numpy as np 8 | 9 | 10 | def example1(samples=None): 11 | g = np.zeros(samples.shape[0]) 12 | for i in range(samples.shape[0]): 13 | R = samples[i, 0] 14 | S = samples[i, 1] 15 | g[i] = R - S 16 | return g 17 | 18 | 19 | def example2(samples=None): 20 | import numpy as np 21 | d = 2 22 | beta = 3.0902 23 | g = np.zeros(samples.shape[0]) 24 | for i in range(samples.shape[0]): 25 | g[i] = -1 / np.sqrt(d) * (samples[i, 0] + samples[i, 1]) + beta 26 | return g 27 | 28 | 29 | def example3(samples=None): 30 | g = np.zeros(samples.shape[0]) 31 | for i in range(samples.shape[0]): 32 | g[i] = 6.2 * samples[i, 0] - samples[i, 1] * samples[i, 2] ** 2 33 | return g 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /docs/code/reliability/subset_simulation/README.rst: -------------------------------------------------------------------------------- 1 | Subset Simulation Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | 5 | -------------------------------------------------------------------------------- /docs/code/reliability/subset_simulation/local_Resonance_pfn.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Resonance Auxiliary File 4 | ====================================================================== 5 | 6 | 7 | """ 8 | import numpy as np 9 | 10 | 11 | class RunPythonModel: 12 | 13 | def __init__(self, samples=None): 14 | 15 | self.samples = samples 16 | 
self.qoi = [0]*self.samples.shape[0] 17 | 18 | self.omega = 6. 19 | self.epsilon = 0.0001 20 | 21 | for i in range(self.samples.shape[0]): 22 | add = self.samples[i][1] - self.samples[i][0]*(self.omega+self.epsilon)**2 23 | diff = self.samples[i][0]*(self.omega-self.epsilon)**2 - self.samples[i][1] 24 | self.qoi[i] = np.maximum(add, diff) -------------------------------------------------------------------------------- /docs/code/reliability/subset_simulation/local_Rosenbrock.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Rosenbrock Distribution Auxiliary File 4 | ====================================================================== 5 | 6 | """ 7 | from UQpy.distributions import DistributionND 8 | import numpy as np 9 | 10 | class Rosenbrock(DistributionND): 11 | def __init__(self, p=20.): 12 | super().__init__(p=p) 13 | 14 | def pdf(self, x): 15 | return np.exp(-(100*(x[:, 1]-x[:, 0]**2)**2+(1-x[:, 0])**2) / self.parameters['p']) 16 | 17 | def log_pdf(self, x): 18 | return -(100*(x[:, 1]-x[:, 0]**2)**2+(1-x[:, 0])**2)/self.parameters['p'] 19 | -------------------------------------------------------------------------------- /docs/code/reliability/subset_simulation/local_Rosenbrock_pfn.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Rosenbrock Distribution Auxiliary File 2 4 | ====================================================================== 5 | 6 | """ 7 | class RunPythonModel: 8 | 9 | def __init__(self, samples=None): 10 | 11 | self.samples = samples 12 | self.qoi = [0]*self.samples.shape[0] 13 | 14 | for i in range(self.samples.shape[0]): 15 | self.qoi[i] = 120 - self.samples[i][1] - 3*self.samples[i][0] -------------------------------------------------------------------------------- /docs/code/sampling/adaptive_kriging/README.rst: -------------------------------------------------------------------------------- 1 | Adaptive Kriging Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/sampling/adaptive_kriging/branin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/sampling/adaptive_kriging/branin.png -------------------------------------------------------------------------------- /docs/code/sampling/adaptive_kriging/local_BraninHoo.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Branin-Hoo File 4 | ================================== 5 | 6 | 7 | """ 8 | import numpy as np 9 | 10 | 11 | def function(z, a=1, b=5.1/(4*np.pi**2), c=5/np.pi, r=6, s=10, t=1/(8*np.pi)): 12 | f = a*(z[:, 1] - b*z[:, 0]**2 + c*z[:, 0] - r)**2 + s*(1 - t)*np.cos(z[:, 0]) + s + 5*z[:, 0] 13 | return f -------------------------------------------------------------------------------- /docs/code/sampling/adaptive_kriging/local_series.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary File 1 4 | ================================== 5 | 6 | """ 7 | import numpy as np 8 | 9 | 10 | def series(z, k=7): 11 | t1 = 3 + 0.1 * (z[:, 1] - z[:, 0]) ** 2 - (z[:, 1] + z[:, 0]) / np.sqrt(2) 12 | t2 = 3 + 0.1 * (z[:, 1] - z[:, 0]) ** 2 + (z[:, 1] + z[:, 0]) / np.sqrt(2) 13 | t3 = z[:, 1] - z[:, 0] + k / np.sqrt(2) 14 | t4 = z[:, 0] - z[:, 1] + k / np.sqrt(2) 15 | return min([t1, t2, t3, t4]) 16 | 
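
Since local_Rosenbrock.py above defines a custom Rosenbrock density by subclassing DistributionND, a short sanity check of that class is sketched below. It assumes local_Rosenbrock.py is on the Python path (as in the gallery examples that import it); the evaluation points are arbitrary, and only the pdf and log_pdf methods defined in that file are exercised.

import numpy as np
# Assumes local_Rosenbrock.py (shown above) is importable, as in the subset simulation examples.
from local_Rosenbrock import Rosenbrock

distribution = Rosenbrock(p=20.)
points = np.array([[0.0, 0.0], [1.0, 1.0], [-1.0, 2.0]])  # arbitrary evaluation points
print(distribution.pdf(points))
# log_pdf should agree with log(pdf); this consistency is what MCMC-based samplers rely on.
print(np.allclose(np.log(distribution.pdf(points)), distribution.log_pdf(points)))
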
-------------------------------------------------------------------------------- /docs/code/sampling/importance_sampling/README.rst: -------------------------------------------------------------------------------- 1 | Importance Sampling Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/sampling/latin_hypercube/README.rst: -------------------------------------------------------------------------------- 1 | Examples 2 | ^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/sampling/mcmc/README.rst: -------------------------------------------------------------------------------- 1 | Markov Chain Monte Carlo Sampling Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/sampling/monte_carlo/README.rst: -------------------------------------------------------------------------------- 1 | Monte Carlo Sampling Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/sampling/refined_stratified_sampling/README.rst: -------------------------------------------------------------------------------- 1 | Refined Stratified Sampling Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/sampling/refined_stratified_sampling/local_python_model_function.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary File 4 | ============================================================ 5 | 6 | """ 7 | 8 | def y_func(z): 9 | return 1/(6.2727*(abs(0.3-z[:, 0]**2-z[:, 1]**2)+0.01)) 10 | # return np.sqrt(z[:, 0]**2+z[:, 1]**2) 11 | 12 | -------------------------------------------------------------------------------- /docs/code/sampling/simplex/README.rst: -------------------------------------------------------------------------------- 1 | Simplex Sampling Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/sampling/tempering/README.rst: -------------------------------------------------------------------------------- 1 | Tempering MCMC Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/sampling/tempering/local_reliability_funcs.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def correlated_gaussian(samples, b_eff, d): 5 | return [b_eff * np.sqrt(d) - np.sum(samples[i, :]) for i in range(samples.shape[0])] 6 | -------------------------------------------------------------------------------- /docs/code/sampling/theta_criterion/README.rst: -------------------------------------------------------------------------------- 1 | Theta Criterion PCE Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/sampling/true_stratified_sampling/README.rst: -------------------------------------------------------------------------------- 1 | True Stratified Sampling Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | -------------------------------------------------------------------------------- /docs/code/sampling/true_stratified_sampling/strata.txt: 
-------------------------------------------------------------------------------- 1 | 0.0 0.0 0.5 0.33333 2 | 0.0 0.33333 0.5 0.33333 3 | 0.0 0.66667 0.5 0.33333 4 | 0.5 0.0 0.5 0.5 5 | 0.5 0.5 0.25 0.5 6 | 0.75 0.5 0.25 0.5 -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/README.rst: -------------------------------------------------------------------------------- 1 | Scientific Machine Learning Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/bayesian_quickstart/README.rst: -------------------------------------------------------------------------------- 1 | Bayesian Quickstart 2 | ^^^^^^^^^^^^^^^^^^^ -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/bbb_trainer/README.rst: -------------------------------------------------------------------------------- 1 | Bayes By Backpropagation Trainer Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/deep_operator_network/README.rst: -------------------------------------------------------------------------------- 1 | Deep Operator Network Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/deep_operator_network/linear_elastic_data.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/deep_operator_network/linear_elastic_data.mat -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/fourier_neural_operator/README.rst: -------------------------------------------------------------------------------- 1 | Fourier Neural Operator Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/fourier_neural_operator/burgers_solutions_test.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/fourier_neural_operator/burgers_solutions_test.pt -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/fourier_neural_operator/burgers_solutions_train.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/fourier_neural_operator/burgers_solutions_train.pt -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/fourier_neural_operator/fno_state_dict.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/fourier_neural_operator/fno_state_dict.pt -------------------------------------------------------------------------------- 
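
The strata.txt file listed above defines six rectangular strata for the true stratified sampling example. A minimal NumPy check is sketched below, under the assumption (consistent with how the rows tile the unit square) that the first two columns are the stratum origins and the last two are the stratum widths, so the stratum volumes should sum to one.

import numpy as np

# Assumed column layout per rectangular stratum: [origin_1, origin_2, width_1, width_2].
strata = np.loadtxt("strata.txt")
origins, widths = strata[:, :2], strata[:, 2:]

volumes = np.prod(widths, axis=1)
print(volumes)        # individual stratum volumes
print(volumes.sum())  # approximately 1.0 if the strata tile the unit square
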
/docs/code/scientific_machine_learning/fourier_neural_operator/initial_conditions_test.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/fourier_neural_operator/initial_conditions_test.pt -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/fourier_neural_operator/initial_conditions_train.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/fourier_neural_operator/initial_conditions_train.pt -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/hmc_trainer/README.rst: -------------------------------------------------------------------------------- 1 | Hamiltonian Monte Carlo Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/hmc_trainer/burgers_solutions_test.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/hmc_trainer/burgers_solutions_test.pt -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/hmc_trainer/burgers_solutions_train.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/hmc_trainer/burgers_solutions_train.pt -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/hmc_trainer/initial_conditions_test.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/hmc_trainer/initial_conditions_test.pt -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/hmc_trainer/initial_conditions_train.pt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/hmc_trainer/initial_conditions_train.pt -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/mcd_trainer/README.rst: -------------------------------------------------------------------------------- 1 | Monte Carlo Dropout Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/trainer/README.rst: -------------------------------------------------------------------------------- 1 | Trainer Examples 2 | ^^^^^^^^^^^^^^^^ -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/unet/README.rst: -------------------------------------------------------------------------------- 1 | U-net Examples 2 | ^^^^^^^^^^^^^^ 
-------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/unet/data/X_tr.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/unet/data/X_tr.npy -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/unet/data/X_ts.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/unet/data/X_ts.npy -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/unet/data/X_val.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/unet/data/X_val.npy -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/unet/data/Y_tr.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/unet/data/Y_tr.npy -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/unet/data/Y_ts.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/unet/data/Y_ts.npy -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/unet/data/Y_val.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/unet/data/Y_val.npy -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/unet/figures/test_set_predictions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/unet/figures/test_set_predictions.png -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/unet/figures/test_set_predictions_with_mae.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/unet/figures/test_set_predictions_with_mae.png -------------------------------------------------------------------------------- /docs/code/scientific_machine_learning/unet/figures/training_validation_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/unet/figures/training_validation_loss.png -------------------------------------------------------------------------------- 
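
The U-Net example above ships its train/test/validation splits as .npy arrays (X_* inputs, Y_* targets), and the Fourier neural operator and HMC trainer examples ship paired initial-condition/solution files in .pt format. A minimal inspection sketch follows; it assumes the .npy files hold plain NumPy arrays and the .pt files hold serialized torch objects (most likely tensors), with paths given relative to the repository root, and it makes no assumption about the array shapes.

import numpy as np
import torch

# U-Net data: NumPy arrays for the training inputs and targets.
x_train = np.load("docs/code/scientific_machine_learning/unet/data/X_tr.npy")
y_train = np.load("docs/code/scientific_machine_learning/unet/data/Y_tr.npy")
print(x_train.shape, y_train.shape)

# FNO data: assumed to be serialized torch tensors of initial conditions and Burgers' solutions.
for name in ("initial_conditions_train.pt", "burgers_solutions_train.pt"):
    obj = torch.load("docs/code/scientific_machine_learning/fourier_neural_operator/" + name)
    print(name, type(obj), getattr(obj, "shape", None))
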
/docs/code/scientific_machine_learning/unet/figures/unet_weights.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/scientific_machine_learning/unet/figures/unet_weights.pth -------------------------------------------------------------------------------- /docs/code/sensitivity/chatterjee/README.rst: -------------------------------------------------------------------------------- 1 | Chatterjee indices 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | These examples serve as a guide for using the Chatterjee sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly. 4 | 5 | 1. **Ishigami function** 6 | 7 | In addition to the Pick and Freeze scheme, the Sobol indices can be estimated using the rank statistics approach :cite:`gamboa2020global`. We demonstrate this estimation of the Sobol indices using the Ishigami function. 8 | 9 | 2. **Exponential function** 10 | 11 | For the Exponential model, analytical Cramér-von Mises indices are available :cite:`CVM` and since they are equivalent to the Chatterjee indices in the sample limit, they are shown here. 12 | 13 | 3. **Sobol function** 14 | 15 | This example was considered in :cite:`gamboa2020global` (page 18) to compare the Pick and Freeze scheme with the rank statistics approach for estimating the Sobol indices. 16 | -------------------------------------------------------------------------------- /docs/code/sensitivity/chatterjee/local_exponential.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | 8 | import numpy as np 9 | 10 | 11 | def evaluate(X: np.array) -> np.array: 12 | r"""A non-linear function that is used to demonstrate sensitivity index. 13 | 14 | .. 
math:: 15 | f(x) = \exp(x_1 + 2*x_2) 16 | """ 17 | 18 | Y = np.exp(X[:, 0] + 2 * X[:, 1]) 19 | 20 | return Y 21 | -------------------------------------------------------------------------------- /docs/code/sensitivity/chatterjee/local_ishigami.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | 8 | import numpy as np 9 | 10 | 11 | def evaluate(X, params=[7, 0.1]): 12 | """Non-monotonic Ishigami-Homma three parameter test function""" 13 | 14 | a = params[0] 15 | b = params[1] 16 | 17 | Y = ( 18 | np.sin(X[:, 0]) 19 | + a * np.power(np.sin(X[:, 1]), 2) 20 | + b * np.power(X[:, 2], 4) * np.sin(X[:, 0]) 21 | ) 22 | 23 | return Y 24 | -------------------------------------------------------------------------------- /docs/code/sensitivity/chatterjee/local_sobol_func.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | 8 | import numpy as np 9 | import copy 10 | 11 | 12 | def evaluate(X, a_values): 13 | 14 | dims = len(a_values) 15 | g = 1 16 | 17 | for i in range(dims): 18 | g_i = (np.abs(4 * X[:, i] - 2) + a_values[i]) / (1 + a_values[i]) 19 | g *= g_i 20 | 21 | return g 22 | 23 | 24 | def sensitivities(a_values): 25 | 26 | dims = len(a_values) 27 | 28 | Total_order = np.zeros((dims, 1)) 29 | 30 | V_i = 1 / (3 * (1 + a_values) ** 2) 31 | 32 | total_variance = np.prod(1 + V_i) - 1 33 | 34 | First_order = V_i / total_variance 35 | 36 | for i in range(dims): 37 | 38 | rem_First_order = copy.deepcopy(V_i) 39 | rem_First_order[i] = 0 40 | Total_order[i] = V_i[i] * np.prod(rem_First_order + 1) / total_variance 41 | 42 | return First_order.reshape(-1, 1), Total_order 43 | -------------------------------------------------------------------------------- /docs/code/sensitivity/comparison/README.rst: -------------------------------------------------------------------------------- 1 | Comparison of Sensitivity indices 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | In this section we compare the sensitivity indices (Sobol, Cramér-von Mises and Chatterjee) available in the package using the 'Ishigami function' and the 'Additive model' to illustrate the differences. 5 | 6 | In both the examples, we note that the Cramér-von Mises indices and the Chatterjee indices are almost equal (as the Chatterjee indices converge to the Cramér-von Mises indices in the sample limit). -------------------------------------------------------------------------------- /docs/code/sensitivity/comparison/local_additive.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | 8 | import numpy as np 9 | 10 | 11 | def evaluate(X, params) -> np.array: 12 | r"""A linear function that is used to demonstrate sensitivity indices. 13 | 14 | .. 
math:: 15 | f(x) = a \cdot x_1 + b \cdot x_2 16 | """ 17 | a, b = params 18 | 19 | Y = a * X[:, 0] + b * X[:, 1] 20 | 21 | return Y 22 | -------------------------------------------------------------------------------- /docs/code/sensitivity/comparison/local_ishigami.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | 8 | import numpy as np 9 | 10 | 11 | def evaluate(X, params=[7, 0.1]): 12 | """Non-monotonic Ishigami-Homma three parameter test function""" 13 | 14 | a = params[0] 15 | b = params[1] 16 | 17 | Y = ( 18 | np.sin(X[:, 0]) 19 | + a * np.power(np.sin(X[:, 1]), 2) 20 | + b * np.power(X[:, 2], 4) * np.sin(X[:, 0]) 21 | ) 22 | 23 | return Y 24 | -------------------------------------------------------------------------------- /docs/code/sensitivity/cramer_von_mises/README.rst: -------------------------------------------------------------------------------- 1 | Cramér-von Mises Sensitivity indices 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | These examples serve as a guide for using the Cramér-von Mises sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly. 4 | 5 | 1. **Exponential function** 6 | 7 | For the Exponential model, analytical Cramér-von Mises indices are available :cite:`CVM`. 8 | 9 | 2. **Sobol function** 10 | 11 | The Cramér-von Mises indices are computed using the Pick and Freeze approach :cite:`CVM`. These model evaluations can be used to estimate the Sobol indices as well. We demonstrate this using the Sobol function. 12 | -------------------------------------------------------------------------------- /docs/code/sensitivity/cramer_von_mises/local_exponential.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | 8 | import numpy as np 9 | 10 | 11 | def evaluate(X: np.array) -> np.array: 12 | r"""A non-linear function that is used to demonstrate sensitivity index. 13 | 14 | .. 
math:: 15 | f(x) = \exp(x_1 + 2*x_2) 16 | """ 17 | 18 | Y = np.exp(X[:, 0] + 2 * X[:, 1]) 19 | 20 | return Y 21 | -------------------------------------------------------------------------------- /docs/code/sensitivity/cramer_von_mises/local_sobol_func.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | 8 | import numpy as np 9 | import copy 10 | 11 | 12 | def evaluate(X, a_values): 13 | 14 | dims = len(a_values) 15 | g = 1 16 | 17 | for i in range(dims): 18 | g_i = (np.abs(4 * X[:, i] - 2) + a_values[i]) / (1 + a_values[i]) 19 | g *= g_i 20 | 21 | return g 22 | 23 | 24 | def sensitivities(a_values): 25 | 26 | dims = len(a_values) 27 | 28 | Total_order = np.zeros((dims, 1)) 29 | 30 | V_i = (3 * (1 + a_values) ** 2) ** (-1) 31 | 32 | total_variance = np.prod(1 + V_i) - 1 33 | 34 | First_order = V_i / total_variance 35 | 36 | for i in range(dims): 37 | 38 | rem_First_order = copy.deepcopy(V_i) 39 | rem_First_order[i] = 0 40 | Total_order[i] = V_i[i] * np.prod(rem_First_order + 1) / total_variance 41 | 42 | return First_order.reshape(-1, 1), Total_order 43 | -------------------------------------------------------------------------------- /docs/code/sensitivity/generalised_sobol/README.rst: -------------------------------------------------------------------------------- 1 | Generalised Sobol Sensitivity indices 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | These examples serve as a guide for using the GSI sensitivity module. They have been taken from various papers to enable validation of the implementation and have been referenced accordingly. 5 | 6 | 1. **Mechanical oscillator ODE** 7 | 8 | The GSI sensitivity indices are computed for a mechanical oscillator governed by a second-order differential equation :cite:`GSI`. The model outputs the displacement of the oscillator for a given time period. Unlike the pointwise-in-time Sobol indices, which provide the sensitivity of the model parameters at each point in time, the GSI indices summarise the sensitivities of the model parameters over the entire time period. 9 | 10 | 2. **Toy example** 11 | 12 | The GSI sensitivity indices are computed for a toy model whose analytical solution is given in :cite:`GSI`. 13 | -------------------------------------------------------------------------------- /docs/code/sensitivity/generalised_sobol/local_multioutput.py: -------------------------------------------------------------------------------- 1 | """" 2 | This is the toy example with multiple outputs from [1]_. 3 | 4 | References 5 | ---------- 6 | 7 | .. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. 8 | Sensitivity analysis for multidimensional and functional outputs. 9 | Electronic journal of statistics 2014; 8(1): 575-603. 10 | 11 | """ 12 | 13 | import numpy as np 14 | 15 | 16 | def evaluate(X): 17 | 18 | """ 19 | 20 | * **Input:** 21 | 22 | * **X** (`ndarray`): 23 | Samples from the input distribution. 24 | Shape: (n_samples, 2) 25 | 26 | * **Output:** 27 | 28 | * **Y** (`ndarray`): 29 | Model evaluations. 
30 | Shape: (2, n_samples) 31 | 32 | """ 33 | 34 | n_samples = X.shape[0] 35 | 36 | output = np.zeros((2, n_samples)) 37 | 38 | output[0, :] = X[:, 0] + X[:, 1] + X[:, 0] * X[:, 1] 39 | 40 | output[1, :] = 2 * X[:, 0] + X[:, 1] + 3 * X[:, 0] * X[:, 1] 41 | 42 | return output 43 | -------------------------------------------------------------------------------- /docs/code/sensitivity/morris/local_pfn.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | 8 | import numpy as np 9 | import sys 10 | 11 | 12 | class RunPythonModel: 13 | 14 | def __init__(self, samples=None, dimension=None): 15 | 16 | self.samples = samples 17 | self.dimension = dimension 18 | self.qoi = [0]*self.samples.shape[0] 19 | 20 | P = 750 21 | for i in range(self.samples.shape[0]): 22 | self.qoi[i] = 1800-np.maximum(self.samples[i,0],self.samples[i,1])-2*np.sqrt(2)/3*P 23 | 24 | 25 | def gfun_sensitivity(samples, a_values): 26 | gi_xi = [(np.abs(4. * Xi - 2) + ai) / (1. + ai) for Xi, ai in zip(np.array(samples).T, a_values)] 27 | gfun = np.prod(np.array(gi_xi), axis=0) 28 | return list(gfun) 29 | 30 | 31 | def fun2_sensitivity(samples): 32 | fun_vals = 0.01 * samples[:, 0] + 1. * samples[:, 1] + 0.4 * samples[:, 2] ** 2 + samples[:, 3] * samples[:, 4] 33 | return list(fun_vals) 34 | 35 | -------------------------------------------------------------------------------- /docs/code/sensitivity/sobol/local_additive.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | 8 | import numpy as np 9 | 10 | 11 | def evaluate(X, params) -> np.array: 12 | r"""A linear function that is used to demonstrate sensitivity indices. 13 | 14 | .. 
math:: 15 | f(x) = a \cdot x_1 + b \cdot x_2 16 | """ 17 | a, b = params 18 | 19 | Y = a * X[:, 0] + b * X[:, 1] 20 | 21 | return Y 22 | -------------------------------------------------------------------------------- /docs/code/sensitivity/sobol/local_ishigami.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | 8 | import numpy as np 9 | 10 | 11 | def evaluate(X, params=[7, 0.1]): 12 | """Non-monotonic Ishigami-Homma three parameter test function""" 13 | 14 | a = params[0] 15 | b = params[1] 16 | 17 | Y = ( 18 | np.sin(X[:, 0]) 19 | + a * np.power(np.sin(X[:, 1]), 2) 20 | + b * np.power(X[:, 2], 4) * np.sin(X[:, 0]) 21 | ) 22 | 23 | return Y 24 | -------------------------------------------------------------------------------- /docs/code/sensitivity/sobol/local_sobol_func.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary file 4 | ============================================== 5 | 6 | """ 7 | 8 | import numpy as np 9 | import copy 10 | 11 | 12 | def evaluate(X, a_values): 13 | 14 | dims = len(a_values) 15 | g = 1 16 | 17 | for i in range(dims): 18 | g_i = (np.abs(4 * X[:, i] - 2) + a_values[i]) / (1 + a_values[i]) 19 | g *= g_i 20 | 21 | return g 22 | 23 | 24 | def sensitivities(a_values): 25 | 26 | dims = len(a_values) 27 | 28 | Total_order = np.zeros((dims, 1)) 29 | 30 | V_i = (3 * (1 + a_values) ** 2) ** (-1) 31 | 32 | total_variance = np.prod(1 + V_i) - 1 33 | 34 | First_order = V_i / total_variance 35 | 36 | for i in range(dims): 37 | 38 | rem_First_order = copy.deepcopy(V_i) 39 | rem_First_order[i] = 0 40 | Total_order[i] = V_i[i] * np.prod(rem_First_order + 1) / total_variance 41 | 42 | return First_order.reshape(-1, 1), Total_order 43 | -------------------------------------------------------------------------------- /docs/code/stochastic_processes/bispectral/README.rst: -------------------------------------------------------------------------------- 1 | Bispectral Representation Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | 5 | -------------------------------------------------------------------------------- /docs/code/stochastic_processes/karhunen_loeve_1d/README.rst: -------------------------------------------------------------------------------- 1 | Karhunen Loeve Expansion Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | 5 | -------------------------------------------------------------------------------- /docs/code/stochastic_processes/karhunen_loeve_2d/README.rst: -------------------------------------------------------------------------------- 1 | Karhunen Loeve Expansion 2D Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | 5 | -------------------------------------------------------------------------------- /docs/code/stochastic_processes/spectral/README.rst: -------------------------------------------------------------------------------- 1 | Spectral Representation Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | 5 | -------------------------------------------------------------------------------- /docs/code/stochastic_processes/translation/README.rst: -------------------------------------------------------------------------------- 1 | Translation Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | 5 | -------------------------------------------------------------------------------- /docs/code/surrogates/gpr/README.rst: 
-------------------------------------------------------------------------------- 1 | Gaussian Process Regression Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | 5 | -------------------------------------------------------------------------------- /docs/code/surrogates/gpr/local_python_model_1Dfunction.py: -------------------------------------------------------------------------------- 1 | def y_func(z): 2 | import numpy as np 3 | return np.sin(z) 4 | 5 | -------------------------------------------------------------------------------- /docs/code/surrogates/gpr/local_python_model_function.py: -------------------------------------------------------------------------------- 1 | def y_func(z): 2 | return 1/(6.2727*(abs(0.3-z[:, 0]**2-z[:, 1]**2)+0.01)) 3 | 4 | -------------------------------------------------------------------------------- /docs/code/surrogates/pce/Example_Camel_function.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/surrogates/pce/Example_Camel_function.png -------------------------------------------------------------------------------- /docs/code/surrogates/pce/Example_RobotArm_function.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/surrogates/pce/Example_RobotArm_function.png -------------------------------------------------------------------------------- /docs/code/surrogates/pce/Example_Sphere_function.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/code/surrogates/pce/Example_Sphere_function.png -------------------------------------------------------------------------------- /docs/code/surrogates/pce/README.rst: -------------------------------------------------------------------------------- 1 | Polynomial Chaos Expansion Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | 5 | -------------------------------------------------------------------------------- /docs/code/surrogates/srom/README.rst: -------------------------------------------------------------------------------- 1 | Stochastic Reduced Order Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | 5 | -------------------------------------------------------------------------------- /docs/code/surrogates/srom/local_eigenvalue_model.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | Auxiliary File 4 | ====================================================================== 5 | 6 | """ 7 | import numpy as np 8 | 9 | 10 | class RunPythonModel: 11 | 12 | def __init__(self, samples=None, dimension=None): 13 | 14 | self.samples = samples 15 | self.dimension = dimension 16 | self.qoi = np.zeros_like(self.samples) 17 | for i in range(self.samples.shape[0]): 18 | p = np.array([[self.samples[i, 0]+self.samples[i, 1], -self.samples[i, 1], 0], 19 | [-self.samples[i, 1], self.samples[i, 1]+self.samples[i, 2], -self.samples[i, 2]], 20 | [0, -self.samples[i, 2], self.samples[i, 2]]]) 21 | w, v = np.linalg.eig(p) 22 | self.qoi[i, :] = w 23 | 24 | -------------------------------------------------------------------------------- /docs/code/transformations/nataf/README.rst: -------------------------------------------------------------------------------- 1 | 
Nataf Transformations Examples 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | 5 | -------------------------------------------------------------------------------- /docs/doc.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | clear 4 | make clean 5 | make html 6 | open build/html/index.html 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | UQpy 2 | sphinx_autodoc_typehints == 1.23.0 3 | sphinx_rtd_theme == 1.2.0 4 | sphinx_gallery == 0.13.0 5 | sphinxcontrib_bibtex == 2.5.0 6 | Sphinx==6.1.3 7 | torch == 2.6.0 8 | torchinfo ~= 1.8.0 -------------------------------------------------------------------------------- /docs/source/_static/Inference_models.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/Inference_models.png -------------------------------------------------------------------------------- /docs/source/_static/Inference_schematic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/Inference_schematic.png -------------------------------------------------------------------------------- /docs/source/_static/JHU_logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/JHU_logo.jpg -------------------------------------------------------------------------------- /docs/source/_static/Reliability_FORM.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/Reliability_FORM.png -------------------------------------------------------------------------------- /docs/source/_static/Reliability_example_form.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/Reliability_example_form.png 
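
Returning to the sensitivity comparison and Cramér-von Mises READMEs above: for the additive model f(x) = a*x_1 + b*x_2 with independent unit-variance inputs, the first-order Sobol indices have the closed form a**2/(a**2 + b**2) and b**2/(a**2 + b**2), which gives a reference value when comparing against the Chatterjee and Cramér-von Mises estimates. The sketch below is a plain NumPy pick-and-freeze (Saltelli-type) estimate of those first-order indices, not UQpy's Sobol class; the coefficients, sample size, and standard normal inputs are illustrative assumptions.

import numpy as np

a, b = 2.0, 1.0      # illustrative coefficients; analytical S1 = a**2/(a**2 + b**2), S2 = b**2/(a**2 + b**2)
n_samples = 100_000  # illustrative sample size
rng = np.random.default_rng(1)


def additive_model(x):
    return a * x[:, 0] + b * x[:, 1]


# Two independent input samples, as required by the pick-and-freeze scheme.
sample_a = rng.standard_normal((n_samples, 2))
sample_b = rng.standard_normal((n_samples, 2))
y_a = additive_model(sample_a)
y_b = additive_model(sample_b)
total_variance = np.var(y_a)

for i in range(2):
    mixed = sample_a.copy()
    mixed[:, i] = sample_b[:, i]  # "freeze" input i at the values from the second sample
    y_mixed = additive_model(mixed)
    partial_variance = np.mean(y_b * (y_mixed - y_a))
    print(f"S_{i + 1} estimate: {partial_variance / total_variance:.3f}")

print("analytical:", a**2 / (a**2 + b**2), b**2 / (a**2 + b**2))
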
-------------------------------------------------------------------------------- /docs/source/_static/Runmodel_directory_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/Runmodel_directory_1.png -------------------------------------------------------------------------------- /docs/source/_static/Runmodel_directory_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/Runmodel_directory_2.png -------------------------------------------------------------------------------- /docs/source/_static/Runmodel_directory_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/Runmodel_directory_3.png -------------------------------------------------------------------------------- /docs/source/_static/Runmodel_workflow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/Runmodel_workflow.png -------------------------------------------------------------------------------- /docs/source/_static/SampleMethods_IS_samples.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/SampleMethods_IS_samples.png -------------------------------------------------------------------------------- /docs/source/_static/SampleMethods_MCMC_samples.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/SampleMethods_MCMC_samples.png -------------------------------------------------------------------------------- /docs/source/_static/SampleMethods_Simplex.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/SampleMethods_Simplex.png -------------------------------------------------------------------------------- /docs/source/_static/Transformations_correlate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/Transformations_correlate.png -------------------------------------------------------------------------------- /docs/source/_static/Transformations_uncorrelate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/Transformations_uncorrelate.png -------------------------------------------------------------------------------- /docs/source/_static/architecture/adaptive_kriging_functions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/architecture/adaptive_kriging_functions.png 
-------------------------------------------------------------------------------- /docs/source/_static/architecture/dimension_reduction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/architecture/dimension_reduction.png -------------------------------------------------------------------------------- /docs/source/_static/architecture/distributions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/architecture/distributions.png -------------------------------------------------------------------------------- /docs/source/_static/architecture/inference.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/architecture/inference.png -------------------------------------------------------------------------------- /docs/source/_static/architecture/mcmc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/architecture/mcmc.png -------------------------------------------------------------------------------- /docs/source/_static/architecture/reliability.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/architecture/reliability.png -------------------------------------------------------------------------------- /docs/source/_static/architecture/run_model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/architecture/run_model.png -------------------------------------------------------------------------------- /docs/source/_static/architecture/sensitivity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/architecture/sensitivity.png -------------------------------------------------------------------------------- /docs/source/_static/architecture/stochastic_process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/architecture/stochastic_process.png -------------------------------------------------------------------------------- /docs/source/_static/architecture/stratified_sampling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/architecture/stratified_sampling.png -------------------------------------------------------------------------------- /docs/source/_static/architecture/surrogates.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/architecture/surrogates.png 
-------------------------------------------------------------------------------- /docs/source/_static/architecture/transformations.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/architecture/transformations.png -------------------------------------------------------------------------------- /docs/source/_static/logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/logo.jpg -------------------------------------------------------------------------------- /docs/source/_static/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/logo.png -------------------------------------------------------------------------------- /docs/source/_static/logo2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/logo2.jpg -------------------------------------------------------------------------------- /docs/source/_static/morris_indices.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/_static/morris_indices.png -------------------------------------------------------------------------------- /docs/source/bibliography.rst: -------------------------------------------------------------------------------- 1 | Bibliography 2 | ============ 3 | 4 | .. bibliography:: -------------------------------------------------------------------------------- /docs/source/binder/requirements.txt: -------------------------------------------------------------------------------- 1 | UQpy -------------------------------------------------------------------------------- /docs/source/dimension_reduction/grassmann/index.rst: -------------------------------------------------------------------------------- 1 | Grassmann manifold 2 | -------------------------------- 3 | 4 | In differential geometry the Grassmann manifold :math:`\mathcal{G}(p, n)` refers to the collection of all 5 | :math:`p`-dimensional subspaces embedded in a :math:`n`-dimensional vector space 6 | :cite:`Grassmann_1` :cite:`Grassmann_2`. A point on :math:`\mathcal{G}(p, n)` is typically represented as a 7 | :math:`n \times p` orthonormal matrix :math:`\mathbf{X}`, whose column spans the corresponding subspace. :py:mod:`UQpy` 8 | contains a set of classes and methods for data projection onto the Grassmann manifold, operations and interpolation on 9 | the Grassmann manifold. 10 | 11 | .. 
toctree:: 12 | :maxdepth: 1 13 | :caption: Methods 14 | 15 | Grassmann Projections 16 | Grassmann Operations 17 | Grassmann Interpolation 18 | Grassmann Examples <../../auto_examples/dimension_reduction/grassmann/index> 19 | -------------------------------------------------------------------------------- /docs/source/distributions/distributions_continuous_1d.rst: -------------------------------------------------------------------------------- 1 | Continuous Distributions 1D 2 | --------------------------------------- 3 | 4 | In :py:mod:`UQpy`, univariate continuous distributions inherit from the :class:`.DistributionContinuous1D` class. 5 | 6 | Since the continuous distributions are based on :py:mod:`scipy`, all of the distribution types described possess the 7 | following methods: :py:meth:`cdf`, :py:meth:`pdf`, :py:meth:`log_pdf`, :py:meth:`icdf`, :py:meth:`rvs`, 8 | :py:meth:`moments`, :py:meth:`fit`. 9 | 10 | 11 | .. toctree:: 12 | :maxdepth: 1 13 | 14 | List of 1D Continuous Distributions 15 | Examples <../auto_examples/distributions/continuous_1d/index.rst> 16 | 17 | -------------------------------------------------------------------------------- /docs/source/distributions/distributions_discrete_1d.rst: -------------------------------------------------------------------------------- 1 | Discrete Distributions 1D 2 | ---------------------------------- 3 | 4 | In :py:mod:`UQpy`, univariate discrete distributions inherit from the :class:`.DistributionDiscrete1D` class. A list 5 | of the available discrete distributions can be found below. 6 | 7 | 8 | .. toctree:: 9 | :maxdepth: 1 10 | 11 | List of 1D Discrete Distributions 12 | Examples <../auto_examples/distributions/discrete_1d/index.rst> -------------------------------------------------------------------------------- /docs/source/distributions/distributions_multivariate.rst: -------------------------------------------------------------------------------- 1 | Distributions ND 2 | ---------------------------------- 3 | 4 | In :py:mod:`UQpy`, multivariate distributions inherit from the :class:`.DistributionND` class. 5 | 6 | 7 | :py:mod:`UQpy` has some inbuilt multivariate distributions, which are directly child classes of :class:`.DistributionND`. 8 | Additionally, joint distributions can be built from their marginals through the use of the :class:`.JointIndependent` and 9 | :class:`.JointCopula` classes described below. 10 | 11 | 12 | .. toctree:: 13 | :maxdepth: 1 14 | 15 | List of multivariate distributions 16 | Joint from independent marginals 17 | Joint from marginals and copula 18 | Examples <../auto_examples/distributions/multivariate/index> 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /docs/source/distributions/user_defined_distributions.rst: -------------------------------------------------------------------------------- 1 | User-defined Distributions and Copulas 2 | --------------------------------------------------- 3 | 4 | Defining custom distributions in :py:mod:`UQpy` can be done by subclassing the appropriate parent class. 5 | The subclasses must possess the desired methods, per the parent :class:`.Distribution` class. 6 | 7 | Custom copulas can be similarly defined by subclassing the :class:`.Copula` class and defining the appropriate methods. 8 | 9 | ..
toctree:: 10 | :maxdepth: 1 11 | 12 | Examples <../auto_examples/distributions/user_defined/index.rst> -------------------------------------------------------------------------------- /docs/source/news_doc.rst: -------------------------------------------------------------------------------- 1 | .. _news_doc: 2 | 3 | News 4 | ==== 5 | 6 | 7/3/2020 ``UQpy`` v3.0 is released. 7 | 8/24/2020 ``UQpy`` paper published in Journal of Computational Science 8 | -------------------------------------------------------------------------------- /docs/source/sampling/latin_hypercube.rst: -------------------------------------------------------------------------------- 1 | Latin Hypercube Sampling 2 | ------------------------ 3 | 4 | The :class:`.LatinHypercubeSampling` class generates random samples from a specified probability distribution(s) using 5 | Latin hypercube sampling. LatinHypercubeSampling has the advantage that the samples generated are uniformly distributed 6 | over each marginal distribution. LatinHypercubeSampling is performed by dividing the range of each random variable 7 | into :math:`N` bins with equal probability mass, where :math:`N` is the required number of samples, generating one 8 | sample per bin, and then randomly pairing the samples. 9 | 10 | 11 | .. toctree:: 12 | :hidden: 13 | :maxdepth: 1 14 | 15 | Latin Hypercube Class 16 | List of Available Latin Hypercube Criteria 17 | Adding new Latin Hypercube Design Criteria 18 | 19 | 20 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /docs/source/sampling/latin_hypercube/lhs_class.rst: -------------------------------------------------------------------------------- 1 | LatinHypercubeSampling Class 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | The :class:`.LatinHypercubeSampling` class is imported using the following command: 5 | 6 | >>> from UQpy.sampling.stratified_sampling.LatinHypercubeSampling import LatinHypercubeSampling 7 | 8 | Methods 9 | ~~~~~~~~~~~~~~~~~~ 10 | .. autoclass:: UQpy.sampling.LatinHypercubeSampling 11 | :members: run 12 | 13 | Attributes 14 | ~~~~~~~~~~~~~~~~~~ 15 | .. autoattribute:: UQpy.sampling.LatinHypercubeSampling.samples 16 | .. autoattribute:: UQpy.sampling.LatinHypercubeSampling.samplesU01 17 | 18 | Examples 19 | ~~~~~~~~~~~~~~~~~~ 20 | 21 | .. toctree:: 22 | 23 | Latin Hypercube Examples <../../auto_examples/sampling/latin_hypercube/index> 24 | 25 | -------------------------------------------------------------------------------- /docs/source/sampling/latin_hypercube/lhs_criteria.rst: -------------------------------------------------------------------------------- 1 | List of Available Latin Hypercube Design Criteria 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | The :class:`.Random` class is imported using the following command: 5 | 6 | >>> from UQpy.sampling.stratified_sampling.latin_hypercube_criteria.Random import Random 7 | 8 | .. autoclass:: UQpy.sampling.Random 9 | 10 | The :class:`.Centered` class is imported using the following command: 11 | 12 | >>> from UQpy.sampling.stratified_sampling.latin_hypercube_criteria.Centered import Centered 13 | 14 | .. autoclass:: UQpy.sampling.Centered 15 | 16 | The :class:`.MaxiMin` class is imported using the following command: 17 | 18 | >>> from UQpy.sampling.stratified_sampling.latin_hypercube_criteria.MaxiMin import MaxiMin 19 | 20 | .. 
autoclass:: UQpy.sampling.MaxiMin 21 | 22 | The :class:`.MinCorrelation` class is imported using the following command: 23 | 24 | >>> from UQpy.sampling.stratified_sampling.latin_hypercube_criteria.MinCorrelation import MinCorrelation 25 | 26 | .. autoclass:: UQpy.sampling.MinCorrelation -------------------------------------------------------------------------------- /docs/source/sampling/mcmc/dram.rst: -------------------------------------------------------------------------------- 1 | DRAM 2 | ~~~~~~~ 3 | 4 | The :class:`.DRAM` class is imported using the following command: 5 | 6 | >>> from UQpy.sampling.mcmc.DRAM import DRAM 7 | 8 | .. autoclass:: UQpy.sampling.mcmc.DRAM 9 | :members: 10 | -------------------------------------------------------------------------------- /docs/source/sampling/mcmc/dream.rst: -------------------------------------------------------------------------------- 1 | DREAM 2 | ~~~~~~~ 3 | 4 | The :class:`.DREAM` class is imported using the following command: 5 | 6 | >>> from UQpy.sampling.mcmc.DREAM import DREAM 7 | 8 | .. autoclass:: UQpy.sampling.mcmc.DREAM 9 | :members: 10 | -------------------------------------------------------------------------------- /docs/source/sampling/mcmc/mh.rst: -------------------------------------------------------------------------------- 1 | MetropolisHastings 2 | ~~~~~~~~~~~~~~~~~~ 3 | 4 | The :class:`.MetropolisHastings` class is imported using the following command: 5 | 6 | >>> from UQpy.sampling.mcmc.MetropolisHastings import MetropolisHastings 7 | 8 | .. autoclass:: UQpy.sampling.mcmc.MetropolisHastings 9 | :members: 10 | -------------------------------------------------------------------------------- /docs/source/sampling/mcmc/mmh.rst: -------------------------------------------------------------------------------- 1 | ModifiedMetropolisHastings 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | 4 | The :class:`.ModifiedMetropolisHastings` class is imported using the following command: 5 | 6 | >>> from UQpy.sampling.mcmc.ModifiedMetropolisHastings import ModifiedMetropolisHastings 7 | 8 | .. autoclass:: UQpy.sampling.mcmc.ModifiedMetropolisHastings 9 | :members: 10 | -------------------------------------------------------------------------------- /docs/source/sampling/mcmc/stretch.rst: -------------------------------------------------------------------------------- 1 | Stretch 2 | ~~~~~~~~ 3 | 4 | The :class:`.Stretch` class is imported using the following command: 5 | 6 | >>> from UQpy.sampling.mcmc.Stretch import Stretch 7 | 8 | .. autoclass:: UQpy.sampling.mcmc.Stretch 9 | :members: 10 | -------------------------------------------------------------------------------- /docs/source/sampling/strata/adding_new_strata.rst: -------------------------------------------------------------------------------- 1 | Adding a new Strata class 2 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 3 | 4 | Adding a new type of stratification requires creating a new subclass of the :class:`.Strata` class that defines the 5 | desired geometric decomposition. This subclass must have a :meth:`stratify` method that overwrites the corresponding 6 | method in the parent class and performs the stratification. 
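A minimal sketch of the subclassing pattern described in adding_new_strata.rst above, under stated assumptions: the baseclass import path, its ``seeds`` constructor argument, the ``seeds``/``volume`` attributes, and the ``stratify`` signature are modeled on the built-in strata shown below but are not verified against a particular UQpy release, and the parent class may declare additional abstract members.

import numpy as np
from UQpy.sampling.stratified_sampling.strata.baseclass.Strata import Strata


class EqualBandStrata(Strata):
    """Hypothetical stratification: equal-width horizontal bands of [0, 1]^2."""

    def __init__(self, n_strata=4):
        super().__init__(seeds=None)  # assumed parent signature
        self.n_strata = n_strata

    def stratify(self, random_state=None):
        # One band per stratum: seed at the band's lower-left corner, equal volumes.
        edges = np.linspace(0, 1, self.n_strata + 1)
        self.seeds = np.column_stack([np.zeros(self.n_strata), edges[:-1]])
        self.volume = np.full(self.n_strata, 1.0 / self.n_strata)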
-------------------------------------------------------------------------------- /docs/source/sampling/strata/delaunay_strata.rst: -------------------------------------------------------------------------------- 1 | Delaunay 2 | ~~~~~~~~~~~~~~~~~~ 3 | 4 | The :class:`.DelaunayStrata` class is imported using the following command: 5 | 6 | >>> from UQpy.sampling.stratified_sampling.strata.DelaunayStrata import DelaunayStrata 7 | 8 | Methods 9 | """"""" 10 | .. autoclass:: UQpy.sampling.stratified_sampling.strata.DelaunayStrata 11 | :members: stratify, compute_delaunay_centroid_volume 12 | 13 | Attributes 14 | """""""""" 15 | .. autoattribute:: UQpy.sampling.stratified_sampling.strata.DelaunayStrata.delaunay 16 | .. autoattribute:: UQpy.sampling.stratified_sampling.strata.DelaunayStrata.centroids -------------------------------------------------------------------------------- /docs/source/sampling/strata/rectangular_strata.rst: -------------------------------------------------------------------------------- 1 | Rectangular 2 | ~~~~~~~~~~~~~~~~~~ 3 | 4 | The :class:`.RectangularStrata` class is imported using the following command: 5 | 6 | >>> from UQpy.sampling.stratified_sampling.strata.RectangularStrata import RectangularStrata 7 | 8 | Methods 9 | """"""" 10 | .. autoclass:: UQpy.sampling.stratified_sampling.strata.RectangularStrata 11 | :members: stratify, fullfact, plot_2d 12 | 13 | Attributes 14 | """""""""" 15 | .. autoattribute:: UQpy.sampling.stratified_sampling.RectangularStrata.strata_number 16 | .. autoattribute:: UQpy.sampling.stratified_sampling.RectangularStrata.widths -------------------------------------------------------------------------------- /docs/source/sampling/strata/voronoi_strata.rst: -------------------------------------------------------------------------------- 1 | Voronoi 2 | ~~~~~~~~~~~~~~~~~~ 3 | 4 | The :class:`.VoronoiStrata` class is imported using the following command: 5 | 6 | >>> from UQpy.sampling.stratified_sampling.strata.VoronoiStrata import VoronoiStrata 7 | 8 | Methods 9 | """"""" 10 | .. autoclass:: UQpy.sampling.stratified_sampling.strata.VoronoiStrata 11 | :members: stratify, compute_voronoi_centroid_volume, add_boundary_points_and_construct_delaunay 12 | 13 | Attributes 14 | """""""""" 15 | .. autoattribute:: UQpy.sampling.stratified_sampling.strata.VoronoiStrata.voronoi 16 | .. autoattribute:: UQpy.sampling.stratified_sampling.strata.VoronoiStrata.vertices 17 | -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/figures/uq4ml.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/scientific_machine_learning/figures/uq4ml.png -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/functional/index.rst: -------------------------------------------------------------------------------- 1 | Functional 2 | ========== 3 | 4 | This part of the module mirrors the intention and usage of :code:`torch.nn.functional`. 5 | Here we define Python functions that perform computations for neural network components of the same name. 6 | 7 | .. 
toctree:: 8 | :maxdepth: 1 9 | :caption: Functional 10 | 11 | Losses 12 | Spectral Convolutions 13 | -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/layers/dropout_baseclass.rst: -------------------------------------------------------------------------------- 1 | Probabilistic Dropout Layer Baseclass 2 | ------------------------------------- 3 | 4 | This is the parent class to all Probabilistic Dropout methods as laid out by Gal et al :cite:`gal2016dropout`. 5 | The :class:`ProbabilisticDropoutLayer` is an abstract baseclass and a subclass of :class:`torch.nn.Module`. 6 | 7 | The documentation in the :py:meth:`forward` and :py:meth:`extra_repr` on this page may be inherited from PyTorch docstrings. 8 | 9 | Methods 10 | ~~~~~~~ 11 | 12 | .. autoclass:: UQpy.scientific_machine_learning.baseclass.ProbabilisticDropoutLayer 13 | :members: forward, extra_repr 14 | -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/neural_networks/figures/Unet_schematic.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/scientific_machine_learning/neural_networks/figures/Unet_schematic.pdf -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/neural_networks/figures/approximating_functions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/scientific_machine_learning/neural_networks/figures/approximating_functions.png -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/neural_networks/figures/approximating_operators.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/scientific_machine_learning/neural_networks/figures/approximating_operators.png -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/neural_networks/figures/deep_operator_network_diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/scientific_machine_learning/neural_networks/figures/deep_operator_network_diagram.png -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/neural_networks/figures/deep_operator_network_shapes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/scientific_machine_learning/neural_networks/figures/deep_operator_network_shapes.png -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/neural_networks/figures/fourier_network_diagram.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/docs/source/scientific_machine_learning/neural_networks/figures/fourier_network_diagram.pdf -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/neural_networks/index.rst: -------------------------------------------------------------------------------- 1 | Neural Networks 2 | =============== 3 | This section implements a handful of common neural network and neural operator architectures. 4 | The :class:`FeedForwardNeuralNetwork` does not change the architecture of a neural network, but provides an easy interface 5 | to control the behavior of the Bayesian and Dropout layers introduced in this module. 6 | 7 | 8 | List of Neural Networks 9 | ^^^^^^^^^^^^^^^^^^^^^^^ 10 | 11 | .. toctree:: 12 | :maxdepth: 1 13 | 14 | Neural Network Parent Class 15 | Deep Operator Network 16 | Feed Forward Neural Network 17 | Fourier Neural Operator 18 | U-Net Neural Network 19 | -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/neural_networks/neural_network_parent.rst: -------------------------------------------------------------------------------- 1 | Neural Network Baseclass 2 | ------------------------ 3 | 4 | This is the parent class to all neural networks. 5 | The :class:`NeuralNetwork` is an abstract baseclass and a subclass of :class:`torch.nn.Module`. 6 | 7 | The documentation in the :class:`NeuralNetwork` class may be inherited from PyTorch docstrings. 8 | 9 | Methods 10 | ~~~~~~~ 11 | 12 | .. autoclass:: UQpy.scientific_machine_learning.baseclass.NeuralNetwork 13 | :members: forward, summary, count_parameters, drop, sample, is_deterministic, set_deterministic 14 | 15 | Attributes 16 | ~~~~~~~~~~ 17 | 18 | .. autoattribute:: UQpy.scientific_machine_learning.baseclass.NeuralNetwork.dropping 19 | .. autoattribute:: UQpy.scientific_machine_learning.baseclass.NeuralNetwork.sampling -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/trainers/bbb_trainer.rst: -------------------------------------------------------------------------------- 1 | Bayes By Backpropagation Trainer (BBBTrainer) 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | 4 | Class to train a neural network using the Bayes by Backpropagation :cite:`blundell2015weight` method and a PyTorch optimization algorithm. 5 | 6 | The :class:`.BBBTrainer` class is imported using the following command: 7 | 8 | >>> from UQpy.scientific_machine_learning.trainers.BBBTrainer import BBBTrainer 9 | 10 | 11 | Methods 12 | ------- 13 | 14 | .. autoclass:: UQpy.scientific_machine_learning.trainers.BBBTrainer 15 | :members: run 16 | 17 | Attributes 18 | ---------- 19 | 20 | .. autoattribute:: UQpy.scientific_machine_learning.trainers.BBBTrainer.history 21 | 22 | Examples 23 | -------- 24 | 25 | .. toctree:: 26 | 27 | BBBTrainer Examples <../../auto_examples/scientific_machine_learning/bbb_trainer/index> -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/trainers/hmc_trainer.rst: -------------------------------------------------------------------------------- 1 | Hamiltonian Monte Carlo Trainers 2 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 3 | 4 | UQpy does not have its own implementation of a Hamiltonian Monte Carlo (HMC) trainer.
5 | For using the HMC algorithm to train neural networks, we refer users to Pyro HMC (https://docs.pyro.ai/en/stable/mcmc.html#hmc) 6 | or Hamiltorch HMC (https://adamcobb.github.io/journal/hamiltorch.html). Both of these packages are based on PyTorch, 7 | although it may be simpler to implement ``UQpy.sml`` models using Hamiltorch. 8 | 9 | Examples 10 | -------- 11 | 12 | .. toctree:: 13 | 14 | FNO with Hamiltorch <../../auto_examples/scientific_machine_learning/hmc_trainer/burgers_fno> 15 | 16 | -------------------------------------------------------------------------------- /docs/source/scientific_machine_learning/trainers/trainer.rst: -------------------------------------------------------------------------------- 1 | Trainer 2 | ~~~~~~~ 3 | 4 | Class to train a neural network using a PyTorch optimization algorithm. 5 | 6 | The :class:`.Trainer` class is imported using the following command: 7 | 8 | >>> from UQpy.scientific_machine_learning import Trainer 9 | 10 | 11 | Methods 12 | ------- 13 | 14 | .. autoclass:: UQpy.scientific_machine_learning.trainers.Trainer 15 | :members: run 16 | 17 | Attributes 18 | ---------- 19 | 20 | .. autoattribute:: UQpy.scientific_machine_learning.trainers.Trainer.history 21 | 22 | Examples 23 | -------- 24 | 25 | .. toctree:: 26 | 27 | Trainer Examples <../../auto_examples/scientific_machine_learning/trainer/index> 28 | Monte Carlo Dropout Examples <../../auto_examples/scientific_machine_learning/mcd_trainer/index> -------------------------------------------------------------------------------- /docs/source/surrogates/index.rst: -------------------------------------------------------------------------------- 1 | Surrogates 2 | ========== 3 | 4 | This module contains functionality for all the surrogate methods supported in UQpy. 5 | 6 | The module currently contains the following classes: 7 | 8 | - :class:`.SROM`: Class to estimate a discrete approximation for a continuous random variable using a Stochastic Reduced Order Model. 9 | 10 | - :class:`.GaussianProcessRegression`: Class to generate an approximate surrogate model using Gaussian Processes. 11 | 12 | - :class:`.PolynomialChaosExpansion`: Class to generate an approximate surrogate model using a Polynomial Chaos Expansion. 13 | 14 | 15 | .. toctree:: 16 | :hidden: 17 | :maxdepth: 2 18 | :caption: Surrogates 19 | 20 | Stochastic Reduced Order Models 21 | Gaussian Process Regression 22 | Polynomial Chaos Expansion -------------------------------------------------------------------------------- /docs/source/transformations/correlate.rst: -------------------------------------------------------------------------------- 1 | Correlate 2 | ----------------- 3 | 4 | :class:`.Correlate` is a class to induce correlation in an uncorrelated standard normal vector :math:`\textbf{u}=[U_1,...,U_n]`, 5 | given the correlation matrix :math:`\textbf{C}_z=[\rho_{ij}]`. The correlated standard normal vector 6 | :math:`\textbf{z}=[Z_1,...,Z_n]` can be calculated as: 7 | 8 | .. math:: \mathbf{z}^\intercal = \mathbf{H}\mathbf{u}^\intercal 9 | 10 | where :math:`\mathbf{H}` is the lower triangular matrix resulting from the Cholesky decomposition of the correlation matrix, i.e. :math:`\mathbf{C_z}=\mathbf{H}\mathbf{H}^\intercal`. 11 | 12 | Correlate Class 13 | ^^^^^^^^^^^^^^^^^ 14 | 15 | The :class:`.Correlate` class is imported using the following command: 16 | 17 | >>> from UQpy.transformations.Correlate import Correlate 18 | 19 | Methods 20 | """"""" 21 | ..
autoclass:: UQpy.transformations.Correlate 22 | 23 | Attributes 24 | """""""""" 25 | 26 | .. autoattribute:: UQpy.transformations.Correlate.H 27 | .. autoattribute:: UQpy.transformations.Correlate.samples_z -------------------------------------------------------------------------------- /docs/source/transformations/decorrelate.rst: -------------------------------------------------------------------------------- 1 | Decorrelate 2 | ----------------- 3 | 4 | :class:`.Decorrelate` is a class to remove correlation from a correlated standard normal vector 5 | :math:`\textbf{z}=[Z_1,...,Z_n]` with correlation matrix :math:`\textbf{C}_z=[\rho_{ij}]`. The uncorrelated standard 6 | normal vector :math:`\textbf{u}=[U_1,...,U_n]` can be calculated as: 7 | 8 | .. math:: \mathbf{u}^\intercal = \mathbf{H}^{-1} \mathbf{z}^\intercal 9 | 10 | Decorrelate Class 11 | ^^^^^^^^^^^^^^^^^^ 12 | 13 | The :class:`.Decorrelate` class is imported using the following command: 14 | 15 | >>> from UQpy.transformations.Decorrelate import Decorrelate 16 | 17 | Methods 18 | """"""" 19 | .. autoclass:: UQpy.transformations.Decorrelate 20 | 21 | 22 | Attributes 23 | """""""""" 24 | .. autoattribute:: UQpy.transformations.Decorrelate.H 25 | .. autoattribute:: UQpy.transformations.Decorrelate.samples_u -------------------------------------------------------------------------------- /docs/source/transformations/index.rst: -------------------------------------------------------------------------------- 1 | Transformations 2 | ================ 3 | 4 | This module contains functionality for isoprobabilistic transformations in :py:mod:`UQpy`. 5 | 6 | The module currently contains the following classes: 7 | 8 | - :class:`.Nataf`: Class to perform the Nataf isoprobabilistic transformations. 9 | - :class:`.Correlate`: Class to induce correlation in a standard normal vector. 10 | - :class:`.Decorrelate`: Class to remove correlation from a standard normal vector. 11 | 12 | 13 | 14 | .. toctree:: 15 | :maxdepth: 1 16 | :hidden: 17 | :caption: Transformations 18 | 19 | Nataf 20 | Correlate 21 | Decorrelate 22 | 23 | 24 | -------------------------------------------------------------------------------- /docs/source/utilities/grassmann_point.rst: -------------------------------------------------------------------------------- 1 | Grassmann Point 2 | ----------------------------------- 3 | 4 | The :py:mod:`UQpy` class :class:`.GrassmannPoint` offers a way to check whether a data point, given as a matrix 5 | :math:`\mathbf{X} \in \mathbb{R}^{n \times p}`, lies on the corresponding Grassmann manifold. The class takes, as 6 | input, an orthonormal 2d :class:`.numpy.ndarray`, i.e., :math:`\text{shape}(\mathbf{X})=(n, p)`, and checks whether this matrix 7 | is an orthonormal basis, i.e. :math:`\mathbf{X}' \mathbf{X} = \mathbf{I}`, and therefore lies on the Grassmann manifold. If it is, 8 | then it creates the corresponding :class:`.GrassmannPoint` object. 9 | 10 | To use the :class:`.GrassmannPoint` class, one needs to first import it by 11 | 12 | >>> from UQpy.utilities.GrassmannPoint import GrassmannPoint 13 | 14 | To create an object of type :class:`.GrassmannPoint`: 15 | 16 | >>> X = GrassmannPoint(X) 17 | 18 | ..
autoclass:: UQpy.utilities.GrassmannPoint 19 | -------------------------------------------------------------------------------- /docs/source/utilities/index.rst: -------------------------------------------------------------------------------- 1 | Utilities 2 | ================ 3 | 4 | The :py:mod:`.Utilities` module contains a set of classes that support operations in :py:mod:`UQpy` that 5 | may be useful across several modules. These utilities include the following: 6 | 7 | .. toctree:: 8 | :maxdepth: 1 9 | 10 | Distances 11 | Kernels 12 | GrassmannPoint 13 | 14 | -------------------------------------------------------------------------------- /logo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/logo.jpg -------------------------------------------------------------------------------- /meta.yaml: -------------------------------------------------------------------------------- 1 | package: 2 | name: uqpy 3 | version: {{ version }} 4 | 5 | build: 6 | noarch: python 7 | number: 0 8 | 9 | requirements: 10 | host: 11 | - python 12 | - pip 13 | run: 14 | - python 15 | - beartype 0.9.1 16 | 17 | about: 18 | home: https://github.com/SURGroup/UQpy 19 | license: MIT 20 | summary: UQpy is a general purpose Python toolbox for modeling uncertainty in physical and mathematical systems. 21 | doc_url: https://uqpyproject.readthedocs.io 22 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | log_cli = 1 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy == 1.26.4 2 | scipy == 1.6.0 3 | matplotlib == 3.8.4 4 | scikit-learn == 1.4.2 5 | fire == 0.6.0 6 | pytest == 8.2.0 7 | coverage == 7.5.0 8 | pytest-cov == 5.0.0 9 | pylint == 3.1.0 10 | wheel == 0.43.0 11 | pytest-azurepipelines == 1.0.5 12 | twine == 5.0.0 13 | pathlib~=1.0.1 14 | beartype == 0.18.5 15 | setuptools~=65.5.1 16 | torch ~= 2.2.2 17 | torchinfo ~= 1.8.0 18 | -------------------------------------------------------------------------------- /src/UQpy/dimension_reduction/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.dimension_reduction.grassmann_manifold import * 2 | from UQpy.dimension_reduction.pod import * 3 | from UQpy.dimension_reduction.hosvd import * 4 | from UQpy.dimension_reduction.diffusion_maps import * 5 | -------------------------------------------------------------------------------- /src/UQpy/dimension_reduction/diffusion_maps/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.dimension_reduction.diffusion_maps.DiffusionMaps import DiffusionMaps 2 | -------------------------------------------------------------------------------- /src/UQpy/dimension_reduction/grassmann_manifold/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.dimension_reduction.grassmann_manifold.GrassmannOperations import GrassmannOperations 2 | from UQpy.dimension_reduction.grassmann_manifold.GrassmannInterpolation import GrassmannInterpolation 3 | from UQpy.dimension_reduction.grassmann_manifold.projections import * 4 | 
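A short numpy sketch of the workflow described in the Grassmann documentation above: a thin SVD of a data matrix yields an orthonormal basis whose columns span a subspace, and wrapping that basis in ``GrassmannPoint`` (imported exactly as shown in grassmann_point.rst) gives a checked point on :math:`\mathcal{G}(3, 6)`. The matrix sizes here are arbitrary illustrations.

import numpy as np
from UQpy.utilities.GrassmannPoint import GrassmannPoint

rng = np.random.default_rng(0)
data = rng.standard_normal((6, 3))                   # a 6 x 3 data matrix
u, _, _ = np.linalg.svd(data, full_matrices=False)   # thin SVD: 6 x 3 orthonormal basis
point = GrassmannPoint(u)                            # checked point on the Grassmann manifold
print(np.allclose(u.T @ u, np.eye(3)))               # True: the columns are orthonormal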
-------------------------------------------------------------------------------- /src/UQpy/dimension_reduction/grassmann_manifold/projections/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.dimension_reduction.grassmann_manifold.projections.baseclass import * 2 | from UQpy.dimension_reduction.grassmann_manifold.projections.SVDProjection import SVDProjection 3 | -------------------------------------------------------------------------------- /src/UQpy/dimension_reduction/grassmann_manifold/projections/baseclass/GrassmannProjection.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class GrassmannProjection(ABC): 5 | """ 6 | The parent class to all classes used to project data onto the Grassmann manifold. 7 | """ -------------------------------------------------------------------------------- /src/UQpy/dimension_reduction/grassmann_manifold/projections/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.dimension_reduction.grassmann_manifold.projections.baseclass.GrassmannProjection import ( 2 | GrassmannProjection, 3 | ) 4 | -------------------------------------------------------------------------------- /src/UQpy/dimension_reduction/hosvd/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.dimension_reduction.hosvd.HigherOrderSVD import HigherOrderSVD -------------------------------------------------------------------------------- /src/UQpy/dimension_reduction/pod/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.dimension_reduction.pod.DirectPOD import DirectPOD 2 | from UQpy.dimension_reduction.pod.SnapshotPOD import SnapshotPOD 3 | 4 | from UQpy.dimension_reduction.pod.baseclass import * -------------------------------------------------------------------------------- /src/UQpy/dimension_reduction/pod/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.dimension_reduction.pod.baseclass.POD import * -------------------------------------------------------------------------------- /src/UQpy/distributions/__init__.py: -------------------------------------------------------------------------------- 1 | # pylint: disable=wildcard-import 2 | 3 | from UQpy.distributions.baseclass import * 4 | from UQpy.distributions.copulas import * 5 | from UQpy.distributions.collection import * 6 | 7 | from . import baseclass, copulas, collection 8 | -------------------------------------------------------------------------------- /src/UQpy/distributions/baseclass/DistributionDiscrete1D.py: -------------------------------------------------------------------------------- 1 | import scipy.stats as stats 2 | from UQpy.distributions.baseclass.Distribution1D import Distribution1D 3 | from abc import ABC 4 | 5 | 6 | class DistributionDiscrete1D(Distribution1D, ABC): 7 | def __init__(self, **kwargs): 8 | """ 9 | Parent class for univariate discrete distributions. 
10 | """ 11 | super().__init__(**kwargs) 12 | 13 | def _construct_from_scipy(self, scipy_name=stats.rv_discrete): 14 | self.pmf = lambda x: scipy_name.pmf(k=self.check_x_dimension(x), **self.parameters) 15 | self.log_pmf = lambda x: scipy_name.logpmf(k=self.check_x_dimension(x), **self.parameters) 16 | self._retrieve_1d_data_from_scipy(scipy_name, is_continuous=False) 17 | -------------------------------------------------------------------------------- /src/UQpy/distributions/baseclass/DistributionND.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from .Distribution import Distribution 3 | from abc import ABC 4 | 5 | 6 | class DistributionND(Distribution, ABC): 7 | """ 8 | Parent class for multivariate probability distributions. 9 | 10 | """ 11 | 12 | def __init__(self, **kwargs): 13 | super().__init__(**kwargs) 14 | 15 | @staticmethod 16 | def check_x_dimension(x: np.ndarray, d: int = None): 17 | """ 18 | Check the dimension of input x - must be an ndarray of shape (npoints, d) 19 | """ 20 | x = np.array(x) 21 | if len(x.shape) != 2: 22 | raise ValueError("Wrong dimension in x.") 23 | if (d is not None) and (x.shape[1] != d): 24 | raise ValueError("Wrong dimension in x.") 25 | return x 26 | -------------------------------------------------------------------------------- /src/UQpy/distributions/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | """Collection of baseclass files.""" 2 | from UQpy.distributions.baseclass.Copula import Copula 3 | from UQpy.distributions.baseclass.Distribution import Distribution 4 | from UQpy.distributions.baseclass.Distribution1D import Distribution1D 5 | from UQpy.distributions.baseclass.DistributionContinuous1D import ( 6 | DistributionContinuous1D, 7 | ) 8 | from UQpy.distributions.baseclass.DistributionDiscrete1D import DistributionDiscrete1D 9 | from UQpy.distributions.baseclass.DistributionND import DistributionND 10 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Beta.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from UQpy.distributions.baseclass import DistributionContinuous1D 5 | from beartype import beartype 6 | 7 | 8 | class Beta(DistributionContinuous1D): 9 | 10 | @beartype 11 | def __init__( 12 | self, 13 | a: Union[None, float, int], 14 | b: Union[None, float, int], 15 | loc: Union[None, float, int] = 0.0, 16 | scale: Union[None, float, int] = 1.0, 17 | ): 18 | """ 19 | 20 | :param a: first shape parameter 21 | :param b: second shape parameter 22 | :param loc: location parameter 23 | :param scale: scale parameter 24 | """ 25 | super().__init__( 26 | a=a, 27 | b=b, 28 | loc=loc, 29 | scale=scale, 30 | ordered_parameters=("a", "b", "loc", "scale"), 31 | ) 32 | self._construct_from_scipy(scipy_name=stats.beta) 33 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Binomial.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionDiscrete1D 7 | 8 | 9 | class Binomial(DistributionDiscrete1D): 10 | @beartype 11 | def __init__( 12 | self, 13 | n: Union[None, int], 14 | p: Union[None, float, int], 15 | loc: 
Union[None, float, int] = 0.0, 16 | ): 17 | """ 18 | 19 | :param n: number of trials, integer :math:`\ge 0` 20 | :param p: success probability for each trial, real number in :math:`[0, 1]`. 21 | :param loc: location parameter 22 | """ 23 | super().__init__(n=n, p=p, loc=loc, ordered_parameters=("n", "p", "loc")) 24 | self._construct_from_scipy(scipy_name=stats.binom) 25 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Cauchy.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class Cauchy(DistributionContinuous1D): 10 | 11 | @beartype 12 | def __init__( 13 | self, loc: Union[None, float, int] = 0.0, scale: Union[None, float, int] = 1.0 14 | ): 15 | """ 16 | 17 | :param loc: location parameter 18 | :param scale: scale parameter 19 | """ 20 | super().__init__(loc=loc, scale=scale, ordered_parameters=("loc", "scale")) 21 | self._construct_from_scipy(scipy_name=stats.cauchy) 22 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/ChiSquare.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class ChiSquare(DistributionContinuous1D): 10 | @beartype 11 | def __init__( 12 | self, 13 | df: Union[None, float, int], 14 | loc: Union[None, float, int] = 0.0, 15 | scale: Union[None, float, int] = 1.0, 16 | ): 17 | """ 18 | 19 | :param df: shape parameter (degrees of freedom) (given by k in the equation) 20 | :param loc: location parameter 21 | :param scale: scale parameter 22 | """ 23 | super().__init__( 24 | df=df, loc=loc, scale=scale, ordered_parameters=("df", "loc", "scale") 25 | ) 26 | self._construct_from_scipy(scipy_name=stats.chi2) 27 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Exponential.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class Exponential(DistributionContinuous1D): 10 | 11 | @beartype 12 | def __init__( 13 | self, loc: Union[None, float, int] = 0.0, scale: Union[None, float, int] = 1.0 14 | ): 15 | """ 16 | 17 | :param loc: location parameter 18 | :param scale: scale parameter 19 | """ 20 | super().__init__(loc=loc, scale=scale, ordered_parameters=("loc", "scale")) 21 | self._construct_from_scipy(scipy_name=stats.expon) 22 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Gamma.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class Gamma(DistributionContinuous1D): 10 | 11 | @beartype 12 | def __init__( 13 | self, 14 | a: Union[None, float, int], 15 | loc: Union[None, float, int] = 0.0, 16 | scale: Union[None, float, int] = 1.0, 17 | ): 18 | """ 
19 | 20 | :param a: shape parameter 21 | :param loc: location parameter 22 | :param scale: scale parameter 23 | """ 24 | super().__init__( 25 | a=a, loc=loc, scale=scale, ordered_parameters=("a", "loc", "scale") 26 | ) 27 | self._construct_from_scipy(scipy_name=stats.gamma) 28 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/GeneralizedExtreme.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class GeneralizedExtreme(DistributionContinuous1D): 10 | 11 | @beartype 12 | def __init__( 13 | self, 14 | c: Union[None, float, int], 15 | loc: Union[None, float, int] = 0.0, 16 | scale: Union[None, float, int] = 1.0, 17 | ): 18 | """ 19 | 20 | :param c: shape parameter 21 | :param loc: location parameter 22 | :param scale: scale parameter 23 | """ 24 | super().__init__( 25 | c=c, loc=loc, scale=scale, ordered_parameters=("c", "loc", "scale") 26 | ) 27 | self._construct_from_scipy(scipy_name=stats.genextreme) 28 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/InverseGaussian.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class InverseGauss(DistributionContinuous1D): 10 | 11 | @beartype 12 | def __init__( 13 | self, 14 | mu: Union[None, float, int], 15 | loc: Union[None, float, int] = 0.0, 16 | scale: Union[None, float, int] = 1.0, 17 | ): 18 | """ 19 | 20 | :param mu: shape parameter :math:`\mu` 21 | :param loc: location parameter 22 | :param scale: scale parameter 23 | """ 24 | super().__init__( 25 | mu=mu, loc=loc, scale=scale, ordered_parameters=("mu", "loc", "scale") 26 | ) 27 | self._construct_from_scipy(scipy_name=stats.invgauss) 28 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Laplace.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class Laplace(DistributionContinuous1D): 10 | 11 | @beartype 12 | def __init__( 13 | self, loc: Union[None, float, int] = 0.0, scale: Union[None, float, int] = 1.0 14 | ): 15 | """ 16 | 17 | :param loc: location parameter 18 | :param scale: scale parameter 19 | """ 20 | super().__init__(loc=loc, scale=scale, ordered_parameters=("loc", "scale")) 21 | self._construct_from_scipy(scipy_name=stats.laplace) 22 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Levy.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class Levy(DistributionContinuous1D): 10 | 11 | @beartype 12 | def __init__( 13 | self, loc: Union[None, float, int] = 0.0, scale: Union[None, float, int] = 1.0 14 | ): 15 | """ 16 | 17 | :param loc: location parameter 18 
| :param scale: scale parameter 19 | """ 20 | super().__init__(loc=loc, scale=scale, ordered_parameters=("loc", "scale")) 21 | self._construct_from_scipy(scipy_name=stats.levy) 22 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Logistic.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class Logistic(DistributionContinuous1D): 10 | 11 | @beartype 12 | def __init__( 13 | self, loc: Union[None, float, int] = 0.0, scale: Union[None, float, int] = 1.0 14 | ): 15 | """ 16 | 17 | :param loc: location parameter 18 | :param scale: scale parameter 19 | """ 20 | super().__init__(loc=loc, scale=scale, ordered_parameters=("loc", "scale")) 21 | self._construct_from_scipy(scipy_name=stats.logistic) 22 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Lognormal.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class Lognormal(DistributionContinuous1D): 10 | 11 | @beartype 12 | def __init__( 13 | self, 14 | s: Union[None, float, int], 15 | loc: Union[None, float, int] = 0.0, 16 | scale: Union[None, float, int] = 1.0, 17 | ): 18 | """ 19 | 20 | :param s: shape parameter 21 | :param loc: location parameter 22 | :param scale: scale parameter 23 | """ 24 | super().__init__( 25 | s=s, loc=loc, scale=scale, ordered_parameters=("s", "loc", "scale") 26 | ) 27 | self._construct_from_scipy(scipy_name=stats.lognorm) 28 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Maxwell.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class Maxwell(DistributionContinuous1D): 10 | 11 | @beartype 12 | def __init__( 13 | self, loc: Union[None, float, int] = 0.0, scale: Union[None, float, int] = 1.0 14 | ): 15 | """ 16 | 17 | :param loc: location parameter 18 | :param scale: scale parameter 19 | """ 20 | super().__init__(loc=loc, scale=scale, ordered_parameters=("loc", "scale")) 21 | self._construct_from_scipy(scipy_name=stats.maxwell) 22 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Normal.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | import scipy.stats as stats 3 | from beartype import beartype 4 | from UQpy.distributions.baseclass import DistributionContinuous1D 5 | 6 | 7 | class Normal(DistributionContinuous1D): 8 | 9 | @beartype 10 | def __init__( 11 | self, loc: Union[None, float, int] = 0.0, scale: Union[None, float, int] = 1.0 12 | ): 13 | """ 14 | 15 | :param loc: location parameter 16 | :param scale: scale parameter 17 | """ 18 | super().__init__(loc=loc, scale=scale, ordered_parameters=("loc", "scale")) 19 | self._construct_from_scipy(scipy_name=stats.norm) 20 | 
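A usage sketch for the scipy-backed 1D distributions in this collection, exercising the methods listed in distributions_continuous_1d.rst (pdf, cdf, icdf, moments, rvs). The constructor follows ``Normal(loc, scale)`` as defined above; the keyword names passed to ``rvs`` (``nsamples``, ``random_state``) are assumptions based on common UQpy usage rather than a verified signature.

import numpy as np
from UQpy.distributions import Normal

dist = Normal(loc=0.0, scale=2.0)
x = np.linspace(-3.0, 3.0, 5)
print(dist.pdf(x))                                   # density values at the points in x
print(dist.cdf(x))                                   # cumulative probabilities at x
print(dist.icdf(np.array([0.025, 0.5, 0.975])))      # quantiles of the distribution
print(dist.moments())                                # mean, variance, skewness, kurtosis
samples = dist.rvs(nsamples=1000, random_state=1)    # draw random samples (assumed keywords)
print(samples.shape)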
-------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Pareto.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class Pareto(DistributionContinuous1D): 10 | @beartype 11 | def __init__( 12 | self, 13 | b: Union[None, float, int], 14 | loc: Union[None, float, int] = 0.0, 15 | scale: Union[None, float, int] = 1.0, 16 | ): 17 | """ 18 | 19 | :param b: shape parameter 20 | :param loc: location parameter 21 | :param scale: scale parameter 22 | """ 23 | super().__init__( 24 | b=b, loc=loc, scale=scale, ordered_parameters=("b", "loc", "scale") 25 | ) 26 | self._construct_from_scipy(scipy_name=stats.pareto) 27 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Poisson.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionDiscrete1D 7 | 8 | 9 | class Poisson(DistributionDiscrete1D): 10 | 11 | @beartype 12 | def __init__(self, mu: Union[None, float, int], loc: Union[None, float, int] = 0.0): 13 | """ 14 | 15 | :param mu: shape parameter 16 | :param loc: location parameter 17 | """ 18 | super().__init__(mu=mu, loc=loc, ordered_parameters=("mu", "loc")) 19 | self._construct_from_scipy(scipy_name=stats.poisson) 20 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Rayleigh.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class Rayleigh(DistributionContinuous1D): 10 | @beartype 11 | def __init__( 12 | self, loc: Union[None, float, int] = 0.0, scale: Union[None, float, int] = 1.0 13 | ): 14 | """ 15 | 16 | :param loc: location parameter 17 | :param scale: scale parameter 18 | """ 19 | super().__init__(loc=loc, scale=scale, ordered_parameters=("loc", "scale")) 20 | self._construct_from_scipy(scipy_name=stats.rayleigh) 21 | -------------------------------------------------------------------------------- /src/UQpy/distributions/collection/TruncatedNormal.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class TruncatedNormal(DistributionContinuous1D): 10 | 11 | @beartype 12 | def __init__( 13 | self, 14 | a: Union[None, float, int], 15 | b: Union[None, float, int], 16 | loc: Union[None, float, int] = 0.0, 17 | scale: Union[None, float, int] = 1.0, 18 | ): 19 | """ 20 | 21 | :param a: shape parameter 22 | :param b: shape parameter 23 | :param loc: location parameter 24 | :param scale: scale parameter 25 | """ 26 | super().__init__( 27 | a=a, 28 | b=b, 29 | loc=loc, 30 | scale=scale, 31 | ordered_parameters=("a", "b", "loc", "scale"), 32 | ) 33 | self._construct_from_scipy(scipy_name=stats.truncnorm) 34 | 
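A small sketch of building a multivariate distribution from independent marginals, as described in distributions_multivariate.rst earlier in the docs; the ``marginals`` keyword follows the documented :class:`.JointIndependent` usage but is noted as an assumption, since that class is not reproduced in this listing. Per ``DistributionND.check_x_dimension``, the evaluation points must form a 2d array of shape (npoints, d).

from UQpy.distributions import JointIndependent, Normal, Uniform

# Two independent marginals: a standard normal and a uniform on [0, 2].
joint = JointIndependent(marginals=[Normal(loc=0.0, scale=1.0),
                                    Uniform(loc=0.0, scale=2.0)])
print(joint.pdf([[0.0, 1.0], [0.5, 1.5]]))  # joint density at two 2d points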
-------------------------------------------------------------------------------- /src/UQpy/distributions/collection/Uniform.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import scipy.stats as stats 4 | from beartype import beartype 5 | 6 | from UQpy.distributions.baseclass import DistributionContinuous1D 7 | 8 | 9 | class Uniform(DistributionContinuous1D): 10 | @beartype 11 | def __init__( 12 | self, loc: Union[None, float, int] = 0.0, scale: Union[None, float, int] = 1.0 13 | ): 14 | """ 15 | 16 | :param loc: lower bound 17 | :param scale: range 18 | """ 19 | super().__init__(loc=loc, scale=scale, ordered_parameters=("loc", "scale")) 20 | self._construct_from_scipy(scipy_name=stats.uniform) 21 | -------------------------------------------------------------------------------- /src/UQpy/distributions/copulas/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.distributions.copulas.Gumbel import Gumbel 2 | from UQpy.distributions.copulas.Clayton import Clayton 3 | from UQpy.distributions.copulas.Frank import Frank 4 | -------------------------------------------------------------------------------- /src/UQpy/inference/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.inference.BayesModelSelection import BayesModelSelection 2 | from UQpy.inference.inference_models import * 3 | from UQpy.inference.evidence_methods import * 4 | from UQpy.inference.information_criteria import * 5 | from UQpy.inference.InformationModelSelection import InformationModelSelection 6 | from UQpy.inference.BayesParameterEstimation import BayesParameterEstimation 7 | from UQpy.inference.MLE import MLE 8 | 9 | from UQpy.inference.BayesModelSelection import * 10 | from UQpy.inference.InformationModelSelection import * 11 | from UQpy.inference.BayesParameterEstimation import * 12 | from UQpy.inference.MLE import * 13 | 14 | -------------------------------------------------------------------------------- /src/UQpy/inference/evidence_methods/HarmonicMean.py: -------------------------------------------------------------------------------- 1 | from UQpy.inference.evidence_methods.baseclass.EvidenceMethod import EvidenceMethod 2 | import numpy as np 3 | 4 | 5 | class HarmonicMean(EvidenceMethod): 6 | """ 7 | Class used for the computation of model evidence using the harmonic mean method. 
8 | """ 9 | def estimate_evidence(self, inference_model, posterior_samples, log_posterior_values): 10 | log_likelihood_values = (log_posterior_values - inference_model.prior.log_pdf(x=posterior_samples)) 11 | temp = np.mean(1.0 / np.exp(log_likelihood_values)) 12 | return 1.0 / temp 13 | -------------------------------------------------------------------------------- /src/UQpy/inference/evidence_methods/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.inference.evidence_methods.baseclass import * 2 | from UQpy.inference.evidence_methods.HarmonicMean import HarmonicMean 3 | -------------------------------------------------------------------------------- /src/UQpy/inference/evidence_methods/baseclass/EvidenceMethod.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | from UQpy.utilities.ValidationTypes import NumpyFloatArray 4 | from UQpy.inference.inference_models.baseclass import InferenceModel 5 | 6 | 7 | class EvidenceMethod(ABC): 8 | @abstractmethod 9 | def estimate_evidence(self, inference_model: InferenceModel, 10 | posterior_samples: NumpyFloatArray, 11 | log_posterior_values: NumpyFloatArray) -> float: 12 | """ 13 | 14 | :param inference_model: Probabilistic model used for inference. 15 | :param posterior_samples: Samples drawn from the posterior distribution of the parameters using a 16 | :class:`.BayesParameterEstimation` object. 17 | :param log_posterior_values: Values of the ``log_pdf`` function generated during the sampling of the 18 | :class:`.BayesParameterEstimation` object. 19 | :return: The evidence of the inference specific model. 20 | """ 21 | pass 22 | -------------------------------------------------------------------------------- /src/UQpy/inference/evidence_methods/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.inference.evidence_methods.baseclass.EvidenceMethod import EvidenceMethod 2 | -------------------------------------------------------------------------------- /src/UQpy/inference/inference_models/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.inference.inference_models.baseclass import * 2 | 3 | from UQpy.inference.inference_models.ComputationalModel import ComputationalModel 4 | from UQpy.inference.inference_models.DistributionModel import DistributionModel 5 | from UQpy.inference.inference_models.LogLikelihoodModel import LogLikelihoodModel 6 | 7 | -------------------------------------------------------------------------------- /src/UQpy/inference/inference_models/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.inference.inference_models.baseclass.InferenceModel import InferenceModel 2 | -------------------------------------------------------------------------------- /src/UQpy/inference/information_criteria/AIC.py: -------------------------------------------------------------------------------- 1 | from UQpy.inference import MLE 2 | from UQpy.utilities.ValidationTypes import NumpyFloatArray 3 | from UQpy.inference.information_criteria.baseclass.InformationCriterion import InformationCriterion 4 | 5 | 6 | class AIC(InformationCriterion): 7 | def minimize_criterion(self, 8 | data: NumpyFloatArray, 9 | parameter_estimator: MLE, 10 | return_penalty: bool = False): 11 | inference_model = parameter_estimator.inference_model 12 | 
max_log_like = parameter_estimator.max_log_like 13 | n_parameters = inference_model.n_parameters 14 | 15 | penalty_term = self._calculate_penalty_term(n_parameters) 16 | if return_penalty: 17 | return -2 * max_log_like + penalty_term, penalty_term 18 | return -2 * max_log_like + penalty_term 19 | 20 | def _calculate_penalty_term(self, n_parameters): 21 | return 2 * n_parameters 22 | -------------------------------------------------------------------------------- /src/UQpy/inference/information_criteria/BIC.py: -------------------------------------------------------------------------------- 1 | from UQpy.inference.information_criteria.baseclass.InformationCriterion import InformationCriterion 2 | import numpy as np 3 | from UQpy.inference import MLE 4 | from UQpy.utilities.ValidationTypes import NumpyFloatArray 5 | 6 | 7 | class BIC(InformationCriterion): 8 | 9 | def minimize_criterion(self, 10 | data: NumpyFloatArray, 11 | parameter_estimator: MLE, 12 | return_penalty: bool = False): 13 | inference_model = parameter_estimator.inference_model 14 | max_log_like = parameter_estimator.max_log_like 15 | n_parameters = inference_model.n_parameters 16 | n_data = len(data) 17 | 18 | penalty_term = self._calculate_penalty_term(n_data, n_parameters) 19 | if return_penalty: 20 | return -2 * max_log_like + penalty_term, penalty_term 21 | return -2 * max_log_like + penalty_term 22 | 23 | def _calculate_penalty_term(self, n_data, n_parameters): 24 | return np.log(n_data) * n_parameters 25 | -------------------------------------------------------------------------------- /src/UQpy/inference/information_criteria/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.inference.information_criteria.baseclass import * 2 | 3 | from UQpy.inference.information_criteria.AIC import AIC 4 | from UQpy.inference.information_criteria.BIC import BIC 5 | from UQpy.inference.information_criteria.AICc import AICc -------------------------------------------------------------------------------- /src/UQpy/inference/information_criteria/baseclass/InformationCriterion.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import Union 3 | 4 | import numpy as np 5 | 6 | from UQpy.inference.BayesParameterEstimation import BayesParameterEstimation 7 | from UQpy.inference.MLE import MLE 8 | 9 | 10 | class InformationCriterion(ABC): 11 | 12 | @abstractmethod 13 | def minimize_criterion(self, data: np.ndarray, 14 | parameter_estimator: Union[MLE, BayesParameterEstimation], 15 | return_penalty: bool = False) -> float: 16 | """ 17 | Function that must be implemented by the user in order to create new concrete implementation of the 18 | :class:`.InformationCriterion` baseclass. 19 | """ 20 | pass 21 | -------------------------------------------------------------------------------- /src/UQpy/inference/information_criteria/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.inference.information_criteria.baseclass.InformationCriterion import InformationCriterion 2 | -------------------------------------------------------------------------------- /src/UQpy/reliability/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.reliability.SubsetSimulation import SubsetSimulation 2 | from UQpy.reliability.taylor_series import * 3 | 4 | from . 
import TaylorSeries 5 | -------------------------------------------------------------------------------- /src/UQpy/reliability/taylor_series/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.reliability.taylor_series.FORM import FORM 2 | from UQpy.reliability.taylor_series.SORM import SORM 3 | from UQpy.reliability.taylor_series.InverseFORM import InverseFORM 4 | from UQpy.reliability.taylor_series.baseclass.TaylorSeries import TaylorSeries 5 | -------------------------------------------------------------------------------- /src/UQpy/reliability/taylor_series/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.reliability.taylor_series.baseclass.TaylorSeries import * 2 | -------------------------------------------------------------------------------- /src/UQpy/run_model/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.run_model.RunModel import RunModel 2 | 3 | from UQpy.run_model.model_execution import * 4 | -------------------------------------------------------------------------------- /src/UQpy/run_model/model_execution/SerialExecution.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import numpy as np 4 | 5 | 6 | class SerialExecution: 7 | def __init__(self): 8 | self.logger = logging.getLogger(__name__) 9 | 10 | def run(self, model, n_existing_simulations, n_new_simulations, samples): 11 | results = [] 12 | for i in range(n_existing_simulations, n_existing_simulations + n_new_simulations): 13 | sample = model.preprocess_single_sample(i, samples) 14 | 15 | execution_output = model.execute_single_sample(i, sample) 16 | 17 | results.append(model.postprocess_single_file(i, execution_output)) 18 | 19 | self.logger.info("\nUQpy: Serial execution of the python model complete.\n") 20 | return results 21 | -------------------------------------------------------------------------------- /src/UQpy/run_model/model_execution/__init__.py: -------------------------------------------------------------------------------- 1 | # from UQpy.utilities.model_execution.ParallelExecution import * 2 | from UQpy.run_model.model_execution.SerialExecution import * 3 | from UQpy.run_model.model_execution.PythonModel import * 4 | from UQpy.run_model.model_execution.ThirdPartyModel import * 5 | -------------------------------------------------------------------------------- /src/UQpy/sampling/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.mcmc import * 2 | from UQpy.sampling.adaptive_kriging_functions import * 3 | from UQpy.sampling.stratified_sampling import * 4 | from UQpy.sampling.mcmc.tempering_mcmc import * 5 | 6 | from UQpy.sampling.AdaptiveKriging import AdaptiveKriging 7 | from UQpy.sampling.ImportanceSampling import ImportanceSampling 8 | 9 | from UQpy.sampling.MonteCarloSampling import MonteCarloSampling 10 | from UQpy.sampling.SimplexSampling import SimplexSampling 11 | from UQpy.sampling.ThetaCriterionPCE import ThetaCriterionPCE 12 | -------------------------------------------------------------------------------- /src/UQpy/sampling/adaptive_kriging_functions/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.adaptive_kriging_functions.ExpectedFeasibility import * 2 | from 
UQpy.sampling.adaptive_kriging_functions.ExpectedImprovement import * 3 | from UQpy.sampling.adaptive_kriging_functions.ExpectedImprovementGlobalFit import * 4 | from UQpy.sampling.adaptive_kriging_functions.baseclass import * 5 | from UQpy.sampling.adaptive_kriging_functions.UFunction import * 6 | from UQpy.sampling.adaptive_kriging_functions.WeightedUFunction import * 7 | -------------------------------------------------------------------------------- /src/UQpy/sampling/adaptive_kriging_functions/baseclass/LearningFunction.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class LearningFunction(ABC): 5 | def __init__(self, ordered_parameters=None, **kwargs): 6 | self.parameters = kwargs 7 | self.ordered_parameters = (ordered_parameters if ordered_parameters is not None else tuple(kwargs.keys())) 8 | if len(self.ordered_parameters) != len(self.parameters): 9 | raise ValueError("Inconsistent dimensions between the ordered_parameters tuple and the kwargs dictionary.") 10 | 11 | @abstractmethod 12 | def evaluate_function(self, distributions, n_add, surrogate, population, qoi=None, samples=None): 13 | """ 14 | Abstract method that needs to be overridden by the user to create new Adaptive Kriging Learning functions. 15 | """ 16 | pass 17 | -------------------------------------------------------------------------------- /src/UQpy/sampling/adaptive_kriging_functions/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.adaptive_kriging_functions.baseclass.LearningFunction import * 2 | -------------------------------------------------------------------------------- /src/UQpy/sampling/mcmc/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.mcmc.MetropolisHastings import MetropolisHastings 2 | from UQpy.sampling.mcmc.ModifiedMetropolisHastings import ModifiedMetropolisHastings 3 | from UQpy.sampling.mcmc.Stretch import Stretch 4 | from UQpy.sampling.mcmc.DRAM import DRAM 5 | from UQpy.sampling.mcmc.DREAM import DREAM 6 | 7 | from UQpy.sampling.mcmc.baseclass.MCMC import MCMC 8 | from UQpy.sampling.mcmc.tempering_mcmc import * 9 | -------------------------------------------------------------------------------- /src/UQpy/sampling/mcmc/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.mcmc.baseclass.MCMC import MCMC 2 | -------------------------------------------------------------------------------- /src/UQpy/sampling/mcmc/tempering_mcmc/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.mcmc.tempering_mcmc.ParallelTemperingMCMC import ParallelTemperingMCMC 2 | from UQpy.sampling.mcmc.tempering_mcmc.SequentialTemperingMCMC import SequentialTemperingMCMC 3 | 4 | from UQpy.sampling.mcmc.tempering_mcmc.baseclass import * 5 | -------------------------------------------------------------------------------- /src/UQpy/sampling/mcmc/tempering_mcmc/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.mcmc.tempering_mcmc.baseclass.TemperingMCMC import TemperingMCMC 2 | -------------------------------------------------------------------------------- /src/UQpy/sampling/stratified_sampling/__init__.py: -------------------------------------------------------------------------------- 1 | from
UQpy.sampling.stratified_sampling.baseclass import * 2 | from UQpy.sampling.stratified_sampling.latin_hypercube_criteria import * 3 | from UQpy.sampling.stratified_sampling.strata import * 4 | from UQpy.sampling.stratified_sampling.refinement import * 5 | 6 | 7 | from UQpy.sampling.stratified_sampling.LatinHypercubeSampling import LatinHypercubeSampling 8 | from UQpy.sampling.stratified_sampling.TrueStratifiedSampling import TrueStratifiedSampling 9 | from UQpy.sampling.stratified_sampling.RefinedStratifiedSampling import RefinedStratifiedSampling 10 | 11 | -------------------------------------------------------------------------------- /src/UQpy/sampling/stratified_sampling/baseclass/StratifiedSampling.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | 3 | 4 | class StratifiedSampling(ABC): 5 | pass 6 | -------------------------------------------------------------------------------- /src/UQpy/sampling/stratified_sampling/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.stratified_sampling.baseclass.StratifiedSampling import StratifiedSampling 2 | -------------------------------------------------------------------------------- /src/UQpy/sampling/stratified_sampling/latin_hypercube_criteria/Centered.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.stratified_sampling.latin_hypercube_criteria import Criterion 2 | import numpy as np 3 | 4 | 5 | class Centered(Criterion): 6 | def __init__(self): 7 | """ 8 | Method for generating a Latin hypercube design with samples centered in the bins. 9 | """ 10 | super().__init__() 11 | 12 | def generate_samples(self, random_state): 13 | u_temp = (self.a + self.b) / 2 14 | lhs_samples = np.zeros([self.samples.shape[0], self.samples.shape[1]]) 15 | for i in range(self.samples.shape[1]): 16 | if random_state is not None: 17 | lhs_samples[:, i] = random_state.permutation(u_temp) 18 | else: 19 | lhs_samples[:, i] = np.random.permutation(u_temp) 20 | 21 | return lhs_samples 22 | -------------------------------------------------------------------------------- /src/UQpy/sampling/stratified_sampling/latin_hypercube_criteria/Random.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.stratified_sampling.latin_hypercube_criteria import Criterion 2 | import numpy as np 3 | 4 | 5 | class Random(Criterion): 6 | def __init__(self): 7 | """ 8 | Method for generating a Latin hypercube design by sampling randomly inside each bin. 9 | 10 | The :class:`Random` class takes a set of samples drawn randomly from within the Latin hypercube bins and 11 | performs a random shuffling of them to pair the variables. 
12 | 13 | """ 14 | super().__init__() 15 | 16 | def generate_samples(self, random_state): 17 | lhs_samples = np.zeros_like(self.samples) 18 | samples_number = len(self.samples) 19 | for j in range(self.samples.shape[1]): 20 | if random_state is not None: 21 | order = random_state.permutation(samples_number) 22 | else: 23 | order = np.random.permutation(samples_number) 24 | lhs_samples[:, j] = self.samples[order, j] 25 | 26 | return lhs_samples 27 | -------------------------------------------------------------------------------- /src/UQpy/sampling/stratified_sampling/latin_hypercube_criteria/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.stratified_sampling.latin_hypercube_criteria.baseclass.Criterion import * 2 | from UQpy.sampling.stratified_sampling.latin_hypercube_criteria.Random import * 3 | from UQpy.sampling.stratified_sampling.latin_hypercube_criteria.Centered import * 4 | from UQpy.sampling.stratified_sampling.latin_hypercube_criteria.MinCorrelation import * 5 | from UQpy.sampling.stratified_sampling.latin_hypercube_criteria.MaxiMin import * 6 | from UQpy.sampling.stratified_sampling.latin_hypercube_criteria.Random import * 7 | 8 | from UQpy.sampling.stratified_sampling.latin_hypercube_criteria.baseclass import * 9 | -------------------------------------------------------------------------------- /src/UQpy/sampling/stratified_sampling/latin_hypercube_criteria/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.stratified_sampling.latin_hypercube_criteria.baseclass.Criterion import * 2 | -------------------------------------------------------------------------------- /src/UQpy/sampling/stratified_sampling/refinement/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.stratified_sampling.refinement.RandomRefinement import * 2 | from UQpy.sampling.stratified_sampling.refinement.GradientEnhancedRefinement import * 3 | from UQpy.sampling.stratified_sampling.refinement.baseclass import * 4 | -------------------------------------------------------------------------------- /src/UQpy/sampling/stratified_sampling/refinement/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.stratified_sampling.refinement.baseclass.Refinement import * 2 | -------------------------------------------------------------------------------- /src/UQpy/sampling/stratified_sampling/strata/SamplingCriterion.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class SamplingCriterion(Enum): 5 | RANDOM = 1 6 | CENTERED = 2 7 | -------------------------------------------------------------------------------- /src/UQpy/sampling/stratified_sampling/strata/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.stratified_sampling.strata.baseclass import * 2 | from UQpy.sampling.stratified_sampling.strata.DelaunayStrata import DelaunayStrata 3 | from UQpy.sampling.stratified_sampling.strata.RectangularStrata import RectangularStrata 4 | from UQpy.sampling.stratified_sampling.strata.SamplingCriterion import SamplingCriterion 5 | from UQpy.sampling.stratified_sampling.strata.VoronoiStrata import VoronoiStrata 6 | -------------------------------------------------------------------------------- 
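The Random and Centered criteria above share a single pairing step: every column of the Latin hypercube design is permuted independently, which randomizes how the variables are paired while keeping exactly one sample per bin in each dimension. A minimal stand-alone sketch of that step in plain NumPy, not using the UQpy classes (the array names and bin count here are illustrative only):

import numpy as np

rng = np.random.default_rng(0)
n_bins = 5

# One point per bin in each of two dimensions (bins of width 1/n_bins on [0, 1)).
edges = np.arange(n_bins) / n_bins
samples = np.column_stack([edges + rng.uniform(0, 1 / n_bins, n_bins),
                           edges + rng.uniform(0, 1 / n_bins, n_bins)])

# The pairing step used by the criteria: shuffle each column independently.
lhs_samples = np.empty_like(samples)
for j in range(samples.shape[1]):
    lhs_samples[:, j] = samples[rng.permutation(n_bins), j]

# Every column still occupies each bin exactly once.
for j in range(samples.shape[1]):
    assert sorted((lhs_samples[:, j] * n_bins).astype(int).tolist()) == list(range(n_bins))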
/src/UQpy/sampling/stratified_sampling/strata/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sampling.stratified_sampling.strata.baseclass.Strata import Strata 2 | -------------------------------------------------------------------------------- /src/UQpy/scientific_machine_learning/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.scientific_machine_learning.layers import * 2 | from UQpy.scientific_machine_learning.losses import * 3 | from UQpy.scientific_machine_learning.neural_networks import * 4 | from UQpy.scientific_machine_learning.trainers import * 5 | -------------------------------------------------------------------------------- /src/UQpy/scientific_machine_learning/baseclass/Layer.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from abc import ABC, abstractmethod 4 | 5 | 6 | class Layer(nn.Module, ABC): 7 | def __init__(self, **kwargs): 8 | super().__init__(**kwargs) 9 | 10 | def reset_parameters(self, a, b): 11 | """Fill all parameters with samples from :math:`\mathcal{U}(a, b)`""" 12 | for p in self.parameters(): 13 | nn.init.uniform_(p, a, b) 14 | 15 | @abstractmethod 16 | def forward(self, x: torch.Tensor) -> torch.Tensor: ... 17 | 18 | @abstractmethod 19 | def extra_repr(self) -> str: ... 20 | -------------------------------------------------------------------------------- /src/UQpy/scientific_machine_learning/baseclass/Loss.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | from abc import ABC, abstractmethod 3 | 4 | 5 | class Loss(nn.Module, ABC): 6 | def __init__(self): 7 | super().__init__() 8 | 9 | @abstractmethod 10 | def forward(self, *args, **kwargs): ... 
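# A concrete loss only needs to subclass Loss and implement forward(). A minimal,
# purely illustrative sketch (the class name and the mean-absolute-error reduction
# are assumptions, not part of this module):
#
#     class MeanAbsoluteError(Loss):
#         def forward(self, prediction, target):
#             return (prediction - target).abs().mean()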
11 | -------------------------------------------------------------------------------- /src/UQpy/scientific_machine_learning/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.scientific_machine_learning.baseclass.NormalBayesianLayer import NormalBayesianLayer 2 | from UQpy.scientific_machine_learning.baseclass.ProbabilisticDropoutLayer import ProbabilisticDropoutLayer 3 | from UQpy.scientific_machine_learning.baseclass.Layer import Layer 4 | from UQpy.scientific_machine_learning.baseclass.Loss import Loss 5 | from UQpy.scientific_machine_learning.baseclass.NeuralNetwork import NeuralNetwork 6 | -------------------------------------------------------------------------------- /src/UQpy/scientific_machine_learning/functional/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.scientific_machine_learning.functional.gaussian_kullback_leibler_divergence import ( 2 | gaussian_kullback_leibler_divergence, 3 | ) 4 | from UQpy.scientific_machine_learning.functional.generalized_jensen_shannon_divergence import ( 5 | generalized_jensen_shannon_divergence, 6 | ) 7 | from UQpy.scientific_machine_learning.functional.geometric_jensen_shannon_divergence import ( 8 | geometric_jensen_shannon_divergence, 9 | ) 10 | from UQpy.scientific_machine_learning.functional.mc_kullback_leibler_divergence import ( 11 | mc_kullback_leibler_divergence, 12 | ) 13 | from UQpy.scientific_machine_learning.functional.spectral_conv1d import spectral_conv1d 14 | from UQpy.scientific_machine_learning.functional.spectral_conv2d import spectral_conv2d 15 | from UQpy.scientific_machine_learning.functional.spectral_conv3d import spectral_conv3d 16 | -------------------------------------------------------------------------------- /src/UQpy/scientific_machine_learning/layers/Permutation.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from UQpy.scientific_machine_learning.baseclass import Layer 3 | 4 | 5 | class Permutation(Layer): 6 | def __init__(self, dims: tuple[int], **kwargs): 7 | """Permute the dimensions of a tensor. 
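For example (shapes purely illustrative), ``Permutation(dims=(0, 2, 1))`` maps a tensor of shape ``(N, C, L)`` to one of shape ``(N, L, C)``.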
8 | 9 | See :py:class:`torch.permute` for documentation 10 | 11 | :param dims: Dimensions passed to :code:`torch.permute` 12 | """ 13 | super().__init__(**kwargs) 14 | self.dims = dims 15 | 16 | def forward(self, x: torch.Tensor) -> torch.Tensor: 17 | """Calls ``torch.permute(x, dims)`` 18 | 19 | :param x: Tensor of any shape 20 | :return: Tensor of permuted shape 21 | """ 22 | return torch.permute(x, self.dims) 23 | 24 | def extra_repr(self) -> str: 25 | return f"dims={self.dims}" 26 | -------------------------------------------------------------------------------- /src/UQpy/scientific_machine_learning/losses/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.scientific_machine_learning.losses.GaussianKullbackLeiblerDivergence import ( 2 | GaussianKullbackLeiblerDivergence, 3 | ) 4 | from UQpy.scientific_machine_learning.losses.MCKullbackLeiblerDivergence import ( 5 | MCKullbackLeiblerDivergence, 6 | ) 7 | from UQpy.scientific_machine_learning.losses.GeometricJensenShannonDivergence import ( 8 | GeometricJensenShannonDivergence, 9 | ) 10 | from UQpy.scientific_machine_learning.losses.GeneralizedJensenShannonDivergence import ( 11 | GeneralizedJensenShannonDivergence, 12 | ) 13 | from UQpy.scientific_machine_learning.losses.LpLoss import LpLoss 14 | -------------------------------------------------------------------------------- /src/UQpy/scientific_machine_learning/neural_networks/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.scientific_machine_learning.neural_networks.DeepOperatorNetwork import ( 2 | DeepOperatorNetwork, 3 | ) 4 | from UQpy.scientific_machine_learning.neural_networks.Unet import ( 5 | Unet, 6 | ) 7 | from UQpy.scientific_machine_learning.neural_networks.FeedForwardNeuralNetwork import ( 8 | FeedForwardNeuralNetwork, 9 | ) 10 | -------------------------------------------------------------------------------- /src/UQpy/scientific_machine_learning/trainers/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.scientific_machine_learning.trainers.Trainer import Trainer 2 | from UQpy.scientific_machine_learning.trainers.BBBTrainer import BBBTrainer 3 | -------------------------------------------------------------------------------- /src/UQpy/sensitivity/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sensitivity.MorrisSensitivity import MorrisSensitivity 2 | from UQpy.sensitivity.PceSensitivity import PceSensitivity 3 | from UQpy.sensitivity.SobolSensitivity import SobolSensitivity 4 | from UQpy.sensitivity.CramerVonMisesSensitivity import CramerVonMisesSensitivity 5 | from UQpy.sensitivity.ChatterjeeSensitivity import ChatterjeeSensitivity 6 | from UQpy.sensitivity.GeneralisedSobolSensitivity import GeneralisedSobolSensitivity 7 | 8 | from . import MorrisSensitivity 9 | from . import PceSensitivity 10 | from . import SobolSensitivity 11 | from . import CramerVonMisesSensitivity 12 | from . import ChatterjeeSensitivity 13 | from . 
import GeneralisedSobolSensitivity 14 | -------------------------------------------------------------------------------- /src/UQpy/sensitivity/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.sensitivity.baseclass.Sensitivity import * 2 | from UQpy.sensitivity.baseclass.PickFreeze import * 3 | -------------------------------------------------------------------------------- /src/UQpy/stochastic_process/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.stochastic_process.BispectralRepresentation import BispectralRepresentation 2 | from UQpy.stochastic_process.InverseTranslation import InverseTranslation 3 | from UQpy.stochastic_process.KarhunenLoeveExpansion import KarhunenLoeveExpansion 4 | from UQpy.stochastic_process.KarhunenLoeveExpansion2D import KarhunenLoeveExpansion2D 5 | from UQpy.stochastic_process.SpectralRepresentation import SpectralRepresentation 6 | from UQpy.stochastic_process.Translation import Translation 7 | 8 | from .supportive import * 9 | 10 | from . import supportive 11 | -------------------------------------------------------------------------------- /src/UQpy/stochastic_process/supportive/__init__.py: -------------------------------------------------------------------------------- 1 | """Collection of baseclasses""" 2 | from UQpy.stochastic_process.supportive.inverse_wiener_khinchin_transform import ( 3 | inverse_wiener_khinchin_transform, 4 | ) 5 | from UQpy.stochastic_process.supportive.wiener_khinchin_transform import ( 6 | wiener_khinchin_transform, 7 | ) 8 | from UQpy.stochastic_process.supportive.scaling_correlation_function import ( 9 | scaling_correlation_function, 10 | ) 11 | -------------------------------------------------------------------------------- /src/UQpy/stochastic_process/supportive/scaling_correlation_function.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def scaling_correlation_function(correlation_function): 5 | """ 6 | A function to scale a correlation function such that correlation at 0 lag is equal to 1 7 | 8 | ** Input:** 9 | 10 | * **correlation_function** (`list or numpy.array`): 11 | 12 | The correlation function of the signal. 13 | 14 | **Output/Returns:** 15 | 16 | * **scaled_correlation_function** (`list or numpy.array`): 17 | 18 | The scaled correlation functions of the signal. 19 | """ 20 | scaled_correlation_function = correlation_function / np.max(correlation_function) 21 | return scaled_correlation_function 22 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.surrogates.polynomial_chaos import * 2 | from UQpy.surrogates.stochastic_reduced_order_models import * 3 | from UQpy.surrogates.gaussian_process import * 4 | from UQpy.surrogates.baseclass import * 5 | 6 | from . 
import polynomial_chaos, stochastic_reduced_order_models, gaussian_process, baseclass 7 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/baseclass/Surrogate.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class Surrogate(ABC): 5 | @abstractmethod 6 | def fit(self, samples, values): 7 | pass 8 | 9 | @abstractmethod 10 | def predict(self, points, return_std=False): 11 | pass 12 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.surrogates.baseclass.Surrogate import Surrogate 2 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/gaussian_process/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.surrogates.gaussian_process.GaussianProcessRegression import GaussianProcessRegression 2 | 3 | from UQpy.surrogates.gaussian_process.regression_models import * 4 | from UQpy.surrogates.gaussian_process.constraints import * 5 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/gaussian_process/constraints/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.surrogates.gaussian_process.constraints.baseclass import * 2 | from UQpy.surrogates.gaussian_process.constraints.NonNegative import NonNegative 3 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/gaussian_process/constraints/baseclass/Constraints.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class ConstraintsGPR(ABC): 5 | """ 6 | Abstract base class of all Constraints. Serves as a template for creating new Kriging constraints for the 7 | log-likelihood function. 8 | """ 9 | 10 | @abstractmethod 11 | def define_arguments(self, x_train, y_train, predict_function): 12 | """ 13 | Abstract method that needs to be implemented by the user; it stores all the arguments in a dictionary and 14 | returns that dictionary inside a list. 15 | """ 16 | pass 17 | 18 | 19 | @staticmethod 20 | def constraints(theta_, kwargs): 21 | """ 22 | A static method which takes the hyperparameters and constraint arguments and evaluates the constraint values.
23 | """ 24 | pass 25 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/gaussian_process/constraints/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.surrogates.gaussian_process.constraints.baseclass.Constraints import ConstraintsGPR 2 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/gaussian_process/regression_models/ConstantRegression.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from UQpy.surrogates.gaussian_process.regression_models.baseclass.Regression import Regression 3 | 4 | 5 | class ConstantRegression(Regression): 6 | def r(self, s): 7 | s = np.atleast_2d(s) 8 | # jf = np.zeros([np.size(s, 0), np.size(s, 1), 1]) 9 | return np.ones([np.size(s, 0), 1]) 10 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/gaussian_process/regression_models/LinearRegression.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from UQpy.surrogates.gaussian_process.regression_models.baseclass.Regression import Regression 3 | 4 | 5 | class LinearRegression(Regression): 6 | def r(self, s): 7 | s = np.atleast_2d(s) 8 | fx = np.concatenate((np.ones([np.size(s, 0), 1]), s), 1) 9 | # jf_b = np.zeros([np.size(s, 0), np.size(s, 1), np.size(s, 1)]) 10 | # np.einsum("jii->ji", jf_b)[:] = 1 11 | # jf = np.concatenate((np.zeros([np.size(s, 0), np.size(s, 1), 1]), jf_b), 2) 12 | return fx 13 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/gaussian_process/regression_models/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.surrogates.gaussian_process.regression_models.baseclass import * 2 | from UQpy.surrogates.gaussian_process.regression_models.ConstantRegression import ConstantRegression 3 | from UQpy.surrogates.gaussian_process.regression_models.LinearRegression import LinearRegression 4 | from UQpy.surrogates.gaussian_process.regression_models.QuadraticRegression import QuadraticRegression 5 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/gaussian_process/regression_models/baseclass/Regression.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class Regression(ABC): 5 | """ 6 | Abstract base class of all Regressions. Serves as a template for creating new Gaussian Process regression 7 | functions. 8 | """ 9 | @abstractmethod 10 | def r(self, s): 11 | """ 12 | Abstract method that needs to be implemented by the user when creating a new Regression function. 
13 | """ 14 | pass 15 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/gaussian_process/regression_models/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.surrogates.gaussian_process.regression_models.baseclass.Regression import Regression 2 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/polynomial_chaos/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.surrogates.polynomial_chaos.polynomials import * 2 | from UQpy.surrogates.polynomial_chaos.regressions import * 3 | from UQpy.surrogates.polynomial_chaos.physics_informed import * 4 | 5 | from UQpy.surrogates.polynomial_chaos.PolynomialChaosExpansion import PolynomialChaosExpansion 6 | from UQpy.surrogates.polynomial_chaos.polynomials.baseclass.Polynomials import Polynomials 7 | from UQpy.surrogates.polynomial_chaos.regressions.LassoRegression import LassoRegression 8 | from UQpy.surrogates.polynomial_chaos.regressions.LeastSquareRegression import LeastSquareRegression 9 | from UQpy.surrogates.polynomial_chaos.regressions.RidgeRegression import RidgeRegression 10 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/polynomial_chaos/physics_informed/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.surrogates.polynomial_chaos.physics_informed.ConstrainedPCE import ConstrainedPCE 2 | from UQpy.surrogates.polynomial_chaos.physics_informed.PdeData import PdeData 3 | from UQpy.surrogates.polynomial_chaos.physics_informed.PdePCE import PdePCE 4 | from UQpy.surrogates.polynomial_chaos.physics_informed.ReducedPCE import ReducedPCE 5 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/polynomial_chaos/polynomials/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.surrogates.polynomial_chaos.polynomials.Hermite import Hermite 2 | from UQpy.surrogates.polynomial_chaos.polynomials.Legendre import Legendre 3 | 4 | from UQpy.surrogates.polynomial_chaos.polynomials.PolynomialsND import PolynomialsND 5 | 6 | from UQpy.surrogates.polynomial_chaos.polynomials.TotalDegreeBasis import TotalDegreeBasis 7 | from UQpy.surrogates.polynomial_chaos.polynomials.TensorProductBasis import TensorProductBasis 8 | from UQpy.surrogates.polynomial_chaos.polynomials.HyperbolicBasis import HyperbolicBasis 9 | 10 | from UQpy.surrogates.polynomial_chaos.polynomials.baseclass import * 11 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/polynomial_chaos/polynomials/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.surrogates.polynomial_chaos.polynomials.baseclass.Polynomials import Polynomials 2 | from UQpy.surrogates.polynomial_chaos.polynomials.baseclass.PolynomialBasis import PolynomialBasis 3 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/polynomial_chaos/regressions/LeastSquareRegression.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from UQpy.surrogates.polynomial_chaos.polynomials import PolynomialBasis 4 | from UQpy.surrogates.polynomial_chaos.regressions.baseclass.Regression 
import Regression 5 | 6 | 7 | class LeastSquareRegression(Regression): 8 | 9 | def run(self, x: np.ndarray, y: np.ndarray, design_matrix: np.ndarray): 10 | """ 11 | Least squares solution to compute the polynomial_chaos coefficients. 12 | 13 | :param x: :class:`numpy.ndarray` containing the training points (samples). 14 | :param y: :class:`numpy.ndarray` containing the model evaluations (labels) at the training points. 15 | :param design_matrix: matrix containing the evaluation of the polynomials at the input points **x**. 16 | :return: Returns the polynomial_chaos coefficients. 17 | """ 18 | c_, res, rank, sing = np.linalg.lstsq(design_matrix, np.array(y), rcond=None) 19 | if c_.ndim == 1: 20 | c_ = c_.reshape(-1, 1) 21 | 22 | return c_, None, np.shape(c_)[1] 23 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/polynomial_chaos/regressions/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.surrogates.polynomial_chaos.regressions.LassoRegression import LassoRegression 2 | from UQpy.surrogates.polynomial_chaos.regressions.LeastSquareRegression import LeastSquareRegression 3 | from UQpy.surrogates.polynomial_chaos.regressions.RidgeRegression import RidgeRegression 4 | from UQpy.surrogates.polynomial_chaos.regressions.LeastAngleRegression import LeastAngleRegression 5 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/polynomial_chaos/regressions/baseclass/Regression.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | from UQpy.surrogates.polynomial_chaos.polynomials.TotalDegreeBasis import PolynomialBasis 4 | 5 | 6 | class Regression(ABC): 7 | 8 | @abstractmethod 9 | def run(self, x, y, polynomial_basis): 10 | pass 11 | -------------------------------------------------------------------------------- /src/UQpy/surrogates/polynomial_chaos/regressions/baseclass/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SURGroup/UQpy/d977f2eb7df49d00620d77a9267b96b8ab681af6/src/UQpy/surrogates/polynomial_chaos/regressions/baseclass/__init__.py -------------------------------------------------------------------------------- /src/UQpy/surrogates/stochastic_reduced_order_models/__init__.py: -------------------------------------------------------------------------------- 1 | from .SROM import SROM 2 | -------------------------------------------------------------------------------- /src/UQpy/transformations/__init__.py: -------------------------------------------------------------------------------- 1 | from .Nataf import Nataf 2 | from .Correlate import Correlate 3 | from .Decorrelate import Decorrelate 4 | -------------------------------------------------------------------------------- /src/UQpy/utilities/Constants.py: -------------------------------------------------------------------------------- 1 | SKLEARN_STRING = "" 2 | -------------------------------------------------------------------------------- /src/UQpy/utilities/DistanceMetric.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class DistanceMetric(Enum): 5 | BRAYCURTIS = 1 6 | CANBERRA = 2 7 | CHEBYSHEV = 3 8 | CITYBLOCK = 4 9 | CORRELATION = 5 10 | COSINE = 6 11 | DICE = 7 12 | EUCLIDEAN = 8 13 | HAMMING = 9 14 | JACCARD = 10 15 | KULSINKSI = 11 16 | 
MAHALANOBIS = 12 17 | MATCHING = 13 18 | MINKOWSKI = 14 19 | ROGERSTANIMOTO = 15 20 | RUSSELLRAO = 16 21 | SEUCLIDEAN = 17 22 | SOKALMICHENER = 18 23 | SOKALSNEATH = 19 24 | SQEUCLIDEAN = 20 25 | -------------------------------------------------------------------------------- /src/UQpy/utilities/GrassmannPoint.py: -------------------------------------------------------------------------------- 1 | from typing import Annotated 2 | 3 | from beartype import beartype 4 | from beartype.vale import Is 5 | 6 | from UQpy.utilities.ValidationTypes import Numpy2DFloatArrayOrthonormal, Numpy2DFloatArray 7 | 8 | 9 | class GrassmannPoint: 10 | @beartype 11 | def __init__(self, data: Numpy2DFloatArrayOrthonormal): 12 | """ 13 | :param data: Matrix representing the point on the Grassmann manifold. 14 | """ 15 | self._data = data 16 | 17 | @property 18 | def data(self) -> Numpy2DFloatArray: 19 | """ 20 | The matrix containing the Grassmann point 21 | """ 22 | return self._data 23 | -------------------------------------------------------------------------------- /src/UQpy/utilities/NoPublicConstructor.py: -------------------------------------------------------------------------------- 1 | "Code retrieved from: https://stackoverflow.com/a/64682734/5647511" 2 | from typing import Type, Any, TypeVar 3 | 4 | 5 | T = TypeVar("T") 6 | 7 | 8 | class NoPublicConstructor(type): 9 | """Metaclass that ensures a private constructor 10 | 11 | If a class uses this metaclass like this: 12 | 13 | class SomeClass(metaclass=NoPublicConstructor): 14 | pass 15 | 16 | If you try to instantiate your class (`SomeClass()`), 17 | a `TypeError` will be thrown. 18 | """ 19 | 20 | def __call__(cls, *args, **kwargs): 21 | raise TypeError( 22 | f"{cls.__module__}.{cls.__qualname__} has no public constructor. " 23 | f"Use one of the create methods instead." 
24 | ) 25 | 26 | def _create(cls: Type[T], *args: Any, **kwargs: Any) -> T: 27 | return super().__call__(*args, **kwargs) # type: ignore 28 | -------------------------------------------------------------------------------- /src/UQpy/utilities/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.utilities.Constants import * 2 | from UQpy.utilities.NoPublicConstructor import * 3 | from UQpy.utilities.UQpyLoggingFormatter import * 4 | from UQpy.utilities.Utilities import * 5 | from UQpy.utilities.ValidationTypes import * 6 | from UQpy.utilities.DistanceMetric import * 7 | from UQpy.utilities.GrassmannPoint import GrassmannPoint 8 | from UQpy.utilities.distances import * 9 | from UQpy.utilities.kernels import * 10 | -------------------------------------------------------------------------------- /src/UQpy/utilities/distances/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.utilities.distances.baseclass import * 2 | from UQpy.utilities.distances.euclidean_distances import * 3 | from UQpy.utilities.distances.grassmannian_distances import * 4 | -------------------------------------------------------------------------------- /src/UQpy/utilities/distances/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.utilities.distances.baseclass.GrassmannianDistance import GrassmannianDistance 2 | from UQpy.utilities.distances.baseclass.Distance import Distance 3 | from UQpy.utilities.distances.baseclass.EuclideanDistance import EuclideanDistance 4 | -------------------------------------------------------------------------------- /src/UQpy/utilities/distances/euclidean_distances/BrayCurtisDistance.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | from UQpy.utilities.ValidationTypes import NumpyFloatArray, Numpy2DFloatArray 4 | from UQpy.utilities.distances.baseclass.EuclideanDistance import EuclideanDistance 5 | from scipy.spatial.distance import pdist 6 | 7 | 8 | class BrayCurtisDistance(EuclideanDistance): 9 | def compute_distance(self, xi: NumpyFloatArray, xj: NumpyFloatArray) -> float: 10 | """ 11 | Given two points, this method calculates the Bray-Curtis distance. 12 | 13 | :param xi: First point. 14 | :param xj: Second point. 15 | :return: A float representing the distance between the points. 16 | """ 17 | 18 | return pdist([xi, xj], "braycurtis")[0] -------------------------------------------------------------------------------- /src/UQpy/utilities/distances/euclidean_distances/CanberraDistance.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | from UQpy.utilities.ValidationTypes import NumpyFloatArray 4 | from UQpy.utilities.distances.baseclass.EuclideanDistance import EuclideanDistance 5 | from scipy.spatial.distance import pdist 6 | 7 | 8 | class CanberraDistance(EuclideanDistance): 9 | 10 | def compute_distance(self, xi: NumpyFloatArray, xj: NumpyFloatArray) -> float: 11 | """ 12 | Given two points, this method calculates the Canberra distance. 13 | 14 | :param xi: First point. 15 | :param xj: Second point. 16 | :return: A float representing the distance between the points. 
17 | """ 18 | 19 | return pdist([xi, xj], "canberra")[0] 20 | -------------------------------------------------------------------------------- /src/UQpy/utilities/distances/euclidean_distances/ChebyshevDistance.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | from UQpy.utilities.ValidationTypes import NumpyFloatArray 4 | from UQpy.utilities.distances.baseclass.EuclideanDistance import EuclideanDistance 5 | from scipy.spatial.distance import pdist 6 | 7 | 8 | class ChebyshevDistance(EuclideanDistance): 9 | 10 | def compute_distance(self, xi: NumpyFloatArray, xj: NumpyFloatArray) -> float: 11 | """ 12 | Given two points, this method calculates the Chebyshev distance. 13 | 14 | :param xi: First point. 15 | :param xj: Second point. 16 | :return: A float representing the distance between the points. 17 | """ 18 | return pdist([xi, xj], "chebyshev")[0] 19 | -------------------------------------------------------------------------------- /src/UQpy/utilities/distances/euclidean_distances/CityBlockDistance.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | from UQpy.utilities.ValidationTypes import NumpyFloatArray 4 | from UQpy.utilities.distances.baseclass.EuclideanDistance import EuclideanDistance 5 | from scipy.spatial.distance import pdist 6 | 7 | 8 | class CityBlockDistance(EuclideanDistance): 9 | 10 | def compute_distance(self, xi: NumpyFloatArray, xj: NumpyFloatArray) -> float: 11 | """ 12 | Given two points, this method calculates the City Block (Manhattan) distance. 13 | 14 | :param xi: First point. 15 | :param xj: Second point. 16 | :return: A float representing the distance between the points. 17 | """ 18 | 19 | return pdist([xi, xj], "cityblock")[0] 20 | -------------------------------------------------------------------------------- /src/UQpy/utilities/distances/euclidean_distances/CorrelationDistance.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | from UQpy.utilities.ValidationTypes import NumpyFloatArray 4 | from UQpy.utilities.distances.baseclass.EuclideanDistance import EuclideanDistance 5 | from scipy.spatial.distance import pdist 6 | 7 | 8 | class CorrelationDistance(EuclideanDistance): 9 | 10 | def compute_distance(self, xi: NumpyFloatArray, xj: NumpyFloatArray) -> float: 11 | """ 12 | Given two points, this method calculates the Correlation distance. 13 | 14 | :param xi: First point. 15 | :param xj: Second point. 16 | :return: A float representing the distance between the points. 17 | """ 18 | return pdist([xi, xj], "correlation")[0] 19 | -------------------------------------------------------------------------------- /src/UQpy/utilities/distances/euclidean_distances/CosineDistance.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | from UQpy.utilities.ValidationTypes import NumpyFloatArray 4 | from UQpy.utilities.distances.baseclass.EuclideanDistance import EuclideanDistance 5 | from scipy.spatial.distance import pdist 6 | 7 | 8 | class CosineDistance(EuclideanDistance): 9 | 10 | def compute_distance(self, xi: NumpyFloatArray, xj: NumpyFloatArray) -> float: 11 | """ 12 | Given two points, this method calculates the Cosine distance. 13 | 14 | :param xi: First point. 15 | :param xj: Second point. 16 | :return: A float representing the distance between the points. 
17 | """ 18 | 19 | return pdist([xi, xj], "cosine")[0] 20 | -------------------------------------------------------------------------------- /src/UQpy/utilities/distances/euclidean_distances/L2Distance.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | from scipy.spatial.distance import pdist 3 | from UQpy.utilities import NumpyFloatArray 4 | from UQpy.utilities.distances.baseclass.EuclideanDistance import EuclideanDistance 5 | 6 | 7 | class L2Distance(EuclideanDistance): 8 | 9 | def compute_distance(self, xi: NumpyFloatArray, xj: NumpyFloatArray) -> float: 10 | """ 11 | Given two points, this method calculates the L2 distance. 12 | 13 | :param xi: First point. 14 | :param xj: Second point. 15 | :return: A float representing the distance between the points. 16 | """ 17 | 18 | return pdist([xi, xj], "euclidean")[0] 19 | -------------------------------------------------------------------------------- /src/UQpy/utilities/distances/euclidean_distances/MinkowskiDistance.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | from UQpy.utilities.ValidationTypes import NumpyFloatArray, Numpy2DFloatArray 4 | from UQpy.utilities.distances.baseclass.EuclideanDistance import EuclideanDistance 5 | from scipy.spatial.distance import pdist 6 | 7 | 8 | class MinkowskiDistance(EuclideanDistance): 9 | def __init__(self, p: float = 2): 10 | """ 11 | :param p: Order of the norm. 12 | """ 13 | self.p = p 14 | 15 | def compute_distance(self, xi: NumpyFloatArray, xj: NumpyFloatArray) -> float: 16 | """ 17 | Given two points, this method calculates the Minkowski distance. 18 | 19 | :param xi: First point. 20 | :param xj: Second point. 21 | :return: A float representing the distance between the points. 
22 | """ 23 | return pdist([xi, xj], "minkowski", p=self.p)[0] 24 | -------------------------------------------------------------------------------- /src/UQpy/utilities/distances/euclidean_distances/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.utilities.distances.euclidean_distances.BrayCurtisDistance import BrayCurtisDistance 2 | from UQpy.utilities.distances.euclidean_distances.CanberraDistance import CanberraDistance 3 | from UQpy.utilities.distances.euclidean_distances.ChebyshevDistance import ChebyshevDistance 4 | from UQpy.utilities.distances.euclidean_distances.CityBlockDistance import CityBlockDistance 5 | from UQpy.utilities.distances.euclidean_distances.CorrelationDistance import CorrelationDistance 6 | from UQpy.utilities.distances.euclidean_distances.CosineDistance import CosineDistance 7 | from UQpy.utilities.distances.euclidean_distances.L2Distance import L2Distance 8 | from UQpy.utilities.distances.euclidean_distances.MinkowskiDistance import MinkowskiDistance 9 | -------------------------------------------------------------------------------- /src/UQpy/utilities/distances/grassmannian_distances/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.utilities.distances.grassmannian_distances.AsimovDistance import AsimovDistance 2 | from UQpy.utilities.distances.grassmannian_distances.BinetCauchyDistance import BinetCauchyDistance 3 | from UQpy.utilities.distances.grassmannian_distances.FubiniStudyDistance import FubiniStudyDistance 4 | from UQpy.utilities.distances.grassmannian_distances.GeodesicDistance import GeodesicDistance 5 | from UQpy.utilities.distances.grassmannian_distances.MartinDistance import MartinDistance 6 | from UQpy.utilities.distances.grassmannian_distances.ProcrustesDistance import ProcrustesDistance 7 | from UQpy.utilities.distances.grassmannian_distances.ProjectionDistance import ProjectionDistance 8 | from UQpy.utilities.distances.grassmannian_distances.SpectralDistance import SpectralDistance 9 | -------------------------------------------------------------------------------- /src/UQpy/utilities/kernels/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.utilities.kernels.baseclass import * 2 | from UQpy.utilities.kernels.euclidean_kernels import * 3 | from UQpy.utilities.kernels.grassmannian_kernels import * 4 | 5 | from UQpy.utilities.kernels.grassmannian_kernels.BinetCauchyKernel import BinetCauchyKernel 6 | from UQpy.utilities.kernels.GaussianKernel import GaussianKernel 7 | 8 | -------------------------------------------------------------------------------- /src/UQpy/utilities/kernels/baseclass/EuclideanKernel.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | 3 | from UQpy.utilities.kernels.baseclass.Kernel import Kernel 4 | 5 | 6 | class EuclideanKernel(Kernel, ABC): 7 | """This is a blueprint for Euclidean kernels implemented in the :py:mod:`kernels` module .""" 8 | -------------------------------------------------------------------------------- /src/UQpy/utilities/kernels/baseclass/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.utilities.kernels.baseclass.EuclideanKernel import EuclideanKernel 2 | from UQpy.utilities.kernels.baseclass.GrassmannianKernel import GrassmannianKernel 3 | from UQpy.utilities.kernels.baseclass.Kernel import Kernel 4 | 
-------------------------------------------------------------------------------- /src/UQpy/utilities/kernels/euclidean_kernels/RBF.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | 3 | import numpy as np 4 | 5 | from UQpy.utilities.kernels.baseclass.EuclideanKernel import EuclideanKernel 6 | from UQpy.utilities.kernels.baseclass.Kernel import Kernel 7 | 8 | 9 | class RBF(EuclideanKernel): 10 | def __init__(self, kernel_parameter: Union[int, float] = 1.0): 11 | super().__init__(kernel_parameter) 12 | 13 | def calculate_kernel_matrix(self, x, s): 14 | """ 15 | This method computes the RBF kernel on sample points 'x' and 's'. 16 | 17 | :param x: An array containing training points. 18 | :param s: An array containing input points. 19 | """ 20 | stack = Kernel.check_samples_and_return_stack(x / self.kernel_parameter, s / self.kernel_parameter) 21 | self.kernel_matrix = np.exp(np.sum(-0.5 * (stack ** 2), axis=2)) 22 | return self.kernel_matrix 23 | -------------------------------------------------------------------------------- /src/UQpy/utilities/kernels/euclidean_kernels/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.utilities.kernels.euclidean_kernels.Matern import Matern 2 | from UQpy.utilities.kernels.euclidean_kernels.RBF import RBF -------------------------------------------------------------------------------- /src/UQpy/utilities/kernels/grassmannian_kernels/BinetCauchyKernel.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple 2 | 3 | import numpy as np 4 | 5 | from UQpy.utilities.kernels import GrassmannianKernel 6 | 7 | 8 | class BinetCauchyKernel(GrassmannianKernel): 9 | """ 10 | A class to calculate the Binet-Cauchy kernel. 11 | 12 | """ 13 | def element_wise_operation(self, xi_j: Tuple) -> float: 14 | """ 15 | Compute the Binet-Cauchy kernel entry for a tuple of points on the Grassmann manifold. 16 | 17 | :param xi_j: Tuple of orthonormal matrices representing the Grassmann points. 18 | """ 19 | xi, xj = xi_j 20 | r = np.dot(xi.T, xj) 21 | det = np.linalg.det(r) 22 | return det * det 23 | -------------------------------------------------------------------------------- /src/UQpy/utilities/kernels/grassmannian_kernels/ProjectionKernel.py: -------------------------------------------------------------------------------- 1 | from typing import Union, Tuple 2 | 3 | import numpy as np 4 | 5 | from UQpy.utilities.kernels.baseclass.GrassmannianKernel import GrassmannianKernel 6 | 7 | 8 | class ProjectionKernel(GrassmannianKernel): 9 | 10 | def __init__(self, kernel_parameter: Union[int, float] = None): 11 | """ 12 | :param kernel_parameter: Number of independent p-planes of each Grassmann point. 13 | """ 14 | super().__init__(kernel_parameter) 15 | 16 | def element_wise_operation(self, xi_j: Tuple) -> float: 17 | """ 18 | Compute the Projection kernel entry for a tuple of points on the Grassmann manifold. 19 | 20 | :param xi_j: Tuple of orthonormal matrices representing the Grassmann points.
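:return: The kernel entry, equal to the squared Frobenius norm of :math:`x_i^T x_j`.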
21 | """ 22 | xi, xj = xi_j 23 | r = np.dot(xi.T, xj) 24 | n = np.linalg.norm(r, "fro") 25 | return n * n 26 | -------------------------------------------------------------------------------- /src/UQpy/utilities/kernels/grassmannian_kernels/__init__.py: -------------------------------------------------------------------------------- 1 | from UQpy.utilities.kernels.grassmannian_kernels.ProjectionKernel import ProjectionKernel 2 | from UQpy.utilities.kernels.grassmannian_kernels.BinetCauchyKernel import BinetCauchyKernel 3 | -------------------------------------------------------------------------------- /tests/unit_tests/eigenvalue_model.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class RunPythonModel: 5 | 6 | def __init__(self, samples=None, dimension=None): 7 | 8 | self.samples = samples 9 | self.dimension = dimension 10 | self.qoi = np.zeros_like(self.samples) 11 | for i in range(self.samples.shape[0]): 12 | p = np.array([[self.samples[i, 0]+self.samples[i, 1], -self.samples[i, 1], 0], 13 | [-self.samples[i, 1], self.samples[i, 1]+self.samples[i, 2], -self.samples[i, 2]], 14 | [0, -self.samples[i, 2], self.samples[i, 2]]]) 15 | w, v = np.linalg.eig(p) 16 | self.qoi[i, :] = w -------------------------------------------------------------------------------- /tests/unit_tests/inference/pfn_cubic.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def model_cubic(theta): 4 | domain = np.linspace(0, 10, 50) 5 | # this one takes one parameter vector theta and return one qoi 6 | inpt = np.array(theta).reshape((-1,)) 7 | return inpt[0] * domain + inpt[1] * domain ** 2 + inpt[2] * domain ** 3 -------------------------------------------------------------------------------- /tests/unit_tests/inference/pfn_linear.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def model_linear(theta): 4 | domain = np.linspace(0, 10, 50) 5 | # this one takes one parameter vector theta and return one qoi 6 | inpt = np.array(theta).reshape((-1,)) 7 | return inpt[0] * domain 8 | -------------------------------------------------------------------------------- /tests/unit_tests/inference/pfn_quadratic.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def model_quadratic(theta): 4 | domain = np.linspace(0, 10, 50) 5 | # this one takes one parameter vector theta and return one qoi 6 | inpt = np.array(theta).reshape((-1,)) 7 | return inpt[0] * domain + inpt[1] * domain ** 2 -------------------------------------------------------------------------------- /tests/unit_tests/pfn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def example1(samples=None): 5 | g = np.zeros(samples.shape[0]) 6 | for i in range(samples.shape[0]): 7 | R = samples[i, 0] 8 | S = samples[i, 1] 9 | g[i] = R - S 10 | return g 11 | 12 | 13 | def example2(samples=None): 14 | import numpy as np 15 | d = 2 16 | beta = 3.0902 17 | g = np.zeros(samples.shape[0]) 18 | for i in range(samples.shape[0]): 19 | g[i] = -1 / np.sqrt(d) * (samples[i, 0] + samples[i, 1]) + beta 20 | return g 21 | 22 | 23 | def example3(samples=None): 24 | g = np.zeros(samples.shape[0]) 25 | for i in range(samples.shape[0]): 26 | g[i] = 6.2 * samples[i, 0] - samples[i, 1] * samples[i, 2] ** 2 27 | return g 28 | 29 | 30 | def example4(samples=None): 31 
| g = np.zeros(samples.shape[0]) 32 | for i in range(samples.shape[0]): 33 | g[i] = samples[i, 0] * samples[i, 1] - 80 34 | return g -------------------------------------------------------------------------------- /tests/unit_tests/pfn_models.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | 4 | domain = np.linspace(0, 10, 50) 5 | 6 | 7 | def model_quadratic(theta): 8 | # this one takes one parameter vector theta and return one qoi 9 | inpt = np.array(theta).reshape((-1,)) 10 | return inpt[0] * domain + inpt[1] * domain ** 2 11 | 12 | 13 | def model_linear(theta): 14 | # this one takes one parameter vector theta and return one qoi 15 | inpt = np.array(theta).reshape((-1,)) 16 | return inpt[0] * domain 17 | 18 | 19 | def model_cubic(theta): 20 | # this one takes one parameter vector theta and return one qoi 21 | inpt = np.array(theta).reshape((-1,)) 22 | return inpt[0] * domain + inpt[1] * domain ** 2 + inpt[2] * domain ** 3 -------------------------------------------------------------------------------- /tests/unit_tests/python_model_1Dfunction.py: -------------------------------------------------------------------------------- 1 | def y_func(z): 2 | import numpy as np 3 | return np.sin(z) -------------------------------------------------------------------------------- /tests/unit_tests/python_model_function.py: -------------------------------------------------------------------------------- 1 | def y_func(z): 2 | return 1/(6.2727*(abs(0.3-z[:, 0]**2-z[:, 1]**2)+0.01)) -------------------------------------------------------------------------------- /tests/unit_tests/reliability/Resonance_pfn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class RunPythonModel: 5 | 6 | def __init__(self, samples=None): 7 | self.samples = samples 8 | self.qoi = [0] * self.samples.shape[0] 9 | 10 | self.omega = 6. 
11 | self.epsilon = 0.0001 12 | 13 | for i in range(self.samples.shape[0]): 14 | add = self.samples[i][1] - self.samples[i][0] * (self.omega + self.epsilon) ** 2 15 | diff = self.samples[i][0] * (self.omega - self.epsilon) ** 2 - self.samples[i][1] 16 | self.qoi[i] = np.maximum(add, diff) -------------------------------------------------------------------------------- /tests/unit_tests/reliability/Rosenbrock.py: -------------------------------------------------------------------------------- 1 | from UQpy.distributions import DistributionND 2 | import numpy as np 3 | 4 | class Rosenbrock(DistributionND): 5 | def __init__(self, p=20.): 6 | super().__init__(p=p) 7 | 8 | def pdf(self, x): 9 | return np.exp(-(100*(x[:, 1]-x[:, 0]**2)**2+(1-x[:, 0])**2) / self.parameters['p']) 10 | 11 | def log_pdf(self, x): 12 | return -(100*(x[:, 1]-x[:, 0]**2)**2+(1-x[:, 0])**2)/self.parameters['p'] -------------------------------------------------------------------------------- /tests/unit_tests/reliability/Rosenbrock_pfn.py: -------------------------------------------------------------------------------- 1 | class RunPythonModel: 2 | 3 | def __init__(self, samples=None): 4 | 5 | self.samples = samples 6 | self.qoi = [0]*self.samples.shape[0] 7 | 8 | for i in range(self.samples.shape[0]): 9 | self.qoi[i] = 120 - self.samples[i][1] - 3*self.samples[i][0] -------------------------------------------------------------------------------- /tests/unit_tests/reliability/example_7_2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def performance_function(samples=None): 5 | """Performance function from Chapter 7 Example 7.2 from Du 2005""" 6 | elastic_modulus = 30e6 7 | length = 100 8 | width = 2 9 | height = 4 10 | d_0 = 3 11 | 12 | g = np.zeros(samples.shape[0]) 13 | for i in range(samples.shape[0]): 14 | x = (samples[i, 0] / width**2) ** 2 15 | y = (samples[i, 1] / height**2) ** 2 16 | d = ((4 * length**3) / (elastic_modulus * width * height)) * np.sqrt(x + y) 17 | g[i] = d_0 - d 18 | return g 19 | -------------------------------------------------------------------------------- /tests/unit_tests/reliability/pfn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | def example2(samples=None): 16 | import numpy as np 17 | d = 2 18 | beta = 3.0902 19 | g = np.zeros(samples.shape[0]) 20 | for i in range(samples.shape[0]): 21 | g[i] = -1 / np.sqrt(d) * (samples[i, 0] + samples[i, 1]) + beta 22 | return g 23 | 24 | 25 | def example3(samples=None): 26 | g = np.zeros(samples.shape[0]) 27 | for i in range(samples.shape[0]): 28 | g[i] = 6.2 * samples[i, 0] - samples[i, 1] * samples[i, 2] ** 2 29 | return g 30 | 31 | 32 | def example4(samples=None): 33 | g = np.zeros(samples.shape[0]) 34 | for i in range(samples.shape[0]): 35 | g[i] = samples[i, 0] * samples[i, 1] - 80 36 | return g 37 | 38 | 39 | def RunPythonModel(samples, b_eff, d): 40 | 41 | qoi = list() 42 | for i in range(samples.shape[0]): 43 | qoi.append(b_eff * np.sqrt(d) - np.sum(samples[i, :])) 44 | return qoi 45 | 46 | -------------------------------------------------------------------------------- /tests/unit_tests/reliability/pfn1.py: -------------------------------------------------------------------------------- 1 | def model_i(samples): 2 | resistance = samples[0, 0] 3 | stress = samples[0, 1] 4 | return resistance - stress 
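These reliability fixtures share one calling convention: an (n_samples, n_dimensions) array in, one performance value per sample out, with g > 0 meaning safe and g < 0 meaning failure. A quick, hypothetical usage sketch of the Du (2005) cantilever limit state in example_7_2.py above (the load values and the import path are assumptions made only for illustration):

import numpy as np
from example_7_2 import performance_function  # assumes tests/unit_tests/reliability is on sys.path

samples = np.array([[500.0, 1000.0],    # illustrative (Px, Py) load pairs, not the test's random samples
                    [800.0, 1600.0]])
print(performance_function(samples))    # ~[0.67, -0.73]: the first point is safe, the second fails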
-------------------------------------------------------------------------------- /tests/unit_tests/reliability/pfn2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def model_j(samples): 5 | d0 = 3 6 | e = 30000000 7 | l = 100 8 | w = 2 9 | t = 4 10 | return d0 - 4 * l ** 3 / (e * w * t) * np.sqrt((samples[0, 1] / t ** 2) ** 2 + (samples[0, 0] / w ** 2) ** 2) -------------------------------------------------------------------------------- /tests/unit_tests/reliability/pfn3.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def example1(samples=None): 5 | g = np.zeros(samples.shape[0]) 6 | for i in range(samples.shape[0]): 7 | R = samples[i, 0] 8 | S = samples[i, 1] 9 | g[i] = R - S 10 | return g -------------------------------------------------------------------------------- /tests/unit_tests/reliability/pfn4.py: -------------------------------------------------------------------------------- 1 | 2 | def model_k(samples): 3 | return samples[0, 0] * samples[0, 1] - 80 -------------------------------------------------------------------------------- /tests/unit_tests/reliability/pfn5.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def Example1(samples=None): 5 | 6 | x = np.zeros(samples.shape[0]) 7 | 8 | omega = 6. 9 | epsilon = 0.0001 10 | 11 | for i in range(samples.shape[0]): 12 | add = samples[i][1] - samples[i][0]*(omega+epsilon)**2 13 | diff = samples[i][0]*(omega-epsilon)**2 - samples[i][1] 14 | x[i] = np.maximum(add, diff) 15 | 16 | return x -------------------------------------------------------------------------------- /tests/unit_tests/run_model/process_third_party_output.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def read_output(index): 5 | x = np.load("./OutputFiles/oupt_%d.npy" % index) 6 | return x 7 | -------------------------------------------------------------------------------- /tests/unit_tests/run_model/process_third_party_output_blank.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | -------------------------------------------------------------------------------- /tests/unit_tests/run_model/process_third_party_output_class.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class ReadOutput: 5 | def __init__(self, index): 6 | self.qoi = np.load("./OutputFiles/oupt_%d.npy" % index) 7 | -------------------------------------------------------------------------------- /tests/unit_tests/run_model/python_model.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def sum_rvs(samples=None): 5 | x = np.sum(samples, axis=1) 6 | return x 7 | 8 | 9 | def sum_rvs_vec(samples=None): 10 | x = np.sum(samples, axis=2) 11 | return x 12 | 13 | 14 | class SumRVs: 15 | def __init__(self, samples=None): 16 | 17 | self.qoi = np.sum(samples, axis=1) 18 | 19 | class SumRVsVec: 20 | def __init__(self, samples=None): 21 | 22 | self.qoi = np.sum(samples, axis=2) 23 | 24 | 25 | def det_rvs(samples=None): 26 | 27 | x = samples[:][0] * np.linalg.det(samples[:][1]) 28 | return x 29 | 30 | 31 | def det_rvs_par(samples=None): 32 | x = samples[0][0] * np.linalg.det(samples[0][1]) 33 | return x 34 | 35 | 36 | class DetRVs: 37 
| def __init__(self, samples=None): 38 | 39 | self.qoi = samples[0][0] * np.linalg.det(samples[0][1]) 40 | 41 | 42 | def det_rvs_fixed(samples=None, coeff=None): 43 | 44 | x = coeff * np.linalg.det(samples[:]) 45 | return x 46 | -------------------------------------------------------------------------------- /tests/unit_tests/run_model/python_model_blank.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | -------------------------------------------------------------------------------- /tests/unit_tests/run_model/python_model_class.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class SumRVs: 5 | def __init__(self, samples=None): 6 | 7 | self.qoi = np.sum(samples, axis=1) 8 | -------------------------------------------------------------------------------- /tests/unit_tests/run_model/python_model_function.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def sum_rvs(samples=None): 5 | x = np.sum(samples, axis=1) 6 | return x 7 | -------------------------------------------------------------------------------- /tests/unit_tests/run_model/python_model_sum_scalar.py: -------------------------------------------------------------------------------- 1 | import fire 2 | import os 3 | 4 | 5 | def python(index): 6 | command1 = "cp ./InputFiles/sum_scalar_" + str(index) + ".py ." 7 | # command2 runs the copied input script with the local Python interpreter; adjust it if a different interpreter is needed on your system. 8 | command2 = "python3 sum_scalar_" + str(index) + ".py" 9 | command3 = "mv ./OutputFiles/oupt.npy ./OutputFiles/oupt_" + str(index) + ".npy" 10 | command4 = "rm sum_scalar_" + str(index) + ".py" 11 | os.system(command1) 12 | os.system(command2) 13 | os.system(command3) 14 | os.system(command4) 15 | 16 | 17 | if __name__ == '__main__': 18 | fire.Fire(python) 19 | -------------------------------------------------------------------------------- /tests/unit_tests/run_model/python_model_sum_scalar_default.py: -------------------------------------------------------------------------------- 1 | import fire 2 | import os 3 | 4 | 5 | def python(index): 6 | command1 = "cp ./InputFiles/sum_scalar_default_" + str(index) + ".py ." 7 | # command2 runs the copied input script with the local Python interpreter; adjust it if a different interpreter is needed on your system.
8 | command2 = "python3 sum_scalar_default_" + str(index) + ".py" 9 | command3 = "mv ./OutputFiles/oupt.npy ./OutputFiles/oupt_" + str(index) + ".npy" 10 | command4 = "rm sum_scalar_default_" + str(index) + ".py" 11 | os.system(command1) 12 | os.system(command2) 13 | os.system(command3) 14 | os.system(command4) 15 | 16 | 17 | if __name__ == '__main__': 18 | fire.Fire(python) 19 | -------------------------------------------------------------------------------- /tests/unit_tests/run_model/sum_scalar.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | 4 | x = np.zeros(3) 5 | x[0] = 6 | x[1] = 7 | x[2] = 8 | output = sum(x) 9 | 10 | if not os.path.isdir('OutputFiles'): 11 | os.mkdir('OutputFiles') 12 | 13 | np.save('OutputFiles/oupt.npy', output) 14 | -------------------------------------------------------------------------------- /tests/unit_tests/run_model/sum_scalar_default.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | 4 | x = np.zeros(3) 5 | x[0] = 6 | x[1] = 7 | x[2] = 8 | output = sum(x) 9 | 10 | if not os.path.isdir('OutputFiles'): 11 | os.mkdir('OutputFiles') 12 | 13 | np.save('OutputFiles/oupt.npy', output) 14 | -------------------------------------------------------------------------------- /tests/unit_tests/sampling/BraninHoo.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def function(z, a=1, b=5.1/(4*np.pi**2), c=5/np.pi, r=6, s=10, t=1/(8*np.pi)): 5 | f = a*(z[:, 1] - b*z[:, 0]**2 + c*z[:, 0] - r)**2 + s*(1 - t)*np.cos(z[:, 0]) + s + 5*z[:, 0] 6 | return f -------------------------------------------------------------------------------- /tests/unit_tests/sampling/python_model_1Dfunction.py: -------------------------------------------------------------------------------- 1 | def y_func(z): 2 | import numpy as np 3 | return np.sin(z) 4 | -------------------------------------------------------------------------------- /tests/unit_tests/sampling/python_model_function.py: -------------------------------------------------------------------------------- 1 | def y_func(z): 2 | return 1/(6.2727*(abs(0.3-z[:, 0]**2-z[:, 1]**2)+0.01)) -------------------------------------------------------------------------------- /tests/unit_tests/sampling/series.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def series(z, k=7): 5 | t1 = 3 + 0.1 * (z[:, 1] - z[:, 0]) ** 2 - (z[:, 1] + z[:, 0]) / np.sqrt(2) 6 | t2 = 3 + 0.1 * (z[:, 1] - z[:, 0]) ** 2 + (z[:, 1] + z[:, 0]) / np.sqrt(2) 7 | t3 = z[:, 1] - z[:, 0] + k / np.sqrt(2) 8 | t4 = z[:, 0] - z[:, 1] + k / np.sqrt(2) 9 | return min([t1, t2, t3, t4]) -------------------------------------------------------------------------------- /tests/unit_tests/scientific_machine_learning/layers/test_fourier1d.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import UQpy.scientific_machine_learning as sml 3 | from hypothesis import given, strategies as st 4 | 5 | 6 | @given( 7 | batch_size=st.integers(min_value=1, max_value=128), 8 | width=st.integers(min_value=1, max_value=32), 9 | length=st.integers(min_value=64, max_value=256), 10 | modes=st.integers(min_value=1, max_value=32), 11 | ) 12 | def test_output_shape(batch_size, width, length, modes): 13 | """Fourier1d takes in a tensor of (batch_size, width, length) and outputs a
tensor of the same shape""" 14 | x = torch.ones((batch_size, width, length)) 15 | fourier = sml.Fourier1d(width, modes) 16 | y = fourier(x) 17 | assert x.shape == y.shape 18 | 19 | 20 | def test_extra_repr(): 21 | """Customize all inputs to confirm extra_repr correctly displays non-default configuration""" 22 | fourier = sml.Fourier1d(width=1, modes=2, bias=False) 23 | assert fourier.extra_repr() == "width=1, modes=2, bias=False" 24 | -------------------------------------------------------------------------------- /tests/unit_tests/scientific_machine_learning/layers/test_permutation.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import UQpy.scientific_machine_learning as sml 3 | from hypothesis import given 4 | from hypothesis.extra.numpy import array_shapes 5 | 6 | 7 | @given(array_shapes(min_dims=3, max_dims=3, max_side=8)) 8 | def test_forward(size): 9 | """Test sml.Permutation behaves as torch.permute""" 10 | dims = (0, 2, 1) 11 | layer = sml.Permutation(dims) 12 | x = torch.zeros(size) 13 | assert layer(x).shape == torch.permute(x, dims).shape 14 | 15 | 16 | def test_extra_repr(): 17 | layer = sml.Permutation((2, 3, 4)) 18 | assert layer.extra_repr() == "dims=(2, 3, 4)" 19 | -------------------------------------------------------------------------------- /tests/unit_tests/scientific_machine_learning/neural_networks/test_u_net.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import UQpy.scientific_machine_learning as sml 3 | 4 | 5 | def test_output_shape(): 6 | n_filters = [1, 64, 128] 7 | kernel_size = 3 8 | out_channels = 3 9 | unet = sml.Unet(n_filters, kernel_size, out_channels) 10 | 11 | x = torch.rand(1, 1, 512, 512) 12 | y = unet(x) 13 | assert y.shape == torch.Size((1, out_channels, 512, 512)) 14 | -------------------------------------------------------------------------------- /tests/unit_tests/sensitivity/exponential.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def evaluate(X: np.array) -> np.array: 5 | r"""A non-linear function that is used to test Cramer-von Mises sensitivity index. 6 | 7 | .. math:: 8 | f(x) = \exp(x_1 + 2*x_2) 9 | 10 | Parameters 11 | ---------- 12 | X : np.array 13 | An `N*D` array holding values for each parameter, where `N` is the 14 | number of samples and `D` is the number of parameters 15 | (in this case, 2). 16 | 17 | Returns 18 | ------- 19 | np.array 20 | [description] 21 | """ 22 | 23 | Y = np.exp(X[:, 0] + 2 * X[:, 1]) 24 | 25 | return Y 26 | -------------------------------------------------------------------------------- /tests/unit_tests/sensitivity/ishigami.py: -------------------------------------------------------------------------------- 1 | """ 2 | Auxiliary file 3 | ============================================== 4 | """ 5 | 6 | import numpy as np 7 | 8 | def evaluate(X, params=[7, 0.1]): 9 | """Non-monotonic Ishigami-Homma three parameter test function""" 10 | 11 | a = params[0] 12 | b = params[1] 13 | 14 | Y = np.sin(X[:, 0]) + a * np.power(np.sin(X[:, 1]), 2) + \ 15 | b * np.power(X[:, 2], 4) * np.sin(X[:, 0]) 16 | 17 | return Y 18 | -------------------------------------------------------------------------------- /tests/unit_tests/sensitivity/multioutput.py: -------------------------------------------------------------------------------- 1 | """" 2 | This is the toy example with multiple outputs from [1]_. 
3 | 4 | References 5 | ---------- 6 | 7 | .. [1] Gamboa F, Janon A, Klein T, Lagnoux A, others. 8 | Sensitivity analysis for multidimensional and functional outputs. 9 | Electronic journal of statistics 2014; 8(1): 575-603. 10 | 11 | """ 12 | 13 | import numpy as np 14 | 15 | 16 | def evaluate(X): 17 | 18 | """ 19 | 20 | * **Input:** 21 | 22 | * **X** (`ndarray`): 23 | Samples from the input distribution. 24 | Shape: (n_samples, 2) 25 | 26 | * **Output:** 27 | 28 | * **Y** (`ndarray`): 29 | Model evaluations. 30 | Shape: (2, n_samples) 31 | 32 | """ 33 | 34 | n_samples = X.shape[0] 35 | 36 | output = np.zeros((2, n_samples)) 37 | 38 | output[0, :] = X[:, 0] + X[:, 1] + X[:, 0] * X[:, 1] 39 | 40 | output[1, :] = 2 * X[:, 0] + X[:, 1] + 3 * X[:, 0] * X[:, 1] 41 | 42 | return output 43 | -------------------------------------------------------------------------------- /tests/unit_tests/sensitivity/pfn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def gfun_sensitivity(samples, a_values): 5 | gi_xi = [(np.abs(4. * Xi - 2) + ai) / (1. + ai) for Xi, ai in zip(np.array(samples).T, a_values)] 6 | gfun = np.prod(np.array(gi_xi), axis=0) 7 | return list(gfun) 8 | -------------------------------------------------------------------------------- /tests/unit_tests/sensitivity/sobol_func.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import copy 3 | 4 | 5 | def evaluate(X, a_values): 6 | 7 | dims = len(a_values) 8 | g = 1 9 | 10 | for i in range(dims): 11 | g_i = (np.abs(4 * X[:, i] - 2) + a_values[i]) / (1 + a_values[i]) 12 | g *= g_i 13 | 14 | return g 15 | 16 | 17 | def sensitivities(a_values): 18 | 19 | dims = len(a_values) 20 | 21 | Total_order = np.zeros((dims, 1)) 22 | 23 | V_i = (3 * (1 + a_values) ** 2) ** (-1) 24 | 25 | total_variance = np.prod(1 + V_i) - 1 26 | 27 | First_order = V_i / total_variance 28 | 29 | for i in range(dims): 30 | 31 | rem_First_order = copy.deepcopy(V_i) 32 | rem_First_order[i] = 0 33 | Total_order[i] = V_i[i] * np.prod(rem_First_order + 1) / total_variance 34 | 35 | return First_order.reshape(-1, 1), Total_order 36 | -------------------------------------------------------------------------------- /tests/unit_tests/series.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | def series(z, k=7): 5 | t1 = 3 + 0.1 * (z[:, 1] - z[:, 0]) ** 2 - (z[:, 1] + z[:, 0]) / np.sqrt(2) 6 | t2 = 3 + 0.1 * (z[:, 1] - z[:, 0]) ** 2 + (z[:, 1] + z[:, 0]) / np.sqrt(2) 7 | t3 = z[:, 1] - z[:, 0] + k / np.sqrt(2) 8 | t4 = z[:, 0] - z[:, 1] + k / np.sqrt(2) 9 | return min([t1, t2, t3, t4]) -------------------------------------------------------------------------------- /tests/unit_tests/stochastic_process/test_karhunen_loeve_1d.py: -------------------------------------------------------------------------------- 1 | from UQpy.stochastic_process import KarhunenLoeveExpansion 2 | import numpy as np 3 | 4 | n_sim = 100 # Num of samples 5 | m = 400 + 1 6 | T = 1000 7 | dt = T / (m - 1) 8 | t = np.linspace(0, T, m) 9 | 10 | # Target Covariance(ACF) 11 | R = np.zeros([m, m]) 12 | for i in range(m): 13 | for j in range(m): 14 | R[i, j] = 2 * np.exp(-((t[j] - t[i]) / 281) ** 2) 15 | 16 | KLE_Object = KarhunenLoeveExpansion(n_samples=n_sim, correlation_function=R, 17 | time_interval=dt, random_state=128) 18 | samples = KLE_Object.samples 19 | 20 | 21 | def test_samples_shape(): 22 | assert 
samples.shape == (n_sim, 1, len(t)) 23 | 24 | 25 | def test_samples_values(): 26 | assert np.isclose(samples[27, 0, 246], 0.22392952712490516, rtol=0.01) 27 | -------------------------------------------------------------------------------- /tests/unit_tests/stochastic_process/test_spectral_1d_1v.py: -------------------------------------------------------------------------------- 1 | from UQpy.stochastic_process import SpectralRepresentation 2 | import numpy as np 3 | 4 | 5 | n_sim = 100 # Num of samples 6 | n = 1 # Num of dimensions 7 | m = 1 # Num of variables 8 | T = 100 # Time(1 / T = dw) 9 | nt = 256 # Num of Discretized Time 10 | F = 1 / T * nt / 2 # Frequency.(Hz) 11 | nw = 128 # Num of Discretized Freq. 12 | 13 | # # Generation of Input Data(Stationary) 14 | dt = T / nt 15 | t = np.linspace(0, T - dt, nt) 16 | dw = F / nw 17 | w = np.linspace(0, F - dw, nw) 18 | t_u = 2 * np.pi / 2 / F 19 | 20 | S_1d_1v = 125 / 4 * w ** 2 * np.exp(-5 * w) 21 | SRM_object = SpectralRepresentation(n_sim, S_1d_1v, dt, dw, nt, nw, random_state=128) 22 | samples_1d_1v = SRM_object.samples 23 | 24 | 25 | def test_samples_1d_1v_shape(): 26 | assert samples_1d_1v.shape == (n_sim, 1, nt) 27 | 28 | 29 | # def test_samples_1d_1v_value(): 30 | # assert np.isclose(samples_1d_1v[53, 0, 134], -0.9143690244714813) 31 | -------------------------------------------------------------------------------- /tests/unit_tests/stochastic_process/test_spectral_nd_1v.py: -------------------------------------------------------------------------------- 1 | from UQpy.stochastic_process import SpectralRepresentation 2 | import numpy as np 3 | 4 | n_sim = 10 # Num of samples 5 | n = 2 # Num of dimensions 6 | m = 1 # Num of variables 7 | T = 10 8 | nt = 200 9 | dt = T / nt 10 | t = np.linspace(0, T - dt, nt) 11 | # Frequency 12 | W = np.array([1.0, 1.5]) 13 | nw = 100 14 | dw = W / nw 15 | x_list = [np.linspace(0, W[i] - dw[i], nw) for i in range(n)] 16 | xy_list = np.array(np.meshgrid(*x_list, indexing='ij')) 17 | 18 | S_nd_1v = 125 / 4 * np.linalg.norm(xy_list, axis=0) ** 2 * np.exp(-5 * np.linalg.norm(xy_list, axis=0)) 19 | SRM_object = SpectralRepresentation(n_sim, S_nd_1v, [dt, dt], dw, [nt, nt], [nw, nw], random_state=128) 20 | samples_nd_1v = SRM_object.samples 21 | 22 | 23 | def test_samples_nd_1v_shape(): 24 | assert samples_nd_1v.shape == (n_sim, 1, nt, nt) 25 | 26 | 27 | # def test_samples_nd_1v_values(): 28 | # assert np.isclose(1.0430071116540038, samples_nd_1v[4, 0, 107, 59]) -------------------------------------------------------------------------------- /tests/unit_tests/strata.txt: -------------------------------------------------------------------------------- 1 | 0.0 0.0 0.5 0.33333 2 | 0.0 0.33333 0.5 0.33333 3 | 0.0 0.66667 0.5 0.33333 4 | 0.5 0.0 0.5 0.5 5 | 0.5 0.5 0.25 0.5 6 | 0.75 0.5 0.25 0.5 -------------------------------------------------------------------------------- /tests/unit_tests/surrogates/python_model_1Dfunction.py: -------------------------------------------------------------------------------- 1 | def y_func(z): 2 | import numpy as np 3 | return np.sin(z) -------------------------------------------------------------------------------- /tests/unit_tests/surrogates/python_model_function.py: -------------------------------------------------------------------------------- 1 | def y_func(z): 2 | return 1/(6.2727*(abs(0.3-z[:, 0]**2-z[:, 1]**2)+0.01)) -------------------------------------------------------------------------------- /tests/unit_tests/transformations/test_correlate.py: 
-------------------------------------------------------------------------------- 1 | # Test the Correlation module 2 | 3 | from UQpy.transformations import Correlate 4 | import numpy as np 5 | import pytest 6 | 7 | 8 | def test_samples(): 9 | samples_z = np.array([[0.3, 0.2], [0.2, 2.4]]) 10 | rz = np.array([[1.0, 0.8], [0.8, 1.0]]) 11 | ntf_obj = Correlate(samples_u=samples_z, corr_z=rz) 12 | np.testing.assert_allclose(ntf_obj.samples_z, [[0.3, 0.36], [0.2, 1.6]], rtol=1e-09) 13 | 14 | 15 | def test_samples_u(): 16 | samples_z = np.array([[0.3, 0.2], [0.2, 2.4]]) 17 | with pytest.raises(Exception): 18 | assert Correlate(samples_u=samples_z) 19 | 20 | 21 | def test_corr_z(): 22 | rz = np.array([[1.0, 0.0], [0.0, 1.0]]) 23 | with pytest.raises(Exception): 24 | assert Correlate(corr_z=rz) 25 | 26 | --------------------------------------------------------------------------------
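As a closing note on test_correlate.py above, the expected values asserted in test_samples() can be reproduced by hand from the Cholesky factor of corr_z. The numpy sketch below illustrates the underlying transformation only; it is not the UQpy implementation itself, which may differ in detail:

import numpy as np

samples_u = np.array([[0.3, 0.2], [0.2, 2.4]])   # uncorrelated standard-normal samples (U-space)
rz = np.array([[1.0, 0.8], [0.8, 1.0]])          # target correlation matrix in Z-space

lower = np.linalg.cholesky(rz)    # lower-triangular factor, [[1.0, 0.0], [0.8, 0.6]]
samples_z = samples_u @ lower.T   # impose the target correlation
print(samples_z)                  # [[0.3, 0.36], [0.2, 1.6]], matching the assertion in test_samples()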