├── .github └── workflows │ ├── pylint.yml │ ├── python-package.yml │ ├── python-publish.yml │ └── python-test-publish.yml ├── .gitignore ├── .gitlab-ci.yml ├── .travis.yml ├── CGDE ├── .directory ├── Classification_Eval.py ├── Classification_blobs.py ├── Classification_gaussian-quantiles.py ├── Classification_random.py ├── Classification_real-data.py ├── DimWiseClassification.py ├── DimWiseTry.py ├── eval_log_sorting.py └── scratches.py ├── Doxyfile ├── Example.py ├── LICENSE ├── MANIFEST.in ├── README.md ├── SGDE ├── CompareSolutions.py ├── Datasets │ ├── 2D_StroSkewB2.csv │ ├── Circles500.csv │ ├── faithful.csv │ ├── funnychess.csv │ └── moons.csv ├── Example.ipynb ├── Example.py ├── GaussianCalculateErrors.py ├── GenerateFigureErrors.py ├── GenerateTableValues.ipynb ├── SGppCompare.ipynb └── SGppCompare.py ├── UQ ├── FunctionUQCalculateErrors.py ├── FunctionUQPlotErrors.py ├── GFunctionCalculateErrors.py ├── GFunctionPlotErrors.py ├── PredatorPrey │ ├── AllStepsCalculateErrors.py │ ├── AllStepsPlotErrors.py │ ├── ErrorsOverTime.py │ ├── Functionplot.py │ ├── PredatorPreyCommon.py │ ├── SinglestepCalculateErrorsMoments.py │ ├── SinglestepCalculateErrorsPCE.py │ ├── SinglestepPlotErrors.py │ ├── gauss_2D_solutions.npy │ ├── step210_2D_solutions.npy │ └── step25_2D_solutions.npy ├── README.md ├── TestTruncatedNormal.py ├── TestsUQ.py ├── function_uq_solutions.npy ├── quickstart.py ├── quickstart2.py └── quickstart_mixed.py ├── configure.sh ├── git_hooks └── pre-commit ├── install_chaospy.sh ├── ipynb ├── Beispiele.ipynb ├── Diss │ └── Figures_Diss.ipynb ├── Extend_Split_Strategy_Tutorial.ipynb ├── Extrapolation │ ├── .gitignore │ ├── Extrapolation_Test.ipynb │ └── Results │ │ ├── 2d │ │ └── medium_hard │ │ │ ├── error_comparison_FunctionExpVar_2d.csv │ │ │ ├── error_comparison_GenzC0_2d.csv │ │ │ ├── error_comparison_GenzCornerPeak_2d.csv │ │ │ ├── error_comparison_GenzGaussian_2d.csv │ │ │ ├── error_comparison_GenzOszillatory_2d.csv │ │ │ └── error_comparison_GenzProductPeak_2d.csv │ │ ├── 5d │ │ ├── extrapolated_surplus_grid_medium_hard │ │ │ ├── error_comparison_GenzGaussian_5d.csv │ │ │ └── error_comparison_GenzOszillatory_5d.csv │ │ ├── hard │ │ │ ├── error_comparison_FunctionExpVar_5d.csv │ │ │ ├── error_comparison_GenzC0_5d.csv │ │ │ ├── error_comparison_GenzCornerPeak_5d.csv │ │ │ ├── error_comparison_GenzDiscontinious_5d.csv │ │ │ ├── error_comparison_GenzGaussian_5d.csv │ │ │ ├── error_comparison_GenzOszillatory_5d.csv │ │ │ └── error_comparison_GenzProductPeak_5d.csv │ │ └── medium_hard │ │ │ ├── error_comparison_FunctionExpVar_5d.csv │ │ │ ├── error_comparison_GenzC0_5d.csv │ │ │ ├── error_comparison_GenzCornerPeak_5d.csv │ │ │ ├── error_comparison_GenzDiscontinious_5d.csv │ │ │ ├── error_comparison_GenzGaussian_5d.csv │ │ │ ├── error_comparison_GenzOszillatory_5d.csv │ │ │ └── error_comparison_GenzProductPeak_5d.csv │ │ └── Final │ │ ├── FinalExtrapolationErrorPlots.ipynb │ │ └── FinalGrids.ipynb ├── Grid_Tutorial.ipynb ├── Results_Regression │ ├── Adaptivity_margin.csv │ ├── Adaptivity_margin_second.csv │ ├── Full_vs_sparse_Discont2.csv │ ├── Full_vs_sparse_Discont22.csv │ ├── Full_vs_sparse_Discont23.csv │ ├── Full_vs_sparse_Discont24.csv │ ├── Full_vs_sparse_Discont25.csv │ ├── Full_vs_sparse_Discont26.csv │ ├── Full_vs_sparse_Discont27.csv │ ├── Full_vs_sparse_Discont3.csv │ ├── Full_vs_sparse_Discont4.csv │ ├── Full_vs_sparse_Discont5.csv │ ├── Full_vs_sparse_Oszi2.csv │ ├── Full_vs_sparse_Oszi3.csv │ ├── Full_vs_sparse_Oszi4.csv │ ├── Full_vs_sparse_Oszi5.csv │ ├── 
Full_vs_sparse_Oszi6.csv │ ├── Lambda_Discont5.csv │ ├── Lambda_Gaussian.csv │ ├── Lambda_Star.csv │ ├── Opticom_standard_combi.csv │ ├── Oszi_Opticom_adaptive2.csv │ ├── POLY.csv │ └── example_csv.csv ├── Temporal_Regression.ipynb ├── Tutorial.ipynb ├── Tutorial_DEMachineLearning.ipynb ├── Tutorial_DensityEstimation.ipynb ├── Tutorial_Extrapolation.ipynb ├── Tutorial_Regression.ipynb └── Tutorial_UncertaintyQuantification.ipynb ├── requirements.txt ├── setup.py ├── sparseSpACE ├── BasisFunctions.py ├── ComponentGridInfo.py ├── DEMachineLearning.py ├── DimAdaptiveCombi.py ├── ErrorCalculator.py ├── Extrapolation.py ├── Function.py ├── Grid.py ├── GridOperation.py ├── Hierarchization.py ├── Integrator.py ├── MonteCarlo.py ├── PerformTestCase.py ├── RefinementContainer.py ├── RefinementObject.py ├── StandardCombi.py ├── Utils.py ├── __init__.py ├── combiScheme.py ├── spatiallyAdaptiveBase.py ├── spatiallyAdaptiveCell.py ├── spatiallyAdaptiveExtendSplit.py └── spatiallyAdaptiveSingleDimension2.py └── test ├── run_tests.sh ├── test_BalancedExtrapolationGrid.py ├── test_BasisFunctions.py ├── test_BinaryTreeGrid.py ├── test_DEMachineLearning.py ├── test_DensityEstimation.py ├── test_ExtrapolationGrid.py ├── test_ExtrapolationInterpolatingGrid.py ├── test_ExtrapolationSimpsonGrid.py ├── test_Hierarchization.py ├── test_Integration_UQ.py ├── test_Integrator.py ├── test_RefinementContainer.py ├── test_RefinementObject.py ├── test_Regression.py ├── test_RombergWeightFactory.py ├── test_StandardCombi.py ├── test_UncertaintyQuantification.py ├── test_Utils.py ├── test_combiScheme.py ├── test_spatiallyAdaptiveExtendSplit.py └── test_spatiallyAdaptiveSingleDimension2.py /.github/workflows/pylint.yml: -------------------------------------------------------------------------------- 1 | name: Pylint 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | 8 | runs-on: ubuntu-latest 9 | 10 | steps: 11 | - uses: actions/checkout@v2 12 | - name: Set up Python 3.8 13 | uses: actions/setup-python@v1 14 | with: 15 | python-version: 3.8 16 | - name: Install dependencies 17 | run: | 18 | python -m pip install --upgrade pip 19 | pip install pylint 20 | - name: Analysing the code with pylint 21 | run: | 22 | pylint `ls -R|grep .py$|xargs` 23 | -------------------------------------------------------------------------------- /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions 2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 3 | 4 | name: Python package 5 | 6 | on: 7 | push: 8 | pull_request: 9 | branches: [ master ] 10 | 11 | jobs: 12 | build: 13 | 14 | runs-on: ubuntu-latest 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | python-version: [3.8, 3.9] 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | python -m pip install python-dateutil pytest mock pytest-cov coverage coveragepy-lcov flake8 30 | python -m pip install coveralls 31 | 32 | pip install -e . 33 | #- name: Lint with flake8 34 | # run: | 35 | # # stop the build if there are Python syntax errors or undefined names 36 | # flake8 . 
--count --select=E9,F63,F7,F82 --show-source --statistics 37 | # # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 38 | # flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 39 | - name: Test with pytest 40 | run: | 41 | pytest --cov coveralls --cov-report term-missing --cov=sparseSpACE test/ 42 | #- name: Upload coverage data to coveralls.io 43 | # run: | 44 | # python -m pip install coveralls==3.0.1 45 | # coveralls --service=github 46 | # env: 47 | # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 48 | - name: Convert to lcov 49 | run: | 50 | coveragepy-lcov 51 | - name: Coveralls GitHub Action 52 | uses: coverallsapp/github-action@1.1.3 53 | with: 54 | github-token: ${{ secrets.GITHUB_TOKEN }} 55 | path-to-lcov: lcov.info 56 | #- name: Coveralls GitHub Action 57 | # uses: coverallsapp/github-action@1.1.3 58 | # with: 59 | # github-token: ${{ secrets.GITHUB_TOKEN }} 60 | #- name: Coveralls 61 | # uses: coverallsapp/github-action@master 62 | # with: 63 | # github-token: ${{ secrets.GITHUB_TOKEN }} 64 | # path-to-lcov: coverage.xml 65 | 66 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | jobs: 16 | deploy: 17 | 18 | runs-on: ubuntu-latest 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | - name: Set up Python 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: '3.x' 26 | - name: Install dependencies 27 | run: | 28 | python -m pip install --upgrade pip 29 | pip install build 30 | - name: Build package 31 | run: python -m build 32 | - name: Publish package 33 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 34 | with: 35 | user: __token__ 36 | password: ${{ secrets.PYPI_API_TOKEN }} 37 | -------------------------------------------------------------------------------- /.github/workflows/python-test-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a push is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 
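# Note: although this workflow triggers on every push (see `on:` below), every
# step is guarded by `if: startsWith(github.ref, 'refs/tags')`, so a package is
# only built and uploaded to Test PyPI when a tag is pushed.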
8 | 9 | name: Upload Python Package to Test PYPI 10 | 11 | on: 12 | push 13 | 14 | jobs: 15 | deploy: 16 | 17 | runs-on: ubuntu-latest 18 | 19 | steps: 20 | - uses: actions/checkout@v2 21 | - name: Set up Python 22 | if: startsWith(github.ref, 'refs/tags') 23 | uses: actions/setup-python@v2 24 | with: 25 | python-version: '3.x' 26 | - name: Install dependencies 27 | if: startsWith(github.ref, 'refs/tags') 28 | run: | 29 | python -m pip install --upgrade pip 30 | pip install build 31 | - name: Build package 32 | if: startsWith(github.ref, 'refs/tags') 33 | run: python -m build 34 | - name: Publish package to Test PyPI 35 | if: startsWith(github.ref, 'refs/tags') 36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 37 | with: 38 | user: __token__ 39 | password: ${{ secrets.TEST_PYPI_API_TOKEN }} 40 | repository_url: https://test.pypi.org/legacy/ 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | .ipynb_checkpoints 3 | *.pdf 4 | *.ipynb 5 | *.pyc 6 | *.svg 7 | *.png 8 | .idea 9 | *Profile.txt 10 | *profile.txt 11 | Untitled.ipynb 12 | .directory 13 | eval_figs_* 14 | log_sg 15 | log_* 16 | .idea/* 17 | latex/ 18 | html/ 19 | chaospy/ 20 | *.out 21 | *.orig 22 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | image: "python:3.6" 2 | before_script: 3 | - pip3 install numpy 4 | - pip3 install scipy 5 | - pip3 install matplotlib 6 | - pip3 install nose 7 | - pip3 install chaospy 8 | - pip3 install scikit-learn # the 'sklearn' PyPI alias is deprecated; install scikit-learn directly 9 | test: 10 | stage: test 11 | script: 12 | - python3 -m nose 13 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | install: 3 | - pip3 install . 
4 | - pip3 install coveralls 5 | script: 6 | - ls 7 | #- python3 -m nose -w test/ 8 | - python3 -m nose --with-coverage --cover-package=sparseSpACE/ test/ 9 | - coveralls 10 | -------------------------------------------------------------------------------- /CGDE/.directory: -------------------------------------------------------------------------------- 1 | [Dolphin] 2 | HeaderColumnWidths=1471,104,134 3 | Timestamp=2020,8,23,21,43,11 4 | Version=4 5 | ViewMode=1 6 | 7 | [Settings] 8 | HiddenFilesShown=true 9 | -------------------------------------------------------------------------------- /CGDE/Classification_blobs.py: -------------------------------------------------------------------------------- 1 | from sys import path 2 | path.append('../src/') 3 | path.append('../SGDE') 4 | path.append('../SGDE/Datasets') 5 | 6 | # sgde tut 7 | from Utils import * 8 | 9 | from shutil import copyfile 10 | import os 11 | 12 | 13 | from sys import path 14 | path.append('../src/') 15 | import DEMachineLearning as do 16 | from ErrorCalculator import * 17 | import logging 18 | 19 | def prev_level(l, d): 20 | if l - 2 <= 0: 21 | return 1 22 | else: 23 | return (2**(l-2) - 1) * d + prev_level(l-2, d) 24 | 25 | change_log_file('logs/log_classification_blobs') 26 | 27 | clear_log() 28 | logUtil.set_log_level(log_levels.INFO) 29 | 30 | dim = 4 31 | max_level = 8 32 | test = ((2 ** max_level) - 1) * dim - (dim - 1) + (2 ** dim) * prev_level(max_level, dim) 33 | dim = 5 34 | max_level = 7 35 | test2 = ((2 ** max_level) - 1) * dim - (dim - 1) + (2 ** dim) * prev_level(max_level, dim) 36 | 37 | tolerance = -1.0 38 | 39 | logUtil.log_info('--- Classification_blobs start ---') 40 | for dimension in [2, 3, 4, 5]: 41 | 42 | # generate a blobs dataset of the given size with the sklearn library 43 | size = 25000 44 | dim = dimension 45 | sklearn_dataset = do.datasets.make_blobs(n_samples=size, n_features=dim, centers=3) 46 | 47 | 48 | # now we can transform this dataset into a DataSet object and give it an appropriate name 49 | data = do.DataSet(sklearn_dataset, name='blobs') 50 | data_range = (0.0, 1.0) 51 | data.scale_range(data_range) 52 | 53 | reuse_old_values = False 54 | dimWiseInitialized = False 55 | 56 | data_copy = data.copy() # deepcopied 57 | data_copy.scale_range(data_range) # scaled 58 | without_classes, with_classes = data_copy.split_without_labels() # separated into samples with and without classes 59 | 60 | data.scale_range(data_range) 61 | 62 | data_stdCombi = data.copy() 63 | data_stdCombi_copy = data_copy.copy() 64 | 65 | data_dimCombi = data.copy() 66 | data_dimCombi_copy = data_copy.copy() 67 | 68 | max_levels = [2, 3, 4, 5, 6] 69 | start_levels = [x - 3 for x in max_levels if 1 < x - 3 < 4] 70 | if len(start_levels) == 0: 71 | start_levels = [2] 72 | #for level_max in max_levels: 73 | for margin in [0.5]: 74 | for start_level in start_levels: 75 | for error_config in [(False, ErrorCalculatorSingleDimVolumeGuided()), (True, ErrorCalculatorSingleDimVolumeGuided()), (True, ErrorCalculatorSingleDimMisclassificationGlobal())]: 76 | for rebalancing in [True, False]: 77 | dimWiseInitialized = False 78 | for level_max in max_levels: 79 | #for margin in [0.5]: 80 | one_vs_others = error_config[0] 81 | error_calc = error_config[1] 82 | logUtil.log_info('next iteration') 83 | logUtil.log_info('do.datasets.make_blobs(n_samples=size, n_features=dim, centers=3)') 84 | logUtil.log_info('data size: ' + str(size)) 85 | logUtil.log_info('data dimension: ' + str(data.get_dim())) 86 | t = [i for i, x in 
enumerate(str(type(error_calc))) if '\'' in x] 87 | logUtil.log_info('error_calculator ' + str(type(error_calc))[t[0]+1:t[-1]]) 88 | logUtil.log_info('rebalancing: ' + str(rebalancing)) 89 | logUtil.log_info('margin: ' + str(margin)) 90 | logUtil.log_info('one_vs_others ' + str(one_vs_others)) 91 | 92 | 93 | classification = do.Classification(data_stdCombi, split_percentage=0.8, split_evenly=True) 94 | 95 | max_level = level_max 96 | print('classification max_level', max_level) 97 | logUtil.log_info('classification standardCombi max_level: ' + str(max_level)) 98 | classification.perform_classification(masslumping=False, lambd=0.0, minimum_level=1, maximum_level=max_level, one_vs_others=one_vs_others, reuse_old_values=reuse_old_values) 99 | 100 | classification.print_evaluation(print_incorrect_points=False) 101 | 102 | correct_classes = data_stdCombi.copy() 103 | correct_classes.scale_range(data_range) 104 | if not dimWiseInitialized: 105 | classification_dimwise = do.Classification(data_dimCombi, split_percentage=0.8, split_evenly=True) 106 | #max_evals = (((2**(max_level-1)) - 1) * dim) 107 | 108 | max_evals = ((2**max_level) - 1) * dim - (dim - 1) + (2**dim) * prev_level(max_level, dim) 109 | print('classification max_evaluations', max_evals) 110 | logUtil.log_info('classification dimwise max_evaluations: ' + str(max_evals)) 111 | logUtil.log_info('classification dimwise start level: ' + str(start_level)) 112 | logUtil.log_info('classification dimwise rebalance ' + str(rebalancing)) 113 | logUtil.log_info('classification dimwise margin ' + str(margin)) 114 | # the classification of the learning data tied to the Classification object has to be performed immediately, since no other method can be called before that without raising an error 115 | figure_prefix = 'dimwise_plots/blobs' 116 | if not dimWiseInitialized: 117 | classification_dimwise.perform_classification_dimension_wise(masslumping=False, 118 | lambd=0.0, 119 | minimum_level=1, 120 | maximum_level=start_level, 121 | reuse_old_values=reuse_old_values, 122 | numeric_calculation=False, 123 | boundary=False, 124 | modified_basis=False, 125 | one_vs_others=one_vs_others, 126 | tolerance=tolerance, 127 | margin=margin, 128 | rebalancing=rebalancing, 129 | max_evaluations=max_evals, 130 | error_calculator=error_calc) 131 | dimWiseInitialized = True 132 | else: 133 | classification_dimwise.continue_dimension_wise_refinement(tolerance=tolerance, 134 | max_evaluations=max_evals) 135 | 136 | classification_dimwise.print_evaluation(print_incorrect_points=False) 137 | 138 | logUtil.log_info('iteration end') 139 | 140 | logUtil.log_info('--- Classification_blobs end ---') 141 | 142 | # make a backup of the log without overwriting old ones 143 | log_backup = 'log_sg_backup' 144 | while os.path.isfile(log_backup): 145 | log_backup = log_backup + '+' 146 | copyfile(log_filename, log_backup) -------------------------------------------------------------------------------- /CGDE/Classification_gaussian-quantiles.py: -------------------------------------------------------------------------------- 1 | from sys import path 2 | path.append('../src/') 3 | path.append('../SGDE') 4 | path.append('../SGDE/Datasets') 5 | 6 | # sgde tut 7 | from Utils import * 8 | 9 | from shutil import copyfile 10 | import os 11 | 12 | 13 | from sys import path 14 | path.append('../src/') 15 | import DEMachineLearning as do 16 | from ErrorCalculator import * 17 | import logging 18 | 19 | def prev_level(l, d): 20 | if l - 2 <= 0: 21 | return 1 22 
| else: 23 | return (2**(l-2) - 1) * d + prev_level(l-2, d) 24 | 25 | change_log_file('logs/log_classification_gaussian-quantiles') 26 | 27 | clear_log() 28 | # pStuff.set_data_set_used('gaussianQ') 29 | logUtil.set_log_level(log_levels.INFO) 30 | 31 | dim = 4 32 | max_level = 8 33 | test = ((2 ** max_level) - 1) * dim - (dim - 1) + (2 ** dim) * prev_level(max_level, dim) 34 | dim = 5 35 | max_level = 7 36 | test2 = ((2 ** max_level) - 1) * dim - (dim - 1) + (2 ** dim) * prev_level(max_level, dim) 37 | 38 | tolerance = -1.0 39 | 40 | logUtil.log_info('--- Classification_eval start ---') 41 | for dimension in [2, 3, 4]: 42 | 43 | # generate a gaussian-quantiles dataset of the given size with the sklearn library 44 | size = 50000 45 | dim = dimension 46 | sklearn_dataset = do.datasets.make_gaussian_quantiles(n_samples=size, n_features=dim, n_classes=3) 47 | 48 | data = do.DataSet(sklearn_dataset, name='gaussian quantiles') 49 | data_range = (0.0, 1.0) 50 | data.scale_range(data_range) 51 | 52 | reuse_old_values = False 53 | dimWiseInitialized = False 54 | 55 | 56 | data_copy = data.copy() # deepcopied 57 | data_copy.scale_range(data_range) # scaled 58 | 59 | without_classes, with_classes = data_copy.split_without_labels() # separated into samples with and without classes 60 | # data_copy.plot() # plotted 61 | 62 | data.scale_range(data_range) 63 | 64 | data_stdCombi = data.copy() 65 | data_stdCombi_copy = data_copy.copy() 66 | 67 | data_dimCombi = data.copy() 68 | data_dimCombi_copy = data_copy.copy() 69 | 70 | max_levels = [2, 3, 4, 5, 6] 71 | start_levels = [x - 3 for x in max_levels if 1 < x - 3 < 4] 72 | if len(start_levels) == 0: 73 | start_levels = [2] 74 | for margin in [0.5]: 75 | #for level_max in max_levels: 76 | for start_level in start_levels: 77 | for error_config in [(False, ErrorCalculatorSingleDimVolumeGuided()), (True, ErrorCalculatorSingleDimVolumeGuided()), (True, ErrorCalculatorSingleDimMisclassificationGlobal())]: 78 | for rebalancing in [True, False]: 79 | dimWiseInitialized = False 80 | for level_max in max_levels: 81 | #for margin in [0.5]: 82 | one_vs_others = error_config[0] 83 | error_calc = error_config[1] 84 | logUtil.log_info('next iteration') 85 | logUtil.log_info('do.datasets.make_gaussian_quantiles(n_samples=size, n_features=dim, n_classes=3)') 86 | logUtil.log_info('data size: ' + str(size)) 87 | logUtil.log_info('data dimension: ' + str(data.get_dim())) 88 | t = [i for i, x in enumerate(str(type(error_calc))) if '\'' in x] 89 | logUtil.log_info('error_calculator ' + str(type(error_calc))[t[0]+1:t[-1]]) 90 | logUtil.log_info('rebalancing: ' + str(rebalancing)) 91 | logUtil.log_info('margin: ' + str(margin)) 92 | logUtil.log_info('one_vs_others ' + str(one_vs_others)) 93 | 94 | classification = do.Classification(data_stdCombi, split_percentage=0.8, split_evenly=True) 95 | 96 | max_level = level_max 97 | print('classification max_level', max_level) 98 | logUtil.log_info('classification standardCombi max_level: ' + str(max_level)) 99 | classification.perform_classification(masslumping=False, lambd=0.0, minimum_level=1, maximum_level=max_level, one_vs_others=one_vs_others, reuse_old_values=reuse_old_values) 100 | 101 | classification.print_evaluation(print_incorrect_points=False) 102 | 103 | correct_classes = data_stdCombi.copy() 104 | correct_classes.scale_range(data_range) 105 | 106 | 107 | ######################################################################################################################## 108 | 
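# Dimension-wise (spatially adaptive) counterpart to the standard combi run
# above: the classifier is created once per configuration at start_level and is
# then refined incrementally -- later iterations of the level_max loop call
# continue_dimension_wise_refinement() with a growing max_evals budget instead
# of rebuilding the classifier from scratch.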
######################################################################################################################## 109 | ######################################################################################################################## 110 | ######################################################################################################################## 111 | ######################################################################################################################## 112 | 113 | # pStuff.reset_class_counter() 114 | 115 | if not dimWiseInitialized: 116 | classification_dimwise = do.Classification(data_dimCombi, split_percentage=0.8, split_evenly=True) 117 | #max_evals = (((2**(max_level-1)) - 1) * dim) 118 | 119 | max_evals = ((2**max_level) - 1) * dim - (dim - 1) + (2**dim) * prev_level(max_level, dim) 120 | print('classification max_evaluations', max_evals) 121 | logUtil.log_info('classification dimwise max_evaluations: ' + str(max_evals)) 122 | logUtil.log_info('classification dimwise start level: ' + str(start_level)) 123 | logUtil.log_info('classification dimwise rebalance ' + str(rebalancing)) 124 | logUtil.log_info('classification dimwise margin ' + str(margin)) 125 | # after that we should immediately perform the classification for the learning data tied to the Classification object, since we can't really call any other method before that without raising an error 126 | figure_prefix = 'dimwise_plots/gaussian_quantiles' 127 | if not dimWiseInitialized: 128 | classification_dimwise.perform_classification_dimension_wise(masslumping=False, 129 | lambd=0.0, 130 | minimum_level=1, maximum_level=start_level, 131 | reuse_old_values=reuse_old_values, 132 | numeric_calculation=False, 133 | boundary=False, 134 | modified_basis=False, 135 | one_vs_others=one_vs_others, 136 | tolerance=tolerance, 137 | margin=margin, 138 | rebalancing=rebalancing, 139 | max_evaluations=max_evals, 140 | error_calculator=error_calc) 141 | dimWiseInitialized = True 142 | else: 143 | classification_dimwise.continue_dimension_wise_refinement(tolerance=tolerance, max_evaluations=max_evals) 144 | 145 | classification_dimwise.print_evaluation(print_incorrect_points=False) 146 | 147 | 148 | # pStuff.reset_class_counter() 149 | 150 | logUtil.log_info('iteration end') 151 | 152 | logUtil.log_info('--- Classification_eval end ---') 153 | 154 | # make a backup of the log without overwriting old ones 155 | log_backup = 'log_sg_backup' 156 | while os.path.isfile(log_backup): 157 | log_backup = log_backup + '+' 158 | copyfile(log_filename, log_backup) -------------------------------------------------------------------------------- /CGDE/Classification_random.py: -------------------------------------------------------------------------------- 1 | from sys import path 2 | path.append('../src/') 3 | path.append('../SGDE') 4 | path.append('../SGDE/Datasets') 5 | 6 | # sgde tut 7 | from Utils import * 8 | 9 | from shutil import copyfile 10 | import os 11 | 12 | 13 | from sys import path 14 | path.append('../src/') 15 | import DEMachineLearning as do 16 | from ErrorCalculator import * 17 | 18 | def prev_level(l, d): 19 | if l - 2 <= 0: 20 | return 1 21 | else: 22 | return (2**(l-2) - 1) * d + prev_level(l-2, d) 23 | 24 | change_log_file('logs/log_classification_random') 25 | 26 | clear_log() 27 | logUtil.set_log_level(log_levels.INFO) 28 | 29 | dim = 4 30 | max_level = 8 31 | test = ((2 ** max_level) - 1) * dim - (dim - 1) + (2 ** dim) * prev_level(max_level, dim) 32 | dim = 5 33 | max_level = 7 34 | 
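# test and test2 are sanity checks of the point-count formula that is reused
# below as the evaluation budget (max_evals) for the dimension-wise runs.
# Worked example for dim = 4, max_level = 8:
#   prev_level(8, 4) = 63*4 + (15*4 + (3*4 + 1)) = 325
#   test = (2**8 - 1)*4 - (4 - 1) + 2**4 * 325 = 1020 - 3 + 5200 = 6217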
test2 = ((2 ** max_level) - 1) * dim - (dim - 1) + (2 ** dim) * prev_level(max_level, dim) 35 | 36 | tolerance = -1.0 37 | 38 | logUtil.log_info('--- Classification_eval start ---') 39 | for dimension in [2, 3, 4, 5]: 40 | 41 | # generate a random classification dataset of the given size with the sklearn library 42 | size = 25000 43 | dim = dimension 44 | sklearn_dataset = do.datasets.make_classification(size, n_features=dim, n_redundant=0, n_clusters_per_class=1, 45 | n_informative=2, n_classes=3) 46 | 47 | data = do.DataSet(sklearn_dataset, name='random classification') 48 | data_range = (0.0, 1.0) 49 | data.scale_range(data_range) 50 | 51 | reuse_old_values = False 52 | dimWiseInitialized = False 53 | 54 | data_copy = data.copy() # deepcopied 55 | data_copy.scale_range(data_range) # scaled 56 | without_classes, with_classes = data_copy.split_without_labels() # separated into samples with and without classes 57 | 58 | data.scale_range(data_range) 59 | 60 | data_stdCombi = data.copy() 61 | data_stdCombi_copy = data_copy.copy() 62 | 63 | data_dimCombi = data.copy() 64 | data_dimCombi_copy = data_copy.copy() 65 | 66 | max_levels = [2, 3, 4, 5, 6] 67 | start_levels = [x - 3 for x in max_levels if 1 < x - 3 < 4] 68 | if len(start_levels) == 0: 69 | start_levels = [2] 70 | #for level_max in max_levels: 71 | for margin in [0.5]: 72 | for start_level in start_levels: 73 | for error_config in [(False, ErrorCalculatorSingleDimVolumeGuided()), (True, ErrorCalculatorSingleDimVolumeGuided()), (True, ErrorCalculatorSingleDimMisclassificationGlobal())]: 74 | for rebalancing in [True, False]: 75 | dimWiseInitialized = False 76 | #for margin in [0.5]: 77 | for level_max in max_levels: 78 | one_vs_others = error_config[0] 79 | error_calc = error_config[1] 80 | logUtil.log_info('next iteration') 81 | 82 | logUtil.log_info( 83 | 'do.datasets.make_classification(size, n_features=dim, n_redundant=0, n_clusters_per_class=1, n_informative=2, n_classes=3)') 84 | logUtil.log_info('data size: ' + str(size)) 85 | logUtil.log_info('data dimension: ' + str(data.get_dim())) 86 | t = [i for i, x in enumerate(str(type(error_calc))) if '\'' in x] 87 | logUtil.log_info('error_calculator ' + str(type(error_calc))[t[0]+1:t[-1]]) 88 | logUtil.log_info('rebalancing: ' + str(rebalancing)) 89 | logUtil.log_info('margin: ' + str(margin)) 90 | logUtil.log_info('one_vs_others ' + str(one_vs_others)) 91 | 92 | classification = do.Classification(data_stdCombi, split_percentage=0.8, split_evenly=True) 93 | 94 | max_level = level_max 95 | print('classification max_level', max_level) 96 | logUtil.log_info('classification standardCombi max_level: ' + str(max_level)) 97 | classification.perform_classification(masslumping=False, lambd=0.0, minimum_level=1, maximum_level=max_level, one_vs_others=one_vs_others, reuse_old_values=reuse_old_values) 98 | 99 | classification.print_evaluation(print_incorrect_points=False) 100 | 101 | ######################################################################################################################## 102 | ######################################################################################################################## 103 | ######################################################################################################################## 104 | ######################################################################################################################## 105 | ######################################################################################################################## 106 | 107 | 
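# Lazy initialization: the dimension-wise classifier is instantiated only for
# the first budget in max_levels and kept afterwards, so the following
# iterations continue refining the same refinement structure with a larger
# max_evals.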
if not dimWiseInitialized: 108 | classification_dimwise = do.Classification(data_dimCombi, split_percentage=0.8, split_evenly=True) 109 | #max_evals = (((2**(max_level-1)) - 1) * dim) 110 | 111 | max_evals = ((2**max_level) - 1) * dim - (dim - 1) + (2**dim) * prev_level(max_level, dim) 112 | print('classification max_evaluations', max_evals) 113 | logUtil.log_info('classification dimwise max_evaluations: ' + str(max_evals)) 114 | logUtil.log_info('classification dimwise start level: ' + str(start_level)) 115 | logUtil.log_info('classification dimwise rebalance ' + str(rebalancing)) 116 | logUtil.log_info('classification dimwise margin ' + str(margin)) 117 | 118 | figure_prefix = 'dimwise_plots/gaussian_quantiles' 119 | 120 | if not dimWiseInitialized: 121 | classification_dimwise.perform_classification_dimension_wise(masslumping=False, 122 | lambd=0.0, 123 | minimum_level=1, 124 | maximum_level=start_level, 125 | reuse_old_values=reuse_old_values, 126 | numeric_calculation=False, 127 | boundary=False, 128 | modified_basis=False, 129 | one_vs_others=one_vs_others, 130 | tolerance=tolerance, 131 | margin=margin, 132 | rebalancing=rebalancing, 133 | max_evaluations=max_evals, 134 | error_calculator=error_calc) 135 | dimWiseInitialized = True 136 | else: 137 | classification_dimwise.continue_dimension_wise_refinement(tolerance=tolerance, max_evaluations=max_evals) 138 | 139 | 140 | classification_dimwise.print_evaluation(print_incorrect_points=False) 141 | 142 | logUtil.log_info('iteration end') 143 | 144 | logUtil.log_info('--- Classification_eval end ---') 145 | 146 | # make a backup of the log without overwriting old ones 147 | log_backup = 'log_sg_backup' 148 | while os.path.isfile(log_backup): 149 | log_backup = log_backup + '+' 150 | copyfile(log_filename, log_backup) -------------------------------------------------------------------------------- /Example.py: -------------------------------------------------------------------------------- 1 | import sparseSpACE 2 | import numpy as np 3 | from sparseSpACE.spatiallyAdaptiveSingleDimension2 import * 4 | from sparseSpACE.Function import * 5 | from sparseSpACE.ErrorCalculator import * 6 | from sparseSpACE.GridOperation import * 7 | 8 | # dimension of the problem 9 | dim = 2 10 | 11 | # define integration domain boundaries 12 | a = np.zeros(dim) 13 | b = np.ones(dim) 14 | 15 | # define function to be integrated 16 | midpoint = np.ones(dim) * 0.5 17 | coefficients = np.array([ 10**0 * (d+1) for d in range(dim)]) 18 | f = GenzDiscontinious(border=midpoint,coeffs=coefficients) 19 | # plot function 20 | f.plot(np.ones(dim)*a,np.ones(dim)*b) 21 | 22 | # reference integral solution for calculating errors 23 | reference_solution = f.getAnalyticSolutionIntegral(a,b) 24 | 25 | # define error estimator for refinement 26 | errorOperator = ErrorCalculatorSingleDimVolumeGuided() 27 | 28 | # define equidistant grid 29 | grid=GlobalTrapezoidalGrid(a=a, b=b, modified_basis=False, boundary=True) 30 | 31 | # NEW! 
define operation which shall be performed in the combination technique 32 | operation = Integration(f=f, grid=grid, dim=dim, reference_solution=reference_solution) 33 | 34 | # define SingleDim refinement strategy for Spatially Adaptive Combination Technique 35 | adaptiveCombiInstanceSingleDim = SpatiallyAdaptiveSingleDimensions2(np.ones(dim) * a, np.ones(dim) * b, operation=operation) 36 | 37 | # performing the spatially adaptive refinement with the SingleDim method 38 | adaptiveCombiInstanceSingleDim.performSpatiallyAdaptiv(1,2,errorOperator,10**-2, do_plot=False) 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 
48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 
115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 
166 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include LICENSE 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![GitHub](https://img.shields.io/github/license/obersteiner/sparseSpACE) 2 | ![GitHub Workflow Status](https://img.shields.io/github/workflow/status/obersteiner/sparseSpACE/Python%20package) 3 | ![Coveralls](https://img.shields.io/coveralls/github/obersteiner/sparseSpACE) 4 | ![GitHub release (latest by date)](https://img.shields.io/github/v/release/obersteiner/sparseSpACE) 5 | ![PyPI](https://img.shields.io/pypi/v/sparseSpACE) 6 | 7 | # sparseSpACE - The Sparse Grid Spatially Adaptive Combination Environment 8 | 9 | This Python project implements different variants of the spatially adaptive Combination Technique. 10 | It originally targeted high-dimensional numerical integration with the spatially adaptive Combination Technique, but it now supports the implementation of arbitrary grid operations. It already supports numerical integration, interpolation, Uncertainty Quantification, Sparse Grid Density Estimation (with classification and clustering), regression, and PDE calculations. The GitHub page can be found [here](https://github.com/obersteiner/sparseSpACE.git). 11 | 12 | # Installation 13 | Install from PyPI using 14 | ``` 15 | pip install sparseSpACE 16 | ``` 17 | or (Linux example): 18 | ``` 19 | git clone https://github.com/obersteiner/sparseSpACE.git 20 | cd sparseSpACE 21 | pip install -e . 22 | ``` 23 | # Tutorials 24 | 25 | A short introduction to using the framework can be found in the ipynb tutorials (see the ipynb folder at https://github.com/obersteiner/sparseSpACE.git): 26 | - Tutorial.ipynb 27 | - Grid_Tutorial.ipynb 28 | - Extend_Split_Strategy_Tutorial.ipynb 29 | - Tutorial_DensityEstimation.ipynb 30 | - Tutorial_DEMachineLearning.ipynb 31 | - Tutorial_Extrapolation.ipynb 32 | - Tutorial_UncertaintyQuantification.ipynb 33 | - Tutorial_Regression.ipynb 34 | 35 | # Plotting 36 | 37 | The framework also supports various options for plotting the results. Examples can be found in the ipynb/Diss folder and in the Tutorials. 38 | 39 | # Software requirements 40 | 41 | These software requirements are installed automatically when using pip. For reference, the necessary libraries and versions are listed here (see requirements.txt): 42 | - python3 (3.5 or higher) 43 | - scipy (1.1.0 or higher) 44 | - numpy 45 | - matplotlib 46 | - ipython3 (for Tutorials) 47 | - ipython3 notebooks or jupyter notebook (for Tutorials) 48 | - chaospy (for UQ) 49 | - scikit-learn (for SGDE) 50 | - dill (for saving/loading the current state of the refinement to/from a file) 51 | - sympy (1.6 or higher) 52 | 53 | # Development 54 | For development, clone the repository from GitHub and use the configure script, which installs the library in editable mode and applies the git hooks used for the project. 
55 | ``` 56 | ./configure 57 | ``` 58 | -------------------------------------------------------------------------------- /SGDE/Datasets/faithful.csv: -------------------------------------------------------------------------------- 1 | "eruptions","waiting" 2 | 3.6,79 3 | 1.8,54 4 | 3.333,74 5 | 2.283,62 6 | 4.533,85 7 | 2.883,55 8 | 4.7,88 9 | 3.6,85 10 | 1.95,51 11 | 4.35,85 12 | 1.833,54 13 | 3.917,84 14 | 4.2,78 15 | 1.75,47 16 | 4.7,83 17 | 2.167,52 18 | 1.75,62 19 | 4.8,84 20 | 1.6,52 21 | 4.25,79 22 | 1.8,51 23 | 1.75,47 24 | 3.45,78 25 | 3.067,69 26 | 4.533,74 27 | 3.6,83 28 | 1.967,55 29 | 4.083,76 30 | 3.85,78 31 | 4.433,79 32 | 4.3,73 33 | 4.467,77 34 | 3.367,66 35 | 4.033,80 36 | 3.833,74 37 | 2.017,52 38 | 1.867,48 39 | 4.833,80 40 | 1.833,59 41 | 4.783,90 42 | 4.35,80 43 | 1.883,58 44 | 4.567,84 45 | 1.75,58 46 | 4.533,73 47 | 3.317,83 48 | 3.833,64 49 | 2.1,53 50 | 4.633,82 51 | 2,59 52 | 4.8,75 53 | 4.716,90 54 | 1.833,54 55 | 4.833,80 56 | 1.733,54 57 | 4.883,83 58 | 3.717,71 59 | 1.667,64 60 | 4.567,77 61 | 4.317,81 62 | 2.233,59 63 | 4.5,84 64 | 1.75,48 65 | 4.8,82 66 | 1.817,60 67 | 4.4,92 68 | 4.167,78 69 | 4.7,78 70 | 2.067,65 71 | 4.7,73 72 | 4.033,82 73 | 1.967,56 74 | 4.5,79 75 | 4,71 76 | 1.983,62 77 | 5.067,76 78 | 2.017,60 79 | 4.567,78 80 | 3.883,76 81 | 3.6,83 82 | 4.133,75 83 | 4.333,82 84 | 4.1,70 85 | 2.633,65 86 | 4.067,73 87 | 4.933,88 88 | 3.95,76 89 | 4.517,80 90 | 2.167,48 91 | 4,86 92 | 2.2,60 93 | 4.333,90 94 | 1.867,50 95 | 4.817,78 96 | 1.833,63 97 | 4.3,72 98 | 4.667,84 99 | 3.75,75 100 | 1.867,51 101 | 4.9,82 102 | 2.483,62 103 | 4.367,88 104 | 2.1,49 105 | 4.5,83 106 | 4.05,81 107 | 1.867,47 108 | 4.7,84 109 | 1.783,52 110 | 4.85,86 111 | 3.683,81 112 | 4.733,75 113 | 2.3,59 114 | 4.9,89 115 | 4.417,79 116 | 1.7,59 117 | 4.633,81 118 | 2.317,50 119 | 4.6,85 120 | 1.817,59 121 | 4.417,87 122 | 2.617,53 123 | 4.067,69 124 | 4.25,77 125 | 1.967,56 126 | 4.6,88 127 | 3.767,81 128 | 1.917,45 129 | 4.5,82 130 | 2.267,55 131 | 4.65,90 132 | 1.867,45 133 | 4.167,83 134 | 2.8,56 135 | 4.333,89 136 | 1.833,46 137 | 4.383,82 138 | 1.883,51 139 | 4.933,86 140 | 2.033,53 141 | 3.733,79 142 | 4.233,81 143 | 2.233,60 144 | 4.533,82 145 | 4.817,77 146 | 4.333,76 147 | 1.983,59 148 | 4.633,80 149 | 2.017,49 150 | 5.1,96 151 | 1.8,53 152 | 5.033,77 153 | 4,77 154 | 2.4,65 155 | 4.6,81 156 | 3.567,71 157 | 4,70 158 | 4.5,81 159 | 4.083,93 160 | 1.8,53 161 | 3.967,89 162 | 2.2,45 163 | 4.15,86 164 | 2,58 165 | 3.833,78 166 | 3.5,66 167 | 4.583,76 168 | 2.367,63 169 | 5,88 170 | 1.933,52 171 | 4.617,93 172 | 1.917,49 173 | 2.083,57 174 | 4.583,77 175 | 3.333,68 176 | 4.167,81 177 | 4.333,81 178 | 4.5,73 179 | 2.417,50 180 | 4,85 181 | 4.167,74 182 | 1.883,55 183 | 4.583,77 184 | 4.25,83 185 | 3.767,83 186 | 2.033,51 187 | 4.433,78 188 | 4.083,84 189 | 1.833,46 190 | 4.417,83 191 | 2.183,55 192 | 4.8,81 193 | 1.833,57 194 | 4.8,76 195 | 4.1,84 196 | 3.966,77 197 | 4.233,81 198 | 3.5,87 199 | 4.366,77 200 | 2.25,51 201 | 4.667,78 202 | 2.1,60 203 | 4.35,82 204 | 4.133,91 205 | 1.867,53 206 | 4.6,78 207 | 1.783,46 208 | 4.367,77 209 | 3.85,84 210 | 1.933,49 211 | 4.5,83 212 | 2.383,71 213 | 4.7,80 214 | 1.867,49 215 | 3.833,75 216 | 3.417,64 217 | 4.233,76 218 | 2.4,53 219 | 4.8,94 220 | 2,55 221 | 4.15,76 222 | 1.867,50 223 | 4.267,82 224 | 1.75,54 225 | 4.483,75 226 | 4,78 227 | 4.117,79 228 | 4.083,78 229 | 4.267,78 230 | 3.917,70 231 | 4.55,79 232 | 4.083,70 233 | 2.417,54 234 | 4.183,86 235 | 2.217,50 236 | 4.45,90 237 | 1.883,54 238 | 1.85,54 239 | 4.283,77 240 | 
3.95,79 241 | 2.333,64 242 | 4.15,75 243 | 2.35,47 244 | 4.933,86 245 | 2.9,63 246 | 4.583,85 247 | 3.833,82 248 | 2.083,57 249 | 4.367,82 250 | 2.133,67 251 | 4.35,74 252 | 2.2,54 253 | 4.45,83 254 | 3.567,73 255 | 4.5,73 256 | 4.15,88 257 | 3.817,80 258 | 3.917,71 259 | 4.45,83 260 | 2,56 261 | 4.283,79 262 | 4.767,78 263 | 4.533,84 264 | 1.85,58 265 | 4.25,83 266 | 1.983,43 267 | 2.25,60 268 | 4.75,75 269 | 4.117,81 270 | 2.15,46 271 | 4.417,90 272 | 1.817,46 273 | 4.467,74 274 | -------------------------------------------------------------------------------- /SGDE/Example.py: -------------------------------------------------------------------------------- 1 | from sys import path 2 | 3 | path.append('../src/') 4 | import numpy as np 5 | from ErrorCalculator import * 6 | from GridOperation import * 7 | from StandardCombi import * 8 | from sklearn import datasets 9 | from SGppCompare import plot_comparison 10 | 11 | # dimension of the problem 12 | dim = 2 13 | 14 | # define number of samples 15 | size = 500 16 | 17 | # define boundaries 18 | a = np.zeros(dim) 19 | b = np.ones(dim) 20 | 21 | # define data (https://docs.scipy.org/doc/numpy-1.14.0/reference/routines.random.html) 22 | # random floats 23 | # data = np.random.random((size, dim)) 24 | 25 | # samples from the standard exponential distribution 26 | # data = np.random.standard_exponential((size, dim)) 27 | 28 | # samples from the standard normal distribution 29 | # data = np.random.standard_normal((size, dim)) 30 | 31 | # multivariate normal distribution 32 | # mean = np.array([0.0] * dim) 33 | # sigma = np.array([0.25]*dim) 34 | # cov = np.diag(sigma**2) 35 | # data = np.random.multivariate_normal(mean, cov, size) 36 | 37 | # scikit learn datasets 38 | # data = datasets.make_moons(size, noise=0.1) 39 | data = datasets.make_circles(size, noise=0.1) 40 | 41 | # csv dataset file 42 | # data = "Datasets/faithful.csv" 43 | # SGpp values for dataset 44 | # values = "Values/Circles_level_4_lambda_0.0.csv" 45 | 46 | # define lambda 47 | lambd = 0.01 48 | 49 | # define level of combigrid 50 | minimum_level = 1 51 | maximum_level = 4 52 | 53 | # define operation to be performed 54 | operation = DensityEstimation(data, dim, lambd=lambd) 55 | 56 | # create the combiObject and initialize it with the operation 57 | combiObject = StandardCombi(a, b, operation=operation) 58 | 59 | # perform the density estimation operation, has to be done before the printing and plotting 60 | combiObject.perform_operation(minimum_level, maximum_level) 61 | 62 | print("Plot of dataset:") 63 | operation.plot_dataset() 64 | 65 | print("Combination Scheme:") 66 | # when you pass the operation the function also plots the contour plot of each component grid 67 | combiObject.print_resulting_combi_scheme(operation=operation) 68 | 69 | print("Sparse Grid:") 70 | combiObject.print_resulting_sparsegrid(markersize=20) 71 | 72 | print("Plot of density estimation") 73 | # when contour = True, the contour plot is shown next to the 3D plot 74 | combiObject.plot(contour=True) 75 | 76 | print("Plot of comparison between sparseSpACE and SG++") 77 | # plot comparison between sparseSpACE and SG++ result if path to SG++ values is given 78 | # plot_comparison(dim=dim, data=data, values=values, combiObject=combiObject, plot_data=False, minimum_level=minimum_level, maximum_level=maximum_level, lambd=lambd, pointsPerDim=100) 79 | -------------------------------------------------------------------------------- /SGDE/GaussianCalculateErrors.py: 
-------------------------------------------------------------------------------- 1 | from mpl_toolkits import mplot3d 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | from scipy.stats import multivariate_normal 5 | from numpy.linalg import norm 6 | from GridOperation import * 7 | from StandardCombi import * 8 | import dill 9 | 10 | 11 | def numb_points_sparse_grid(combiObject: StandardCombi) -> int: 12 | """ 13 | This method calculates the number of points of the sparse grid 14 | :param combiObject: 15 | :return: number of points 16 | """ 17 | numpoints = 0 18 | for component_grid in combiObject.scheme: 19 | pointsgrid = combiObject.get_num_points_component_grid(component_grid.levelvector, False) 20 | 21 | numpoints += pointsgrid * int(component_grid.coefficient) 22 | return numpoints 23 | 24 | 25 | def plot_gaussian(filename: str = None, dim: int = 2, minimum_level: int = 1, maximum_level: int = 5) -> None: 26 | # define boundaries 27 | a = np.zeros(dim) 28 | b = np.ones(dim) 29 | 30 | data = "" 31 | 32 | # Define the gaussian distribution 33 | mean = np.array([0.5] * dim) 34 | sigma = np.array([0.25] * dim) 35 | cov = np.diag(sigma ** 2) 36 | rv = multivariate_normal(mean, cov) 37 | 38 | operation = DensityEstimation(data, dim, print_output=False) 39 | combiObject = StandardCombi(a, b, operation=operation, print_output=False) 40 | combiObject.set_combi_parameters(minimum_level, maximum_level) 41 | 42 | points = np.unique(combiObject.get_points_and_weights()[0], axis=0) 43 | 44 | x, y = zip(*points) 45 | z = rv.pdf(points) 46 | 47 | fig = plt.figure() 48 | ax = fig.add_subplot(111, projection='3d') 49 | ax.scatter(x, y, z) 50 | ax.set_xlabel('x') 51 | ax.set_ylabel('y') 52 | ax.set_zlabel('z') 53 | if filename is not None: 54 | plt.savefig(filename, bbox_inches='tight') 55 | plt.show() 56 | 57 | 58 | def calculate_gaussian_values(data, values_sgpp, dim: int = 5, minimum_level: int = 1, maximum_level: int = 5, lambd: float = 0.0, filename: str = None): 59 | # define boundaries 60 | a = np.zeros(dim) 61 | b = np.ones(dim) 62 | 63 | # define probability density function 64 | mean = np.array([0.5] * dim) 65 | sigma = np.array([0.25] * dim) 66 | cov = np.diag(sigma ** 2) 67 | rv = multivariate_normal(mean, cov) 68 | 69 | # create grid operation and combi object 70 | operation = DensityEstimation(data, dim, print_output=False, lambd=lambd) 71 | combiObject_combi = StandardCombi(a, b, operation=operation, print_output=False) 72 | combiObject_combi.perform_operation(minimum_level, maximum_level) 73 | 74 | # combiObject_combi.set_combi_parameters(minimum_level, maximum_level) 75 | # operation.surpluses = dill.load(open("../../src/DE/surpluses_" + str(maximum_level) + "_" + str(lambd) + "_" + str(dim), "rb")) 76 | # dill.dump(operation.get_result(), open("../../src/DE/surpluses_" + str(maximum_level) + "_" + str(lambd) + "_" + str(dim), "wb")) 77 | 78 | # get the sparse grid points 79 | points = np.unique(combiObject_combi.get_points_and_weights()[0], axis=0) 80 | numb_points = numb_points_sparse_grid(combiObject_combi) 81 | 82 | # calculate the reference density and calculate the difference 83 | results_density = np.vstack(rv.pdf(points)) 84 | result_combi = combiObject_combi(points) 85 | diff_combi = np.subtract(results_density, result_combi) 86 | 87 | # calculate the error norms 88 | l1_norm_combi = np.linalg.norm(diff_combi, 1) 89 | l2_norm_combi = np.linalg.norm(diff_combi, 2) 90 | lmax_norm_combi = np.linalg.norm(diff_combi, np.inf) 91 | 92 | # 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 93 | # SG++ max 94 | # load result from .csv file 95 | result_sgpp = np.genfromtxt(values_sgpp, delimiter=',') 96 | result_sgpp = np.vstack(result_sgpp) 97 | 98 | # calculate difference and error norms 99 | diff_sgpp = np.subtract(results_density, result_sgpp) 100 | l1_norm_sgpp = np.linalg.norm(diff_sgpp, 1) 101 | l2_norm_sgpp = np.linalg.norm(diff_sgpp, 2) 102 | lmax_norm_sgpp = np.linalg.norm(diff_sgpp, np.inf) 103 | 104 | # ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 105 | # Mass lumping 1-max 106 | # create grid operation and combi object 107 | operation_lumping = DensityEstimation(data, dim, masslumping=True, print_output=False, lambd=lambd) 108 | combiObject_lumping = StandardCombi(a, b, operation=operation_lumping, print_output=False) 109 | combiObject_lumping.perform_operation(1, maximum_level) 110 | 111 | # combiObject_lumping.set_combi_parameters(minimum_level, maximum_level) 112 | # operation_lumping.surpluses = dill.load(open("../../src/DE/surpluses_lumping_" + str(maximum_level) + "_" + str(lambd) + "_" + str(dim), "rb")) 113 | # dill.dump(operation_lumping.get_result(), open("../../src/DE/surpluses_lumping_" + str(maximum_level) + "_" + str(lambd) + "_" + str(dim), "wb")) 114 | 115 | # calculate difference to the actual density 116 | result_lumping = combiObject_lumping(points) 117 | diff_lumping = np.subtract(results_density, result_lumping) 118 | 119 | # calculate error norms 120 | l1_norm_lumping = np.linalg.norm(diff_lumping, 1) 121 | l2_norm_lumping = np.linalg.norm(diff_lumping, 2) 122 | lmax_norm_lumping = np.linalg.norm(diff_lumping, np.inf) 123 | 124 | # ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- 125 | # Results 126 | if filename is not None: 127 | file = open(filename + ".txt", "a") 128 | else: 129 | file = None 130 | 131 | # Print output 132 | print("Config:\n\tMin: 1" + "\n\tMax: " + str(maximum_level) + "\n\tDim: " + str(dim) + "\tSize: " + str(len(operation.data)), file=file) 133 | print("\tPoints: " + str(points.tolist()), file=file) 134 | print("\tNumber of sample points: " + str(numb_points) + "\n\tLambda: " + str(lambd), file=file) 135 | print("Density Function " + str(maximum_level) + ":", file=file) 136 | print("\tResult: " + str(list(results_density.flatten())), file=file) 137 | print("\t#Points: " + str(numb_points), file=file) 138 | 139 | print("SG++ " + str(maximum_level) + ":", file=file) 140 | print("\tResult: " + str(list(result_sgpp.flatten())) + "\n\tDiff: " + str(list(diff_sgpp.flatten())), file=file) 141 | print("\tL1: " + str(l1_norm_sgpp) + "\n\tL2: " + str(l2_norm_sgpp) + "\tLmax: " + str(lmax_norm_sgpp), file=file) 142 | print("\t#Points: " + str(numb_points), file=file) 143 | 144 | print("Combi 1 " + str(maximum_level) + ":", file=file) 145 | print("\tResult: " + str(list(result_combi.flatten())) + "\n\tDiff: " + str(list(diff_combi.flatten())), file=file) 146 | print("\tL1: " + str(l1_norm_combi) + "\n\tL2: " + str(l2_norm_combi) + "\tLmax: " + str(lmax_norm_combi), file=file) 147 | print("\t#Points: " + str(numb_points), file=file) 148 | 149 | print("Lumping 1 " + str(maximum_level) + ":", file=file) 150 
| print("\tResult: " + str(list(result_lumping.flatten())) + "\n\tDiff: " + str(list(diff_lumping.flatten())), file=file) 151 | print("\tL1: " + str(l1_norm_lumping) + "\n\tL2: " + str(l2_norm_lumping) + "\tLmax: " + str(lmax_norm_lumping), file=file) 152 | print("\t#Points: " + str(numb_points) + "\n", file=file) 153 | 154 | if filename is not None: 155 | file.close() 156 | -------------------------------------------------------------------------------- /SGDE/SGppCompare.py: -------------------------------------------------------------------------------- 1 | from sys import path 2 | 3 | path.append('../src/') 4 | import numpy as np 5 | from ErrorCalculator import * 6 | from GridOperation import * 7 | from StandardCombi import * 8 | from matplotlib import cm 9 | from mpl_toolkits.mplot3d import Axes3D 10 | from sklearn import datasets, preprocessing 11 | from mpl_toolkits.axes_grid1 import make_axes_locatable 12 | 13 | 14 | def plot_comparison(filename: str = None, dim: int = 2, data: str = None, values: str = None, combiObject: StandardCombi = None, plot_data: bool = False, minimum_level: int = 1, 15 | maximum_level: int = 5, lambd: float = 0.0, pointsPerDim: int = 100): 16 | if values is None: 17 | print("No values for comparison given.") 18 | return 19 | if combiObject is None and data is not None: 20 | # define integration domain boundaries 21 | a = np.zeros(dim) 22 | b = np.ones(dim) 23 | 24 | # define operation to be performed 25 | operation = DensityEstimation(data, dim, lambd=lambd) 26 | 27 | # create the combiObject and initialize it with the operation 28 | combi = StandardCombi(a, b, operation=operation) 29 | 30 | # perform the density estimation operation 31 | combi.perform_operation(minimum_level, maximum_level) 32 | elif combiObject is not None: 33 | combi = combiObject 34 | else: 35 | print("No data or combiObject given.") 36 | return 37 | 38 | if plot_data: 39 | print("Plot of dataset:") 40 | operation.plot_dataset("Figures/dataset_" + data[9:-4] + "_" + str(minimum_level) + "_" + str(maximum_level) + "_" + str(lambd) + ".png") 41 | 42 | # print("Plot of density estimation") 43 | # combiObject.plot(contour=True) 44 | 45 | X = np.linspace(0.0, 1.0, pointsPerDim) 46 | Y = np.linspace(0.0, 1.0, pointsPerDim) 47 | X, Y = np.meshgrid(X, Y) 48 | 49 | Z = combi(list(map(lambda x, y: (x, y), X.flatten(), Y.flatten()))) 50 | Z = Z.reshape((100, 100)) 51 | 52 | fontsize = 30 53 | plt.rcParams.update({'font.size': fontsize}) 54 | 55 | fig = plt.figure(figsize=(30, 20)) 56 | ax = fig.add_subplot(2, 3, 1, projection='3d') 57 | ax.title.set_text("sparseSpACE") 58 | ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, linewidth=0, antialiased=False) 59 | 60 | ax = fig.add_subplot(2, 3, 4) 61 | p = ax.imshow(Z, extent=[0.0, 1.0, 0.0, 1.0], origin='lower', cmap=cm.coolwarm) 62 | divider = make_axes_locatable(ax) 63 | cax = divider.append_axes("right", size="5%", pad=0.1) 64 | fig.colorbar(p, cax=cax) 65 | 66 | # read in SGpp values for the above points 67 | dataCSV = np.genfromtxt(values, delimiter=',') 68 | 69 | ax = fig.add_subplot(2, 3, 2, projection='3d') 70 | ax.title.set_text("SG++") 71 | ax.plot_surface(X, Y, dataCSV, cmap=cm.coolwarm, linewidth=0, antialiased=False) 72 | 73 | ax = fig.add_subplot(2, 3, 5) 74 | p = ax.imshow(dataCSV, extent=[0.0, 1.0, 0.0, 1.0], origin='lower', cmap=cm.coolwarm) 75 | divider = make_axes_locatable(ax) 76 | cax = divider.append_axes("right", size="5%", pad=0.1) 77 | fig.colorbar(p, cax=cax) 78 | 79 | difValues = np.subtract(Z, dataCSV) 80 | 81 | ax = 
fig.add_subplot(2, 3, 3, projection='3d') 82 | ax.title.set_text("Difference") 83 | ax.plot_surface(X, Y, difValues, cmap=cm.coolwarm, linewidth=0, antialiased=False) 84 | 85 | ax = fig.add_subplot(2, 3, 6) 86 | p = ax.imshow(difValues, extent=[0.0, 1.0, 0.0, 1.0], origin='lower', cmap=cm.coolwarm) 87 | divider = make_axes_locatable(ax) 88 | cax = divider.append_axes("right", size="5%", pad=0.1) 89 | fig.colorbar(p, cax=cax) 90 | 91 | if filename is not None: 92 | plt.savefig(filename, bbox_inches='tight') 93 | plt.show() 94 | plt.rcParams.update({'font.size': plt.rcParamsDefault.get('font.size')}) 95 | -------------------------------------------------------------------------------- /UQ/FunctionUQPlotErrors.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plotter 3 | import sys 4 | import os 5 | 6 | tmpdir = os.getenv("XDG_RUNTIME_DIR") 7 | results_path = tmpdir + "/uqtestFUQ.npy" 8 | assert os.path.isfile(results_path) 9 | solutions_data = list(np.load(results_path, allow_pickle=True)) 10 | 11 | typ_descs = ("full grid Gauß", "Trapez", "HighOrder", "Lagrange", "sparse Gauß", "trans Trapez", "MC Halton") 12 | 13 | datas = [[] for _ in typ_descs] 14 | for v in solutions_data: 15 | (num_evals, typid, errs) = v 16 | typid = int(typid) 17 | if typid < 0 or typid >= len(typ_descs): 18 | continue 19 | if num_evals < 7: 20 | # No adaptive refinement points 21 | continue 22 | datas[typid].append((num_evals, *errs)) 23 | 24 | for typid in range(len(typ_descs)): 25 | datas[typid] = np.array(datas[typid]).T 26 | err_descs = ("E absolute", "E relative", "Var absolute", "Var relative") 27 | 28 | figure = plotter.figure(1, figsize=(11,11)) 29 | figure.canvas.set_window_title('FunctionUQ Errors') 30 | 31 | legend_shown = False 32 | for i,desc in enumerate(err_descs): 33 | if not i & 1: 34 | continue 35 | plotter.subplot(2, 1, 1 + (i-1)//2) 36 | for typid, typdesc in enumerate(typ_descs): 37 | if len(datas[typid]) < 1: 38 | print("No points for", typdesc) 39 | continue 40 | plotter.plot(datas[typid][0], datas[typid][i + 1], ".-", label=typdesc) 41 | plotter.xlabel('function evaluations') 42 | plotter.ylabel(f'{desc} error') 43 | plotter.yscale("log") 44 | plotter.xscale("log") 45 | if not legend_shown: 46 | plotter.legend(loc="lower left") 47 | legend_shown = True 48 | plotter.grid(True) 49 | 50 | fileName = os.path.splitext(sys.argv[0])[0] + '.pdf' 51 | plotter.savefig(fileName, format='pdf') 52 | 53 | plotter.show() 54 | -------------------------------------------------------------------------------- /UQ/GFunctionCalculateErrors.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import chaospy as cp 3 | import os 4 | 5 | import sys 6 | sys.path.append('../src/') 7 | from Function import * 8 | from spatiallyAdaptiveSingleDimension2 import * 9 | from ErrorCalculator import * 10 | from GridOperation import * 11 | 12 | 13 | d = 2 14 | shifted = True 15 | verbose = False 16 | 17 | types = ("Gauss", "adaptiveTrapez", "adaptiveHO", "BSpline", "adaptiveLagrange", "sparseGauss", "adaptiveTrapezMB") 18 | 19 | a = np.zeros(d) 20 | b = np.ones(d) 21 | if shifted: 22 | f_g = FunctionGShifted(d) 23 | else: 24 | f_g = FunctionG(d) 25 | reference_expectation = f_g.get_expectation() 26 | reference_variance = f_g.get_variance() 27 | # Create the operation only once 28 | op = UncertaintyQuantificationTesting(None, "Uniform", a, b, dim=d) 29 | 30 | def run_test(typi, typid, 
exceed_evals=None, evals_end=None, max_time=None): 31 | 32 | f = FunctionCustom(lambda x: f_g(x)) 33 | op.f = f 34 | 35 | multiple_evals = None 36 | typ = types[typid] 37 | lmax = 3 if typ == "adaptiveTrapezMB" else 2 38 | if typ not in ("Gauss", "sparseGauss"): 39 | if typ == "adaptiveHO": 40 | if False: 41 | # A non-weighted grid can be used due to the uniform distribution 42 | # This currently does not work. 43 | assert all([v == 0 for v in a]) 44 | assert all([v == 1 for v in b]) 45 | grid = GlobalHighOrderGrid(a, b, boundary=boundary, modified_basis=modified_basis, split_up=False) 46 | else: 47 | grid = GlobalHighOrderGridWeighted(a, b, op, boundary=True) 48 | elif typ == "adaptiveTrapez": 49 | grid = GlobalTrapezoidalGridWeighted(a, b, op, boundary=True) 50 | elif typ == "adaptiveTrapezMB": 51 | grid = GlobalTrapezoidalGridWeighted(a, b, op, boundary=False, modified_basis=True) 52 | elif typ == "adaptiveLagrange": 53 | grid = GlobalLagrangeGridWeighted(a, b, op, boundary=True) 54 | elif typ == "BSpline": 55 | # ~ grid = GlobalBSplineGrid(a, b, modified_basis=True, boundary=False, p=3) 56 | grid = GlobalBSplineGrid(a, b) 57 | # ~ lmax = 3 58 | op.set_grid(grid) 59 | combiinstance = SpatiallyAdaptiveSingleDimensions2(a, b, operation=op, norm=2) 60 | 61 | error_operator = ErrorCalculatorSingleDimVolumeGuided() 62 | expectation_var_func = op.get_expectation_variance_Function() 63 | # ~ expectation_var_func.plot(a, b, filename="exp.pdf", plotdimension=0) 64 | # ~ expectation_var_func.plot(a, b, filename="mom2.pdf", plotdimension=1) 65 | mom2 = reference_variance + reference_expectation * reference_expectation 66 | reference_solution = np.array([reference_expectation, mom2]) 67 | op.set_reference_solution(reference_solution) 68 | if evals_end is not None: 69 | multiple_evals = dict() 70 | combiinstance.performSpatiallyAdaptiv(1, lmax, expectation_var_func, 71 | error_operator, tol=0, max_evaluations=evals_end, 72 | print_output=True, solutions_storage=multiple_evals, 73 | max_time=max_time) 74 | elif exceed_evals is None: 75 | combiinstance.performSpatiallyAdaptiv(1, lmax, expectation_var_func, 76 | error_operator, tol=0, 77 | max_evaluations=1, 78 | print_output=verbose) 79 | else: 80 | combiinstance.performSpatiallyAdaptiv(1, lmax, expectation_var_func, 81 | error_operator, tol=np.inf, 82 | max_evaluations=np.inf, min_evaluations=exceed_evals+1, 83 | print_output=verbose) 84 | 85 | if multiple_evals is None: 86 | (E,), (Var,) = op.calculate_expectation_and_variance(combiinstance) 87 | else: 88 | if typ == "Gauss": 89 | nodes, weights = cp.generate_quadrature(typi, op.distributions_joint, rule="G") 90 | elif typ == "sparseGauss": 91 | op.set_grid(GaussLegendreGrid(a, b)) 92 | combiinstance = StandardCombi(a, b, operation=op) 93 | combiinstance.perform_combi(1, testi+1, op.get_expectation_variance_Function()) 94 | nodes, weights = combiinstance.get_points_and_weights() 95 | nodes = nodes.T 96 | E, Var = op.calculate_expectation_and_variance_for_weights(nodes, weights) 97 | 98 | # ~ print(f"E: {E}, Var: {Var}\n") 99 | # ~ print("reference E and Var: ", reference_expectation, reference_variance) 100 | 101 | tmpdir = os.getenv("XDG_RUNTIME_DIR") 102 | results_path = tmpdir + "/uqtestG.npy" 103 | solutions_data = [] 104 | if os.path.isfile(results_path): 105 | solutions_data = list(np.load(results_path, allow_pickle=True)) 106 | 107 | if multiple_evals is None: 108 | err_E = abs((E - reference_expectation) / reference_expectation) 109 | err_Var = abs((Var - reference_variance) / 
reference_variance) 110 | num_evals = f.get_f_dict_size() 111 | 112 | print("evals, relative errors:", num_evals, err_E, err_Var) 113 | 114 | result_data = (num_evals, typid, err_E, err_Var) 115 | 116 | if all([any([d[i] != result_data[i] for i in range(2)]) for d in solutions_data]): 117 | solutions_data.append(result_data) 118 | np.save(results_path, solutions_data) 119 | 120 | return num_evals 121 | else: 122 | solutions = op.calculate_multiple_expectation_and_variance(multiple_evals) 123 | for num_evals, E, Var in solutions: 124 | err_E = abs((E - reference_expectation) / reference_expectation) 125 | err_Var = abs((Var - reference_variance) / reference_variance) 126 | 127 | print("evals, relative errors:", num_evals, err_E, err_Var) 128 | 129 | result_data = (num_evals, typid, err_E, err_Var) 130 | 131 | if all([any([d[i] != result_data[i] for i in range(2)]) for d in solutions_data]): 132 | solutions_data.append(result_data) 133 | np.save(results_path, solutions_data) 134 | return f.get_f_dict_size() 135 | 136 | 137 | evals_end = 4000 138 | # ~ evals_end = 25000 139 | max_time = 30 140 | # ~ max_time = 560 141 | 142 | # For testing 143 | # ~ skip_types = ("sparseGauss", "adaptiveLagrange", "BSpline", "adaptiveHO") 144 | skip_types = () 145 | assert all([typ in types for typ in skip_types]) 146 | 147 | for typid in reversed(range(len(types))): 148 | typ = types[typid] 149 | print("") 150 | if typ in skip_types: 151 | print("Skipping", typ) 152 | continue 153 | print("Calculations for", typ) 154 | testi = 0 155 | start_time = time.time() 156 | evals_num = run_test(testi, typid, evals_end=evals_end, max_time=max_time) 157 | while evals_num < evals_end and time.time() - start_time < max_time: 158 | testi = testi+1 159 | print(f"last evals: {evals_num}, testi {testi}") 160 | evals_num = run_test(testi, typid, exceed_evals=evals_num) 161 | -------------------------------------------------------------------------------- /UQ/GFunctionPlotErrors.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plotter 3 | import sys 4 | import os 5 | 6 | # ~ expectation_ylim = [10**-7, 10**-1] 7 | # ~ expectation_ylim = [10**-4, 1] 8 | expectation_ylim = None 9 | 10 | tmpdir = os.getenv("XDG_RUNTIME_DIR") 11 | results_path = tmpdir + "/uqtestG.npy" 12 | assert os.path.isfile(results_path) 13 | solutions_data = list(np.load(results_path, allow_pickle=True)) 14 | 15 | typ_descs = ("full Gauß", "Trapez", "HighOrder", "BSpline", "Lagrange", "sparse Gauß", "modified_basis Trapez") 16 | 17 | datas = [[] for _ in typ_descs] 18 | for v in solutions_data: 19 | (num_evals, typid, err_E, err_Var) = v 20 | typid = int(typid) 21 | if typid < 0 or typid >= len(typ_descs): 22 | continue 23 | if num_evals < 21: 24 | # No adaptive sparse grid results 25 | continue 26 | datas[typid].append((num_evals, err_E, err_Var)) 27 | 28 | # datas should be sorted here 29 | for typid in range(len(typ_descs)): 30 | datas[typid] = np.array(datas[typid]).T 31 | 32 | figure = plotter.figure(1, figsize=(11,11)) 33 | # ~ figure = plotter.figure(1, figsize=(11,5)) 34 | figure.canvas.set_window_title('G-Function Errors') 35 | 36 | legend_shown = False 37 | for i,desc in enumerate(("E", "Var")): 38 | plotter.subplot(2, 1, 1 + i) 39 | for typid, typdesc in enumerate(typ_descs): 40 | if len(datas[typid]) < 1: 41 | print("No points for", typdesc) 42 | continue 43 | plotter.plot(datas[typid][0], datas[typid][i + 1], ".-", label=typdesc) 44 | 
plotter.xlabel('function evaluations') 45 | plotter.ylabel(f"{desc} relative error") 46 | if desc == "E" and expectation_ylim is not None: 47 | # Change the height because perfect solutions sometimes appear 48 | plotter.ylim(*expectation_ylim) 49 | plotter.yscale("log") 50 | plotter.xscale("log") 51 | if not legend_shown: 52 | # ~ plotter.legend(loc="lower left") 53 | plotter.legend(loc="best") 54 | legend_shown = True 55 | plotter.grid(True) 56 | 57 | 58 | fileName = os.path.splitext(sys.argv[0])[0] + '.pdf' 59 | plotter.savefig(fileName, format='pdf') 60 | 61 | plotter.show() 62 | -------------------------------------------------------------------------------- /UQ/PredatorPrey/AllStepsPlotErrors.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plotter 3 | import sys 4 | import os 5 | 6 | tmpdir = os.getenv("XDG_RUNTIME_DIR") 7 | results_path = tmpdir + "/uqtest.npy" 8 | assert os.path.isfile(results_path) 9 | solutions_data = list(np.load(results_path, allow_pickle=True)) 10 | 11 | typ_descs = ("full grid Gauß", "Trapez", "HighOrder", "nonadaptive Trapez") 12 | 13 | datas = [[] for _ in typ_descs] 14 | for v in solutions_data: 15 | (num_evals, typid, mean_errs) = v 16 | typid = int(typid) 17 | if typid < 0 or typid >= len(typ_descs): 18 | continue 19 | if num_evals > 900: 20 | # Gauss reference 21 | continue 22 | datas[typid].append((num_evals, *mean_errs)) 23 | 24 | for typid in range(len(typ_descs)): 25 | datas[typid] = np.array(datas[typid]).T 26 | 27 | mean_err_descs = ("E prey relative", "E predator relative", "P10 prey", "P10 predator", 28 | "P90 prey", "P90 predator", "Var prey", "Var predator") 29 | 30 | figure = plotter.figure(1, figsize=(13,10)) 31 | figure.canvas.set_window_title('Predator Prey All Steps Errors') 32 | 33 | legend_shown = False 34 | for i,desc in enumerate(mean_err_descs): 35 | plotter.subplot(4, 2, 1 + i) 36 | for typid, typdesc in enumerate(typ_descs): 37 | if len(datas[typid]) < 1: 38 | print("No points for", typdesc) 39 | continue 40 | plotter.plot(datas[typid][0], datas[typid][i + 1], ".-", label=typdesc) 41 | plotter.xlabel('function evaluations') 42 | plotter.ylabel(f'{desc} mean error') 43 | plotter.yscale("log") 44 | if not legend_shown: 45 | plotter.legend(loc="upper right") 46 | legend_shown = True 47 | plotter.grid(True) 48 | 49 | fileName = os.path.splitext(sys.argv[0])[0] + '.pdf' 50 | plotter.savefig(fileName, format='pdf') 51 | 52 | plotter.show() 53 | -------------------------------------------------------------------------------- /UQ/PredatorPrey/Functionplot.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created on 27.02.2015 3 | 4 | Uncertainty quantification with the Stochastic collocation approach for a predator & prey model (Lotka & Voltera) 5 | 6 | @author: Florian Kuenzner 7 | ''' 8 | 9 | import chaospy as cp 10 | import numpy as np 11 | import scipy.integrate as ode 12 | import matplotlib.pyplot as plotter 13 | import sys 14 | import time 15 | import os 16 | from math import isclose 17 | 18 | # Load spatially adaptive sparse grid related files 19 | sys.path.append('../src/') 20 | from Function import * 21 | from spatiallyAdaptiveSingleDimension2 import * 22 | from ErrorCalculator import * 23 | from GridOperation import * 24 | 25 | 26 | #predator = coyote 27 | #prey = sheep 28 | 29 | #initial parameters: sheep/coyote model!! 
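# The parameters below plug into the classic Lotka-Volterra system that f()
# further down integrates (a sketch; P = predator, S = prey):
#   dP/dt = (-coyoteDeathRate + augmentation * S) * P
#   dS/dt = ( sheepBirthRate - voracity * P) * S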
30 | coyoteDeathRate = 0.0005 #death rate of coyote 31 | sheepBirthRate = 0.005 #birth rate of sheep 32 | voracity = 0.00012 #the voracity rate (when a predator meets sheep and kills it) (Gefraessigkeit) 33 | augmentation = 0.002*voracity #the augmentation rate (when a coyote meets sheep and a new coyote grows) (Vermehrung) 34 | 35 | sheeps_Px0 = 2000 #initial population size of sheep population 36 | coyote_Px0 = 50 #initial population size of coyote population 37 | 38 | T = 70*365 # end of simulation 39 | NT = int(0.01 * T) # number of time steps 40 | 41 | # Standard deviations 42 | sigma_voracity = 0.000002 # no uncertainty: 0.000000001, uncertainty: 0.000001 43 | sigma_sheeps_Px0 = 1 44 | sigma_coyote_Px0 = 5 45 | 46 | # Maximum PCE polynomial degree 47 | poly_deg_max = 1 48 | 49 | # Distributions information to be passed to the UncertaintyQuantification Operation 50 | distris = [ 51 | ("Normal", voracity, sigma_voracity), 52 | # ~ ("Normal", sheeps_Px0, sigma_sheeps_Px0), 53 | ("Normal", coyote_Px0, sigma_coyote_Px0) 54 | ] 55 | dim = len(distris) 56 | # Normal distribution requires infinite boundaries 57 | a = np.array([-np.inf for _ in range(dim)]) 58 | b = np.array([np.inf for _ in range(dim)]) 59 | 60 | # population model definition: as an initial value problem 61 | def f(t, pX): 62 | ''' 63 | ODE formulation of the predator & prey model. 64 | 65 | Parameters 66 | ---------- 67 | pX : array[2], pX[0] is the population size of predator 68 | 69 | pX[1] is the population size of prey 70 | 71 | t : is the time 72 | 73 | f.predatorDeathRate : death rate of predator 74 | f.preyBirthRate : birth rate of prey 75 | f.voracity : the voracity rate (when predator meets prey and kills it) 76 | f.augmentation : the augmentation rate (when predator meets prey and a new predator grows) 77 | ''' 78 | predatorPopulation, preyPopulation = pX 79 | 80 | predator = (-f.predatorDeathRate + f.augmentation*preyPopulation)*predatorPopulation 81 | prey = (f.preyBirthRate - f.voracity*predatorPopulation)*preyPopulation 82 | 83 | return [predator, prey] 84 | 85 | time_points = np.linspace(0, T, NT+1) 86 | 87 | def static_var(varname, value): 88 | def decorate(func): 89 | setattr(func, varname, value) 90 | return func 91 | return decorate 92 | 93 | @static_var("counter", 0) 94 | def solver(voracity, Px0, f): 95 | #set the parameter 96 | f.preyBirthRate = sheepBirthRate 97 | f.predatorDeathRate = coyoteDeathRate 98 | f.voracity = voracity 99 | f.augmentation = augmentation 100 | 101 | #progress bar 102 | solver.counter += 1 103 | if solver.counter % 100 == 0: 104 | sys.stdout.write(".") 105 | 106 | #solves the population model 107 | #u = ode.odeint(f, Px0, time_points) 108 | #u = ode.solve_ivp(f, [0, T], Px0, method='BDF', t_eval=time_points) 109 | u = ode.solve_ivp(f, [0, T], Px0, method='RK45', t_eval=time_points) 110 | 111 | return u 112 | 113 | measure_start = time.time() 114 | 115 | # Create a Function that can be used for refining 116 | def get_solver_values(input_values): 117 | # ~ voracity_sample, sheep_Px0_sample, coyote_Px0_sample = input_values 118 | voracity_sample, coyote_Px0_sample = input_values 119 | sheep_Px0_sample = sheeps_Px0 120 | # y contains the predator solutions and prey solutions for all time values 121 | y = solver(voracity_sample, [coyote_Px0_sample, sheep_Px0_sample], f).y 122 | return np.concatenate(y) 123 | problem_function = FunctionCustom(get_solver_values, output_dim=len(time_points) * 2) 124 | 125 | # This function is later required to bring 
calculated values into the right shape 126 | def reshape_result_values(vals): 127 | mid = int(len(vals) / 2) 128 | predators, preys = vals[:mid], vals[mid:] 129 | return np.array([predators, preys]).T 130 | 131 | op = UncertaintyQuantification(problem_function, distris, a, b, dim=dim) 132 | 133 | pa, pb = op.get_boundaries(0.01) 134 | tplen = len(time_points) 135 | sheep_dims = np.linspace(tplen, 2 * tplen - 1, tplen, dtype=int) 136 | # ~ sheep_dims = [sheep_dims[0], sheep_dims[1]] 137 | HOME = os.getenv("HOME") 138 | print(f"Plotting {len(sheep_dims)} result values") 139 | problem_function.plot(pa, pb, points_per_dim=51, plotdimensions=sheep_dims, 140 | filename=HOME + "/tmp_mem/prey", consistent_axes=True, show_plot=False) 141 | # for p in prey_*.png; do i=${p%.png}; i=${i#prey_}; i=$((i-256)); mv $p prey_${i}.png; done 142 | # ffmpeg -f image2 -framerate 20 -i prey_%d.png -c:v libvpx-vp9 -b:v 2M -pass 1 -an -f webm /dev/null 143 | # ffmpeg -f image2 -framerate 20 -i prey_%d.png -c:v libvpx-vp9 -b:v 2M -pass 2 output.webm 144 | -------------------------------------------------------------------------------- /UQ/PredatorPrey/PredatorPreyCommon.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Created on 27.02.2015 3 | 4 | Uncertainty quantification with the Stochastic collocation approach for a predator & prey model (Lotka & Volterra) 5 | 6 | @author: Florian Kuenzner 7 | ''' 8 | 9 | import chaospy as cp 10 | import numpy as np 11 | import scipy.integrate as ode 12 | import matplotlib.pyplot as plotter 13 | import sys 14 | import time 15 | import os 16 | from math import isclose, isinf 17 | 18 | # Load spatially adaptive sparse grid related files 19 | sys.path.append('../../src/') 20 | from Function import * 21 | from spatiallyAdaptiveSingleDimension2 import * 22 | from ErrorCalculator import * 23 | from GridOperation import * 24 | 25 | 26 | #predator = coyote 27 | #prey = sheep 28 | 29 | #initial parameters: sheep/coyote model!! 30 | coyoteDeathRate = 0.0005 #death rate of coyote 31 | sheepBirthRate = 0.005 #birth rate of sheep 32 | voracity = 0.00012 #the voracity rate (when a predator meets sheep and kills it) (Gefraessigkeit) 33 | augmentation = 0.002*voracity #the augmentation rate (when a coyote meets sheep and a new coyote grows) (Vermehrung) 34 | 35 | sheeps_Px0 = 2000 #initial population size of sheep population 36 | coyote_Px0 = 50 #initial population size of coyote population 37 | 38 | T = 70*365 # end of simulation 39 | NT = int(0.01 * T) # number of time steps 40 | 41 | # Standard deviations 42 | sigma_voracity = 0.000002 # no uncertainty: 0.000000001, uncertainty: 0.000001 43 | sigma_sheeps_Px0 = 1 44 | sigma_coyote_Px0 = 5 45 | 46 | # Maximum PCE polynomial degree 47 | poly_deg_max = 1 48 | 49 | # population model definition: as an initial value problem 50 | def f(t, pX): 51 | ''' 52 | ODE formulation of the predator & prey model. 53 | 54 | Parameters 55 | ---------- 56 | pX : array[2], pX[0] is the population size of predator 57 | 58 | pX[1] is the population size of prey 59 | 
60 | t : is the time 61 | 62 | f.predatorDeathRate : death rate of predator 63 | f.preyBirthRate : birth rate of prey 64 | f.voracity : the voracity rate (when predator meets prey and kill it) 65 | f.augmentation : the augmentation rate (when predator meets prey and a new predator growth) 66 | ''' 67 | predatorPopulation, preyPopulation = pX 68 | 69 | predator = (-f.predatorDeathRate + f.augmentation*preyPopulation)*predatorPopulation 70 | prey = (f.preyBirthRate - f.voracity*predatorPopulation)*preyPopulation 71 | 72 | return [predator, prey] 73 | 74 | time_points = np.linspace(0, T, NT+1) 75 | 76 | def static_var(varname, value): 77 | def decorate(func): 78 | setattr(func, varname, value) 79 | return func 80 | return decorate 81 | 82 | @static_var("counter", 0) 83 | def solver(voracity, Px0, f): 84 | #set the parameter 85 | f.preyBirthRate = sheepBirthRate 86 | f.predatorDeathRate = coyoteDeathRate 87 | f.voracity = voracity 88 | f.augmentation = augmentation 89 | 90 | #progress bar 91 | solver.counter += 1 92 | if solver.counter % 100 == 0: 93 | sys.stdout.write(".") 94 | 95 | #solves the population model 96 | #u = ode.odeint(f, Px0, time_points) 97 | #u = ode.solve_ivp(f, [0, T], Px0, method='BDF', t_eval=time_points) 98 | u = ode.solve_ivp(f, [0, T], Px0, method='RK45', t_eval=time_points) 99 | 100 | return u 101 | 102 | def get_solver_value3D(input_values): 103 | assert all([not isinf(v) for v in input_values]) 104 | voracity_sample, sheep_Px0_sample, coyote_Px0_sample = input_values 105 | if voracity_sample <= 0 or coyote_Px0_sample <= 0 or sheep_Px0_sample <= 0: 106 | print("negative input values") 107 | return 0 108 | # y contains the predator solutions and prey solutions for all time values 109 | y = solver(voracity_sample, [coyote_Px0_sample, sheep_Px0_sample], f).y 110 | assert len(y[0]) == len(y[1]) == len(time_points), y.shape 111 | return y 112 | 113 | def get_solver_value2D(input_values): 114 | assert all([not isinf(v) for v in input_values]) 115 | voracity_sample, coyote_Px0_sample = input_values 116 | sheep_Px0_sample = sheeps_Px0 117 | if voracity_sample <= 0 or coyote_Px0_sample <= 0: 118 | print("negative input values") 119 | return 0 120 | # y contains the predator solutions and prey solutions for all time values 121 | y = solver(voracity_sample, [coyote_Px0_sample, sheep_Px0_sample], f).y 122 | assert len(y[0]) == len(y[1]) == len(time_points), y.shape 123 | return y 124 | -------------------------------------------------------------------------------- /UQ/PredatorPrey/SinglestepPlotErrors.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plotter 3 | import sys 4 | import os 5 | 6 | tmpdir = os.getenv("XDG_RUNTIME_DIR") 7 | results_path = tmpdir + "/uqtestSD.npy" 8 | assert os.path.isfile(results_path) 9 | solutions_data = list(np.load(results_path, allow_pickle=True)) 10 | 11 | typ_descs = ("full grid Gauß", "Trapez", "HighOrder", "full grid Fejer", "trans BSpline", "Lagrange", "sparse Gauß", "trans Trapez") 12 | 13 | datas = [[] for _ in typ_descs] 14 | for v in solutions_data: 15 | (num_evals, timestep, typid, errs) = v 16 | typid = int(typid) 17 | if typid < 0 or typid >= len(typ_descs): 18 | continue 19 | # ~ if num_evals == 961: 20 | # Gauss reference solution 21 | # ~ continue 22 | if num_evals < 17: 23 | # No adaptive refinement points if lmax=3 24 | continue 25 | datas[typid].append((num_evals, *errs)) 26 | 27 | for typid in range(len(typ_descs)): 28 | datas[typid] = 
np.array(datas[typid]).T 29 | err_descs = ("E absolute", "E relative", "Var absolute", "Var relative") 30 | 31 | # ~ figure = plotter.figure(1, figsize=(13,10)) 32 | # ~ figure = plotter.figure(1, figsize=(13,6)) 33 | figure = plotter.figure(1, figsize=(11,11)) 34 | figure.canvas.set_window_title('Predator Prey Single Step Errors') 35 | 36 | legend_shown = False 37 | for i,desc in enumerate(err_descs): 38 | if not i & 1: 39 | continue 40 | # ~ plotter.subplot(4, 1, 1 + (i-1)//2) 41 | plotter.subplot(2, 1, 1 + (i-1)//2) 42 | for typid, typdesc in enumerate(typ_descs): 43 | if len(datas[typid]) < 1: 44 | print("No points for", typdesc) 45 | continue 46 | plotter.plot(datas[typid][0], datas[typid][i + 1], ".-", label=typdesc) 47 | plotter.xlabel('function evaluations') 48 | plotter.ylabel(f'{desc} error') 49 | plotter.yscale("log") 50 | plotter.xscale("log") 51 | if not legend_shown: 52 | plotter.legend(loc="upper right") 53 | legend_shown = True 54 | plotter.grid(True) 55 | 56 | fileName = os.path.splitext(sys.argv[0])[0] + '.pdf' 57 | plotter.savefig(fileName, format='pdf') 58 | 59 | plotter.show() 60 | -------------------------------------------------------------------------------- /UQ/PredatorPrey/gauss_2D_solutions.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obersteiner/sparseSpACE/0790f350e859e7bdf225dd013bf1f6e83553cf69/UQ/PredatorPrey/gauss_2D_solutions.npy -------------------------------------------------------------------------------- /UQ/PredatorPrey/step210_2D_solutions.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obersteiner/sparseSpACE/0790f350e859e7bdf225dd013bf1f6e83553cf69/UQ/PredatorPrey/step210_2D_solutions.npy -------------------------------------------------------------------------------- /UQ/PredatorPrey/step25_2D_solutions.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obersteiner/sparseSpACE/0790f350e859e7bdf225dd013bf1f6e83553cf69/UQ/PredatorPrey/step25_2D_solutions.npy -------------------------------------------------------------------------------- /UQ/README.md: -------------------------------------------------------------------------------- 1 | This folder contains Python programs for testing the single-dimension spatially 2 | adaptive refinement with the UncertaintyQuantification grid 3 | operation. 4 | 5 | 6 | ## Error Plots 7 | 8 | The Python programs with the `CalculateErrors.py` or a similar suffix 9 | calculate solutions 10 | for a test function with various integration methods and numbers of evaluations 11 | and then save relative or absolute deviations between the results 12 | and a reference solution to a temporary file in `$XDG_RUNTIME_DIR`. 13 | Programs with the `PlotErrors.py` suffix create a plot which shows the calculated 14 | errors for the respective function. 15 | Before rerunning the tests after changing the code, 16 | the previous errors need to be removed. 
17 | ```sh 18 | # Calculate errors for the G-function test 19 | python3 GFunctionCalculateErrors.py 20 | # Plot the errors 21 | python3 GFunctionPlotErrors.py 22 | # Remove the calculated errors 23 | rm $XDG_RUNTIME_DIR/uqtest* 24 | ``` 25 | The errors are saved to a file instead of being plotted directly by the error 26 | calculation programs 27 | so that the calculations can be aborted at any point in time 28 | and the errors calculated so far can still be plotted; 29 | since the programs may be used during testing, 30 | they could abort due to a crash. 31 | 32 | `$XDG_RUNTIME_DIR` is the path to a tmpfs file system on many Linux systems; 33 | on other operating systems the path for saving and loading 34 | the errors may need to be changed 35 | in the error calculation and plot Python programs. 36 | 37 | Tests for the Predator-Prey model are located in a subfolder 38 | because there are many of them. 39 | The `ErrorsOverTime.py` program calculates solutions and errors 40 | for a fixed number of evaluations and quadrature method, 41 | and shows them on plots. 42 | 43 | 44 | ## Other Test Programs 45 | 46 | `TestsUQ.py` contains many test cases. 47 | If it is `import`ed in a Jupyter notebook, 48 | the program plots functions, refinement objects, sparse grids and more. 49 | -------------------------------------------------------------------------------- /UQ/TestTruncatedNormal.py: -------------------------------------------------------------------------------- 1 | import chaospy as cp 2 | import numpy as np 3 | import math 4 | import json 5 | import os 6 | from sys import path 7 | path.append('../') 8 | from Function import * 9 | from spatiallyAdaptiveExtendSplit import * 10 | from ErrorCalculator import * 11 | model = FunctionUQ() 12 | #a = [parameter1_min, parameter2_min, parameter3_min] 13 | #b = [parameter1_max, parameter2_max, parameter3_max] 14 | #grid = TruncatedNormalDistributionGrid(mean=[2], std_dev=[0.5]) 15 | #grid.setCurrentArea([0],[100],[3]) 16 | grid = TruncatedNormalDistributionGrid(mean=[0.3], std_dev=[0.03], global_a=[-100],global_b=[100]) 17 | grid.setCurrentArea([-100],[100],[3]) 18 | print("grid",grid.get_points_and_weights()) 19 | points, weights = grid.get_points_and_weights() 20 | print(points[0][0]) 21 | print(weights) 22 | print(sum([math.sin(float(points[d][0])) * float(weights[d]) for d in range(len(points))])) 23 | 24 | -------------------------------------------------------------------------------- /UQ/function_uq_solutions.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obersteiner/sparseSpACE/0790f350e859e7bdf225dd013bf1f6e83553cf69/UQ/function_uq_solutions.npy -------------------------------------------------------------------------------- /UQ/quickstart.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # This is a simple quickstart Python program to show how to use chaospy 4 | # to quantify the uncertainty of a simple function with a discontinuity. 5 | # 6 | # The goal is to create a sparse adaptive surrogate that can also be used 7 | # instead of the model() function. 
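# In outline, the code below
#   1. builds a spatially adaptive sparse grid over the parameter domain,
#   2. evaluates the model at the resulting quadrature nodes and weights,
#   3. fits a general polynomial chaos expansion (gPCE) with chaospy and
#      reads the mean and standard deviation off the expansion.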
8 | # 9 | # Uncertain parameter: 10 | # - parameter1 11 | # - parameter2 12 | # - parameter3 13 | # 14 | # Quantity of interest: 15 | # - some value 16 | # 17 | # Author: Florian Kuenzner 18 | # 19 | ############################################################################## 20 | 21 | 22 | import chaospy as cp 23 | import numpy as np 24 | import math 25 | import json 26 | import os 27 | from sys import path 28 | path.append('../') 29 | from Function import * 30 | from spatiallyAdaptiveExtendSplit import * 31 | from ErrorCalculator import * 32 | 33 | ################################################################################################# 34 | # parameter setup 35 | parameter1 = 0.3 # 0.3 36 | parameter1_var = 0.03 37 | parameter1_min = 0.1 #0.1 38 | parameter1_max = 0.3 #0.5 39 | parameter2 = 1.0 40 | parameter2_var = 0.5 #0.03 41 | parameter2_min = 1.0 #0.8 42 | parameter2_max = 1.2 #1.2 43 | parameter3 = 1.6 44 | parameter3_var = 0.3 45 | parameter3_min = 1.4 #1.4 46 | parameter3_max = 1.8 #1.8 47 | 48 | ################################################################################################# 49 | # setup uncertain parameter 50 | parameter1Dist = cp.Uniform(parameter1_min, parameter1_max) 51 | #parameter1Dist = cp.Normal(parameter1, parameter1_var) 52 | parameter2Dist = cp.Uniform(parameter2_min, parameter2_max) 53 | #parameter2Dist = cp.Normal(parameter2, parameter2_var) 54 | parameter3Dist = cp.Uniform(parameter3_min, parameter3_max) 55 | 56 | dist = cp.J(parameter1Dist, parameter2Dist, parameter3Dist) 57 | 58 | ################################################################################################# 59 | # generate nodes and weights 60 | #q = 3 # number of collocation points for each dimension 61 | #nodes, weights = cp.generate_quadrature(q, dist, rule="G") 62 | model = FunctionUQ() 63 | a = np.array([parameter1_min, parameter2_min, parameter3_min]) 64 | b = np.array([parameter1_max, parameter2_max, parameter3_max]) 65 | grid = GaussLegendreGrid(a,b,3) 66 | errorOperator2=ErrorCalculatorExtendSplit() 67 | adaptiveCombiInstanceExtend = SpatiallyAdaptiveExtendScheme(a, b,0,grid,version=0) 68 | adaptiveCombiInstanceExtend.performSpatiallyAdaptiv(1,2,model,errorOperator2,10**-10, do_plot=False) 69 | nodes, weights = adaptiveCombiInstanceExtend.get_points_and_weights() 70 | nodes_transpose = list(zip(*nodes)) 71 | 72 | ################################################################################################# 73 | # propagate the uncertainty 74 | value_of_interests = [model(node) for node in nodes] 75 | value_of_interests = np.asarray(value_of_interests) 76 | 77 | ################################################################################################# 78 | # generate orthogonal polynomials for the distribution 79 | OP = cp.orth_ttr(3, dist) 80 | 81 | ################################################################################################# 82 | # generate the general polynomial chaos expansion polynomial 83 | gPCE = cp.fit_quadrature(OP, nodes_transpose, weights, value_of_interests) 84 | 85 | ################################################################################################# 86 | # calculate statistics 87 | E = cp.E(gPCE, dist) 88 | StdDev = cp.Std(gPCE, dist) 89 | first_order_sobol_indices = cp.Sens_m(gPCE, dist) 90 | print(first_order_sobol_indices) 91 | #print the statistics 92 | print("mean: %f" % E) 93 | print("stddev: %f" % StdDev) 94 | -------------------------------------------------------------------------------- 
/UQ/quickstart2.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # This is a simple quickstart python program to show how to use chaospy 4 | # to quantify the uncertainty of a simple function with a discontinuity. 5 | # 6 | # The goal is to create a sparse adaptive surrogate that can also be used 7 | # instead of the model() function. 8 | # 9 | # Uncertain parameter: 10 | # - parameter1 11 | # - parameter2 12 | # - parameter3 13 | # 14 | # Quantity of interest: 15 | # - some value 16 | # 17 | # Author: Florian Kuenzner 18 | # 19 | ############################################################################## 20 | 21 | 22 | import chaospy as cp 23 | import numpy as np 24 | import math 25 | import json 26 | import os 27 | from sys import path 28 | path.append('../') 29 | from Function import * 30 | from spatiallyAdaptiveExtendSplit import * 31 | from ErrorCalculator import * 32 | 33 | ################################################################################################# 34 | # parameter setup 35 | parameter1 = 0.3 # 0.3 36 | parameter1_var = 0.03 37 | parameter1_min = 0.1 #0.1 38 | parameter1_max = 0.3 #0.5 39 | parameter2 = 1.0 40 | parameter2_var = 0.5 #0.03 41 | parameter2_min = 1.0 #0.8 42 | parameter2_max = 1.2 #1.2 43 | parameter3 = 1.6 44 | parameter3_var = 0.3 45 | parameter3_min = 1.4 #1.4 46 | parameter3_max = 1.8 #1.8 47 | 48 | ################################################################################################# 49 | # setup uncertain parameter 50 | #parameter1Dist = cp.Uniform(parameter1_min, parameter1_max) 51 | parameter1Dist = cp.Normal(parameter1, parameter1_var) 52 | #parameter2Dist = cp.Uniform(parameter2_min, parameter2_max) 53 | parameter2Dist = cp.Normal(parameter2, parameter2_var) 54 | #parameter3Dist = cp.Uniform(parameter3_min, parameter3_max) 55 | parameter3Dist = cp.Normal(parameter3, parameter3_var) 56 | dist = cp.J(parameter1Dist, parameter2Dist, parameter3Dist) 57 | 58 | ################################################################################################# 59 | # generate nodes and weights 60 | #q = 3 # number of collocation points for each dimension 61 | #nodes, weights = cp.generate_quadrature(q, dist, rule="G") 62 | mean=[parameter1,parameter2,parameter3] 63 | std_dev=[parameter1_var, parameter2_var, parameter3_var] 64 | a = [0.2,-1,0] 65 | b = [0.4,3,3] 66 | model = FunctionUQNormal2(FunctionUQ(), mean, std_dev, a, b) 67 | 68 | grid = TruncatedNormalDistributionGrid(a=a,b=b,dim=3,mean=mean,std_dev=std_dev) 69 | errorOperator2=ErrorCalculatorExtendSplit() 70 | adaptiveCombiInstanceExtend = SpatiallyAdaptiveExtendScheme(a, b,2,grid,version=0) 71 | adaptiveCombiInstanceExtend.performSpatiallyAdaptiv(1,2,model,errorOperator2,10**-10, do_plot=False) 72 | nodes, weights = adaptiveCombiInstanceExtend.get_points_and_weights() 73 | print("Number of points:", len(nodes)) 74 | print("Sum of weights:", sum(weights)) 75 | weights = np.asarray(weights) * 1.0/sum(weights) 76 | nodes_transpose = list(zip(*nodes)) 77 | 78 | ################################################################################################# 79 | # propagate the uncertainty 80 | value_of_interests = [model(node) for node in nodes] 81 | value_of_interests = np.asarray(value_of_interests) 82 | print("Mean", np.inner(weights, value_of_interests)) 83 | 
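# Since the weights were normalized to sum to 1 above, this inner product is
# the plain quadrature estimate of E[model(X)] under the truncated normal
# weight; it can be checked against the gPCE mean computed below.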
################################################################################################# 84 | # generate orthogonal polynomials for the distribution 85 | OP = cp.orth_ttr(3, dist) 86 | 87 | ################################################################################################# 88 | # generate the general polynomial chaos expansion polynomial 89 | gPCE = cp.fit_quadrature(OP, nodes_transpose, weights, value_of_interests) 90 | 91 | ################################################################################################# 92 | # calculate statistics 93 | E = cp.E(gPCE, dist) 94 | StdDev = cp.Std(gPCE, dist) 95 | 96 | #print the stastics 97 | print("mean: %f" % E) 98 | print("stddev: %f" % StdDev) 99 | -------------------------------------------------------------------------------- /UQ/quickstart_mixed.py: -------------------------------------------------------------------------------- 1 | ############################################################################## 2 | # 3 | # This is a simple quickstart python program to show how to use chaospy 4 | # to quantify the uncertainty of a simple function with a discontinuity. 5 | # 6 | # The goal is to create a sparse adaptive surrogate that can also be used 7 | # instead of the model() function. 8 | # 9 | # Uncertain parameter: 10 | # - parameter1 11 | # - parameter2 12 | # - parameter3 13 | # 14 | # Quantity of interest: 15 | # - some value 16 | # 17 | # Author: Florian Kuenzner 18 | # 19 | ############################################################################## 20 | 21 | 22 | import chaospy as cp 23 | import numpy as np 24 | import math 25 | import json 26 | import os 27 | from sys import path 28 | path.append('../') 29 | from Function import * 30 | from spatiallyAdaptiveExtendSplit import * 31 | from ErrorCalculator import * 32 | 33 | ################################################################################################# 34 | # parameter setup 35 | parameter1 = 0.3 # 0.3 36 | parameter1_var = 0.03 37 | parameter1_min = 0.1 #0.1 38 | parameter1_max = 0.3 #0.5 39 | parameter2 = 1.0 40 | parameter2_var = 0.5 #0.03 41 | parameter2_min = 1.0 #0.8 42 | parameter2_max = 1.2 #1.2 43 | parameter3 = 1.6 44 | parameter3_var = 0.3 45 | parameter3_min = 1.4 #1.4 46 | parameter3_max = 1.8 #1.8 47 | 48 | ################################################################################################# 49 | # setup uncertain parameter 50 | #parameter1Dist = cp.Uniform(parameter1_min, parameter1_max) 51 | parameter1Dist = cp.Normal(parameter1, parameter1_var) 52 | #parameter2Dist = cp.Uniform(parameter2_min, parameter2_max) 53 | parameter2Dist = cp.Normal(parameter2, parameter2_var) 54 | #parameter3Dist = cp.Uniform(parameter3_min, parameter3_max) 55 | parameter3Dist = cp.Normal(parameter3, parameter3_var) 56 | dist = cp.J(parameter1Dist, parameter2Dist, parameter3Dist) 57 | 58 | ################################################################################################# 59 | # generate nodes and weights 60 | #q = 3 # number of collocation points for each dimension 61 | #nodes, weights = cp.generate_quadrature(q, dist, rule="G") 62 | mean=[parameter1,parameter2,parameter3] 63 | std_dev=[parameter1_var, parameter2_var, parameter3_var] 64 | a = [0.2,-1.3,0] 65 | b = [0.4,3,3] 66 | # weight function that applies the normal distribution in x and y and leaves z unchanged (uniform distributed) 67 | weight_function = lambda x : np.prod([norm.pdf(x=x[d], loc=mean[d], scale=std_dev[d]) / (norm.cdf(b[d],loc=mean[d], 
scale=std_dev[d]) - norm.cdf(a[d],loc=mean[d], scale=std_dev[d])) for d in range(2)]) 68 | model = FunctionUQWeighted(FunctionUQ(), weight_function) 69 | gridX = TruncatedNormalDistributionGrid1D(a=a[0],b=b[0],mean=parameter1,std_dev=parameter1_var) 70 | gridY = TruncatedNormalDistributionGrid1D(a=a[1],b=b[1],mean=parameter2,std_dev=parameter2_var) 71 | gridZ = GaussLegendreGrid1D(a=a[2],b=b[2]) 72 | grid = MixedGrid(a=a,b=b,dim=3,grids=[gridX,gridY,gridZ]) 73 | errorOperator2=ErrorCalculatorExtendSplit() 74 | adaptiveCombiInstanceExtend = SpatiallyAdaptiveExtendScheme(a, b,2,grid,version=0) 75 | adaptiveCombiInstanceExtend.performSpatiallyAdaptiv(1,2,model,errorOperator2,10**-10, do_plot=False) 76 | nodes, weights = adaptiveCombiInstanceExtend.get_points_and_weights() 77 | print("Number of points:", len(nodes)) 78 | print("Sum of weights:", sum(weights)) 79 | weights = np.asarray(weights) * 1.0/sum(weights) 80 | nodes_transpose = list(zip(*nodes)) 81 | 82 | ################################################################################################# 83 | # propagate the uncertainty 84 | value_of_interests = [model(node) for node in nodes] 85 | value_of_interests = np.asarray(value_of_interests) 86 | print("Mean", np.inner(weights, value_of_interests)) 87 | ################################################################################################# 88 | # generate orthogonal polynomials for the distribution 89 | OP = cp.orth_ttr(3, dist) 90 | 91 | ################################################################################################# 92 | # generate the general polynomial chaos expansion polynomial 93 | gPCE = cp.fit_quadrature(OP, nodes_transpose, weights, value_of_interests) 94 | 95 | ################################################################################################# 96 | # calculate statistics 97 | E = cp.E(gPCE, dist) 98 | StdDev = cp.Std(gPCE, dist) 99 | 100 | #print the stastics 101 | print("mean: %f" % E) 102 | print("stddev: %f" % StdDev) 103 | -------------------------------------------------------------------------------- /configure.sh: -------------------------------------------------------------------------------- 1 | # make the hook runnable 2 | chmod +x git_hooks/pre-commit 3 | 4 | # configure git to use the new hook - it will stay with the repository 5 | git config core.hooksPath "./git_hooks" 6 | 7 | # install sparseSpACE 8 | pip3 install -e . 
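# (the -e flag installs sparseSpACE in editable/development mode, so local
# changes to the sources take effect without reinstalling)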
9 | -------------------------------------------------------------------------------- /git_hooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check for .ipynb files with output cells; 4 | # ask the user if they really want to commit them 5 | ipynb_files=`git diff --staged --name-only | awk "/.ipynb/"` 6 | have_output="" 7 | 8 | if [ -n "$ipynb_files" ]; then 9 | while read filename; do 10 | if [ $(git show :$filename | grep -cm1 "\"output_type\":") -ge 1 ]; then 11 | have_output="${have_output}"$'\n'"$filename" 12 | fi 13 | done <<< $ipynb_files 14 | # use here string to run in main shell: https://stackoverflow.com/a/16854326 15 | fi 16 | 17 | if [ -n "$have_output" ]; then 18 | # https://stackoverflow.com/a/10015707 19 | exec < /dev/tty # allows us to grab user input 20 | 21 | echo "You appear to be committing Jupyter notebooks" 22 | echo "that contain output:" 23 | echo "$have_output"$'\n' 24 | while true; do 25 | read -p "Do you want to clean the notebook first (y/n) or abort (a) ?: " resp 26 | case $resp in 27 | [Yy]* ) exit_status=1; git reset $have_output;jupyter nbconvert --ClearOutputPreprocessor.enabled=True --inplace $have_output; git add $have_output; break;; 28 | [Nn]* ) exit_status=0; break;; 29 | [Aa]* ) exit_status=1; break;; 30 | * ) echo "Please answer y or n";; 31 | esac 32 | done 33 | 34 | exit $exit_status 35 | fi 36 | 37 | 38 | # This is a pre-commit hook that ensures attempts to commit files that are 39 | # are larger than $limit to your _local_ repo fail, with a helpful error message. 40 | 41 | # You can override the default limit of 5MB by supplying the environment variable: 42 | # GIT_FILE_SIZE_LIMIT=42000000 git commit -m "This commit is allowed file sizes up to 42MB" 43 | 44 | # Maximum file size limit in bytes 45 | limit=${GIT_FILE_SIZE_LIMIT:-5000000} # Default 5MB 46 | limitInMB=$(( $limit / 10**6 )) 47 | 48 | # Move to the repo root so git files paths make sense 49 | repo_root=$( git rev-parse --show-toplevel ) 50 | cd $repo_root 51 | 52 | empty_tree=$( git hash-object -t tree /dev/null ) 53 | 54 | if git rev-parse --verify HEAD > /dev/null 2>&1 55 | then 56 | against=HEAD 57 | else 58 | against="$empty_tree" 59 | fi 60 | 61 | # Set split so that for loop below can handle spaces in file names by splitting on line breaks 62 | IFS=' 63 | ' 64 | 65 | echo "Checking staged file sizes" 66 | shouldFail=false 67 | for file in $( git diff-index --cached --name-only $against ); do 68 | file_size=$( ls -la $file | awk '{ print $5 }') 69 | if [ "$file_size" -gt "$limit" ]; then 70 | echo File $file is $(( $file_size / 10**6 )) MB, which is larger than our configured limit of $limitInMB MB 71 | shouldFail=true 72 | fi 73 | done 74 | 75 | if $shouldFail 76 | then 77 | echo If you really need to commit this file, you can override the size limit by setting the GIT_FILE_SIZE_LIMIT environment variable, e.g. GIT_FILE_SIZE_LIMIT=42000000 for 42MB. Or, commit with the --no-verify switch to skip the check entirely, you naughty boy! 78 | echo Commit aborted 79 | exit 1; 80 | fi 81 | 82 | -------------------------------------------------------------------------------- /install_chaospy.sh: -------------------------------------------------------------------------------- 1 | git clone https://github.com/jonathf/chaospy.git 2 | cd chaospy 3 | pip3 install . 
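# (installs chaospy from the cloned sources; a released version can
# alternatively be installed with a plain `pip3 install chaospy`)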
4 | -------------------------------------------------------------------------------- /ipynb/Extend_Split_Strategy_Tutorial.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Extend-Split Strategy" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "The Extend-Split Strategy is an effective method to spatially adapt to a function with highly local behaviour for moderate dimensionality (<= 10). The adaptation process is governed by two operations: the split operation and the extend operation. The scheme starts with a refinement graph that performs a standard decomposition of the domain into $2^{dim}$ parts. It is then possible to perform either a split or an extend for each of these subregions.\n", 15 | "The split operation splits a subregion into $2^{dim}$ equally sized subregions and at the same time doubles the number of points in each dimension (each subblock receives the same number of points per dimension as the original area). This corresponds to increasing the truncation parameter or the minimum level of the combination scheme locally. In the refinement graph this corresponds to a quadtree refinement (in higher dimensions octree, ...).\n", 16 | "\n", 17 | "The extend operation, on the other hand, increases the target level and therefore might increase the overall number of grids in the combination scheme. However, the refinement graph stays constant during this operation.\n", 18 | "\n", 19 | "As a result, splitting enables increasingly fine-grained adaptation, since the subregions in which we can refine get smaller and smaller. However, splitting gets more and more costly the higher the dimension of the problem is, and it does not add new, very anisotropic subspaces to the scheme. These shortcomings are, however, targeted by the extend operation. Therefore, we have to consider the trade-off between splitting and extending to get the best adaptation." 20 | ] 21 | }, 22 | { 23 | "cell_type": "markdown", 24 | "metadata": {}, 25 | "source": [ 26 | "This is the example from the Tutorial which refines the grid towards a discontinuous function. Here we define number_of_refinements_before_extend=2, which means that for each initial subregion we first perform two splits before we start extending, i.e. the depth of the quadtree refinement tree that we traverse is limited to 3 (if we already count the initial $2^{dim}$ splitting as depth 1).\n", 27 | "\n", 28 | "We enable plotting to show you the evolution of the adaptive refinement. You can see the refinement graph which performs the quadtree refinement and the corresponding component and sparse grids which result from the combination." 
29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "%matplotlib inline\n", 38 | "from sys import path\n", 39 | "path.append('../src/')\n", 40 | "\n", 41 | "import numpy as np\n", 42 | "from spatiallyAdaptiveExtendSplit import *\n", 43 | "from Function import *\n", 44 | "from ErrorCalculator import *\n", 45 | "\n", 46 | "# dimensionality of problem\n", 47 | "dim = 2\n", 48 | "\n", 49 | "# integration boundaries\n", 50 | "a = np.zeros(dim)\n", 51 | "b = np.ones(dim)\n", 52 | "\n", 53 | "# define function\n", 54 | "midpoint = np.ones(dim) * 0.5\n", 55 | "coefficients = np.array([ 10**0 * (d+1) for d in range(dim)])\n", 56 | "f = GenzDiscontinious(border=midpoint,coeffs=coefficients)\n", 57 | "# plot function\n", 58 | "f.plot(np.ones(dim)*a,np.ones(dim)*b)\n", 59 | "\n", 60 | "# reference solution for the integration\n", 61 | "reference_solution = f.getAnalyticSolutionIntegral(a,b)\n", 62 | "\n", 63 | "# error estimator\n", 64 | "errorOperator=ErrorCalculatorExtendSplit()\n", 65 | "\n", 66 | "# used grid\n", 67 | "grid=TrapezoidalGrid(a=a, b=b)\n", 68 | "\n", 69 | "# NEW! define operation which shall be performed in the combination technique\n", 70 | "from GridOperation import *\n", 71 | "operation = Integration(f=f, grid=grid, dim=dim, reference_solution = reference_solution)\n", 72 | "\n", 73 | "# initialize Extend-Split Strategy\n", 74 | "adaptiveCombiInstanceExtend = SpatiallyAdaptiveExtendScheme(a, b, number_of_refinements_before_extend=2, version=0, operation=operation)\n", 75 | "\n", 76 | "# perform spatially adaptive integration\n", 77 | "adaptiveCombiInstanceExtend.performSpatiallyAdaptiv(minv=1,maxv=2,f=f,errorOperator=errorOperator,tol=10**-2, do_plot=True)" 78 | ] 79 | }, 80 | { 81 | "cell_type": "markdown", 82 | "metadata": {}, 83 | "source": [ 84 | "A new feature of the sparseSpACE framework is that you can enable automatic_extend_split, which means that no depth has to be specified and the algorithm itself tries to decide optimally between split and extend operations. This method generally performs quite well, but of course it cannot beat a perfect setting of the parameter for all situations. Below you can see an example with automatic refinement decision." 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": null, 90 | "metadata": {}, 91 | "outputs": [], 92 | "source": [ 93 | "%matplotlib inline\n", 94 | "from sys import path\n", 95 | "path.append('../src/')\n", 96 | "\n", 97 | "import numpy as np\n", 98 | "from spatiallyAdaptiveExtendSplit import *\n", 99 | "from Function import *\n", 100 | "from ErrorCalculator import *\n", 101 | "\n", 102 | "# dimensionality of problem\n", 103 | "dim = 2\n", 104 | "\n", 105 | "# integration boundaries\n", 106 | "a = np.zeros(dim)\n", 107 | "b = np.ones(dim)\n", 108 | "\n", 109 | "# define function\n", 110 | "midpoint = np.ones(dim) * 0.5\n", 111 | "coefficients = np.array([ 10**0 * (d+1) for d in range(dim)])\n", 112 | "f = GenzDiscontinious(border=midpoint,coeffs=coefficients)\n", 113 | "# plot function\n", 114 | "f.plot(np.ones(dim)*a,np.ones(dim)*b)\n", 115 | "\n", 116 | "# reference solution for the integration\n", 117 | "reference_solution = f.getAnalyticSolutionIntegral(a,b)\n", 118 | "\n", 119 | "# error estimator\n", 120 | "errorOperator=ErrorCalculatorExtendSplit()\n", 121 | "\n", 122 | "# used grid\n", 123 | "grid=TrapezoidalGrid(a=a, b=b)\n", 124 | "\n", 125 | "# NEW! 
define operation which shall be performed in the combination technique\n", 126 | "from GridOperation import *\n", 127 | "operation = Integration(f=f, grid=grid, dim=dim, reference_solution = reference_solution)\n", 128 | "\n", 129 | "# initialize Extend-Split Strategy\n", 130 | "adaptiveCombiInstanceExtend = SpatiallyAdaptiveExtendScheme(a, b, automatic_extend_split=True, version=0, operation=operation)\n", 131 | "\n", 132 | "# perform spatially adaptive integration\n", 133 | "adaptiveCombiInstanceExtend.performSpatiallyAdaptiv(minv=1,maxv=2,f=f,errorOperator=errorOperator,tol=10**-2, do_plot=True)" 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": null, 139 | "metadata": {}, 140 | "outputs": [], 141 | "source": [] 142 | } 143 | ], 144 | "metadata": { 145 | "kernelspec": { 146 | "display_name": "Python 3", 147 | "language": "python", 148 | "name": "python3" 149 | }, 150 | "language_info": { 151 | "codemirror_mode": { 152 | "name": "ipython", 153 | "version": 3 154 | }, 155 | "file_extension": ".py", 156 | "mimetype": "text/x-python", 157 | "name": "python", 158 | "nbconvert_exporter": "python", 159 | "pygments_lexer": "ipython3", 160 | "version": "3.6.7" 161 | } 162 | }, 163 | "nbformat": 4, 164 | "nbformat_minor": 2 165 | } 166 | -------------------------------------------------------------------------------- /ipynb/Extrapolation/.gitignore: -------------------------------------------------------------------------------- 1 | Extrapolation_Test_Node2.ipynb 2 | Extrapolation_Test_Node3.ipynb 3 | Extrapolation_Test_Node4.ipynb 4 | Extrapolation_Test_Node5.ipynb 5 | Extrapolation_Test_Node6.ipynb 6 | Extrapolation_Test_Node7.ipynb 7 | *.pdf -------------------------------------------------------------------------------- /ipynb/Extrapolation/Results/5d/hard/error_comparison_FunctionExpVar_5d.csv: -------------------------------------------------------------------------------- 1 | "name"|"Trapezoidal Grid" 2 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 3 | "error"|0.396649936740354|0.24562733822281402|0.14938604260087318|0.09009293483480452|0.05476685680539761|0.034239097628482096|0.03070944881068538|0.019073833015190722|0.01753424169389095|0.01106278854626297|0.00977275460182836 4 | 5 | "name"|"Trapezoidal Grid (Rebalancing)" 6 | "num_points"|1053|2133|4674|6852|11713|14923|17813|26912|34733|37107|38997|46827|66295|68401|69670|79046|82070|84954|89092|101776 7 | "error"|0.396649936740354|0.24562733822281402|0.10511097865985752|0.06182945273971141|0.03746165596672135|0.02669607430024279|0.019254798971295117|0.010813371605969957|0.008848640421506349|0.007870516684699846|0.006930107320694101|0.005421080844863813|0.003979741896718347|0.0037969170173041134|0.0036064211403185054|0.0029913704578736766|0.002723778458979531|0.002467846217756864|0.0022645612646960966|0.0021007706816150717 8 | 9 | "name"|"HighOrder Grid" 10 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 11 | "error"|0.396649936740354|0.24562733822281402|0.14938604260087318|0.09009293483480452|0.05476685680539761|0.034239097628482096|0.03070944881068538|0.019073833015190722|0.01753424169389095|0.01106278854626297|0.00977275460182836 12 | 13 | "name"|"Simpson Grid (Balanced)" 14 | "num_points"|1053|2943|6633|13083|23525|48273|86373|144105 15 | "error"|0.23237189976032058|0.12561582450664532|0.06683107986138681|0.035131755134120035|0.01832329346877526|0.009387146858625517|0.004774493128586577|0.0024165197115064707 16 | 17 | "name"|"Extrapolation Grid 
(Unit, Romberg, Default Romberg)" 18 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 19 | "error"|0.2197903531359624|0.08154892119225399|0.020159387550904073|0.009580881835788979|0.016418845152733508|0.02400302050385361|0.006414753305064136|0.009787609256906804|0.003739726436492119|0.004214592143308993|0.0016933416587365802 20 | 21 | "name"|"Extrapolation Grid (Unit, Romberg, Default Romberg, Balanced)" 22 | "num_points"|1053|2943|6633|13083|23525|48273|86373|144105 23 | "error"|0.2197903531359624|0.12288989784905813|0.07367500525904591|0.04848079530301008|0.03565086487582747|0.018639801475330064|0.009121416222445222|0.004382513421662626 24 | 25 | "name"|"Extrapolation Grid (Grouped, Romberg, Default Romberg)" 26 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 27 | "error"|0.2197903531359624|0.08380454549138205|0.017025754784255165|0.0014996706944168103|0.0013839888106299547|0.0016760501015327112|0.003493530037828596|0.005324298370619873|0.0023457461729131657|0.002373481294462687|0.0012102668815735518 28 | 29 | "name"|"Extrapolation Grid (Grouped, Romberg, Default Romberg, Balanced)" 30 | "num_points"|1053|2943|6633|13083|23525|48273|86373|144105 31 | "error"|0.2197903531359624|0.11674874169442806|0.06126219098853869|0.03182148904749482|0.01642880286214987|0.013947268808576552|0.00697327193278896|0.00342721370227439 32 | 33 | "name"|"Extrapolation Grid (Optimized, Trapezoid, Romberg, Balanced)" 34 | "num_points"|1053|2943|6633|13083|23525|48273|86373|144105 35 | "error"|0.2197903531359624|0.11674874169442806|0.06126219098853869|0.03182148904749482|0.01642880286214987|0.008280111388935696|0.0041651684273470435|0.0020825232922796477 36 | 37 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Default Romberg)" 38 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 39 | "error"|0.2197903531359624|0.08380454549138205|0.029210973450586986|0.01596812389683122|0.011887511650126137|0.005819831118475838|0.005487511166968884|0.0021863553333592023|0.003031429808504127|0.001098663677159717|0.0016080979729634626 40 | 41 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Default Romberg, Balanced)" 42 | "num_points"|1053|2943|6633|13083|23525|48273|86373|144105 43 | "error"|0.2197903531359624|0.11674874169442806|0.06126219098853869|0.03182148904749482|0.01642880286214987|0.008280111388935696|0.0041651684273470435|0.0020825232922796477 44 | 45 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Lagrange Romberg)" 46 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 47 | "error"|0.2197903531359624|0.058673303505940355|0.588559337114416|0.7696098171573003|0.4110026211466631|0.5701499739064453|0.2922382973342552|1.1197665999680921|0.7765422269004161|0.12601905479375075|0.49715258611079094 48 | 49 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Lagrange Full Romberg)" 50 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 51 | "error"|0.2197903531359624|0.058673303505940355|0.10767617272004992|0.240776399530358|0.3074181067328494|0.06070937510002827|0.3353039831143012|0.33614055212765437|0.00010802915079644038|0.005256631753627783|0.0007346736984626956 52 | 53 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Simpson Romberg, Balanced)" 54 | "num_points"|1053|2943|6633|13083|23525|48273|86373|144105 55 | 
"error"|0.1712223821375325|0.03905235752044711|0.1268715042327877|0.1859675904922451|0.22049387859074798|0.10669271663518876|0.041053238863009156|0.012141093181414986 56 | 57 | "name"|"Trapezoidal Grid (Standard Combi) lmin=1" 58 | "num_points"|1053|3753|12033|36033|102785 59 | "error"|0.396649936740354|0.241176969744211|0.1417811554675812|0.0811563338912229|0.04545552676151654 60 | 61 | "name"|"Trapezoidal Grid (Standard Combi) lmin=2" 62 | "num_points"|15625|60625 63 | "error"|0.17596825331152388|0.0911485008985462 64 | 65 | "name"|"Gauss-Legendre Grid (Standard Combi) lmin=1" 66 | "num_points"|1863|9423|38943|142143|476255 67 | "error"|0.008498826595176645|0.0021973546649434184|0.00047907640308864075|9.284483956295553e-05|1.654718871568761e-05 68 | 69 | "name"|"Gauss-Legendre Grid (Standard Combi) lmin=2" 70 | "num_points"|28125|158125 71 | "error"|0.0022844431158155487|0.0005191435668319144 72 | 73 | -------------------------------------------------------------------------------- /ipynb/Extrapolation/Results/5d/hard/error_comparison_GenzOszillatory_5d.csv: -------------------------------------------------------------------------------- 1 | "name"|"Trapezoidal Grid" 2 | "num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98685|106029 3 | "error"|838284.5355964048|266943.6016530066|1083450.6551691282|111254.24966035943|206336.1962837815|20801.7665136088|68232.51377034815|81917.78809075465|47362.5781177481|60159.14491498848|12116.66558589434|13339.806976004407|24862.543459764318|5944.155856550841|14511.147222852134|8812.400261206738 4 | 5 | "name"|"Trapezoidal Grid (Rebalancing)" 6 | "num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98685|106029 7 | "error"|838284.5355964048|266943.6016530066|1083450.6551691282|111254.24966035943|206336.1962837815|20801.7665136088|68232.51377034815|81917.78809075465|47362.5781177481|60159.14491498848|12116.66558589434|13339.806976004407|24862.543459764318|5944.155856550841|14511.147222852134|8812.400261206738 8 | 9 | "name"|"HighOrder Grid" 10 | "num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98685|106029 11 | "error"|838284.5355964048|266943.6016530066|1083450.6551691282|111254.24966035943|206336.1962837815|20801.7665136088|68232.51377034815|81917.78809075465|47362.5781177481|60159.14491498848|12116.66558589434|13339.806976004407|24862.543459764318|5944.155856550841|14511.147222852134|8812.400261206738 12 | 13 | "name"|"Simpson Grid (Balanced)" 14 | "num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98037|105867 15 | "error"|4246032.626121045|676663.7382632128|5342690.766804621|637861.2869882564|4469236.222687692|2025146.2254672635|2765542.928373759|1234919.845293123|1248554.2107313175|1303296.8903562513|1275174.4662360447|447866.2256791707|1158154.0833905505|473595.4359433374|474054.96841383|442248.3024713176 16 | 17 | "name"|"Extrapolation Grid (Unit, Romberg, Default Romberg)" 18 | "num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98685|106029 19 | "error"|4614027.60927505|383026.71552111325|6544089.278624306|1015618.1025494853|6233369.681340285|3220286.315222088|4278271.701895793|2154734.825840156|2234910.9048388377|2372198.1636880743|2397840.995692617|988017.6499307851|1367127.797920813|986007.7682890227|696630.1376805041|1288780.369078335 20 | 21 | "name"|"Extrapolation Grid (Unit, Romberg, Default Romberg, Balanced)" 22 | 
"num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98037|105867 23 | "error"|4614027.60927505|383026.71552111325|6544089.278624306|1015618.1025494853|6233369.681340285|3220286.315222088|4278271.701895793|2154734.825840156|2234910.9048388377|2372198.1636880743|2397840.995692617|988017.6499307851|1367127.797920813|1044338.0291582609|1048280.1279075469|979446.2323564176 24 | 25 | "name"|"Extrapolation Grid (Grouped, Romberg, Default Romberg)" 26 | "num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98685|106029 27 | "error"|4614027.60927505|383026.71552111325|6544089.2786243055|1015618.1025494852|6233369.681340285|3220286.3152220855|4278271.701895792|2154734.8258401565|2234910.904838836|2372198.1636880734|2397840.9956926145|988017.6499307851|2075902.7081289478|899529.0068468597|735994.0159586391|1970075.3658239131 28 | 29 | "name"|"Extrapolation Grid (Grouped, Romberg, Default Romberg, Balanced)" 30 | "num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98037|105867 31 | "error"|4614027.60927505|383026.71552111325|6544089.2786243055|1015618.1025494852|6233369.681340285|3220286.3152220855|4278271.701895792|2154734.8258401565|2234910.904838836|2372198.1636880734|2397840.9956926145|988017.6499307851|2075902.7081289478|1024870.1317132035|1025803.3272426735|998086.6580776162 32 | 33 | "name"|"Extrapolation Grid (Optimized, Trapezoid, Romberg, Balanced)" 34 | "num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98037|105867 35 | "error"|4614027.60927505|383026.71552111325|6544089.2786243055|1015618.1025494852|6233369.681340285|3220286.3152220855|4278271.701895792|2154734.8258401565|2234910.904838836|2372198.1636880734|2397840.9956926145|988017.6499307851|2075902.7081289478|1024870.1317132035|1025803.3272426735|998374.6025891161 36 | 37 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Default Romberg)" 38 | "num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98685|106029 39 | "error"|4614027.60927505|383026.71552111325|6544089.2786243055|1015618.1025494852|6233369.681340285|3220286.3152220855|4278271.701895792|2154734.8258401565|2234910.904838836|2372198.1636880734|2397840.9956926145|988017.6499307851|2075902.7081289478|899529.0068468597|726700.529937869|1970075.3658239131 40 | 41 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Default Romberg, Balanced)" 42 | "num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98037|105867 43 | "error"|4614027.60927505|383026.71552111325|6544089.2786243055|1015618.1025494852|6233369.681340285|3220286.3152220855|4278271.701895792|2154734.8258401565|2234910.904838836|2372198.1636880734|2397840.9956926145|988017.6499307851|2075902.7081289478|1024870.1317132035|1025803.3272426735|998374.6025891161 44 | 45 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Lagrange Romberg)" 46 | "num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98685|106029 47 | "error"|4614027.60927505|383026.71552111325|6544089.2786243055|1015618.1025494852|6233369.681340285|3220286.3152220855|4278271.701895792|2154734.8258401565|2234910.904838836|2372198.1636880734|2397840.9956926145|988017.6499307851|1612597.7390774558|1481874.3422955077|982710.8714529442|1747573.110229107 48 | 49 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Lagrange Full Romberg)" 50 | 
"num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98685|106029 51 | "error"|4614027.60927505|383026.71552111325|6544089.2786243055|1015618.1025494852|6233369.681340285|3220286.3152220855|4278271.701895792|2154734.8258401565|2234910.904838836|2372198.1636880734|2397840.9956926145|988017.6499307851|1612597.7390774558|1481874.3422955077|982710.8714529442|1747573.110229107 52 | 53 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Simpson Romberg, Balanced)" 54 | "num_points"|1053|1809|2349|3501|5661|12405|14997|27381|32565|37749|45189|88965|95013|96525|98037|105867 55 | "error"|8756330.979374718|471199.3648874722|12252991.557086384|2454362.4550776733|13438550.45172695|9120768.796444962|11374860.558908029|6316906.6656367965|6712876.0599965835|7066963.802535067|7302127.210241157|3167080.763243932|6728895.603261036|3365046.6461327276|3404339.9356429935|3181292.82285813 56 | 57 | "name"|"Trapezoidal Grid (Standard Combi) lmin=1" 58 | "num_points"|1053|3753|12033|36033|102785 59 | "error"|838284.5355964048|497843.34104287846|104869.45777638748|402630.7541930353|823653.922223501 60 | 61 | "name"|"Trapezoidal Grid (Standard Combi) lmin=2" 62 | "num_points"|15625|60625 63 | "error"|1337.023885283859|3725.5151666806846 64 | 65 | "name"|"Gauss-Legendre Grid (Standard Combi) lmin=1" 66 | "num_points"|1863|9423|38943|142143|476255 67 | "error"|2539671.0515347514|3092864.9066235363|1772742.118288913|373332.3476968437|1666383.353319564 68 | 69 | "name"|"Gauss-Legendre Grid (Standard Combi) lmin=2" 70 | "num_points"|28125|158125 71 | "error"|2719.300246749878|8159.990773616541 72 | 73 | -------------------------------------------------------------------------------- /ipynb/Extrapolation/Results/5d/medium_hard/error_comparison_FunctionExpVar_5d.csv: -------------------------------------------------------------------------------- 1 | "name"|"Trapezoidal Grid" 2 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 3 | "error"|0.396649936740354|0.24562733822281402|0.14938604260087318|0.09009293483480452|0.05476685680539761|0.034239097628482096|0.03070944881068538|0.019073833015190722|0.01753424169389095|0.01106278854626297|0.00977275460182836 4 | 5 | "name"|"Trapezoidal Grid (Rebalancing)" 6 | "num_points"|1053|2133|4674|6852|11713|14923|17813|26912|34733|37107|38997|46827|66295|68401|69670|79046|82070|84954|89092|101776 7 | "error"|0.396649936740354|0.24562733822281402|0.10511097865985752|0.06182945273971141|0.03746165596672135|0.02669607430024279|0.019254798971295117|0.010813371605969957|0.008848640421506349|0.007870516684699846|0.006930107320694101|0.005421080844863813|0.003979741896718347|0.0037969170173041134|0.0036064211403185054|0.0029913704578736766|0.002723778458979531|0.002467846217756864|0.0022645612646960966|0.0021007706816150717 8 | 9 | "name"|"HighOrder Grid" 10 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 11 | "error"|0.396649936740354|0.24562733822281402|0.14938604260087318|0.09009293483480452|0.05476685680539761|0.034239097628482096|0.03070944881068538|0.019073833015190722|0.01753424169389095|0.01106278854626297|0.00977275460182836 12 | 13 | "name"|"Simpson Grid (Balanced)" 14 | "num_points"|1053|2943|6633|13083|23525|48273|86373|144105 15 | "error"|0.23237189976032058|0.12561582450664532|0.06683107986138681|0.035131755134120035|0.01832329346877526|0.009387146858625517|0.004774493128586577|0.0024165197115064707 16 | 17 | "name"|"Balanced Extrapolation Grid" 18 | 
"num_points"|11|61|71|241|331|1061|3385|7751|9133|18731|21857|40783|47541|81939|81949|154059 19 | "error"|0.05762862561278226|0.10776934338041388|0.021164013756310718|0.04288854017561028|0.014980494782564469|0.022667373834952986|0.014373996666269706|0.014157290081613372|0.010854362031726073|0.010766613300468908|0.009369067347068105|0.009334194605770785|0.008742374591491187|0.00872871041492207|0.005427222631987583|0.005176685746602683 20 | 21 | "name"|"Extrapolation Grid (Unit, Romberg, Default Romberg)" 22 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 23 | "error"|0.2197903531359624|0.08154892119225399|0.020159387550904073|0.009580881835788979|0.016418845152733508|0.02400302050385361|0.006414753305064136|0.009787609256906804|0.003739726436492119|0.004214592143308993|0.0016933416587365802 24 | 25 | "name"|"Extrapolation Grid (Unit, Romberg, Default Romberg, Balanced)" 26 | "num_points"|1053|2943|6633|13083|23525|48273|86373|144105 27 | "error"|0.2197903531359624|0.12288989784905813|0.07367500525904591|0.04848079530301008|0.03565086487582747|0.018639801475330064|0.009121416222445222|0.004382513421662626 28 | 29 | "name"|"Extrapolation Grid (Grouped, Romberg, Default Romberg)" 30 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 31 | "error"|0.2197903531359624|0.08380454549138205|0.017025754784255165|0.0014996706944168103|0.0013839888106299547|0.0016760501015327112|0.003493530037828596|0.005324298370619873|0.0023457461729131657|0.002373481294462687|0.0012102668815735518 32 | 33 | "name"|"Extrapolation Grid (Grouped, Romberg, Default Romberg, Balanced)" 34 | "num_points"|1053|2943|6633|13083|23525|48273|86373|144105 35 | "error"|0.2197903531359624|0.11674874169442806|0.06126219098853869|0.03182148904749482|0.01642880286214987|0.013947268808576552|0.00697327193278896|0.00342721370227439 36 | 37 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Default Romberg)" 38 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 39 | "error"|0.2197903531359624|0.08380454549138205|0.029210973450586986|0.01596812389683122|0.011887511650126137|0.005819831118475838|0.005487511166968884|0.0021863553333592023|0.003031429808504127|0.001098663677159717|0.0016080979729634626 40 | 41 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Default Romberg, Balanced)" 42 | "num_points"|1053|2943|6633|13083|23525|48273|86373|144105 43 | "error"|0.2197903531359624|0.11674874169442806|0.06126219098853869|0.03182148904749482|0.01642880286214987|0.008280111388935696|0.0041651684273470435|0.0020825232922796477 44 | 45 | "name"|"Extrapolation Grid (Grouped Optimized, Trapezoid, Romberg, Balanced)" 46 | "num_points"|1053|2943|6633|13083|23525|48273|86373|144105 47 | "error"|0.2197903531359624|0.11674874169442806|0.06126219098853869|0.03182148904749482|0.01642880286214987|0.008280111388935696|0.0041651684273470435|0.0020825232922796477 48 | 49 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Lagrange Romberg)" 50 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 51 | "error"|0.2197903531359624|0.058673303505940355|0.5885593371144164|0.7703232176668993|0.40921450665068804|0.5656876023669443|0.2889693050436305|1.101541366892433|0.7541824449302891|0.09413606451967782|0.5508065852685412 52 | 53 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Lagrange Full Romberg)" 54 | "num_points"|1053|2133|3897|6693|10870|16845|26197|38018|49188|71174|103278 55 | 
"error"|0.2197903531359624|0.058673303505940355|0.10767617272004992|0.240776399530358|0.3074181067328494|0.06070937510002827|0.3353039831143012|0.33614055212765437|0.00010802915079644038|0.005256631753627783|0.0007346736984626956 56 | 57 | "name"|"Extrapolation Grid (Grouped Optimized, Romberg, Simpson Romberg, Balanced)" 58 | "num_points"|1053|2943|6633|13083|23525|48273|86373|144105 59 | "error"|0.1712223821375325|0.03905235752044711|0.1268715042327877|0.1859675904922451|0.22049387859074798|0.10669271663518876|0.041053238863009156|0.012141093181414986 60 | 61 | "name"|"Trapezoidal Grid (Standard Combi) lmin=1" 62 | "num_points"|1053|3753|12033|36033|102785 63 | "error"|0.396649936740354|0.241176969744211|0.1417811554675812|0.0811563338912229|0.04545552676151654 64 | 65 | "name"|"Trapezoidal Grid (Standard Combi) lmin=2" 66 | "num_points"|15625|60625 67 | "error"|0.17596825331152388|0.0911485008985462 68 | 69 | "name"|"Gauss-Legendre Grid (Standard Combi) lmin=1" 70 | "num_points"|1863|9423|38943|142143|476255 71 | "error"|0.008498826595176645|0.0021973546649434184|0.00047907640308864075|9.284483956295553e-05|1.654718871568761e-05 72 | 73 | "name"|"Gauss-Legendre Grid (Standard Combi) lmin=2" 74 | "num_points"|28125|158125 75 | "error"|0.0022844431158155487|0.0005191435668319144 76 | 77 | -------------------------------------------------------------------------------- /ipynb/Results_Regression/example_csv.csv: -------------------------------------------------------------------------------- 1 | 0.25898877433836376,0.7725017140092266,0.7089768296801456,0.016141098117958143 2 | 0.2780458284278754,0.44904561956850964,0.8608952961507853,0.011693543103311442 3 | 0.049523993938629496,0.664326791699044,0.1878115082622014,0.21989154914069864 4 | 0.3678789606350046,0.34713837093733413,0.4287743543680754,0.060932187733489594 5 | 0.5474616839187536,0.13265611323198656,0.27788725148178683,0.0964116256125256 6 | 0.9303590109251649,0.5064944981013243,0.6965338520991526,0.005780021200787493 7 | 0.5315532349575761,0.2750552119400722,0.3776555455768291,0.057914710219274614 8 | 0.1492372826448839,0.05718363172330154,0.05376139219887943,0.565127581247551 9 | 0.10768144113784262,0.7565407516713646,0.2003738656782269,0.16975499341964534 10 | 0.323871148103834,0.47221323936131454,0.9214506334302047,0.008182495763913786 11 | 0.06760246812548432,0.29182554451507536,0.07832827152208122,0.47695002314950863 12 | 0.06427892393103096,0.498948363476288,0.8133390383167998,0.02063302431752126 13 | 0.11092311229889873,0.12449642868309652,0.5114811498014095,0.0914221715791726 14 | 0.2742389300524509,0.2445188855973729,0.689560803150972,0.028689044371602444 15 | 0.5572975695433214,0.9526660376202597,0.463515282992761,0.019814876031046206 16 | 0.21015959897114256,0.3406470298071137,0.5215758412769552,0.05800247523636859 17 | 0.3282750844282062,0.19742970350955547,0.8788928903196919,0.012656916761856086 18 | 0.6731415544579354,0.09333549868072244,0.63501910968475,0.01869133846086067 19 | 0.6949390098104471,0.47910074997957297,0.5042319013491303,0.020529213160830617 20 | 0.34102381982921726,0.506651506942535,0.9581500325629401,0.0065959511728425554 21 | -------------------------------------------------------------------------------- /ipynb/Tutorial_Extrapolation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Extrapolation\n", 8 | "\n", 9 | "This tutorial explains the calling conventions 
and various variants that can be used for extrapolation." 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": { 16 | "pycharm": { 17 | "is_executing": false 18 | } 19 | }, 20 | "outputs": [], 21 | "source": [ 22 | "import sparseSpACE\n", 23 | "from sparseSpACE.spatiallyAdaptiveSingleDimension2 import *\n", 24 | "from sparseSpACE.GridOperation import *\n", 25 | "\n", 26 | "# Settings\n", 27 | "dim = 2\n", 28 | "a = np.zeros(dim)\n", 29 | "b = np.ones(dim)\n", 30 | "max_tol = 10 ** (-5)\n", 31 | "max_evaluations = 10 ** 4\n", 32 | "\n", 33 | "coeffs = np.array([np.float64(i) for i in range(1, dim + 1)])\n", 34 | "midpoint = np.ones(dim) * 0.99\n", 35 | "f = GenzGaussian(coeffs, midpoint)\n", 36 | "\n", 37 | "# plot function\n", 38 | "f.plot(np.ones(dim)*a,np.ones(dim)*b)\n", 39 | "reference_solution = f.getAnalyticSolutionIntegral(a, b)\n", 40 | "errorOperator = ErrorCalculatorSingleDimVolumeGuided()" 41 | ] 42 | }, 43 | { 44 | "cell_type": "markdown", 45 | "metadata": {}, 46 | "source": [ 47 | "## Global Romberg Grid\n", 48 | "\n", 49 | "A Romberg grid consists of (multiple) containers that each contain some slices.\n", 50 | "If a container contains only one slice, then the slice is extrapolated separately according to the specified option.\n", 51 | "Otherwise the whole container is extrapolated using the method provided as a parameter.\n", 52 | "\n", 53 | "Each Romberg grid has three parameters that determine the type of extrapolation: \n", 54 | "* `slice_grouping`: This option determines how grid slices should be grouped into larger containers.\n", 55 | " * `UNIT`: Each slice has its own container.\n", 56 | " * `GROUPED`: Slices are grouped into containers that contain a multiple of 2 slices.\n", 57 | " * `GROUPED_OPTIMIZED`: Slices are grouped into containers that contain a multiple of 2 slices. This method also tries to maximize each container's size.\n", 58 | "* `slice_version`: This option determines the extrapolation type of unit slices.\n", 59 | " * `ROMBERG_DEFAULT`: sliced Romberg extrapolation.\n", 60 | " * `TRAPEZOID`: default trapezoidal rule without extrapolation.\n", 61 | " * `ROMBERG_DEFAULT_CONST_SUBTRACTION`: sliced Romberg extrapolation with subtraction of extrapolation constants.\n", 62 | "* `container_version`: This option determines the container type.\n", 63 | " * `ROMBERG_DEFAULT`: executes a default Romberg method inside this container.\n", 64 | " * `LAGRANGE_ROMBERG`: executes a default Romberg method inside this container while missing points are interpolated.\n", 65 | " * `LAGRANGE_FULL_GRID_ROMBERG`: the whole grid is understood as one big container. All missing points up to the maximal level are interpolated. Afterwards a default Romberg method is executed.\n", 66 | " * `SIMPSON_ROMBERG`: Instead of using the trapezoidal rule as a base rule, here the Simpson rule is used.\n", 67 | "\nAn example that combines some of these options is sketched below. " 68 | ] 69 | },
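{ "cell_type": "markdown", "metadata": {}, "source": [ "A sketch of one non-default configuration (all option values are taken from the lists above; any other combination can be configured the same way): slices are grouped into optimized containers and missing points are interpolated with Lagrange polynomials. This cell is only an illustration; the grid actually used in the rest of this tutorial is created in the cell after it." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# a minimal sketch of an alternative configuration (not used further below)\n", "grid_grouped_lagrange = GlobalRombergGrid(a=a, b=b, modified_basis=False, boundary=True,\n", " slice_grouping=SliceGrouping.GROUPED_OPTIMIZED,\n", " slice_version=SliceVersion.ROMBERG_DEFAULT,\n", " container_version=SliceContainerVersion.LAGRANGE_ROMBERG)" ] },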
70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "metadata": { 74 | "pycharm": { 75 | "is_executing": false, 76 | "name": "#%%\n" 77 | } 78 | }, 79 | "outputs": [], 80 | "source": [ 81 | "grid = GlobalRombergGrid(a=a, b=b, modified_basis=False, boundary=True,\n", 82 | " slice_grouping=SliceGrouping.UNIT,\n", 83 | " slice_version=SliceVersion.ROMBERG_DEFAULT,\n", 84 | " container_version=SliceContainerVersion.ROMBERG_DEFAULT)\n" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [ 91 | "Afterwards we create the grid operation and the spatially adaptive object.\n", 92 | "Another option that can be set is `force_balanced_refinement_tree`. \n", 93 | "If enabled, the refinement tree of each one-dimensional grid stripe is forced to be a balanced refinement tree.\n", 94 | "This means that each node either has zero or two children. " 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": { 101 | "pycharm": { 102 | "is_executing": false, 103 | "name": "#%%\n" 104 | } 105 | }, 106 | "outputs": [], 107 | "source": [ 108 | "balanced = False\n", 109 | "\n", 110 | "operation = Integration(f=f, grid=grid, dim=dim, reference_solution=reference_solution)\n", 111 | "adaptiveCombiInstanceSingleDim = SpatiallyAdaptiveSingleDimensions2(a, b, operation=operation, rebalancing=False,\n", 112 | " force_balanced_refinement_tree=balanced)" 113 | ] 114 | }, 115 | { 116 | "cell_type": "markdown", 117 | "metadata": {}, 118 | "source": [ 119 | "Finally, we perform the spatially adaptive refinement:" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": null, 125 | "metadata": { 126 | "pycharm": { 127 | "is_executing": false, 128 | "name": "#%%\n" 129 | } 130 | }, 131 | "outputs": [], 132 | "source": [ 133 | "adaptiveCombiInstanceSingleDim.performSpatiallyAdaptiv(1, 2, errorOperator,\n", 134 | " max_tol, max_evaluations=max_evaluations,\n", 135 | " do_plot=True)\n", 136 | "\n", 137 | "print(\"Number of points used in refinement:\", adaptiveCombiInstanceSingleDim.get_total_num_points())\n" 138 | ] 139 | } 140 | ], 141 | "metadata": { 142 | "kernelspec": { 143 | "display_name": "Python 3", 144 | "language": "python", 145 | "name": "python3" 146 | }, 147 | "language_info": { 148 | "codemirror_mode": { 149 | "name": "ipython", 150 | "version": 3 151 | }, 152 | "file_extension": ".py", 153 | "mimetype": "text/x-python", 154 | "name": "python", 155 | "nbconvert_exporter": "python", 156 | "pygments_lexer": "ipython3", 157 | "version": "3.6.9" 158 | }, 159 | "pycharm": { 160 | "stem_cell": { 161 | "cell_type": "raw", 162 | "metadata": { 163 | "collapsed": false 164 | }, 165 | "source": [] 166 | } 167 | } 168 | }, 169 | "nbformat": 4, 170 | "nbformat_minor": 1 171 | } 172 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.22.0 2 | scipy==1.4.1 3 | matplotlib==3.1.2 4 | dill==0.2.9 5 | scikit-learn==0.22 6 | sympy 7 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | from setuptools import setup 3 | 4 | # The directory containing this file 5 |
HERE = pathlib.Path(__file__).parent 6 | 7 | # The text of the README file 8 | README = (HERE / "README.md").read_text() 9 | 10 | # This call to setup() does all the work 11 | setup( 12 | name="sparseSpACE", 13 | version="1.1.0", 14 | description="sparseSpACE - the Sparse Grid Spatially Adaptive Combination Environment implements different variants of the spatially adaptive combination technique", 15 | long_description=README, 16 | long_description_content_type="text/markdown", 17 | url="https://github.com/obersteiner/sparseSpACE", 18 | author="Michael Obersteiner", 19 | author_email="michael.obersteiner@mytum.de", 20 | license="LGPL-3.0", 21 | classifiers=[ 22 | "Programming Language :: Python :: 3", 23 | "Programming Language :: Python :: 3.5", 24 | ], 25 | packages=["sparseSpACE"], 26 | include_package_data=True, 27 | install_requires=["numpy", "scipy","matplotlib","dill","scikit-learn","chaospy","sympy"], 28 | ) 29 | 30 | -------------------------------------------------------------------------------- /sparseSpACE/ComponentGridInfo.py: -------------------------------------------------------------------------------- 1 | class ComponentGridInfo(object): 2 | def __init__(self, levelvector, coefficient): 3 | self.levelvector = levelvector 4 | self.coefficient = coefficient -------------------------------------------------------------------------------- /sparseSpACE/DimAdaptiveCombi.py: -------------------------------------------------------------------------------- 1 | from sparseSpACE.StandardCombi import * 2 | from sparseSpACE.combiScheme import * 3 | from sparseSpACE.Grid import * 4 | 5 | 6 | # This class implements the dimension-adaptive combination technique 7 | class DimAdaptiveCombi(StandardCombi): 8 | # initialization 9 | # a = lower bound of integral; b = upper bound of integral 10 | # grid = specified grid (e.g.
Trapezoidal); 11 | def __init__(self, a, b, operation, norm=2, compute_no_cost: bool=False): 12 | self.log = logging.getLogger(__name__) 13 | self.dim = len(a) 14 | self.a = a 15 | self.b = b 16 | self.operation = operation 17 | self.combischeme = CombiScheme(self.dim) 18 | self.grid = self.operation.get_grid() 19 | self.norm = norm 20 | self.compute_no_cost = compute_no_cost 21 | assert (len(a) == len(b)) 22 | 23 | # standard dimension-adaptive combination scheme for quadrature 24 | # lmin = minimum level; lmax = target level 25 | # f = function to integrate; dim=dimension of problem 26 | def perform_combi(self, minv, maxv, tolerance, max_number_of_points: int=None): 27 | start = self.a 28 | end = self.b 29 | self.operation.initialize() 30 | assert maxv == 2 31 | # compute minimum and target level vector 32 | self.lmin = [minv for i in range(self.dim)] 33 | self.lmax = [maxv for i in range(self.dim)] 34 | real_integral = self.operation.get_reference_solution() 35 | assert(real_integral is not None) 36 | self.combischeme.init_adaptive_combi_scheme(maxv, minv) 37 | combiintegral = 0 38 | self.scheme = self.combischeme.getCombiScheme(self.lmin[0], self.lmax[0]) 39 | integral_dict = {} 40 | errors = [] # tracks the error evolution during the refinement procedure 41 | num_points = [] # tracks the number of points during the refinement procedure 42 | while True: 43 | combiintegral = 0 44 | self.scheme = self.combischeme.getCombiScheme(self.lmin[0], self.lmax[0], do_print=False) 45 | error_array = np.zeros(len(self.scheme)) 46 | # calculate integral for function self.operation.f 47 | for i, component_grid in enumerate(self.scheme): 48 | if tuple(component_grid.levelvector) not in integral_dict: 49 | integral = self.operation.grid.integrate(self.operation.f, component_grid.levelvector, start, end) 50 | integral_dict[tuple(component_grid.levelvector)] = integral 51 | else: 52 | integral = integral_dict[tuple(component_grid.levelvector)] 53 | combiintegral += integral * component_grid.coefficient 54 | # calculate errors 55 | for i, component_grid in enumerate(self.scheme): 56 | if self.combischeme.is_refinable(component_grid.levelvector): 57 | # as error estimator we use the error calculation from Hemcker and Griebel 58 | error_array[i] = self.calculate_surplus(component_grid, integral_dict) if self.combischeme.is_refinable(component_grid.levelvector) else 0 59 | #error_array[i] = abs(integral - real_integral) / abs(real_integral) / np.prod( 60 | # self.operation.grid.levelToNumPoints(component_grid.levelvector)) if self.combischeme.is_refinable(component_grid.levelvector) else 0 61 | do_refine = True 62 | max_points_reached = False if max_number_of_points is None else self.get_total_num_points() > max_number_of_points 63 | if max(abs(combiintegral - real_integral) / abs(real_integral)) < tolerance or max_points_reached: 64 | break 65 | print("Current combi integral:", combiintegral) 66 | print("Current relative error:", max(abs(combiintegral - real_integral) / abs(real_integral))) 67 | errors.append(max(abs(combiintegral - real_integral) / abs(real_integral))) 68 | num_points.append(self.get_total_num_points(distinct_function_evals=True)) 69 | while do_refine: 70 | grid_id = np.argmax(error_array) 71 | # print(error_array) 72 | print("Current error:", abs(combiintegral - real_integral) / abs(real_integral)) 73 | print("Refining", self.scheme[grid_id].levelvector) 74 | refined_dims = self.combischeme.update_adaptive_combi(self.scheme[grid_id].levelvector) 75 | do_refine = refined_dims == [] 76 | 
error_array[grid_id] = 0.0 77 | self.scheme = self.combischeme.getCombiScheme(self.lmin[0], self.lmax[0], do_print=True) 78 | for component_grid in self.scheme: 79 | for d in range(self.dim): 80 | self.lmax[d] = max(self.lmax[d], component_grid.levelvector[d]) 81 | print("Final scheme:") 82 | self.scheme = self.combischeme.getCombiScheme(self.lmin[0], self.lmax[0], do_print=True) 83 | print("CombiSolution", combiintegral) 84 | print("Analytic Solution", real_integral) 85 | print("Difference", abs(combiintegral - real_integral)) 86 | return self.scheme, abs(combiintegral - real_integral), combiintegral, errors, num_points 87 | 88 | def calculate_surplus(self, component_grid, integral_dict): 89 | assert self.combischeme.is_refinable(component_grid.levelvector) 90 | stencils = [] 91 | cost = 1 92 | for d in range(self.dim): 93 | if component_grid.levelvector[d] > self.lmin[d]: 94 | stencils.append([-1,0]) 95 | else: 96 | stencils.append([0]) 97 | cost *= 2**component_grid.levelvector[d] - 1 + 2 * int(self.grid.boundary) 98 | if self.compute_no_cost: 99 | cost = 1 100 | stencil_cross_product = get_cross_product(stencils) 101 | surplus = 0.0 102 | for stencil in stencil_cross_product: 103 | levelvector = np.array(component_grid.levelvector) + np.array(stencil) 104 | print(levelvector) 105 | integral = integral_dict[tuple(levelvector)] 106 | surplus += (-1)**sum(abs(np.array(stencil))) * integral 107 | error = LA.norm(surplus/cost,self.norm) 108 | print(error) 109 | return error 110 | -------------------------------------------------------------------------------- /sparseSpACE/ErrorCalculator.py: -------------------------------------------------------------------------------- 1 | import scipy.integrate 2 | import numpy as np 3 | import abc 4 | import logging 5 | from numpy import linalg as LA 6 | from math import copysign 7 | 8 | from sparseSpACE.Utils import LogUtility, print_levels, log_levels 9 | 10 | 11 | # This class is the general interface of an error estimator currently used by the algorithm 12 | class ErrorCalculator(object): 13 | # initialization 14 | def __init__(self, log_level: int = log_levels.WARNING, print_level: int = print_levels.NONE): 15 | self.log = logging.getLogger(__name__) 16 | self.is_global = False 17 | self.log_util = LogUtility(log_level=log_level, print_level=print_level) 18 | self.log_util.set_print_prefix('ErrorCalculator') 19 | self.log_util.set_log_prefix('ErrorCalculator') 20 | 21 | # calculates error for the function f and the integral information that was computed by the algorithm 22 | # this information contains the area specification and the approximated integral 23 | # current form is (approxIntegral,start,end) 24 | @abc.abstractmethod 25 | def calc_error(self, refine_object, norm): 26 | return 27 | 28 | 29 | # This error estimator does a surplus estimation. It outputs the absolute error. 
30 | class ErrorCalculatorSurplusCell(ErrorCalculator): 31 | def calc_error(self, refine_object, norm, volume_weights=None): 32 | error = LA.norm(self.calc_area_error(refine_object.sub_integrals), norm) 33 | return error 34 | 35 | def calc_area_error(self, sub_integrals): 36 | error = 0.0 37 | for sub_integral in sub_integrals: 38 | error += sub_integral[0] * sub_integral[1] 39 | return abs(error) 40 | 41 | 42 | class ErrorCalculatorSurplusCellPunishDepth(ErrorCalculatorSurplusCell): 43 | def calc_error(self, refine_object, norm, volume_weights=None): 44 | lower_bounds = np.array(refine_object.start) 45 | upper_bounds = np.array(refine_object.end) 46 | error = LA.norm(self.calc_area_error(refine_object.sub_integrals), norm) 47 | return max(error * np.prod(upper_bounds - lower_bounds)) 48 | 49 | 50 | class ErrorCalculatorExtendSplit(ErrorCalculator): 51 | def calc_error(self, refine_object, norm, volume_weights=None): 52 | if refine_object.switch_to_parent_estimation: 53 | return LA.norm(abs(refine_object.sum_siblings - refine_object.parent_info.previous_value), norm) / ( 54 | len(refine_object.value) ** (1 / norm)) 55 | else: 56 | return LA.norm(abs(refine_object.value - refine_object.parent_info.previous_value), norm) / ( 57 | len(refine_object.value) ** (1 / norm)) 58 | 59 | 60 | class ErrorCalculatorSingleDimVolumeGuided(ErrorCalculator): 61 | def calc_error(self, refine_object, norm, volume_weights=None): 62 | # pagoda-volume 63 | volumes = refine_object.volume 64 | if volume_weights is None: 65 | return LA.norm(abs(volumes), norm) / (len(volumes) ** (1 / norm)) 66 | # Normalized volumes 67 | return LA.norm(abs(volumes * volume_weights), norm) 68 | 69 | 70 | class ErrorCalculatorSingleDimVolumeGuidedPunishedDepth(ErrorCalculator): 71 | def calc_error(self, refineObj, norm): 72 | # width of refineObj: 73 | width = refineObj.end - refineObj.start 74 | # pagoda-volume 75 | volume = LA.norm(refineObj.volume * (width), norm) / (len(refineObj.volume) ** (1 / norm)) 76 | return abs(volume) 77 | 78 | 79 | class ErrorCalculatorSingleDimMisclassification(ErrorCalculator): 80 | def calc_error(self, refine_object, norm, volume_weights=None): 81 | volumes = refine_object.volume 82 | if volume_weights is None: 83 | # return LA.norm(abs(volumes), norm) 84 | return abs(volumes) 85 | # Normalized volumes 86 | # return LA.norm(abs(volumes * volume_weights), norm) 87 | return abs(volumes * volume_weights) 88 | 89 | 90 | class ErrorCalculatorSingleDimMisclassificationGlobal(ErrorCalculator): 91 | def __init__(self): 92 | super().__init__() 93 | self.is_global = True 94 | 95 | def calc_error(self, refine_object, norm, volume_weights=None): 96 | volumes = refine_object.volume 97 | if volume_weights is None: 98 | # return LA.norm(abs(volumes), norm) 99 | return abs(volumes) 100 | # Normalized volumes 101 | # return LA.norm(abs(volumes * volume_weights), norm) 102 | return abs(volumes * volume_weights) 103 | 104 | def calc_global_error(self, data, grid_scheme): 105 | samples = data 106 | f = lambda x: grid_scheme(x) 107 | values = f(samples) 108 | for d in range(0, grid_scheme.dim): 109 | refinement_dim = grid_scheme.refinement.get_refinement_container_for_dim(d) 110 | for refinement_obj in refinement_dim.refinementObjects: 111 | # get the misclassification rate between start and end of refinement_obj 112 | hits = sum((1 for i in range(0, len(values)) 113 | if refinement_obj.start <= samples[i][d] <= refinement_obj.end 114 | and copysign(1.0, values[i][0]) == copysign(1.0, 115 | grid_scheme.operation.validation_classes[i]))) 116 | 117 | misses = sum((1 for i in range(0, len(values)) 118 | if refinement_obj.start <= samples[i][d] <= refinement_obj.end 119 | and copysign(1.0, values[i][0]) != copysign(1.0, 120 | grid_scheme.operation.validation_classes[i]))) 121 | 122 | if hits + misses > 0: 123 | refinement_obj.add_volume( 124 | np.array(misses * (refinement_obj.end - refinement_obj.start))) 125 | else: 126 | # no data points were in this area 127 | refinement_obj.add_volume(np.array(0.0)) 128 |
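# For orientation, a minimal sketch of a custom estimator (hypothetical, not used anywhere in the framework): subclassing ErrorCalculator only requires implementing calc_error(); this variant ranks a refinement object purely by the volume of its domain and ignores the computed surpluses.
class ErrorCalculatorVolumeOnlyExample(ErrorCalculator):
    def calc_error(self, refine_object, norm, volume_weights=None):
        lower_bounds = np.array(refine_object.start)
        upper_bounds = np.array(refine_object.end)
        # larger subdomains are refined first, regardless of their local error
        return np.prod(upper_bounds - lower_bounds)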
-------------------------------------------------------------------------------- /sparseSpACE/Hierarchization.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sparseSpACE.Utils import * 3 | from sparseSpACE import Grid 4 | from typing import Tuple, Sequence, Callable 5 | from sparseSpACE.Function import * 6 | from scipy.linalg import solve_triangular 7 | 8 | class HierarchizationLSG(object): 9 | def __init__(self, grid): 10 | self.grid = grid 11 | 12 | def __call__(self, grid_values: Sequence[Sequence[float]], numPoints: Sequence[int], grid: Grid) -> Sequence[Sequence[float]]: 13 | self.grid = grid 14 | self.dim = len(numPoints) 15 | for d in range(self.dim): 16 | assert numPoints[d] == len(self.grid.get_coordinates_dim(d)) 17 | for d in range(self.dim): 18 | grid_values = self.hierarchize_poles_for_dim(grid_values, numPoints, d) 19 | return grid_values 20 | 21 | # this function applies a one-dimensional hierarchization (in dimension d) to the array grid_values with 22 | # numPoints (array) many points for each dimension 23 | def hierarchize_poles_for_dim(self, grid_values: Sequence[Sequence[float]], numPoints: Sequence[int], d: int) -> Sequence[Sequence[float]]: 24 | if numPoints[d] == 1: 25 | assert math.isclose(self.grid.get_basis(d, 0)(self.grid.get_coordinates_dim(d)[0]), 1.0) 26 | return grid_values 27 | self.dim = len(numPoints) 28 | offsets = np.array([int(np.prod(numPoints[d+1:])) for d in range(self.dim)]) 29 | numPoints_slice = np.array(numPoints) 30 | numPoints_slice[d] = 1 31 | value_length = np.shape(grid_values)[0] 32 | # create all indices in the (d-1)-dimensional slice 33 | point_indices = get_cross_product_range(numPoints_slice) 34 | # in the first dimension we need to fill it with the actual function values 35 | 36 | # create and fill matrix for linear system of equations 37 | # evaluate all basis functions at all grid points 38 | matrix = np.empty((numPoints[d], numPoints[d])) 39 | for i in range(numPoints[d]): 40 | for j in range(numPoints[d]): 41 | matrix[i, j] = self.grid.get_basis(d, j)(self.grid.get_coordinates_dim(d)[i]) 42 | if numPoints[d] >= 15: 43 | Q, R = np.linalg.qr(matrix) 44 | #M_inv = np.linalg.inv(matrix) 45 | 46 | pole_coordinates_base = np.empty(numPoints[d], dtype=int) 47 | for i in range(numPoints[d]): 48 | pole_index = np.zeros(self.dim, dtype=int) 49 | pole_index[d] = i 50 | pole_coordinates_base[i] = self.get_1D_coordinate(pole_index, offsets) 51 | 52 | # iterate over all indices in slice (0 at dimension d) 53 | for point_index in point_indices: 54 | # create array of function values through pole 55 | pole_values = np.zeros((value_length, numPoints[d])) 56 | #print(pole_coordinates_base) 57 | pole_coordinates = pole_coordinates_base + int(self.get_1D_coordinate(np.asarray(point_index), offsets)) 58 | #print(pole_coordinates) 59 | # fill pole_values with function or surplus values of pole through current index 60 | for i in range(numPoints[d]): 61 | # use previous surpluses for every consecutive dimension (unidirectional principle) 62 | pole_values[:, i] = grid_values[:, pole_coordinates[i]] 63 | 64 | # solve system of linear equations for all components of our function values (typically scalar -> output.length = 1) 65 | # if the function outputs vectors then we have to iterate over all components individually 66 | # toDo replace by LU factorization to save time 67 | #(matrix, self.grid.get_coordinates_dim(d), d) 68 | for n in range(value_length): 69 | #hierarchized_values = np.linalg.solve(matrix, pole_values[n,:]) 70 | #print(hierarchized_values) 71 | if numPoints[d] >= 15: 72 | hierarchized_values = solve_triangular(R, np.inner(Q.T, pole_values[n, :]), check_finite=False) 73 | else: 74 | hierarchized_values = np.linalg.solve(matrix, pole_values[n, :]) 75 | #hierarchized_values = (np.inner(M_inv, pole_values[n,:])) 76 | #print(hierarchized_values - hierarchized_values2) 77 | #if (np.sum(hierarchized_values - hierarchized_values2) > 10**-7): 78 | # print(hierarchized_values, hierarchized_values2) 79 | for i in range(numPoints[d]): 80 | #pole_index = point_index[:d] + (i,) + point_index[d+1:] 81 | grid_values[n,pole_coordinates[i]] = hierarchized_values[i] 82 | return grid_values 83 | 84 | # this function maps the d-dimensional index to a one-dimensional array index 85 | def get_1D_coordinate(self, index_vector: Sequence[int], offsets: Sequence[int]) -> int: 86 | index = np.sum(index_vector*offsets) 87 | return index 88 |
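# A minimal, hypothetical sketch of the per-pole system solved above, with the monomial basis {1, x, x^2} standing in for the grid's basis functions: with M[i, j] = basis_j(x_i), solving M c = f at the grid points recovers the coefficients of f(x) = 2*x**2 + 1.
if __name__ == "__main__":
    example_points = np.array([0.0, 0.5, 1.0])
    example_matrix = np.vander(example_points, 3, increasing=True)  # columns: 1, x, x^2
    example_values = 2 * example_points ** 2 + 1
    print(np.linalg.solve(example_matrix, example_values))  # -> [1. 0. 2.]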
-------------------------------------------------------------------------------- /sparseSpACE/MonteCarlo.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import random 3 | 4 | # this method defines a basic Monte Carlo integration by evaluating random points 5 | def montecarlo(f,N,dim, a, b): 6 | position = np.zeros(dim) 7 | result = 0 8 | for n in range(N): 9 | for d in range(dim): 10 | position[d] = random.random()*(b[d] - a[d]) + a[d] 11 | result += f.eval(position) 12 | return result/N 13 | -------------------------------------------------------------------------------- /sparseSpACE/__init__.py: -------------------------------------------------------------------------------- 1 | # __init__.py 2 | 3 | # Version of the sparseSpACE package 4 | __version__ = "0.1.0" 5 | 6 | -------------------------------------------------------------------------------- /test/run_tests.sh: -------------------------------------------------------------------------------- 1 | python3 test_BasisFunctions.py 2 | python3 test_combiScheme.py 3 | python3 test_Hierarchization.py 4 | python3 test_Integration_UQ.py 5 | python3 test_Integrator.py 6 | python3 test_RefinementContainer.py 7 | python3 test_RefinementObject.py 8 | python3 test_spatiallyAdaptiveExtendSplit.py 9 | python3 test_spatiallyAdaptiveSingleDimension2.py 10 | python3 test_StandardCombi.py 11 | python3 test_UncertaintyQuantification.py 12 | python3 test_Utils.py 13 | python3 test_Regression.py 14 | 15 | -------------------------------------------------------------------------------- /test/test_BalancedExtrapolationGrid.py: -------------------------------------------------------------------------------- 1 | import math 2 | import unittest 3 | import sparseSpACE 4 | from sparseSpACE.Extrapolation import BalancedExtrapolationGrid 5 | 6 | 7 | class TestBalancedExtrapolationGrid(unittest.TestCase): 8 | def setUp(self): 9 | self.functions1d = [ 10 | lambda x: 3 * x ** 5 + 4 * x + x ** 3 - 2, 11 | lambda x: math.exp(x ** 2) * x + 4 *
x 12 | ] 13 | 14 | def test_refinement_tree_initialization(self): 15 | grid = [0.0, 0.125, 0.25, 0.375, 0.5, 0.75, 1] 16 | grid_levels = [0, 3, 2, 3, 1, 2, 0] 17 | 18 | extrapolation_grid = BalancedExtrapolationGrid() 19 | extrapolation_grid.set_grid(grid, grid_levels) 20 | 21 | self.assertEqual(grid, extrapolation_grid.get_grid()) 22 | 23 | def test_weights_on_full_grid(self): 24 | grid = [0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] 25 | grid_levels = [0, 3, 2, 3, 1, 3, 2, 3, 0] 26 | 27 | extrapolation_grid = BalancedExtrapolationGrid() 28 | extrapolation_grid.set_grid(grid, grid_levels) 29 | weights = extrapolation_grid.get_weights() 30 | 31 | # Compute integral value 32 | f = lambda x: 2 * x ** 3 + 1 33 | value = 0 34 | 35 | for i, point in enumerate(grid): 36 | value += weights[i] * f(point) 37 | 38 | self.assertAlmostEqual(1.5, value) 39 | 40 | # def test_weights_on_adaptive_balanced_grid(self): 41 | # grid = [0.0, 0.125, 0.25, 0.375, 0.5, 0.75, 1] 42 | # grid_levels = [0, 3, 2, 3, 1, 2, 0] 43 | # 44 | # extrapolation_grid = BalancedExtrapolationGrid() 45 | # extrapolation_grid.set_grid(grid, grid_levels) 46 | # weights = extrapolation_grid.get_weights() 47 | # 48 | # # Compute integral value 49 | # f = lambda x: 2 * x ** 3 + 1 50 | # value = 0 51 | # 52 | # for i, point in enumerate(grid): 53 | # value += weights[i] * f(point) 54 | # 55 | # self.assertAlmostEqual(1.5, value) 56 | # 57 | # def test_weights_on_adaptive_grid(self): 58 | # grid = [0, 0.5, 0.625, 0.75, 1] 59 | # grid_levels = [0, 1, 3, 2, 0] 60 | # 61 | # extrapolation_grid = BalancedExtrapolationGrid() 62 | # extrapolation_grid.set_grid(grid, grid_levels) 63 | # weights = extrapolation_grid.get_weights() 64 | # 65 | # # Compute integral value 66 | # f = lambda x: 2 * x ** 3 + 1 67 | # value = 0 68 | # 69 | # for i, point in enumerate(grid): 70 | # value += weights[i] * f(point) 71 | # 72 | # self.assertAlmostEqual(1.5, value) 73 | 74 | 75 | if __name__ == '__main__': 76 | unittest.main() 77 | -------------------------------------------------------------------------------- /test/test_BasisFunctions.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sparseSpACE 3 | from sparseSpACE.BasisFunctions import * 4 | 5 | class TestBasisFunctions(unittest.TestCase): 6 | def test_lagrange_basis(self): 7 | for n in range(10): 8 | for i in range(n): 9 | points = np.linspace(0, 1 , n) 10 | basis = LagrangeBasis(p=n - 1, index=i, knots=points) 11 | for j in range(n): 12 | # check basic property of lagrange polynomials p_i(x_j) = \delta(i,j) 13 | if i == j: 14 | self.assertAlmostEqual(basis(points[j]), 1.0) 15 | else: 16 | self.assertEqual(basis(points[j]), 0.0) 17 | 18 | def test_lagrange_basis_restricted(self): 19 | for n in range(10): 20 | for i in range(n): 21 | points = np.linspace(0, 1 , n) 22 | basis = LagrangeBasisRestricted(p=n - 1, index=i, knots=points) 23 | for j in range(n): 24 | # check basic property of lagrange polynomials p_i(x_j) = \delta(i,j) 25 | if i == j: 26 | self.assertAlmostEqual(basis(points[j]), 1.0) 27 | else: 28 | self.assertEqual(basis(points[j]), 0.0) 29 | # restricted polynomials p_i are 0 when x < x_{i-1} or x > x_{i+1} 30 | points2 = np.linspace(0, 1 , 10 * n) 31 | for j in range(len(points2)): 32 | if points2[j] < points[max(i - 1, 0)] or points2[j] > points[min(i+1, len(points) - 1)]: 33 | self.assertEqual(basis(points2[j]), 0.0) 34 | 35 | def test_lagrange_basis_restricted_modified(self): 36 | for n in range(1, 10): 37 | for i 
in range(1, n+1): 38 | points = np.linspace(0, 1 , n + 2) 39 | basis = LagrangeBasisRestrictedModified(p=n + 1, index=i, knots=points, a=0, b=1, level=n) 40 | if i == 1: # test extrapolation to left 41 | self.assertTrue(basis(points[0]) > 0) 42 | if i == n: # test extrapolation to right 43 | self.assertTrue(basis(points[n+1]) > 0) 44 | for j in range(1, n+1): 45 | # check basic property of lagrange polynomials p_i(x_j) = \delta(i,j) 46 | if i == j: 47 | self.assertAlmostEqual(basis(points[j]), 1.0) 48 | else: 49 | self.assertEqual(basis(points[j]), 0.0) 50 | # restricted polynomials p_i are 0 when x < x_{i-1} or x > x_{i+1} 51 | points2 = np.linspace(0, 1 , 10 * n) 52 | for j in range(len(points2)): 53 | if points2[j] < points[max(i - 1, 0)] or points2[j] > points[min(i+1, len(points) - 1)]: 54 | self.assertEqual(basis(points2[j]), 0.0) 55 | 56 | 57 | if __name__ == '__main__': 58 | unittest.main() -------------------------------------------------------------------------------- /test/test_BinaryTreeGrid.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sparseSpACE 3 | 4 | from sparseSpACE.Extrapolation import GridBinaryTree 5 | 6 | 7 | class TestBinaryTreeGrid(unittest.TestCase): 8 | def setUp(self) -> None: 9 | self.test_grids = [ 10 | ([0.0, 0.25, 0.5, 1.0], [0.0, 0.25, 0.5, 0.75, 1.0]), 11 | ([0.0, 0.125, 0.25, 0.375, 0.5, 1.0], [0.0, 0.125, 0.25, 0.375, 0.5, 0.75, 1.0]), 12 | ([0.0, 0.25, 0.375, 0.5, 1.0], [0.0, 0.125, 0.25, 0.375, 0.5, 0.75, 1.0]), 13 | ([0.0, 0.125, 0.25, 0.375, 0.4375, 0.5, 1.0], [0.0, 0.125, 0.25, 0.3125, 0.375, 0.4375, 0.5, 0.75, 1.0]) 14 | ] 15 | 16 | self.test_grid_levels = [ 17 | ([0, 2, 1, 0], [0, 2, 1, 2, 0]), 18 | ([0, 3, 2, 3, 1, 0], [0, 3, 2, 3, 1, 2, 0]), 19 | ([0, 2, 3, 1, 0], [0, 3, 2, 3, 1, 2, 0]), 20 | ([0, 3, 2, 3, 4, 1, 0], [0, 3, 2, 4, 3, 4, 1, 2, 0]) 21 | ] 22 | 23 | self.grid_binary_tree = GridBinaryTree() 24 | 25 | # This method is testing only grids that have not been rebalanced 26 | def test_tree_init(self): 27 | for (i, (grid, full_binary_tree_grid)) in enumerate(self.test_grids): 28 | grid_levels, full_grid_levels = self.test_grid_levels[i] 29 | self.grid_binary_tree.init_tree(grid, grid_levels) 30 | 31 | self.assertEqual(grid, self.grid_binary_tree.get_grid()) 32 | self.assertEqual(grid_levels, self.grid_binary_tree.get_grid_levels()) 33 | 34 | def test_full_tree_expansion(self): 35 | for (i, (grid, full_binary_tree_grid)) in enumerate(self.test_grids): 36 | grid_levels, full_grid_levels = self.test_grid_levels[i] 37 | self.grid_binary_tree.init_tree(grid, grid_levels) 38 | self.grid_binary_tree.force_full_tree_invariant() 39 | 40 | self.assertEqual(full_binary_tree_grid, self.grid_binary_tree.get_grid()) 41 | self.assertEqual(full_grid_levels, self.grid_binary_tree.get_grid_levels()) 42 | 43 | def test_tree_init_max_level(self): 44 | a = 0 45 | b = 1 46 | max_level = 3 47 | 48 | self.grid_binary_tree.init_perfect_tree_with_max_level(a, b, max_level) 49 | 50 | expected_grid = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] 51 | expected_grid_levels = [0, 3, 2, 3, 1, 3, 2, 3, 0] 52 | 53 | self.assertEqual(expected_grid, self.grid_binary_tree.get_grid()) 54 | self.assertEqual(expected_grid_levels, self.grid_binary_tree.get_grid_levels()) 55 | 56 | def test_increment_level_in_each_subtree(self): 57 | grid = [0, 0.5, 0.75, 1] 58 | grid_levels = [0, 1, 2, 0] 59 | 60 | self.grid_binary_tree.init_tree(grid, grid_levels) 61 | 
self.grid_binary_tree.increment_level_in_each_subtree() 62 | 63 | expected_grid = [0, 0.5, 0.625, 0.75, 0.875, 1] 64 | expected_grid_levels = [0, 1, 3, 2, 3, 0] 65 | 66 | self.assertEqual(expected_grid, self.grid_binary_tree.get_grid()) 67 | self.assertEqual(expected_grid_levels, self.grid_binary_tree.get_grid_levels()) 68 | 69 | 70 | if __name__ == '__main__': 71 | unittest.main() 72 | -------------------------------------------------------------------------------- /test/test_ExtrapolationSimpsonGrid.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import sparseSpACE 4 | from sparseSpACE.Extrapolation import SliceGrouping, ExtrapolationGrid, SliceVersion, SliceContainerVersion 5 | from sparseSpACE.Function import Polynomial1d 6 | 7 | # ----------------------------------------------------------------------------------------------------------------- 8 | # --- Simpson Romberg Grid 9 | 10 | class TestExtrapolationGrid(unittest.TestCase): 11 | # ----------------------------------------------------------------------------------------------------------------- 12 | # --- Exactness 13 | 14 | def test_exactness_on_full_grid(self): 15 | grid = [1, 1.5, 2, 2.5, 3] 16 | grid_levels = [0, 2, 1, 2, 0] 17 | 18 | function = Polynomial1d([1, 0, 0, 2]) # Polynomial1d of degree 3 19 | 20 | for slice_grouping in SliceGrouping: 21 | romberg_grid = ExtrapolationGrid(slice_grouping=slice_grouping, 22 | slice_version=SliceVersion.ROMBERG_DEFAULT, 23 | container_version=SliceContainerVersion.SIMPSON_ROMBERG) 24 | romberg_grid.set_grid(grid, grid_levels) 25 | romberg_grid.integrate(function) 26 | 27 | self.assertAlmostEqual(0, romberg_grid.get_absolute_error(), 1) 28 | 29 | 30 | # ----------------------------------------------------------------------------------------------------------------- 31 | # --- Unit Test 32 | 33 | if __name__ == '__main__': 34 | unittest.main() 35 | -------------------------------------------------------------------------------- /test/test_Hierarchization.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sparseSpACE 3 | from sparseSpACE.StandardCombi import * 4 | import math 5 | from sparseSpACE.Hierarchization import * 6 | from sparseSpACE.Grid import * 7 | from sparseSpACE.Function import * 8 | 9 | 10 | class TestHierarchization(unittest.TestCase): 11 | 12 | 13 | def test_interpolation(self): 14 | a = -3 15 | b = 6 16 | for d in range(2, 5): 17 | grid = GlobalLagrangeGrid(a*np.ones(d), b*np.ones(d), boundary= True, modified_basis = False, p = 1) 18 | for l in range(7 - d): 19 | f = FunctionLinear([10*(i+1) for i in range(d)]) 20 | grid_points = [np.linspace(a,b,2**(l+i)+ 1) for i in range(d)] 21 | grid_levels = [np.zeros(2**(l+i) + 1, dtype=int) for i in range(d)] 22 | for i in range(d): 23 | for l2 in range(1,l+i+1): 24 | offset = 2**(l+i - l2) 25 | for j in range(offset, len(grid_levels[i]), 2*offset): 26 | grid_levels[i][j] = l2 27 | grid.set_grid(grid_points, grid_levels) 28 | grid.integrate(f, [l + i for i in range(d)], a * np.ones(d), b * np.ones(d)) 29 | component_grid = ComponentGridInfo([l+i for i in range(d)], 1) 30 | grid_points = get_cross_product_list(grid_points) 31 | f_values = grid.interpolate(grid_points, component_grid) 32 | for i, p in enumerate(grid_points): 33 | factor = abs(f(p)[0] if f(p)[0] != 0 else 1) 34 | self.assertAlmostEqual((f(p)[0] - f_values[i][0]) / factor, 0, 11) 35 | 36 | grid = GlobalBSplineGrid(a * np.ones(d), b * 
np.ones(d), boundary=True, modified_basis=False, p=1) 37 | for l in range(7 - d): 38 | f = FunctionLinear([10 * (i + 1) for i in range(d)]) 39 | grid_points = [list(np.linspace(a, b, 2 ** (l + i) + 1)) for i in range(d)] 40 | grid_levels = [np.zeros(2 ** (l + i) + 1, dtype=int) for i in range(d)] 41 | for i in range(d): 42 | for l2 in range(1, l + i + 1): 43 | offset = 2 ** (l + i - l2) 44 | for j in range(offset, len(grid_levels[i]), 2 * offset): 45 | grid_levels[i][j] = l2 46 | grid.set_grid(grid_points, grid_levels) 47 | grid.integrate(f, [l + i for i in range(d)], a * np.ones(d), b * np.ones(d)) 48 | component_grid = ComponentGridInfo([l + i for i in range(d)], 1) 49 | grid_points = get_cross_product_list(grid_points) 50 | f_values = grid.interpolate(grid_points, component_grid) 51 | for i, p in enumerate(grid_points): 52 | factor = abs(f(p)[0] if f(p)[0] != 0 else 1) 53 | self.assertAlmostEqual((f(p)[0] - f_values[i][0]) / factor, 0, 11) 54 | 55 | if __name__ == '__main__': 56 | unittest.main() -------------------------------------------------------------------------------- /test/test_Integration_UQ.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | import chaospy as cp 4 | 5 | import sparseSpACE 6 | from sparseSpACE.Function import * 7 | from sparseSpACE.spatiallyAdaptiveSingleDimension2 import * 8 | from sparseSpACE.ErrorCalculator import * 9 | from sparseSpACE.GridOperation import * 10 | 11 | 12 | class TestIntegrationUQ(unittest.TestCase): 13 | def test_normal_integration(self): 14 | #print("Calculating an expectation with an Integration Operation") 15 | d = 2 16 | bigvalue = 7.0 17 | a = np.array([-bigvalue, -bigvalue]) 18 | b = np.array([bigvalue, bigvalue]) 19 | 20 | distr = [] 21 | for _ in range(d): 22 | distr.append(cp.Normal(0,2)) 23 | distr_joint = cp.J(*distr) 24 | f = FunctionMultilinear([2.0, 0.0]) 25 | fw = FunctionCustom(lambda coords: f(coords)[0] 26 | * float(distr_joint.pdf(coords))) 27 | 28 | grid = GlobalBSplineGrid(a, b) 29 | op = Integration(fw, grid=grid, dim=d) 30 | 31 | error_operator = ErrorCalculatorSingleDimVolumeGuided() 32 | combiinstance = SpatiallyAdaptiveSingleDimensions2(a, b, operation=op) 33 | #print("performSpatiallyAdaptiv…") 34 | v = combiinstance.performSpatiallyAdaptiv(1, 2, error_operator, tol=10**-3, 35 | max_evaluations=40, min_evaluations=25, do_plot=False, print_output=False) 36 | integral = v[3][0] 37 | #print("expectation", integral) 38 | 39 | 40 | if __name__ == '__main__': 41 | unittest.main() 42 | -------------------------------------------------------------------------------- /test/test_Integrator.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sparseSpACE 3 | from sparseSpACE.StandardCombi import * 4 | import math 5 | from sparseSpACE.Grid import * 6 | from sparseSpACE.Integrator import * 7 | from sparseSpACE.Function import * 8 | 9 | 10 | class TestIntegrator(unittest.TestCase): 11 | 12 | def test_integrate_non_basis_functions(self): 13 | a = -3 14 | b = 6 15 | for d in range(2, 5): 16 | grid = GlobalTrapezoidalGrid(a*np.ones(d), b*np.ones(d), boundary= True, modified_basis = False) 17 | for integrator in [IntegratorArbitraryGrid(grid), IntegratorArbitraryGridScalarProduct(grid)]: 18 | grid.integrator = integrator 19 | for l in range(7 - d): 20 | f = FunctionLinear([10*(i+1) for i in range(d)]) 21 | grid_points = [np.linspace(a,b,2**(l+i)+ 1) for i in range(d)] 22 | grid_levels = 
[np.zeros(2**(l+i) + 1, dtype=int) for i in range(d)] 23 | for i in range(d): 24 | for l2 in range(1, l+i+1): 25 | offset = 2**(l+i - l2) 26 | for j in range(offset, len(grid_levels[i]), 2*offset): 27 | grid_levels[i][j] = l2 28 | grid.set_grid(grid_points, grid_levels) 29 | integral = grid.integrate(f, [l + i for i in range(d)], a * np.ones(d), b * np.ones(d)) 30 | # The trapezoidal rule is exact for linear functions, so the relative error should vanish up to floating-point accuracy 31 | #print(integral, f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d)), f.eval(np.ones(d))) 32 | self.assertAlmostEqual((integral[0] - f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d))) / abs(f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d))), 0.0, places=13) 33 | 34 | def test_integrate_basis_functions(self): 35 | a = -3 36 | b = 6 37 | for d in range(1, 5): 38 | for p in range(1, 7): 39 | grid = GlobalLagrangeGrid(a*np.ones(d), b*np.ones(d), boundary=True, modified_basis=False, p=p) 40 | for l in range(p - 1, 8 - d): 41 | f = FunctionPolynomial([10*(i+1) for i in range(d)], degree=p) 42 | grid_points = [np.linspace(a, b, 2**l + 1) for _ in range(d)] 43 | grid_levels = [np.zeros(2**l + 1, dtype=int) for _ in range(d)] 44 | for i in range(d): 45 | for l2 in range(1, l+1): 46 | offset = 2**(l - l2) 47 | for j in range(offset, len(grid_levels[i]), 2*offset): 48 | grid_levels[i][j] = l2 49 | grid.set_grid(grid_points, grid_levels) 50 | integral = grid.integrate(f, [l for _ in range(d)], a * np.ones(d), b * np.ones(d)) 51 | #print(integral, f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d)), (integral[0] - f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d))) / abs(f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d))), p) 52 | self.assertAlmostEqual((integral[0] - f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d))) / abs(f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d))), 0.0, places=12) 53 | 54 | for p in range(1, 7, 2): 55 | grid = GlobalBSplineGrid(a*np.ones(d), b*np.ones(d), boundary=True, modified_basis=False, p=p) 56 | for l in range(int(log2(p)) + 1, 8 - d): 57 | f = FunctionPolynomial([10*(i+1) for i in range(d)], degree=p) 58 | grid_points = [list(np.linspace(a, b, 2**l + 1)) for _ in range(d)] 59 | grid_levels = [np.zeros(2**l + 1, dtype=int) for _ in range(d)] 60 | for i in range(d): 61 | for l2 in range(1, l+1): 62 | offset = 2**(l - l2) 63 | for j in range(offset, len(grid_levels[i]), 2*offset): 64 | grid_levels[i][j] = l2 65 | grid.set_grid(grid_points, grid_levels) 66 | integral = grid.integrate(f, [l for _ in range(d)], a * np.ones(d), b * np.ones(d)) 67 | #print(integral, f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d)), p) 68 | #print(integral, f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d)), (integral[0] - f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d))) / abs(f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d))), p) 69 | # Here exactness is not guaranteed but it should be close 70 | self.assertAlmostEqual((integral[0] - f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d))) / abs(f.getAnalyticSolutionIntegral(a*np.ones(d), b*np.ones(d))), 0.0, places=11) 71 | 72 | if __name__ == '__main__': 73 | unittest.main() -------------------------------------------------------------------------------- /test/test_RefinementObject.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sparseSpACE 3 | 4 | from sparseSpACE.RefinementObject import * 5 | from sparseSpACE.combiScheme import * 6 | 7 | class TestRefinementObject(unittest.TestCase): 8 | 9 | 10 | def 
test_extend_split_object_is_calculated(self): 11 | a = -3 12 | b = 6 13 | for d in range(2, 5): 14 | grid = TrapezoidalGrid(np.ones(d)*a, np.ones(d)*b, d) 15 | combi_scheme = CombiScheme(dim=d) 16 | refinement_object = RefinementObjectExtendSplit(a * np.ones(d), b * np.ones(d), grid) 17 | for l in range(1, 10 - d): 18 | combi_scheme.init_adaptive_combi_scheme(lmin=1, lmax=l) 19 | combi_grids = combi_scheme.getCombiScheme(lmin=1, lmax=l, do_print=False) 20 | 21 | for component_grid in combi_grids: 22 | if l == 1: 23 | self.assertTrue(not refinement_object.is_already_calculated(tuple(component_grid.levelvector), tuple(component_grid.levelvector))) 24 | else: 25 | self.assertEqual(combi_scheme.has_forward_neighbour(component_grid.levelvector), refinement_object.is_already_calculated(tuple(component_grid.levelvector), tuple(component_grid.levelvector + np.ones(d, dtype=int)))) 26 | refinement_object.add_level(tuple(component_grid.levelvector), tuple(component_grid.levelvector)) 27 | 28 | def test_extend_split_object_coarsening_update(self): 29 | a = -3 30 | b = 6 31 | for d in range(2, 5): 32 | grid = TrapezoidalGrid(np.ones(d)*a, np.ones(d)*b, d) 33 | refinement_object = RefinementObjectExtendSplit(a * np.ones(d), b * np.ones(d), grid, splitSingleDim=False) 34 | refinement_object.update(5) 35 | self.assertEqual(refinement_object.coarseningValue, 5) 36 | refinement_object.update(5) 37 | self.assertEqual(refinement_object.coarseningValue, 10) 38 | refinement_object = RefinementObjectExtendSplit(a * np.ones(d), b * np.ones(d), grid, coarseningValue=3, splitSingleDim=False) 39 | refinement_object.update(3) 40 | self.assertEqual(refinement_object.coarseningValue, 6) 41 | refinement_objects, _, _ = refinement_object.refine() 42 | for ref_obj in refinement_objects: 43 | self.assertEqual(ref_obj.coarseningValue, 6) 44 | refinement_object = RefinementObjectExtendSplit(a * np.ones(d), b * np.ones(d), grid, coarseningValue=6, number_of_refinements_before_extend=1, splitSingleDim=False) 45 | refinement_objects, _, _ = refinement_object.refine() 46 | for ref_obj in refinement_objects: 47 | self.assertEqual(ref_obj.coarseningValue, 6) 48 | refinement_objects2, _, _ = ref_obj.refine() 49 | for ref_obj2 in refinement_objects2: 50 | self.assertEqual(ref_obj2.coarseningValue, 5) 51 | refinement_object = RefinementObjectExtendSplit(a * np.ones(d), b * np.ones(d), grid, coarseningValue=6, number_of_refinements_before_extend=0, splitSingleDim=False) 52 | refinement_objects, _, _ = refinement_object.refine() 53 | for ref_obj in refinement_objects: 54 | self.assertEqual(ref_obj.coarseningValue, 5) 55 | refinement_objects2, _, _ = ref_obj.refine() 56 | for ref_obj2 in refinement_objects2: 57 | self.assertEqual(ref_obj2.coarseningValue, 4) 58 | refinement_object = RefinementObjectExtendSplit(a * np.ones(d), b * np.ones(d), grid, coarseningValue=0, splitSingleDim=False) 59 | refinement_objects, _, _ = refinement_object.refine() 60 | for ref_obj in refinement_objects: 61 | self.assertEqual(ref_obj.coarseningValue, 0) 62 | 63 | def test_extend_split_object_contains_points(self): 64 | a = -3 65 | b = 6 66 | for d in range(2, 5): 67 | grid = TrapezoidalGrid(np.ones(d)*a, np.ones(d)*b, d) 68 | refinement_object = RefinementObjectExtendSplit(a * np.ones(d), b * np.ones(d), grid) 69 | points = get_cross_product_list([np.linspace(a, b, 10) for _ in range(d)]) 70 | for p in points: 71 | self.assertTrue(refinement_object.contains(p)) 72 | points2 = get_cross_product_list([list(np.linspace(b+1, b+3, 10)) + list(np.linspace(a-3, a-1, 10)) for _ in range(d)]) 73 | 
for p in points2: 74 | self.assertTrue(not refinement_object.contains(p)) 75 | self.assertEqual(points, refinement_object.subset_of_contained_points(points+points2)) 76 | 77 | def test_extend_split_refine(self): 78 | a = -3 79 | b = 6 80 | for d in range(2, 5): 81 | grid = TrapezoidalGrid(np.ones(d)*a, np.ones(d)*b, d) 82 | refinement_object = RefinementObjectExtendSplit(a * np.ones(d), b * np.ones(d), grid, coarseningValue=6, number_of_refinements_before_extend=1, splitSingleDim=False) 83 | refinement_objects, increase, update = refinement_object.refine() 84 | for ref_obj in refinement_objects: 85 | self.assertEqual(ref_obj.coarseningValue, 6) 86 | for dim in range(d): 87 | self.assertTrue(ref_obj.start[dim] > refinement_object.start[dim] or ref_obj.end[dim] < refinement_object.end[dim]) 88 | refinement_object_copy = list(refinement_objects) 89 | refinement_object_copy.remove(ref_obj) 90 | for other_obj in refinement_object_copy: 91 | middle = 0.5*(ref_obj.end + ref_obj.start) 92 | self.assertTrue(not other_obj.contains(middle)) 93 | self.assertEqual(len(refinement_objects), 2**d) 94 | self.assertEqual(increase, None) 95 | self.assertEqual(update, None) 96 | refinement_object = RefinementObjectExtendSplit(a * np.ones(d), b * np.ones(d), grid, coarseningValue=6, number_of_refinements_before_extend=0, splitSingleDim=False) 97 | refinement_objects, increase, update = refinement_object.refine() 98 | for ref_obj in refinement_objects: 99 | self.assertEqual(ref_obj.coarseningValue, 5) 100 | for dim in range(d): 101 | self.assertTrue(ref_obj.start[dim] == refinement_object.start[dim] or ref_obj.end[dim] == refinement_object.end[dim]) 102 | self.assertEqual(len(refinement_objects), 1) 103 | self.assertEqual(increase, None) 104 | self.assertEqual(update, None) 105 | # test return when coarseningValue == 0 106 | refinement_object = RefinementObjectExtendSplit(a * np.ones(d), b * np.ones(d), grid, coarseningValue=0, number_of_refinements_before_extend=0, splitSingleDim=False) 107 | refinement_objects, increase, update = refinement_object.refine() 108 | self.assertEqual(increase, [1 for _ in range(d)]) 109 | self.assertEqual(update, 1) 110 | 111 | def test_single_dim_refine(self): 112 | a = -3 113 | b = 6 114 | dim = 2 115 | grid = GlobalTrapezoidalGrid(np.ones(dim)*a, np.ones(dim)*b, dim) 116 | refinement_object = RefinementObjectSingleDimension(a, b, 0, dim, (0, 1), grid, a, b, coarsening_level=2) 117 | refinement_objects, increase, update = refinement_object.refine() 118 | self.assertEqual(len(refinement_objects), 2) 119 | for ref_obj in refinement_objects: 120 | self.assertEqual(ref_obj.coarsening_level, 1) 121 | self.assertEqual(increase, None) 122 | self.assertEqual(update, None) 123 | self.assertEqual(refinement_objects[0].start, refinement_object.start) 124 | self.assertEqual(refinement_objects[1].start, 0.5*(refinement_object.end + refinement_object.start)) 125 | self.assertEqual(refinement_objects[0].end, 0.5*(refinement_object.end + refinement_object.start)) 126 | self.assertEqual(refinement_objects[1].end, refinement_object.end) 127 | 128 | refinement_object = RefinementObjectSingleDimension(a, b, 0, 2, (0, 1), grid, a, b) 129 | refinement_objects, increase, update = refinement_object.refine() 130 | self.assertEqual(len(refinement_objects), 2) 131 | for ref_obj in refinement_objects: 132 | self.assertEqual(ref_obj.coarsening_level, 0) 133 | self.assertEqual(increase, None) 134 | self.assertEqual(update, None) 135 | 136 | 137 | if __name__ == '__main__': 138 | unittest.main() 139 | 
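test_extend_split_refine above asserts that a full split produces exactly 2**d child objects whose interiors do not overlap. The following standalone sketch (plain numpy and itertools; it re-derives the geometry being tested and is not sparseSpACE's actual RefinementObjectExtendSplit implementation) shows where the 2**d count comes from: each child picks, independently per dimension, either the lower or the upper half of the parent box.

import itertools

import numpy as np


def split_box(start, end):
    # Bisect the box [start, end] at its midpoint in every dimension and
    # return the 2**len(start) child boxes as (start, end) pairs.
    start, end = np.asarray(start, dtype=float), np.asarray(end, dtype=float)
    middle = 0.5 * (start + end)
    children = []
    for choice in itertools.product((0, 1), repeat=len(start)):
        lower = np.where(choice, middle, start)  # 0 -> lower half, 1 -> upper half
        upper = np.where(choice, end, middle)
        children.append((lower, upper))
    return children


for d in range(2, 5):
    children = split_box(-3 * np.ones(d), 6 * np.ones(d))
    # Matches the len(refinement_objects) == 2**d check in the test above;
    # siblings meet only on shared faces, so no child contains a sibling's midpoint.
    assert len(children) == 2 ** d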
-------------------------------------------------------------------------------- /test/test_RombergWeightFactory.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sparseSpACE 3 | 4 | from sparseSpACE.Extrapolation import ExtrapolationVersion, RombergWeightFactory 5 | 6 | 7 | class TestWeightFactory(unittest.TestCase): 8 | def setUp(self) -> None: 9 | self.places = 8 10 | 11 | # ----------------------------------------------------------------------------------------------------------------- 12 | # --- Version: Romberg Linear 13 | 14 | def test_extrapolation_coefficient_version_linear(self): 15 | m = 2 16 | 17 | # Unit domain 18 | factory = RombergWeightFactory.get(0, 1, version=ExtrapolationVersion.ROMBERG_LINEAR) 19 | 20 | # WolframAlpha: ((a)^1 / ((a)^1 - (b)^1)) * ((c)^1 / ((c)^1 - (b)^1)) for a = 1/2, b = 1, c = 1/4 21 | self.assertAlmostEqual(1/3, factory.get_extrapolation_coefficient(m, 0), self.places) 22 | 23 | # WolframAlpha: ((a)^1 / ((a)^1 - (b)^1)) * ((c)^1 / ((c)^1 - (b)^1)) for a = 1, b = 1/2, c = 1/4 24 | self.assertAlmostEqual(-2, factory.get_extrapolation_coefficient(m, 1), self.places) 25 | 26 | # WolframAlpha: ((a)^1 / ((a)^1 - (b)^1)) * ((c)^1 / ((c)^1 - (b)^1)) for a = 1, b = 1/4, c = 1/2 27 | self.assertAlmostEqual(8/3, factory.get_extrapolation_coefficient(m, 2), self.places) 28 | 29 | # Non-unit domain 30 | factory = RombergWeightFactory.get(3, 1, version=ExtrapolationVersion.ROMBERG_LINEAR) 31 | 32 | # WolframAlpha: ((a)^1 / ((a)^1 - (b)^1)) * ((c)^1 / ((c)^1 - (b)^1)) for a = (3-1)/2, b = (3-1)/1, c = (3-1)/4 33 | self.assertAlmostEqual(1/3, factory.get_extrapolation_coefficient(m, 0), self.places) 34 | 35 | # WolframAlpha: ((a)^1 / ((a)^1 - (b)^1)) * ((c)^1 / ((c)^1 - (b)^1)) for a = (3-1)/1, b = (3-1)/2, c = (3-1)/4 36 | self.assertAlmostEqual(-2, factory.get_extrapolation_coefficient(m, 1), self.places) 37 | 38 | # WolframAlpha: ((a)^1 / ((a)^1 - (b)^1)) * ((c)^1 / ((c)^1 - (b)^1)) for a = (3-1)/1, b = (3-1)/4, c = (3-1)/2 39 | self.assertAlmostEqual(8/3, factory.get_extrapolation_coefficient(m, 2), self.places) 40 | 41 | def test_weights_version_linear(self): 42 | a = 0 43 | b = 1 44 | 45 | factory = RombergWeightFactory.get(a, b, version=ExtrapolationVersion.ROMBERG_LINEAR) 46 | m = 2 47 | 48 | self.assertAlmostEqual(0, factory.get_boundary_point_weight(m)) 49 | self.assertAlmostEqual(-1/3, factory.get_inner_point_weight(1, m)) 50 | self.assertAlmostEqual(2/3, factory.get_inner_point_weight(2, m)) 51 | 52 | # ----------------------------------------------------------------------------------------------------------------- 53 | # --- Version: Romberg Default 54 | 55 | # Unit domain 56 | def test_extrapolation_coefficient_version_romberg(self): 57 | m = 2 58 | 59 | # Unit domain 60 | factory = RombergWeightFactory.get(0, 1, version=ExtrapolationVersion.ROMBERG_DEFAULT) 61 | 62 | # WolframAlpha: ((a)^2 / ((a)^2 - (b)^2)) * ((c)^2 / ((c)^2 - (b)^2)) for a = 1/2, b = 1, c = 1/4 63 | self.assertAlmostEqual(1/45, factory.get_extrapolation_coefficient(m, 0), self.places) 64 | 65 | # WolframAlpha: ((a)^2 / ((a)^2 - (b)^2)) * ((c)^2 / ((c)^2 - (b)^2)) for a = 1, b = 1/2, c = 1/4 66 | self.assertAlmostEqual(-4/9, factory.get_extrapolation_coefficient(m, 1), self.places) 67 | 68 | # WolframAlpha: ((a)^2 / ((a)^2 - (b)^2)) * ((c)^2 / ((c)^2 - (b)^2)) for a = 1, b = 1/4, c = 1/2 69 | self.assertAlmostEqual(64/45, factory.get_extrapolation_coefficient(m, 2), self.places) 70 | 71 | # Non-Unit domain: Same 
coefficients as in unit domain (factor H out and reduce fraction) 72 | factory = RombergWeightFactory.get(1, 3, version=ExtrapolationVersion.ROMBERG_DEFAULT) 73 | 74 | # WolframAlpha: ((a)^2 / ((a)^2 - (b)^2)) * ((c)^2 / ((c)^2 - (b)^2)) 75 | # for a = (3-1) / 2, b = (3-1) / 1, c = (3-1) /4 76 | self.assertAlmostEqual(1/45, factory.get_extrapolation_coefficient(m, 0), self.places) 77 | 78 | # WolframAlpha: ((a)^2 / ((a)^2 - (b)^2)) * ((c)^2 / ((c)^2 - (b)^2)) 79 | # for a = (3-1) / 1, b = (3-1) / 2, c = (3-1) /4 80 | self.assertAlmostEqual(-4/9, factory.get_extrapolation_coefficient(m, 1), self.places) 81 | 82 | # WolframAlpha: ((a)^2 / ((a)^2 - (b)^2)) * ((c)^2 / ((c)^2 - (b)^2)) 83 | # for a = (3-1) / 1, b = (3-1) / 4, c = (3-1) /2 84 | self.assertAlmostEqual(64/45, factory.get_extrapolation_coefficient(m, 2), self.places) 85 | 86 | def test_weights_version_romberg(self): 87 | a = 0 88 | b = 1 89 | 90 | factory = RombergWeightFactory.get(a, b, version=ExtrapolationVersion.ROMBERG_DEFAULT) 91 | m = 2 92 | 93 | self.assertAlmostEqual(7/90, factory.get_boundary_point_weight(m)) 94 | self.assertAlmostEqual(2/15, factory.get_inner_point_weight(1, m)) 95 | self.assertAlmostEqual(16/45, factory.get_inner_point_weight(2, m)) 96 | 97 | 98 | if __name__ == '__main__': 99 | unittest.main() 100 | -------------------------------------------------------------------------------- /test/test_StandardCombi.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sparseSpACE 3 | from sparseSpACE.StandardCombi import * 4 | import math 5 | from sparseSpACE.Function import * 6 | 7 | class TestStandardCombi(unittest.TestCase): 8 | def test_points(self): 9 | a = -3 10 | b = math.pi 11 | for d in range(2, 6): 12 | f = FunctionLinear([10 ** i for i in range(d)]) 13 | operation = Integration(f, grid=TrapezoidalGrid(np.ones(d)*a, np.ones(d)*b, d), dim=d, reference_solution=f.getAnalyticSolutionIntegral(np.ones(d)*a, np.ones(d)*b)) 14 | standardCombi = StandardCombi(np.ones(d)*a, np.ones(d)*b, print_output=False, operation=operation) 15 | for l in range(8 - d): 16 | for l2 in range(l+1): 17 | #print(l,l2,d) 18 | standardCombi.set_combi_parameters(l2, l) 19 | standardCombi.check_combi_scheme() 20 | 21 | def test_integration(self): 22 | a = -3 23 | b = 7.3 24 | for d in range(2, 6): 25 | f = FunctionLinear([10 ** i for i in range(d)]) 26 | operation = Integration(f, grid=TrapezoidalGrid(np.ones(d)*a, np.ones(d)*b, d), dim=d, reference_solution=f.getAnalyticSolutionIntegral(np.ones(d)*a, np.ones(d)*b)) 27 | standardCombi = StandardCombi(np.ones(d)*a, np.ones(d)*b, print_output=False, operation=operation) 28 | for l in range(8 - d): 29 | for l2 in range(l+1): 30 | scheme, error, integral = standardCombi.perform_operation(l2, l) 31 | rel_error = error/f.getAnalyticSolutionIntegral(np.ones(d)*a, np.ones(d)*b) 32 | self.assertAlmostEqual(rel_error, 0.0, 13) 33 | 34 | def test_interpolation(self): 35 | a = -1 36 | b = 7 37 | for d in range(2, 5): 38 | f = FunctionLinear([10 * (i+1) for i in range(d)]) 39 | operation = Integration(f, grid=TrapezoidalGrid(np.ones(d)*a, np.ones(d)*b), dim=d, reference_solution=f.getAnalyticSolutionIntegral(np.ones(d)*a, np.ones(d)*b)) 40 | standardCombi = StandardCombi(np.ones(d)*a, np.ones(d)*b, print_output=False, operation=operation) 41 | for l in range(8 - d): 42 | for l2 in range(l+1): 43 | standardCombi.set_combi_parameters(l2, l) 44 | grid_coordinates = [np.linspace(a, b, 3, endpoint=False) for _ in range(d)] 45 | 
interpolated_points = standardCombi.interpolate_grid(grid_coordinates) 46 | grid_points = get_cross_product_list(grid_coordinates) 47 | for component_grid in standardCombi.scheme: 48 | interpolated_points_grid = standardCombi.interpolate_points(grid_points, component_grid) 49 | for i, p in enumerate(grid_points): 50 | factor = abs(f(p)[0] if f(p)[0] != 0 else 1) 51 | self.assertAlmostEqual((f(p)[0] - interpolated_points_grid[i][0]) / factor, 0, 13) 52 | for i, p in enumerate(grid_points): 53 | factor = abs(f(p)[0] if f(p)[0] != 0 else 1) 54 | self.assertAlmostEqual((f(p)[0] - interpolated_points[i][0])/factor, 0, 13) 55 | interpolated_points = standardCombi(grid_points) 56 | for i, p in enumerate(grid_points): 57 | factor = abs(f(p)[0] if f(p)[0] != 0 else 1) 58 | self.assertAlmostEqual((f(p)[0] - interpolated_points[i][0])/factor, 0, 13) 59 | 60 | def test_number_of_points(self): 61 | a = -3 62 | b = 7.3 63 | for d in range(2, 6): 64 | f = FunctionLinear([10 ** i for i in range(d)]) 65 | operation = Integration(f, grid=TrapezoidalGrid(np.ones(d)*a, np.ones(d)*b, d), dim=d, reference_solution=f.getAnalyticSolutionIntegral(np.ones(d)*a, np.ones(d)*b)) 66 | standardCombi = StandardCombi(np.ones(d)*a, np.ones(d)*b, print_output=False, operation=operation) 67 | for l in range(8 - d): 68 | for l2 in range(l+1): 69 | standardCombi.set_combi_parameters(l2, l) 70 | points, weights = standardCombi.get_points_and_weights() 71 | self.assertEqual(len(points), standardCombi.get_total_num_points(distinct_function_evals=False)) 72 | self.assertEqual(len(points), len(weights)) 73 | for component_grid in standardCombi.scheme: 74 | points, weights = standardCombi.get_points_and_weights_component_grid(component_grid.levelvector) 75 | self.assertEqual(len(points), np.prod(standardCombi.grid.levelToNumPoints(component_grid.levelvector))) 76 | self.assertEqual(standardCombi.get_num_points_component_grid(component_grid.levelvector, False), np.prod(standardCombi.grid.levelToNumPoints(component_grid.levelvector))) 77 | 78 | 79 | if __name__ == '__main__': 80 | unittest.main() 81 | -------------------------------------------------------------------------------- /test/test_UncertaintyQuantification.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | 4 | import sparseSpACE 5 | from sparseSpACE.Function import * 6 | from sparseSpACE.spatiallyAdaptiveSingleDimension2 import * 7 | from sparseSpACE.ErrorCalculator import * 8 | from sparseSpACE.GridOperation import * 9 | 10 | 11 | class TestUncertaintyQuantification(unittest.TestCase): 12 | def test_expectation_variance(self): 13 | # Let's select the three-dimensional discontinuous FunctionUQ 14 | # as problem function and let the input parameters be 15 | # normally distributed 16 | problem_function = FunctionUQ() 17 | dim = 3 18 | distributions = [("Normal", 0.2, 1.0) for _ in range(dim)] 19 | # a and b are the weighted integration domain boundaries. 20 | # They should be set according to the distribution. 
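# With unbounded supports like these Normal(0.2, 1.0) marginals, the
# infinite boundaries below leave the handling of the tails entirely to
# the weighted grid. A bounded alternative (hypothetical, not used by
# this test: truncate at, say, ten standard deviations via
# a = np.array([0.2 - 10.0] * dim) and b = np.array([0.2 + 10.0] * dim))
# would keep all grid points finite at the cost of a small truncation error.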
21 | a = np.array([-np.inf] * dim) 22 | b = np.array([np.inf] * dim) 23 | 24 | # Create the grid operation and the weighted grid 25 | op = UncertaintyQuantification(problem_function, distributions, a, b) 26 | grid = GlobalTrapezoidalGridWeighted(a, b, op, boundary=False) 27 | # The grid initialization requires the weight functions from the 28 | # operation; since currently the adaptive refinement takes the grid from 29 | # the operation, it has to be passed here 30 | op.set_grid(grid) 31 | # Select the function for which the grid is refined; 32 | # here it is the expectation and variance calculation via the moments 33 | op.set_expectation_variance_Function() 34 | # Initialize the adaptive refinement instance and refine the grid until 35 | # it has at least 200 points 36 | combiinstance = SpatiallyAdaptiveSingleDimensions2(a, b, operation=op, norm=2, use_volume_weighting=True, 37 | grid_surplusses=op.get_grid()) 38 | lmax = 2 39 | error_operator = ErrorCalculatorSingleDimVolumeGuided() 40 | combiinstance.performSpatiallyAdaptiv(1, lmax, 41 | error_operator, tol=0, max_evaluations=200, print_output=False) 42 | 43 | # Calculate the expectation and variance with the adaptive sparse grid 44 | # weighted integral result 45 | (E,), (Var,) = op.calculate_expectation_and_variance(combiinstance) 46 | 47 | # Test if the results are similar to the reference values 48 | E_ref, Var_ref = (2.670603962589227, 8.813897872367328) 49 | assert abs(E - E_ref) < 0.3, E 50 | assert abs(Var - Var_ref) < 1.0, Var 51 | 52 | def test_pce(self): 53 | problem_function = FunctionUQ() 54 | dim = 3 55 | distributions = [("Normal", 0.2, 1.0) for _ in range(dim)] 56 | a = np.array([-np.inf] * dim) 57 | b = np.array([np.inf] * dim) 58 | op = UncertaintyQuantification(problem_function, distributions, a, b) 59 | grid = GlobalTrapezoidalGridWeighted(a, b, op, boundary=False) 60 | op.set_grid(grid) 61 | 62 | polynomial_degree_max = 2 63 | # The grid needs to be refined for the PCE coefficient calculation 64 | op.set_PCE_Function(polynomial_degree_max) 65 | 66 | combiinstance = SpatiallyAdaptiveSingleDimensions2(a, b, operation=op, norm=2, 67 | grid_surplusses=op.get_grid()) 68 | lmax = 2 69 | error_operator = ErrorCalculatorSingleDimVolumeGuided() 70 | combiinstance.performSpatiallyAdaptiv(1, lmax, 71 | error_operator, tol=0, max_evaluations=200, print_output=False) 72 | 73 | # Create the PCE approximation; it is saved internally in the operation 74 | op.calculate_PCE(None, combiinstance) 75 | # Calculate the expectation and variance with the PCE coefficients 76 | (E,), (Var,) = op.get_expectation_and_variance_PCE() 77 | 78 | # The PCE Variance differs from the actual variance 79 | E_ref, Var_ref = (2.66882233703942, 5.110498374118302) 80 | assert abs(E - E_ref) < 0.3, E 81 | assert abs(Var - Var_ref) < 1.0, Var 82 | 83 | 84 | if __name__ == '__main__': 85 | unittest.main() 86 | -------------------------------------------------------------------------------- /test/test_Utils.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sparseSpACE 3 | from sparseSpACE.combiScheme import * 4 | from sparseSpACE.Utils import * 5 | 6 | class TestUtils(unittest.TestCase): 7 | def test_size(self): 8 | for dim in range(1,6): 9 | for s in range(1,10): 10 | sizes = np.ones(dim, dtype=int) * s 11 | cross_product = get_cross_product_range_list(sizes) 12 | self.assertEqual(len(cross_product), s**dim) 13 | 14 | sizes = sizes + np.asarray(range(dim)) 15 | cross_product = 
get_cross_product_range_list(sizes) 16 | self.assertEqual(len(cross_product), np.prod(sizes)) 17 | 18 | arrays = [np.linspace(0, 1, sizes[d]) for d in range(dim)] 19 | cross_product = get_cross_product_list(arrays) 20 | self.assertEqual(len(cross_product), np.prod(sizes)) 21 | 22 | def test_valid_entries(self): 23 | for dim in range(1, 6): 24 | for s in range(1, 10): 25 | sizes = np.ones(dim, dtype=int) * s + np.asarray(range(dim)) 26 | cross_product = get_cross_product_range(sizes) 27 | for entry in cross_product: 28 | for d in range(dim): 29 | self.assertTrue(entry[d] in range(sizes[d])) 30 | 31 | arrays = [np.linspace(0, 1, sizes[d]) for d in range(dim)] 32 | cross_product = get_cross_product(arrays) 33 | for entry in cross_product: 34 | for d in range(dim): 35 | self.assertTrue(entry[d] in arrays[d]) 36 | 37 | def test_combinations_only_occurring_once(self): 38 | for dim in range(1, 6): 39 | for s in range(1, 10): 40 | sizes = np.ones(dim, dtype=int) * s + np.asarray(range(dim)) 41 | sets = [np.empty(sizes[d], dtype=set) for d in range(dim)] 42 | for d in range(dim): 43 | for i in range(len(sets[d])): 44 | sets[d][i] = set() 45 | cross_product = get_cross_product_range(sizes) 46 | for entry in cross_product: 47 | for d in range(dim): 48 | other_values = list(entry[:d]) + list(entry[d+1:]) 49 | self.assertTrue(tuple(other_values) not in sets[d][entry[d]]) 50 | sets[d][entry[d]].add(tuple(other_values)) 51 | 52 | for d in range(dim): 53 | for i in range(len(sets[d])): 54 | sets[d][i] = set() 55 | arrays = [list(np.linspace(0, 1, sizes[d])) for d in range(dim)] 56 | cross_product = get_cross_product(arrays) 57 | for entry in cross_product: 58 | for d in range(dim): 59 | other_values = list(entry[:d]) + list(entry[d + 1:]) 60 | self.assertTrue(tuple(other_values) not in sets[d][arrays[d].index(entry[d])]) 61 | sets[d][arrays[d].index(entry[d])].add(tuple(other_values)) 62 | 63 | 64 | if __name__ == '__main__': 65 | unittest.main() -------------------------------------------------------------------------------- /test/test_combiScheme.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sparseSpACE 3 | from sparseSpACE.combiScheme import * 4 | from sparseSpACE.Utils import * 5 | 6 | class TestCombiScheme(unittest.TestCase): 7 | def test_size(self): 8 | combi_scheme = CombiScheme(dim=2) 9 | for l in range(10): 10 | for l2 in range(l+1): 11 | combi_grids = combi_scheme.getCombiScheme(lmin=l2, lmax=l, do_print=False) 12 | expected_num_grids = 2*(l - l2 + 1) - 1 13 | self.assertEqual(len(combi_grids), expected_num_grids) 14 | 15 | def test_coefficients(self): 16 | for d in range(2, 6): 17 | combi_scheme = CombiScheme(dim=d) 18 | for l in range(12 - d): 19 | for l2 in range(l+1): 20 | combi_grids = combi_scheme.getCombiScheme(lmin=l2, lmax=l, do_print=False) 21 | sum_of_coefficients = 0 22 | for component_grid in combi_grids: 23 | sum_of_coefficients += component_grid.coefficient 24 | self.assertEqual(sum_of_coefficients, 1) 25 | 26 | def test_size_adaptive(self): 27 | combi_scheme = CombiScheme(dim=2) 28 | for l in range(10): 29 | for l2 in range(l+1): 30 | combi_scheme.init_adaptive_combi_scheme(lmin=l2, lmax=l) 31 | combi_grids = combi_scheme.getCombiScheme(lmin=l2, lmax=l, do_print=False) 32 | expected_num_grids = 2*(l - l2 + 1) - 1 33 | sum_of_coefficients = 0 34 | for component_grid in combi_grids: 35 | sum_of_coefficients += component_grid.coefficient 36 | self.assertEqual(len(combi_grids), expected_num_grids) 37 | self.assertEqual(sum_of_coefficients, 1) 38 | def 
test_coefficients_adaptive(self): 39 | for d in range(2, 6): 40 | combi_scheme = CombiScheme(dim=d) 41 | for l in range(12 - d): 42 | for l2 in range(l+1): 43 | combi_scheme.init_adaptive_combi_scheme(lmin=l2, lmax=l) 44 | combi_grids = combi_scheme.getCombiScheme(lmin=l2, lmax=l, do_print=False) 45 | sum_of_coefficients = 0 46 | for component_grid in combi_grids: 47 | sum_of_coefficients += component_grid.coefficient 48 | self.assertEqual(sum_of_coefficients, 1) 49 | 50 | def test_adaptive_scheme_updates(self): 51 | for d in range(2, 6): 52 | combi_scheme = CombiScheme(dim=d) 53 | for l in range(10 - d): 54 | for l2 in range(l+1): 55 | combi_scheme.init_adaptive_combi_scheme(lmin=l2, lmax=l) 56 | for i in range(10): 57 | combi_scheme.update_adaptive_combi(list(combi_scheme.active_index_set)[0]) 58 | combi_grids = combi_scheme.getCombiScheme(lmin=l2, lmax=l, do_print=False) 59 | sum_of_coefficients = 0 60 | for component_grid in combi_grids: 61 | sum_of_coefficients += component_grid.coefficient 62 | self.assertEqual(sum_of_coefficients, 1) 63 | 64 | def test_downward_closed_adaptive(self): 65 | for d in range(2, 6): 66 | combi_scheme = CombiScheme(dim=d) 67 | for l in range(10 - d): 68 | for l2 in range(l+1): 69 | combi_scheme.init_adaptive_combi_scheme(lmin=l2, lmax=l) 70 | for i in range(10): 71 | combi_scheme.update_adaptive_combi(list(combi_scheme.active_index_set)[0]) 72 | combi_grids = combi_scheme.getCombiScheme(lmin=l2, lmax=l, do_print=False) 73 | self.assertTrue(self.is_downward_closed(combi_scheme, combi_grids, l, l2)) 74 | 75 | def is_downward_closed(self, combi_scheme: CombiScheme, combi_grids: Sequence[ComponentGridInfo], lmax: int, lmin: int) -> bool: 76 | downward_closed = True 77 | for component_grid in combi_grids: 78 | downward_closed = downward_closed and self.backward_neighbours_present(combi_scheme, component_grid.levelvector, lmax, lmin) 79 | return downward_closed 80 | 81 | def backward_neighbours_present(self, combi_scheme: CombiScheme, levelvector: Sequence[int], lmax: int, lmin: int) -> bool: 82 | dim = len(levelvector) 83 | backward_neighbours_present = True 84 | backward_range = [range(lmin, levelvector[d] + 1) for d in range(dim)] 85 | for levelvec in get_cross_product(backward_range): 86 | backward_neighbours_present = backward_neighbours_present and combi_scheme.in_index_set(levelvec) 87 | return backward_neighbours_present 88 | 89 | if __name__ == '__main__': 90 | unittest.main() -------------------------------------------------------------------------------- /test/test_spatiallyAdaptiveSingleDimension2.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sparseSpACE 3 | from sparseSpACE.StandardCombi import * 4 | import math 5 | from sparseSpACE.Grid import * 6 | from sparseSpACE.Integrator import * 7 | from sparseSpACE.Function import * 8 | from sparseSpACE.spatiallyAdaptiveSingleDimension2 import * 9 | 10 | 11 | class TestSpatiallyAdaptiveSingleDimension2(unittest.TestCase): 12 | 13 | def test_integrate(self): 14 | a = -3 15 | b = 6 16 | for d in range(2, 5): 17 | grid = GlobalTrapezoidalGrid(a * np.ones(d), b * np.ones(d), boundary=True, modified_basis=False) 18 | f = FunctionLinear([10 * (i + 1) for i in range(d)]) 19 | operation = Integration(f, grid=grid, dim=d, reference_solution=f.getAnalyticSolutionIntegral(a * np.ones(d), b * np.ones(d))) 20 | errorOperator = ErrorCalculatorSingleDimVolumeGuided() 21 | for l in range(2, 4): 22 | for num_points in np.linspace(100, 1000, 5): 23 | 
spatiallyAdaptive = SpatiallyAdaptiveSingleDimensions2(a * np.ones(d), b * np.ones(d), version=3, 24 | operation=operation) 25 | _, _, _, combiintegral, _, error_array, _, surplus_error_array, _, _ = spatiallyAdaptive.performSpatiallyAdaptiv( 26 | lmin=1, lmax=l, errorOperator=errorOperator, tol=-1, max_evaluations=num_points, 27 | print_output=False) 28 | self.assertEqual(combiintegral, f.getAnalyticSolutionIntegral(a * np.ones(d), b * np.ones(d))) 29 | self.assertTrue(all([error == 0.0 for error in error_array])) 30 | 31 | a = -3 32 | b = 6 33 | for d in range(2, 4): 34 | grid = GlobalLagrangeGrid(a * np.ones(d), b * np.ones(d), boundary=True, modified_basis=False, p=2) 35 | f = FunctionPolynomial([10 * (i + 1) for i in range(d)], degree=2) 36 | operation = Integration(f, grid=grid, dim=d, reference_solution=f.getAnalyticSolutionIntegral(a * np.ones(d), b * np.ones(d))) 37 | errorOperator = ErrorCalculatorSingleDimVolumeGuided() 38 | for l in range(2, 3): 39 | for num_points in np.linspace(100, 1000, 5): 40 | spatiallyAdaptive = SpatiallyAdaptiveSingleDimensions2(a * np.ones(d), b * np.ones(d), version=3, 41 | operation=operation) 42 | _, _, _, combiintegral, _, error_array, _, surplus_error_array, _, _ = spatiallyAdaptive.performSpatiallyAdaptiv( 43 | lmin=1, lmax=l, errorOperator=errorOperator, tol=-1, max_evaluations=num_points, 44 | print_output=False) 45 | self.assertAlmostEqual(combiintegral[0] / f.getAnalyticSolutionIntegral(a * np.ones(d), b * np.ones(d)), 1.0, places=12) 46 | self.assertTrue(all([ -10**-13 * combiintegral[0] <= error <= 10**-13 * combiintegral[0] for error in error_array])) 47 | 48 | 49 | a = 2 50 | b = 6 51 | for d in range(2, 4): 52 | grid = GlobalTrapezoidalGrid(a * np.ones(d), b * np.ones(d), boundary=True, modified_basis=False) 53 | f = FunctionLinear([10 * (i + 1) for i in range(d)]) 54 | operation = Integration(f, grid=grid, dim=d, reference_solution=f.getAnalyticSolutionIntegral(a * np.ones(d), b * np.ones(d))) 55 | errorOperator = ErrorCalculatorSingleDimVolumeGuided() 56 | for l in range(2, 3): 57 | for num_points in np.linspace(100, 1000, 5): 58 | spatiallyAdaptive = SpatiallyAdaptiveSingleDimensions2(a * np.ones(d), b * np.ones(d), version=3, 59 | operation=operation) 60 | _, _, _, combiintegral, _, error_array, _, surplus_error_array, _, _ = spatiallyAdaptive.performSpatiallyAdaptiv( 61 | lmin=1, lmax=l, errorOperator=errorOperator, tol=-1, max_evaluations=num_points, 62 | print_output=False) 63 | self.assertEqual(combiintegral, f.getAnalyticSolutionIntegral(a * np.ones(d), b * np.ones(d))) 64 | self.assertTrue(all([error == 0.0 for error in error_array])) 65 | 66 | a = -6 67 | b = -3 68 | for d in range(2, 4): 69 | grid = GlobalTrapezoidalGrid(a * np.ones(d), b * np.ones(d), boundary=True, modified_basis=False) 70 | f = FunctionLinear([10 * (i + 1) for i in range(d)]) 71 | operation = Integration(f, grid=grid, dim=d, reference_solution=f.getAnalyticSolutionIntegral(a * np.ones(d), b * np.ones(d))) 72 | errorOperator = ErrorCalculatorSingleDimVolumeGuided() 73 | for l in range(2, 3): 74 | for num_points in np.linspace(100, 1000, 5): 75 | spatiallyAdaptive = SpatiallyAdaptiveSingleDimensions2(a * np.ones(d), b * np.ones(d), version=3, 76 | operation=operation) 77 | _, _, _, combiintegral, _, error_array, _, surplus_error_array, _, _ = spatiallyAdaptive.performSpatiallyAdaptiv( 78 | lmin=1, lmax=l, errorOperator=errorOperator, tol=-1, max_evaluations=num_points, 79 | print_output=False) 80 | self.assertEqual(combiintegral, 
f.getAnalyticSolutionIntegral(a * np.ones(d), b * np.ones(d))) 81 | self.assertTrue(all([error == 0.0 for error in error_array])) 82 | 83 | def test_interpolate(self): 84 | a = -3 85 | b = 6 86 | for d in range(2, 5): 87 | grid = GlobalTrapezoidalGrid(a * np.ones(d), b * np.ones(d), boundary=True, modified_basis=False) 88 | f = FunctionLinear([10 * (i + 1) for i in range(d)]) 89 | operation = Integration(f, grid=grid, dim=d, reference_solution=f.getAnalyticSolutionIntegral(a * np.ones(d), b * np.ones(d))) 90 | errorOperator = ErrorCalculatorSingleDimVolumeGuided() 91 | for l in range(2, 4): 92 | for num_points in np.linspace(100, 1000, 5): 93 | spatiallyAdaptive = SpatiallyAdaptiveSingleDimensions2(a * np.ones(d), b * np.ones(d), version=3, 94 | operation=operation) 95 | _, _, _, combiintegral, _, error_array, _, surplus_error_array, _, _ = spatiallyAdaptive.performSpatiallyAdaptiv( 96 | lmin=1, lmax=l, errorOperator=errorOperator, tol=-1, max_evaluations=num_points, 97 | print_output=False) 98 | points = get_cross_product_list([np.linspace(a, b, 5, endpoint=False) for _ in range(d)]) 99 | f_values = spatiallyAdaptive(points) 100 | for i, value in enumerate(f_values): 101 | factor = abs(f(points[i])[0]) if abs(f(points[i])[0]) != 0 else 1 102 | self.assertAlmostEqual((value[0] - f(points[i])[0]) / factor, 0.0, places=12) 103 | 104 | a = -1 105 | b = 6 106 | for d in range(2, 5): 107 | grid = GlobalLagrangeGrid(a * np.ones(d), b * np.ones(d), boundary=True, modified_basis=False, p=2) 108 | f = FunctionPolynomial([(i + 1) for i in range(d)], degree=2) 109 | operation = Integration(f, grid=grid, dim=d, reference_solution=f.getAnalyticSolutionIntegral(a * np.ones(d), b * np.ones(d))) 110 | errorOperator = ErrorCalculatorSingleDimVolumeGuided() 111 | for l in range(2, 4): 112 | for num_points in np.linspace(100, 1000, 5): 113 | spatiallyAdaptive = SpatiallyAdaptiveSingleDimensions2(a * np.ones(d), b * np.ones(d), version=3, 114 | operation=operation) 115 | _, _, _, combiintegral, _, error_array, _, surplus_error_array, _, _ = spatiallyAdaptive.performSpatiallyAdaptiv( 116 | lmin=1, lmax=l, errorOperator=errorOperator, tol=-1, max_evaluations=num_points, 117 | print_output=False) 118 | points = get_cross_product_list([np.linspace(a, b, 3, endpoint=False) for _ in range(d)]) 119 | f_values = spatiallyAdaptive(points) 120 | for i, value in enumerate(f_values): 121 | factor = abs(f(points[i])[0]) if abs(f(points[i])[0]) != 0 else 1 122 | self.assertAlmostEqual((value[0] - f(points[i])[0]) / factor, 0.0, places=10) 123 | 124 | if __name__ == '__main__': 125 | unittest.main() 126 | --------------------------------------------------------------------------------
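As a closing cross-check of the coefficients asserted in test_RombergWeightFactory.py: the WolframAlpha comments there all instantiate the product formula prod_{k != j} h_k^p / (h_k^p - h_j^p) over the step widths h_k, with p = 1 for ExtrapolationVersion.ROMBERG_LINEAR and p = 2 for ExtrapolationVersion.ROMBERG_DEFAULT. The sketch below (exact fractions, no sparseSpACE imports; the formula and the index convention are read off the test comments, not off the RombergWeightFactory source) reproduces the asserted values and shows that each coefficient set sums to 1, as a combination of consistent quadrature rules must.

from fractions import Fraction


def extrapolation_coefficient(steps, j, p):
    # Coefficient of the rule with step width steps[j] in a Richardson
    # extrapolation whose error expansion runs in powers of h**p: the
    # product over all other step widths h_k of h_k**p / (h_k**p - h_j**p).
    c = Fraction(1)
    for k, h_k in enumerate(steps):
        if k != j:
            c *= h_k**p / (h_k**p - steps[j]**p)
    return c


steps = [Fraction(1), Fraction(1, 2), Fraction(1, 4)]  # levels 0..2 on the unit domain

print(*(extrapolation_coefficient(steps, j, p=1) for j in range(3)))  # 1/3 -2 8/3
print(*(extrapolation_coefficient(steps, j, p=2) for j in range(3)))  # 1/45 -4/9 64/45
for p in (1, 2):
    assert sum(extrapolation_coefficient(steps, j, p) for j in range(3)) == 1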