├── tests ├── fixtures │ ├── __init__.py │ ├── mc_saop │ │ ├── mapping.json │ │ ├── saop.zip │ │ └── biosphere.zip │ ├── empty_biosphere │ │ ├── mapping.json │ │ ├── method.zip │ │ ├── biosphere.zip │ │ └── test_db.zip │ ├── svdm.zip │ ├── svdm2.zip │ ├── mc_basic.zip │ ├── ipcc_simple.zip │ ├── mc_basic │ │ ├── mapping.json │ │ ├── method.zip │ │ ├── test_db.zip │ │ └── biosphere.zip │ ├── mc_complete.zip │ ├── basic_fixture.zip │ ├── empty_biosphere.zip │ ├── example_db │ │ ├── ipcc.zip │ │ ├── example_db.zip │ │ └── mapping.json │ ├── array_sequential.zip │ ├── bw2io_example_db.zip │ ├── seq │ │ ├── seq.0.indices.npy │ │ ├── seq.0.samples.npy │ │ ├── seq.1.indices.npy │ │ ├── seq.1.samples.npy │ │ └── datapackage.json │ ├── multi_lca_simple_1.zip │ ├── multi_lca_simple_2.zip │ ├── multi_lca_simple_3.zip │ ├── multi_lca_simple_4.zip │ ├── multi_lca_simple_5.zip │ ├── empty_characterization.zip │ ├── multi │ │ ├── multi.0.indices.npy │ │ ├── multi.0.samples.npy │ │ ├── multi.1.indices.npy │ │ ├── multi.1.samples.npy │ │ └── datapackage.json │ ├── missing_characterization.zip │ ├── multi_lca_simple_weighting.zip │ ├── unseeded │ │ ├── unseeded.0.indices.npy │ │ ├── unseeded.0.samples.npy │ │ ├── unseeded.1.indices.npy │ │ ├── unseeded.1.samples.npy │ │ └── datapackage.json │ ├── basic_fixture │ │ ├── biosphere.data.npy │ │ ├── biosphere.indices.npy │ │ ├── technosphere.data.npy │ │ ├── technosphere.flip.npy │ │ ├── technosphere.indices.npy │ │ ├── eb-characterization.data.npy │ │ ├── eb-characterization.indices.npy │ │ └── datapackage.json │ ├── multi_lca_simple_normalization.zip │ ├── single-matrix │ │ ├── sm-fixture.tar.bz2 │ │ ├── generate_fixture.py │ │ └── Test fixture.ipynb │ ├── single-sample │ │ ├── single-sample.0.indices.npy │ │ ├── single-sample.0.samples.npy │ │ ├── single-sample.1.indices.npy │ │ ├── single-sample.1.samples.npy │ │ ├── single-sample.2.indices.npy │ │ ├── single-sample.2.samples.npy │ │ └── datapackage.json │ ├── bw2io_example_db_mapping.json │ └── presamples_basic.py ├── __init__.py ├── test_compatibility.py ├── test_2d_grid.py ├── test_utils.py ├── test_result_cache.py ├── test_svdm.py ├── test_dict_man.py ├── test_method_config.py ├── conftest.py ├── test_restricted_sparse_matrix_dict.py ├── test_to_dataframe.py ├── test_fast_supply_arrays.py └── test_fast_scores.py ├── MANIFEST.in ├── renovate.json ├── .github ├── workflows │ ├── custom-config.ini │ ├── trigger_submodule_update_reuse.yml │ ├── python-package-deploy.yml │ ├── python-test-superlu.yml │ ├── python-test-pardiso.yml │ └── python-test-umfpack-macos.yml └── CODE_OF_CONDUCT.md ├── src └── bw2calc │ ├── dense_lca.py │ ├── grid.py │ ├── iterative_lca.py │ ├── caching_lca.py │ ├── least_squares.py │ ├── errors.py │ ├── restricted_sparse_matrix_dict.py │ ├── __init__.py │ ├── result_cache.py │ ├── fast_supply_arrays.py │ ├── utils.py │ ├── single_value_diagonal_matrix.py │ ├── dictionary_manager.py │ ├── log_utils.py │ ├── fast_scores.py │ ├── method_config.py │ └── lca_base.py ├── README.md ├── .pre-commit-config.yaml ├── LICENSE ├── .gitignore └── pyproject.toml /tests/fixtures/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | -------------------------------------------------------------------------------- /tests/fixtures/mc_saop/mapping.json: 
-------------------------------------------------------------------------------- 1 | [[["biosphere", "1"], 1], [["saop", "1"], 2]] -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.txt 2 | include *.md 3 | include README.rst 4 | include src/bw2calc/*.py 5 | -------------------------------------------------------------------------------- /tests/fixtures/empty_biosphere/mapping.json: -------------------------------------------------------------------------------- 1 | [[["biosphere", "1"], 1], [["t", "1"], 2], [["t", "2"], 3]] -------------------------------------------------------------------------------- /tests/fixtures/svdm.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/svdm.zip -------------------------------------------------------------------------------- /tests/fixtures/svdm2.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/svdm2.zip -------------------------------------------------------------------------------- /tests/fixtures/mc_basic.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/mc_basic.zip -------------------------------------------------------------------------------- /tests/fixtures/ipcc_simple.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/ipcc_simple.zip -------------------------------------------------------------------------------- /tests/fixtures/mc_basic/mapping.json: -------------------------------------------------------------------------------- 1 | [[["biosphere", "1"], 1], [["biosphere", "2"], 2], [["test", "1"], 3], [["test", "2"], 4]] -------------------------------------------------------------------------------- /tests/fixtures/mc_complete.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/mc_complete.zip -------------------------------------------------------------------------------- /tests/fixtures/mc_saop/saop.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/mc_saop/saop.zip -------------------------------------------------------------------------------- /tests/fixtures/basic_fixture.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/basic_fixture.zip -------------------------------------------------------------------------------- /tests/fixtures/empty_biosphere.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/empty_biosphere.zip -------------------------------------------------------------------------------- /tests/fixtures/example_db/ipcc.zip: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/example_db/ipcc.zip -------------------------------------------------------------------------------- /tests/fixtures/mc_basic/method.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/mc_basic/method.zip -------------------------------------------------------------------------------- /tests/fixtures/array_sequential.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/array_sequential.zip -------------------------------------------------------------------------------- /tests/fixtures/bw2io_example_db.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/bw2io_example_db.zip -------------------------------------------------------------------------------- /tests/fixtures/mc_basic/test_db.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/mc_basic/test_db.zip -------------------------------------------------------------------------------- /tests/fixtures/mc_saop/biosphere.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/mc_saop/biosphere.zip -------------------------------------------------------------------------------- /tests/fixtures/seq/seq.0.indices.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/seq/seq.0.indices.npy -------------------------------------------------------------------------------- /tests/fixtures/seq/seq.0.samples.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/seq/seq.0.samples.npy -------------------------------------------------------------------------------- /tests/fixtures/seq/seq.1.indices.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/seq/seq.1.indices.npy -------------------------------------------------------------------------------- /tests/fixtures/seq/seq.1.samples.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/seq/seq.1.samples.npy -------------------------------------------------------------------------------- /tests/fixtures/mc_basic/biosphere.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/mc_basic/biosphere.zip -------------------------------------------------------------------------------- /tests/fixtures/multi_lca_simple_1.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/multi_lca_simple_1.zip 
-------------------------------------------------------------------------------- /tests/fixtures/multi_lca_simple_2.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/multi_lca_simple_2.zip -------------------------------------------------------------------------------- /tests/fixtures/multi_lca_simple_3.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/multi_lca_simple_3.zip -------------------------------------------------------------------------------- /tests/fixtures/multi_lca_simple_4.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/multi_lca_simple_4.zip -------------------------------------------------------------------------------- /tests/fixtures/multi_lca_simple_5.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/multi_lca_simple_5.zip -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:base" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /tests/fixtures/empty_biosphere/method.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/empty_biosphere/method.zip -------------------------------------------------------------------------------- /tests/fixtures/empty_characterization.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/empty_characterization.zip -------------------------------------------------------------------------------- /tests/fixtures/example_db/example_db.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/example_db/example_db.zip -------------------------------------------------------------------------------- /tests/fixtures/multi/multi.0.indices.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/multi/multi.0.indices.npy -------------------------------------------------------------------------------- /tests/fixtures/multi/multi.0.samples.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/multi/multi.0.samples.npy -------------------------------------------------------------------------------- /tests/fixtures/multi/multi.1.indices.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/multi/multi.1.indices.npy -------------------------------------------------------------------------------- 
/tests/fixtures/multi/multi.1.samples.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/multi/multi.1.samples.npy -------------------------------------------------------------------------------- /tests/fixtures/empty_biosphere/biosphere.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/empty_biosphere/biosphere.zip -------------------------------------------------------------------------------- /tests/fixtures/empty_biosphere/test_db.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/empty_biosphere/test_db.zip -------------------------------------------------------------------------------- /tests/fixtures/missing_characterization.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/missing_characterization.zip -------------------------------------------------------------------------------- /tests/fixtures/multi_lca_simple_weighting.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/multi_lca_simple_weighting.zip -------------------------------------------------------------------------------- /tests/fixtures/unseeded/unseeded.0.indices.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/unseeded/unseeded.0.indices.npy -------------------------------------------------------------------------------- /tests/fixtures/unseeded/unseeded.0.samples.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/unseeded/unseeded.0.samples.npy -------------------------------------------------------------------------------- /tests/fixtures/unseeded/unseeded.1.indices.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/unseeded/unseeded.1.indices.npy -------------------------------------------------------------------------------- /tests/fixtures/unseeded/unseeded.1.samples.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/unseeded/unseeded.1.samples.npy -------------------------------------------------------------------------------- /tests/fixtures/basic_fixture/biosphere.data.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/basic_fixture/biosphere.data.npy -------------------------------------------------------------------------------- /tests/fixtures/multi_lca_simple_normalization.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/multi_lca_simple_normalization.zip 
-------------------------------------------------------------------------------- /tests/fixtures/single-matrix/sm-fixture.tar.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/single-matrix/sm-fixture.tar.bz2 -------------------------------------------------------------------------------- /tests/fixtures/basic_fixture/biosphere.indices.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/basic_fixture/biosphere.indices.npy -------------------------------------------------------------------------------- /tests/fixtures/basic_fixture/technosphere.data.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/basic_fixture/technosphere.data.npy -------------------------------------------------------------------------------- /tests/fixtures/basic_fixture/technosphere.flip.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/basic_fixture/technosphere.flip.npy -------------------------------------------------------------------------------- /tests/fixtures/basic_fixture/technosphere.indices.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/basic_fixture/technosphere.indices.npy -------------------------------------------------------------------------------- /tests/fixtures/single-sample/single-sample.0.indices.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/single-sample/single-sample.0.indices.npy -------------------------------------------------------------------------------- /tests/fixtures/single-sample/single-sample.0.samples.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/single-sample/single-sample.0.samples.npy -------------------------------------------------------------------------------- /tests/fixtures/single-sample/single-sample.1.indices.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/single-sample/single-sample.1.indices.npy -------------------------------------------------------------------------------- /tests/fixtures/single-sample/single-sample.1.samples.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/single-sample/single-sample.1.samples.npy -------------------------------------------------------------------------------- /tests/fixtures/single-sample/single-sample.2.indices.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/single-sample/single-sample.2.indices.npy -------------------------------------------------------------------------------- /tests/fixtures/single-sample/single-sample.2.samples.npy: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/single-sample/single-sample.2.samples.npy -------------------------------------------------------------------------------- /tests/fixtures/basic_fixture/eb-characterization.data.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/basic_fixture/eb-characterization.data.npy -------------------------------------------------------------------------------- /tests/fixtures/basic_fixture/eb-characterization.indices.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/brightway-lca/brightway2-calc/HEAD/tests/fixtures/basic_fixture/eb-characterization.indices.npy -------------------------------------------------------------------------------- /.github/workflows/custom-config.ini: -------------------------------------------------------------------------------- 1 | [properties] 2 | umfpack-libdir = '/opt/homebrew/Cellar/suite-sparse/7.7.0/lib/' 3 | umfpack-includedir = '/opt/homebrew/Cellar/suite-sparse/7.7.0/include/suitesparse/' 4 | -------------------------------------------------------------------------------- /tests/fixtures/bw2io_example_db_mapping.json: -------------------------------------------------------------------------------- 1 | [["Driving an electric car", 1], ["Electric car", 2], ["Electric car battery", 3], ["Steel", 4], ["Electricity", 5], ["Lithium", 6], ["Driving an combustion car", 7], ["Combustion car", 8], ["CO2", 9]] -------------------------------------------------------------------------------- /src/bw2calc/dense_lca.py: -------------------------------------------------------------------------------- 1 | from numpy.linalg import solve 2 | 3 | from bw2calc.lca import LCA 4 | 5 | 6 | class DenseLCA(LCA): 7 | """Convert the `technosphere_matrix` to a numpy array and solve with `numpy.linalg`.""" 8 | 9 | def solve_linear_system(self): 10 | return solve(self.technosphere_matrix.toarray(), self.demand_array) 11 | -------------------------------------------------------------------------------- /tests/fixtures/example_db/mapping.json: -------------------------------------------------------------------------------- 1 | [[["Mobility example", "Driving an electric car"], 1], [["Mobility example", "edb43eb8b3e187759b38c55ae4228ffc"], 2], [["Mobility example", "7fabbb483617d7a9325f7dee36a11562"], 3], [["Mobility example", "Steel"], 4], [["Mobility example", "Electricity"], 5], [["Mobility example", "Lithium"], 6], [["Mobility example", "Driving an combustion car"], 7], [["Mobility example", "6f233d5e001dc3ea41b1a53bbb815521"], 8], [["Mobility example", "CO2"], 9]] -------------------------------------------------------------------------------- /.github/workflows/trigger_submodule_update_reuse.yml: -------------------------------------------------------------------------------- 1 | # This workflow re-uses the workflow 'trigger_submodule_update_main.yml' 2 | # from the brightway-documentation repository. 
3 | name: Re-Use Workflow - Create Workflow Dispatch (Trigger Submodule Update) 4 | 5 | on: 6 | push: 7 | branches: 8 | - main 9 | workflow_dispatch: 10 | 11 | jobs: 12 | re-use_workflow: 13 | uses: 14 | brightway-lca/brightway-documentation/.github/workflows/trigger_submodule_update_main.yml@main 15 | secrets: inherit # https://docs.github.com/en/actions/using-workflows/reusing-workflows#passing-secrets-to-nested-workflows 16 | -------------------------------------------------------------------------------- /tests/test_compatibility.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | from bw2calc.lca import LCA 4 | 5 | fixture_dir = Path(__file__).resolve().parent / "fixtures" 6 | 7 | 8 | def test_X_dict(): 9 | packages = [fixture_dir / "basic_fixture.zip"] 10 | lca = LCA({1: 1}, data_objs=packages) 11 | lca.lci() 12 | assert lca.product_dict == {1: 0, 2: 1} 13 | assert lca.activity_dict == {101: 0, 102: 1} 14 | assert lca.biosphere_dict == {1: 0} 15 | 16 | 17 | def test_reverse_dict(): 18 | packages = [fixture_dir / "basic_fixture.zip"] 19 | lca = LCA({1: 1}, data_objs=packages) 20 | lca.lci() 21 | ra, rp, rb = lca.reverse_dict() 22 | assert ra == {0: 101, 1: 102} 23 | assert rp == {0: 1, 1: 2} 24 | assert rb == {0: 1} 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Brightway2 Calculations 3 | --- 4 | 5 | [![pypi version](https://img.shields.io/pypi/v/bw2calc.svg)](https://pypi.org/project/bw2calc/) 6 | 7 | [![conda-forge version](https://img.shields.io/conda/vn/conda-forge/bw2calc.svg)](https://anaconda.org/conda-forge/bw2calc) 8 | 9 | [![bw2calc appveyor build status](https://ci.appveyor.com/api/projects/status/uqixaochulbu6vjv?svg=true)](https://ci.appveyor.com/project/cmutel/brightway2-calc) 10 | 11 | [![Test coverage report](https://coveralls.io/repos/bitbucket/cmutel/brightway2-calc/badge.svg?branch=master)](https://coveralls.io/bitbucket/cmutel/brightway2-calc?branch=default) 12 | 13 | This package provides the calculation engine for the [Brightway2 life 14 | cycle assessment framework](https://brightway.dev). [Online 15 | documentation](https://docs.brightway.dev) is available, and the source 16 | code is hosted on 17 | [Github](https://github.com/brightway-lca/brightway2-calc). 18 | 19 | The emphasis here has been on speed of solving the linear systems, for 20 | normal LCA calculations, graph traversal, or Monte Carlo uncertainty 21 | analysis. 22 | 23 | Relies on [bw_processing](https://github.com/brightway-lca/bw_processing) 24 | for input array formatting. 25 | -------------------------------------------------------------------------------- /src/bw2calc/grid.py: -------------------------------------------------------------------------------- 1 | from collections.abc import Mapping 2 | from typing import Any, Sequence 3 | 4 | 5 | class TwoDimensionalGrid(Mapping): 6 | def __init__(self, keys: Sequence[tuple[Any, Any]], values: Sequence[Any]): 7 | """Read-only dictionary wrapper for a strictly 2-dimensional grid. 
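        For example (illustrative keys and values, mirroring the unit tests in
        `tests/test_2d_grid.py`):

            >>> grid = TwoDimensionalGrid(keys=[("a", 1), ("a", 2)], values=[10, 11])
            >>> grid[("a", 2)]
            11
            >>> grid[("a", ...)]
            {1: 10, 2: 11}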
8 | 9 | Supports a very limited type of slicing - only `foo["something", ...]`.""" 10 | if not len(keys) == len(values): 11 | raise ValueError("`keys` must have same length as `values`") 12 | self.dict_ = {k: v for k, v in zip(keys, values)} 13 | 14 | def __getitem__(self, key: Any): 15 | if not len(key) == 2: 16 | raise KeyError 17 | first, second = key 18 | if first == Ellipsis and second == Ellipsis: 19 | raise KeyError 20 | elif first == Ellipsis: 21 | return {f: v for (f, s), v in self.dict_.items() if s == second} 22 | elif second == Ellipsis: 23 | return {s: v for (f, s), v in self.dict_.items() if f == first} 24 | else: 25 | return self.dict_[(first, second)] 26 | 27 | def __iter__(self): 28 | yield from self.dict_ 29 | 30 | def __len__(self): 31 | return len(self.dict_) 32 | -------------------------------------------------------------------------------- /.github/workflows/python-package-deploy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Inspired by: 3 | # https://packaging.python.org/en/latest/guides/publishing-package-distribution-releases-using-github-actions-ci-cd-workflows/ 4 | name: Publish Python 🐍 distributions 📦 to PyPI and TestPyPI 5 | 6 | on: 7 | push: 8 | branches: [main, develop] 9 | tags: '*' 10 | jobs: 11 | build-n-publish: 12 | name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI 13 | runs-on: ubuntu-latest 14 | permissions: 15 | id-token: write 16 | steps: 17 | - uses: actions/checkout@master 18 | - name: Set up Python 3.14 19 | uses: actions/setup-python@v6 20 | with: 21 | python-version: "3.14" 22 | - name: Install pypa/build 23 | run: >- 24 | python -m 25 | pip install 26 | build 27 | --user 28 | - name: Build a binary wheel and a source tarball 29 | run: >- 30 | python -m 31 | build 32 | --outdir dist/ 33 | . 34 | - name: Publish distribution 📦 to Test PyPI 35 | uses: pypa/gh-action-pypi-publish@release/v1 36 | with: 37 | repository_url: https://test.pypi.org/legacy/ 38 | skip_existing: true 39 | - name: Publish distribution 📦 to PyPI 40 | if: startsWith(github.ref, 'refs/tags') 41 | uses: pypa/gh-action-pypi-publish@release/v1 42 | -------------------------------------------------------------------------------- /src/bw2calc/iterative_lca.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import numpy as np 4 | from scipy.sparse.linalg import cgs 5 | 6 | from bw2calc import spsolve 7 | from bw2calc.lca import LCA 8 | 9 | 10 | class IterativeLCA(LCA): 11 | """ 12 | Solve `Ax=b` using iterative techniques instead of 13 | [LU factorization](http://en.wikipedia.org/wiki/LU_decomposition). 
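    The default solver is conjugate gradient squared (`scipy.sparse.linalg.cgs`);
    any callable with a compatible signature can be passed as `iter_solver`.
    A minimal sketch, assuming a hypothetical datapackage path:

        >>> from scipy.sparse.linalg import bicgstab
        >>> lca = IterativeLCA({1: 1}, data_objs=["fixture.zip"], iter_solver=bicgstab)
        >>> lca.lci()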
14 | """ 15 | 16 | def __init__(self, *args, iter_solver=cgs, **kwargs): 17 | super().__init__(*args, **kwargs) 18 | self.iter_solver = iter_solver 19 | self.guess = None 20 | 21 | def solve_linear_system(self, demand: Optional[np.ndarray] = None) -> None: 22 | if demand is None: 23 | demand = self.demand_array 24 | if not self.iter_solver or self.guess is None: 25 | self.guess = spsolve(self.technosphere_matrix, demand) 26 | if not self.guess.shape: 27 | self.guess = self.guess.reshape((1,)) 28 | return self.guess 29 | else: 30 | solution, status = self.iter_solver( 31 | self.technosphere_matrix, 32 | demand, 33 | x0=self.guess, 34 | atol="legacy", 35 | maxiter=1000, 36 | ) 37 | if status != 0: 38 | return spsolve(self.technosphere_matrix, demand) 39 | return solution 40 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | exclude: '^docs/conf.py' 2 | 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v4.4.0 6 | hooks: 7 | - id: trailing-whitespace 8 | - id: check-ast 9 | - id: check-json 10 | - id: check-merge-conflict 11 | - id: check-xml 12 | - id: check-yaml 13 | - id: debug-statements 14 | - id: end-of-file-fixer 15 | - id: requirements-txt-fixer 16 | - id: mixed-line-ending 17 | args: ['--fix=auto'] # replace 'auto' with 'lf' to enforce Linux/Mac line endings or 'crlf' for Windows 18 | 19 | ## If you want to avoid flake8 errors due to unused vars or imports: 20 | # - repo: https://github.com/myint/autoflake 21 | # rev: v1.4 22 | # hooks: 23 | # - id: autoflake 24 | # args: [ 25 | # --in-place, 26 | # --remove-all-unused-imports, 27 | # --remove-unused-variables, 28 | # ] 29 | 30 | - repo: https://github.com/pycqa/isort 31 | rev: 5.12.0 32 | hooks: 33 | - id: isort 34 | args: [--settings-path=pyproject.toml] 35 | 36 | - repo: https://github.com/psf/black 37 | rev: 22.12.0 38 | hooks: 39 | - id: black 40 | args: [--config=pyproject.toml] 41 | 42 | ## If like to embrace black styles even in the docs: 43 | # - repo: https://github.com/asottile/blacken-docs 44 | # rev: v1.12.0 45 | # hooks: 46 | # - id: blacken-docs 47 | # additional_dependencies: [black] 48 | 49 | - repo: https://github.com/PyCQA/flake8 50 | rev: 6.1.0 51 | hooks: 52 | - id: flake8 53 | additional_dependencies: [Flake8-pyproject] 54 | -------------------------------------------------------------------------------- /tests/test_2d_grid.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from bw2calc.grid import TwoDimensionalGrid 4 | 5 | 6 | @pytest.fixture 7 | def grid(): 8 | keys = [("a", 1), ("a", 2), ("b", 1), ("c", 3)] 9 | values = [10, 11, 12, 13] 10 | return TwoDimensionalGrid(keys, values) 11 | 12 | 13 | def test_2d_grid_inconsistent(): 14 | keys = [("a", 1), ("a", 2)] 15 | values = [10, 11, 12, 13] 16 | with pytest.raises(ValueError): 17 | TwoDimensionalGrid(keys, values) 18 | 19 | 20 | def test_2d_grid_empty(): 21 | grid = TwoDimensionalGrid([], []) 22 | assert len(grid) == 0 23 | 24 | 25 | def test_2d_grid_normal(grid): 26 | keys = [("a", 1), ("a", 2), ("b", 1), ("c", 3)] 27 | 28 | assert grid[("a", 2)] == 11 29 | assert len(grid) == 4 30 | 31 | for item, reference in zip(grid, keys): 32 | assert item == reference 33 | 34 | 35 | def test_2d_grid_get_slice(grid): 36 | assert grid[("a", ...)] == {1: 10, 2: 11} 37 | assert grid[(..., 1)] == {"a": 10, "b": 12} 38 | 39 | 40 | def 
test_2d_grid_slice_error(grid): 41 | with pytest.raises(KeyError): 42 | grid[(..., ...)] 43 | 44 | 45 | def test_2d_grid_wrong_keylength(grid): 46 | with pytest.raises(KeyError): 47 | grid["abc"] 48 | with pytest.raises(KeyError): 49 | grid["a"] 50 | 51 | 52 | def test_2d_grid_missing_key(grid): 53 | with pytest.raises(KeyError): 54 | grid[("d", 1)] 55 | 56 | 57 | def test_2d_grid_read_only(grid): 58 | with pytest.raises(TypeError): 59 | grid[("d", 3)] = "foo" 60 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2023, Chris Mutel. 2 | 3 | Redistribution and use in source and binary forms, with or without 4 | modification, are permitted provided that the following conditions are met: 5 | 6 | Redistributions of source code must retain the above copyright notice, this 7 | list of conditions and the following disclaimer. 8 | 9 | Redistributions in binary form must reproduce the above copyright notice, 10 | this list of conditions and the following disclaimer in the documentation 11 | and/or other materials provided with the distribution. 12 | 13 | Neither the name of ETH Zurich nor the names of its contributors may be used 14 | to endorse or promote products derived from this software without specific 15 | prior written permission. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 21 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 23 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 24 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 25 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 | -------------------------------------------------------------------------------- /src/bw2calc/caching_lca.py: -------------------------------------------------------------------------------- 1 | from scipy import sparse 2 | 3 | from bw2calc.lca import LCA 4 | from bw2calc.result_cache import ResultCache 5 | 6 | 7 | class CachingLCA(LCA): 8 | """Custom class which caches supply vectors. 9 | 10 | Cache resets upon iteration. If you do weird stuff outside of iteration you should probably 11 | use the regular LCA class.""" 12 | 13 | def __init__(self, *args, **kwargs): 14 | super().__init__(*args, **kwargs) 15 | self.cache = ResultCache() 16 | 17 | def __next__(self) -> None: 18 | self.cache.reset() 19 | super().__next__() 20 | 21 | def lci_calculation(self) -> None: 22 | """The actual LCI calculation. 23 | 24 | Separated from ``lci`` to be reusable in cases where the matrices are already built, e.g. 25 | ``redo_lci`` and Monte Carlo classes. 
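        For a single-activity demand, the supply vector is cached scaled to one
        unit, so later demands for the same activity reduce to a lookup plus a
        multiplication. A minimal sketch, assuming a hypothetical datapackage path:

            >>> lca = CachingLCA({1: 1}, data_objs=["fixture.zip"])
            >>> lca.lci()  # solves the linear system and fills the cache
            >>> lca.lci()  # the same demand now reuses the cached supply vector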
26 | 27 | """ 28 | if hasattr(self, "cache") and len(self.demand) == 1: 29 | key, value = list(self.demand.items())[0] 30 | try: 31 | self.supply_array = self.cache[key] * value 32 | except KeyError: 33 | self.supply_array = self.solve_linear_system() 34 | self.cache.add(key, self.supply_array.reshape((-1, 1)) / value) 35 | else: 36 | self.supply_array = self.solve_linear_system() 37 | # Turn 1-d array into diagonal matrix 38 | count = len(self.dicts.activity) 39 | self.inventory = self.biosphere_matrix @ sparse.spdiags( 40 | [self.supply_array], [0], count, count 41 | ) 42 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import multiprocessing 2 | from pathlib import Path 3 | 4 | import bw_processing as bwp 5 | import pytest 6 | from bw_processing.io_helpers import generic_directory_filesystem 7 | from fsspec.implementations.zip import ZipFileSystem as ZFS 8 | 9 | from bw2calc.utils import get_datapackage, get_seed 10 | 11 | fixture_dir = Path(__file__).resolve().parent / "fixtures" 12 | 13 | 14 | def test_get_seeds_different_under_mp_pool(): 15 | with multiprocessing.Pool(processes=4) as pool: 16 | results = list(pool.map(get_seed, [None] * 10)) 17 | assert sorted(set(results)) == sorted(results) 18 | 19 | 20 | def test_consistent_global_index(): 21 | # TODO 22 | pass 23 | 24 | 25 | def test_get_datapackage(): 26 | dp = bwp.load_datapackage(ZFS(fixture_dir / "basic_fixture.zip")) 27 | assert get_datapackage(dp) is dp 28 | 29 | assert get_datapackage(ZFS(fixture_dir / "basic_fixture.zip")).metadata == dp.metadata 30 | 31 | assert get_datapackage(fixture_dir / "basic_fixture.zip").metadata == dp.metadata 32 | 33 | assert get_datapackage(str(fixture_dir / "basic_fixture.zip")).metadata == dp.metadata 34 | 35 | dp = bwp.load_datapackage(generic_directory_filesystem(dirpath=fixture_dir / "basic_fixture")) 36 | assert get_datapackage(dp) is dp 37 | 38 | assert ( 39 | get_datapackage( 40 | generic_directory_filesystem(dirpath=fixture_dir / "basic_fixture") 41 | ).metadata 42 | == dp.metadata 43 | ) 44 | 45 | assert get_datapackage(str(fixture_dir / "basic_fixture")).metadata == dp.metadata 46 | 47 | with pytest.raises(TypeError): 48 | get_datapackage(1) 49 | -------------------------------------------------------------------------------- /src/bw2calc/least_squares.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | import numpy as np 4 | from scipy.sparse.linalg import lsmr 5 | 6 | from bw2calc.errors import EfficiencyWarning, NoSolutionFound 7 | from bw2calc.lca import LCA 8 | 9 | 10 | class LeastSquaresLCA(LCA): 11 | """Solve overdetermined technosphere matrix with more products than activities using 12 | least-squares approximation. 
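    Rather than solving `Ax = b` exactly, this minimizes the residual `||Ax - b||`,
    which stays well-defined when `A` has more rows (products) than columns
    (activities). A minimal sketch, assuming a hypothetical datapackage path:

        >>> lca = LeastSquaresLCA({1: 1}, data_objs=["overdetermined.zip"])
        >>> lca.lci()
        >>> lca.supply_array  # least-squares estimate of the supply vector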
13 | 14 | See also: 15 | 16 | * `Multioutput processes in LCA `_ 17 | * `LSMR in SciPy `_ # noqa: E501 18 | * `Another least-squares algorithm in SciPy `_ # noqa: E501 19 | 20 | """ 21 | 22 | def load_lci_data(self) -> None: 23 | super().load_lci_data(nonsquare_ok=True) 24 | 25 | def solve_linear_system(self, solver=lsmr) -> np.ndarray: 26 | if self.technosphere_matrix.shape[0] == self.technosphere_matrix.shape[1]: 27 | warnings.warn("Don't use LeastSquaresLCA for square matrices", EfficiencyWarning) 28 | self.solver_results = solver(self.technosphere_matrix, self.demand_array) 29 | if self.solver_results[1] not in {1, 2}: 30 | warnings.warn( 31 | "No suitable solution found - supply array is probably nonsense", 32 | NoSolutionFound, 33 | ) 34 | return self.solver_results[0] 35 | 36 | def decompose_technosphere(self) -> None: 37 | raise NotImplementedError("Can't decompose rectangular technosphere") 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | pyrightconfig.json 107 | -------------------------------------------------------------------------------- /src/bw2calc/errors.py: -------------------------------------------------------------------------------- 1 | class BW2CalcError(Exception): 2 | """Base class for bw2calc errors""" 3 | 4 | pass 5 | 6 | 7 | class OutsideTechnosphere(BW2CalcError): 8 | """The given demand array activity is not in the technosphere matrix""" 9 | 10 | pass 11 | 12 | 13 | class EfficiencyWarning(RuntimeWarning): 14 | """Least squares is much less efficient than direct computation for square, full-rank 15 | matrices""" 16 | 17 | pass 18 | 19 | 20 | class NoSolutionFound(UserWarning): 21 | """No solution to set of linear equations found within given constraints""" 
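    # Illustrative usage: `LeastSquaresLCA.solve_linear_system` emits this as a
    # warning rather than raising, so strict callers can promote it to an error:
    #
    #     with warnings.catch_warnings():
    #         warnings.simplefilter("error", NoSolutionFound)
    #         lca.lci()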
22 | 23 | pass 24 | 25 | 26 | class NonsquareTechnosphere(BW2CalcError): 27 | """The given data do not form a square technosphere matrix""" 28 | 29 | pass 30 | 31 | 32 | class MalformedFunctionalUnit(BW2CalcError): 33 | """The given functional unit cannot be understood""" 34 | 35 | pass 36 | 37 | 38 | class EmptyBiosphere(BW2CalcError): 39 | """Can't do impact assessment with no biosphere flows""" 40 | 41 | pass 42 | 43 | 44 | class AllArraysEmpty(BW2CalcError): 45 | """Can't load the numpy arrays if all of them are empty""" 46 | 47 | pass 48 | 49 | 50 | class NoArrays(BW2CalcError): 51 | """No arrays for given matrix""" 52 | 53 | pass 54 | 55 | 56 | class InconsistentGlobalIndex(BW2CalcError): 57 | """LCIA matrices are diagonal, and use the ``col`` field for regionalization. If multiple LCIA 58 | datapackages are present, they must use the same value for ``GLO``, the global location, in 59 | order for filtering for site-generic LCIA to work correctly.""" 60 | 61 | pass 62 | 63 | 64 | class MultipleValues(BW2CalcError): 65 | """Multiple values are present, but only one value is expected""" 66 | 67 | pass 68 | 69 | 70 | class InconsistentLCIA(BW2CalcError): 71 | """Provided weighting or normalization doesn't fit the impact category""" 72 | 73 | pass 74 | -------------------------------------------------------------------------------- /tests/fixtures/seq/datapackage.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "seq", 3 | "id": "seq", 4 | "profile": "data-package", 5 | "seed": "sequential", 6 | "resources": [ 7 | { 8 | "type": "technosphere", 9 | "samples": { 10 | "filepath": "seq.0.samples.npy", 11 | "md5": "1477771e61289fe84cc71525dfc62e66", 12 | "shape": [ 13 | 2, 14 | 3 15 | ], 16 | "dtype": "int64", 17 | "format": "npy", 18 | "mediatype": "application/octet-stream" 19 | }, 20 | "index": 0, 21 | "indices": { 22 | "filepath": "seq.0.indices.npy", 23 | "md5": "db4c6fed8ac37c0afee83d12c27bba16", 24 | "format": "npy", 25 | "mediatype": "application/octet-stream" 26 | }, 27 | "profile": "data-resource", 28 | "row from label": "input", 29 | "row to label": "row", 30 | "row dict": "_product_dict", 31 | "col from label": "output", 32 | "col to label": "col", 33 | "col dict": "_activity_dict", 34 | "matrix": "technosphere_matrix" 35 | }, 36 | { 37 | "type": "biosphere", 38 | "samples": { 39 | "filepath": "seq.1.samples.npy", 40 | "md5": "15a8f279f9aa8a76bda28c640616813b", 41 | "shape": [ 42 | 3, 43 | 3 44 | ], 45 | "dtype": "int64", 46 | "format": "npy", 47 | "mediatype": "application/octet-stream" 48 | }, 49 | "index": 1, 50 | "indices": { 51 | "filepath": "seq.1.indices.npy", 52 | "md5": "295c574b7ce73d59eb45e20f7df1d511", 53 | "format": "npy", 54 | "mediatype": "application/octet-stream" 55 | }, 56 | "profile": "data-resource", 57 | "row from label": "input", 58 | "row to label": "row", 59 | "row dict": "_biosphere_dict", 60 | "col from label": "output", 61 | "col to label": "col", 62 | "col dict": "_activity_dict", 63 | "matrix": "biosphere_matrix" 64 | } 65 | ], 66 | "ncols": 3 67 | } -------------------------------------------------------------------------------- /tests/fixtures/multi/datapackage.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "multi", 3 | "id": "multi", 4 | "profile": "data-package", 5 | "seed": 42, 6 | "resources": [ 7 | { 8 | "type": "technosphere", 9 | "samples": { 10 | "filepath": "multi.0.samples.npy", 11 | "md5": "1477771e61289fe84cc71525dfc62e66", 12 | 
"shape": [ 13 | 2, 14 | 3 15 | ], 16 | "dtype": "int64", 17 | "format": "npy", 18 | "mediatype": "application/octet-stream" 19 | }, 20 | "index": 0, 21 | "indices": { 22 | "filepath": "multi.0.indices.npy", 23 | "md5": "db4c6fed8ac37c0afee83d12c27bba16", 24 | "format": "npy", 25 | "mediatype": "application/octet-stream" 26 | }, 27 | "profile": "data-resource", 28 | "row from label": "input", 29 | "row to label": "row", 30 | "row dict": "_product_dict", 31 | "col from label": "output", 32 | "col to label": "col", 33 | "col dict": "_activity_dict", 34 | "matrix": "technosphere_matrix" 35 | }, 36 | { 37 | "type": "biosphere", 38 | "samples": { 39 | "filepath": "multi.1.samples.npy", 40 | "md5": "15a8f279f9aa8a76bda28c640616813b", 41 | "shape": [ 42 | 3, 43 | 3 44 | ], 45 | "dtype": "int64", 46 | "format": "npy", 47 | "mediatype": "application/octet-stream" 48 | }, 49 | "index": 1, 50 | "indices": { 51 | "filepath": "multi.1.indices.npy", 52 | "md5": "295c574b7ce73d59eb45e20f7df1d511", 53 | "format": "npy", 54 | "mediatype": "application/octet-stream" 55 | }, 56 | "profile": "data-resource", 57 | "row from label": "input", 58 | "row to label": "row", 59 | "row dict": "_biosphere_dict", 60 | "col from label": "output", 61 | "col to label": "col", 62 | "col dict": "_activity_dict", 63 | "matrix": "biosphere_matrix" 64 | } 65 | ], 66 | "ncols": 3 67 | } -------------------------------------------------------------------------------- /tests/fixtures/unseeded/datapackage.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "unseeded", 3 | "id": "unseeded", 4 | "profile": "data-package", 5 | "seed": null, 6 | "resources": [ 7 | { 8 | "type": "technosphere", 9 | "samples": { 10 | "filepath": "unseeded.0.samples.npy", 11 | "md5": "1477771e61289fe84cc71525dfc62e66", 12 | "shape": [ 13 | 2, 14 | 3 15 | ], 16 | "dtype": "int64", 17 | "format": "npy", 18 | "mediatype": "application/octet-stream" 19 | }, 20 | "index": 0, 21 | "indices": { 22 | "filepath": "unseeded.0.indices.npy", 23 | "md5": "db4c6fed8ac37c0afee83d12c27bba16", 24 | "format": "npy", 25 | "mediatype": "application/octet-stream" 26 | }, 27 | "profile": "data-resource", 28 | "row from label": "input", 29 | "row to label": "row", 30 | "row dict": "_product_dict", 31 | "col from label": "output", 32 | "col to label": "col", 33 | "col dict": "_activity_dict", 34 | "matrix": "technosphere_matrix" 35 | }, 36 | { 37 | "type": "biosphere", 38 | "samples": { 39 | "filepath": "unseeded.1.samples.npy", 40 | "md5": "15a8f279f9aa8a76bda28c640616813b", 41 | "shape": [ 42 | 3, 43 | 3 44 | ], 45 | "dtype": "int64", 46 | "format": "npy", 47 | "mediatype": "application/octet-stream" 48 | }, 49 | "index": 1, 50 | "indices": { 51 | "filepath": "unseeded.1.indices.npy", 52 | "md5": "295c574b7ce73d59eb45e20f7df1d511", 53 | "format": "npy", 54 | "mediatype": "application/octet-stream" 55 | }, 56 | "profile": "data-resource", 57 | "row from label": "input", 58 | "row to label": "row", 59 | "row dict": "_biosphere_dict", 60 | "col from label": "output", 61 | "col to label": "col", 62 | "col dict": "_activity_dict", 63 | "matrix": "biosphere_matrix" 64 | } 65 | ], 66 | "ncols": 3 67 | } -------------------------------------------------------------------------------- /src/bw2calc/restricted_sparse_matrix_dict.py: -------------------------------------------------------------------------------- 1 | from typing import Any 2 | 3 | from matrix_utils import SparseMatrixDict 4 | from pydantic import BaseModel 5 | 6 | 7 | 
class RestrictionsValidator(BaseModel): 8 | restrictions: dict[tuple[str, ...], list[tuple[str, ...]]] 9 | 10 | 11 | class RestrictedSparseMatrixDict(SparseMatrixDict): 12 | def __init__(self, restrictions: dict, *args, **kwargs): 13 | """Like SparseMatrixDict, but follows `restrictions` on what can be multiplied. 14 | 15 | Only for use with normalization and weighting.""" 16 | super().__init__(*args, **kwargs) 17 | RestrictionsValidator(restrictions=restrictions) 18 | self._restrictions = restrictions 19 | 20 | def _get_first_element(self, elem: Any) -> tuple: 21 | """Get the first LCIA key from `elem`. 22 | 23 | The keys can have the form `(("some", "lcia"), "functional-unit-id")` or 24 | `("some", "lcia")`.""" 25 | if isinstance(elem[0], tuple): 26 | return elem[0] 27 | else: 28 | assert isinstance(elem, tuple), f"Wrong type: {type(elem)} should be tuple" 29 | return elem 30 | 31 | def _concatenate(self, a: tuple, b: tuple) -> tuple: 32 | """Combine `a` and `b` while unwrapping `b`, if necessary.""" 33 | if isinstance(b[0], tuple): 34 | return (a, *b) 35 | else: 36 | return (a, b) 37 | 38 | def __matmul__(self, other: Any) -> SparseMatrixDict: 39 | """Define logic for `@` matrix multiplication operator. 40 | 41 | Note that the sparse matrix dict must come first, i.e. `self @ other`. 42 | """ 43 | if isinstance(other, (SparseMatrixDict, RestrictedSparseMatrixDict)): 44 | return SparseMatrixDict( 45 | { 46 | self._concatenate(a, b): c @ d 47 | for a, c in self.items() 48 | for b, d in other.items() 49 | if self._get_first_element(b) in self._restrictions[a] 50 | } 51 | ) 52 | else: 53 | return super().__matmul__(other) 54 | -------------------------------------------------------------------------------- /.github/workflows/python-test-superlu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This workflow will install Python dependencies and run tests 3 | # across operating systems, select versions of Python, and user + dev environments 4 | # For more info see: 5 | # https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 6 | 7 | name: Python 🐍 CI/CD tests 8 | 9 | on: 10 | push: 11 | branches: [main, develop] 12 | paths-ignore: # prevents workflow execution when only these types of files are modified 13 | - "**.md" # wildcards prevent files in any repo dir from triggering the workflow 14 | - "**.bib" 15 | - "**.ya?ml" # captures both .yml and .yaml 16 | - "LICENSE" 17 | - ".gitignore" 18 | pull_request: 19 | branches: [main, develop] 20 | types: [opened, reopened] # excludes synchronize to avoid redundant triggers from commits on PRs 21 | paths-ignore: 22 | - "**.md" 23 | - "**.bib" 24 | - "**.ya?ml" 25 | - "LICENSE" 26 | - ".gitignore" 27 | workflow_dispatch: # also allow manual trigger, for testing purposes 28 | 29 | jobs: 30 | build: 31 | runs-on: ${{ matrix.os }} 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | os: [ubuntu-latest, windows-latest, macos-latest] 36 | py-version: ["3.14"] 37 | 38 | steps: 39 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 40 | 41 | # general Python setup 42 | - name: Set up Python ${{ matrix.py-version }} 43 | uses: actions/setup-python@v6 44 | with: 45 | python-version: ${{ matrix.py-version }} 46 | 47 | - name: Update pip & install testing pkgs 48 | run: | 49 | python -VV 50 | python -m pip install --upgrade pip setuptools wheel 51 | 52 | # install testing 53 | - name: Install package and test deps 54 | run: | 55 | pip install .[testing] # 
install the package and the testing deps 56 | 57 | - name: Test with pytest 58 | run: | 59 | pytest 60 | 61 | - name: Upload coverage reports to Codecov 62 | uses: codecov/codecov-action@v5 63 | -------------------------------------------------------------------------------- /.github/workflows/python-test-pardiso.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This workflow will install Python dependencies and run tests 3 | # across operating systems, select versions of Python, and user + dev environments 4 | # For more info see: 5 | # https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 6 | 7 | name: Python 🐍 CI/CD tests 8 | 9 | on: 10 | push: 11 | branches: [main, develop] 12 | paths-ignore: # prevents workflow execution when only these types of files are modified 13 | - "**.md" # wildcards prevent files in any repo dir from triggering the workflow 14 | - "**.bib" 15 | - "**.ya?ml" # captures both .yml and .yaml 16 | - "LICENSE" 17 | - ".gitignore" 18 | pull_request: 19 | branches: [main, develop] 20 | types: [opened, reopened] # excludes synchronize to avoid redundant triggers from commits on PRs 21 | paths-ignore: 22 | - "**.md" 23 | - "**.bib" 24 | - "**.ya?ml" 25 | - "LICENSE" 26 | - ".gitignore" 27 | workflow_dispatch: # also allow manual trigger, for testing purposes 28 | 29 | jobs: 30 | build: 31 | runs-on: ${{ matrix.os }} 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | os: [ubuntu-latest, windows-latest] 36 | py-version: ["3.11", "3.14"] 37 | 38 | steps: 39 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 40 | 41 | # general Python setup 42 | - name: Set up Python ${{ matrix.py-version }} 43 | uses: actions/setup-python@v6 44 | with: 45 | python-version: ${{ matrix.py-version }} 46 | 47 | - name: Update pip & install testing pkgs 48 | run: | 49 | python -VV 50 | python -m pip install --upgrade pip setuptools wheel 51 | 52 | # install testing 53 | - name: Install package and test deps 54 | run: | 55 | pip install .[testing] # install the package and the testing deps 56 | pip install pypardiso 57 | 58 | - name: Test with pytest 59 | run: | 60 | pytest 61 | 62 | - name: Upload coverage reports to Codecov 63 | uses: codecov/codecov-action@v5 64 | -------------------------------------------------------------------------------- /src/bw2calc/__init__.py: -------------------------------------------------------------------------------- 1 | # flake8: noqa 2 | __all__ = [ 3 | "CachingLCA", 4 | "DenseLCA", 5 | "LCA", 6 | "LeastSquaresLCA", 7 | "IterativeLCA", 8 | "MethodConfig", 9 | "MultiLCA", 10 | "FastScoresOnlyMultiLCA", 11 | ] 12 | 13 | __version__ = "2.3" 14 | 15 | 16 | import platform 17 | import warnings 18 | 19 | from packaging.version import Version 20 | 21 | ARM = {"arm", "arm64", "aarch64_be", "aarch64", "armv8b", "armv8l"} 22 | AMD_INTEL = {"ia64", "i386", "i686", "x86_64"} 23 | UMFPACK_WARNING = """ 24 | It seems like you have an ARM architecture, but haven't installed scikit-umfpack: 25 | 26 | https://pypi.org/project/scikit-umfpack/ 27 | 28 | Installing it could give you much faster calculations. 29 | """ 30 | PYPARDISO_WARNING = """ 31 | It seems like you have an AMD/INTEL x64 architecture, but haven't installed pypardiso: 32 | 33 | https://pypi.org/project/pypardiso/ 34 | 35 | Installing it could give you much faster calculations. 
36 | """ 37 | 38 | PYPARDISO, UMFPACK = False, False 39 | 40 | try: 41 | from pypardiso import factorized, spsolve 42 | 43 | PYPARDISO = True 44 | except ImportError: 45 | pltf = platform.machine().lower() 46 | 47 | try: 48 | import scikits.umfpack 49 | 50 | UMFPACK = True 51 | except ModuleNotFoundError: 52 | if pltf in ARM: 53 | warnings.warn(UMFPACK_WARNING) 54 | elif pltf in AMD_INTEL: 55 | warnings.warn(PYPARDISO_WARNING) 56 | else: 57 | warnings.warn("No fast sparse solver found") 58 | except ImportError as e: 59 | warnings.warn(f"scikit-umfpack found but couldn't be imported. Error: {e}") 60 | 61 | from scipy.sparse.linalg import factorized, spsolve 62 | try: 63 | from presamples import PackagesDataLoader 64 | except ImportError: 65 | PackagesDataLoader = None 66 | 67 | 68 | from bw2calc.caching_lca import CachingLCA 69 | from bw2calc.dense_lca import DenseLCA 70 | from bw2calc.fast_scores import FastScoresOnlyMultiLCA 71 | from bw2calc.iterative_lca import IterativeLCA 72 | from bw2calc.lca import LCA 73 | from bw2calc.least_squares import LeastSquaresLCA 74 | from bw2calc.method_config import MethodConfig 75 | from bw2calc.multi_lca import MultiLCA 76 | -------------------------------------------------------------------------------- /.github/workflows/python-test-umfpack-macos.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This workflow will install Python dependencies and run tests 3 | # across operating systems, select versions of Python, and user + dev environments 4 | # For more info see: 5 | # https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions 6 | 7 | name: Python 🐍 CI/CD tests 8 | 9 | on: 10 | push: 11 | branches: [main, develop] 12 | paths-ignore: # prevents workflow execution when only these types of files are modified 13 | - "**.md" # wildcards prevent file in any repo dir from trigering workflow 14 | - "**.bib" 15 | - "**.ya?ml" # captures both .yml and .yaml 16 | - "LICENSE" 17 | - ".gitignore" 18 | pull_request: 19 | branches: [main, develop] 20 | types: [opened, reopened] # excludes syncronize to avoid redundant trigger from commits on PRs 21 | paths-ignore: 22 | - "**.md" 23 | - "**.bib" 24 | - "**.ya?ml" 25 | - "LICENSE" 26 | - ".gitignore" 27 | workflow_dispatch: # also allow manual trigger, for testing purposes 28 | 29 | jobs: 30 | build: 31 | runs-on: ${{ matrix.os }} 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | os: [macos-latest] 36 | py-version: ["3.11", "3.14"] 37 | 38 | steps: 39 | - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6 40 | 41 | - name: Set up Python ${{ matrix.py-version }} 42 | uses: actions/setup-python@v6 43 | with: 44 | python-version: ${{ matrix.py-version }} 45 | 46 | - name: Install suitesparse dependencies 47 | run: | 48 | brew install swig suite-sparse 49 | 50 | - name: Update pip & install testing pkgs 51 | run: | 52 | python -VV 53 | python -m pip install --upgrade pip setuptools wheel 54 | 55 | # install testing 56 | - name: Install package and test deps 57 | run: | 58 | pip install .[testing] # install the package and the testing deps 59 | 60 | - name: Install scikit-umfpack 61 | run: | 62 | echo "SuiteSparse version:" 63 | ls /opt/homebrew/Cellar/suite-sparse/ 64 | pip install scikit-umfpack 65 | 66 | - name: Test with pytest 67 | run: | 68 | pytest 69 | 70 | - name: Upload coverage reports to Codecov 71 | uses: codecov/codecov-action@v5 72 | 
--------------------------------------------------------------------------------
/src/bw2calc/result_cache.py:
--------------------------------------------------------------------------------
 1 | import math
 2 | from collections.abc import Mapping
 3 | from typing import List
 4 | 
 5 | import numpy as np
 6 | 
 7 | 
 8 | class ResultCache(Mapping):
 9 |     def __init__(self, block_size: int = 100):
10 |         """This class allows supply vector results to be cached."""
11 |         self.next_index = 0
12 |         self.block_size = block_size
13 |         self.indices = dict()
14 | 
15 |     def __getitem__(self, key: int) -> np.ndarray:
16 |         if not hasattr(self, "array"):
17 |             raise KeyError(key)
18 |         return self.array[:, self.indices[key]]
19 | 
20 |     def __len__(self) -> int:
21 |         return len(self.indices)
22 | 
23 |     def __iter__(self):
24 |         return iter(self.indices)
25 | 
26 |     def __contains__(self, key: int) -> bool:
27 |         return key in self.indices
28 | 
29 |     def add(self, indices: List[int], array: np.ndarray) -> None:
30 |         if not hasattr(self, "array"):
31 |             self.array = np.empty((array.shape[0], self.block_size), dtype=np.float32)
32 | 
33 |         if array.shape[0] != self.array.shape[0]:
34 |             raise ValueError(
35 |                 f"Wrong number of rows in array ({array.shape[0]} should be {self.array.shape[0]})"
36 |             )
37 |         if len(array.shape) != 2:
38 |             raise ValueError(
39 |                 f"`array` must be a numpy array with two dimensions (got {len(array.shape)})"
40 |             )
41 |         if len(indices) != array.shape[1]:
42 |             raise ValueError(
43 |                 f"`indices` has different length than `array` ({len(indices)} vs. {array.shape[1]})"
44 |             )
45 | 
46 |         if (total_columns := self.next_index + array.shape[1]) > self.array.shape[1]:
47 |             extra_blocks = math.ceil((total_columns - self.array.shape[1]) / self.block_size)
48 |             self.array = np.hstack(
49 |                 (self.array, np.empty((self.array.shape[0], self.block_size * extra_blocks), dtype=np.float32))
50 |             )
51 | 
52 |         # Would be faster with numpy bool arrays
53 |         for enum_index, data_obj_index in enumerate(indices):
54 |             if data_obj_index not in self.indices:
55 |                 self.indices[data_obj_index] = self.next_index
56 |                 self.array[:, self.next_index] = array[:, enum_index]
57 |                 self.next_index += 1
58 | 
59 |     def reset(self) -> None:
60 |         self.indices = dict()
61 |         self.next_index = 0
62 |         delattr(self, "array")
63 | 
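Editor's note: a minimal usage sketch for `ResultCache`, checked against the class above; the integer keys are arbitrary stand-ins for activity ids.

import numpy as np

from bw2calc.result_cache import ResultCache

cache = ResultCache(block_size=4)  # small block size to demonstrate growth

# Cache one supply vector under id 17
cache.add([17], np.arange(3, dtype=np.float32).reshape((-1, 1)))
assert 17 in cache
print(cache[17])  # [0. 1. 2.]

# Adding more columns than fit in the current block triggers `np.hstack` expansion
cache.add([18, 19, 20, 21], np.ones((3, 4), dtype=np.float32))
print(len(cache), cache.array.shape)  # 5 cached columns, array grown to (3, 8)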
--------------------------------------------------------------------------------
/tests/fixtures/single-matrix/generate_fixture.py:
--------------------------------------------------------------------------------
 1 | # -*- coding: utf-8 -*-
 2 | import itertools
 3 | import json
 4 | import os
 5 | import random
 6 | import tarfile
 7 | import tempfile
 8 | from pathlib import Path
 9 | 
10 | import numpy as np
11 | 
12 | 
13 | def generate_fixture():
14 |     dtype = [
15 |         ("input", np.uint32),
16 |         ("output", np.uint32),
17 |         ("row", np.uint32),
18 |         ("col", np.uint32),
19 |         ("type", np.uint8),
20 |         ("uncertainty_type", np.uint8),
21 |         ("amount", np.float32),
22 |         ("loc", np.float32),
23 |         ("scale", np.float32),
24 |         ("shape", np.float32),
25 |         ("minimum", np.float32),
26 |         ("maximum", np.float32),
27 |         ("negative", bool),
28 |     ]
29 | 
30 |     # Exchange types
31 |     # "generic production": 11,
32 |     # "generic consumption": 12,
33 | 
34 |     MAX_INT_32 = 4294967295
35 |     LETTERS = "abcdefgh"
36 |     NUMBERS = "1234"
37 |     GREEK = "αβγδεζηθ"
38 |     mapping = {k: i for i, k in enumerate(LETTERS + NUMBERS + GREEK)}
39 | 
40 |     a = [(a, b, random.random(), 12) for a, b in itertools.combinations(LETTERS, 2)]
41 |     b = [(x, x, 1, 11) for x in LETTERS]
42 |     c = [(random.choice(NUMBERS), random.choice(LETTERS), random.random(), 11) for _ in range(10)]
43 |     d = [(x, x, 1, 11) for x in NUMBERS]
44 |     e = [(x, y, random.random(), 11) for x, y in zip(GREEK, NUMBERS)]
45 |     f = [(x, x, 1, 11) for x in GREEK]
46 | 
47 |     data = a + b + c + d + e + f
48 |     array = np.zeros(len(data), dtype=dtype)
49 | 
50 |     for i, (a, b, c, d) in enumerate(data):
51 |         array[i] = (
52 |             mapping[a],
53 |             mapping[b],
54 |             MAX_INT_32,
55 |             MAX_INT_32,
56 |             d,
57 |             0,
58 |             c,
59 |             np.nan,
60 |             np.nan,
61 |             np.nan,
62 |             np.nan,
63 |             np.nan,
64 |             False,
65 |         )
66 | 
67 |     with tempfile.TemporaryDirectory() as t:
68 |         with tarfile.open(Path(t) / "sm-fixture.tar.bz2", "w:bz2") as f:
69 |             path = os.path.join(t, "array.npy")
70 |             np.save(path, array, allow_pickle=False)
71 |             f.add(path, "array.npy")
72 | 
73 |             path = os.path.join(t, "row.mapping")
74 |             with open(path, "w", encoding="utf-8") as j:
75 |                 json.dump(mapping, j, ensure_ascii=False)
76 |             f.add(path, "row.mapping")
77 |             f.add(path, "col.mapping")
78 | 
79 |             path = os.path.join(t, "categories.mapping")
80 |             with open(path, "w", encoding="utf-8") as j:
81 |                 json.dump({"foo": {g: mapping[g] for g in GREEK[:5]}}, j, ensure_ascii=False)
82 |             f.add(path, "categories.mapping")
83 | 
84 | 
85 | generate_fixture()
86 | 
--------------------------------------------------------------------------------
/tests/fixtures/single-sample/datapackage.json:
--------------------------------------------------------------------------------
 1 | {
 2 |     "name": "single-sample",
 3 |     "id": "single-sample",
 4 |     "profile": "data-package",
 5 |     "seed": 54321,
 6 |     "resources": [
 7 |         {
 8 |             "type": "technosphere",
 9 |             "samples": {
10 |                 "filepath": "single-sample.0.samples.npy",
11 |                 "md5": "ed9536e878bb6fdd1f7a92c6f706dc8f",
12 |                 "shape": [
13 |                     2,
14 |                     1
15 |                 ],
16 |                 "dtype": "int64",
17 |                 "format": "npy",
18 |                 "mediatype": "application/octet-stream"
19 |             },
20 |             "index": 0,
21 |             "indices": {
22 |                 "filepath": "single-sample.0.indices.npy",
23 |                 "md5": "db4c6fed8ac37c0afee83d12c27bba16",
24 |                 "format": "npy",
25 |                 "mediatype": "application/octet-stream"
26 |             },
27 |             "profile": "data-resource",
28 |             "row from label": "input",
29 |             "row to label": "row",
30 |             "row dict": "_product_dict",
31 |             "col from label": "output",
32 |             "col to label": "col",
33 |             "col dict": "_activity_dict",
34 |             "matrix": "technosphere_matrix"
35 |         },
36 |         {
37 |             "type": "biosphere",
38 |             "samples": {
39 |                 "filepath": "single-sample.1.samples.npy",
40 |                 "md5": "411dfca5e3a545469637cd22179c20e5",
41 |                 "shape": [
42 |                     3,
43 |                     1
44 |                 ],
45 |                 "dtype": "int64",
46 |                 "format": "npy",
47 |                 "mediatype": "application/octet-stream"
48 |             },
49 |             "index": 1,
50 |             "indices": {
51 |                 "filepath": "single-sample.1.indices.npy",
52 |                 "md5": "295c574b7ce73d59eb45e20f7df1d511",
53 |                 "format": "npy",
54 |                 "mediatype": "application/octet-stream"
55 |             },
56 |             "profile": "data-resource",
57 |             "row from label": "input",
58 |             "row to label": "row",
59 |             "row dict": "_biosphere_dict",
60 |             "col from label": "output",
61 |             "col to label": "col",
62 |             "col dict": "_activity_dict",
63 |             "matrix": "biosphere_matrix"
64 |         },
65 |         {
66 |             "type": "cf",
67 |             "samples": {
68 |                 "filepath": "single-sample.2.samples.npy",
69 |                 "md5": "d55c3a518f0edd8587b390f1e15d8d95",
70 |                 "shape": [
71 |                     1,
72 |                     1
73 |                 ],
74 |                 "dtype": "int64",
75 |                 "format": "npy",
76 |                 "mediatype": "application/octet-stream"
77 |             },
78 |             "index": 2,
79 |             "indices": {
80 |                 "filepath": "single-sample.2.indices.npy",
81 |                 "md5": "5a3fb91a28e0c1607c4d8e329862d7ee",
82 |                 "format": "npy",
83 |                 "mediatype": "application/octet-stream"
84 |             },
85 |             "profile": "data-resource",
86 |             "row from label": "flow",
87 |             "row to label": "row",
88 |             "row dict": "_biosphere_dict",
89 |             "matrix": "characterization_matrix"
90 |         }
91 |     ],
92 |     "ncols": 1
93 | }
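Editor's note: the descriptor above follows the older presamples-style layout, so inspecting it needs nothing beyond the standard library. A sketch, assuming the fixture path relative to the repository root.

import json
from pathlib import Path

descriptor = Path("tests/fixtures/single-sample/datapackage.json")
metadata = json.loads(descriptor.read_text())

# Each resource pairs an `indices` file with a `samples` file and names the
# matrix it feeds
for resource in metadata["resources"]:
    print(
        resource["type"],
        "->",
        resource["matrix"],
        "samples shape:",
        resource["samples"]["shape"],
    )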
"mediatype": "application/octet-stream" 84 | }, 85 | "profile": "data-resource", 86 | "row from label": "flow", 87 | "row to label": "row", 88 | "row dict": "_biosphere_dict", 89 | "matrix": "characterization_matrix" 90 | } 91 | ], 92 | "ncols": 1 93 | } -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=68.0"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "bw2calc" 7 | license = "BSD-3-Clause" 8 | authors = [ 9 | { name=" Chris Mutel", email="cmutel@gmail.com" } 10 | ] 11 | maintainers = [ 12 | { name=" Chris Mutel", email="cmutel@gmail.com" } 13 | ] 14 | description = "The calculation engine for the Brightway life cycle assessment framework." 15 | readme = "README.md" 16 | dynamic = ["version"] 17 | classifiers = [ 18 | "Intended Audience :: Science/Research", 19 | "Natural Language :: English", 20 | "Programming Language :: Python :: 3", 21 | "Operating System :: OS Independent", 22 | "Topic :: Scientific/Engineering", 23 | ] 24 | requires-python = ">=3.9" 25 | dependencies = [ 26 | "bw_processing >=1.0", 27 | "fsspec", 28 | "matrix_utils >=0.6", 29 | "numpy <3", 30 | "pandas", 31 | "pydantic", 32 | "scipy", 33 | "stats_arrays", 34 | "xarray", 35 | ] 36 | 37 | [project.urls] 38 | source = "https://github.com/brightway-lca/brightway2-calc" 39 | homepage = "https://github.com/brightway-lca/brightway2-calc" 40 | tracker = "https://github.com/brightway-lca/brightway2-calc/issues" 41 | 42 | [project.optional-dependencies] 43 | testing = [ 44 | "bw2calc", 45 | "bw2data>=4.5", 46 | "pytest", 47 | "pytest-cov", 48 | "pytest-randomly", 49 | "python-coveralls" 50 | ] 51 | dev = [ 52 | "build", 53 | "pre-commit", 54 | "pylint", 55 | "pytest", 56 | "pytest-cov", 57 | "setuptools", 58 | "Flake8-pyproject", 59 | ] 60 | 61 | [tool.setuptools] 62 | include-package-data = true 63 | package-dir = {"" = "src"} 64 | packages = ["bw2calc"] 65 | 66 | [tool.setuptools.dynamic] 67 | version = {attr = "bw2calc.__version__"} 68 | 69 | [tool.pytest.ini_options] 70 | addopts = "--cov bw2calc --cov-report term-missing --verbose" 71 | pythonpath = ["src"] 72 | norecursedirs = [ 73 | "dist", 74 | "build", 75 | ".tox" 76 | ] 77 | testpaths = ["tests"] 78 | 79 | [tool.flake8] 80 | # Some sane defaults for the code style checker flake8 81 | max-line-length = 100 82 | extend_ignore = ["E203", "W503"] 83 | # ^ Black-compatible 84 | # E203 and W503 have edge cases handled by black 85 | per-file-ignores = [ 86 | 'src/bw2calc/__init__.py:F401', 87 | 'src/bw2calc/__init__.py:E402', 88 | ] 89 | exclude = [ 90 | ".tox", 91 | "build", 92 | "dist", 93 | ".eggs", 94 | "docs/conf.py", 95 | ] 96 | 97 | [tool.black] 98 | line-length = 100 99 | 100 | [tool.isort] 101 | line_length = 100 102 | multi_line_output = 3 103 | include_trailing_comma = true 104 | force_grid_wrap = 0 105 | use_parentheses = true 106 | ensure_newline_before_comments = true 107 | skip = ["src/bw2calc/__init__.py"] 108 | -------------------------------------------------------------------------------- /src/bw2calc/fast_supply_arrays.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from bw2calc import PYPARDISO, UMFPACK, factorized 4 | 5 | if PYPARDISO: 6 | from pypardiso.pardiso_wrapper import PyPardisoSolver 7 | else: 8 | PyPardisoSolver = None 9 | 10 | 11 | class FastSupplyArraysMixin: 12 | 
"""Mixin class for `LCA` which allows for fast calculations of multiple inventories. 13 | 14 | Requires either `scikits-umfpack` or `pypardiso`. Must be mixed into an `LCA` class.""" 15 | 16 | chunk_size = 50 17 | 18 | def set_chunk_size(self, chunk_size: int) -> None: 19 | if chunk_size <= 0: 20 | raise ValueError(f"Invalid chunk_size: {chunk_size}") 21 | 22 | self.chunk_size = chunk_size 23 | 24 | def calculate_supply_arrays(self, demand_arrays: list[np.ndarray]) -> np.ndarray: 25 | """Calculate multiple supply arrays in a single calculation. 26 | 27 | Much faster than individual calculations, especially when using PARDISO. 28 | 29 | Returns a numpy array with dimensions `[process scaling amounts, demands]`. `demands` are 30 | given in the same order as `demand_arrays`.""" 31 | if PYPARDISO: 32 | return self._calculate_pardiso(demand_arrays) 33 | elif UMFPACK: 34 | return self._calculate_umfpack(demand_arrays) 35 | else: 36 | raise ValueError( 37 | "`FastSupplyArraysMixin` only supported with PARDISO and UMFPACK solvers" 38 | ) 39 | 40 | def _calculate_umfpack(self, demands: list[np.ndarray]) -> np.ndarray: 41 | # There is no speed up here, but it's convenient to have the same API 42 | if not demands: 43 | # Return empty array with correct shape when no demands provided 44 | return np.zeros((self.technosphere_matrix.shape[0], 0)) 45 | 46 | solver = factorized(self.technosphere_matrix.tocsc()) 47 | supply_array = np.zeros((self.technosphere_matrix.shape[0], len(demands))) 48 | 49 | for index, arr in enumerate(demands): 50 | supply_array[:, index] = solver(arr) 51 | 52 | return supply_array 53 | 54 | def _calculate_pardiso(self, demands: list[np.ndarray]) -> np.ndarray: 55 | if not demands: 56 | # Return empty array with correct shape when no demands provided 57 | return np.zeros((self.technosphere_matrix.shape[0], 0)) 58 | 59 | demand_array = np.vstack([arr for arr in demands]).T 60 | supply_arrays = [] 61 | 62 | solver = PyPardisoSolver() 63 | solver.factorize(self.technosphere_matrix) 64 | 65 | num_chunks = demand_array.shape[1] // self.chunk_size + 1 66 | for demand_chunk in np.array_split(demand_array, num_chunks, axis=1): 67 | b = solver._check_b(self.technosphere_matrix, demand_chunk) 68 | solver.set_phase(33) 69 | supply_arrays.append(solver._call_pardiso(self.technosphere_matrix, b)) 70 | 71 | return np.hstack(supply_arrays) 72 | -------------------------------------------------------------------------------- /src/bw2calc/utils.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | from pathlib import Path 3 | 4 | import bw_processing as bwp 5 | import numpy as np 6 | from bw_processing.io_helpers import generic_directory_filesystem 7 | from fsspec import AbstractFileSystem 8 | from fsspec.implementations.zip import ZipFileSystem 9 | 10 | from bw2calc.errors import InconsistentGlobalIndex 11 | 12 | 13 | def get_seed(seed=None): 14 | """Get valid Numpy random seed value""" 15 | # https://groups.google.com/forum/#!topic/briansupport/9ErDidIBBFM 16 | random = np.random.RandomState(seed) 17 | return random.randint(0, 2147483647) 18 | 19 | 20 | def consistent_global_index(packages, matrix="characterization_matrix"): 21 | global_list = [ 22 | resource.get("global_index") 23 | for package in packages 24 | for resource in package.filter_by_attribute("matrix", matrix) 25 | .filter_by_attribute("kind", "indices") 26 | .resources 27 | ] 28 | if len(set(global_list)) > 1: 29 | raise InconsistentGlobalIndex( 30 | f"Multiple global index 
--------------------------------------------------------------------------------
/src/bw2calc/utils.py:
--------------------------------------------------------------------------------
 1 | import datetime
 2 | from pathlib import Path
 3 | 
 4 | import bw_processing as bwp
 5 | import numpy as np
 6 | from bw_processing.io_helpers import generic_directory_filesystem
 7 | from fsspec import AbstractFileSystem
 8 | from fsspec.implementations.zip import ZipFileSystem
 9 | 
10 | from bw2calc.errors import InconsistentGlobalIndex
11 | 
12 | 
13 | def get_seed(seed=None):
14 |     """Get valid Numpy random seed value"""
15 |     # https://groups.google.com/forum/#!topic/briansupport/9ErDidIBBFM
16 |     random = np.random.RandomState(seed)
17 |     return random.randint(0, 2147483647)
18 | 
19 | 
20 | def consistent_global_index(packages, matrix="characterization_matrix"):
21 |     global_list = [
22 |         resource.get("global_index")
23 |         for package in packages
24 |         for resource in package.filter_by_attribute("matrix", matrix)
25 |         .filter_by_attribute("kind", "indices")
26 |         .resources
27 |     ]
28 |     if len(set(global_list)) > 1:
29 |         raise InconsistentGlobalIndex(
30 |             f"Multiple global index values found: {global_list}. If multiple LCIA datapackages"
31 |             + " are present, they must use the same value for ``GLO``, the global location, in"
32 |             + " order for filtering for site-generic LCIA to work correctly."
33 |         )
34 |     return global_list[0] if global_list else None
35 | 
36 | 
37 | def wrap_functional_unit(dct):
38 |     """Transform functional units for effective logging.
39 |     Turns ``Activity`` objects into their keys."""
40 |     data = []
41 |     for key, amount in dct.items():
42 |         if isinstance(key, int):
43 |             data.append({"id": key, "amount": amount})
44 |         else:
45 |             try:
46 |                 data.append({"database": key[0], "code": key[1], "amount": amount})
47 |             except TypeError:
48 |                 data.append({"key": key, "amount": amount})
49 |     return data
50 | 
51 | 
52 | def get_datapackage(obj):
53 |     if isinstance(obj, bwp.DatapackageBase):
54 |         return obj
55 |     elif isinstance(obj, AbstractFileSystem):
56 |         return bwp.load_datapackage(obj)
57 |     elif isinstance(obj, Path) and obj.suffix.lower() == ".zip":
58 |         return bwp.load_datapackage(ZipFileSystem(obj))
59 |     elif isinstance(obj, Path) and obj.is_dir():
60 |         return bwp.load_datapackage(generic_directory_filesystem(dirpath=obj))
61 |     elif isinstance(obj, str) and obj.lower().endswith(".zip") and Path(obj).is_file():
62 |         return bwp.load_datapackage(ZipFileSystem(Path(obj)))
63 |     elif isinstance(obj, str) and Path(obj).is_dir():
64 |         return bwp.load_datapackage(generic_directory_filesystem(dirpath=Path(obj)))
65 | 
66 |     else:
67 |         raise TypeError("Unknown input type for loading datapackage: {}: {}".format(type(obj), obj))
68 | 
69 | 
70 | def utc_now() -> datetime.datetime:
71 |     """Get current datetime compatible with Py 3.8 to 3.12"""
72 |     if hasattr(datetime, "UTC"):
73 |         return datetime.datetime.now(datetime.UTC)
74 |     else:
75 |         return datetime.datetime.utcnow()
76 | 
--------------------------------------------------------------------------------
/tests/test_result_cache.py:
--------------------------------------------------------------------------------
 1 | import numpy as np
 2 | import pytest
 3 | 
 4 | from bw2calc.result_cache import ResultCache
 5 | 
 6 | 
 7 | def test_first_use():
 8 |     rc = ResultCache()
 9 |     assert not hasattr(rc, "array")
10 |     rc.add([5], np.arange(5).reshape((-1, 1)))
11 | 
12 |     assert rc.array.shape == (5, 100)
13 |     assert np.allclose(rc.array[:, 0], np.arange(5))
14 |     assert rc.indices[5] == 0
15 | 
16 | 
17 | def test_missing():
18 |     rc = ResultCache()
19 |     rc.add([5], np.arange(5).reshape((-1, 1)))
20 | 
21 |     with pytest.raises(KeyError):
22 |         rc[10]
23 | 
24 | 
25 | def test_missing_before_first_use():
26 |     rc = ResultCache()
27 | 
28 |     with pytest.raises(KeyError):
29 |         rc[10]
30 | 
31 | 
32 | def test_getitem():
33 |     rc = ResultCache()
34 |     rc.add([5], np.arange(5).reshape((-1, 1)))
35 | 
36 |     assert np.allclose(rc[5], np.arange(5))
37 | 
38 | 
39 | def test_contains():
40 |     rc = ResultCache()
41 |     rc.add([5], np.arange(5).reshape((-1, 1)))
42 | 
43 |     assert 5 in rc
44 | 
45 | 
46 | def test_add_errors():
47 |     rc = ResultCache()
48 |     rc.add([5], np.arange(5).reshape((-1, 1)))
49 | 
50 |     with pytest.raises(ValueError):
51 |         rc.add([5], np.arange(10).reshape((-1, 1)))
52 |     with pytest.raises(ValueError):
53 |         rc.add([5], np.arange(5).reshape((-1, 1, 1)))
54 |     with pytest.raises(ValueError):
55 |         rc.add([5, 2], np.arange(5).reshape((-1, 1)))
56 | 
57 | 
58 | def test_add_2d():
59 |     rc = ResultCache()
60 |     rc.add([5], np.arange(5).reshape((-1, 1)))
61 |     rc.add([7, 10], np.arange(5, 15).reshape((5, 2)))
62 | 
63 |     assert rc.array.shape == (5, 100)
64 | 
assert np.allclose(rc.array[:, 1], [5, 7, 9, 11, 13]) 65 | assert np.allclose(rc.array[:, 2], [6, 8, 10, 12, 14]) 66 | assert rc.indices[5] == 0 67 | assert rc.indices[7] == 1 68 | assert rc.indices[10] == 2 69 | assert np.allclose(rc[7], [5, 7, 9, 11, 13]) 70 | assert np.allclose(rc[10], [6, 8, 10, 12, 14]) 71 | 72 | 73 | def test_dont_overwrite_existing(): 74 | rc = ResultCache() 75 | rc.add([5], np.arange(5).reshape((-1, 1))) 76 | rc.add([5, 10], np.arange(5, 15).reshape((5, 2))) 77 | 78 | assert rc.array.shape == (5, 100) 79 | assert np.allclose(rc.array[:, 0], np.arange(5)) 80 | assert np.allclose(rc.array[:, 1], [6, 8, 10, 12, 14]) 81 | assert rc.indices[5] == 0 82 | assert rc.indices[10] == 1 83 | assert np.allclose(rc[5], np.arange(5)) 84 | assert np.allclose(rc[10], [6, 8, 10, 12, 14]) 85 | 86 | 87 | def test_expand(): 88 | rc = ResultCache(10) 89 | rc.add(list(range(25)), np.arange(100).reshape((4, -1))) 90 | 91 | assert rc.array.shape == (4, 30) 92 | assert np.allclose(rc.array[0, :25], range(25)) 93 | assert np.allclose(rc.array[:, 0], [0, 25, 50, 75]) 94 | 95 | 96 | def test_reset(): 97 | rc = ResultCache(10) 98 | rc.add(list(range(25)), np.arange(100).reshape((4, -1))) 99 | rc.reset() 100 | 101 | assert not hasattr(rc, "array") 102 | assert rc.indices == {} 103 | assert rc.next_index == 0 104 | -------------------------------------------------------------------------------- /.github/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 7 | 8 | ## Our Standards 9 | 10 | Examples of behavior that contributes to creating a positive environment include: 11 | 12 | * Using welcoming and inclusive language 13 | * Being respectful of differing viewpoints and experiences 14 | * Gracefully accepting constructive criticism 15 | * Focusing on what is best for the community 16 | * Showing empathy towards other community members 17 | 18 | Examples of unacceptable behavior by participants include: 19 | 20 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 21 | * Trolling, insulting/derogatory comments, and personal or political attacks 22 | * Public or private harassment 23 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 24 | * Other conduct which could reasonably be considered inappropriate in a professional setting 25 | 26 | ## Our Responsibilities 27 | 28 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 29 | 30 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 
31 | 32 | ## Scope 33 | 34 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 35 | 36 | ## Enforcement 37 | 38 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at cmutel@gmail.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 39 | 40 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 41 | 42 | ## Attribution 43 | 44 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 45 | 46 | [homepage]: http://contributor-covenant.org 47 | [version]: http://contributor-covenant.org/version/1/4/ 48 | -------------------------------------------------------------------------------- /tests/fixtures/basic_fixture/datapackage.json: -------------------------------------------------------------------------------- 1 | { 2 | "profile": "data-package", 3 | "name": "d754801d12024d63a80dd4237a9f6218", 4 | "id": "ec75e9034674415ea4d29ecc63432517", 5 | "licenses": [ 6 | { 7 | "name": "ODC-PDDL-1.0", 8 | "path": "http://opendatacommons.org/licenses/pddl/", 9 | "title": "Open Data Commons Public Domain Dedication and License v1.0" 10 | } 11 | ], 12 | "resources": [ 13 | { 14 | "profile": "data-resource", 15 | "format": "npy", 16 | "mediatype": "application/octet-stream", 17 | "name": "technosphere.indices", 18 | "matrix": "technosphere_matrix", 19 | "kind": "indices", 20 | "path": "technosphere.indices.npy", 21 | "group": "technosphere", 22 | "category": "vector", 23 | "nrows": 3 24 | }, 25 | { 26 | "profile": "data-resource", 27 | "format": "npy", 28 | "mediatype": "application/octet-stream", 29 | "name": "technosphere.data", 30 | "matrix": "technosphere_matrix", 31 | "kind": "data", 32 | "path": "technosphere.data.npy", 33 | "group": "technosphere", 34 | "category": "vector", 35 | "nrows": 3 36 | }, 37 | { 38 | "profile": "data-resource", 39 | "format": "npy", 40 | "mediatype": "application/octet-stream", 41 | "name": "technosphere.flip", 42 | "matrix": "technosphere_matrix", 43 | "kind": "flip", 44 | "path": "technosphere.flip.npy", 45 | "group": "technosphere", 46 | "category": "vector", 47 | "nrows": 3 48 | }, 49 | { 50 | "profile": "data-resource", 51 | "format": "npy", 52 | "mediatype": "application/octet-stream", 53 | "name": "biosphere.indices", 54 | "matrix": "biosphere_matrix", 55 | "kind": "indices", 56 | "path": "biosphere.indices.npy", 57 | "group": "biosphere", 58 | "category": "vector", 59 | "nrows": 1 60 | }, 61 | { 62 | "profile": "data-resource", 63 | "format": "npy", 64 | "mediatype": "application/octet-stream", 65 | "name": "biosphere.data", 66 | "matrix": "biosphere_matrix", 67 | "kind": "data", 68 | "path": "biosphere.data.npy", 69 
| "group": "biosphere", 70 | "category": "vector", 71 | "nrows": 1 72 | }, 73 | { 74 | "profile": "data-resource", 75 | "format": "npy", 76 | "mediatype": "application/octet-stream", 77 | "name": "eb-characterization.indices", 78 | "matrix": "characterization_matrix", 79 | "kind": "indices", 80 | "path": "eb-characterization.indices.npy", 81 | "group": "eb-characterization", 82 | "global_index": 0, 83 | "nrows": 1, 84 | "category": "vector" 85 | }, 86 | { 87 | "profile": "data-resource", 88 | "format": "npy", 89 | "mediatype": "application/octet-stream", 90 | "name": "eb-characterization.data", 91 | "matrix": "characterization_matrix", 92 | "kind": "data", 93 | "path": "eb-characterization.data.npy", 94 | "group": "eb-characterization", 95 | "global_index": 0, 96 | "nrows": 1, 97 | "category": "vector" 98 | } 99 | ], 100 | "created": "2021-10-15T12:38:10.757116Z", 101 | "combinatorial": false, 102 | "sequential": false, 103 | "seed": null, 104 | "sum_intra_duplicates": true, 105 | "sum_inter_duplicates": false 106 | } -------------------------------------------------------------------------------- /tests/test_svdm.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import numpy as np 4 | import pytest 5 | from matrix_utils.errors import AllArraysEmpty 6 | 7 | from bw2calc.errors import MultipleValues 8 | from bw2calc.single_value_diagonal_matrix import SingleValueDiagonalMatrix as SVDM 9 | from bw2calc.utils import get_datapackage 10 | 11 | fixture_dir = Path(__file__).resolve().parent / "fixtures" 12 | 13 | 14 | def test_svdm_missing_dimension_kwarg(): 15 | with pytest.raises(TypeError): 16 | SVDM(packages=["something"], matrix="something") 17 | 18 | 19 | def test_svdm_no_data(): 20 | with pytest.raises(AllArraysEmpty): 21 | SVDM( 22 | packages=[get_datapackage(fixture_dir / "basic_fixture.zip")], 23 | matrix="weighting_matrix", 24 | dimension=5, 25 | ) 26 | 27 | 28 | def test_svdm_multiple_dataresources(): 29 | with pytest.raises(MultipleValues): 30 | SVDM( 31 | packages=[ 32 | get_datapackage(fixture_dir / "svdm.zip"), 33 | get_datapackage(fixture_dir / "svdm2.zip"), 34 | ], 35 | matrix="weighting_matrix", 36 | dimension=500, 37 | ) 38 | 39 | 40 | def test_svdm_multiple_values(): 41 | with pytest.raises(MultipleValues): 42 | SVDM( 43 | packages=[get_datapackage(fixture_dir / "basic_fixture.zip")], 44 | matrix="technosphere_matrix", 45 | dimension=500, 46 | ) 47 | 48 | 49 | def test_svdm_basic(): 50 | obj = SVDM( 51 | packages=[ 52 | get_datapackage(fixture_dir / "svdm.zip"), 53 | ], 54 | matrix="weighting_matrix", 55 | use_arrays=False, 56 | dimension=500, 57 | ) 58 | assert obj.matrix.shape == (500, 500) 59 | assert obj.matrix.sum() == 500 * 42 60 | assert np.allclose(obj.matrix.tocoo().row, np.arange(500)) 61 | assert np.allclose(obj.matrix.tocoo().col, np.arange(500)) 62 | 63 | 64 | def test_svdm_iteration(): 65 | obj = SVDM( 66 | packages=[ 67 | get_datapackage(fixture_dir / "svdm.zip"), 68 | ], 69 | matrix="weighting_matrix", 70 | use_arrays=False, 71 | dimension=500, 72 | ) 73 | assert obj.matrix.shape == (500, 500) 74 | assert obj.matrix.sum() == 500 * 42 75 | next(obj) 76 | assert obj.matrix.shape == (500, 500) 77 | assert obj.matrix.sum() == 500 * 42 78 | 79 | 80 | def test_svdm_distributions(): 81 | obj = SVDM( 82 | packages=[ 83 | get_datapackage(fixture_dir / "svdm.zip"), 84 | ], 85 | matrix="weighting_matrix", 86 | use_distributions=True, 87 | use_arrays=False, 88 | dimension=500, 89 | ) 90 | assert 
obj.matrix.shape == (500, 500) 91 | total = obj.matrix.sum() 92 | assert total 93 | next(obj) 94 | assert total != obj.matrix.sum() 95 | next(obj) 96 | assert total != obj.matrix.sum() 97 | 98 | 99 | def test_svdm_arrays(): 100 | obj = SVDM( 101 | packages=[ 102 | get_datapackage(fixture_dir / "svdm.zip"), 103 | ], 104 | matrix="weighting_matrix", 105 | use_vectors=False, 106 | use_arrays=True, 107 | dimension=500, 108 | ) 109 | assert obj.matrix.shape == (500, 500) 110 | assert obj.matrix.sum() == 500 * 1 111 | next(obj) 112 | assert obj.matrix.shape == (500, 500) 113 | assert obj.matrix.sum() == 500 * 2 114 | next(obj) 115 | assert obj.matrix.shape == (500, 500) 116 | assert obj.matrix.sum() == 500 * 3 117 | -------------------------------------------------------------------------------- /tests/test_dict_man.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from bw2calc.dictionary_manager import DictionaryManager as DM 4 | from bw2calc.dictionary_manager import ReversibleRemappableDictionary as RRD 5 | 6 | 7 | def test_dm_initiation(): 8 | DM() 9 | 10 | 11 | def test_dm_setting(): 12 | dm = DM() 13 | dm.foo = {1: 2} 14 | 15 | 16 | def test_dm_getting(): 17 | dm = DM() 18 | with pytest.raises(ValueError): 19 | dm.foo[1] 20 | dm.foo = {1: 2} 21 | assert dm.foo[1] == 2 22 | with pytest.raises(KeyError): 23 | dm.foo[2] 24 | 25 | 26 | def test_dm_str(): 27 | dm = DM() 28 | str(dm) 29 | dm.foo = {1: 2} 30 | str(dm) 31 | 32 | 33 | def test_dm_iter(): 34 | dm = DM() 35 | assert list(dm) == [] 36 | dm.foo = {1: 2} 37 | assert list(dm) == ["foo"] 38 | 39 | 40 | def test_dm_len(): 41 | dm = DM() 42 | assert len(dm) == 0 43 | dm.foo = {1: 2} 44 | assert len(dm) == 1 45 | 46 | 47 | def test_rrd_input_error(): 48 | with pytest.raises(ValueError): 49 | RRD(1) 50 | 51 | 52 | def test_rrd_basic(): 53 | r = RRD({1: 2}) 54 | assert r[1] == 2 55 | with pytest.raises(KeyError): 56 | r[2] 57 | 58 | 59 | def test_rrd_reversed(): 60 | r = RRD({1: 2}) 61 | assert r.reversed == {2: 1} 62 | r = RRD({1: 10, 2: 10}) 63 | assert r.reversed == {10: 2} 64 | 65 | 66 | def test_rrd_reversed_create_on_demand(): 67 | r = RRD({1: 2}) 68 | assert not hasattr(r, "_reversed") 69 | r.reversed 70 | assert hasattr(r, "_reversed") 71 | 72 | 73 | def test_rrd_remapping_multiple(): 74 | r = RRD({1: 2}) 75 | r.remap({1: "foo"}) 76 | r.remap({"foo": "bar"}) 77 | assert r["bar"] == 2 78 | 79 | 80 | def test_rrd_remapping(): 81 | r = RRD({1: 2}) 82 | r.remap({1: "foo"}) 83 | assert r["foo"] == 2 84 | 85 | 86 | def test_rrd_remapping_deletes_reversed(): 87 | r = RRD({1: 2}) 88 | r.reversed 89 | assert hasattr(r, "_reversed") 90 | r.remap({1: "foo"}) 91 | assert not hasattr(r, "_reversed") 92 | 93 | 94 | def test_rrd_remapping_sets_original(): 95 | r = RRD({1: 2}) 96 | r.remap({1: "foo"}) 97 | assert r.original == {1: 2} 98 | assert hasattr(r, "_original") 99 | 100 | 101 | def test_rrd_remapping_multiple_original(): 102 | r = RRD({1: 2}) 103 | r.remap({1: "foo"}) 104 | r.remap({"foo": "bar"}) 105 | assert r.original == {"foo": 2} 106 | 107 | 108 | def test_rrd_str(): 109 | assert str(RRD({1: 2})) 110 | 111 | 112 | def test_rrd_unmap(): 113 | r = RRD({1: 2}) 114 | assert r[1] == 2 115 | r.remap({1: "foo"}) 116 | assert r["foo"] == 2 117 | with pytest.raises(KeyError): 118 | r[1] 119 | r.unmap() 120 | assert r[1] == 2 121 | with pytest.raises(KeyError): 122 | r["foo"] 123 | 124 | 125 | def test_rrd_unmap_reversed(): 126 | r = RRD({1: 2}) 127 | assert not hasattr(r, "_reversed") 
128 |     r.reversed
129 |     assert hasattr(r, "_reversed")
130 |     r.remap({1: "foo"})
131 |     assert not hasattr(r, "_reversed")
132 |     r.reversed
133 |     assert hasattr(r, "_reversed")
134 |     r.unmap()
135 |     assert not hasattr(r, "_reversed")
136 |     assert r.reversed == {2: 1}
137 |     assert hasattr(r, "_reversed")
138 | 
139 | 
140 | def test_rrd_unmap_original():
141 |     r = RRD({1: 2})
142 |     r.remap({1: "foo"})
143 |     assert r["foo"] == 2
144 |     assert r.original == {1: 2}
145 |     r.unmap()
146 |     assert r.original == {1: 2}
147 | 
148 | 
149 | def test_rrd_iter():
150 |     r = RRD({1: 2})
151 |     assert list(r) == [1]
152 | 
153 | 
154 | def test_rrd_len():
155 |     assert len(RRD({1: 2})) == 1
156 | 
--------------------------------------------------------------------------------
/src/bw2calc/single_value_diagonal_matrix.py:
--------------------------------------------------------------------------------
 1 | from typing import Any, Callable, Sequence, Union
 2 | 
 3 | import numpy as np
 4 | from bw_processing import Datapackage
 5 | from matrix_utils import MappedMatrix
 6 | from scipy import sparse
 7 | 
 8 | from bw2calc.errors import MultipleValues
 9 | 
10 | 
11 | class SingleValueDiagonalMatrix(MappedMatrix):
12 |     """A scipy sparse matrix handler which takes in ``bw_processing`` data packages. Row and column
13 |     ids are mapped to matrix indices, and a matrix is constructed.
14 | 
15 |     Used primarily in the weighting step of life cycle impact assessment.
16 | 
17 |     `indexer_override` allows for custom indexer behaviour. Indexers should follow a simple API:
18 |     they must support `.__next__()`, and have the attribute `.index`, which returns an integer.
19 | 
20 |     `custom_filter` allows you to remove some data based on their indices. It is applied to all
21 |     resource groups. If you need more fine-grained control, process the matrix after
22 |     construction/iteration. `custom_filter` should take the indices array as an input, and return a
23 |     Numpy boolean array with the same length as the indices array.
24 | 
25 |     Args:
26 | 
27 |     * packages: A list of Datapackage objects.
28 |     * matrix: The string identifying the matrix to be built.
29 |     * use_vectors: Flag to use vector data from datapackages
30 |     * use_arrays: Flag to use array data from datapackages
31 |     * use_distributions: Flag to use `stats_arrays` distribution data from datapackages
32 |     * row_mapper: Optional instance of `ArrayMapper`. Used when matrices must align.
33 |     * col_mapper: Optional instance of `ArrayMapper`. Used when matrices must align.
34 |     * seed_override: Optional integer. Overrides the RNG seed given in the datapackage, if any.
35 |     * indexer_override: Parameter for custom indexers. See above.
36 |     * dimension: The dimension of the (square) diagonal matrix to be built.
37 |     * custom_filter: Callable for function to filter data based on `indices` values. See above.
38 |     * empty_ok: If False, raise `AllArraysEmpty` if the matrix would be empty
39 | 
40 |     """
41 | 
42 |     def __init__(
43 |         self,
44 |         *,
45 |         packages: Sequence[Datapackage],
46 |         matrix: str,
47 |         dimension: int,
48 |         use_vectors: bool = True,
49 |         use_arrays: bool = True,
50 |         use_distributions: bool = False,
51 |         seed_override: Union[int, None] = None,
52 |         indexer_override: Any = None,
53 |         custom_filter: Union[Callable, None] = None,
54 |         **kwargs,
55 |     ):
56 |         self.dimension = dimension
57 | 
58 |         # We let the parent class build an incorrect matrix, mappers, etc., and then ignore them.
59 |         # It would be riskier to copy/paste out parts of the `__init__`, and
60 |         # remember to be consistent in the future. The resource cost
61 |         # of this approach is very low.
62 |         super().__init__(
63 |             packages=packages,
64 |             matrix=matrix,
65 |             use_vectors=use_vectors,
66 |             use_arrays=use_arrays,
67 |             use_distributions=use_distributions,
68 |             seed_override=seed_override,
69 |             indexer_override=indexer_override,
70 |             diagonal=True,
71 |             custom_filter=custom_filter,
72 |         )
73 | 
74 |         if self.raw_data.shape != (1,):
75 |             raise MultipleValues(
76 |                 (
77 |                     "Multiple ({}) numerical values found, but only a single numerical value is "
78 |                     + "allowed. Data packages:\n\t{}"
79 |                 ).format(len(self.raw_data), "\n\t".join([str(x) for x in self.packages]))
80 |             )
81 | 
82 |         self.matrix = sparse.coo_matrix(
83 |             (
84 |                 np.ones(self.dimension),
85 |                 (np.arange(self.dimension), np.arange(self.dimension)),
86 |             ),
87 |             (self.dimension, self.dimension),
88 |             dtype=np.float64,
89 |         ).tocsr()
90 |         self.rebuild_matrix()
91 | 
92 |     def rebuild_matrix(self):
93 |         self.raw_data = np.hstack([group.calculate()[2] for group in self.groups])
94 |         self.matrix.data *= 0
95 |         self.matrix.data += float(self.raw_data[0])
96 | 
--------------------------------------------------------------------------------
/src/bw2calc/dictionary_manager.py:
--------------------------------------------------------------------------------
 1 | from collections.abc import Mapping
 2 | from functools import partial
 3 | 
 4 | 
 5 | def resolved(f):
 6 |     """Decorator that resolves a ``partial`` function before it can be used"""
 7 | 
 8 |     def wrapper(self, *args):
 9 |         if not self._resolved:
10 |             self._dict = self._partial()
11 |             self._resolved = True
12 |             delattr(self, "_partial")
13 |         return f(self, *args)
14 | 
15 |     return wrapper
16 | 
17 | 
18 | class ReversibleRemappableDictionary(Mapping):
19 |     """A dictionary that can be easily remapped or reversed.
20 | 
21 |     Perhaps overkill, but at the time it was easier than creating many dictionaries on the LCA
22 |     object itself.
23 | 
24 |     Example usage::
25 | 
26 |         In [1]: from bw2calc.dictionary_manager import ReversibleRemappableDictionary
27 | 
28 |         In [2]: d = ReversibleRemappableDictionary({1: 2})
29 | 
30 |         In [3]: d.reversed
31 |         Out[3]: {2: 1}
32 | 
33 |         In [4]: d.remap({1: "foo"})
34 | 
35 |         In [5]: d['foo']
36 |         Out[5]: 2
37 | 
38 |         In [6]: d.original
39 |         Out[6]: {1: 2}
40 | 
41 |         In [7]: d.reversed
42 |         Out[7]: {2: 'foo'}
43 | 
44 |         In [8]: d.unmap()
45 | 
46 |         In [9]: d[1]
47 |         Out[9]: 2
48 | 
49 |     """
50 | 
51 |     def __init__(self, obj):
52 |         if isinstance(obj, partial):
53 |             self._resolved = False
54 |             self._partial = obj
55 |         elif isinstance(obj, Mapping):
56 |             self._resolved = True
57 |             self._dict = obj
58 |         else:
59 |             raise ValueError("Input must be a dict")
60 | 
61 |     @property
62 |     @resolved
63 |     def reversed(self):
64 |         if not hasattr(self, "_reversed"):
65 |             self._reversed = {v: k for k, v in self.items()}
66 |         return self._reversed
67 | 
68 |     @property
69 |     @resolved
70 |     def original(self):
71 |         if not hasattr(self, "_original"):
72 |             return self
73 |         return self._original
74 | 
75 |     @resolved
76 |     def remap(self, mapping):
77 |         """Transform the keys based on the mapping dict ``mapping``.
78 | 
79 |         ``mapping`` doesn't need to cover every key in the original.
80 | 
81 |         Example usage:
82 | 
83 |             {1: 2}.remap({1: "foo"}) >> {"foo": 2}
84 | 
85 |         """
86 |         if not isinstance(mapping, Mapping):
87 |             raise ValueError
88 |         if hasattr(self, "_reversed"):
89 |             delattr(self, "_reversed")
90 |         self._original = self._dict.copy()
91 |         self._dict = {mapping.get(k, k): v for k, v in self.items()}
92 | 
93 |     @resolved
94 |     def unmap(self):
95 |         """Restore dict to original state."""
96 |         if hasattr(self, "_reversed"):
97 |             delattr(self, "_reversed")
98 |         self._dict = self._original
99 |         delattr(self, "_original")
100 | 
101 |     @resolved
102 |     def __getitem__(self, key):
103 |         return self._dict[key]
104 | 
105 |     @resolved
106 |     def __iter__(self):
107 |         return iter(self._dict)
108 | 
109 |     @resolved
110 |     def __len__(self):
111 |         return len(self._dict)
112 | 
113 |     @resolved
114 |     def __str__(self):
115 |         return self._dict.__str__()
116 | 
117 | 
118 | class DictionaryManager:
119 |     """Class that handles dictionaries which can be remapped or reversed.
120 | 
121 |     Usage::
122 | 
123 |         dm = DictionaryManager()
124 |         dm.foo = {1: 2}
125 |         dm.foo[1]
126 |         >> 2
127 | 
128 |     """
129 | 
130 |     def __init__(self):
131 |         self._dicts = {}
132 | 
133 |     def __getattr__(self, attr):
134 |         try:
135 |             return self._dicts[attr]
136 |         except KeyError:
137 |             raise ValueError("This dictionary has not yet been created")
138 | 
139 |     def __setattr__(self, attr, value):
140 |         if attr == "_dicts":
141 |             super().__setattr__(attr, value)
142 |         else:
143 |             self._dicts[attr] = ReversibleRemappableDictionary(value)
144 | 
145 |     def __len__(self):
146 |         return len(self._dicts)
147 | 
148 |     def __iter__(self):
149 |         return iter(self._dicts)
150 | 
151 |     def __str__(self):
152 |         return "Dictionary manager with {} keys".format(len(self))
153 | 
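Editor's note: a small sketch tying `DictionaryManager` and `ReversibleRemappableDictionary` together, checked against the code above; the mapping values are arbitrary.

from bw2calc.dictionary_manager import DictionaryManager

dm = DictionaryManager()
dm.activity = {101: 0, 102: 1}  # e.g. matrix positions keyed by integer id

dm.activity.remap({101: ("db", "a"), 102: ("db", "b")})
print(dm.activity[("db", "a")])  # 0
print(dm.activity.original)      # {101: 0, 102: 1}
print(dm.activity.reversed)      # {0: ('db', 'a'), 1: ('db', 'b')}

dm.activity.unmap()
print(dm.activity[101])          # 0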
--------------------------------------------------------------------------------
/src/bw2calc/log_utils.py:
--------------------------------------------------------------------------------
  1 | import ast
  2 | import json
  3 | import logging
  4 | import os
  5 | import uuid
  6 | 
  7 | from bw2calc.utils import utc_now
  8 | 
  9 | """Adapted from json-log-formatter (https://github.com/marselester/json-log-formatter)
 10 | 
 11 | The MIT License (MIT)
 12 | 
 13 | Copyright (c) 2015 Marsel Mavletkulov
 14 | 
 15 | Permission is hereby granted, free of charge, to any person obtaining a copy
 16 | of this software and associated documentation files (the "Software"), to deal
 17 | in the Software without restriction, including without limitation the rights
 18 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 19 | copies of the Software, and to permit persons to whom the Software is
 20 | furnished to do so, subject to the following conditions:
 21 | 
 22 | The above copyright notice and this permission notice shall be included in all
 23 | copies or substantial portions of the Software.
 24 | 
 25 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 26 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 27 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 28 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 29 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 30 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 31 | SOFTWARE."""
 32 | 
 33 | BUILTIN_ATTRS = {
 34 |     "args",
 35 |     "asctime",
 36 |     "created",
 37 |     "exc_info",
 38 |     "exc_text",
 39 |     "filename",
 40 |     "funcName",
 41 |     "levelname",
 42 |     "levelno",
 43 |     "lineno",
 44 |     "module",
 45 |     "msecs",
 46 |     "message",
 47 |     "msg",
 48 |     "name",
 49 |     "pathname",
 50 |     "process",
 51 |     "processName",
 52 |     "relativeCreated",
 53 |     "stack_info",
 54 |     "thread",
 55 |     "threadName",
 56 | }
 57 | 
 58 | 
 59 | class JSONFormatter(logging.Formatter):
 60 |     json_lib = json
 61 | 
 62 |     def format(self, record):
 63 |         message = record.getMessage()
 64 |         extra = self.extra_from_record(record)
 65 |         json_record = self.json_record(message, extra, record)
 66 |         return self.to_json(json_record)
 67 | 
 68 |     def to_json(self, record):
 69 |         return self.json_lib.dumps(record, ensure_ascii=False)
 70 | 
 71 |     def extra_from_record(self, record):
 72 |         return {
 73 |             attr_name: record.__dict__[attr_name]
 74 |             for attr_name in record.__dict__
 75 |             if attr_name not in BUILTIN_ATTRS
 76 |         }
 77 | 
 78 |     def json_record(self, message, extra, record):
 79 |         if extra:
 80 |             data = extra
 81 |             if message:
 82 |                 data["message"] = message
 83 |         else:
 84 |             # Also support logger.info({'foo': 'bar'}); `ast.literal_eval` is a
 85 |             # safe replacement for `eval`, and plain strings fall back to a dict
 86 |             try:
 87 |                 data = ast.literal_eval(message)
 88 |             except (SyntaxError, ValueError):
 89 |                 data = {"message": message}
 90 |         data["time"] = utc_now().isoformat() + "Z"
 91 | 
 92 |         if record.exc_info:
 93 |             data["exc_info"] = self.formatException(record.exc_info)
 94 |         return data
 95 | 
 96 | 
 97 | def create_logger(dirpath=None, name=None, **kwargs):
 98 |     """Create a ``logger`` instance named ``bw2calc`` that can be used to log calculations.
 99 | 
100 |     ``dirpath`` is the directory where the log file is saved. If ``dirpath`` is ``None``, no logger
101 |     is created.
102 | 
103 |     ``name`` is the name of the calculation run, used to construct the log filepath.
104 | 
105 |     You can add other types of loggers; just add another handler to the ``bw2calc`` named logger
106 |     before starting your calculations.
107 | 
108 |     Returns the filepath of the created log file.
109 | 
110 |     TODO: Decide on whether we copy safe_filepath to this package or create a common core package.
111 |     """
112 |     if dirpath is None:
113 |         return
114 | 
115 |     assert os.path.isdir(dirpath) and os.access(dirpath, os.W_OK)
116 | 
117 |     # Use safe_filepath here
118 |     filename = "{}.{}.json".format(name or uuid.uuid4().hex, utc_now().isoformat() + "Z")
119 | 
120 |     formatter = JSONFormatter()
121 |     fp = os.path.abspath(os.path.join(dirpath, filename))
122 | 
123 |     json_handler = logging.FileHandler(filename=fp, encoding="utf-8")
124 |     json_handler.setFormatter(formatter)
125 | 
126 |     logger = logging.getLogger("bw2calc")
127 |     logger.addHandler(json_handler)
128 |     logger.setLevel(logging.INFO)
129 | 
130 |     return fp
131 | 
--------------------------------------------------------------------------------
/src/bw2calc/fast_scores.py:
--------------------------------------------------------------------------------
 1 | import warnings
 2 | 
 3 | import numpy as np
 4 | import xarray
 5 | 
 6 | from bw2calc import PYPARDISO, UMFPACK
 7 | from bw2calc.fast_supply_arrays import FastSupplyArraysMixin
 8 | from bw2calc.multi_lca import MultiLCA
 9 | 
10 | 
11 | class FastScoresOnlyMultiLCA(MultiLCA, FastSupplyArraysMixin):
12 |     """Use chunking and pre-calculate as much as possible to optimize speed for multiple LCA
13 |     calculations.
14 | 15 | If using pardiso via pypardiso: 16 | 17 | - Feed multiple demands at once as a tensor into the solver function 18 | - Skip some identity checks on the technosphere matrix 19 | 20 | """ 21 | 22 | def __init__(self, *args, chunk_size: int = 50, **kwargs): 23 | # Extract chunk_size before passing to super() to avoid it being consumed 24 | # by MultiLCA.__init__, then manually initialize mixin attributes 25 | super().__init__(*args, **kwargs) 26 | self.set_chunk_size(chunk_size) 27 | 28 | if UMFPACK: 29 | warnings.warn( 30 | """Using UMFPACK - the speedups in `FastSupplyArraysMixin` work better when using PARDISO""" # noqa: E501 31 | ) 32 | 33 | def lci(self) -> None: 34 | raise NotImplementedError( 35 | "LCI and LCIA aren't separate in `FastScoresOnlyMultiLCA`; use `next()` to calculate scores." # noqa: E501 36 | ) 37 | 38 | def lci_calculation(self) -> None: 39 | raise NotImplementedError( 40 | "LCI and LCIA aren't separate in `FastScoresOnlyMultiLCA`; use `next()` to calculate scores." # noqa: E501 41 | ) 42 | 43 | def lcia(self) -> None: 44 | raise NotImplementedError( 45 | "LCI and LCIA aren't separate in `FastScoresOnlyMultiLCA`; use `next()` to calculate scores." # noqa: E501 46 | ) 47 | 48 | def lcia_calculation(self) -> None: 49 | raise NotImplementedError( 50 | "LCI and LCIA aren't separate in `FastScoresOnlyMultiLCA`; use `next()` to calculate scores." # noqa: E501 51 | ) 52 | 53 | def build_precalculated(self) -> None: 54 | """Multiply the characterization, and normalization and weighting matrices if present, by 55 | the biosphere matrix. When done outside the calculation loop, this only needs to be done 56 | once.""" 57 | self.precalculated = self.characterization_matrices @ self.biosphere_matrix 58 | if hasattr(self, "normalization_matrices"): 59 | self.precalculated = self.normalization_matrices @ self.precalculated 60 | if hasattr(self, "weighting_matrices"): 61 | self.precalculated = self.weighting_matrices @ self.precalculated 62 | self.precalculated = { 63 | key: np.asarray(matrix.sum(axis=0)) for key, matrix in self.precalculated.items() 64 | } 65 | 66 | def _calculation(self) -> xarray.DataArray: 67 | # Calls lci_calculation() and lcia_calculation in parent class, but we don't have 68 | # these as separate methods, so need to override to change behaviour. 69 | return self.calculate() 70 | 71 | def _load_datapackages(self) -> None: 72 | self.load_lci_data() 73 | self.build_demand_array() 74 | self.load_lcia_data() 75 | if self.config.get("normalizations"): 76 | self.load_normalization_data() 77 | if self.config.get("weightings"): 78 | self.load_weighting_data() 79 | 80 | def calculate(self) -> xarray.DataArray: 81 | """The actual LCI calculation. 82 | 83 | Separated from ``lci`` to be reusable in cases where the matrices are already built, e.g. 84 | ``redo_lci`` and Monte Carlo classes. 
85 | 86 | """ 87 | if not (PYPARDISO or UMFPACK): 88 | raise ValueError( 89 | "`FastScoresOnlyMultiLCA` only supported with PARDISO and UMFPACK solvers" 90 | ) 91 | 92 | if not hasattr(self, "technosphere_matrix"): 93 | self._load_datapackages() 94 | self.build_precalculated() 95 | 96 | self.supply_array = self.calculate_supply_arrays(list(self.demand_arrays.values())) 97 | 98 | lcia_array = np.vstack(list(self.precalculated.values())) 99 | scores = lcia_array @ self.supply_array 100 | 101 | self._set_scores( 102 | xarray.DataArray( 103 | scores, 104 | coords=[[str(x) for x in self.precalculated], list(self.demand_arrays)], 105 | dims=["LCIA", "processes"], 106 | ) 107 | ) 108 | return self._scores 109 | 110 | def _get_scores(self) -> xarray.DataArray: 111 | if not hasattr(self, "_scores"): 112 | raise ValueError("Scores not calculated yet") 113 | return self._scores 114 | 115 | def _set_scores(self, arr: xarray.DataArray) -> None: 116 | self._scores = arr 117 | 118 | scores = property(fget=_get_scores, fset=_set_scores) 119 | -------------------------------------------------------------------------------- /tests/test_method_config.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from pydantic import ValidationError 3 | 4 | from bw2calc.errors import InconsistentLCIA 5 | from bw2calc.method_config import MethodConfig 6 | 7 | 8 | def test_method_config_valid(): 9 | data = { 10 | "impact_categories": [("foo", "a"), ("foo", "b")], 11 | } 12 | assert MethodConfig(**data) 13 | 14 | data = { 15 | "impact_categories": [("foo", "a"), ("foo", "b")], 16 | "normalizations": {("norm", "standard"): [("foo", "a"), ("foo", "b")]}, 17 | } 18 | assert MethodConfig(**data) 19 | 20 | data = { 21 | "impact_categories": [("foo", "a"), ("foo", "b")], 22 | "normalizations": {("norm", "standard"): [("foo", "a"), ("foo", "b")]}, 23 | "weightings": {("weighting",): [("norm", "standard")]}, 24 | } 25 | assert MethodConfig(**data) 26 | 27 | 28 | def test_method_config_len_one_tuples_valid(): 29 | data = { 30 | "impact_categories": [("a",), ("b",)], 31 | } 32 | assert MethodConfig(**data) 33 | 34 | data = { 35 | "impact_categories": [("a",), ("b",)], 36 | "normalizations": {("norm",): [("a",), ("b",)]}, 37 | } 38 | assert MethodConfig(**data) 39 | 40 | data = { 41 | "impact_categories": [("a",), ("b",)], 42 | "normalizations": {("norm",): [("a",), ("b",)]}, 43 | "weightings": {("weighting",): [("norm",)]}, 44 | } 45 | assert MethodConfig(**data) 46 | 47 | 48 | def test_method_config_weighting_can_refer_to_impact_category(): 49 | data = { 50 | "impact_categories": [("a",), ("b",)], 51 | "normalizations": {("norm",): [("a",), ("b",)]}, 52 | "weightings": {("weighting",): [("a",), ("b",)]}, 53 | } 54 | assert MethodConfig(**data) 55 | 56 | 57 | def test_method_config_weighting_can_refer_to_normalization(): 58 | data = { 59 | "impact_categories": [("a",), ("b",)], 60 | "normalizations": {("norm",): [("a",), ("b",)]}, 61 | "weightings": {("weighting",): [("norm",)]}, 62 | } 63 | assert MethodConfig(**data) 64 | 65 | 66 | def test_method_config_wrong_tuple_types(): 67 | data = { 68 | "impact_categories": [("a",), (1,)], 69 | } 70 | with pytest.raises(ValidationError): 71 | MethodConfig(**data) 72 | 73 | data = { 74 | "impact_categories": [("a",), 1], 75 | } 76 | with pytest.raises(ValidationError): 77 | MethodConfig(**data) 78 | 79 | data = { 80 | "impact_categories": [("a",), ("b",)], 81 | "normalizations": {("norm",): [("a",), (1,)]}, 82 | } 83 | with 
pytest.raises(ValidationError): 84 | MethodConfig(**data) 85 | 86 | data = { 87 | "impact_categories": [("a",), ("b",)], 88 | "normalizations": {("norm",): [(1,), ("b",)]}, 89 | } 90 | with pytest.raises(ValidationError): 91 | MethodConfig(**data) 92 | 93 | data = { 94 | "impact_categories": [("a",), ("b",)], 95 | "normalizations": {("norm",): [("a",), ("b",)]}, 96 | "weightings": {("norm",): (1,)}, 97 | } 98 | with pytest.raises(ValidationError): 99 | MethodConfig(**data) 100 | 101 | data = { 102 | "impact_categories": [("a",), ("b",)], 103 | "normalizations": {("norm",): [("a",), ("b",)]}, 104 | "weightings": {("norm",): 1}, 105 | } 106 | with pytest.raises(ValidationError): 107 | MethodConfig(**data) 108 | 109 | 110 | def test_method_config_missing_normalization_reference(): 111 | data = { 112 | "impact_categories": [("foo", "a"), ("foo", "b")], 113 | "normalizations": {("norm", "standard"): [("foo", "a"), ("foo", "b"), ("foo", "c")]}, 114 | } 115 | with pytest.raises(ValueError): 116 | MethodConfig(**data) 117 | 118 | 119 | def test_method_config_normalization_overlaps_impact_categories(): 120 | data = { 121 | "impact_categories": [("foo", "a"), ("foo", "b")], 122 | "normalizations": {("foo", "a"): [("foo", "a"), ("foo", "b")]}, 123 | } 124 | with pytest.raises(ValueError): 125 | MethodConfig(**data) 126 | 127 | 128 | def test_method_config_weighting_overlaps_impact_categories(): 129 | data = { 130 | "impact_categories": [("foo", "a"), ("foo", "b")], 131 | "normalizations": {("normalization",): [("foo", "a"), ("foo", "b")]}, 132 | "weightings": {("foo", "a"): [("foo", "a"), ("foo", "b")]}, 133 | } 134 | with pytest.raises(ValueError): 135 | MethodConfig(**data) 136 | 137 | 138 | def test_method_config_weighting_overlaps_normalizations(): 139 | data = { 140 | "impact_categories": [("foo", "a"), ("foo", "b")], 141 | "normalizations": {("normalization",): [("foo", "a"), ("foo", "b")]}, 142 | "weightings": {("normalization",): [("normalization",)]}, 143 | } 144 | with pytest.raises(ValueError): 145 | MethodConfig(**data) 146 | 147 | 148 | def test_method_config_weighting_missing_reference(): 149 | data = { 150 | "impact_categories": [("foo", "a"), ("foo", "b")], 151 | "normalizations": {("normalization",): [("foo", "a"), ("foo", "b")]}, 152 | "weightings": {("normalization",): [("foo", "c"), ("foo", "a"), ("foo", "b")]}, 153 | } 154 | with pytest.raises(ValueError): 155 | MethodConfig(**data) 156 | 157 | 158 | def test_method_config_missing_ic_for_weightings(): 159 | data = { 160 | "impact_categories": [("foo", "a"), ("foo", "b")], 161 | "normalizations": {("normalization",): [("foo", "a")]}, 162 | } 163 | with pytest.raises(InconsistentLCIA): 164 | MethodConfig(**data) 165 | 166 | 167 | def test_method_config_weighting_mixed_references(): 168 | data = { 169 | "impact_categories": [("foo", "a"), ("foo", "b")], 170 | "normalizations": {("normalization",): [("foo", "a"), ("foo", "b")]}, 171 | "weightings": {("weighting",): [("normalization",), ("foo", "a"), ("foo", "b")]}, 172 | } 173 | with pytest.raises(InconsistentLCIA): 174 | MethodConfig(**data) 175 | 176 | 177 | def test_method_config_weighting_missing_ic(): 178 | data = { 179 | "impact_categories": [("foo", "a"), ("foo", "b")], 180 | "weightings": {("weighting",): [("foo", "b")]}, 181 | } 182 | with pytest.raises(InconsistentLCIA): 183 | MethodConfig(**data) 184 | 185 | 186 | def test_method_config_weighting_missing_normalization(): 187 | data = { 188 | "impact_categories": [("foo", "a"), ("foo", "b")], 189 | "normalizations": { 190 | 
("normalization", "a"): [("foo", "a")], 191 | ("normalization", "b"): [("foo", "b")], 192 | }, 193 | "weightings": {("weighting",): [("normalization", "a")]}, 194 | } 195 | with pytest.raises(InconsistentLCIA): 196 | MethodConfig(**data) 197 | -------------------------------------------------------------------------------- /src/bw2calc/method_config.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Sequence 2 | 3 | from pydantic import BaseModel, model_validator 4 | 5 | from bw2calc.errors import InconsistentLCIA 6 | 7 | 8 | class MethodConfig(BaseModel): 9 | """ 10 | A class that stores the logical relationships between impact categories, normalization, and 11 | weighting. 12 | 13 | The basic object in all three categories is an identifying tuple, i.e. tuples of strings. These 14 | tuples have no length restrictions. 15 | 16 | `impact_categories` is a list of tuples which identify each impact category (`bw2data.Method`). 17 | 18 | `normalizations` link normalization factors to impact categories. They are optional. If 19 | provided, they take the form of a dictionary, with keys of tuples which identify each 20 | normalization (`bw2data.Normalization`), and values of *lists* of impact categories tuples. 21 | 22 | If `normalizations` is defined, **all** impact categories must have a normalization. 23 | 24 | `weightings` link weighting factors to either normalizations *or* impact categories. They are 25 | optional. If provided, they take the form of a dictionary, with keys of tuples which identify 26 | each weighting (`bw2data.Weighting`), and values of *lists* of normalizations or impact 27 | categories tuples. They keys identify the weighting data, and the values refer to either 28 | impact categories or normalizations - mixing impact categories and normalizations is not 29 | allowed. 30 | 31 | If `normalizations` is defined, **all** impact categories or normalizations must have a 32 | weighting. 33 | 34 | The identifying tuples for `impact_categories`, `normalizations`, and `weightings` must all be 35 | unique. 
36 | 37 | Example 38 | ------- 39 | 40 | ```python 41 | { 42 | "impact_categories": [ 43 | ("climate change", "100 years"), 44 | ("climate change", "20 years"), 45 | ("eutrophication",), 46 | ], 47 | "normalizations": { 48 | ("climate change", "global normalization"): [ 49 | ("climate change", "100 years"), 50 | ("climate change", "20 years"), 51 | ], 52 | ("eut european reference", "1990"): [ 53 | ("eutrophication",), 54 | ] 55 | }, 56 | "weightings": { 57 | ("climate change", "bad"): [ 58 | ("climate change", "global normalization") 59 | ], 60 | ("eutrophication", "also bad"): [ 61 | ("eut european reference", "1990") 62 | ] 63 | } 64 | } 65 | ``` 66 | 67 | """ 68 | 69 | impact_categories: Sequence[tuple[str, ...]] 70 | normalizations: Optional[dict[tuple[str, ...], list[tuple[str, ...]]]] = None 71 | weightings: Optional[dict[tuple[str, ...], list[tuple[str, ...]]]] = None 72 | 73 | @model_validator(mode="after") 74 | def normalizations_reference_impact_categories(self): 75 | if not self.normalizations: 76 | return self 77 | references = set.union(*[set(lst) for lst in self.normalizations.values()]) 78 | difference = references.difference(set(self.impact_categories)) 79 | if difference: 80 | raise ValueError( 81 | ( 82 | "Impact categories in `normalizations` not present in `impact_categories`: " 83 | + f"{difference}" 84 | ) 85 | ) 86 | return self 87 | 88 | @model_validator(mode="after") 89 | def normalizations_unique_from_impact_categories(self): 90 | if not self.normalizations: 91 | return self 92 | 93 | references = set.union(*[set(lst) for lst in self.normalizations.values()]) 94 | overlap = set(self.normalizations).intersection(references) 95 | if overlap: 96 | raise ValueError( 97 | f"Normalization identifiers overlap impact category identifiers: {overlap}" 98 | ) 99 | return self 100 | 101 | @model_validator(mode="after") 102 | def normalizations_cover_all_impact_categories(self): 103 | if not self.normalizations: 104 | return self 105 | missing = set(self.impact_categories).difference( 106 | set(ic for lst in self.normalizations.values() for ic in lst) 107 | ) 108 | if missing: 109 | raise InconsistentLCIA( 110 | f"Normalization not provided for all impact categories; missing {missing}" 111 | ) 112 | return self 113 | 114 | @model_validator(mode="after") 115 | def weightings_reference_impact_categories_or_normalizations(self): 116 | if not self.weightings: 117 | return self 118 | 119 | if self.normalizations: 120 | possibles = set(self.normalizations).union(set(self.impact_categories)) 121 | else: 122 | possibles = set(self.impact_categories) 123 | 124 | references = set.union(*[set(lst) for lst in self.weightings.values()]) 125 | difference = set(references).difference(possibles) 126 | if difference: 127 | raise ValueError( 128 | f"`weightings` refers to missing impact categories or normalizations: {difference}" 129 | ) 130 | return self 131 | 132 | @model_validator(mode="after") 133 | def weightings_unique_from_impact_categories(self): 134 | if not self.weightings: 135 | return self 136 | overlap = set(self.weightings).intersection(set(self.impact_categories)) 137 | if overlap: 138 | raise ValueError( 139 | f"Weighting identifiers overlap impact category identifiers: {overlap}" 140 | ) 141 | return self 142 | 143 | @model_validator(mode="after") 144 | def weightings_unique_from_normalizations(self): 145 | if not self.weightings or not self.normalizations: 146 | return self 147 | overlap = set(self.weightings).intersection(set(self.normalizations)) 148 | if overlap: 149 | raise ValueError(f"Weighting identifiers overlap normalization identifiers: {overlap}") 150 | return self 151 | 152 | @model_validator(mode="after") 153 | def weightings_cant_have_mixed_references(self): 154 | if not self.weightings or not self.normalizations: 155 | return self 156 | normalization_references = set( 157 | nor for lst in self.weightings.values() for nor in lst 158 | ).intersection(set(self.normalizations)) 159 | ic_references = set(nor for lst in self.weightings.values() for nor in lst).intersection( 160 | set(self.impact_categories) 161 | ) 162 | if normalization_references and ic_references: 163 | raise InconsistentLCIA( 164 | "Weightings must reference impact categories or normalizations, not both" 165 | ) 166 | return self 167 | 168 | @model_validator(mode="after") 169 | def weightings_cover_all_impact_categories(self): 170 | if not self.weightings: 171 | return self 172 | references = set(nor for lst in self.weightings.values() for nor in lst) 173 | missing = set(self.impact_categories).difference(references) 174 | if references.intersection(self.impact_categories) and missing: 175 | raise InconsistentLCIA( 176 | f"Weighting not provided for all impact categories; missing {missing}" 177 | ) 178 | return self 179 | 180 | @model_validator(mode="after") 181 | def weightings_cover_all_normalizations(self): 182 | if not self.weightings or not self.normalizations: 183 | return self 184 | references = set(nor for lst in self.weightings.values() for nor in lst) 185 | missing = set(self.normalizations).difference(references) 186 | if references.intersection(self.normalizations) and missing: 187 | raise InconsistentLCIA( 188 | f"Weighting not provided for all normalizations; missing {missing}" 189 | ) 190 | return self 191 | 
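The validation rules above are easiest to see end-to-end. Below is a minimal usage sketch (not part of the module source; the identifier tuples are invented for illustration, and only `MethodConfig` and `InconsistentLCIA` from the imports above are assumed):

```python
from bw2calc.errors import InconsistentLCIA
from bw2calc.method_config import MethodConfig

# Passes validation: every impact category has a normalization, and the
# single weighting covers every normalization.
MethodConfig(
    impact_categories=[("climate change", "100 years"), ("eutrophication",)],
    normalizations={
        ("norm", "global"): [("climate change", "100 years"), ("eutrophication",)]
    },
    weightings={("weight", "equal"): [("norm", "global")]},
)

# Fails validation: once `normalizations` is given, it must cover *all*
# impact categories, but ("eutrophication",) is missing here.
try:
    MethodConfig(
        impact_categories=[("climate change", "100 years"), ("eutrophication",)],
        normalizations={("norm", "global"): [("climate change", "100 years")]},
    )
except InconsistentLCIA as exc:
    print(exc)  # Normalization not provided for all impact categories; ...
```
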
ValueError(f"Weighting identifiers overlap normalization identifiers: {overlap}") 150 | return self 151 | 152 | @model_validator(mode="after") 153 | def weightings_cant_have_mixed_references(self): 154 | if not self.weightings or not self.normalizations: 155 | return self 156 | normalization_references = set( 157 | nor for lst in self.weightings.values() for nor in lst 158 | ).intersection(set(self.normalizations)) 159 | ic_references = set(nor for lst in self.weightings.values() for nor in lst).intersection( 160 | set(self.impact_categories) 161 | ) 162 | if normalization_references and ic_references: 163 | raise InconsistentLCIA( 164 | "Weightings must reference impact categories or normalizations, not both" 165 | ) 166 | return self 167 | 168 | @model_validator(mode="after") 169 | def weightings_cover_all_impact_categories(self): 170 | if not self.weightings: 171 | return self 172 | references = set(nor for lst in self.weightings.values() for nor in lst) 173 | missing = set(self.impact_categories).difference(references) 174 | if references.intersection(self.impact_categories) and missing: 175 | raise InconsistentLCIA( 176 | f"Weighting not provided for all impact categories; missing {missing}" 177 | ) 178 | return self 179 | 180 | @model_validator(mode="after") 181 | def weightings_cover_all_normalizations(self): 182 | if not self.weightings or not self.normalizations: 183 | return self 184 | references = set(nor for lst in self.weightings.values() for nor in lst) 185 | missing = set(self.normalizations).difference(references) 186 | if references.intersection(self.normalizations) and missing: 187 | raise InconsistentLCIA( 188 | f"Weighting not provided for all normalizations; missing {missing}" 189 | ) 190 | return self 191 | -------------------------------------------------------------------------------- /tests/fixtures/presamples_basic.py: -------------------------------------------------------------------------------- 1 | import os 2 | import uuid 3 | 4 | import numpy as np 5 | from bw2data import Database, Method, projects 6 | 7 | basedir = os.path.dirname(os.path.abspath(__file__)) 8 | 9 | # Technosphere matrix 10 | 11 | [[1, 2, -3], [0, 0.5, -2], [-0.1, 0, 1]] 12 | 13 | # Biosphere matrix 14 | 15 | [[0, 1, 2], [7, 5, 0]] 16 | 17 | # Characterization matrix 18 | 19 | [[4, 0], [0, -2]] 20 | 21 | # A^{-1} 22 | 23 | [ 24 | [2 / 3, 4 / 15, 2 / 30], 25 | [-(2 + 2 / 3), 14 / 15, -4 / 15], 26 | [-(3 + 1 / 3), 2 + 2 / 3, 2 / 3], 27 | ] 28 | 29 | 30 | def write_database(): 31 | bio_data = { 32 | ("bio", "a"): {"exchange": [], "type": "biosphere"}, 33 | ("bio", "b"): {"exchange": [], "type": "biosphere"}, 34 | } 35 | Database("bio").write(bio_data) 36 | 37 | tech_data = { 38 | ("test", "1"): { 39 | "exchanges": [ 40 | { 41 | "amount": 1, 42 | "type": "production", 43 | "input": ("test", "1"), 44 | "uncertainty type": 0, 45 | }, 46 | { 47 | "amount": 0.1, 48 | "type": "technosphere", 49 | "input": ("test", "3"), 50 | "uncertainty type": 0, 51 | }, 52 | { 53 | "amount": 7, 54 | "type": "biosphere", 55 | "input": ("bio", "b"), 56 | "uncertainty type": 0, 57 | }, 58 | ], 59 | }, 60 | ("test", "2"): { 61 | "exchanges": [ 62 | { 63 | "amount": 0.5, 64 | "type": "production", 65 | "input": ("test", "2"), 66 | "uncertainty type": 0, 67 | }, 68 | { 69 | "amount": -2, 70 | "type": "technosphere", 71 | "input": ("test", "1"), 72 | "uncertainty type": 0, 73 | }, 74 | { 75 | "amount": 1, 76 | "type": "biosphere", 77 | "input": ("bio", "a"), 78 | "uncertainty type": 0, 79 | }, 80 | { 81 | "amount": 5, 82 
| "type": "biosphere", 83 | "input": ("bio", "b"), 84 | "uncertainty type": 0, 85 | }, 86 | ], 87 | }, 88 | ("test", "3"): { 89 | "exchanges": [ 90 | { 91 | "amount": 1, 92 | "type": "production", 93 | "input": ("test", "3"), 94 | "uncertainty type": 0, 95 | }, 96 | { 97 | "amount": 3, 98 | "type": "technosphere", 99 | "input": ("test", "1"), 100 | "uncertainty type": 0, 101 | }, 102 | { 103 | "amount": 2, 104 | "type": "technosphere", 105 | "input": ("test", "2"), 106 | "uncertainty type": 0, 107 | }, 108 | { 109 | "amount": 2, 110 | "type": "biosphere", 111 | "input": ("bio", "a"), 112 | "uncertainty type": 0, 113 | }, 114 | ], 115 | }, 116 | } 117 | 118 | Database("test").write(tech_data) 119 | 120 | cfs = [ 121 | (("bio", "a"), 4), 122 | (("bio", "b"), -2), 123 | ] 124 | 125 | Method(("m",)).register() 126 | Method(("m",)).write(cfs) 127 | 128 | 129 | def build_single_presample_array(): 130 | from presamples import create_presamples_package 131 | 132 | tech_indices = [ 133 | (("test", "1"), ("test", "2"), "technosphere"), 134 | (("test", "2"), ("test", "2"), "production"), 135 | ] 136 | 137 | tech_samples = np.array( 138 | ( 139 | [1], 140 | [1], 141 | ) 142 | ) 143 | 144 | bio_indices = [ 145 | (("bio", "a"), ("test", "2")), 146 | (("bio", "b"), ("test", "2")), 147 | (("bio", "b"), ("test", "1")), 148 | ] 149 | 150 | bio_samples = np.array( 151 | ( 152 | [10], 153 | [1], 154 | [0], 155 | ) 156 | ) 157 | 158 | cf_indices = [("bio", "a")] 159 | 160 | cf_samples = np.array(([1],)) 161 | 162 | create_presamples_package( 163 | matrix_data=[ 164 | (tech_samples, tech_indices, "technosphere"), 165 | (bio_samples, bio_indices, "biosphere"), 166 | (cf_samples, cf_indices, "cf"), 167 | ], 168 | id_="single-sample", 169 | name="single-sample", 170 | dirpath=basedir, 171 | overwrite=True, 172 | seed=54321, 173 | ) 174 | 175 | 176 | def build_multi_presample_array_unseeded(): 177 | from presamples import create_presamples_package 178 | 179 | tech_indices = [ 180 | (("test", "1"), ("test", "2"), "technosphere"), 181 | (("test", "2"), ("test", "2"), "production"), 182 | ] 183 | 184 | tech_samples = np.array( 185 | ( 186 | [1, 2, 3], 187 | [100, 101, 102], 188 | ) 189 | ) 190 | 191 | bio_indices = [ 192 | (("bio", "a"), ("test", "2")), 193 | (("bio", "b"), ("test", "2")), 194 | (("bio", "b"), ("test", "1")), 195 | ] 196 | 197 | bio_samples = np.array( 198 | ( 199 | [10, 11, 12], 200 | [1, 2, 3], 201 | [0, -1, -2], 202 | ) 203 | ) 204 | 205 | create_presamples_package( 206 | matrix_data=[ 207 | (tech_samples, tech_indices, "technosphere"), 208 | (bio_samples, bio_indices, "biosphere"), 209 | ], 210 | id_="unseeded", 211 | name="unseeded", 212 | dirpath=basedir, 213 | overwrite=True, 214 | ) 215 | 216 | 217 | def build_multi_presample_array(): 218 | from presamples import create_presamples_package 219 | 220 | tech_indices = [ 221 | (("test", "1"), ("test", "2"), "technosphere"), 222 | (("test", "2"), ("test", "2"), "production"), 223 | ] 224 | 225 | tech_samples = np.array( 226 | ( 227 | [1, 2, 3], 228 | [100, 101, 102], 229 | ) 230 | ) 231 | 232 | bio_indices = [ 233 | (("bio", "a"), ("test", "2")), 234 | (("bio", "b"), ("test", "2")), 235 | (("bio", "b"), ("test", "1")), 236 | ] 237 | 238 | bio_samples = np.array( 239 | ( 240 | [10, 11, 12], 241 | [1, 2, 3], 242 | [0, -1, -2], 243 | ) 244 | ) 245 | 246 | create_presamples_package( 247 | matrix_data=[ 248 | (tech_samples, tech_indices, "technosphere"), 249 | (bio_samples, bio_indices, "biosphere"), 250 | ], 251 | id_="multi", 252 | name="multi", 253 | 
dirpath=basedir, 254 | overwrite=True, 255 | seed=42, 256 | ) 257 | 258 | 259 | def build_multi_presample_sequential_array(): 260 | from presamples import create_presamples_package 261 | 262 | tech_indices = [ 263 | (("test", "1"), ("test", "2"), "technosphere"), 264 | (("test", "2"), ("test", "2"), "production"), 265 | ] 266 | 267 | tech_samples = np.array( 268 | ( 269 | [1, 2, 3], 270 | [100, 101, 102], 271 | ) 272 | ) 273 | 274 | bio_indices = [ 275 | (("bio", "a"), ("test", "2")), 276 | (("bio", "b"), ("test", "2")), 277 | (("bio", "b"), ("test", "1")), 278 | ] 279 | 280 | bio_samples = np.array( 281 | ( 282 | [10, 11, 12], 283 | [1, 2, 3], 284 | [0, -1, -2], 285 | ) 286 | ) 287 | 288 | create_presamples_package( 289 | matrix_data=[ 290 | (tech_samples, tech_indices, "technosphere"), 291 | (bio_samples, bio_indices, "biosphere"), 292 | ], 293 | id_="seq", 294 | name="seq", 295 | dirpath=basedir, 296 | overwrite=True, 297 | seed="sequential", 298 | ) 299 | 300 | 301 | if __name__ == "__main__": 302 | name = "test-builder-{}".format(uuid.uuid4().hex) 303 | if name in projects: 304 | raise ValueError("Test project name not unique; please run again.") 305 | try: 306 | projects.set_current(name) 307 | write_database() 308 | build_single_presample_array() 309 | build_multi_presample_array() 310 | build_multi_presample_array_unseeded() 311 | build_multi_presample_sequential_array() 312 | finally: 313 | projects.delete_project(delete_dir=True) 314 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | """ 2 | Pytest configuration and fixtures for bw2calc tests. 3 | 4 | This module provides fixtures for handling solver availability and testing 5 | different solver configurations. 
6 | """ 7 | 8 | from pathlib import Path 9 | from unittest.mock import Mock, patch 10 | 11 | import pytest 12 | 13 | 14 | def check_solver_availability(): 15 | """Check which solvers are available.""" 16 | pypardiso_available = False 17 | umfpack_available = False 18 | 19 | try: 20 | import pypardiso # noqa: F401 21 | 22 | pypardiso_available = True 23 | except ImportError: 24 | pass 25 | 26 | try: 27 | import scikits.umfpack # noqa: F401 28 | 29 | umfpack_available = True 30 | except ImportError: 31 | pass 32 | 33 | return pypardiso_available, umfpack_available 34 | 35 | 36 | @pytest.fixture(scope="session") 37 | def solver_availability(): 38 | """Session-scoped fixture that checks solver availability.""" 39 | return check_solver_availability() 40 | 41 | 42 | @pytest.fixture 43 | def pypardiso_available(solver_availability): 44 | """Fixture indicating if pypardiso is available.""" 45 | return solver_availability[0] 46 | 47 | 48 | @pytest.fixture 49 | def umfpack_available(solver_availability): 50 | """Fixture indicating if scikits.umfpack is available.""" 51 | return solver_availability[1] 52 | 53 | 54 | @pytest.fixture 55 | def no_solvers_available(): 56 | """Fixture that monkey-patches both solvers to be unavailable.""" 57 | with patch.dict( 58 | "sys.modules", 59 | { 60 | "pypardiso": None, 61 | "scikits.umfpack": None, 62 | "scikits": None, 63 | }, 64 | ): 65 | # Patch the bw2calc module's solver flags in all places they're used 66 | # Note: Only patch attributes that actually exist in each module 67 | with ( 68 | patch("bw2calc.__init__.PYPARDISO", False), 69 | patch("bw2calc.__init__.UMFPACK", False), 70 | patch("bw2calc.fast_scores.PYPARDISO", False), 71 | patch("bw2calc.fast_scores.UMFPACK", False), 72 | patch("bw2calc.fast_supply_arrays.PYPARDISO", False), 73 | patch("bw2calc.fast_supply_arrays.UMFPACK", False), 74 | patch("bw2calc.lca_base.PYPARDISO", False), 75 | patch("bw2calc.lca.PYPARDISO", False), 76 | patch("bw2calc.multi_lca.PYPARDISO", False), 77 | ): 78 | yield 79 | 80 | 81 | @pytest.fixture 82 | def only_pypardiso_available(): 83 | """Fixture that makes only pypardiso available.""" 84 | with patch.dict( 85 | "sys.modules", 86 | { 87 | "scikits.umfpack": None, 88 | "scikits": None, 89 | }, 90 | ): 91 | # Mock pypardiso module 92 | mock_pypardiso = Mock() 93 | mock_pypardiso.pardiso_wrapper = Mock() 94 | 95 | with ( 96 | patch.dict("sys.modules", {"pypardiso": mock_pypardiso}), 97 | patch("bw2calc.fast_scores.PYPARDISO", True), 98 | patch("bw2calc.fast_scores.UMFPACK", False), 99 | ): 100 | yield 101 | 102 | 103 | @pytest.fixture 104 | def only_umfpack_available(): 105 | """Fixture that makes only scikits.umfpack available.""" 106 | with patch.dict( 107 | "sys.modules", 108 | { 109 | "pypardiso": None, 110 | }, 111 | ): 112 | # Mock scikits.umfpack module 113 | mock_scikits = Mock() 114 | mock_scikits.umfpack = Mock() 115 | 116 | with ( 117 | patch.dict( 118 | "sys.modules", 119 | { 120 | "scikits": mock_scikits, 121 | "scikits.umfpack": mock_scikits.umfpack, 122 | }, 123 | ), 124 | patch("bw2calc.fast_scores.PYPARDISO", False), 125 | patch("bw2calc.fast_scores.UMFPACK", True), 126 | ): 127 | yield 128 | 129 | 130 | @pytest.fixture 131 | def both_solvers_available(): 132 | """Fixture that makes both solvers available.""" 133 | # Mock pypardiso module 134 | mock_pypardiso = Mock() 135 | mock_pypardiso.pardiso_wrapper = Mock() 136 | 137 | # Mock scikits.umfpack module 138 | mock_scikits = Mock() 139 | mock_scikits.umfpack = Mock() 140 | 141 | with ( 142 | patch.dict( 143 | 
"sys.modules", 144 | { 145 | "pypardiso": mock_pypardiso, 146 | "scikits": mock_scikits, 147 | "scikits.umfpack": mock_scikits.umfpack, 148 | }, 149 | ), 150 | patch("bw2calc.fast_scores.PYPARDISO", True), 151 | patch("bw2calc.fast_scores.UMFPACK", True), 152 | ): 153 | yield 154 | 155 | 156 | @pytest.fixture 157 | def mock_pypardiso_solver(): 158 | """Fixture providing a mock PyPardisoSolver.""" 159 | 160 | def create_mock_solver(): 161 | mock_solver = Mock() 162 | mock_solver.factorized = False 163 | mock_solver.phase = None 164 | 165 | def factorize(matrix): 166 | mock_solver.factorized = True 167 | 168 | def set_phase(phase): 169 | mock_solver.phase = phase 170 | 171 | def _check_b(matrix, b): 172 | return b 173 | 174 | def _call_pardiso(matrix, b): 175 | # Return a mock solution 176 | import numpy as np 177 | 178 | return np.ones((matrix.shape[0], b.shape[1])) 179 | 180 | mock_solver.factorize = factorize 181 | mock_solver.set_phase = set_phase 182 | mock_solver._check_b = _check_b 183 | mock_solver._call_pardiso = _call_pardiso 184 | 185 | return mock_solver 186 | 187 | return create_mock_solver 188 | 189 | 190 | @pytest.fixture 191 | def mock_umfpack_solver(): 192 | """Fixture providing a mock UMFPACK solver.""" 193 | 194 | def mock_factorized(matrix): 195 | def solver(b): 196 | import numpy as np 197 | 198 | return np.ones(matrix.shape[0]) 199 | 200 | return solver 201 | 202 | return mock_factorized 203 | 204 | 205 | # Pytest markers for different solver configurations 206 | def pytest_configure(config): 207 | """Configure pytest markers.""" 208 | config.addinivalue_line("markers", "pypardiso: mark test as requiring pypardiso solver") 209 | config.addinivalue_line("markers", "umfpack: mark test as requiring scikits.umfpack solver") 210 | config.addinivalue_line("markers", "no_solvers: mark test as requiring no solvers available") 211 | config.addinivalue_line( 212 | "markers", "solver_agnostic: mark test as working with any solver configuration" 213 | ) 214 | 215 | 216 | def pytest_collection_modifyitems(config, items): 217 | """Modify test collection based on solver availability.""" 218 | pypardiso_available, umfpack_available = check_solver_availability() 219 | 220 | for item in items: 221 | # Skip pypardiso tests if pypardiso is not available 222 | if item.get_closest_marker("pypardiso") and not pypardiso_available: 223 | skip_marker = pytest.mark.skip(reason="pypardiso not available") 224 | item.add_marker(skip_marker) 225 | 226 | # Skip umfpack tests if umfpack is not available 227 | if item.get_closest_marker("umfpack") and not umfpack_available: 228 | skip_marker = pytest.mark.skip(reason="scikits.umfpack not available") 229 | item.add_marker(skip_marker) 230 | 231 | # Skip tests that require no solvers if any solver is available 232 | if item.get_closest_marker("no_solvers") and (pypardiso_available or umfpack_available): 233 | skip_marker = pytest.mark.skip(reason="solvers are available") 234 | item.add_marker(skip_marker) 235 | 236 | 237 | # Convenience fixtures for common test scenarios 238 | @pytest.fixture 239 | def solver_config(request): 240 | """Parametrized fixture for testing different solver configurations.""" 241 | config = request.param 242 | if config == "no_solvers": 243 | return pytest.fixture(no_solvers_available) 244 | elif config == "pypardiso_only": 245 | return pytest.fixture(only_pypardiso_available) 246 | elif config == "umfpack_only": 247 | return pytest.fixture(only_umfpack_available) 248 | elif config == "both_solvers": 249 | return 
237 | # Convenience fixtures for common test scenarios 238 | @pytest.fixture 239 | def solver_config(request): 240 | """Parametrized fixture for testing different solver configurations.""" 241 | config = request.param 242 | if config == "no_solvers": 243 | return request.getfixturevalue("no_solvers_available") 244 | elif config == "pypardiso_only": 245 | return request.getfixturevalue("only_pypardiso_available") 246 | elif config == "umfpack_only": 247 | return request.getfixturevalue("only_umfpack_available") 248 | elif config == "both_solvers": 249 | return request.getfixturevalue("both_solvers_available") 250 | else: 251 | raise ValueError(f"Unknown solver config: {config}") 252 | 253 | 254 | @pytest.fixture 255 | def fixture_dir() -> Path: 256 | return Path(__file__).resolve().parent / "fixtures" 257 | 258 | 259 | @pytest.fixture 260 | def basic_test_data(fixture_dir): 261 | """Basic test data for FastScoresOnlyMultiLCA tests.""" 262 | 263 | from bw2calc.utils import get_datapackage 264 | 265 | return { 266 | "dps": [ 267 | get_datapackage(fixture_dir / "multi_lca_simple_1.zip"), 268 | get_datapackage(fixture_dir / "multi_lca_simple_2.zip"), 269 | get_datapackage(fixture_dir / "multi_lca_simple_3.zip"), 270 | get_datapackage(fixture_dir / "multi_lca_simple_4.zip"), 271 | get_datapackage(fixture_dir / "multi_lca_simple_5.zip"), 272 | ], 273 | "config": { 274 | "impact_categories": [ 275 | ("first", "category"), 276 | ("second", "category"), 277 | ] 278 | }, 279 | "demands": { 280 | "γ": {100: 1}, 281 | "ε": {103: 2}, 282 | "ζ": {105: 3}, 283 | }, 284 | } 285 | -------------------------------------------------------------------------------- /tests/test_restricted_sparse_matrix_dict.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from matrix_utils import SparseMatrixDict 3 | from pydantic import ValidationError 4 | 5 | from bw2calc.restricted_sparse_matrix_dict import RestrictedSparseMatrixDict, RestrictionsValidator 6 | 7 | 8 | class Dummy: 9 | def __init__(self, a): 10 | self.a = a 11 | 12 | def __matmul__(self, other): 13 | if isinstance(other, Dummy): 14 | return self.a + other.a 15 | return self.a + other 16 | 17 | 18 | def test_restricted_sparse_matrix_dict(): 19 | smd = SparseMatrixDict({(("one",), "foo"): 1, (("two",), "bar"): 2}) 20 | rsmd = RestrictedSparseMatrixDict( 21 | {("seven",): [("one",)], ("eight",): [("two",)]}, 22 | {("seven",): Dummy(7), ("eight",): Dummy(8)}, 23 | ) 24 | 25 | result = rsmd @ smd 26 | assert isinstance(result, SparseMatrixDict) 27 | assert len(result) == 2 28 | assert result[(("seven",), ("one",), "foo")] == 8 29 | assert result[(("eight",), ("two",), "bar")] == 10 30 | 31 | 32 | def test_restrictions_validator(): 33 | assert RestrictionsValidator(restrictions={("seven",): [("one",)], ("eight",): [("two",)]}) 34 | with pytest.raises(ValidationError): 35 | RestrictionsValidator(restrictions={"seven": [("one",)], ("eight",): [("two",)]}) 36 | 37 | 38 | # Test the updated RestrictedSparseMatrixDict functionality 39 | 40 | 41 | def test_get_first_element_with_nested_tuple(): 42 | """Test _get_first_element with nested tuple keys.""" 43 | rsmd = RestrictedSparseMatrixDict({("seven",): [("one",)]}, {("seven",): Dummy(7)}) 44 | 45 | # Test with nested tuple key 46 | nested_key = (("some", "lcia"), "functional-unit-id") 47 | result = rsmd._get_first_element(nested_key) 48 | assert result == ("some", "lcia") 49 | 50 | 51 | def test_get_first_element_with_simple_tuple(): 52 | """Test _get_first_element with simple tuple keys.""" 53 | rsmd = RestrictedSparseMatrixDict({("seven",): [("one",)]}, {("seven",): Dummy(7)}) 54 | 55 | # Test with simple tuple key 56 | simple_key = ("some", "lcia") 57 | result = rsmd._get_first_element(simple_key) 58 | assert result == ("some", "lcia") 59 | 60 | 61 | def test_get_first_element_invalid_type(): 62 | """Test _get_first_element with invalid type.""" 63 | rsmd = RestrictedSparseMatrixDict({("seven",): [("one",)]}, {("seven",): Dummy(7)}) 64 | 65 | # Test with invalid type 66 | with pytest.raises(AssertionError, match="Wrong type: 
should be tuple"): 67 | rsmd._get_first_element("invalid") 68 | 69 | 70 | def test_concatenate_with_nested_tuple(): 71 | """Test _concatenate with nested tuple in second argument.""" 72 | rsmd = RestrictedSparseMatrixDict({("seven",): [("one",)]}, {("seven",): Dummy(7)}) 73 | 74 | a = ("normalization", "key") 75 | b = (("some", "lcia"), ("a", "b"), "functional-unit-id") 76 | result = rsmd._concatenate(a, b) 77 | assert result == (("normalization", "key"), ("some", "lcia"), ("a", "b"), "functional-unit-id") 78 | 79 | 80 | def test_concatenate_with_simple_tuple(): 81 | """Test _concatenate with simple tuple in second argument.""" 82 | rsmd = RestrictedSparseMatrixDict({("seven",): [("one",)]}, {("seven",): Dummy(7)}) 83 | 84 | a = ("normalization", "key") 85 | b = ("some", "lcia") 86 | result = rsmd._concatenate(a, b) 87 | assert result == (("normalization", "key"), ("some", "lcia")) 88 | 89 | 90 | def test_matmul_with_restricted_sparse_matrix_dict(): 91 | """Test matrix multiplication with another RestrictedSparseMatrixDict.""" 92 | # Create two RestrictedSparseMatrixDict instances 93 | restrictions1 = { 94 | ("norm", "key1"): [("first", "category")], 95 | ("norm", "key2"): [("second", "category")], 96 | } 97 | rsmd1 = RestrictedSparseMatrixDict( 98 | restrictions1, {("norm", "key1"): Dummy(10), ("norm", "key2"): Dummy(20)} 99 | ) 100 | 101 | restrictions2 = { 102 | ("first", "category"): [("process1",)], 103 | ("second", "category"): [("process2",)], 104 | } 105 | rsmd2 = RestrictedSparseMatrixDict( 106 | restrictions2, {("first", "category"): Dummy(5), ("second", "category"): Dummy(15)} 107 | ) 108 | 109 | result = rsmd1 @ rsmd2 110 | 111 | assert isinstance(result, SparseMatrixDict) 112 | assert len(result) == 2 113 | # Check that the correct combinations were created based on restrictions 114 | assert (("norm", "key1"), ("first", "category")) in result 115 | assert (("norm", "key2"), ("second", "category")) in result 116 | # Check that the multiplication was performed correctly 117 | assert result[(("norm", "key1"), ("first", "category"))] == 15 # 10 + 5 118 | assert result[(("norm", "key2"), ("second", "category"))] == 35 # 20 + 15 119 | 120 | 121 | def test_matmul_with_sparse_matrix_dict(): 122 | """Test matrix multiplication with SparseMatrixDict.""" 123 | rsmd = RestrictedSparseMatrixDict( 124 | {("norm", "key1"): [("first", "category")]}, {("norm", "key1"): Dummy(10)} 125 | ) 126 | 127 | smd = SparseMatrixDict( 128 | { 129 | (("first", "category"), "process1"): 5, 130 | (("second", "category"), "process2"): 15, # This should be filtered out 131 | } 132 | ) 133 | 134 | result = rsmd @ smd 135 | 136 | assert isinstance(result, SparseMatrixDict) 137 | assert len(result) == 1 138 | # Only the allowed combination should be present 139 | assert (("norm", "key1"), ("first", "category"), "process1") in result 140 | assert result[(("norm", "key1"), ("first", "category"), "process1")] == 15 # 10 + 5 141 | 142 | 143 | def test_matmul_restrictions_filtering(): 144 | """Test that restrictions properly filter out disallowed combinations.""" 145 | rsmd = RestrictedSparseMatrixDict( 146 | {("norm", "key1"): [("first", "category")]}, # Only allows ("first", "category") 147 | {("norm", "key1"): Dummy(10)}, 148 | ) 149 | 150 | smd = SparseMatrixDict( 151 | { 152 | (("first", "category"), "process1"): 5, # Should be allowed 153 | (("second", "category"), "process2"): 15, # Should be filtered out 154 | (("third", "category"), "process3"): 25, # Should be filtered out 155 | } 156 | ) 157 | 158 | result 
= rsmd @ smd 159 | 160 | assert isinstance(result, SparseMatrixDict) 161 | assert len(result) == 1 162 | # Only the allowed combination should be present 163 | assert (("norm", "key1"), ("first", "category"), "process1") in result 164 | assert result[(("norm", "key1"), ("first", "category"), "process1")] == 15 # 10 + 5 165 | 166 | 167 | def test_matmul_with_non_matrix_dict(): 168 | """Test matrix multiplication with non-SparseMatrixDict type.""" 169 | rsmd = RestrictedSparseMatrixDict( 170 | {("norm", "key1"): [("first", "category")]}, {("norm", "key1"): Dummy(10)} 171 | ) 172 | 173 | # Test with a non-SparseMatrixDict type 174 | other = "not a matrix dict" 175 | 176 | # This should call the parent class method 177 | with pytest.raises(TypeError): 178 | rsmd @ other 179 | 180 | 181 | def test_matmul_empty_restrictions(): 182 | """Test matrix multiplication with empty restrictions.""" 183 | rsmd = RestrictedSparseMatrixDict({}, {("norm", "key1"): Dummy(10)}) # Empty restrictions 184 | 185 | smd = SparseMatrixDict( 186 | {(("first", "category"), "process1"): 5, (("second", "category"), "process2"): 15} 187 | ) 188 | 189 | with pytest.raises(KeyError): 190 | rsmd @ smd 191 | 192 | 193 | def test_matmul_multiple_restrictions(): 194 | """Test matrix multiplication with multiple restrictions.""" 195 | rsmd = RestrictedSparseMatrixDict( 196 | { 197 | ("norm", "key1"): [("first", "category"), ("second", "category")], 198 | ("norm", "key2"): [("third", "category")], 199 | }, 200 | {("norm", "key1"): Dummy(10), ("norm", "key2"): Dummy(20)}, 201 | ) 202 | 203 | smd = SparseMatrixDict( 204 | { 205 | (("first", "category"), "process1"): 5, 206 | (("second", "category"), "process2"): 15, 207 | (("third", "category"), "process3"): 25, 208 | (("fourth", "category"), "process4"): 35, # Should be filtered out 209 | } 210 | ) 211 | 212 | result = rsmd @ smd 213 | 214 | assert isinstance(result, SparseMatrixDict) 215 | assert len(result) == 3 216 | # Check all allowed combinations are present 217 | assert (("norm", "key1"), ("first", "category"), "process1") in result 218 | assert (("norm", "key1"), ("second", "category"), "process2") in result 219 | assert (("norm", "key2"), ("third", "category"), "process3") in result 220 | # Check values 221 | assert result[(("norm", "key1"), ("first", "category"), "process1")] == 15 # 10 + 5 222 | assert result[(("norm", "key1"), ("second", "category"), "process2")] == 25 # 10 + 15 223 | assert result[(("norm", "key2"), ("third", "category"), "process3")] == 45 # 20 + 25 224 | 225 | 226 | def test_initialization_with_validation(): 227 | """Test that initialization validates restrictions using RestrictionsValidator.""" 228 | # Valid restrictions 229 | valid_restrictions = {("norm", "key1"): [("first", "category")]} 230 | rsmd = RestrictedSparseMatrixDict(valid_restrictions, {("norm", "key1"): Dummy(10)}) 231 | assert rsmd._restrictions == valid_restrictions 232 | 233 | # Invalid restrictions should raise ValidationError 234 | with pytest.raises(ValidationError): 235 | RestrictedSparseMatrixDict( 236 | {"invalid": [("first", "category")]}, # String key instead of tuple 237 | {("norm", "key1"): Dummy(10)}, 238 | ) 239 | 240 | 241 | def test_restrictions_attribute(): 242 | """Test that restrictions are properly stored as private attribute.""" 243 | restrictions = {("norm", "key1"): [("first", "category")]} 244 | rsmd = RestrictedSparseMatrixDict(restrictions, {("norm", "key1"): Dummy(10)}) 245 | 246 | assert hasattr(rsmd, "_restrictions") 247 | assert rsmd._restrictions == 
restrictions 248 | -------------------------------------------------------------------------------- /tests/test_to_dataframe.py: -------------------------------------------------------------------------------- 1 | try: 2 | import bw2data as bd 3 | import bw2io as bi 4 | from bw2data.tests import bw2test 5 | except ImportError: 6 | 7 | def bw2test(func): 8 | return func 9 | 10 | bd = None 11 | 12 | import json 13 | from pathlib import Path 14 | 15 | import pandas as pd 16 | import pytest 17 | from pandas.testing import assert_frame_equal 18 | 19 | import bw2calc as bc 20 | 21 | fixture_dir = Path(__file__).resolve().parent / "fixtures" 22 | 23 | 24 | def frames(one, two): 25 | assert_frame_equal( 26 | one.reindex(sorted(one.columns), axis=1), 27 | two.reindex(sorted(two.columns), axis=1), 28 | rtol=1e-04, 29 | atol=1e-04, 30 | check_dtype=False, 31 | ) 32 | 33 | 34 | @pytest.fixture 35 | def basic_example(): 36 | mapping = dict(json.load(open(fixture_dir / "bw2io_example_db_mapping.json"))) 37 | packages = [ 38 | fixture_dir / "bw2io_example_db.zip", 39 | fixture_dir / "ipcc_simple.zip", 40 | ] 41 | 42 | lca = bc.LCA( 43 | {mapping["Driving an electric car"]: 1}, 44 | data_objs=packages, 45 | ) 46 | lca.lci() 47 | lca.lcia() 48 | 49 | return lca, mapping 50 | 51 | 52 | def test_to_dataframe_basic(basic_example): 53 | lca, mapping = basic_example 54 | 55 | elec = mapping["Electricity"] 56 | steel = mapping["Steel"] 57 | co2 = mapping["CO2"] 58 | 59 | df = lca.to_dataframe(annotate=False) 60 | 61 | expected = pd.DataFrame( 62 | [ 63 | { 64 | "row_id": co2, 65 | "row_index": lca.dicts.biosphere[co2], 66 | "amount": 0.16800001296144934, 67 | "col_id": elec, 68 | "col_index": lca.dicts.activity[elec], 69 | }, 70 | { 71 | "row_id": co2, 72 | "row_index": lca.dicts.biosphere[co2], 73 | "amount": 0.014481599635022317, 74 | "col_id": steel, 75 | "col_index": lca.dicts.activity[steel], 76 | }, 77 | ] 78 | ) 79 | frames(expected, df) 80 | 81 | 82 | def test_to_dataframe_inventory_matrix(basic_example): 83 | lca, mapping = basic_example 84 | 85 | elec = mapping["Electricity"] 86 | steel = mapping["Steel"] 87 | co2 = mapping["CO2"] 88 | 89 | df = lca.to_dataframe(matrix_label="inventory", annotate=False) 90 | 91 | expected = pd.DataFrame( 92 | [ 93 | { 94 | "row_id": co2, 95 | "row_index": lca.dicts.biosphere[co2], 96 | "amount": 0.16800001296144934, 97 | "col_id": elec, 98 | "col_index": lca.dicts.activity[elec], 99 | }, 100 | { 101 | "row_id": co2, 102 | "row_index": lca.dicts.biosphere[co2], 103 | "amount": 0.014481599635022317, 104 | "col_id": steel, 105 | "col_index": lca.dicts.activity[steel], 106 | }, 107 | ] 108 | ) 109 | frames(expected, df) 110 | 111 | 112 | def test_to_dataframe_characterization_matrix(basic_example): 113 | lca, mapping = basic_example 114 | 115 | co2 = mapping["CO2"] 116 | 117 | df = lca.to_dataframe(matrix_label="characterization_matrix", annotate=False) 118 | 119 | expected = pd.DataFrame( 120 | [ 121 | { 122 | "row_id": co2, 123 | "row_index": lca.dicts.biosphere[co2], 124 | "amount": 1, 125 | "col_id": co2, 126 | "col_index": lca.dicts.biosphere[co2], 127 | } 128 | ] 129 | ) 130 | frames(expected, df) 131 | 132 | 133 | def test_to_dataframe_technosphere_matrix(basic_example): 134 | lca, mapping = basic_example 135 | 136 | elec = mapping["Electricity"] 137 | steel = mapping["Steel"] 138 | batt = mapping["Electric car battery"] 139 | ecar = mapping["Electric car"] 140 | ccar = mapping["Combustion car"] 141 | 142 | df = lca.to_dataframe(matrix_label="technosphere_matrix", 
annotate=False, cutoff=4) 143 | 144 | expected = pd.DataFrame( 145 | [ 146 | { 147 | "row_id": elec, 148 | "row_index": lca.dicts.product[elec], 149 | "amount": -2000, 150 | "col_id": batt, 151 | "col_index": lca.dicts.activity[batt], 152 | }, 153 | { 154 | "row_id": steel, 155 | "row_index": lca.dicts.product[steel], 156 | "amount": -1921, 157 | "col_id": ecar, 158 | "col_index": lca.dicts.activity[ecar], 159 | }, 160 | { 161 | "row_id": steel, 162 | "row_index": lca.dicts.product[steel], 163 | "amount": -1641, 164 | "col_id": ccar, 165 | "col_index": lca.dicts.activity[ccar], 166 | }, 167 | { 168 | "row_id": steel, 169 | "row_index": lca.dicts.product[steel], 170 | "amount": -9.880000114440918, 171 | "col_id": batt, 172 | "col_index": lca.dicts.activity[batt], 173 | }, 174 | ] 175 | ) 176 | frames(expected, df) 177 | 178 | 179 | def test_to_dataframe_biosphere_matrix(basic_example): 180 | lca, mapping = basic_example 181 | 182 | co2 = mapping["CO2"] 183 | elec = mapping["Electricity"] 184 | steel = mapping["Steel"] 185 | ccar = mapping["Driving an combustion car"] 186 | 187 | df = lca.to_dataframe(matrix_label="biosphere_matrix", annotate=False) 188 | 189 | expected = pd.DataFrame( 190 | [ 191 | { 192 | "row_id": co2, 193 | "row_index": lca.dicts.biosphere[co2], 194 | "amount": 1.5, 195 | "col_id": steel, 196 | "col_index": lca.dicts.activity[steel], 197 | }, 198 | { 199 | "row_id": co2, 200 | "row_index": lca.dicts.biosphere[co2], 201 | "amount": 0.6, 202 | "col_id": elec, 203 | "col_index": lca.dicts.activity[elec], 204 | }, 205 | { 206 | "row_id": co2, 207 | "row_index": lca.dicts.biosphere[co2], 208 | "amount": 0.1426, 209 | "col_id": ccar, 210 | "col_index": lca.dicts.activity[ccar], 211 | }, 212 | ] 213 | ) 214 | frames(expected, df) 215 | 216 | 217 | def test_to_dataframe_number_cutoff(basic_example): 218 | lca, mapping = basic_example 219 | 220 | elec = mapping["Electricity"] 221 | co2 = mapping["CO2"] 222 | 223 | df = lca.to_dataframe(annotate=False, cutoff=1) 224 | 225 | expected = pd.DataFrame( 226 | [ 227 | { 228 | "row_id": co2, 229 | "row_index": lca.dicts.biosphere[co2], 230 | "amount": 0.16800001296144934, 231 | "col_id": elec, 232 | "col_index": lca.dicts.activity[elec], 233 | } 234 | ] 235 | ) 236 | frames(expected, df) 237 | 238 | 239 | def test_to_dataframe_fraction_cutoff(basic_example): 240 | lca, mapping = basic_example 241 | 242 | elec = mapping["Electricity"] 243 | steel = mapping["Steel"] 244 | co2 = mapping["CO2"] 245 | 246 | df = lca.to_dataframe(annotate=False) 247 | 248 | expected = pd.DataFrame( 249 | [ 250 | { 251 | "row_id": co2, 252 | "row_index": lca.dicts.biosphere[co2], 253 | "amount": 0.16800001296144934, 254 | "col_id": elec, 255 | "col_index": lca.dicts.activity[elec], 256 | }, 257 | { 258 | "row_id": co2, 259 | "row_index": lca.dicts.biosphere[co2], 260 | "amount": 0.014481599635022317, 261 | "col_id": steel, 262 | "col_index": lca.dicts.activity[steel], 263 | }, 264 | ] 265 | ) 266 | frames(expected, df) 267 | 268 | 269 | def test_to_dataframe_custom_mappings(basic_example): 270 | lca, mapping = basic_example 271 | 272 | elec = mapping["Electricity"] 273 | steel = mapping["Steel"] 274 | co2 = mapping["CO2"] 275 | 276 | df = lca.to_dataframe( 277 | annotate=False, 278 | row_dict={lca.dicts.biosphere[co2]: 111}, 279 | col_dict={ 280 | lca.dicts.activity[steel]: 201, 281 | lca.dicts.activity[elec]: 202, 282 | }, 283 | ) 284 | 285 | expected = pd.DataFrame( 286 | [ 287 | { 288 | "row_id": 111, 289 | "row_index": lca.dicts.biosphere[co2], 290 | 
"amount": 0.16800001296144934, 291 | "col_id": 202, 292 | "col_index": lca.dicts.activity[elec], 293 | }, 294 | { 295 | "row_id": 111, 296 | "row_index": lca.dicts.biosphere[co2], 297 | "amount": 0.014481599635022317, 298 | "col_id": 201, 299 | "col_index": lca.dicts.activity[steel], 300 | }, 301 | ] 302 | ) 303 | print(df) 304 | frames(expected, df) 305 | 306 | 307 | @pytest.mark.skipif(not bd, reason="bw2data not installed") 308 | @bw2test 309 | def test_to_dataframe_annotated(basic_example): 310 | bi.add_example_database() 311 | 312 | co2 = bd.get_node(code="CO2") 313 | steel = bd.get_node(code="Steel") 314 | elec = bd.get_node(code="Electricity") 315 | driving = bd.get_node(code="Driving an electric car") 316 | 317 | lca = bc.LCA({driving: 1}, method=("IPCC", "simple")) 318 | lca.lci() 319 | lca.lcia() 320 | 321 | df = lca.to_dataframe() 322 | 323 | expected = pd.DataFrame( 324 | [ 325 | { 326 | "row_id": co2.id, 327 | "row_index": lca.dicts.biosphere[co2.id], 328 | "row_code": co2["code"], 329 | "row_database": co2["database"], 330 | "row_location": None, 331 | "row_categories": None, 332 | "row_type": co2["type"], 333 | "row_name": co2["name"], 334 | "row_unit": co2["unit"], 335 | "row_product": None, 336 | "amount": 0.16800001296144934, 337 | "col_id": elec.id, 338 | "col_index": lca.dicts.activity[elec.id], 339 | "col_code": elec["code"], 340 | "col_database": elec["database"], 341 | "col_location": elec["location"], 342 | "col_name": elec["name"], 343 | "col_reference_product": elec["reference product"], 344 | "col_type": elec["type"], 345 | "col_unit": elec["unit"], 346 | }, 347 | { 348 | "row_id": co2.id, 349 | "row_index": lca.dicts.biosphere[co2.id], 350 | "row_code": co2["code"], 351 | "row_database": co2["database"], 352 | "row_location": None, 353 | "row_categories": None, 354 | "row_type": co2["type"], 355 | "row_name": co2["name"], 356 | "row_unit": co2["unit"], 357 | "row_product": None, 358 | "amount": 0.014481599635022317, 359 | "col_id": steel.id, 360 | "col_index": lca.dicts.activity[steel.id], 361 | "col_code": steel["code"], 362 | "col_database": steel["database"], 363 | "col_location": steel["location"], 364 | "col_name": steel["name"], 365 | "col_reference_product": steel["reference product"], 366 | "col_type": steel["type"], 367 | "col_unit": steel["unit"], 368 | }, 369 | ] 370 | ) 371 | frames(expected, df) 372 | -------------------------------------------------------------------------------- /tests/test_fast_supply_arrays.py: -------------------------------------------------------------------------------- 1 | """Unit tests for FastSupplyArraysMixin.""" 2 | 3 | from unittest.mock import patch 4 | 5 | import numpy as np 6 | import pytest 7 | 8 | from bw2calc import PYPARDISO, UMFPACK 9 | from bw2calc.fast_supply_arrays import FastSupplyArraysMixin 10 | from bw2calc.lca import LCA 11 | 12 | 13 | # Create a test class that mixes FastSupplyArraysMixin with LCA 14 | # Note: Name doesn't start with "Test" to avoid pytest collection 15 | class LCAWithFastSupplyArrays(LCA, FastSupplyArraysMixin): 16 | """Test class that mixes FastSupplyArraysMixin with LCA.""" 17 | 18 | pass 19 | 20 | 21 | @pytest.fixture 22 | def basic_lca(fixture_dir): 23 | """Create a basic LCA instance with FastSupplyArraysMixin.""" 24 | packages = [fixture_dir / "basic_fixture.zip"] 25 | lca = LCAWithFastSupplyArrays({1: 1}, data_objs=packages) 26 | lca.load_lci_data() 27 | return lca 28 | 29 | 30 | class TestChunkSize: 31 | """Test chunk_size attribute and set_chunk_size method.""" 32 | 33 | def 
test_default_chunk_size(self, basic_lca): 34 | """Test that default chunk_size is 50.""" 35 | assert basic_lca.chunk_size == 50 36 | 37 | def test_set_chunk_size_valid(self, basic_lca): 38 | """Test setting chunk_size with valid values.""" 39 | basic_lca.set_chunk_size(100) 40 | assert basic_lca.chunk_size == 100 41 | 42 | basic_lca.set_chunk_size(1) 43 | assert basic_lca.chunk_size == 1 44 | 45 | def test_set_chunk_size_invalid(self, basic_lca): 46 | """Test that set_chunk_size raises ValueError for invalid values.""" 47 | with pytest.raises(ValueError, match="Invalid chunk_size"): 48 | basic_lca.set_chunk_size(0) 49 | 50 | with pytest.raises(ValueError, match="Invalid chunk_size"): 51 | basic_lca.set_chunk_size(-1) 52 | 53 | 54 | class TestCalculateSupplyArraysNoSolver: 55 | """Test calculate_supply_arrays when no solver is available.""" 56 | 57 | def test_no_solver_error(self, basic_lca, no_solvers_available): 58 | """Test that calculate_supply_arrays raises error when no solver is available.""" 59 | # Create demand array with correct size 60 | demand = np.zeros(len(basic_lca.dicts.product)) 61 | demand[basic_lca.dicts.product[1]] = 1.0 62 | demand_arrays = [demand] 63 | 64 | # Patch the fast_supply_arrays module's solver flags 65 | with ( 66 | patch("bw2calc.fast_supply_arrays.PYPARDISO", False), 67 | patch("bw2calc.fast_supply_arrays.UMFPACK", False), 68 | ): 69 | with pytest.raises(ValueError, match="only supported with PARDISO and UMFPACK solvers"): 70 | basic_lca.calculate_supply_arrays(demand_arrays) 71 | 72 | 73 | class TestCalculateSupplyArraysUMFPACK: 74 | """Test calculate_supply_arrays with UMFPACK solver.""" 75 | 76 | @pytest.mark.skipif(not UMFPACK, reason="UMFPACK not available") 77 | def test_calculate_supply_arrays_umfpack_single(self, basic_lca): 78 | """Test calculate_supply_arrays with UMFPACK for a single demand array.""" 79 | # Build demand array 80 | basic_lca.build_demand_array() 81 | demand_arrays = [basic_lca.demand_array] 82 | 83 | # Calculate supply arrays 84 | result = basic_lca.calculate_supply_arrays(demand_arrays) 85 | 86 | # Check result shape 87 | assert result.shape == (basic_lca.technosphere_matrix.shape[0], 1) 88 | 89 | # Verify against standard solve 90 | expected = basic_lca.solve_linear_system(basic_lca.demand_array) 91 | np.testing.assert_array_almost_equal(result[:, 0], expected) 92 | 93 | @pytest.mark.skipif(not UMFPACK, reason="UMFPACK not available") 94 | def test_calculate_supply_arrays_umfpack_multiple(self, basic_lca): 95 | """Test calculate_supply_arrays with UMFPACK for multiple demand arrays.""" 96 | # Create multiple demand arrays 97 | demand1 = np.zeros(len(basic_lca.dicts.product)) 98 | demand1[basic_lca.dicts.product[1]] = 1.0 99 | 100 | demand2 = np.zeros(len(basic_lca.dicts.product)) 101 | demand2[basic_lca.dicts.product[1]] = 2.0 102 | 103 | demand_arrays = [demand1, demand2] 104 | 105 | # Calculate supply arrays 106 | result = basic_lca.calculate_supply_arrays(demand_arrays) 107 | 108 | # Check result shape 109 | assert result.shape == (basic_lca.technosphere_matrix.shape[0], 2) 110 | 111 | # Verify against standard solve for each demand 112 | expected1 = basic_lca.solve_linear_system(demand1) 113 | expected2 = basic_lca.solve_linear_system(demand2) 114 | 115 | np.testing.assert_array_almost_equal(result[:, 0], expected1) 116 | np.testing.assert_array_almost_equal(result[:, 1], expected2) 117 | 118 | def test_calculate_supply_arrays_umfpack_mocked(self, basic_lca, mock_umfpack_solver): 119 | """Test calculate_supply_arrays with 
mocked UMFPACK solver.""" 120 | # Create demand array with correct size 121 | demand = np.zeros(len(basic_lca.dicts.product)) 122 | demand[basic_lca.dicts.product[1]] = 1.0 123 | demand_arrays = [demand] 124 | 125 | with ( 126 | patch("bw2calc.fast_supply_arrays.UMFPACK", True), 127 | patch("bw2calc.fast_supply_arrays.PYPARDISO", False), 128 | patch("bw2calc.fast_supply_arrays.factorized", mock_umfpack_solver), 129 | ): 130 | result = basic_lca.calculate_supply_arrays(demand_arrays) 131 | 132 | # Check result shape 133 | assert result.shape == (basic_lca.technosphere_matrix.shape[0], 1) 134 | 135 | # With mocked solver returning ones, result should be ones 136 | expected = np.ones(basic_lca.technosphere_matrix.shape[0]) 137 | np.testing.assert_array_equal(result[:, 0], expected) 138 | 139 | 140 | class TestCalculateSupplyArraysPARDISO: 141 | """Test calculate_supply_arrays with PARDISO solver.""" 142 | 143 | @pytest.mark.skipif(not PYPARDISO, reason="PARDISO not available") 144 | def test_calculate_supply_arrays_pardiso_single(self, basic_lca): 145 | """Test calculate_supply_arrays with PARDISO for a single demand array.""" 146 | # Build demand array 147 | basic_lca.build_demand_array() 148 | demand_arrays = [basic_lca.demand_array] 149 | 150 | # Calculate supply arrays 151 | result = basic_lca.calculate_supply_arrays(demand_arrays) 152 | 153 | # Check result shape 154 | assert result.shape == (basic_lca.technosphere_matrix.shape[0], 1) 155 | 156 | # Verify against standard solve 157 | expected = basic_lca.solve_linear_system(basic_lca.demand_array) 158 | np.testing.assert_array_almost_equal(result[:, 0], expected) 159 | 160 | @pytest.mark.skipif(not PYPARDISO, reason="PARDISO not available") 161 | def test_calculate_supply_arrays_pardiso_multiple(self, basic_lca): 162 | """Test calculate_supply_arrays with PARDISO for multiple demand arrays.""" 163 | # Create multiple demand arrays 164 | demand1 = np.zeros(len(basic_lca.dicts.product)) 165 | demand1[basic_lca.dicts.product[1]] = 1.0 166 | 167 | demand2 = np.zeros(len(basic_lca.dicts.product)) 168 | demand2[basic_lca.dicts.product[1]] = 2.0 169 | 170 | demand_arrays = [demand1, demand2] 171 | 172 | # Calculate supply arrays 173 | result = basic_lca.calculate_supply_arrays(demand_arrays) 174 | 175 | # Check result shape 176 | assert result.shape == (basic_lca.technosphere_matrix.shape[0], 2) 177 | 178 | # Verify against standard solve for each demand 179 | expected1 = basic_lca.solve_linear_system(demand1) 180 | expected2 = basic_lca.solve_linear_system(demand2) 181 | 182 | np.testing.assert_array_almost_equal(result[:, 0], expected1) 183 | np.testing.assert_array_almost_equal(result[:, 1], expected2) 184 | 185 | def test_calculate_supply_arrays_pardiso_chunking( 186 | self, basic_lca, only_pypardiso_available, mock_pypardiso_solver 187 | ): 188 | """Test calculate_supply_arrays with PARDISO and chunking.""" 189 | # Set a small chunk size to test chunking 190 | basic_lca.set_chunk_size(1) 191 | 192 | # Create multiple demand arrays (more than chunk_size) 193 | demand_arrays = [] 194 | for i in range(5): 195 | demand = np.zeros(len(basic_lca.dicts.product)) 196 | demand[basic_lca.dicts.product[1]] = float(i + 1) 197 | demand_arrays.append(demand) 198 | 199 | with ( 200 | patch("bw2calc.fast_supply_arrays.PyPardisoSolver", mock_pypardiso_solver), 201 | patch("bw2calc.fast_supply_arrays.PYPARDISO", True), 202 | ): 203 | result = basic_lca.calculate_supply_arrays(demand_arrays) 204 | 205 | # Check result shape 206 | assert result.shape == 
(basic_lca.technosphere_matrix.shape[0], 5) 207 | 208 | def test_calculate_supply_arrays_pardiso_mocked( 209 | self, basic_lca, only_pypardiso_available, mock_pypardiso_solver 210 | ): 211 | """Test calculate_supply_arrays with mocked PARDISO solver.""" 212 | # Create demand array with correct size 213 | demand = np.zeros(len(basic_lca.dicts.product)) 214 | demand[basic_lca.dicts.product[1]] = 1.0 215 | demand_arrays = [demand] 216 | 217 | with ( 218 | patch("bw2calc.fast_supply_arrays.PyPardisoSolver", mock_pypardiso_solver), 219 | patch("bw2calc.fast_supply_arrays.PYPARDISO", True), 220 | ): 221 | result = basic_lca.calculate_supply_arrays(demand_arrays) 222 | 223 | # Check result shape 224 | assert result.shape == (basic_lca.technosphere_matrix.shape[0], 1) 225 | 226 | # Mock solver returns ones, so result should be ones 227 | expected = np.ones(basic_lca.technosphere_matrix.shape[0]) 228 | np.testing.assert_array_equal(result[:, 0], expected) 229 | 230 | def test_calculate_supply_arrays_pardiso_large_chunk( 231 | self, basic_lca, only_pypardiso_available, mock_pypardiso_solver 232 | ): 233 | """Test calculate_supply_arrays with PARDISO and large chunk size.""" 234 | # Set a large chunk size 235 | basic_lca.set_chunk_size(100) 236 | 237 | # Create multiple demand arrays 238 | demand_arrays = [] 239 | for i in range(3): 240 | demand = np.zeros(len(basic_lca.dicts.product)) 241 | demand[basic_lca.dicts.product[1]] = float(i + 1) 242 | demand_arrays.append(demand) 243 | 244 | with ( 245 | patch("bw2calc.fast_supply_arrays.PyPardisoSolver", mock_pypardiso_solver), 246 | patch("bw2calc.fast_supply_arrays.PYPARDISO", True), 247 | ): 248 | result = basic_lca.calculate_supply_arrays(demand_arrays) 249 | 250 | # Check result shape 251 | assert result.shape == (basic_lca.technosphere_matrix.shape[0], 3) 252 | 253 | 254 | class TestCalculateSupplyArraysEdgeCases: 255 | """Test edge cases for calculate_supply_arrays.""" 256 | 257 | def test_empty_demand_arrays(self, basic_lca, only_pypardiso_available, mock_pypardiso_solver): 258 | """Test calculate_supply_arrays with empty demand_arrays list.""" 259 | with ( 260 | patch("bw2calc.fast_supply_arrays.PyPardisoSolver", mock_pypardiso_solver), 261 | patch("bw2calc.fast_supply_arrays.PYPARDISO", True), 262 | ): 263 | # Empty list should return array with shape (n_rows, 0) 264 | result = basic_lca.calculate_supply_arrays([]) 265 | assert result.shape == (basic_lca.technosphere_matrix.shape[0], 0) 266 | 267 | @pytest.mark.skipif(not PYPARDISO and not UMFPACK, reason="No fast solver available") 268 | def test_single_element_demand_array(self, basic_lca): 269 | """Test calculate_supply_arrays with a single element demand array.""" 270 | # Create a minimal demand array 271 | demand = np.zeros(len(basic_lca.dicts.product)) 272 | demand[basic_lca.dicts.product[1]] = 1.0 273 | 274 | result = basic_lca.calculate_supply_arrays([demand]) 275 | 276 | # Check result shape 277 | assert result.shape == (basic_lca.technosphere_matrix.shape[0], 1) 278 | assert result.shape[0] == basic_lca.technosphere_matrix.shape[0] 279 | -------------------------------------------------------------------------------- /tests/test_fast_scores.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | from pathlib import Path 3 | from unittest.mock import patch 4 | 5 | import numpy as np 6 | import pytest 7 | import xarray 8 | 9 | from bw2calc.fast_scores import PYPARDISO, FastScoresOnlyMultiLCA 10 | from bw2calc.method_config import 
MethodConfig 11 | from bw2calc.utils import get_datapackage 12 | 13 | try: 14 | import scikits.umfpack # noqa: F401 15 | 16 | UMFPACK = True 17 | except ImportError: 18 | UMFPACK = False 19 | 20 | 21 | def test_initialization_chunk_size(basic_test_data): 22 | """Test initialization with default chunk size.""" 23 | fsmlca = FastScoresOnlyMultiLCA( 24 | demands=basic_test_data["demands"], 25 | method_config=basic_test_data["config"], 26 | data_objs=basic_test_data["dps"], 27 | ) 28 | assert fsmlca.chunk_size == 50 29 | 30 | fsmlca = FastScoresOnlyMultiLCA( 31 | demands=basic_test_data["demands"], 32 | method_config=basic_test_data["config"], 33 | data_objs=basic_test_data["dps"], 34 | chunk_size=100, 35 | ) 36 | assert fsmlca.chunk_size == 100 37 | 38 | 39 | def test_umfpack_warning(basic_test_data, only_umfpack_available): 40 | """Test that UMFPACK warning is issued when using FastScoresOnlyMultiLCA.""" 41 | with warnings.catch_warnings(record=True) as w: 42 | warnings.simplefilter("always") 43 | FastScoresOnlyMultiLCA( 44 | demands=basic_test_data["demands"], 45 | method_config=basic_test_data["config"], 46 | data_objs=basic_test_data["dps"], 47 | ) 48 | assert len(w) == 1 49 | assert "UMFPACK" in str(w[0].message) 50 | assert "PARDISO" in str(w[0].message) 51 | 52 | 53 | def test_methods_not_implemented(basic_test_data): 54 | fsmlca = FastScoresOnlyMultiLCA( 55 | demands=basic_test_data["demands"], 56 | method_config=basic_test_data["config"], 57 | data_objs=basic_test_data["dps"], 58 | ) 59 | with pytest.raises(NotImplementedError, match="LCI and LCIA aren't separate"): 60 | fsmlca.lci() 61 | with pytest.raises(NotImplementedError, match="LCI and LCIA aren't separate"): 62 | fsmlca.lci_calculation() 63 | with pytest.raises(NotImplementedError, match="LCI and LCIA aren't separate"): 64 | fsmlca.lcia() 65 | with pytest.raises(NotImplementedError, match="LCI and LCIA aren't separate"): 66 | fsmlca.lcia_calculation() 67 | 68 | 69 | def test_build_precalculated_basic(basic_test_data): 70 | """Test build_precalculated with basic setup.""" 71 | fsmlca = FastScoresOnlyMultiLCA( 72 | demands=basic_test_data["demands"], 73 | method_config=basic_test_data["config"], 74 | data_objs=basic_test_data["dps"], 75 | ) 76 | fsmlca._load_datapackages() 77 | 78 | fsmlca.build_precalculated() 79 | 80 | # Check that precalculated is a dictionary 81 | assert isinstance(fsmlca.precalculated, dict) 82 | 83 | # Check that all characterization matrices are processed 84 | assert len(fsmlca.precalculated) == len(fsmlca.characterization_matrices) 85 | 86 | # Check that the correct keys are present 87 | expected_keys = {("first", "category"), ("second", "category")} 88 | assert set(fsmlca.precalculated.keys()) == expected_keys 89 | 90 | # Check that each precalculated matrix is a numpy array 91 | for key, matrix in fsmlca.precalculated.items(): 92 | assert isinstance(matrix, np.ndarray) 93 | assert matrix.ndim == 2 94 | assert matrix.shape[0] == 1 95 | assert key in expected_keys 96 | 97 | 98 | def test_build_precalculated_with_normalization(basic_test_data): 99 | """Test build_precalculated with normalization matrices.""" 100 | # Add normalization datapackage 101 | fixture_dir = Path(__file__).resolve().parent / "fixtures" 102 | dps_with_norm = basic_test_data["dps"] + [ 103 | get_datapackage(fixture_dir / "multi_lca_simple_normalization.zip") 104 | ] 105 | 106 | config_with_norm = { 107 | "impact_categories": [ 108 | ("first", "category"), 109 | ("second", "category"), 110 | ], 111 | "normalizations": { 112 | ("n", 
"1"): [ 113 | ("first", "category"), 114 | ("second", "category"), 115 | ] 116 | }, 117 | } 118 | 119 | fsmlca = FastScoresOnlyMultiLCA( 120 | demands=basic_test_data["demands"], method_config=config_with_norm, data_objs=dps_with_norm 121 | ) 122 | fsmlca._load_datapackages() 123 | 124 | fsmlca.build_precalculated() 125 | 126 | assert hasattr(fsmlca, "normalization_matrices") 127 | assert isinstance(fsmlca.precalculated, dict) 128 | 129 | # Check that the correct keys are present (should be the same as basic test) 130 | expected_keys = {(("n", "1"), ("first", "category")), (("n", "1"), ("second", "category"))} 131 | assert set(fsmlca.precalculated.keys()) == expected_keys 132 | 133 | # Check that each precalculated matrix is a numpy array 134 | for key, matrix in fsmlca.precalculated.items(): 135 | assert isinstance(matrix, np.ndarray) 136 | assert matrix.ndim == 2 137 | assert matrix.shape[0] == 1 138 | assert key in expected_keys 139 | 140 | 141 | def test_build_precalculated_with_weighting(basic_test_data): 142 | """Test build_precalculated with weighting matrices.""" 143 | # Add weighting datapackage 144 | fixture_dir = Path(__file__).resolve().parent / "fixtures" 145 | dps_with_weight = basic_test_data["dps"] + [ 146 | get_datapackage(fixture_dir / "multi_lca_simple_weighting.zip") 147 | ] 148 | 149 | config_with_weight = { 150 | "impact_categories": [ 151 | ("first", "category"), 152 | ("second", "category"), 153 | ], 154 | "weightings": { 155 | ("w", "1"): [ 156 | ("first", "category"), 157 | ("second", "category"), 158 | ] 159 | }, 160 | } 161 | 162 | fsmlca = FastScoresOnlyMultiLCA( 163 | demands=basic_test_data["demands"], 164 | method_config=config_with_weight, 165 | data_objs=dps_with_weight, 166 | ) 167 | fsmlca._load_datapackages() 168 | fsmlca.build_precalculated() 169 | 170 | # Check that weighting matrices are applied 171 | assert hasattr(fsmlca, "weighting_matrices") 172 | assert isinstance(fsmlca.precalculated, dict) 173 | 174 | # Check that the correct keys are present (should be the same as basic test) 175 | expected_keys = {(("w", "1"), ("first", "category")), (("w", "1"), ("second", "category"))} 176 | assert set(fsmlca.precalculated.keys()) == expected_keys 177 | 178 | # Check that each precalculated matrix is a numpy array 179 | for key, matrix in fsmlca.precalculated.items(): 180 | assert isinstance(matrix, np.ndarray) 181 | assert matrix.ndim == 2 182 | assert matrix.shape[0] == 1 183 | assert key in expected_keys 184 | 185 | 186 | def test_no_solver_fixture(no_solvers_available): 187 | with pytest.raises(ImportError): 188 | import pypardiso 189 | 190 | assert pypardiso 191 | with pytest.raises(ImportError): 192 | import scikits.umfpack # noqa: F811 193 | 194 | assert scikits.umfpack 195 | 196 | 197 | def test_calculate_no_solver_error(basic_test_data, no_solvers_available): 198 | """Test that calculate raises error when no suitable solver is available.""" 199 | fsmlca = FastScoresOnlyMultiLCA( 200 | demands=basic_test_data["demands"], 201 | method_config=basic_test_data["config"], 202 | data_objs=basic_test_data["dps"], 203 | ) 204 | fsmlca._load_datapackages() 205 | fsmlca.build_precalculated() 206 | 207 | with pytest.raises(ValueError, match="only supported with PARDISO and UMFPACK solvers"): 208 | fsmlca.calculate() 209 | 210 | 211 | def test_next_no_solver_error(basic_test_data, no_solvers_available): 212 | """Test that __next__ raises error when no suitable solver is available.""" 213 | fsmlca = FastScoresOnlyMultiLCA( 214 | 
demands=basic_test_data["demands"], 215 | method_config=basic_test_data["config"], 216 | data_objs=basic_test_data["dps"], 217 | ) 218 | fsmlca._load_datapackages() 219 | fsmlca.build_precalculated() 220 | 221 | with pytest.raises(ValueError, match="only supported with PARDISO and UMFPACK"): 222 | next(fsmlca) 223 | 224 | 225 | # Test the scores property getter and setter 226 | 227 | 228 | def test_scores_getter_not_calculated(basic_test_data): 229 | """Test that scores getter raises error when scores not calculated.""" 230 | fsmlca = FastScoresOnlyMultiLCA( 231 | demands=basic_test_data["demands"], 232 | method_config=basic_test_data["config"], 233 | data_objs=basic_test_data["dps"], 234 | ) 235 | 236 | with pytest.raises(ValueError, match="Scores not calculated yet"): 237 | _ = fsmlca.scores 238 | 239 | 240 | def test_scores_setter_and_getter(basic_test_data): 241 | """Test that scores setter and getter work correctly.""" 242 | fsmlca = FastScoresOnlyMultiLCA( 243 | demands=basic_test_data["demands"], 244 | method_config=basic_test_data["config"], 245 | data_objs=basic_test_data["dps"], 246 | ) 247 | 248 | # Create a mock DataArray 249 | mock_scores = xarray.DataArray( 250 | np.array([[1.0, 2.0, 3.0]]), 251 | coords=[["first||category"], ["γ", "ε", "ζ"]], 252 | dims=["LCIA", "processes"], 253 | ) 254 | 255 | # Set scores 256 | fsmlca.scores = mock_scores 257 | 258 | # Get scores 259 | retrieved_scores = fsmlca.scores 260 | 261 | # Check that they are the same 262 | assert retrieved_scores is mock_scores 263 | assert hasattr(fsmlca, "_scores") 264 | assert fsmlca._scores is mock_scores 265 | 266 | 267 | def test_calculation_with_different_chunk_sizes( 268 | basic_test_data, only_pypardiso_available, mock_pypardiso_solver 269 | ): 270 | """Test calculation with different chunk sizes.""" 271 | chunk_sizes = [1, 2, 5, 10, 50] 272 | 273 | for chunk_size in chunk_sizes: 274 | fsmlca = FastScoresOnlyMultiLCA( 275 | demands=basic_test_data["demands"], 276 | method_config=basic_test_data["config"], 277 | data_objs=basic_test_data["dps"], 278 | chunk_size=chunk_size, 279 | ) 280 | assert fsmlca.chunk_size == chunk_size 281 | 282 | # Mock the PyPardisoSolver and PYPARDISO flag 283 | with ( 284 | patch("bw2calc.fast_supply_arrays.PYPARDISO", True), 285 | patch("bw2calc.fast_supply_arrays.PyPardisoSolver", mock_pypardiso_solver), 286 | ): 287 | result = fsmlca.calculate() 288 | 289 | # All should produce valid results 290 | assert isinstance(result, xarray.DataArray) 291 | assert result.dims == ("LCIA", "processes") 292 | assert result.shape[0] == 2 # Two impact categories 293 | assert result.shape[1] == 3 # Three functional units 294 | 295 | 296 | def test_calculation_with_normalization_and_weighting( 297 | basic_test_data, only_pypardiso_available, mock_pypardiso_solver 298 | ): 299 | """Test calculation with normalization and weighting.""" 300 | # Add normalization and weighting datapackages 301 | fixture_dir = Path(__file__).resolve().parent / "fixtures" 302 | dps_with_all = basic_test_data["dps"] + [ 303 | get_datapackage(fixture_dir / "multi_lca_simple_normalization.zip"), 304 | get_datapackage(fixture_dir / "multi_lca_simple_weighting.zip"), 305 | ] 306 | 307 | config_with_all = { 308 | "impact_categories": [ 309 | ("first", "category"), 310 | ("second", "category"), 311 | ], 312 | "normalizations": { 313 | ("n", "1"): [ 314 | ("first", "category"), 315 | ("second", "category"), 316 | ] 317 | }, 318 | "weightings": { 319 | ("w", "1"): [ 320 | ("n", "1"), 321 | ] 322 | }, 323 | } 324 | 325 | 
fsmlca = FastScoresOnlyMultiLCA( 326 | demands=basic_test_data["demands"], method_config=config_with_all, data_objs=dps_with_all 327 | ) 328 | 329 | # Mock the PyPardisoSolver 330 | with ( 331 | patch("bw2calc.fast_supply_arrays.PYPARDISO", True), 332 | patch("bw2calc.fast_supply_arrays.PyPardisoSolver", mock_pypardiso_solver), 333 | ): 334 | result = fsmlca.calculate() 335 | 336 | # Should still work with normalization and weighting 337 | assert isinstance(result, xarray.DataArray) 338 | assert result.dims == ("LCIA", "processes") 339 | assert hasattr(fsmlca, "precalculated") 340 | 341 | # Check that precalculated has the correct keys 342 | expected_keys = { 343 | (("w", "1"), ("n", "1"), ("second", "category")), 344 | (("w", "1"), ("n", "1"), ("first", "category")), 345 | } 346 | assert set(fsmlca.precalculated.keys()) == expected_keys 347 | 348 | 349 | @pytest.mark.skipif((not PYPARDISO and not UMFPACK), reason="Fast sparse solvers not installed") 350 | def test_integration(basic_test_data, fixture_dir): 351 | method_config = MethodConfig( 352 | impact_categories=[ 353 | ("first", "category"), 354 | ("second", "category"), 355 | ], 356 | normalizations={ 357 | ("n", "1"): [("first", "category")], 358 | ("n", "2"): [("second", "category")], 359 | }, 360 | weightings={ 361 | ("w", "1"): [("n", "1")], 362 | ("w", "2"): [("n", "2")], 363 | }, 364 | ) 365 | 366 | func_units = basic_test_data["demands"] 367 | dps = basic_test_data["dps"] 368 | dps.append( 369 | get_datapackage(fixture_dir / "multi_lca_simple_normalization.zip"), 370 | ) 371 | dps.append( 372 | get_datapackage(fixture_dir / "multi_lca_simple_weighting.zip"), 373 | ) 374 | 375 | mlca = FastScoresOnlyMultiLCA(demands=func_units, method_config=method_config, data_objs=dps) 376 | mlca.calculate() 377 | 378 | assert mlca.scores.shape == (2, 3) 379 | assert ( 380 | mlca.scores.loc["(('w', '2'), ('n', '2'), ('second', 'category'))", "ζ"] 381 | == 3 * (3 * 10 + 1 * 10) * 84 382 | ) 383 | assert mlca.scores.loc["(('w', '1'), ('n', '1'), ('first', 'category'))", "γ"] == 3 * 42 384 | -------------------------------------------------------------------------------- /src/bw2calc/lca_base.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | from collections.abc import Iterator 3 | from functools import partial 4 | from typing import Optional, Tuple 5 | 6 | import matrix_utils as mu 7 | import numpy as np 8 | 9 | from bw2calc import PYPARDISO, factorized, spsolve 10 | from bw2calc.errors import EmptyBiosphere, NonsquareTechnosphere 11 | 12 | 13 | class LCABase(Iterator): 14 | """Base class for single and multi LCA classes""" 15 | 16 | def keep_first_iteration(self): 17 | """Set a flag to use the current values as first element when 18 | iterating. 19 | 20 | When creating the class instance, we already use the first index. This 21 | method allows us to use the values for the first index. 22 | 23 | Note that the methods ``.lci_calculation()`` and 24 | ``.lcia_calculation()`` will be called on the current values, even if 25 | these calculations have already been done. 
26 | """ 27 | self.keep_first_iteration_flag = True 28 | 29 | def check_selective_use(self, matrix_label: str) -> Tuple[bool, bool]: 30 | return ( 31 | self.selective_use.get(matrix_label, {}).get("use_arrays", self.use_arrays), 32 | self.selective_use.get(matrix_label, {}).get( 33 | "use_distributions", self.use_distributions 34 | ), 35 | ) 36 | 37 | def load_lci_data(self, nonsquare_ok=False) -> None: 38 | """Load inventory data and create technosphere and biosphere matrices.""" 39 | use_arrays, use_distributions = self.check_selective_use("technosphere_matrix") 40 | 41 | self.technosphere_mm = mu.MappedMatrix( 42 | packages=self.packages, 43 | matrix="technosphere_matrix", 44 | use_arrays=use_arrays, 45 | use_distributions=use_distributions, 46 | seed_override=self.seed_override, 47 | ) 48 | self.dicts.product = partial(self.technosphere_mm.row_mapper.to_dict) 49 | self.dicts.activity = partial(self.technosphere_mm.col_mapper.to_dict) 50 | self.technosphere_matrix = self.technosphere_mm.matrix 51 | 52 | # Avoid this conversion each time we do a calculation in the future 53 | # See https://github.com/haasad/PyPardiso/issues/75#issuecomment-2186825609 54 | if PYPARDISO: 55 | self.technosphere_matrix = self.technosphere_matrix.tocsr() 56 | 57 | if ( 58 | len(self.technosphere_mm.row_mapper) != len(self.technosphere_mm.col_mapper) 59 | and not nonsquare_ok 60 | ): 61 | raise NonsquareTechnosphere( 62 | ( 63 | "Technosphere matrix is not square: {} activities " 64 | "(columns) and {} products (rows). Use LeastSquaresLCA to " 65 | "solve this system, or fix the input data" 66 | ).format( 67 | len(self.technosphere_mm.col_mapper), 68 | len(self.technosphere_mm.row_mapper), 69 | ) 70 | ) 71 | 72 | use_arrays, use_distributions = self.check_selective_use("biosphere_matrix") 73 | 74 | self.biosphere_mm = mu.MappedMatrix( 75 | packages=self.packages, 76 | matrix="biosphere_matrix", 77 | use_arrays=use_arrays, 78 | use_distributions=use_distributions, 79 | seed_override=self.seed_override, 80 | col_mapper=self.technosphere_mm.col_mapper, 81 | empty_ok=True, 82 | ) 83 | self.biosphere_matrix = self.biosphere_mm.matrix 84 | self.dicts.biosphere = partial(self.biosphere_mm.row_mapper.to_dict) 85 | 86 | if self.biosphere_mm.matrix.shape[0] == 0: 87 | warnings.warn( 88 | "No valid biosphere flows found. No inventory results can " 89 | "be calculated, `lcia` will raise an error" 90 | ) 91 | 92 | def remap_inventory_dicts(self) -> None: 93 | """Remap ``self.dicts.activity|product|biosphere`` and ``self.demand`` 94 | from database integer IDs to keys (``(database name, code)``). 95 | 96 | Uses remapping dictionaries in ``self.remapping_dicts``.""" 97 | if getattr(self, "_remapped", False): 98 | warnings.warn("Remapping has already been done; returning without changing data") 99 | return 100 | 101 | if "product" in self.remapping_dicts: 102 | self.demand = {self.remapping_dicts["product"][k]: v for k, v in self.demand.items()} 103 | 104 | for label in ("activity", "product", "biosphere"): 105 | if label in self.remapping_dicts: 106 | getattr(self.dicts, label).remap(self.remapping_dicts[label]) 107 | 108 | self._remapped = True 109 | 110 | def decompose_technosphere(self) -> None: 111 | """ 112 | Factorize the technosphere matrix into lower and upper triangular 113 | matrices, :math:`A=LU`. Does not solve the linear system :math:`Ax=B`. 114 | 115 | Doesn't return anything, but creates ``self.solver``. 116 | 117 | .. 
warning:: Incorrect results could occur if a technosphere matrix was 118 | factorized, and then a new technosphere matrix was constructed, as 119 | ``self.solver`` would still be the factorized older technosphere 120 | matrix. You are responsible for deleting ``self.solver`` when doing 121 | these types of advanced calculations. 122 | 123 | """ 124 | if PYPARDISO: 125 | warnings.warn("PARDISO installed; this is a no-op") 126 | else: 127 | # UMFPACK factorization needs CSC sparse matrix; see 128 | # https://github.com/brightway-lca/brightway2-calc/issues/132 129 | self.solver = factorized(self.technosphere_matrix.tocsc()) 130 | 131 | def solve_linear_system(self, demand: Optional[np.ndarray] = None) -> None: 132 | """ 133 | Master solution function for linear system :math:`Ax=B`. 134 | 135 | To most numerical analysts, matrix inversion is a sin. 136 | 137 | -- Nicholas Higham, Accuracy and Stability of Numerical Algorithms, 138 | Society for Industrial and Applied Mathematics, Philadelphia, PA, 139 | USA, 2002, p. 260. 140 | 141 | We use `pypardiso `_ or 142 | `UMFpack `_, which are 143 | very fast solvers for sparse matrices. 144 | 145 | If the technosphere matrix has already been factorized, then the 146 | decomposed technosphere (``self.solver``) is reused. Otherwise the 147 | calculation is redone completely. 148 | 149 | """ 150 | if demand is None: 151 | demand = self.demand_array 152 | if hasattr(self, "solver"): 153 | return self.solver(demand) 154 | else: 155 | return spsolve(self.technosphere_matrix, demand) 156 | 157 | def lci(self, demand: Optional[dict] = None, factorize: bool = False) -> None: 158 | """ 159 | Calculate a life cycle inventory. 160 | 161 | #. Load LCI data, and construct the technosphere and biosphere 162 | matrices. 163 | #. Build the demand array. 164 | #. Solve the linear system to get the supply array and life cycle 165 | inventory. 166 | 167 | Args: 168 | * *demand* (dict, optional): Demand dictionary mapping product IDs 169 | to amounts; defaults to ``self.demand``. Passing a demand here 170 | rebuilds ``self.demand_array`` before the linear system is 171 | solved. 172 | * *factorize* (bool, optional): Factorize the technosphere matrix. 173 | Makes additional calculations with the same technosphere matrix 174 | much faster. Default is ``False``; not useful if only doing one LCI 175 | calculation. 176 | 177 | Doesn't return anything, but creates ``self.supply_array`` and 178 | ``self.inventory``. 179 | 180 | """ 181 | if not hasattr(self, "technosphere_matrix"): 182 | self.load_lci_data() 183 | if demand is not None: 184 | self.check_demand(demand) 185 | self.build_demand_array(demand) 186 | self.demand = demand 187 | else: 188 | self.build_demand_array() 189 | if factorize and not PYPARDISO: 190 | self.decompose_technosphere() 191 | self.lci_calculation() 192 | 193 | def lcia(self, demand: Optional[dict] = None) -> None: 194 | """ 195 | Calculate the life cycle impact assessment. 196 | 197 | #. Load and construct the characterization matrix. 198 | #. Multiply the characterization matrix by the life cycle inventory. 199 | 200 | Doesn't return anything, but creates ``self.characterized_inventory``. 
201 | 202 | """ 203 | assert hasattr(self, "inventory") or hasattr(self, "inventories"), "Must do lci first" 204 | if not self.dicts.biosphere: 205 | raise EmptyBiosphere 206 | 207 | if not ( 208 | hasattr(self, "characterization_matrix") or hasattr(self, "characterization_matrices") 209 | ): 210 | self.load_lcia_data() 211 | if demand is not None: 212 | self.check_demand(demand) 213 | self.lci(demand=demand) 214 | self.demand = demand 215 | self.lcia_calculation() 216 | 217 | def normalize(self) -> None: 218 | """ 219 | Multiply characterized inventory by flow-specific normalization factors. 220 | """ 221 | if not ( 222 | hasattr(self, "characterized_inventory") or hasattr(self, "characterized_inventories") 223 | ): 224 | raise ValueError("Must do lcia first") 225 | if not hasattr(self, "normalization_matrix"): 226 | self.load_normalization_data() 227 | self.normalization_calculation() 228 | 229 | def weight(self) -> None: 230 | """Multiply characterized inventory by weighting value. 231 | 232 | Can be done with or without normalization.""" 233 | if not ( 234 | hasattr(self, "characterized_inventory") or hasattr(self, "characterized_inventories") 235 | ): 236 | raise ValueError("Must do lcia first") 237 | if not hasattr(self, "weighting_value"): 238 | self.load_weighting_data() 239 | self.weighting_calculation() 240 | 241 | def invert_technosphere_matrix(self): 242 | """Use one-shot approach to efficiently calculate the inverse of the 243 | technosphere matrix by simultaneously solving ``Ax=b`` for all ``b``. 244 | 245 | Technosphere matrix inversion is often not the most efficient approach. 246 | See https://github.com/brightway-lca/brightway2-calc/issues/35 247 | 248 | See `Intel forum `__ 249 | for a discussion on why we use this approach.""" # noqa: E501 250 | assert hasattr(self, "technosphere_matrix"), "Must load lci data first" 251 | 252 | if not PYPARDISO: 253 | warnings.warn( 254 | "Performance is much better with pypardiso (not available on MacOS ARM machines)" 255 | ) 256 | 257 | self.inverted_technosphere_matrix = spsolve( 258 | self.technosphere_matrix, np.eye(*self.technosphere_matrix.shape) 259 | ) 260 | return self.inverted_technosphere_matrix 261 | 262 | def has(self, label: str) -> bool: 263 | """Shortcut to find out if matrix data for type ``{label}_matrix`` is 264 | present in the given data objects. 265 | 266 | Returns a boolean. Will return ``True`` even if data for a 267 | zero-dimensional matrix is given. 
268 | """ 269 | return any( 270 | True 271 | for package in self.packages 272 | for resource in package.resources 273 | if resource["matrix"] == f"{label}_matrix" 274 | ) 275 | 276 | ################# 277 | # Compatibility # 278 | ################# 279 | 280 | @property 281 | def activity_dict(self): 282 | warnings.warn( 283 | "This method is deprecated, please use `.dicts.activity` instead", 284 | DeprecationWarning, 285 | ) 286 | return self.dicts.activity 287 | 288 | @property 289 | def product_dict(self): 290 | warnings.warn( 291 | "This method is deprecated, please use `.dicts.product` instead", 292 | DeprecationWarning, 293 | ) 294 | return self.dicts.product 295 | 296 | @property 297 | def biosphere_dict(self): 298 | warnings.warn( 299 | "This method is deprecated, please use `.dicts.biosphere` instead", 300 | DeprecationWarning, 301 | ) 302 | return self.dicts.biosphere 303 | 304 | def reverse_dict(self): 305 | warnings.warn( 306 | "This method is deprecated, please use `.dicts.X.reversed` directly", 307 | DeprecationWarning, 308 | ) 309 | return ( 310 | self.dicts.activity.reversed, 311 | self.dicts.product.reversed, 312 | self.dicts.biosphere.reversed, 313 | ) 314 | 315 | def redo_lci(self, demand: Optional[dict] = None) -> None: 316 | """Redo LCI with same databases but different demand. 317 | 318 | Args: 319 | * *demand* (dict): A demand dictionary. 320 | 321 | Doesn't return anything, but overwrites ``self.demand_array``, 322 | ``self.supply_array``, and ``self.inventory``. 323 | 324 | .. warning:: If you want to redo the LCIA as well, use 325 | ``redo_lcia(demand)`` directly. 326 | 327 | """ 328 | warnings.warn("Please use .lci(demand=demand) instead of `redo_lci`.", DeprecationWarning) 329 | self.lci(demand=demand) 330 | 331 | def redo_lcia(self, demand: Optional[dict] = None) -> None: 332 | """Redo LCIA, optionally with new demand. 333 | 334 | Args: 335 | * *demand* (dict, optional): New demand dictionary. Optional, 336 | defaults to ``self.demand``. 337 | 338 | Doesn't return anything, but overwrites 339 | ``self.characterized_inventory``. If ``demand`` is given, also 340 | overwrites ``self.demand_array``, ``self.supply_array``, and 341 | ``self.inventory``. 342 | 343 | """ 344 | warnings.warn("Please use .lcia(demand=demand) instead of `redo_lci`.", DeprecationWarning) 345 | self.lcia(demand=demand) 346 | 347 | def weighting(self) -> None: 348 | """ 349 | Backwards compatibility. Switching to verb form consistent with 350 | ``.normalize``. 
351 | """ 352 | warnings.warn("Please switch to `.weight`", DeprecationWarning) 353 | return self.weight() 354 | 355 | def _delete_solver_state(self) -> None: 356 | """Low-level function to force freeing up memory and removing any `solver` state.""" 357 | if hasattr(self, "solver"): 358 | delattr(self, "solver") 359 | if PYPARDISO: 360 | # This is global state in the pypardiso library - use built-in reset function 361 | from pypardiso.scipy_aliases import pypardiso_solver 362 | 363 | pypardiso_solver.free_memory() 364 | -------------------------------------------------------------------------------- /tests/fixtures/single-matrix/Test fixture.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from bw2calc.single_matrix import *\n", 10 | "s = SingleMatrixLCA({\"f\": 1}, \"sm-fixture.tar.bz2\")\n", 11 | "s.calculate()" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "metadata": {}, 18 | "outputs": [ 19 | { 20 | "data": { 21 | "text/plain": [ 22 | "{'foo': 2.2136449078579945}" 23 | ] 24 | }, 25 | "execution_count": 2, 26 | "metadata": {}, 27 | "output_type": "execute_result" 28 | } 29 | ], 30 | "source": [ 31 | "s.scores" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 3, 37 | "metadata": {}, 38 | "outputs": [], 39 | "source": [ 40 | "rr, rc = s.reverse_dict()" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 4, 46 | "metadata": {}, 47 | "outputs": [ 48 | { 49 | "name": "stdout", 50 | "output_type": "stream", 51 | "text": [ 52 | "a 1.7772306782190697\n", 53 | "b 1.1946185999098617\n", 54 | "c 0.5829304560926344\n", 55 | "d 0.7675674047355302\n", 56 | "e 0.4420576095581056\n", 57 | "f 1.0000000000000002\n", 58 | "g 0.0\n", 59 | "h 0.0\n", 60 | "1 -3.014776081286758\n", 61 | "2 -1.1574804568810333\n", 62 | "3 -1.2509597313552767\n", 63 | "4 -0.5963594988971035\n", 64 | "α 0.8481379383592647\n", 65 | "β 0.6118402109571791\n", 66 | "γ 0.48054130351108815\n", 67 | "δ 0.2731254550304624\n", 68 | "ε 0.0\n", 69 | "ζ 0.0\n", 70 | "η 0.0\n", 71 | "θ 0.0\n" 72 | ] 73 | } 74 | ], 75 | "source": [ 76 | "for i, v in enumerate(s.supply_array):\n", 77 | " print(rr[i], v)" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": 5, 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [ 86 | "%matplotlib inline" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": 6, 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "from matplotlib import pyplot as plt" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 7, 101 | "metadata": {}, 102 | "outputs": [], 103 | "source": [ 104 | "m = s.matrix.tocoo()" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 8, 110 | "metadata": {}, 111 | "outputs": [ 112 | { 113 | "name": "stdout", 114 | "output_type": "stream", 115 | "text": [ 116 | "a a 1.0\n", 117 | "a b -0.15875966846942902\n", 118 | "a c -0.07783499360084534\n", 119 | "a d -0.6575725674629211\n", 120 | "a e -0.3313387334346771\n", 121 | "a f -0.8909989595413208\n", 122 | "a g -0.45032998919487\n", 123 | "a h -0.11410125344991684\n", 124 | "b b 1.0\n", 125 | "b c -0.7733734846115112\n", 126 | "b d -0.1522853821516037\n", 127 | "b e -0.5507182478904724\n", 128 | "b f -0.38345715403556824\n", 129 | "b g -0.22234876453876495\n", 130 | "b h -0.42207664251327515\n", 131 | "c c 
1.0\n", 132 | "c d -0.3805234134197235\n", 133 | "c e -0.12428241968154907\n", 134 | "c f -0.23591309785842896\n", 135 | "c g -0.1798233985900879\n", 136 | "c h -0.3284483551979065\n", 137 | "d d 1.0\n", 138 | "d e -0.32029885053634644\n", 139 | "d f -0.6259768605232239\n", 140 | "d g -0.42775073647499084\n", 141 | "d h -0.24756036698818207\n", 142 | "e e 1.0\n", 143 | "e f -0.44205760955810547\n", 144 | "e g -0.6496731638908386\n", 145 | "e h -0.815680742263794\n", 146 | "f f 1.0\n", 147 | "f g -0.9775451421737671\n", 148 | "f h -0.7285186648368835\n", 149 | "g g 1.0\n", 150 | "g h -0.12447405606508255\n", 151 | "h h 1.0\n", 152 | "1 a 0.5544072389602661\n", 153 | "1 b 1.2243407368659973\n", 154 | "1 f 0.5668463110923767\n", 155 | "1 1 1.0\n", 156 | "2 c 0.3165927827358246\n", 157 | "2 f 0.9729288816452026\n", 158 | "2 2 1.0\n", 159 | "3 a 0.6669450402259827\n", 160 | "3 e 0.14849771559238434\n", 161 | "3 3 1.0\n", 162 | "4 b 0.49920493364334106\n", 163 | "4 4 1.0\n", 164 | "α 1 0.2813270092010498\n", 165 | "α α 1.0\n", 166 | "β 2 0.5285965800285339\n", 167 | "β β 1.0\n", 168 | "γ 3 0.3841381072998047\n", 169 | "γ γ 1.0\n", 170 | "δ 4 0.4579879343509674\n", 171 | "δ δ 1.0\n", 172 | "ε ε 1.0\n", 173 | "ζ ζ 1.0\n", 174 | "η η 1.0\n", 175 | "θ θ 1.0\n" 176 | ] 177 | } 178 | ], 179 | "source": [ 180 | "for x, y, z in zip(m.row, m.col, m.data):\n", 181 | " print(rr[x], rr[y], z)" 182 | ] 183 | }, 184 | { 185 | "cell_type": "code", 186 | "execution_count": 9, 187 | "metadata": {}, 188 | "outputs": [ 189 | { 190 | "data": { 191 | "text/plain": [ 192 | "[]" 193 | ] 194 | }, 195 | "execution_count": 9, 196 | "metadata": {}, 197 | "output_type": "execute_result" 198 | }, 199 | { 200 | "data": { 201 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAe0AAAFpCAYAAACxlXA1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAGhJJREFUeJzt3X+sZHV9//Hnuwu01hIB94KwsK62hKSaCuRmq6VtsFbADRXa+G2XNN9SpdnSlqQmrSnWRA39R0vtT4xkWwnQWKQ/YN18BWGj34SaFOQuP1cBWSmWu0vZVQQ0bqKL7/4x5+J0mNk7d37ccz+feT6Smztzzmdm3p9zzr2vnXPPzjsyE0mStPb9SNsFSJKk4RjakiQVwtCWJKkQhrYkSYUwtCVJKoShLUlSIQxtSZIKYWhLklQIQ1uSpEIY2pIkFeKotgvoZ/369blp06a2y5AkaVXs3r37G5k5t9y4NRnamzZtYmFhoe0yJElaFRHx9WHGeXpckqRCGNqSJBXC0JYkqRCGtiRJhTC0JUkqhKEtSVIhDG1JkgphaEuSVAhDW5KkQqzJT0STJGkt2nH/Pq6+4zH2P3eIU457Be87/wwuPmvDqr2+oS1J0hB23L+P99/yMIe+/yIA+547xPtveRhg1YLb0+OSJA3h6jseeymwlxz6/otcfcdjq1aDoS1J0hD2P3doRcunwdCWJGkIpxz3ihUtnwZDW5KkIbzv/DN4xdHr/teyVxy9jvedf8aq1eCFaJIkDWHpYjOvHpckqQAXn7VhVUO6l6fHJUkqhKEtSVIhDG1JkgphaEuSVAhDW5KkQhjakiQVwtCWJKkQhrYkSYXww1UkSdVpu+/1tBjakqSqrIW+19Pi6XFJUlXWQt/raTG0JUlVWQt9r6fF0JYkVWUt9L2eFkNbklSVtdD3elq8EE2SVJW10Pd6WpYN7Yi4DrgQOJCZb2yW3Qws/ZPlOOC5zDyzz2OfBL4NvAgczsz5CdUtSdJAbfe9npZh3mlfD1wD3Li0IDN/Y+l2RHwMeP4Ij39rZn5j1AIlSVLHsqGdmXdFxKZ+6yIigF8HfmmyZUmSpF7jXoj2C8Azmfn4gPUJ3BkRuyNi25ivJUnSTBv3QrRLgJuOsP6czNwfEScCuyLi0cy8q9/AJtS3AWzcuHHMsiRJqs/I77Qj4ijg14CbB43JzP3N9wPArcDmI4zdnpnzmTk/Nzc3almSJFVrnNPjvww8mpmL/VZGxCsj4til28B5wJ4xXk+SpJm2bGhHxE3AfwBnRMRiRFzWrNpKz6nxiDglIm5r7p4EfDEiHgS+BHw2Mz83udIlSZotw1w9fsmA5b/dZ9l+YEtz+wngTWPWJ0mSGn4imiSpVbX2vp4GQ1uS1Jqae19Pgw1DJEmtqbn39TQY2pKk1tTc+3oaDG1JUmtq7n09DYa2JKk1Nfe+ngYvRJMktabm3tfTYGhLklpVa+/rafD0uCRJhTC0JUkqhKEtSVIhDG1JkgphaEuSVAhDW5KkQhjakiQVwv+nLUkamm0022VoS5KGYhvN9nl6XJI0FNtots/QliQNxTaa7TO0JUlDsY1m+wxtSdJQbKPZPi9EkyQNxTaa7TO0JUlDs41muzw9LklSIQxtSZIKYWhLklQIQ1uSpEIY2pIkFcLQliSpEIa2JEmFWDa0I+K6iDgQEXu6ln04IvZFxAPN15YBj70gIh6LiL0RceUkC5ckadYM8+Eq1wPXADf2LP+rzPyLQQ+KiHXAx4G3A4vAvRGxMzO/MmKtkqQh2fe6Tsu+087Mu4BnR3juzcDe
zHwiM78HfBq4aITnkSStwFLf633PHSL5Yd/rHffva7s0jWmcv2lfEREPNafPj++zfgPwVNf9xWaZJGmK7Htdr1FD+xPATwJnAk8DH+szJvosy0FPGBHbImIhIhYOHjw4YlmSJPte12uk0M7MZzLzxcz8AfD3dE6F91oETuu6fyqw/wjPuT0z5zNzfm5ubpSyJEnY97pmI4V2RJzcdfdXgT19ht0LnB4Rr4uIY4CtwM5RXk+SNDz7Xtdr2avHI+Im4FxgfUQsAh8Czo2IM+mc7n4S+N1m7CnAP2Tmlsw8HBFXAHcA64DrMvPLU5mFJOkl9r2uV2QO/DNza+bn53NhYaHtMiRJWhURsTsz55cb5yeiSZJUCENbkqRCGNqSJBXC0JYkqRCGtiRJhTC0JUkqhKEtSVIhhmnNKUmaIttoaliGtiS1aKmN5lJXrqU2moDBrZfx9Lgktcg2mloJQ1uSWmQbTa2EoS1JLbKNplbC0JakFtlGUyvhhWiS1CLbaGolDG1JatnFZ20wpDUUT49LklQIQ1uSpEIY2pIkFcLQliSpEIa2JEmFMLQlSSqEoS1JUiEMbUmSCuGHq0jSCtj7Wm0ytCVpSPa+Vts8PS5JQ7L3tdpmaEvSkOx9rbYZ2pI0JHtfq22GtiQNyd7XapsXoknSkOx9rbYtG9oRcR1wIXAgM9/YLLsa+BXge8DXgHdn5nN9Hvsk8G3gReBwZs5PrnRJWn32vlabhjk9fj1wQc+yXcAbM/NngK8C7z/C49+amWca2JIkjWfZ0M7Mu4Bne5bdmZmHm7t3A6dOoTZJktRlEheivQe4fcC6BO6MiN0RsW0CryVJ0swa60K0iPgAcBj41IAh52Tm/og4EdgVEY8279z7Pdc2YBvAxo0bxylLkqQqjfxOOyIupXOB2m9mZvYbk5n7m+8HgFuBzYOeLzO3Z+Z8Zs7Pzc2NWpYkSdUaKbQj4gLgT4B3ZuZ3B4x5ZUQcu3QbOA/YM2qhkiTNumVDOyJuAv4DOCMiFiPiMuAa4Fg6p7wfiIhrm7GnRMRtzUNPAr4YEQ8CXwI+m5mfm8osJEmaAcv+TTszL+mz+JMDxu4HtjS3nwDeNFZ1kiTpJX4imqQq2fdaNTK0JVXHvteqlQ1DJFXHvteqlaEtqTr2vVatDG1J1bHvtWplaEuqjn2vVSsvRJNUHfteq1aGtqQq2fdaNfL0uCRJhTC0JUkqhKEtSVIhDG1JkgphaEuSVAhDW5KkQhjakiQVwv+nLal1ttGcPLdpnQxtSa2yjebkuU3r5elxSa2yjebkuU3rZWhLapVtNCfPbVovQ1tSq2yjOXlu03oZ2pJaZRvNyXOb1ssL0SS1yjaak+c2rVdkZts1vMz8/HwuLCy0XYYkSasiInZn5vxy4zw9LklSIQxtSZIKYWhLklQIQ1uSpEIY2pIkFcLQliSpEIa2JEmFGCq0I+K6iDgQEXu6lp0QEbsi4vHm+/EDHntpM+bxiLh0UoVLkjRrhn2nfT1wQc+yK4HPZ+bpwOeb+/9LRJwAfAj4WWAz8KFB4S5p7dtx/z7O+cgXeN2Vn+Wcj3yBHffva7ukKrhdNayhQjsz7wKe7Vl8EXBDc/sG4OI+Dz0f2JWZz2bmt4BdvDz8JRVgqUfzvucOkfywR7MBMx63q1ZinL9pn5SZTwM030/sM2YD8FTX/cVmmaTC2KN5OtyuWolpX4gWfZb1/bDziNgWEQsRsXDw4MEplyVppezRPB1uV63EOKH9TEScDNB8P9BnzCJwWtf9U4H9/Z4sM7dn5nxmzs/NzY1RlqRpsEfzdLhdtRLjhPZOYOlq8EuBz/QZcwdwXkQc31yAdl6zTFJh7NE8HW5XrcRQ/bQj4ibgXGB9RCzSuSL8I8A/R8RlwH8B/6cZOw9cnpm/k5nPRsSfAfc2T3VVZvZe0CapAPZong63q1bCftqSJLXMftqSJFXG0JYkqRCGtiRJhTC0JUkqhKEtSVIhDG1JkgphaEuSVAhDW5KkQgz1iWiSyrPj/n1+ytYUuF3VJkNbqtBSj+allo9LPZoBA2YMble1zdPjUoXs0Twdble1zdCWKmSP5ulwu6pthrZUIXs0T4fbVW0ztKUK2aN5OtyuapsXokkVskfzdLhd1Tb7aUuS1DL7aUuSVBlDW5KkQhjakiQVwtCWJKkQhrYkSYUwtCVJKoShLUlSIfxwFWkNsN3j5LlNVSNDW2qZ7R4nz22qWnl6XGqZ7R4nz22qWhnaUsts9zh5blPVytCWWma7x8lzm6pWhrbUMts9Tp7bVLXyQjSpZbZ7nDy3qWo1cmvOiDgDuLlr0euBD2bmX3eNORf4DPCfzaJbMvOq5Z7b1pySpFkybGvOkd9pZ+ZjwJnNi60D9gG39hn675l54aivI0mSOib1N+23AV/LzK9P6PkkSVKPSYX2VuCmAeveEhEPRsTtEfGGCb2eJEkzZ+zQjohjgHcC/9Jn9X3AazPzTcDfATuO8DzbImIhIhYOHjw4blmSJFVnEu+03wHcl5nP9K7IzBcy8zvN7duAoyNifb8nycztmTmfmfNzc3MTKEuSpLpMIrQvYcCp8Yh4TUREc3tz83rfnMBrSpI0c8b6f9oR8ePA24Hf7Vp2OUBmXgu8C/i9iDgMHAK25qj/x0ySpBk3Vmhn5neBV/csu7br9jXANeO8hiRJ6qj6E9Gm0U93Wj16S6nV+dujWVJ7qg3tafTTnVaP3lJqdf72aJbUrmobhkyjn+60evSWUqvzt0ezpHZVG9rT6Kc7rR69pdTq/O3RLKld1Yb2NPrpTqtHbym1On97NEtqV7WhPY1+utPq0VtKrc7fHs2S2lXthWjT6Kc7rR69pdTq/O3RLKldI/fTnib7aUuSZsmw/bSrPT0uSVJtDG1JkgphaEuSVAhDW5KkQhjakiQVwtCWJKkQhrYkSYWo9sNVoKzWjKXUOuvzl6Q2VRvaJbVmLKXWWZ+/JLWt2tPjJbVmLKXWWZ+/JLWt2tAuqTVjKbXO+vwlqW3VhnZJrRlLqXXW5y9Jbas2tEtqzVhKrbM+f0lqW7UXopXUmrGUWmd9/pLUNltzSpLUMltzSpJUGUNbkqRCGNqSJBXC0JYkqRCGtiRJhTC0JUkqhKEtSVIhxg7tiHgyIh6OiAci4mX/uTo6/jYi9kbEQxFx9rivKUnSLJrUJ6K9NTO/MWDdO4DTm6+fBT7RfJ+6kno0l1JrSfMvSUn7SlJ7VuNjTC8CbszOR6/dHRHHRcTJmfn0NF+0pB7NpdRa0vxLUtK+ktSuSfxNO4E7I2J3RGzrs34D8FTX/cVm2VSV1KO5lFpLmn9JStpXkto1iXfa52Tm/og4EdgVEY9m5l1d66PPY172gedN4G8D2Lhx49hFldSjuZRaS5p/SUraV5LaNfY77czc33w/ANwKbO4Zsgic1nX/VGB/n+fZnpnzmTk/Nzc3bllF9WgupdaS5l+SkvaVpHaNFdoR8cqIOHbpNnAesKdn2E7gt5qryN8MPD/tv2dDWT2aS6m1pPmXpKR9Jald454ePwm4NSKWnuufMvNzEXE5QGZ
eC9wGbAH2At8F3j3maw6lpB7NpdRa0vxLUtK+ktQu+2lLktQy+2lLklQZQ1uSpEIY2pIkFcLQliSpEIa2JEmFMLQlSSqEoS1JUiEMbUmSCrEarTlbU1KP4lJqtUfzbHP/S+2qNrRL6lFcSq32aJ5t7n+pfdWeHi+pR3Eptdqjeba5/6X2VRvaJfUoLqVWezTPNve/1L5qQ7ukHsWl1GqP5tnm/pfaV21ol9SjuJRa7dE829z/UvuqvRCtpB7FpdRqj+bZ5v6X2mc/bUmSWmY/bUmSKmNoS5JUCENbkqRCGNqSJBXC0JYkqRCGtiRJhTC0JUkqRLUfrgJltaYsqVZJUjuqDe2SWlOWVKskqT3Vnh4vqTVlSbVKktpTbWiX1JqypFolSe2pNrRLak1ZUq2SpPZUG9oltaYsqVZJUnuqvRCtpNaUJdUqSWrPyK05I+I04EbgNcAPgO2Z+Tc9Y84FPgP8Z7Polsy8arnntjWnJGmWDNuac5x32oeBP8rM+yLiWGB3ROzKzK/0jPv3zLxwjNeRJEmM8TftzHw6M+9rbn8beATw3KskSVMykQvRImITcBZwT5/Vb4mIByPi9oh4wyReT5KkWTT2hWgR8RPAvwHvzcwXelbfB7w2M78TEVuAHcDpA55nG7ANYOPGjeOWJUlSdcZ6px0RR9MJ7E9l5i296zPzhcz8TnP7NuDoiFjf77kyc3tmzmfm/Nzc3DhlSZJUpZFDOyIC+CTwSGb+5YAxr2nGERGbm9f75qivKUnSLBvn9Pg5wP8FHo6IB5plfwpsBMjMa4F3Ab8XEYeBQ8DWHPX/mEmSNONGDu3M/CIQy4y5Brhm1NeQJEk/VO0nokFZPartfa1J85iS6lNtaJfUo9re15o0jympTtU2DCmpR7W9rzVpHlNSnaoN7ZJ6VNv7WpPmMSXVqdrQLqlHtb2vNWkeU1Kdqg3tknpU2/tak+YxJdWp2gvRSupRbe9rTZrHlFSnkftpT5P9tCVJs2TYftrVnh6XJKk2hrYkSYUwtCVJKoShLUlSIQxtSZIKYWhLklQIQ1uSpEJU++EqUFZrTs22WT+uZn3+0rCqDe2SWnNqts36cTXr85dWotrT4yW15tRsm/XjatbnL61EtaFdUmtOzbZZP65mff7SSlQb2iW15tRsm/XjatbnL61EtaFdUmtOzbZZP65mff7SSlR7IVpJrTk122b9uJr1+UsrYWtOSZJaZmtOSZIqY2hLklQIQ1uSpEIY2pIkFcLQliSpEIa2JEmFMLQlSSrEWKEdERdExGMRsTciruyz/kcj4uZm/T0RsWmc15MkaZaN/IloEbEO+DjwdmARuDcidmbmV7qGXQZ8KzN/KiK2Ah8FfmOcglfCHr0qRSnHail1SrUa5532ZmBvZj6Rmd8DPg1c1DPmIuCG5va/Am+LiBjjNYe21KN333OHSH7Yo3fH/ftW4+WloZVyrJZSp1SzcUJ7A/BU1/3FZlnfMZl5GHgeePUYrzk0e/SqFKUcq6XUKdVsnNDu946594PMhxnTGRixLSIWImLh4MGDY5TVYY9elaKUY7WUOqWajRPai8BpXfdPBfYPGhMRRwGvAp7t92SZuT0z5zNzfm5uboyyOuzRq1KUcqyWUqdUs3FC+17g9Ih4XUQcA2wFdvaM2Qlc2tx+F/CFXKW2YvboVSlKOVZLqVOq2chXj2fm4Yi4ArgDWAdcl5lfjoirgIXM3Al8EvjHiNhL5x321kkUPQx79KoUpRyrpdQp1cx+2pIktcx+2pIkVcbQliSpEIa2JEmFMLQlSSqEoS1JUiEMbUmSCmFoS5JUCENbkqRCGNqSJBXC0JYkqRBr8mNMI+Ig8PUJPuV64BsTfL61oMY5QZ3zck7lqHFeNc4J6pvXazNz2RaXazK0Jy0iFob5TNeS1DgnqHNezqkcNc6rxjlBvfNajqfHJUkqhKEtSVIhZiW0t7ddwBTUOCeoc17OqRw1zqvGOUG98zqimfibtiRJNZiVd9qSJBWvqtCOiAsi4rGI2BsRV/ZZ/6MRcXOz/p6I2LT6VQ4vIk6LiP8fEY9ExJcj4g/7jDk3Ip6PiAearw+2UetKRcSTEfFwU/NCn/UREX/b7KuHIuLsNuocVkSc0bUPHoiIFyLivT1jithXEXFdRByIiD1dy06IiF0R8Xjz/fgBj720GfN4RFy6elUf2YA5XR0RjzbH160RcdyAxx7xWG3LgDl9OCL2dR1jWwY89oi/K9s0YF43d83pyYh4YMBj1+S+mqjMrOILWAd8DXg9cAzwIPDTPWN+H7i2ub0VuLntupeZ08nA2c3tY4Gv9pnTucD/a7vWEeb2JLD+COu3ALcDAbwZuKftmlcwt3XAf9P5f5fF7SvgF4GzgT1dy/4cuLK5fSXw0T6POwF4ovl+fHP7+Lbnc4Q5nQcc1dz+aL85NeuOeKyusTl9GPjjZR637O/KtTavnvUfAz5Y0r6a5FdN77Q3A3sz84nM/B7waeCinjEXATc0t/8VeFtExCrWuCKZ+XRm3tfc/jbwCLCh3apWzUXAjdlxN3BcRJzcdlFDehvwtcyc5AcErZrMvAt4tmdx98/ODcDFfR56PrArM5/NzG8Bu4ALplboCvSbU2bemZmHm7t3A6euemFjGLCfhjHM78rWHGleze/rXwduWtWi1pCaQnsD8FTX/UVeHnAvjWl+WJ8HXr0q1Y2pOZV/FnBPn9VviYgHI+L2iHjDqhY2ugTujIjdEbGtz/ph9udatZXBv1RK3FcAJ2Xm09D5xyRwYp8xJe+z99A5s9PPcsfqWnNFc8r/ugF/xih5P/0C8ExmPj5gfWn7asVqCu1+75h7L40fZsyaExE/Afwb8N7MfKFn9X10TsO+Cfg7YMdq1zeiczLzbOAdwB9ExC/2rC91Xx0DvBP4lz6rS91Xwyp1n30AOAx8asCQ5Y7VteQTwE8CZwJP0zmV3KvI/dS4hCO/yy5pX42kptBeBE7run8qsH/QmIg4CngVo51eWjURcTSdwP5UZt7Suz4zX8jM7zS3bwOOjoj1q1zmimXm/ub7AeBWOqfsug2zP9eidwD3ZeYzvStK3VeNZ5b+PNF8P9BnTHH7rLlY7kLgN7P5o2ivIY7VNSMzn8nMFzPzB8Df07/W4vYTvPQ7+9eAmweNKWlfjaqm0L4XOD0iXte829kK7OwZsxNYuqL1XcAXBv2grgXN328+CTySmX85YMxrlv4uHxGb6ezTb65elSsXEa+MiGOXbtO5IGhPz7CdwG81V5G/GXh+6fTsGjfwnUCJ+6pL98/OpcBn+oy5AzgvIo5vTsue1yxbkyLiAuBPgHdm5ncHjBnmWF0zeq77+FX61zrM78q16JeBRzNzsd/K0vbVyNq+Em6SX3SuOP4qnSsjP9Asu4rODyXAj9E5bbkX+BLw+rZrXmY+P0/ntNVDwAPN1xbgcuDyZswVwJfpXAF6N/Bzbdc9xLxe39T7YFP70r7qnlcAH2/25cPAfNt1DzGvH6cTwq/qWlbcvqLzj46nge/TeVd2GZ1rPz4PPN58P6EZOw/8Q9dj39P8fO
0F3t32XJaZ0146f9td+tla+p8lpwC3HelYXQtfA+b0j83Py0N0gvjk3jk191/2u3KtfPWbV7P8+qWfpa6xReyrSX75iWiSJBWiptPjkiRVzdCWJKkQhrYkSYUwtCVJKoShLUlSIQxtSZIKYWhLklQIQ1uSpEL8DxrEMH9GZc96AAAAAElFTkSuQmCC\n", 202 | "text/plain": [ 203 | "
" 204 | ] 205 | }, 206 | "metadata": {}, 207 | "output_type": "display_data" 208 | } 209 | ], 210 | "source": [ 211 | "plt.figure(figsize=(8, 6))\n", 212 | "plt.plot(m.row, m.col, 'o')" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": 8, 218 | "metadata": {}, 219 | "outputs": [ 220 | { 221 | "data": { 222 | "text/plain": [ 223 | "{'1': 8,\n", 224 | " '2': 9,\n", 225 | " '3': 10,\n", 226 | " '4': 11,\n", 227 | " 'a': 0,\n", 228 | " 'b': 1,\n", 229 | " 'c': 2,\n", 230 | " 'd': 3,\n", 231 | " 'e': 4,\n", 232 | " 'f': 5,\n", 233 | " 'g': 6,\n", 234 | " 'h': 7,\n", 235 | " 'α': 12,\n", 236 | " 'β': 13,\n", 237 | " 'γ': 14,\n", 238 | " 'δ': 15,\n", 239 | " 'ε': 16,\n", 240 | " 'ζ': 17,\n", 241 | " 'η': 18,\n", 242 | " 'θ': 19}" 243 | ] 244 | }, 245 | "execution_count": 8, 246 | "metadata": {}, 247 | "output_type": "execute_result" 248 | } 249 | ], 250 | "source": [ 251 | "s.row_dict" 252 | ] 253 | }, 254 | { 255 | "cell_type": "code", 256 | "execution_count": null, 257 | "metadata": {}, 258 | "outputs": [], 259 | "source": [] 260 | } 261 | ], 262 | "metadata": { 263 | "kernelspec": { 264 | "display_name": "Python 3", 265 | "language": "python", 266 | "name": "python3" 267 | }, 268 | "language_info": { 269 | "codemirror_mode": { 270 | "name": "ipython", 271 | "version": 3 272 | }, 273 | "file_extension": ".py", 274 | "mimetype": "text/x-python", 275 | "name": "python", 276 | "nbconvert_exporter": "python", 277 | "pygments_lexer": "ipython3", 278 | "version": "3.6.4" 279 | } 280 | }, 281 | "nbformat": 4, 282 | "nbformat_minor": 2 283 | } 284 | --------------------------------------------------------------------------------