├── .gitignore ├── .readthedocs.yaml ├── LICENSE ├── README.md ├── docs ├── Makefile ├── conf.py ├── example.ipynb ├── index.rst ├── make.bat ├── requirements.txt └── usage.rst ├── examples ├── criteria_electric_cars.csv ├── dataset_cars.csv ├── dataset_localisations.csv ├── electric_cars_2021.csv ├── example.ipynb ├── main.py ├── main_crispyn_update.py ├── results │ ├── bar_chart_weights_Alternative.eps │ ├── bar_chart_weights_Alternative.pdf │ ├── bar_chart_weights_Alternatives.eps │ ├── bar_chart_weights_Alternatives.pdf │ ├── bar_chart_weights_Weighting methods.eps │ ├── bar_chart_weights_Weighting methods.pdf │ ├── boxplot_weights.eps │ ├── boxplot_weights.pdf │ ├── heatmap_smaa.eps │ ├── heatmap_smaa.pdf │ ├── heatmap_weights.eps │ ├── heatmap_weights.pdf │ └── rankings.csv ├── results_smaa │ ├── ai.csv │ ├── cw.csv │ └── fr.csv └── results_update │ ├── barplot_rankings.eps │ ├── barplot_rankings.pdf │ ├── boxplot_weights.eps │ ├── boxplot_weights.pdf │ ├── boxplot_weights_col.eps │ ├── boxplot_weights_col.pdf │ ├── boxplot_weights_stacked_col.eps │ ├── boxplot_weights_stacked_col.pdf │ ├── crit_ahp.csv │ ├── crit_sapevo.csv │ ├── df_prefs.csv │ ├── df_ranks.csv │ ├── df_weights.csv │ ├── heatmap.eps │ ├── heatmap.pdf │ ├── radar.eps │ ├── radar.pdf │ ├── radar_weights.eps │ └── radar_weights.pdf ├── pyproject.toml ├── requirements.txt ├── requirements_python_ver_min.txt ├── requirements_src.txt ├── setup.py ├── src └── crispyn │ ├── __init__.py │ ├── additions.py │ ├── correlations.py │ ├── mcda_methods │ ├── __init__.py │ ├── mcda_method.py │ ├── vikor.py │ └── vikor_smaa.py │ ├── normalizations.py │ └── weighting_methods.py └── tests ├── test_correlations.py ├── test_mcda_methods.py └── test_weighting_methods.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / 
packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | .vscode 113 | 114 | # Spyder project settings 115 | .spyderproject 116 | .spyproject 117 | 118 | # Rope project settings 119 | .ropeproject 120 | 121 | # mkdocs documentation 122 | /site 123 | 124 | # mypy 125 | .mypy_cache/ 126 | .dmypy.json 127 | dmypy.json 128 | 129 | # Pyre type checker 130 | .pyre/ 131 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file for Sphinx projects 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | 4 | # Required 5 | version: 2 6 | 7 | # Set the OS, Python version and other tools you might need 8 | build: 9 | os: ubuntu-22.04 10 | tools: 11 | python: "3.12" 12 | # You can also specify other tool versions: 13 | # nodejs: "20" 14 | # rust: "1.70" 15 | # golang: "1.20" 16 | 17 | # Build documentation in the "docs/" directory with Sphinx 18 | sphinx: 19 | configuration: docs/conf.py 20 | # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs 21 | # builder: "dirhtml" 22 | # Fail on all warnings to avoid broken references 23 | # fail_on_warning: true 24 | 25 | # Optionally build your docs in additional formats such as PDF and ePub 26 | # formats: 27 | # - pdf 28 | # - epub 29 | 30 | # Optional but recommended, declare the Python requirements required 31 | # to build your documentation 32 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 33 | python: 34 | install: 35 | - requirements: docs/requirements.txt 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 energyinpython 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # crispyn 2 | CRIteria Significance determining in PYthoN - The Python 3 Library for determining criteria weights for MCDA methods. 3 | 4 | This library provides 15 criteria weighting methods: 11 objective, 4 subjective and a Stochastic Multicriteria Acceptability Analysis Method (SMAA) 5 | that does not require criteria weights. 
6 | 7 | ## Installation 8 | 9 | ``` 10 | pip install crispyn 11 | ``` 12 | 13 | ## Usage 14 | 15 | `crispyn` is the Python 3 package that provides 15 weighting methods: 11 objective and 4 subjective, which can be used to determine criteria weights for 16 | solving multi-criteria problems with Multi-Criteria Decision Analysis (MCDA) methods. The first step is providing the decision matrix `matrix` with alternatives' 17 | performance values. The decision matrix is two-dimensional and contains m alternatives in rows and n criteria in columns. You also have to provide 18 | criteria types `types`. Criteria types are equal to 1 for profit criteria and -1 for cost criteria. Then you have to calculate criteria weights 19 | using the weighting method chosen from `crispyn.weighting_methods` submodule. Depending on the chosen objective method, you have to provide `matrix` or `matrix` and `types` as 20 | weighting method arguments. In the case of subjective weighting methods, provided parameters are different, such as ordered criteria indexes and significance values assigned by the decision-maker to criteria. It is detailed in Usage in the documentation. Then, you can evaluate alternatives from the decision matrix using the VIKOR method 21 | from `crispyn.mcda_methods` module. The VIKOR method returns a vector with preference values `pref` assigned to alternatives. To rank alternatives 22 | according to VIKOR preference values, you have to sort them in ascending order because, in the VIKOR method, the best alternative has the lowest 23 | preference value. The alternatives are ranked using the `rank_preferences` method provided in the `crispyn.additions` submodule. Parameter `reverse = False` means that alternatives 24 | are sorted in ascending order. 
Here is an example of using the Entropy weighting method `entropy_weighting` for determining criteria weights and 25 | the VIKOR method to calculate preference values: 26 | 27 | ```python 28 | import numpy as np 29 | from crispyn.mcda_methods import VIKOR 30 | from crispyn import weighting_methods as mcda_weights 31 | from crispyn import normalizations as norms 32 | from crispyn.additions import rank_preferences 33 | 34 | matrix = np.array([[256, 8, 41, 1.6, 1.77, 7347.16], 35 | [256, 8, 32, 1.0, 1.8, 6919.99], 36 | [256, 8, 53, 1.6, 1.9, 8400], 37 | [256, 8, 41, 1.0, 1.75, 6808.9], 38 | [512, 8, 35, 1.6, 1.7, 8479.99], 39 | [256, 4, 35, 1.6, 1.7, 7499.99]]) 40 | 41 | types = np.array([1, 1, 1, 1, -1, -1]) 42 | weights = mcda_weights.entropy_weighting(matrix) 43 | 44 | # Create the VIKOR method object 45 | vikor = VIKOR(normalization_method=norms.minmax_normalization) 46 | # Calculate alternatives preference function values with VIKOR method 47 | pref = vikor(matrix, weights, types) 48 | # Rank alternatives according to preference values 49 | rank = rank_preferences(pref, reverse = False) 50 | ``` 51 | 52 | ### Stochastic Multicriteria Acceptability Analysis Method (SMAA) 53 | 54 | Additionally, the Crispyn library provides the Stochastic Multicriteria Acceptability Analysis Method (SMAA), which, combined 55 | with the VIKOR method, is designed to solve decision problems when there is a lack of information about criteria preferences (unknown criteria 56 | weights). This method is implemented in the class named `VIKOR_SMAA`. This method requires only the decision matrix, a matrix with 57 | weight vectors and criteria types provided in one call. The number of weight vectors is equal to the number of iterations. First, the matrix with 58 | weight vectors must be generated with `_generate_weights` method provided by the `VIKOR_SMAA` class. In this method, uniform distributed weights 59 | are generated by Monte Carlo simulation. 
The results of the provided `VIKOR_SMAA` method are Rank acceptability index, Central weight vector, and 60 | Rank scores. 61 | 62 | ### Rank acceptability index 63 | 64 | The ranking is built based on generated weights. Next, counters for corresponding ranks in relation to the alternatives are increased. 65 | After a given number of iterations, the rank acceptability indexes are obtained by dividing the counters by the number of iterations. 66 | Rank acceptability index shows the share of different scores placing an alternative in a given rank. 67 | 68 | ### Central weight vector 69 | 70 | The central weights are calculated similarly. In each iteration, the weight vector is added to its ‘summed weight vector’ when the 71 | alternative gets the rank. Next, this vector is divided by the number of iterations to get the central weight vector. The central weight 72 | vector describes the preferences of a typical decision-maker, supporting this alternative with the assumed preference model. It allows the 73 | decision-maker to see what criteria preferences result in the best evaluation of given alternatives. 74 | 75 | ### Rank scores 76 | 77 | Final ranking of alternatives provided by the ranking function, which adds to each alternative value of 1 each time it has better preference 78 | values than each other. 
79 | 80 | Here is an example of using the `VIKOR_SMAA` method: 81 | 82 | ```python 83 | from crispyn.mcda_methods import VIKOR_SMAA 84 | 85 | # criteria number 86 | n = matrix.shape[1] 87 | # SMAA iterations number 88 | iterations = 10000 89 | 90 | # create the VIKOR_SMAA method object 91 | vikor_smaa = VIKOR_SMAA() 92 | 93 | # generate multiple weight vectors in matrix 94 | weight_vectors = vikor_smaa._generate_weights(n, iterations) 95 | 96 | # run the vikor_smaa method 97 | rank_acceptability_index, central_weight_vector, rank_scores = vikor_smaa(matrix, weight_vectors, types) 98 | ``` 99 | 100 | ## License 101 | 102 | `crispyn` was created by Aleksandra Bączkiewicz. It is licensed under the terms of the MIT license. 103 | 104 | ## Documentation 105 | 106 | Documentation of this library with instructions for installation and usage is 107 | provided [here](https://crispyn.readthedocs.io/en/latest/) 108 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | 3 | # -- Project information 4 | 5 | project = 'crispyn' 6 | copyright = '2023, energyinpython' 7 | author = 'Aleksandra Bączkiewicz' 8 | 9 | release = '0.1' 10 | version = '0.0.6' 11 | 12 | # -- General configuration 13 | 14 | extensions = ['autoapi.extension', 15 | 'nbsphinx', 16 | 'sphinx_rtd_theme', 17 | ] 18 | 19 | autoapi_type = 'python' 20 | autoapi_dirs = ["../src"] # location to parse for API reference 21 | 22 | intersphinx_mapping = { 23 | 'python': ('https://docs.python.org/3/', None), 24 | 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), 25 | } 26 | intersphinx_disabled_domains = ['std'] 27 | 28 | templates_path = ['_templates'] 29 | 30 | # -- Options for HTML output 31 | 32 | html_theme = 'sphinx_rtd_theme' 33 | 34 | # -- Options for EPUB output 35 | epub_show_urls = 'footnote' 36 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to crispyn documentation! 2 | =============================================== 3 | 4 | ``crispyn`` is a Python 3 library dedicated to multi-criteria decision analysis with criteria weights determined by objective and subjective weighting methods. 
5 | This library includes: 6 | 7 | - The VIKOR method ``VIKOR`` 8 | 9 | - Objective weighting methods for determining criteria weights required by Multi-Criteria Decision Analysis (MCDA) methods: 10 | 11 | - ``equal_weighting`` (Equal weighting method) 12 | - ``entropy_weighting`` (Entropy weighting method) 13 | - ``std_weighting`` (Standard deviation weighting method) 14 | - ``critic_weighting`` (CRITIC weighting method) 15 | - ``gini_weighting`` (Gini coefficient-based weighting method) 16 | - ``merec_weighting`` (MEREC weighting method) 17 | - ``stat_var_weighting`` (Statistical variance weighting method) 18 | - ``cilos_weighting`` (CILOS weighting method) 19 | - ``idocriw_weighting`` (IDOCRIW weighting method) 20 | - ``angle_weighting`` (Angle weighting method) 21 | - ``coeff_var_weighting`` (Coefficient of variation weighting method) 22 | 23 | - Subjective weighting methods for determining criteria weights required by Multi-Criteria Decision Analysis (MCDA) methods: 24 | 25 | - ``AHP_WEIGHTING`` (AHP weighting method) 26 | - ``swara_weighting`` (SWARA weighting method) 27 | - ``lbwa_weighting`` (LBWA weighting method) 28 | - ``sapevo_weighting`` (SAPEVO weighting method) 29 | 30 | - Stochastic Multicriteria Acceptability Analysis Method - SMAA combined with VIKOR (``VIKOR_SMAA``) 31 | 32 | - Correlation coefficients: 33 | 34 | - ``spearman`` (Spearman rank correlation coefficient) 35 | - ``weighted_spearman`` (Weighted Spearman rank correlation coefficient) 36 | - ``pearson_coeff`` (Pearson correlation coefficient) 37 | 38 | - Methods for normalization of decision matrix: 39 | 40 | - ``linear_normalization`` (Linear normalization) 41 | - ``minmax_normalization`` (Minimum-Maximum normalization) 42 | - ``max_normalization`` (Maximum normalization) 43 | - ``sum_normalization`` (Sum normalization) 44 | - ``vector_normalization`` (Vector normalization) 45 | 46 | - additions: 47 | 48 | - ``rank_preferences`` (Method for ordering alternatives according to their 
preference values obtained with MCDA methods) 49 | 50 | Check out the :doc:`usage` section for further information, including 51 | how to :ref:`installation` the project. 52 | 53 | .. note:: 54 | 55 | This project is under active development. 56 | 57 | Contents 58 | -------- 59 | 60 | .. toctree:: 61 | :maxdepth: 2 62 | 63 | usage 64 | example 65 | autoapi/index 66 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx-autoapi 2 | nbsphinx 3 | sphinx_rtd_theme -------------------------------------------------------------------------------- /docs/usage.rst: -------------------------------------------------------------------------------- 1 | Usage 2 | ====== 3 | 4 | .. 
_installation: 5 | 6 | Installation 7 | ------------- 8 | 9 | To use ``crispyn`` package, first install it using pip: 10 | 11 | .. code-block:: python 12 | 13 | pip install crispyn 14 | 15 | 16 | Usage examples 17 | ---------------------- 18 | 19 | 20 | The VIKOR method 21 | __________________ 22 | 23 | The VIKOR method provided in this library can be used with single weight vector and with multiple weight vectors, like in the Stochastic Multicriteria Acceptability 24 | Analysis (SMAA) method. 25 | 26 | Using the VIKOR method with single weight vector: 27 | 28 | .. code-block:: python 29 | 30 | import numpy as np 31 | from crispyn.mcda_methods import VIKOR 32 | from crispyn.additions import rank_preferences 33 | 34 | # Provide decision matrix in array numpy.darray. 35 | matrix = np.array([[8, 7, 2, 1], 36 | [5, 3, 7, 5], 37 | [7, 5, 6, 4], 38 | [9, 9, 7, 3], 39 | [11, 10, 3, 7], 40 | [6, 9, 5, 4]]) 41 | 42 | # Provide criteria weights in array numpy.darray. All weights must sum to 1. 43 | weights = np.array([0.4, 0.3, 0.1, 0.2]) 44 | 45 | # Provide criteria types in array numpy.darray. Profit criteria are represented by 1, and cost criteria by -1. 46 | types = np.array([1, 1, 1, 1]) 47 | 48 | # Create the VIKOR method object providing `v` parameter. The default `v` parameter is set to 0.5, so if you do not provide it, `v` will be equal to 0.5. 49 | vikor = VIKOR(v = 0.625) 50 | 51 | # Calculate the VIKOR preference values of alternatives. 52 | pref = vikor(matrix, weights, types) 53 | 54 | # Generate ranking of alternatives by sorting alternatives ascendingly according to the VIKOR algorithm (reverse = False means sorting in ascending order) according to preference values. 55 | rank = rank_preferences(pref, reverse = False) 56 | 57 | print('Preference values: ', np.round(pref, 4)) 58 | print('Ranking: ', rank) 59 | 60 | Output 61 | 62 | .. code-block:: console 63 | 64 | Preference values: [[0.6399] 65 | [1. ] 66 | [0.6929] 67 | [0.2714] 68 | [0. 
] 69 | [0.6939]] 70 | Ranking: [3 6 4 2 1 5] 71 | 72 | 73 | The VIKOR method provided in the ``crispyn`` library can also be used with multiple weight vectors provided in the matrix. This matrix 74 | includes weight vectors in rows. The number of rows is equal to the vectors number, and the number of columns is equal to the criteria number. In this case, 75 | the VIKOR method returns a matrix with preference values. Vectors with preference values for each weight vector are contained in each column. The number 76 | of rows of the matrix with preference values is equal to the number of alternatives, and the number of columns is equal to the number of weight vectors. 77 | This functionality is useful for Stochastic Multicriteria Acceptability Analysis (SMAA) methods. Here is demonstrated how it works using the VIKOR method 78 | with multiple weight vectors. 79 | 80 | .. code-block:: python 81 | 82 | import numpy as np 83 | from crispyn.additions import rank_preferences 84 | from crispyn.mcda_methods import VIKOR, VIKOR_SMAA 85 | 86 | matrix = np.array([[256, 8, 41, 1.6, 1.77, 7347.16], 87 | [256, 8, 32, 1.0, 1.8, 6919.99], 88 | [256, 8, 53, 1.6, 1.9, 8400], 89 | [256, 8, 41, 1.0, 1.75, 6808.9], 90 | [512, 8, 35, 1.6, 1.7, 8479.99], 91 | [256, 4, 35, 1.6, 1.7, 7499.99]]) 92 | 93 | n = matrix.shape[1] 94 | iterations = 10 95 | 96 | types = np.array([1, 1, 1, 1, -1, -1]) 97 | 98 | vikor_smaa = VIKOR_SMAA() 99 | weight_vectors = vikor_smaa._generate_weights(n, iterations) 100 | 101 | vikor = VIKOR() 102 | pref = vikor(matrix, weight_vectors, types) 103 | print(pref) 104 | 105 | Output 106 | 107 | .. code-block:: console 108 | 109 | Preference values: [[0.09618783 0.27346371 0.09902209 0.16314653 0.58629107 0.01900846 110 | 0.85270574 0.28086327 0.24628691 0.05633723] 111 | [1. 0.40327448 1. 1. 1. 1. 112 | 0.97327618 0.29458204 0.94333641 1. ] 113 | [0.28701119 1. 0.55618621 0.231067 0.57237663 0.52735721 114 | 0.95398644 0.29797528 0. 
0.41316479] 115 | [0.85675331 0.21838546 0.8992903 0.89447867 0.95984659 0.89945467 116 | 0.8867631 0.27612402 0.32504461 0.89805712] 117 | [0.03792154 0. 0. 0. 0. 0.22357098 118 | 0. 0. 0.50907579 0.01255136] 119 | [0.42033457 0.34191157 0.30924524 0.30984365 0.64516556 0.02140185 120 | 1. 1. 0.86570054 0.05526169]] 121 | 122 | Matrix with preference values includes subsequent vectors with preference values in columns. We can rank preferences in this matrix 123 | using the ``rank_preferences`` method in following way: 124 | 125 | .. code-block:: python 126 | 127 | rank = np.zeros((pref.shape)) 128 | for i in range(pref.shape[1]): 129 | rank[:, i] = rank_preferences(pref[:, i], reverse = False) 130 | 131 | print('Rankings: ', rank) 132 | 133 | Output 134 | 135 | .. code-block:: console 136 | 137 | Rankings: [[2. 3. 2. 4. 1. 2. 2. 1. 1. 4.] 138 | [5. 5. 5. 3. 6. 5. 4. 5. 4. 5.] 139 | [3. 6. 4. 6. 3. 4. 5. 3. 6. 6.] 140 | [4. 4. 1. 2. 2. 3. 1. 2. 3. 2.] 141 | [1. 1. 3. 1. 5. 1. 6. 4. 5. 1.] 142 | [6. 2. 6. 5. 4. 6. 3. 6. 2. 3.]] 143 | 144 | Now each column of the above matrix contains a ranking generated for each weight vector. 145 | 146 | 147 | Correlation coefficents 148 | __________________________ 149 | 150 | Spearman correlation coefficient 151 | 152 | .. code-block:: python 153 | 154 | import numpy as np 155 | from crispyn import correlations as corrs 156 | 157 | # Provide two vectors with rankings obtained with different MCDA methods. 158 | R = np.array([1, 2, 3, 4, 5]) 159 | Q = np.array([1, 3, 2, 4, 5]) 160 | 161 | # Calculate the correlation using `spearman` coefficient. 162 | coeff = corrs.spearman(R, Q) 163 | print('Spearman coeff: ', np.round(coeff, 4)) 164 | 165 | Output 166 | 167 | .. code-block:: console 168 | 169 | Spearman coeff: 0.9 170 | 171 | 172 | 173 | Weighted Spearman correlation coefficient 174 | 175 | .. 
code-block:: python 176 | 177 | import numpy as np 178 | from crispyn import correlations as corrs 179 | 180 | # Provide two vectors with rankings obtained with different MCDA methods. 181 | R = np.array([1, 2, 3, 4, 5]) 182 | Q = np.array([1, 3, 2, 4, 5]) 183 | 184 | # Calculate the correlation using `weighted_spearman` coefficient. 185 | coeff = corrs.weighted_spearman(R, Q) 186 | print('Weighted Spearman coeff: ', np.round(coeff, 4)) 187 | 188 | Output 189 | 190 | .. code-block:: console 191 | 192 | Weighted Spearman coeff: 0.8833 193 | 194 | 195 | 196 | Pearson correlation coefficient 197 | 198 | .. code-block:: python 199 | 200 | import numpy as np 201 | from crispyn import correlations as corrs 202 | 203 | # Provide two vectors with rankings obtained with different MCDA methods. 204 | R = np.array([1, 2, 3, 4, 5]) 205 | Q = np.array([1, 3, 2, 4, 5]) 206 | 207 | # Calculate the correlation using `pearson_coeff` coefficient. 208 | coeff = corrs.pearson_coeff(R, Q) 209 | print('Pearson coeff: ', np.round(coeff, 4)) 210 | 211 | Output 212 | 213 | .. code-block:: console 214 | 215 | Pearson coeff: 0.9 216 | 217 | 218 | 219 | Objective methods for criteria weights determination 220 | ____________________________________________________________ 221 | 222 | Entropy weighting method 223 | 224 | .. code-block:: python 225 | 226 | import numpy as np 227 | from crispyn import weighting_methods as mcda_weights 228 | 229 | matrix = np.array([[30, 30, 38, 29], 230 | [19, 54, 86, 29], 231 | [19, 15, 85, 28.9], 232 | [68, 70, 60, 29]]) 233 | 234 | weights = mcda_weights.entropy_weighting(matrix) 235 | 236 | print('Entropy weights: ', np.round(weights, 4)) 237 | 238 | Output 239 | 240 | .. code-block:: console 241 | 242 | Entropy weights: [0.463 0.3992 0.1378 0. ] 243 | 244 | 245 | CRITIC weighting method 246 | 247 | .. 
code-block:: python 248 | 249 | import numpy as np 250 | from crispyn import weighting_methods as mcda_weights 251 | 252 | matrix = np.array([[5000, 3, 3, 4, 3, 2], 253 | [680, 5, 3, 2, 2, 1], 254 | [2000, 3, 2, 3, 4, 3], 255 | [600, 4, 3, 1, 2, 2], 256 | [800, 2, 4, 3, 3, 4]]) 257 | 258 | weights = mcda_weights.critic_weighting(matrix) 259 | 260 | print('CRITIC weights: ', np.round(weights, 4)) 261 | 262 | Output 263 | 264 | .. code-block:: console 265 | 266 | CRITIC weights: [0.157 0.2495 0.1677 0.1211 0.1541 0.1506] 267 | 268 | 269 | Standard deviation weighting method 270 | 271 | .. code-block:: python 272 | 273 | import numpy as np 274 | from crispyn import weighting_methods as mcda_weights 275 | 276 | matrix = np.array([[0.619, 0.449, 0.447], 277 | [0.862, 0.466, 0.006], 278 | [0.458, 0.698, 0.771], 279 | [0.777, 0.631, 0.491], 280 | [0.567, 0.992, 0.968]]) 281 | 282 | weights = mcda_weights.std_weighting(matrix) 283 | 284 | print('Standard deviation weights: ', np.round(weights, 4)) 285 | 286 | Output 287 | 288 | .. code-block:: console 289 | 290 | Standard deviation weights: [0.2173 0.2945 0.4882] 291 | 292 | 293 | Equal weighting method 294 | 295 | .. code-block:: python 296 | 297 | import numpy as np 298 | from crispyn import weighting_methods as mcda_weights 299 | 300 | matrix = np.array([[0.619, 0.449, 0.447], 301 | [0.862, 0.466, 0.006], 302 | [0.458, 0.698, 0.771], 303 | [0.777, 0.631, 0.491], 304 | [0.567, 0.992, 0.968]]) 305 | 306 | weights = mcda_weights.equal_weighting(matrix) 307 | print('Equal weights: ', np.round(weights, 3)) 308 | 309 | Output 310 | 311 | .. code-block:: console 312 | 313 | Equal weights: [0.333 0.333 0.333] 314 | 315 | 316 | Gini coefficient-based weighting method 317 | 318 | .. 
code-block:: python 319 | 320 | import numpy as np 321 | from crispyn import weighting_methods as mcda_weights 322 | 323 | matrix = np.array([[29.4, 83, 47, 114, 12, 30, 120, 240, 170, 90, 1717.75], 324 | [30, 38.1, 124.7, 117, 16, 60, 60, 60, 93, 70, 2389], 325 | [29.28, 59.27, 41.13, 58, 16, 30, 60, 120, 170, 78, 239.99], 326 | [33.6, 71, 55, 159, 23.6, 60, 240, 240, 132, 140, 2099], 327 | [21, 59, 41, 66, 16, 24, 60, 120, 170, 70, 439], 328 | [35, 65, 42, 134, 12, 60, 240, 240, 145, 60, 1087], 329 | [47, 79, 54, 158, 19, 60, 120, 120, 360, 72, 2499], 330 | [28.3, 62.3, 44.9, 116, 12, 30, 60, 60, 130, 90, 999.99], 331 | [36.9, 28.6, 121.6, 130, 12, 60, 120, 120, 80, 80, 1099], 332 | [32, 59, 41, 60, 16, 30, 120, 120, 170, 60, 302.96], 333 | [28.4, 66.3, 48.6, 126, 12, 60, 240, 240, 132, 135, 1629], 334 | [29.8, 46, 113, 47, 18, 50, 50, 50, 360, 72, 2099], 335 | [20.2, 64, 80, 70, 8, 24, 60, 120, 166, 480, 699.99], 336 | [33, 60, 44, 59, 12, 30, 60, 120, 170, 90, 388], 337 | [29, 59, 41, 55, 16, 30, 60, 120, 170, 120, 299], 338 | [29, 59, 41, 182, 12, 30, 30, 60, 94, 140, 249], 339 | [29.8, 59.2, 41, 65, 16, 30, 60, 120, 160, 90, 219.99], 340 | [28.8, 62.5, 41, 70, 12, 60, 120, 120, 170, 138, 1399.99], 341 | [24, 40, 59, 60, 12, 10, 30, 30, 140, 78, 269.99], 342 | [30, 60, 45, 201, 16, 30, 30, 30, 170, 90, 199.99]]) 343 | 344 | weights = mcda_weights.gini_weighting(matrix) 345 | print('Gini coefficient-based weights: ', np.round(weights, 4)) 346 | 347 | 348 | Output 349 | 350 | .. code-block:: console 351 | 352 | Gini coefficient-based weights: [0.0362 0.0437 0.0848 0.0984 0.048 0.0842 0.1379 0.1125 0.0745 0.1107 0.169 ] 353 | 354 | 355 | MEREC weighting method 356 | 357 | .. 
code-block:: python 358 | 359 | import numpy as np 360 | from crispyn import weighting_methods as mcda_weights 361 | 362 | matrix = np.array([[450, 8000, 54, 145], 363 | [10, 9100, 2, 160], 364 | [100, 8200, 31, 153], 365 | [220, 9300, 1, 162], 366 | [5, 8400, 23, 158]]) 367 | 368 | types = np.array([1, 1, -1, -1]) 369 | 370 | weights = mcda_weights.merec_weighting(matrix, types) 371 | print('MEREC weights: ', np.round(weights, 4)) 372 | 373 | 374 | Output 375 | 376 | .. code-block:: console 377 | 378 | MEREC weights: [0.5752 0.0141 0.4016 0.0091] 379 | 380 | 381 | Statistical variance weighting method 382 | 383 | .. code-block:: python 384 | 385 | import numpy as np 386 | from crispyn import weighting_methods as mcda_weights 387 | 388 | matrix = np.array([[0.619, 0.449, 0.447], 389 | [0.862, 0.466, 0.006], 390 | [0.458, 0.698, 0.771], 391 | [0.777, 0.631, 0.491], 392 | [0.567, 0.992, 0.968]]) 393 | 394 | weights = mcda_weights.stat_var_weighting(matrix) 395 | print('Statistical variance weights: ', np.round(weights, 4)) 396 | 397 | 398 | Output 399 | 400 | .. code-block:: console 401 | 402 | Statistical variance weights: [0.3441 0.3497 0.3062] 403 | 404 | 405 | CILOS weighting method 406 | 407 | .. code-block:: python 408 | 409 | import numpy as np 410 | from crispyn import weighting_methods as mcda_weights 411 | 412 | matrix = np.array([[3, 100, 10, 7], 413 | [2.500, 80, 8, 5], 414 | [1.800, 50, 20, 11], 415 | [2.200, 70, 12, 9]]) 416 | 417 | types = np.array([-1, 1, -1, 1]) 418 | 419 | weights = mcda_weights.cilos_weighting(matrix, types) 420 | print('CILOS weights: ', np.round(weights, 3)) 421 | 422 | 423 | Output 424 | 425 | .. code-block:: console 426 | 427 | CILOS weights: [0.334 0.22 0.196 0.25 ] 428 | 429 | 430 | IDOCRIW weighting method 431 | 432 | .. 
code-block:: python 433 | 434 | import numpy as np 435 | from crispyn import weighting_methods as mcda_weights 436 | 437 | matrix = np.array([[3.0, 100, 10, 7], 438 | [2.5, 80, 8, 5], 439 | [1.8, 50, 20, 11], 440 | [2.2, 70, 12, 9]]) 441 | 442 | types = np.array([-1, 1, -1, 1]) 443 | 444 | weights = mcda_weights.idocriw_weighting(matrix, types) 445 | print('IDOCRIW weights: ', np.round(weights, 3)) 446 | 447 | Output 448 | 449 | .. code-block:: console 450 | 451 | IDOCRIW weights: [0.166 0.189 0.355 0.291] 452 | 453 | 454 | Angle weighting method 455 | 456 | .. code-block:: python 457 | 458 | import numpy as np 459 | from crispyn import weighting_methods as mcda_weights 460 | 461 | matrix = np.array([[30, 30, 38, 29], 462 | [19, 54, 86, 29], 463 | [19, 15, 85, 28.9], 464 | [68, 70, 60, 29]]) 465 | 466 | types = np.array([1, 1, 1, 1]) 467 | 468 | weights = mcda_weights.angle_weighting(matrix, types) 469 | print('Angle weights: ', np.round(weights, 4)) 470 | 471 | 472 | Output 473 | 474 | .. code-block:: console 475 | 476 | Angle weights: [0.415 0.3612 0.2227 0.0012] 477 | 478 | 479 | Coefficient of variation weighting method 480 | 481 | .. code-block:: python 482 | 483 | import numpy as np 484 | from crispyn import weighting_methods as mcda_weights 485 | 486 | matrix = np.array([[30, 30, 38, 29], 487 | [19, 54, 86, 29], 488 | [19, 15, 85, 28.9], 489 | [68, 70, 60, 29]]) 490 | 491 | weights = mcda_weights.coeff_var_weighting(matrix) 492 | print('Coefficient of variation weights: ', np.round(weights, 4)) 493 | 494 | 495 | Output 496 | 497 | .. code-block:: console 498 | 499 | Coefficient of variation weights: [0.4258 0.361 0.2121 0.0011] 500 | 501 | 502 | Subjective methods for criteria weights determination 503 | _______________________________________________________________________________ 504 | 505 | 506 | AHP weighting method 507 | 508 | .. 
code-block:: python 509 | 510 | import numpy as np 511 | from crispyn import weighting_methods as mcda_weights 512 | 513 | PCcriteria = np.array([[1, 1, 5, 3], [1, 1, 5, 3], 514 | [1/5, 1/5, 1, 1/3], [1/3, 1/3, 3, 1]]) 515 | 516 | ahp_weighting = mcda_weights.AHP_WEIGHTING() 517 | weights = ahp_weighting(X = PCcriteria, compute_priority_vector_method=ahp_weighting._eigenvector) 518 | 519 | print('AHP weights: ', np.round(weights, 4)) 520 | 521 | Output 522 | 523 | .. code-block:: console 524 | 525 | Inconsistency index: 0.01610868948440318 526 | AHP weights: [0.3899 0.3899 0.0679 0.1524] 527 | 528 | 529 | SWARA weighting method 530 | 531 | .. code-block:: python 532 | 533 | import numpy as np 534 | from crispyn import weighting_methods as mcda_weights 535 | 536 | criteria_indexes = np.array([0, 1, 2, 3, 4, 5, 6]) 537 | s = np.array([0, 0.35, 0.2, 0.3, 0, 0.4]) 538 | 539 | weights = mcda_weights.swara_weighting(criteria_indexes, s) 540 | 541 | print('SWARA weights: ', np.round(weights, 4)) 542 | 543 | Output 544 | 545 | .. code-block:: console 546 | 547 | SWARA weights: [0.2152 0.2152 0.1594 0.1328 0.1022 0.1022 0.073 ] 548 | 549 | 550 | LBWA weighting method 551 | 552 | .. code-block:: python 553 | 554 | import numpy as np 555 | from crispyn import weighting_methods as mcda_weights 556 | 557 | criteria_indexes = [ 558 | [1, 4, 6, 5, 0, 2], 559 | [7, 3] 560 | ] 561 | 562 | criteria_values_I = [ 563 | [0, 2, 3, 4, 4, 5], 564 | [1, 2] 565 | ] 566 | 567 | weights = mcda_weights.lbwa_weighting(criteria_indexes, criteria_values_I) 568 | 569 | print('LBWA weights: ', np.round(weights, 4)) 570 | 571 | Output 572 | 573 | .. code-block:: console 574 | 575 | LBWA weights: [0.1215 0.1909 0.1114 0.0835 0.1485 0.1215 0.1336 0.0891] 576 | 577 | 578 | SAPEVO weighting method 579 | 580 | .. 
code-block:: python 581 | 582 | import numpy as np 583 | from crispyn import weighting_methods as mcda_weights 584 | 585 | criteria_matrix = np.array([ 586 | [0, 0, 3, 3, 1, 3, 2, 1, 2], 587 | [0, 0, 3, 3, 1, 3, 2, 1, 2], 588 | [-3, -3, 0, 0, -1, -2, -2, -1, -2], 589 | [-3, -3, 0, 0, -2, 2, -2, -2, -2], 590 | [-1, -1, 1, 2, 0, 2, 0, -1, 1], 591 | [-3, -3, 2, -2, -2, 0, -2, -1, -2], 592 | [-3, -2, 2, 2, 0, 2, 0, 3, 0], 593 | [-1, -1, 1, 2, 1, 1, -3, 0, -1], 594 | [-2, -2, 2, 2, -1, 2, 0, 1, 0], 595 | ]) 596 | 597 | weights = mcda_weights.sapevo_weighting(criteria_matrix) 598 | 599 | print('SAPEVO weights: ', np.round(weights, 4)) 600 | 601 | Output 602 | 603 | .. code-block:: console 604 | 605 | SAPEVO weights: [0.232 0.232 0. 0.016 0.136 0.008 0.144 0.104 0.128] 606 | 607 | 608 | 609 | Stochastic Multicriteria Acceptability Analysis Method - SMAA (VIKOR_SMAA) 610 | _______________________________________________________________________________ 611 | 612 | 613 | .. code-block:: python 614 | 615 | from crispyn.mcda_methods import VIKOR_SMAA 616 | 617 | # Criteria number 618 | n = matrix.shape[1] 619 | # Number of weight vectors to generate for SMAA 620 | iterations = 10000 621 | 622 | # Create the object of the ``VIKOR_SMAA`` method 623 | vikor_smaa = VIKOR_SMAA() 624 | # Generate weight vectors for SMAA. Number of weight vectors is equal to ``iterations`` number. Vectors include ``n`` values. 625 | weight_vectors = vikor_smaa._generate_weights(n, iterations) 626 | 627 | # Calculate Rank acceptability index, Central weight vector and final ranking based on SMAA method combined with VIKOR 628 | rank_acceptability_index, central_weight_vector, rank_scores = vikor_smaa(matrix, weight_vectors, types) 629 | 630 | 631 | 632 | Normalization methods 633 | ______________________ 634 | 635 | Here is an example of ``vector_normalization`` usage. 
Other normalizations provided in module ``normalizations``, namely ``minmax_normalization``, ``max_normalization``, 636 | ``sum_normalization``, ``linear_normalization`` are used in analogous way. 637 | 638 | 639 | Vector normalization 640 | 641 | .. code-block:: python 642 | 643 | import numpy as np 644 | from crispyn import normalizations as norms 645 | 646 | matrix = np.array([[8, 7, 2, 1], 647 | [5, 3, 7, 5], 648 | [7, 5, 6, 4], 649 | [9, 9, 7, 3], 650 | [11, 10, 3, 7], 651 | [6, 9, 5, 4]]) 652 | 653 | types = np.array([1, 1, 1, 1]) 654 | 655 | norm_matrix = norms.vector_normalization(matrix, types) 656 | print('Normalized matrix: ', np.round(norm_matrix, 4)) 657 | 658 | Output 659 | 660 | .. code-block:: console 661 | 662 | Normalized matrix: [[0.4126 0.3769 0.1525 0.0928] 663 | [0.2579 0.1615 0.5337 0.4642] 664 | [0.361 0.2692 0.4575 0.3714] 665 | [0.4641 0.4845 0.5337 0.2785] 666 | [0.5673 0.5384 0.2287 0.6499] 667 | [0.3094 0.4845 0.3812 0.3714]] 668 | -------------------------------------------------------------------------------- /examples/criteria_electric_cars.csv: -------------------------------------------------------------------------------- 1 | Cj,Name,Unit,Type 2 | C1,Max speed,mph,1 3 | C2,Battery capacity,kWh,1 4 | C3,Electric motor,kW,1 5 | C4,Maximum torque,Nm,1 6 | C5,Horsepower,hp,1 7 | C6,EPA Fuel Economy Combined,MPGe,1 8 | C7,EPA Fuel Economy City,MPGe,1 9 | C8,EPA Fuel Economy Highway,MPGe,1 10 | C9,EPA range,miles,1 11 | C10,"Turning Diameter / Radius, curb to curb",feet,-1 12 | C11,Base price,USD,-1 13 | -------------------------------------------------------------------------------- /examples/dataset_cars.csv: -------------------------------------------------------------------------------- 1 | Ai,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10,C11 2 | A1,155.3,74,340,673,456,111,115,106,244,39.8,65440 3 | A2,162.2,79.5,247,639,283,113,118,107,263,38.8,60440 4 | A3,112.5,68,198,430,266,98,105,91,230,38.1,56575 5 | 
A4,90.1,66,150,360,201.2,120,131,109,259,34.8,32495 6 | A5,99.4,77,150,310,201.2,97,102,90,260,36.4,45635 7 | A6,89.5,40,110,320,147.5,111,123,99,226,34.8,28425 8 | A7,124.3,95,125,247,187.7,78,78,77,222,40,84595 9 | A8,155.3,79.2,160,300,214.6,79,79,80,227,38.4,105150 10 | A9,162.2,100,205,420,502.9,120,124,115,402,40.3,96440 11 | A10,96.3,39.2,100,395,134.1,120,132,108,258,34.8,35245 12 | A11,162.2,100,205,420,502.9,98,103,93,371,40.8,127940 13 | A12,102.5,38.3,101,295,136.1,133,145,121,170,34.8,34250 14 | Type,1,1,1,1,1,1,1,1,1,-1,-1 15 | -------------------------------------------------------------------------------- /examples/dataset_localisations.csv: -------------------------------------------------------------------------------- 1 | Symbol,C1,C2,C3,C4,C5,C6,C7,C8,C9,C10 2 | A1,106780,6.75,2,220,6,1,52,455.5,8.9,36.8 3 | A2,86370,7.12,3,400,10,0,20,336.5,7.2,29.8 4 | A3,104850,6.95,60,220,7,1,60,416,8.7,36.2 5 | A4,46600,6.04,1,220,3,0,50,277,3.9,16 6 | type,1,1,-1,1,-1,-1,1,-1,-1,1 7 | -------------------------------------------------------------------------------- /examples/electric_cars_2021.csv: -------------------------------------------------------------------------------- 1 | Ai,Name,C1 Max speed [mph],C2 Battery [kWh],C3 Electric motor [kW] Front,C4 Torque [Nm] Front,C5 Mechanical horsepower [hp],C6 EPA Fuel Economy Combined [MPGe],C7 EPA Fuel Economy City [MPGe],C8 EPA Fuel Economy Highway [MPGe],C9 EPA range [miles],"C10 Turning Diameter / Radius, curb to curb [feet]",C11 Base price [$] 2 | A1,Tesla Model Y,155.3,74,340,673,456,111,115,106,244,39.8,65440 3 | A2,Tesla Model 3,162.2,79.5,247,639,283,113,118,107,263,38.8,60440 4 | A3,Ford Mustang Mach-E,112.5,68,198,430,266,98,105,91,230,38.1,56575 5 | A4,Chevrolet Bolt EV and EUV,90.1,66,150,360,201.2,120,131,109,259,34.8,32495 6 | A5,Volkswagen ID.4,99.4,77,150,310,201.2,97,102,90,260,36.4,45635 7 | A6,Nissan Leaf,89.5,40,110,320,147.5,111,123,99,226,34.8,28425 8 | A7,Audi e-tron and e-tron 
Sportback,124.3,95,125,247,187.7,78,78,77,222,40,84595 9 | A8,Porsche Taycan,155.3,79.2,160,300,214.6,79,79,80,227,38.4,105150 10 | A9,Tesla Model S,162.2,100,205,420,502.9,120,124,115,402,40.3,96440 11 | A10,Hyundai Kona Electric,96.3,39.2,100,395,134.1,120,132,108,258,34.8,35245 12 | A11,Tesla Model X,162.2,100,205,420,502.9,98,103,93,371,40.8,127940 13 | A12,Hyundai Ioniq Electric,102.5,38.3,101,295,136.1,133,145,121,170,34.8,34250 14 | Type,,1,1,1,1,1,1,1,1,1,-1,-1 15 | -------------------------------------------------------------------------------- /examples/main.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import time 3 | 4 | import numpy as np 5 | import pandas as pd 6 | import matplotlib.pyplot as plt 7 | import matplotlib 8 | import seaborn as sns 9 | 10 | from crispyn.mcda_methods import VIKOR 11 | from crispyn.mcda_methods import VIKOR_SMAA 12 | from crispyn.additions import rank_preferences 13 | from crispyn import correlations as corrs 14 | from crispyn import normalizations as norms 15 | from crispyn import weighting_methods as mcda_weights 16 | 17 | 18 | # Functions for visualizations 19 | 20 | def plot_barplot(df_plot, x_name, y_name, title): 21 | """ 22 | Display stacked column chart of weights for criteria for `x_name == Weighting methods` 23 | and column chart of ranks for alternatives `x_name == Alternatives` 24 | 25 | Parameters 26 | ---------- 27 | df_plot : dataframe 28 | dataframe with criteria weights calculated different weighting methods 29 | or with alternaives rankings for different weighting methods 30 | x_name : str 31 | name of x axis, Alternatives or Weighting methods 32 | y_name : str 33 | name of y axis, Ranks or Weight values 34 | title : str 35 | name of chart title, Weighting methods or Criteria 36 | 37 | Examples 38 | ---------- 39 | >>> plot_barplot(df_plot, x_name, y_name, title) 40 | """ 41 | 42 | list_rank = np.arange(1, len(df_plot) + 1, 1) 43 | stacked = True 44 
| width = 0.5 45 | if x_name == 'Alternatives': 46 | stacked = False 47 | width = 0.8 48 | elif x_name == 'Alternative': 49 | pass 50 | else: 51 | df_plot = df_plot.T 52 | ax = df_plot.plot(kind='bar', width = width, stacked=stacked, edgecolor = 'black', figsize = (9,4)) 53 | ax.set_xlabel(x_name, fontsize = 12) 54 | ax.set_ylabel(y_name, fontsize = 12) 55 | 56 | if x_name == 'Alternatives': 57 | ax.set_yticks(list_rank) 58 | 59 | ax.set_xticklabels(df_plot.index, rotation = 'horizontal') 60 | ax.tick_params(axis = 'both', labelsize = 12) 61 | 62 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', 63 | ncol=4, mode="expand", borderaxespad=0., edgecolor = 'black', title = title, fontsize = 11) 64 | 65 | ax.grid(True, linestyle = '--') 66 | ax.set_axisbelow(True) 67 | plt.tight_layout() 68 | plt.savefig('results/bar_chart_weights_' + x_name + '.pdf') 69 | plt.savefig('results/bar_chart_weights_' + x_name + '.eps') 70 | plt.show() 71 | 72 | 73 | def draw_heatmap(data, title): 74 | """ 75 | Display heatmap with correlations of compared rankings generated using different methods 76 | 77 | Parameters 78 | ---------- 79 | data : dataframe 80 | dataframe with correlation values between compared rankings 81 | title : str 82 | title of chart containing name of used correlation coefficient 83 | 84 | Examples 85 | ---------- 86 | >>> draw_heatmap(data, title) 87 | """ 88 | 89 | plt.figure(figsize = (6, 4)) 90 | sns.set(font_scale=1.0) 91 | heatmap = sns.heatmap(data, annot=True, fmt=".2f", cmap="RdYlBu", 92 | linewidth=0.5, linecolor='w') 93 | plt.yticks(va="center") 94 | plt.xlabel('Weighting methods') 95 | plt.title('Correlation coefficient: ' + title) 96 | plt.tight_layout() 97 | plt.savefig('results/heatmap_weights.pdf') 98 | plt.savefig('results/heatmap_weights.eps') 99 | plt.show() 100 | 101 | 102 | def draw_heatmap_smaa(data, title): 103 | """ 104 | Display heatmap with correlations of compared rankings generated using different methods 105 | 106 | 
Parameters 107 | ---------- 108 | data : dataframe 109 | dataframe with correlation values between compared rankings 110 | title : str 111 | title of chart containing name of used correlation coefficient 112 | 113 | Examples 114 | ---------- 115 | >>> draw_heatmap(data, title) 116 | """ 117 | 118 | sns.set(font_scale=1.0) 119 | heatmap = sns.heatmap(data, annot=True, fmt=".2f", cmap="RdYlBu_r", 120 | linewidth=0.05, linecolor='w') 121 | plt.yticks(rotation=0) 122 | plt.ylabel('Alternatives') 123 | plt.tick_params(labelbottom=False,labeltop=True) 124 | 125 | plt.title(title) 126 | plt.tight_layout() 127 | plt.savefig('results/heatmap_smaa.pdf') 128 | plt.savefig('results/heatmap_smaa.eps') 129 | plt.show() 130 | 131 | 132 | def plot_boxplot(data): 133 | """ 134 | Display boxplot showing distribution of criteria weights determined with different methods. 135 | 136 | Parameters 137 | ---------- 138 | data : dataframe 139 | dataframe with correlation values between compared rankings 140 | 141 | Examples 142 | --------- 143 | >>> plot_boxplot(data) 144 | """ 145 | 146 | df_melted = pd.melt(data) 147 | plt.figure(figsize = (7, 4)) 148 | ax = sns.boxplot(x = 'variable', y = 'value', data = df_melted, width = 0.6) 149 | ax.grid(True, linestyle = '--') 150 | ax.set_axisbelow(True) 151 | ax.set_xlabel('Criterion', fontsize = 12) 152 | ax.set_ylabel('Criteria weights distribution', fontsize = 12) 153 | plt.tight_layout() 154 | plt.savefig('results/boxplot_weights.pdf') 155 | plt.savefig('results/boxplot_weights.eps') 156 | plt.show() 157 | 158 | 159 | # Create dictionary class 160 | class Create_dictionary(dict): 161 | 162 | # __init__ function 163 | def __init__(self): 164 | self = dict() 165 | 166 | # Function to add key:value 167 | def add(self, key, value): 168 | self[key] = value 169 | 170 | 171 | # main 172 | def main(): 173 | 174 | # Load data from CSV 175 | filename = 'dataset_cars.csv' 176 | data = pd.read_csv(filename, index_col = 'Ai') 177 | # Load decision matrix 
from CSV 178 | df_data = data.iloc[:len(data) - 1, :] 179 | # Criteria types are in the last row of CSV 180 | types = data.iloc[len(data) - 1, :].to_numpy() 181 | 182 | # Convert decision matrix from dataframe to numpy ndarray type for faster calculations. 183 | matrix = df_data.to_numpy() 184 | 185 | # Symbols for alternatives Ai 186 | list_alt_names = [r'$A_{' + str(i) + '}$' for i in range(1, df_data.shape[0] + 1)] 187 | # Symbols for columns Cj 188 | cols = [r'$C_{' + str(j) + '}$' for j in range(1, data.shape[1] + 1)] 189 | 190 | 191 | # Part 1 - study with single weighting method 192 | 193 | # Determine criteria weights with chosen weighting method 194 | weights = mcda_weights.entropy_weighting(matrix) 195 | 196 | # Create the VIKOR method object 197 | # vikor = VIKOR(normalization_method=norms.minmax_normalization) 198 | vikor = VIKOR() 199 | 200 | # Calculate alternatives preference function values with VIKOR method 201 | pref = vikor(matrix, weights, types) 202 | 203 | # when there is only one (single) preference vector 204 | rank = rank_preferences(pref, reverse = False) 205 | 206 | print(rank) 207 | 208 | 209 | # Part 2 - study with several weighting methods 210 | # Create a list with weighting methods that you want to explore 211 | weighting_methods_set = [ 212 | mcda_weights.entropy_weighting, 213 | #mcda_weights.std_weighting, 214 | mcda_weights.critic_weighting, 215 | mcda_weights.gini_weighting, 216 | mcda_weights.merec_weighting, 217 | mcda_weights.stat_var_weighting, 218 | #mcda_weights.cilos_weighting, 219 | mcda_weights.idocriw_weighting, 220 | mcda_weights.angle_weighting, 221 | mcda_weights.coeff_var_weighting 222 | ] 223 | 224 | 225 | #df_weights = pd.DataFrame(weights.reshape(1, -1), index = ['Weights'], columns = cols) 226 | # Create dataframes for weights, preference function values and rankings determined using different weighting methods 227 | df_weights = pd.DataFrame(index = cols) 228 | df_preferences = pd.DataFrame(index = 
list_alt_names) 229 | df_rankings = pd.DataFrame(index = list_alt_names) 230 | 231 | # Create the VIKOR method object 232 | vikor = VIKOR() 233 | 234 | for weight_type in weighting_methods_set: 235 | 236 | if weight_type.__name__ in ["cilos_weighting", "idocriw_weighting", "angle_weighting", "merec_weighting"]: 237 | weights = weight_type(matrix, types) 238 | else: 239 | weights = weight_type(matrix) 240 | 241 | df_weights[weight_type.__name__[:-10].upper().replace('_', ' ')] = weights 242 | pref = vikor(matrix, weights, types) 243 | 244 | rank = rank_preferences(pref, reverse = False) 245 | 246 | df_preferences[weight_type.__name__[:-10].upper().replace('_', ' ')] = pref 247 | df_rankings[weight_type.__name__[:-10].upper().replace('_', ' ')] = rank 248 | 249 | 250 | df_rankings.to_csv('results/rankings.csv') 251 | # plot criteria weights distribution using box chart 252 | plot_boxplot(df_weights.T) 253 | 254 | # plot stacked column chart of criteria weights 255 | plot_barplot(df_weights, 'Weighting methods', 'Weight value', 'Criteria') 256 | 257 | # plot column chart of alternatives rankings 258 | plot_barplot(df_rankings, 'Alternatives', 'Rank', 'Weighting methods') 259 | 260 | # Plot heatmaps of rankings correlation coefficient 261 | # Create dataframe with rankings correlation values 262 | results = copy.deepcopy(df_rankings) 263 | method_types = list(results.columns) 264 | dict_new_heatmap_rw = Create_dictionary() 265 | 266 | for el in method_types: 267 | dict_new_heatmap_rw.add(el, []) 268 | 269 | for i, j in [(i, j) for i in method_types[::-1] for j in method_types]: 270 | dict_new_heatmap_rw[j].append(corrs.weighted_spearman(results[i], results[j])) 271 | 272 | df_new_heatmap_rw = pd.DataFrame(dict_new_heatmap_rw, index = method_types[::-1]) 273 | df_new_heatmap_rw.columns = method_types 274 | 275 | # Plot heatmap with rankings correlation 276 | draw_heatmap(df_new_heatmap_rw, r'$r_w$') 277 | 278 | 279 | 280 | # SMAA method 281 | cols_ai = [str(el) for el 
in range(1, matrix.shape[0] + 1)] 282 | 283 | # criteria number 284 | n = matrix.shape[1] 285 | # SMAA iterations number (number of weight vectors for SMAA) 286 | iterations = 10000 287 | 288 | start = time.time() 289 | # create the VIKOR_SMAA object 290 | vikor_smaa = VIKOR_SMAA() 291 | # generate matrix with weight vectors for SMAA 292 | weight_vectors = vikor_smaa._generate_weights(n, iterations) 293 | 294 | # run the VIKOR_SMAA method 295 | rank_acceptability_index, central_weight_vector, rank_scores = vikor_smaa(matrix, weight_vectors, types) 296 | 297 | end = time.time() - start 298 | print('Run time: ', end) 299 | 300 | acc_in_df = pd.DataFrame(rank_acceptability_index, index = list_alt_names, columns = cols_ai) 301 | acc_in_df.to_csv('results_smaa/ai.csv') 302 | 303 | matplotlib.rcdefaults() 304 | plot_barplot(acc_in_df, 'Alternative', 'Rank acceptability index', 'Rank') 305 | 306 | draw_heatmap_smaa(acc_in_df, 'Rank acceptability indexes') 307 | 308 | central_weights_df = pd.DataFrame(central_weight_vector, index = list_alt_names, columns = cols) 309 | central_weights_df.to_csv('results_smaa/cw.csv') 310 | 311 | rank_scores_df = pd.DataFrame(rank_scores, index = list_alt_names, columns = ['Rank']) 312 | rank_scores_df.to_csv('results_smaa/fr.csv') 313 | 314 | 315 | 316 | 317 | if __name__ == '__main__': 318 | main() -------------------------------------------------------------------------------- /examples/main_crispyn_update.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | import numpy as np 4 | import pandas as pd 5 | import matplotlib.pyplot as plt 6 | import seaborn as sns 7 | 8 | from crispyn.mcda_methods import VIKOR 9 | from crispyn.additions import rank_preferences 10 | from crispyn import correlations as corrs 11 | from crispyn import weighting_methods as mcda_weights 12 | 13 | 14 | def plot_boxplot(data): 15 | """ 16 | Display boxplot showing distribution of criteria weights determined 
with different methods. 17 | 18 | Parameters 19 | ---------- 20 | data : dataframe 21 | dataframe with correlation values between compared rankings 22 | 23 | Examples 24 | --------- 25 | >>> plot_boxplot(data) 26 | """ 27 | 28 | df_melted = pd.melt(data) 29 | plt.figure(figsize = (7, 4)) 30 | ax = sns.boxplot(x = 'variable', y = 'value', data = df_melted, width = 0.6) 31 | ax.grid(True, linestyle = '--') 32 | ax.set_axisbelow(True) 33 | ax.set_xlabel('Criterion', fontsize = 12) 34 | ax.set_ylabel('Criteria weights distribution', fontsize = 12) 35 | plt.title('Distribution of criteria weights values', fontsize = 12) 36 | plt.tight_layout() 37 | plt.savefig('results_update/boxplot_weights.pdf') 38 | plt.savefig('results_update/boxplot_weights.eps') 39 | plt.show() 40 | 41 | 42 | # bar (column) chart 43 | def plot_barplot(df_plot, stacked = False): 44 | """ 45 | Visualization method to display column chart of alternatives rankings obtained with 46 | different methods. 47 | 48 | Parameters 49 | ---------- 50 | df_plot : DataFrame 51 | DataFrame containing rankings of alternatives obtained with different methods. 52 | The particular rankings are included in subsequent columns of DataFrame. 53 | 54 | stacked : Boolean 55 | Variable denoting if the chart is to be stacked or not. 
56 | 57 | Examples 58 | ---------- 59 | >>> plot_barplot(df_plot) 60 | """ 61 | 62 | ax = df_plot.plot(kind='bar', width = 0.6, stacked=stacked, edgecolor = 'black', figsize = (9,4)) 63 | if stacked == False: 64 | step = 1 65 | list_rank = np.arange(1, len(df_plot) + 1, step) 66 | ax.set_yticks(list_rank) 67 | ax.set_ylim(0, len(df_plot) + 1) 68 | 69 | ax.set_xticklabels(df_plot.index, rotation = 'horizontal') 70 | ax.tick_params(axis = 'both', labelsize = 12) 71 | y_ticks = ax.yaxis.get_major_ticks() 72 | 73 | ncol = df_plot.shape[1] 74 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', title = 'Weighting methods', 75 | ncol = ncol, mode="expand", borderaxespad=0., edgecolor = 'black', fontsize = 12) 76 | 77 | ax.set_xlabel('Alternatives', fontsize = 12) 78 | ax.set_ylabel('Rank', fontsize = 12) 79 | 80 | ax.grid(True, linestyle = '--') 81 | ax.set_axisbelow(True) 82 | plt.tight_layout() 83 | plt.savefig('results_update/barplot_rankings.pdf') 84 | plt.savefig('results_update/barplot_rankings.eps') 85 | plt.show() 86 | 87 | 88 | # bar (column) chart 89 | def plot_barplot_stacked(df_plot, stacked = False): 90 | """ 91 | Visualization method to display column chart of alternatives rankings obtained with 92 | different methods. 93 | 94 | Parameters 95 | ---------- 96 | df_plot : DataFrame 97 | DataFrame containing rankings of alternatives obtained with different methods. 98 | The particular rankings are included in subsequent columns of DataFrame. 99 | 100 | stacked : Boolean 101 | Variable denoting if the chart is to be stacked or not. 
102 | 103 | Examples 104 | ---------- 105 | >>> plot_barplot(df_plot) 106 | """ 107 | 108 | ax = df_plot.plot(kind='bar', width = 0.6, stacked=stacked, edgecolor = 'black', figsize = (9,4)) 109 | if stacked == False: 110 | list_rank = np.arange(0, 0.30, 0.05) 111 | ax.set_yticks(list_rank) 112 | ax.set_ylim(0, 0.25) 113 | 114 | ax.set_xticklabels(df_plot.index, rotation = 'horizontal') 115 | ax.tick_params(axis = 'both', labelsize = 12) 116 | y_ticks = ax.yaxis.get_major_ticks() 117 | 118 | ncol = df_plot.shape[1] 119 | plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', 120 | ncol = ncol, mode="expand", borderaxespad=0., edgecolor = 'black', title = 'Criteria', fontsize = 12) 121 | 122 | ax.set_xlabel('Weighting methods', fontsize = 12) 123 | ax.set_ylabel('Weight value', fontsize = 12) 124 | 125 | ax.grid(True, linestyle = '--') 126 | ax.set_axisbelow(True) 127 | plt.tight_layout() 128 | plt.savefig('results_update/boxplot_weights_col.pdf') 129 | plt.savefig('results_update/boxplot_weights_col.eps') 130 | plt.show() 131 | 132 | 133 | # plot radar chart 134 | def plot_radar(data): 135 | """ 136 | Visualization method to display rankings of alternatives obtained with different methods 137 | on the radar chart. 138 | 139 | Parameters 140 | ----------- 141 | data : DataFrame 142 | DataFrame containing rankings of alternatives obtained with different 143 | methods. The particular rankings are contained in subsequent columns of DataFrame. 
144 | 145 | Examples 146 | ---------- 147 | >>> plot_radar(data) 148 | """ 149 | 150 | fig=plt.figure() 151 | ax = fig.add_subplot(111, polar = True) 152 | 153 | for col in list(data.columns): 154 | labels=np.array(list(data.index)) 155 | stats = data.loc[labels, col].values 156 | 157 | angles=np.linspace(0, 2*np.pi, len(labels), endpoint=False) 158 | # close the plot 159 | stats=np.concatenate((stats,[stats[0]])) 160 | angles=np.concatenate((angles,[angles[0]])) 161 | 162 | lista = list(data.index) 163 | lista.append(data.index[0]) 164 | labels=np.array(lista) 165 | 166 | ax.plot(angles, stats, '-o', linewidth=2) 167 | # ax.fill(angles, stats, label='_nolegend_', alpha=0.5) 168 | 169 | ax.set_thetagrids(angles * 180/np.pi, labels) 170 | ax.set_rgrids(np.arange(1, data.shape[0] + 1, 1)) 171 | ax.grid(True, linestyle = '--') 172 | ax.set_axisbelow(True) 173 | # plt.legend(data.columns, bbox_to_anchor=(1.0, 0.95, 0.4, 0.2), loc='upper left') 174 | if data.shape[1] % 2 == 0: 175 | ncol = data.shape[1] // 2 176 | else: 177 | ncol = data.shape[1] // 2 + 1 178 | # plt.legend(data.columns, bbox_to_anchor=(-0.1, 1.1, 1.2, .102), loc='lower left', 179 | # ncol = ncol, mode="expand", borderaxespad=0., edgecolor = 'black', title = 'Weighting methods', fontsize = 12) 180 | plt.legend(data.columns, bbox_to_anchor=(-0.1, 1.1, 1.2, .102), loc='lower left', 181 | ncol = ncol, mode="expand", borderaxespad=0., edgecolor = 'black', title = 'Weighting methods', fontsize = 12) 182 | # plt.title('VIKOR Rankings with different weighting methods') 183 | # ax.set_title('VIKOR Rankings with different weighting methods', y=1.39) 184 | plt.tight_layout() 185 | plt.savefig('results_update/radar.pdf') 186 | plt.savefig('results_update/radar.eps') 187 | plt.show() 188 | 189 | 190 | # plot radar chart 191 | def plot_radar_weights(data): 192 | """ 193 | Visualization method to display weights values obtained with different weighing methods 194 | on the radar chart. 
195 | 196 | Parameters 197 | ----------- 198 | data : DataFrame 199 | DataFrame containing weights obtained with different weighting 200 | methods. The particular weights are contained in subsequent columns of DataFrame. 201 | 202 | Examples 203 | ---------- 204 | >>> plot_radar_weights(data) 205 | """ 206 | 207 | fig=plt.figure() 208 | ax = fig.add_subplot(111, polar = True) 209 | 210 | for col in list(data.columns): 211 | labels=np.array(list(data.index)) 212 | stats = data.loc[labels, col].values 213 | 214 | angles=np.linspace(0, 2*np.pi, len(labels), endpoint=False) 215 | # close the plot 216 | stats=np.concatenate((stats,[stats[0]])) 217 | angles=np.concatenate((angles,[angles[0]])) 218 | 219 | lista = list(data.index) 220 | lista.append(data.index[0]) 221 | labels=np.array(lista) 222 | 223 | ax.plot(angles, stats, linewidth=2) 224 | # ax.fill(angles, stats, label='_nolegend_', alpha=0.5) 225 | 226 | ax.set_axisbelow(True) 227 | ax.grid(True, linestyle='--') 228 | ax.set_rgrids(np.round(np.linspace(0, np.max(stats) + 0.05, 5), 2)) 229 | ax.set_thetagrids(angles * 180/np.pi, labels) 230 | 231 | 232 | # plt.legend(data.columns, bbox_to_anchor=(1.0, 0.95, 0.4, 0.2), loc='upper left') 233 | if data.shape[1] % 2 == 0: 234 | ncol = data.shape[1] // 2 235 | else: 236 | ncol = data.shape[1] // 2 + 1 237 | plt.legend(data.columns, bbox_to_anchor=(-0.1, 1.1, 1.2, .102), loc='lower left', 238 | ncol = ncol, mode="expand", borderaxespad=0., edgecolor = 'black', title = 'Weighting methods', fontsize = 12) 239 | plt.tight_layout() 240 | plt.savefig('results_update/radar_weights.pdf') 241 | plt.savefig('results_update/radar_weights.eps') 242 | plt.show() 243 | 244 | 245 | # heat maps with correlations 246 | def draw_heatmap(df_new_heatmap, title): 247 | """ 248 | Visualization method to display heatmap with correlations of compared rankings generated using different methods 249 | 250 | Parameters 251 | ---------- 252 | data : DataFrame 253 | DataFrame with correlation values 
between compared rankings 254 | 255 | Examples 256 | --------- 257 | >>> draw_heatmap(df_new_heatmap) 258 | """ 259 | plt.figure(figsize = (8, 5)) 260 | sns.set(font_scale = 1.2) 261 | heatmap = sns.heatmap(df_new_heatmap, annot=True, fmt=".4f", cmap="PiYG", 262 | linewidth=0.5, linecolor='w') 263 | plt.yticks(va="center") 264 | plt.yticks(va="center") 265 | plt.xlabel('Weighting methods') 266 | plt.ylabel('Weighting methods') 267 | plt.title('Correlation coefficient: ' + title) 268 | plt.tight_layout() 269 | plt.savefig('results_update/heatmap.pdf') 270 | plt.savefig('results_update/heatmap.eps') 271 | plt.show() 272 | 273 | 274 | # Create dictionary class 275 | class Create_dictionary(dict): 276 | 277 | # __init__ function 278 | def __init__(self): 279 | self = dict() 280 | 281 | # Function to add key:value 282 | def add(self, key, value): 283 | self[key] = value 284 | 285 | 286 | # main 287 | def main(): 288 | 289 | # load data of localisations 290 | data = pd.read_csv('./dataset_localisations.csv', index_col='Symbol') 291 | 292 | df_data = data.iloc[:len(data) - 1, :] 293 | types = data.iloc[len(data) - 1, :].to_numpy() 294 | matrix = df_data.to_numpy() 295 | 296 | 297 | # dataframe for weights 298 | cols = [r'$C_{' + str(j) + '}$' for j in range(1, matrix.shape[1] + 1)] 299 | df_weights = pd.DataFrame(index = cols) 300 | 301 | # AHP weighting 302 | # matrix with criteria pairwise comparison 303 | PCcriteria_ahp = np.array([ 304 | [1, 2, 2, 2, 6, 2, 1, 1, 1/2, 1/2], 305 | [1/2, 1, 1, 1, 4, 1, 1, 1, 1/2, 1/2], 306 | [1/2, 1, 1, 1, 3, 1, 1/2, 1/2, 1/3, 1/3], 307 | [1/2, 1, 1, 1, 3, 1, 1/2, 1/2, 1/3, 1/3], 308 | [1/6, 1/4, 1/3, 1/3, 1, 1/3, 1/5, 1/5, 1/9, 1/9], 309 | [1/2, 1, 1, 1, 3, 1, 1/2, 1/2, 1/3, 1/3], 310 | [1, 1, 2, 2, 5, 2, 1, 1, 1/2, 1/2], 311 | [1, 1, 2, 2, 5, 2, 1, 1, 1/2, 1/2], 312 | [2, 2, 3, 3, 9, 3, 2, 2, 1, 1], 313 | [2, 2, 3, 3, 9, 3, 2, 2, 1, 1] 314 | ]) 315 | 316 | 317 | crit_ahp = pd.DataFrame(PCcriteria_ahp) 318 | 
crit_ahp.to_csv('./results_update/crit_ahp.csv') 319 | 320 | ahp_weighting = mcda_weights.AHP_WEIGHTING() 321 | weights_ahp = ahp_weighting(X = PCcriteria_ahp, compute_priority_vector_method=ahp_weighting._eigenvector) 322 | df_weights['AHP'] = weights_ahp 323 | 324 | # SWARA 325 | criteria_indexes = np.array([8, 9, 0, 6, 7, 1, 2, 3, 5, 4]) 326 | s = np.array([0, 0.4, 0.17, 0, 0.2, 0.25, 0, 0, 0.67]) 327 | 328 | weights_swara = mcda_weights.swara_weighting(criteria_indexes, s) 329 | 330 | df_weights['SWARA'] = weights_swara 331 | 332 | # weights LBWA 333 | # criteria_indexes, criteria_values_I 334 | criteria_indexes = [ 335 | [8, 9, 0], 336 | [6, 7, 1], 337 | [2, 3, 5], 338 | [], 339 | [], 340 | [], 341 | [], 342 | [], 343 | [], 344 | [4] 345 | ] 346 | 347 | criteria_values_I = [ 348 | [0, 0, 2], 349 | [1, 1, 2], 350 | [1, 1, 1], 351 | [], 352 | [], 353 | [], 354 | [], 355 | [], 356 | [], 357 | [3] 358 | ] 359 | 360 | weights_lbwa = mcda_weights.lbwa_weighting(criteria_indexes, criteria_values_I) 361 | 362 | df_weights['LBWA'] = weights_lbwa 363 | 364 | # SAPEVO 365 | PCcriteria_sapevo = np.array([ 366 | [ 0, 1, 1, 1, 2, 1, 0, 0, -1, -1], 367 | [-1, 0, 0, 0, 1, 0, 0, 0, -1, -1], 368 | [-1, 0, 0, 0, 1, 0, -1, -1, -1, -1], 369 | [-1, 0, 0, 0, 1, 0, -1, -1, -1, -1], 370 | [-2, -1, -1, -1, 0, -1, -2, -2, -3, -3], 371 | [-1, 0, 0, 0, 1, 0, -1, -1, -1, -1], 372 | [ 0, 0, 1, 1, 2, 1, 0, 0, -1, -1], 373 | [ 0, 0, 1, 1, 2, 1, 0, 0, -1, -1], 374 | [ 1, 1, 1, 1, 3, 1, 1, 1, 0, 0], 375 | [ 1, 1, 1, 1, 3, 1, 1, 1, 0, 0] 376 | ]) 377 | 378 | crit_sapevo = pd.DataFrame(PCcriteria_sapevo) 379 | crit_sapevo.to_csv('./results_update/crit_sapevo.csv') 380 | 381 | weights_sapevo = mcda_weights.sapevo_weighting(PCcriteria_sapevo) 382 | 383 | df_weights['SAPEVO'] = weights_sapevo 384 | 385 | df_weights.to_csv('./results_update/df_weights.csv') 386 | 387 | plot_boxplot(df_weights.T) 388 | plot_barplot_stacked(df_weights.T, stacked = True) 389 | plot_barplot_stacked(df_weights.T, stacked 
= False) 390 | plot_radar_weights(df_weights) 391 | 392 | weighting_methods_names = ['AHP', 'SWARA', 'LBWA', 'SAPEVO'] 393 | weights_list = [weights_ahp, weights_swara, weights_lbwa, weights_sapevo] 394 | 395 | # MCDA assessment 396 | # dataframe for alternatives 397 | alts = [r'$A_{' + str(j) + '}$' for j in range(1, matrix.shape[0] + 1)] 398 | df_prefs = pd.DataFrame(index = alts) 399 | df_ranks = pd.DataFrame(index = alts) 400 | 401 | vikor = VIKOR() 402 | for el, weights in enumerate(weights_list): 403 | pref = vikor(matrix, weights, types) 404 | rank = rank_preferences(pref, reverse=False) 405 | 406 | df_prefs[weighting_methods_names[el]] = pref 407 | df_ranks[weighting_methods_names[el]] = rank 408 | 409 | plot_radar(df_ranks) 410 | plot_barplot(df_ranks) 411 | 412 | df_prefs.to_csv('./results_update/df_prefs.csv') 413 | df_ranks.to_csv('./results_update/df_ranks.csv') 414 | 415 | 416 | # Rankings correlations 417 | results = copy.deepcopy(df_ranks) 418 | method_types = list(results.columns) 419 | dict_new_heatmap_rw = Create_dictionary() 420 | 421 | for el in method_types: 422 | dict_new_heatmap_rw.add(el, []) 423 | 424 | # heatmaps for correlations coefficients 425 | for i, j in [(i, j) for i in method_types[::-1] for j in method_types]: 426 | dict_new_heatmap_rw[j].append(corrs.weighted_spearman(results[i], results[j])) 427 | 428 | df_new_heatmap_rw = pd.DataFrame(dict_new_heatmap_rw, index = method_types[::-1]) 429 | df_new_heatmap_rw.columns = method_types 430 | 431 | # correlation matrix with rw coefficient 432 | draw_heatmap(df_new_heatmap_rw, r'$r_w$') 433 | 434 | 435 | 436 | if __name__ == '__main__': 437 | main() -------------------------------------------------------------------------------- /examples/results/bar_chart_weights_Alternative.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results/bar_chart_weights_Alternative.pdf -------------------------------------------------------------------------------- /examples/results/bar_chart_weights_Alternatives.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results/bar_chart_weights_Alternatives.pdf -------------------------------------------------------------------------------- /examples/results/bar_chart_weights_Weighting methods.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results/bar_chart_weights_Weighting methods.pdf -------------------------------------------------------------------------------- /examples/results/boxplot_weights.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results/boxplot_weights.pdf -------------------------------------------------------------------------------- /examples/results/heatmap_smaa.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results/heatmap_smaa.pdf -------------------------------------------------------------------------------- /examples/results/heatmap_weights.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results/heatmap_weights.pdf -------------------------------------------------------------------------------- 
/examples/results/rankings.csv: -------------------------------------------------------------------------------- 1 | ,ENTROPY,CRITIC,GINI,MEREC,STAT VAR,IDOCRIW,ANGLE,COEFF VAR 2 | $A_{1}$,1,2,1,1,2,1,1,1 3 | $A_{2}$,2,1,2,2,1,2,3,3 4 | $A_{3}$,4,4,4,4,5,4,4,4 5 | $A_{4}$,5,5,5,5,10,5,6,5 6 | $A_{5}$,7,6,7,6,9,6,7,7 7 | $A_{6}$,10,10,12,9,12,9,12,12 8 | $A_{7}$,9,12,10,10,11,10,9,9 9 | $A_{8}$,8,11,8,12,4,12,8,8 10 | $A_{9}$,3,3,3,3,3,3,2,2 11 | $A_{10}$,11,8,9,7,8,8,10,10 12 | $A_{11}$,6,9,6,11,6,11,5,6 13 | $A_{12}$,12,7,11,8,7,7,11,11 14 | -------------------------------------------------------------------------------- /examples/results_smaa/ai.csv: -------------------------------------------------------------------------------- 1 | ,1,2,3,4,5,6,7,8,9,10,11,12 2 | $A_{1}$,0.2336,0.2566,0.1847,0.1303,0.0493,0.0531,0.0236,0.0486,0.0164,0.0038,0.0,0.0 3 | $A_{2}$,0.2218,0.3457,0.221,0.1212,0.0454,0.0381,0.0068,0.0,0.0,0.0,0.0,0.0 4 | $A_{3}$,0.0,0.0089,0.0248,0.0715,0.2903,0.1403,0.158,0.1388,0.1588,0.0086,0.0,0.0 5 | $A_{4}$,0.1139,0.0692,0.0742,0.1356,0.1642,0.2322,0.0794,0.0362,0.032,0.0327,0.0304,0.0 6 | $A_{5}$,0.0004,0.0114,0.0144,0.0219,0.0805,0.1024,0.2563,0.1411,0.1273,0.2234,0.0209,0.0 7 | $A_{6}$,0.0,0.0003,0.01,0.0507,0.0268,0.0368,0.1308,0.1109,0.1573,0.1661,0.1325,0.1778 8 | $A_{7}$,0.0,0.0,0.0001,0.0015,0.0061,0.0303,0.0314,0.0309,0.0867,0.0807,0.0889,0.6434 9 | $A_{8}$,0.0,0.0016,0.0008,0.0056,0.0618,0.0422,0.0547,0.1433,0.0819,0.124,0.4436,0.0405 10 | $A_{9}$,0.3804,0.104,0.2814,0.0415,0.0317,0.0284,0.0224,0.0185,0.0671,0.0246,0.0,0.0 11 | $A_{10}$,0.0093,0.0432,0.0737,0.0678,0.0826,0.1671,0.0967,0.0758,0.1391,0.1384,0.0944,0.0119 12 | $A_{11}$,0.0,0.1054,0.0779,0.2954,0.0692,0.0521,0.0577,0.0902,0.0272,0.0785,0.1065,0.0399 13 | $A_{12}$,0.0406,0.0537,0.037,0.057,0.0921,0.077,0.0822,0.1657,0.1062,0.1192,0.0828,0.0865 14 | -------------------------------------------------------------------------------- /examples/results_smaa/cw.csv: 
-------------------------------------------------------------------------------- 1 | ,$C_{1}$,$C_{2}$,$C_{3}$,$C_{4}$,$C_{5}$,$C_{6}$,$C_{7}$,$C_{8}$,$C_{9}$,$C_{10}$,$C_{11}$ 2 | $A_{1}$,0.08149258780275177,0.06923063446544006,0.16539466341358622,0.12398888021873616,0.12537637958922696,0.07419473303809192,0.07076788905601401,0.07737366929434679,0.057474302412452084,0.05356369500362399,0.10114256570573006 3 | $A_{2}$,0.12038105475979202,0.08764048230128121,0.07660281840778414,0.12574607311204458,0.05720307945243896,0.0806882775550099,0.07950860005600449,0.08105271808792865,0.0710728509937187,0.11094341250709619,0.10916063276690109 4 | $A_{3}$,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 5 | $A_{4}$,0.0439411412269504,0.08181732204310924,0.06502948711785592,0.055917552901565096,0.0669235595112236,0.08677543507793362,0.08937944176936635,0.08104627822773469,0.08204172312814885,0.2125638835839456,0.13456417541216673 6 | $A_{5}$,0.02021529457604862,0.31188130734918884,0.009518676427058882,0.018356351073732936,0.08827669666757966,0.04085282361090218,0.028546145769598887,0.0171898576280174,0.04566248450317305,0.1983866279599678,0.22111373443473167 7 | $A_{6}$,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 8 | $A_{7}$,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 9 | $A_{8}$,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 10 | $A_{9}$,0.09491701707030739,0.11506937488072184,0.0659536455545521,0.06333551707606894,0.10674522319496656,0.10423651858738235,0.09943361654361814,0.10459886736900796,0.13114497030934774,0.055236384477609535,0.0593288649364174 11 | $A_{10}$,0.05193981102376379,0.027417790032211863,0.033811874056840406,0.1551683897499285,0.03657905048290103,0.07601885641611668,0.13215871190988152,0.07595219335122966,0.07713431266489167,0.23319626629619206,0.10062274401604297 12 | $A_{11}$,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0 13 | 
$A_{12}$,0.07842307502671954,0.03762886470882343,0.04228020215736445,0.0465191327543591,0.041440877254519205,0.15055304666635394,0.14743620828303325,0.14598460019708615,0.03919085700588104,0.15319439910101526,0.11734873684484456 14 | -------------------------------------------------------------------------------- /examples/results_smaa/fr.csv: -------------------------------------------------------------------------------- 1 | ,Rank 2 | $A_{1}$,3 3 | $A_{2}$,1 4 | $A_{3}$,6 5 | $A_{4}$,4 6 | $A_{5}$,9 7 | $A_{6}$,10 8 | $A_{7}$,12 9 | $A_{8}$,11 10 | $A_{9}$,2 11 | $A_{10}$,7 12 | $A_{11}$,5 13 | $A_{12}$,8 14 | -------------------------------------------------------------------------------- /examples/results_update/barplot_rankings.eps: -------------------------------------------------------------------------------- 1 | %!PS-Adobe-3.0 EPSF-3.0 2 | %%Title: barplot_rankings.eps 3 | %%Creator: Matplotlib v3.7.1, https://matplotlib.org/ 4 | %%CreationDate: Thu Sep 21 16:02:05 2023 5 | %%Orientation: portrait 6 | %%BoundingBox: -18 252 630 540 7 | %%HiResBoundingBox: -18.000000 252.000000 630.000000 540.000000 8 | %%EndComments 9 | %%BeginProlog 10 | /mpldict 12 dict def 11 | mpldict begin 12 | /_d { bind def } bind def 13 | /m { moveto } _d 14 | /l { lineto } _d 15 | /r { rlineto } _d 16 | /c { curveto } _d 17 | /cl { closepath } _d 18 | /ce { closepath eofill } _d 19 | /box { 20 | m 21 | 1 index 0 r 22 | 0 exch r 23 | neg 0 r 24 | cl 25 | } _d 26 | /clipbox { 27 | box 28 | clip 29 | newpath 30 | } _d 31 | /sc { setcachedevice } _d 32 | %!PS-Adobe-3.0 Resource-Font 33 | %%Creator: Converted from TrueType to Type 3 by Matplotlib. 
34 | 10 dict begin 35 | /FontName /DejaVuSans-Oblique def 36 | /PaintType 0 def 37 | /FontMatrix [0.00048828125 0 0 0.00048828125 0 0] def 38 | /FontBBox [-2080 -717 3398 2187] def 39 | /FontType 3 def 40 | /Encoding [/A] def 41 | /CharStrings 2 dict dup begin 42 | /.notdef 0 def 43 | /A{1401 0 -109 0 1260 1493 sc 44 | 754 1493 m 45 | 983 1493 l 46 | 1260 0 l 47 | 1049 0 l 48 | 987 383 l 49 | 315 383 l 50 | 104 0 l 51 | -109 0 l 52 | 754 1493 l 53 | 54 | 827 1294 m 55 | 408 551 l 56 | 956 551 l 57 | 827 1294 l 58 | 59 | ce} _d 60 | end readonly def 61 | 62 | /BuildGlyph { 63 | exch begin 64 | CharStrings exch 65 | 2 copy known not {pop /.notdef} if 66 | true 3 1 roll get exec 67 | end 68 | } _d 69 | 70 | /BuildChar { 71 | 1 index /Encoding get exch get 72 | 1 index /BuildGlyph get exec 73 | } _d 74 | 75 | FontName currentdict end definefont pop 76 | %!PS-Adobe-3.0 Resource-Font 77 | %%Creator: Converted from TrueType to Type 3 by Matplotlib. 78 | 10 dict begin 79 | /FontName /DejaVuSans def 80 | /PaintType 0 def 81 | /FontMatrix [0.00048828125 0 0 0.00048828125 0 0] def 82 | /FontBBox [-2090 -948 3673 2524] def 83 | /FontType 3 def 84 | /Encoding [/space /one /two /three /four /A /B /E /H /L /O /P /R /S /V /W /a /d /e /g /h /i /k /l /m /n /o /r /s /t /v] def 85 | /CharStrings 32 dict dup begin 86 | /.notdef 0 def 87 | /space{651 0 0 0 0 0 sc 88 | ce} _d 89 | /one{1303 0 225 0 1114 1493 sc 90 | 254 170 m 91 | 584 170 l 92 | 584 1309 l 93 | 225 1237 l 94 | 225 1421 l 95 | 582 1493 l 96 | 784 1493 l 97 | 784 170 l 98 | 1114 170 l 99 | 1114 0 l 100 | 254 0 l 101 | 254 170 l 102 | 103 | ce} _d 104 | /two{1303 0 150 0 1098 1520 sc 105 | 393 170 m 106 | 1098 170 l 107 | 1098 0 l 108 | 150 0 l 109 | 150 170 l 110 | 227 249 331 356 463 489 c 111 | 596 623 679 709 713 748 c 112 | 778 821 823 882 848 932 c 113 | 874 983 887 1032 887 1081 c 114 | 887 1160 859 1225 803 1275 c 115 | 748 1325 675 1350 586 1350 c 116 | 523 1350 456 1339 385 1317 c 117 | 315 1295 240 1262 160 1217 
c 118 | 160 1421 l 119 | 241 1454 317 1478 388 1495 c 120 | 459 1512 523 1520 582 1520 c 121 | 737 1520 860 1481 952 1404 c 122 | 1044 1327 1090 1223 1090 1094 c 123 | 1090 1033 1078 974 1055 919 c 124 | 1032 864 991 800 930 725 c 125 | 913 706 860 650 771 557 c 126 | 682 465 556 336 393 170 c 127 | 128 | ce} _d 129 | /three{1303 0 156 -29 1139 1520 sc 130 | 831 805 m 131 | 928 784 1003 741 1057 676 c 132 | 1112 611 1139 530 1139 434 c 133 | 1139 287 1088 173 987 92 c 134 | 886 11 742 -29 555 -29 c 135 | 492 -29 428 -23 361 -10 c 136 | 295 2 227 20 156 45 c 137 | 156 240 l 138 | 212 207 273 183 340 166 c 139 | 407 149 476 141 549 141 c 140 | 676 141 772 166 838 216 c 141 | 905 266 938 339 938 434 c 142 | 938 522 907 591 845 640 c 143 | 784 690 698 715 588 715 c 144 | 414 715 l 145 | 414 881 l 146 | 596 881 l 147 | 695 881 771 901 824 940 c 148 | 877 980 903 1037 903 1112 c 149 | 903 1189 876 1247 821 1288 c 150 | 767 1329 689 1350 588 1350 c 151 | 533 1350 473 1344 410 1332 c 152 | 347 1320 277 1301 201 1276 c 153 | 201 1456 l 154 | 278 1477 349 1493 416 1504 c 155 | 483 1515 547 1520 606 1520 c 156 | 759 1520 881 1485 970 1415 c 157 | 1059 1346 1104 1252 1104 1133 c 158 | 1104 1050 1080 980 1033 923 c 159 | 986 866 918 827 831 805 c 160 | 161 | ce} _d 162 | /four{1303 0 100 0 1188 1493 sc 163 | 774 1317 m 164 | 264 520 l 165 | 774 520 l 166 | 774 1317 l 167 | 168 | 721 1493 m 169 | 975 1493 l 170 | 975 520 l 171 | 1188 520 l 172 | 1188 352 l 173 | 975 352 l 174 | 975 0 l 175 | 774 0 l 176 | 774 352 l 177 | 100 352 l 178 | 100 547 l 179 | 721 1493 l 180 | 181 | ce} _d 182 | /A{1401 0 16 0 1384 1493 sc 183 | 700 1294 m 184 | 426 551 l 185 | 975 551 l 186 | 700 1294 l 187 | 188 | 586 1493 m 189 | 815 1493 l 190 | 1384 0 l 191 | 1174 0 l 192 | 1038 383 l 193 | 365 383 l 194 | 229 0 l 195 | 16 0 l 196 | 586 1493 l 197 | 198 | ce} _d 199 | /B{1405 0 201 0 1260 1493 sc 200 | 403 713 m 201 | 403 166 l 202 | 727 166 l 203 | 836 166 916 188 968 233 c 204 | 1021 278 1047 347 
1047 440 c 205 | 1047 533 1021 602 968 646 c 206 | 916 691 836 713 727 713 c 207 | 403 713 l 208 | 209 | 403 1327 m 210 | 403 877 l 211 | 702 877 l 212 | 801 877 874 895 922 932 c 213 | 971 969 995 1026 995 1102 c 214 | 995 1177 971 1234 922 1271 c 215 | 874 1308 801 1327 702 1327 c 216 | 403 1327 l 217 | 218 | 201 1493 m 219 | 717 1493 l 220 | 871 1493 990 1461 1073 1397 c 221 | 1156 1333 1198 1242 1198 1124 c 222 | 1198 1033 1177 960 1134 906 c 223 | 1091 852 1029 818 946 805 c 224 | 1045 784 1122 739 1177 671 c 225 | 1232 604 1260 519 1260 418 c 226 | 1260 285 1215 182 1124 109 c 227 | 1033 36 904 0 737 0 c 228 | 201 0 l 229 | 201 1493 l 230 | 231 | ce} _d 232 | /E{1294 0 201 0 1163 1493 sc 233 | 201 1493 m 234 | 1145 1493 l 235 | 1145 1323 l 236 | 403 1323 l 237 | 403 881 l 238 | 1114 881 l 239 | 1114 711 l 240 | 403 711 l 241 | 403 170 l 242 | 1163 170 l 243 | 1163 0 l 244 | 201 0 l 245 | 201 1493 l 246 | 247 | ce} _d 248 | /H{1540 0 201 0 1339 1493 sc 249 | 201 1493 m 250 | 403 1493 l 251 | 403 881 l 252 | 1137 881 l 253 | 1137 1493 l 254 | 1339 1493 l 255 | 1339 0 l 256 | 1137 0 l 257 | 1137 711 l 258 | 403 711 l 259 | 403 0 l 260 | 201 0 l 261 | 201 1493 l 262 | 263 | ce} _d 264 | /L{1141 0 201 0 1130 1493 sc 265 | 201 1493 m 266 | 403 1493 l 267 | 403 170 l 268 | 1130 170 l 269 | 1130 0 l 270 | 201 0 l 271 | 201 1493 l 272 | 273 | ce} _d 274 | /O{1612 0 115 -29 1497 1520 sc 275 | 807 1356 m 276 | 660 1356 544 1301 457 1192 c 277 | 371 1083 328 934 328 745 c 278 | 328 557 371 408 457 299 c 279 | 544 190 660 135 807 135 c 280 | 954 135 1070 190 1155 299 c 281 | 1241 408 1284 557 1284 745 c 282 | 1284 934 1241 1083 1155 1192 c 283 | 1070 1301 954 1356 807 1356 c 284 | 285 | 807 1520 m 286 | 1016 1520 1184 1450 1309 1309 c 287 | 1434 1169 1497 981 1497 745 c 288 | 1497 510 1434 322 1309 181 c 289 | 1184 41 1016 -29 807 -29 c 290 | 597 -29 429 41 303 181 c 291 | 178 321 115 509 115 745 c 292 | 115 981 178 1169 303 1309 c 293 | 429 1450 597 1520 807 1520 c 294 | 
295 | ce} _d 296 | /P{1235 0 201 0 1165 1493 sc 297 | 403 1327 m 298 | 403 766 l 299 | 657 766 l 300 | 751 766 824 790 875 839 c 301 | 926 888 952 957 952 1047 c 302 | 952 1136 926 1205 875 1254 c 303 | 824 1303 751 1327 657 1327 c 304 | 403 1327 l 305 | 306 | 201 1493 m 307 | 657 1493 l 308 | 824 1493 951 1455 1036 1379 c 309 | 1122 1304 1165 1193 1165 1047 c 310 | 1165 900 1122 788 1036 713 c 311 | 951 638 824 600 657 600 c 312 | 403 600 l 313 | 403 0 l 314 | 201 0 l 315 | 201 1493 l 316 | 317 | ce} _d 318 | /R{1423 0 201 0 1364 1493 sc 319 | 909 700 m 320 | 952 685 994 654 1035 606 c 321 | 1076 558 1118 492 1159 408 c 322 | 1364 0 l 323 | 1147 0 l 324 | 956 383 l 325 | 907 483 859 549 812 582 c 326 | 766 615 703 631 623 631 c 327 | 403 631 l 328 | 403 0 l 329 | 201 0 l 330 | 201 1493 l 331 | 657 1493 l 332 | 828 1493 955 1457 1039 1386 c 333 | 1123 1315 1165 1207 1165 1063 c 334 | 1165 969 1143 891 1099 829 c 335 | 1056 767 992 724 909 700 c 336 | 337 | 403 1327 m 338 | 403 797 l 339 | 657 797 l 340 | 754 797 828 819 877 864 c 341 | 927 909 952 976 952 1063 c 342 | 952 1150 927 1216 877 1260 c 343 | 828 1305 754 1327 657 1327 c 344 | 403 1327 l 345 | 346 | ce} _d 347 | /S{1300 0 135 -29 1186 1520 sc 348 | 1096 1444 m 349 | 1096 1247 l 350 | 1019 1284 947 1311 879 1329 c 351 | 811 1347 745 1356 682 1356 c 352 | 572 1356 487 1335 427 1292 c 353 | 368 1249 338 1189 338 1110 c 354 | 338 1044 358 994 397 960 c 355 | 437 927 512 900 623 879 c 356 | 745 854 l 357 | 896 825 1007 775 1078 702 c 358 | 1150 630 1186 533 1186 412 c 359 | 1186 267 1137 158 1040 83 c 360 | 943 8 801 -29 614 -29 c 361 | 543 -29 468 -21 388 -5 c 362 | 309 11 226 35 141 66 c 363 | 141 274 l 364 | 223 228 303 193 382 170 c 365 | 461 147 538 135 614 135 c 366 | 729 135 818 158 881 203 c 367 | 944 248 975 313 975 397 c 368 | 975 470 952 528 907 569 c 369 | 862 610 789 641 686 662 c 370 | 563 686 l 371 | 412 716 303 763 236 827 c 372 | 169 891 135 980 135 1094 c 373 | 135 1226 181 1330 274 1406 c 
374 | 367 1482 496 1520 659 1520 c 375 | 729 1520 800 1514 873 1501 c 376 | 946 1488 1020 1469 1096 1444 c 377 | 378 | ce} _d 379 | /V{1401 0 16 0 1384 1493 sc 380 | 586 0 m 381 | 16 1493 l 382 | 227 1493 l 383 | 700 236 l 384 | 1174 1493 l 385 | 1384 1493 l 386 | 815 0 l 387 | 586 0 l 388 | 389 | ce} _d 390 | /W{2025 0 68 0 1958 1493 sc 391 | 68 1493 m 392 | 272 1493 l 393 | 586 231 l 394 | 899 1493 l 395 | 1126 1493 l 396 | 1440 231 l 397 | 1753 1493 l 398 | 1958 1493 l 399 | 1583 0 l 400 | 1329 0 l 401 | 1014 1296 l 402 | 696 0 l 403 | 442 0 l 404 | 68 1493 l 405 | 406 | ce} _d 407 | /a{1255 0 123 -29 1069 1147 sc 408 | 702 563 m 409 | 553 563 450 546 393 512 c 410 | 336 478 307 420 307 338 c 411 | 307 273 328 221 371 182 c 412 | 414 144 473 125 547 125 c 413 | 649 125 731 161 792 233 c 414 | 854 306 885 402 885 522 c 415 | 885 563 l 416 | 702 563 l 417 | 418 | 1069 639 m 419 | 1069 0 l 420 | 885 0 l 421 | 885 170 l 422 | 843 102 791 52 728 19 c 423 | 665 -13 589 -29 498 -29 c 424 | 383 -29 292 3 224 67 c 425 | 157 132 123 218 123 326 c 426 | 123 452 165 547 249 611 c 427 | 334 675 460 707 627 707 c 428 | 885 707 l 429 | 885 725 l 430 | 885 810 857 875 801 921 c 431 | 746 968 668 991 567 991 c 432 | 503 991 441 983 380 968 c 433 | 319 953 261 930 205 899 c 434 | 205 1069 l 435 | 272 1095 338 1114 401 1127 c 436 | 464 1140 526 1147 586 1147 c 437 | 748 1147 869 1105 949 1021 c 438 | 1029 937 1069 810 1069 639 c 439 | 440 | ce} _d 441 | /d{1300 0 113 -29 1114 1556 sc 442 | 930 950 m 443 | 930 1556 l 444 | 1114 1556 l 445 | 1114 0 l 446 | 930 0 l 447 | 930 168 l 448 | 891 101 842 52 783 19 c 449 | 724 -13 654 -29 571 -29 c 450 | 436 -29 325 25 240 133 c 451 | 155 241 113 383 113 559 c 452 | 113 735 155 877 240 985 c 453 | 325 1093 436 1147 571 1147 c 454 | 654 1147 724 1131 783 1098 c 455 | 842 1066 891 1017 930 950 c 456 | 457 | 303 559 m 458 | 303 424 331 317 386 240 c 459 | 442 163 519 125 616 125 c 460 | 713 125 790 163 846 240 c 461 | 902 317 930 424 930 559 c 
462 | 930 694 902 800 846 877 c 463 | 790 954 713 993 616 993 c 464 | 519 993 442 954 386 877 c 465 | 331 800 303 694 303 559 c 466 | 467 | ce} _d 468 | /e{1260 0 113 -29 1151 1147 sc 469 | 1151 606 m 470 | 1151 516 l 471 | 305 516 l 472 | 313 389 351 293 419 226 c 473 | 488 160 583 127 705 127 c 474 | 776 127 844 136 910 153 c 475 | 977 170 1043 196 1108 231 c 476 | 1108 57 l 477 | 1042 29 974 8 905 -7 c 478 | 836 -22 765 -29 694 -29 c 479 | 515 -29 374 23 269 127 c 480 | 165 231 113 372 113 549 c 481 | 113 732 162 878 261 985 c 482 | 360 1093 494 1147 662 1147 c 483 | 813 1147 932 1098 1019 1001 c 484 | 1107 904 1151 773 1151 606 c 485 | 486 | 967 660 m 487 | 966 761 937 841 882 901 c 488 | 827 961 755 991 664 991 c 489 | 561 991 479 962 417 904 c 490 | 356 846 320 764 311 659 c 491 | 967 660 l 492 | 493 | ce} _d 494 | /g{1300 0 113 -426 1114 1147 sc 495 | 930 573 m 496 | 930 706 902 810 847 883 c 497 | 792 956 715 993 616 993 c 498 | 517 993 440 956 385 883 c 499 | 330 810 303 706 303 573 c 500 | 303 440 330 337 385 264 c 501 | 440 191 517 154 616 154 c 502 | 715 154 792 191 847 264 c 503 | 902 337 930 440 930 573 c 504 | 505 | 1114 139 m 506 | 1114 -52 1072 -193 987 -286 c 507 | 902 -379 773 -426 598 -426 c 508 | 533 -426 472 -421 415 -411 c 509 | 358 -402 302 -387 248 -367 c 510 | 248 -188 l 511 | 302 -217 355 -239 408 -253 c 512 | 461 -267 514 -274 569 -274 c 513 | 690 -274 780 -242 840 -179 c 514 | 900 -116 930 -21 930 106 c 515 | 930 197 l 516 | 892 131 843 82 784 49 c 517 | 725 16 654 0 571 0 c 518 | 434 0 323 52 239 157 c 519 | 155 262 113 400 113 573 c 520 | 113 746 155 885 239 990 c 521 | 323 1095 434 1147 571 1147 c 522 | 654 1147 725 1131 784 1098 c 523 | 843 1065 892 1016 930 950 c 524 | 930 1120 l 525 | 1114 1120 l 526 | 1114 139 l 527 | 528 | ce} _d 529 | /h{1298 0 186 0 1124 1556 sc 530 | 1124 676 m 531 | 1124 0 l 532 | 940 0 l 533 | 940 670 l 534 | 940 776 919 855 878 908 c 535 | 837 961 775 987 692 987 c 536 | 593 987 514 955 457 892 c 537 | 400 
829 371 742 371 633 c 538 | 371 0 l 539 | 186 0 l 540 | 186 1556 l 541 | 371 1556 l 542 | 371 946 l 543 | 415 1013 467 1064 526 1097 c 544 | 586 1130 655 1147 733 1147 c 545 | 862 1147 959 1107 1025 1027 c 546 | 1091 948 1124 831 1124 676 c 547 | 548 | ce} _d 549 | /i{569 0 193 0 377 1556 sc 550 | 193 1120 m 551 | 377 1120 l 552 | 377 0 l 553 | 193 0 l 554 | 193 1120 l 555 | 556 | 193 1556 m 557 | 377 1556 l 558 | 377 1323 l 559 | 193 1323 l 560 | 193 1556 l 561 | 562 | ce} _d 563 | /k{1186 0 186 0 1180 1556 sc 564 | 186 1556 m 565 | 371 1556 l 566 | 371 637 l 567 | 920 1120 l 568 | 1155 1120 l 569 | 561 596 l 570 | 1180 0 l 571 | 940 0 l 572 | 371 547 l 573 | 371 0 l 574 | 186 0 l 575 | 186 1556 l 576 | 577 | ce} _d 578 | /l{569 0 193 0 377 1556 sc 579 | 193 1556 m 580 | 377 1556 l 581 | 377 0 l 582 | 193 0 l 583 | 193 1556 l 584 | 585 | ce} _d 586 | /m{1995 0 186 0 1821 1147 sc 587 | 1065 905 m 588 | 1111 988 1166 1049 1230 1088 c 589 | 1294 1127 1369 1147 1456 1147 c 590 | 1573 1147 1663 1106 1726 1024 c 591 | 1789 943 1821 827 1821 676 c 592 | 1821 0 l 593 | 1636 0 l 594 | 1636 670 l 595 | 1636 777 1617 857 1579 909 c 596 | 1541 961 1483 987 1405 987 c 597 | 1310 987 1234 955 1179 892 c 598 | 1124 829 1096 742 1096 633 c 599 | 1096 0 l 600 | 911 0 l 601 | 911 670 l 602 | 911 778 892 858 854 909 c 603 | 816 961 757 987 678 987 c 604 | 584 987 509 955 454 891 c 605 | 399 828 371 742 371 633 c 606 | 371 0 l 607 | 186 0 l 608 | 186 1120 l 609 | 371 1120 l 610 | 371 946 l 611 | 413 1015 463 1065 522 1098 c 612 | 581 1131 650 1147 731 1147 c 613 | 812 1147 881 1126 938 1085 c 614 | 995 1044 1038 984 1065 905 c 615 | 616 | ce} _d 617 | /n{1298 0 186 0 1124 1147 sc 618 | 1124 676 m 619 | 1124 0 l 620 | 940 0 l 621 | 940 670 l 622 | 940 776 919 855 878 908 c 623 | 837 961 775 987 692 987 c 624 | 593 987 514 955 457 892 c 625 | 400 829 371 742 371 633 c 626 | 371 0 l 627 | 186 0 l 628 | 186 1120 l 629 | 371 1120 l 630 | 371 946 l 631 | 415 1013 467 1064 526 1097 c 632 | 
586 1130 655 1147 733 1147 c 633 | 862 1147 959 1107 1025 1027 c 634 | 1091 948 1124 831 1124 676 c 635 | 636 | ce} _d 637 | /o{1253 0 113 -29 1141 1147 sc 638 | 627 991 m 639 | 528 991 450 952 393 875 c 640 | 336 798 307 693 307 559 c 641 | 307 425 335 319 392 242 c 642 | 449 165 528 127 627 127 c 643 | 725 127 803 166 860 243 c 644 | 917 320 946 426 946 559 c 645 | 946 692 917 797 860 874 c 646 | 803 952 725 991 627 991 c 647 | 648 | 627 1147 m 649 | 787 1147 913 1095 1004 991 c 650 | 1095 887 1141 743 1141 559 c 651 | 1141 376 1095 232 1004 127 c 652 | 913 23 787 -29 627 -29 c 653 | 466 -29 340 23 249 127 c 654 | 158 232 113 376 113 559 c 655 | 113 743 158 887 249 991 c 656 | 340 1095 466 1147 627 1147 c 657 | 658 | ce} _d 659 | /r{842 0 186 0 842 1147 sc 660 | 842 948 m 661 | 821 960 799 969 774 974 c 662 | 750 980 723 983 694 983 c 663 | 590 983 510 949 454 881 c 664 | 399 814 371 717 371 590 c 665 | 371 0 l 666 | 186 0 l 667 | 186 1120 l 668 | 371 1120 l 669 | 371 946 l 670 | 410 1014 460 1064 522 1097 c 671 | 584 1130 659 1147 748 1147 c 672 | 761 1147 775 1146 790 1144 c 673 | 805 1143 822 1140 841 1137 c 674 | 842 948 l 675 | 676 | ce} _d 677 | /s{1067 0 111 -29 967 1147 sc 678 | 907 1087 m 679 | 907 913 l 680 | 855 940 801 960 745 973 c 681 | 689 986 631 993 571 993 c 682 | 480 993 411 979 365 951 c 683 | 320 923 297 881 297 825 c 684 | 297 782 313 749 346 724 c 685 | 379 700 444 677 543 655 c 686 | 606 641 l 687 | 737 613 829 573 884 522 c 688 | 939 471 967 400 967 309 c 689 | 967 205 926 123 843 62 c 690 | 761 1 648 -29 504 -29 c 691 | 444 -29 381 -23 316 -11 c 692 | 251 0 183 18 111 41 c 693 | 111 231 l 694 | 179 196 246 169 312 151 c 695 | 378 134 443 125 508 125 c 696 | 595 125 661 140 708 169 c 697 | 755 199 778 241 778 295 c 698 | 778 345 761 383 727 410 c 699 | 694 437 620 462 506 487 c 700 | 442 502 l 701 | 328 526 246 563 195 612 c 702 | 144 662 119 730 119 817 c 703 | 119 922 156 1004 231 1061 c 704 | 306 1118 412 1147 549 1147 c 705 | 617 1147 
681 1142 741 1132 c 706 | 801 1122 856 1107 907 1087 c 707 | 708 | ce} _d 709 | /t{803 0 55 0 754 1438 sc 710 | 375 1438 m 711 | 375 1120 l 712 | 754 1120 l 713 | 754 977 l 714 | 375 977 l 715 | 375 369 l 716 | 375 278 387 219 412 193 c 717 | 437 167 488 154 565 154 c 718 | 754 154 l 719 | 754 0 l 720 | 565 0 l 721 | 423 0 325 26 271 79 c 722 | 217 132 190 229 190 369 c 723 | 190 977 l 724 | 55 977 l 725 | 55 1120 l 726 | 190 1120 l 727 | 190 1438 l 728 | 375 1438 l 729 | 730 | ce} _d 731 | /v{1212 0 61 0 1151 1120 sc 732 | 61 1120 m 733 | 256 1120 l 734 | 606 180 l 735 | 956 1120 l 736 | 1151 1120 l 737 | 731 0 l 738 | 481 0 l 739 | 61 1120 l 740 | 741 | ce} _d 742 | end readonly def 743 | 744 | /BuildGlyph { 745 | exch begin 746 | CharStrings exch 747 | 2 copy known not {pop /.notdef} if 748 | true 3 1 roll get exec 749 | end 750 | } _d 751 | 752 | /BuildChar { 753 | 1 index /Encoding get exch get 754 | 1 index /BuildGlyph get exec 755 | } _d 756 | 757 | FontName currentdict end definefont pop 758 | end 759 | %%EndProlog 760 | mpldict begin 761 | -18 252 translate 762 | 648 288 0 0 clipbox 763 | gsave 764 | 0 0 m 765 | 648 0 l 766 | 648 288 l 767 | 0 288 l 768 | cl 769 | 1.000 setgray 770 | fill 771 | grestore 772 | gsave 773 | 42.41 49.16 m 774 | 637.2 49.16 l 775 | 637.2 234.1248 l 776 | 42.41 234.1248 l 777 | cl 778 | 1.000 setgray 779 | fill 780 | grestore 781 | 0.800 setlinewidth 782 | 1 setlinejoin 783 | 0 setlinecap 784 | [2.96 1.28] 0 setdash 785 | 0.690 setgray 786 | gsave 787 | 594.79 184.965 42.41 49.16 clipbox 788 | 122.198902 49.16 m 789 | 122.198902 234.1248 l 790 | stroke 791 | grestore 792 | [] 0 setdash 793 | 0.000 setgray 794 | gsave 795 | /o { 796 | gsave 797 | newpath 798 | translate 799 | 0.8 setlinewidth 800 | 1 setlinejoin 801 | 802 | 0 setlinecap 803 | 804 | 0 0 m 805 | 0 -3.5 l 806 | 807 | gsave 808 | 0.000 setgray 809 | fill 810 | grestore 811 | stroke 812 | grestore 813 | } bind def 814 | 122.199 49.16 o 815 | grestore 816 | gsave 817 | 
115.199 33.035 translate 818 | 0 rotate 819 | /DejaVuSans-Oblique 12.0 selectfont 820 | 0 0.25 moveto 821 | /A glyphshow 822 | /DejaVuSans 8.399999999999999 selectfont 823 | 8.20898 -1.71875 moveto 824 | /one glyphshow 825 | grestore 826 | [2.96 1.28] 0 setdash 827 | 0.690 setgray 828 | gsave 829 | 594.79 184.965 42.41 49.16 clipbox 830 | 267.269634 49.16 m 831 | 267.269634 234.1248 l 832 | stroke 833 | grestore 834 | [] 0 setdash 835 | 0.000 setgray 836 | gsave 837 | /o { 838 | gsave 839 | newpath 840 | translate 841 | 0.8 setlinewidth 842 | 1 setlinejoin 843 | 844 | 0 setlinecap 845 | 846 | 0 0 m 847 | 0 -3.5 l 848 | 849 | gsave 850 | 0.000 setgray 851 | fill 852 | grestore 853 | stroke 854 | grestore 855 | } bind def 856 | 267.27 49.16 o 857 | grestore 858 | gsave 859 | 260.27 33.035 translate 860 | 0 rotate 861 | /DejaVuSans-Oblique 12.0 selectfont 862 | 0 0.25 moveto 863 | /A glyphshow 864 | /DejaVuSans 8.399999999999999 selectfont 865 | 8.20898 -1.71875 moveto 866 | /two glyphshow 867 | grestore 868 | [2.96 1.28] 0 setdash 869 | 0.690 setgray 870 | gsave 871 | 594.79 184.965 42.41 49.16 clipbox 872 | 412.340366 49.16 m 873 | 412.340366 234.1248 l 874 | stroke 875 | grestore 876 | [] 0 setdash 877 | 0.000 setgray 878 | gsave 879 | /o { 880 | gsave 881 | newpath 882 | translate 883 | 0.8 setlinewidth 884 | 1 setlinejoin 885 | 886 | 0 setlinecap 887 | 888 | 0 0 m 889 | 0 -3.5 l 890 | 891 | gsave 892 | 0.000 setgray 893 | fill 894 | grestore 895 | stroke 896 | grestore 897 | } bind def 898 | 412.34 49.16 o 899 | grestore 900 | gsave 901 | 405.34 33.16 translate 902 | 0 rotate 903 | /DejaVuSans-Oblique 12.0 selectfont 904 | 0 0.25 moveto 905 | /A glyphshow 906 | /DejaVuSans 8.399999999999999 selectfont 907 | 8.20898 -1.71875 moveto 908 | /three glyphshow 909 | grestore 910 | [2.96 1.28] 0 setdash 911 | 0.690 setgray 912 | gsave 913 | 594.79 184.965 42.41 49.16 clipbox 914 | 557.411098 49.16 m 915 | 557.411098 234.1248 l 916 | stroke 917 | grestore 918 | [] 0 
setdash 919 | 0.000 setgray 920 | gsave 921 | /o { 922 | gsave 923 | newpath 924 | translate 925 | 0.8 setlinewidth 926 | 1 setlinejoin 927 | 928 | 0 setlinecap 929 | 930 | 0 0 m 931 | 0 -3.5 l 932 | 933 | gsave 934 | 0.000 setgray 935 | fill 936 | grestore 937 | stroke 938 | grestore 939 | } bind def 940 | 557.411 49.16 o 941 | grestore 942 | gsave 943 | 550.411 33.035 translate 944 | 0 rotate 945 | /DejaVuSans-Oblique 12.0 selectfont 946 | 0 0.25 moveto 947 | /A glyphshow 948 | /DejaVuSans 8.399999999999999 selectfont 949 | 8.20898 -1.71875 moveto 950 | /four glyphshow 951 | grestore 952 | /DejaVuSans 12.000 selectfont 953 | gsave 954 | 955 | 303.774 17.035 translate 956 | 0 rotate 957 | 0 0 m /A glyphshow 958 | 8.20898 0 m /l glyphshow 959 | 11.543 0 m /t glyphshow 960 | 16.248 0 m /e glyphshow 961 | 23.6309 0 m /r glyphshow 962 | 28.3145 0 m /n glyphshow 963 | 35.9199 0 m /a glyphshow 964 | 43.2734 0 m /t glyphshow 965 | 47.9785 0 m /i glyphshow 966 | 51.3125 0 m /v glyphshow 967 | 58.4141 0 m /e glyphshow 968 | 65.7969 0 m /s glyphshow 969 | grestore 970 | [2.96 1.28] 0 setdash 971 | 0.690 setgray 972 | gsave 973 | 594.79 184.965 42.41 49.16 clipbox 974 | 42.41 86.15296 m 975 | 637.2 86.15296 l 976 | stroke 977 | grestore 978 | [] 0 setdash 979 | 0.000 setgray 980 | gsave 981 | /o { 982 | gsave 983 | newpath 984 | translate 985 | 0.8 setlinewidth 986 | 1 setlinejoin 987 | 988 | 0 setlinecap 989 | 990 | -0 0 m 991 | -3.5 0 l 992 | 993 | gsave 994 | 0.000 setgray 995 | fill 996 | grestore 997 | stroke 998 | grestore 999 | } bind def 1000 | 42.41 86.153 o 1001 | grestore 1002 | /DejaVuSans 12.000 selectfont 1003 | gsave 1004 | 1005 | 27.7694 81.5905 translate 1006 | 0 rotate 1007 | 0 0 m /one glyphshow 1008 | grestore 1009 | [2.96 1.28] 0 setdash 1010 | 0.690 setgray 1011 | gsave 1012 | 594.79 184.965 42.41 49.16 clipbox 1013 | 42.41 123.14592 m 1014 | 637.2 123.14592 l 1015 | stroke 1016 | grestore 1017 | [] 0 setdash 1018 | 0.000 setgray 1019 | gsave 1020 | /o 
{ 1021 | gsave 1022 | newpath 1023 | translate 1024 | 0.8 setlinewidth 1025 | 1 setlinejoin 1026 | 1027 | 0 setlinecap 1028 | 1029 | -0 0 m 1030 | -3.5 0 l 1031 | 1032 | gsave 1033 | 0.000 setgray 1034 | fill 1035 | grestore 1036 | stroke 1037 | grestore 1038 | } bind def 1039 | 42.41 123.146 o 1040 | grestore 1041 | /DejaVuSans 12.000 selectfont 1042 | gsave 1043 | 1044 | 27.7694 118.583 translate 1045 | 0 rotate 1046 | 0 0 m /two glyphshow 1047 | grestore 1048 | [2.96 1.28] 0 setdash 1049 | 0.690 setgray 1050 | gsave 1051 | 594.79 184.965 42.41 49.16 clipbox 1052 | 42.41 160.13888 m 1053 | 637.2 160.13888 l 1054 | stroke 1055 | grestore 1056 | [] 0 setdash 1057 | 0.000 setgray 1058 | gsave 1059 | /o { 1060 | gsave 1061 | newpath 1062 | translate 1063 | 0.8 setlinewidth 1064 | 1 setlinejoin 1065 | 1066 | 0 setlinecap 1067 | 1068 | -0 0 m 1069 | -3.5 0 l 1070 | 1071 | gsave 1072 | 0.000 setgray 1073 | fill 1074 | grestore 1075 | stroke 1076 | grestore 1077 | } bind def 1078 | 42.41 160.139 o 1079 | grestore 1080 | /DejaVuSans 12.000 selectfont 1081 | gsave 1082 | 1083 | 27.7694 155.576 translate 1084 | 0 rotate 1085 | 0 0 m /three glyphshow 1086 | grestore 1087 | [2.96 1.28] 0 setdash 1088 | 0.690 setgray 1089 | gsave 1090 | 594.79 184.965 42.41 49.16 clipbox 1091 | 42.41 197.13184 m 1092 | 637.2 197.13184 l 1093 | stroke 1094 | grestore 1095 | [] 0 setdash 1096 | 0.000 setgray 1097 | gsave 1098 | /o { 1099 | gsave 1100 | newpath 1101 | translate 1102 | 0.8 setlinewidth 1103 | 1 setlinejoin 1104 | 1105 | 0 setlinecap 1106 | 1107 | -0 0 m 1108 | -3.5 0 l 1109 | 1110 | gsave 1111 | 0.000 setgray 1112 | fill 1113 | grestore 1114 | stroke 1115 | grestore 1116 | } bind def 1117 | 42.41 197.132 o 1118 | grestore 1119 | /DejaVuSans 12.000 selectfont 1120 | gsave 1121 | 1122 | 27.7694 192.569 translate 1123 | 0 rotate 1124 | 0 0 m /four glyphshow 1125 | grestore 1126 | /DejaVuSans 12.000 selectfont 1127 | gsave 1128 | 1129 | 21.2694 126.635 translate 1130 | 90 rotate 1131 
| 0 0 m /R glyphshow 1132 | 8.08789 0 m /a glyphshow 1133 | 15.4414 0 m /n glyphshow 1134 | 23.0469 0 m /k glyphshow 1135 | grestore 1136 | 1.000 setlinewidth 1137 | 0 setlinejoin 1138 | gsave 1139 | 594.79 184.965 42.41 49.16 clipbox 1140 | 78.677683 49.16 m 1141 | 100.438293 49.16 l 1142 | 100.438293 197.13184 l 1143 | 78.677683 197.13184 l 1144 | cl 1145 | gsave 1146 | 0.122 0.467 0.706 setrgbcolor 1147 | fill 1148 | grestore 1149 | stroke 1150 | grestore 1151 | gsave 1152 | 594.79 184.965 42.41 49.16 clipbox 1153 | 223.748415 49.16 m 1154 | 245.509024 49.16 l 1155 | 245.509024 86.15296 l 1156 | 223.748415 86.15296 l 1157 | cl 1158 | gsave 1159 | 0.122 0.467 0.706 setrgbcolor 1160 | fill 1161 | grestore 1162 | stroke 1163 | grestore 1164 | gsave 1165 | 594.79 184.965 42.41 49.16 clipbox 1166 | 368.819146 49.16 m 1167 | 390.579756 49.16 l 1168 | 390.579756 123.14592 l 1169 | 368.819146 123.14592 l 1170 | cl 1171 | gsave 1172 | 0.122 0.467 0.706 setrgbcolor 1173 | fill 1174 | grestore 1175 | stroke 1176 | grestore 1177 | gsave 1178 | 594.79 184.965 42.41 49.16 clipbox 1179 | 513.889878 49.16 m 1180 | 535.650488 49.16 l 1181 | 535.650488 160.13888 l 1182 | 513.889878 160.13888 l 1183 | cl 1184 | gsave 1185 | 0.122 0.467 0.706 setrgbcolor 1186 | fill 1187 | grestore 1188 | stroke 1189 | grestore 1190 | gsave 1191 | 594.79 184.965 42.41 49.16 clipbox 1192 | 100.438293 49.16 m 1193 | 122.198902 49.16 l 1194 | 122.198902 160.13888 l 1195 | 100.438293 160.13888 l 1196 | cl 1197 | gsave 1198 | 1.000 0.498 0.055 setrgbcolor 1199 | fill 1200 | grestore 1201 | stroke 1202 | grestore 1203 | gsave 1204 | 594.79 184.965 42.41 49.16 clipbox 1205 | 245.509024 49.16 m 1206 | 267.269634 49.16 l 1207 | 267.269634 86.15296 l 1208 | 245.509024 86.15296 l 1209 | cl 1210 | gsave 1211 | 1.000 0.498 0.055 setrgbcolor 1212 | fill 1213 | grestore 1214 | stroke 1215 | grestore 1216 | gsave 1217 | 594.79 184.965 42.41 49.16 clipbox 1218 | 390.579756 49.16 m 1219 | 412.340366 49.16 l 1220 | 
412.340366 197.13184 l 1221 | 390.579756 197.13184 l 1222 | cl 1223 | gsave 1224 | 1.000 0.498 0.055 setrgbcolor 1225 | fill 1226 | grestore 1227 | stroke 1228 | grestore 1229 | gsave 1230 | 594.79 184.965 42.41 49.16 clipbox 1231 | 535.650488 49.16 m 1232 | 557.411098 49.16 l 1233 | 557.411098 123.14592 l 1234 | 535.650488 123.14592 l 1235 | cl 1236 | gsave 1237 | 1.000 0.498 0.055 setrgbcolor 1238 | fill 1239 | grestore 1240 | stroke 1241 | grestore 1242 | gsave 1243 | 594.79 184.965 42.41 49.16 clipbox 1244 | 122.198902 49.16 m 1245 | 143.959512 49.16 l 1246 | 143.959512 160.13888 l 1247 | 122.198902 160.13888 l 1248 | cl 1249 | gsave 1250 | 0.173 0.627 0.173 setrgbcolor 1251 | fill 1252 | grestore 1253 | stroke 1254 | grestore 1255 | gsave 1256 | 594.79 184.965 42.41 49.16 clipbox 1257 | 267.269634 49.16 m 1258 | 289.030244 49.16 l 1259 | 289.030244 86.15296 l 1260 | 267.269634 86.15296 l 1261 | cl 1262 | gsave 1263 | 0.173 0.627 0.173 setrgbcolor 1264 | fill 1265 | grestore 1266 | stroke 1267 | grestore 1268 | gsave 1269 | 594.79 184.965 42.41 49.16 clipbox 1270 | 412.340366 49.16 m 1271 | 434.100976 49.16 l 1272 | 434.100976 123.14592 l 1273 | 412.340366 123.14592 l 1274 | cl 1275 | gsave 1276 | 0.173 0.627 0.173 setrgbcolor 1277 | fill 1278 | grestore 1279 | stroke 1280 | grestore 1281 | gsave 1282 | 594.79 184.965 42.41 49.16 clipbox 1283 | 557.411098 49.16 m 1284 | 579.171707 49.16 l 1285 | 579.171707 197.13184 l 1286 | 557.411098 197.13184 l 1287 | cl 1288 | gsave 1289 | 0.173 0.627 0.173 setrgbcolor 1290 | fill 1291 | grestore 1292 | stroke 1293 | grestore 1294 | gsave 1295 | 594.79 184.965 42.41 49.16 clipbox 1296 | 143.959512 49.16 m 1297 | 165.720122 49.16 l 1298 | 165.720122 197.13184 l 1299 | 143.959512 197.13184 l 1300 | cl 1301 | gsave 1302 | 0.839 0.153 0.157 setrgbcolor 1303 | fill 1304 | grestore 1305 | stroke 1306 | grestore 1307 | gsave 1308 | 594.79 184.965 42.41 49.16 clipbox 1309 | 289.030244 49.16 m 1310 | 310.790854 49.16 l 1311 | 
310.790854 86.15296 l 1312 | 289.030244 86.15296 l 1313 | cl 1314 | gsave 1315 | 0.839 0.153 0.157 setrgbcolor 1316 | fill 1317 | grestore 1318 | stroke 1319 | grestore 1320 | gsave 1321 | 594.79 184.965 42.41 49.16 clipbox 1322 | 434.100976 49.16 m 1323 | 455.861585 49.16 l 1324 | 455.861585 123.14592 l 1325 | 434.100976 123.14592 l 1326 | cl 1327 | gsave 1328 | 0.839 0.153 0.157 setrgbcolor 1329 | fill 1330 | grestore 1331 | stroke 1332 | grestore 1333 | gsave 1334 | 594.79 184.965 42.41 49.16 clipbox 1335 | 579.171707 49.16 m 1336 | 600.932317 49.16 l 1337 | 600.932317 160.13888 l 1338 | 579.171707 160.13888 l 1339 | cl 1340 | gsave 1341 | 0.839 0.153 0.157 setrgbcolor 1342 | fill 1343 | grestore 1344 | stroke 1345 | grestore 1346 | 0.800 setlinewidth 1347 | 2 setlinecap 1348 | gsave 1349 | 42.41 49.16 m 1350 | 42.41 234.1248 l 1351 | stroke 1352 | grestore 1353 | gsave 1354 | 637.2 49.16 m 1355 | 637.2 234.1248 l 1356 | stroke 1357 | grestore 1358 | gsave 1359 | 42.41 49.16 m 1360 | 637.2 49.16 l 1361 | stroke 1362 | grestore 1363 | gsave 1364 | 42.41 234.1248 m 1365 | 637.2 234.1248 l 1366 | stroke 1367 | grestore 1368 | 1.000 setlinewidth 1369 | 0 setlinecap 1370 | gsave 1371 | 44.81 237.824096 m 1372 | 634.8 237.824096 l 1373 | 636.4 237.824096 637.2 238.624096 637.2 240.224096 c 1374 | 637.2 272.320971 l 1375 | 637.2 273.920971 636.4 274.720971 634.8 274.720971 c 1376 | 44.81 274.720971 l 1377 | 43.21 274.720971 42.41 273.920971 42.41 272.320971 c 1378 | 42.41 240.224096 l 1379 | 42.41 238.624096 43.21 237.824096 44.81 237.824096 c 1380 | cl 1381 | gsave 1382 | 1.000 setgray 1383 | fill 1384 | grestore 1385 | stroke 1386 | grestore 1387 | /DejaVuSans 10.000 selectfont 1388 | gsave 1389 | 1390 | 291.141 262.327 translate 1391 | 0 rotate 1392 | 0 0 m /W glyphshow 1393 | 9.2627 0 m /e glyphshow 1394 | 15.415 0 m /i glyphshow 1395 | 18.1934 0 m /g glyphshow 1396 | 24.541 0 m /h glyphshow 1397 | 30.8789 0 m /t glyphshow 1398 | 34.7998 0 m /i glyphshow 1399 | 
37.5781 0 m /n glyphshow 1400 | 43.916 0 m /g glyphshow 1401 | 50.2637 0 m /space glyphshow 1402 | 53.4424 0 m /m glyphshow 1403 | 63.1836 0 m /e glyphshow 1404 | 69.3359 0 m /t glyphshow 1405 | 73.2568 0 m /h glyphshow 1406 | 79.5947 0 m /o glyphshow 1407 | 85.7129 0 m /d glyphshow 1408 | 92.0605 0 m /s glyphshow 1409 | grestore 1410 | gsave 1411 | 47.21 245.124096 m 1412 | 71.21 245.124096 l 1413 | 71.21 253.524096 l 1414 | 47.21 253.524096 l 1415 | cl 1416 | gsave 1417 | 0.122 0.467 0.706 setrgbcolor 1418 | fill 1419 | grestore 1420 | stroke 1421 | grestore 1422 | /DejaVuSans 12.000 selectfont 1423 | gsave 1424 | 1425 | 80.81 245.124 translate 1426 | 0 rotate 1427 | 0 0 m /A glyphshow 1428 | 8.20898 0 m /H glyphshow 1429 | 17.2324 0 m /P glyphshow 1430 | grestore 1431 | gsave 1432 | 205.588958 245.124096 m 1433 | 229.588958 245.124096 l 1434 | 229.588958 253.524096 l 1435 | 205.588958 253.524096 l 1436 | cl 1437 | gsave 1438 | 1.000 0.498 0.055 setrgbcolor 1439 | fill 1440 | grestore 1441 | stroke 1442 | grestore 1443 | /DejaVuSans 12.000 selectfont 1444 | gsave 1445 | 1446 | 239.189 245.124 translate 1447 | 0 rotate 1448 | 0 0 m /S glyphshow 1449 | 7.61719 0 m /W glyphshow 1450 | 18.8574 0 m /A glyphshow 1451 | 27.0664 0 m /R glyphshow 1452 | 34.9043 0 m /A glyphshow 1453 | grestore 1454 | gsave 1455 | 382.608542 245.124096 m 1456 | 406.608542 245.124096 l 1457 | 406.608542 253.524096 l 1458 | 382.608542 253.524096 l 1459 | cl 1460 | gsave 1461 | 0.173 0.627 0.173 setrgbcolor 1462 | fill 1463 | grestore 1464 | stroke 1465 | grestore 1466 | /DejaVuSans 12.000 selectfont 1467 | gsave 1468 | 1469 | 416.209 245.124 translate 1470 | 0 rotate 1471 | 0 0 m /L glyphshow 1472 | 6.68555 0 m /B glyphshow 1473 | 14.543 0 m /W glyphshow 1474 | 25.7832 0 m /A glyphshow 1475 | grestore 1476 | gsave 1477 | 550.503125 245.124096 m 1478 | 574.503125 245.124096 l 1479 | 574.503125 253.524096 l 1480 | 550.503125 253.524096 l 1481 | cl 1482 | gsave 1483 | 0.839 0.153 0.157 
setrgbcolor 1484 | fill 1485 | grestore 1486 | stroke 1487 | grestore 1488 | /DejaVuSans 12.000 selectfont 1489 | gsave 1490 | 1491 | 584.103 245.124 translate 1492 | 0 rotate 1493 | 0 0 m /S glyphshow 1494 | 7.86719 0 m /A glyphshow 1495 | 16.0762 0 m /P glyphshow 1496 | 23.3125 0 m /E glyphshow 1497 | 30.8945 0 m /V glyphshow 1498 | 38.8535 0 m /O glyphshow 1499 | grestore 1500 | 1501 | end 1502 | showpage 1503 | -------------------------------------------------------------------------------- /examples/results_update/barplot_rankings.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results_update/barplot_rankings.pdf -------------------------------------------------------------------------------- /examples/results_update/boxplot_weights.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results_update/boxplot_weights.pdf -------------------------------------------------------------------------------- /examples/results_update/boxplot_weights_col.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results_update/boxplot_weights_col.pdf -------------------------------------------------------------------------------- /examples/results_update/boxplot_weights_stacked_col.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results_update/boxplot_weights_stacked_col.pdf -------------------------------------------------------------------------------- /examples/results_update/crit_ahp.csv: 
-------------------------------------------------------------------------------- 1 | ,0,1,2,3,4,5,6,7,8,9 2 | 0,1.0,2.0,2.0,2.0,6.0,2.0,1.0,1.0,0.5,0.5 3 | 1,0.5,1.0,1.0,1.0,4.0,1.0,1.0,1.0,0.5,0.5 4 | 2,0.5,1.0,1.0,1.0,3.0,1.0,0.5,0.5,0.3333333333333333,0.3333333333333333 5 | 3,0.5,1.0,1.0,1.0,3.0,1.0,0.5,0.5,0.3333333333333333,0.3333333333333333 6 | 4,0.16666666666666666,0.25,0.3333333333333333,0.3333333333333333,1.0,0.3333333333333333,0.2,0.2,0.1111111111111111,0.1111111111111111 7 | 5,0.5,1.0,1.0,1.0,3.0,1.0,0.5,0.5,0.3333333333333333,0.3333333333333333 8 | 6,1.0,1.0,2.0,2.0,5.0,2.0,1.0,1.0,0.5,0.5 9 | 7,1.0,1.0,2.0,2.0,5.0,2.0,1.0,1.0,0.5,0.5 10 | 8,2.0,2.0,3.0,3.0,9.0,3.0,2.0,2.0,1.0,1.0 11 | 9,2.0,2.0,3.0,3.0,9.0,3.0,2.0,2.0,1.0,1.0 12 | -------------------------------------------------------------------------------- /examples/results_update/crit_sapevo.csv: -------------------------------------------------------------------------------- 1 | ,0,1,2,3,4,5,6,7,8,9 2 | 0,0,1,1,1,2,1,0,0,-1,-1 3 | 1,-1,0,0,0,1,0,0,0,-1,-1 4 | 2,-1,0,0,0,1,0,-1,-1,-1,-1 5 | 3,-1,0,0,0,1,0,-1,-1,-1,-1 6 | 4,-2,-1,-1,-1,0,-1,-2,-2,-3,-3 7 | 5,-1,0,0,0,1,0,-1,-1,-1,-1 8 | 6,0,0,1,1,2,1,0,0,-1,-1 9 | 7,0,0,1,1,2,1,0,0,-1,-1 10 | 8,1,1,1,1,3,1,1,1,0,0 11 | 9,1,1,1,1,3,1,1,1,0,0 12 | -------------------------------------------------------------------------------- /examples/results_update/df_prefs.csv: -------------------------------------------------------------------------------- 1 | ,AHP,SWARA,LBWA,SAPEVO 2 | $A_{1}$,0.9712622988344686,0.9228281419800827,0.8635879741221462,0.9629271567884804 3 | $A_{2}$,0.0,0.0,0.0,0.0 4 | $A_{3}$,0.9411764705882348,0.941176470588235,0.8618223624893369,0.9257142857142853 5 | $A_{4}$,0.9541057865248105,0.8826628036498521,1.0,0.937836098140101 6 | -------------------------------------------------------------------------------- /examples/results_update/df_ranks.csv: -------------------------------------------------------------------------------- 1 | 
,AHP,SWARA,LBWA,SAPEVO 2 | $A_{1}$,4,3,3,4 3 | $A_{2}$,1,1,1,1 4 | $A_{3}$,2,4,2,2 5 | $A_{4}$,3,2,4,3 6 | -------------------------------------------------------------------------------- /examples/results_update/df_weights.csv: -------------------------------------------------------------------------------- 1 | ,AHP,SWARA,LBWA,SAPEVO 2 | $C_{1}$,0.11649503779328321,0.12088606472416892,0.13409348962914394,0.12500000000000003 3 | $C_{2}$,0.08039710727058896,0.08610118570097504,0.08045609377748637,0.08750000000000001 4 | $C_{3}$,0.061432101814617876,0.06888094856078002,0.06188930290575875,0.07500000000000001 5 | $C_{4}$,0.061432101814617834,0.06888094856078002,0.06188930290575875,0.07500000000000001 6 | $C_{5}$,0.020516973927510986,0.04124607698250301,0.018710719483136365,0.0 7 | $C_{6}$,0.0614321018146179,0.06888094856078002,0.06188930290575875,0.07500000000000001 8 | $C_{7}$,0.10648668365303617,0.10332142284117002,0.08939565975276263,0.11875000000000001 9 | $C_{8}$,0.10648668365303612,0.10332142284117002,0.08939565975276263,0.11875000000000001 10 | $C_{9}$,0.19266060412934546,0.16924049061383648,0.20114023444371593,0.1625 11 | $C_{10}$,0.1926606041293455,0.16924049061383648,0.20114023444371593,0.1625 12 | -------------------------------------------------------------------------------- /examples/results_update/heatmap.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results_update/heatmap.pdf -------------------------------------------------------------------------------- /examples/results_update/radar.eps: -------------------------------------------------------------------------------- 1 | %!PS-Adobe-3.0 EPSF-3.0 2 | %%Title: radar.eps 3 | %%Creator: Matplotlib v3.7.1, https://matplotlib.org/ 4 | %%CreationDate: Thu Sep 21 16:02:02 2023 5 | %%Orientation: portrait 6 | %%BoundingBox: 75 223 537 569 7 | %%HiResBoundingBox: 
75.600000 223.200000 536.400000 568.800000 8 | %%EndComments 9 | %%BeginProlog 10 | /mpldict 12 dict def 11 | mpldict begin 12 | /_d { bind def } bind def 13 | /m { moveto } _d 14 | /l { lineto } _d 15 | /r { rlineto } _d 16 | /c { curveto } _d 17 | /cl { closepath } _d 18 | /ce { closepath eofill } _d 19 | /box { 20 | m 21 | 1 index 0 r 22 | 0 exch r 23 | neg 0 r 24 | cl 25 | } _d 26 | /clipbox { 27 | box 28 | clip 29 | newpath 30 | } _d 31 | /sc { setcachedevice } _d 32 | %!PS-Adobe-3.0 Resource-Font 33 | %%Creator: Converted from TrueType to Type 3 by Matplotlib. 34 | 10 dict begin 35 | /FontName /DejaVuSans-Oblique def 36 | /PaintType 0 def 37 | /FontMatrix [0.00048828125 0 0 0.00048828125 0 0] def 38 | /FontBBox [-2080 -717 3398 2187] def 39 | /FontType 3 def 40 | /Encoding [/A] def 41 | /CharStrings 2 dict dup begin 42 | /.notdef 0 def 43 | /A{1401 0 -109 0 1260 1493 sc 44 | 754 1493 m 45 | 983 1493 l 46 | 1260 0 l 47 | 1049 0 l 48 | 987 383 l 49 | 315 383 l 50 | 104 0 l 51 | -109 0 l 52 | 754 1493 l 53 | 54 | 827 1294 m 55 | 408 551 l 56 | 956 551 l 57 | 827 1294 l 58 | 59 | ce} _d 60 | end readonly def 61 | 62 | /BuildGlyph { 63 | exch begin 64 | CharStrings exch 65 | 2 copy known not {pop /.notdef} if 66 | true 3 1 roll get exec 67 | end 68 | } _d 69 | 70 | /BuildChar { 71 | 1 index /Encoding get exch get 72 | 1 index /BuildGlyph get exec 73 | } _d 74 | 75 | FontName currentdict end definefont pop 76 | %!PS-Adobe-3.0 Resource-Font 77 | %%Creator: Converted from TrueType to Type 3 by Matplotlib. 
78 | 10 dict begin 79 | /FontName /DejaVuSans def 80 | /PaintType 0 def 81 | /FontMatrix [0.00048828125 0 0 0.00048828125 0 0] def 82 | /FontBBox [-2090 -948 3673 2524] def 83 | /FontType 3 def 84 | /Encoding [/space /one /two /three /four /A /B /E /H /L /O /P /R /S /V /W /d /e /g /h /i /m /n /o /s /t] def 85 | /CharStrings 27 dict dup begin 86 | /.notdef 0 def 87 | /space{651 0 0 0 0 0 sc 88 | ce} _d 89 | /one{1303 0 225 0 1114 1493 sc 90 | 254 170 m 91 | 584 170 l 92 | 584 1309 l 93 | 225 1237 l 94 | 225 1421 l 95 | 582 1493 l 96 | 784 1493 l 97 | 784 170 l 98 | 1114 170 l 99 | 1114 0 l 100 | 254 0 l 101 | 254 170 l 102 | 103 | ce} _d 104 | /two{1303 0 150 0 1098 1520 sc 105 | 393 170 m 106 | 1098 170 l 107 | 1098 0 l 108 | 150 0 l 109 | 150 170 l 110 | 227 249 331 356 463 489 c 111 | 596 623 679 709 713 748 c 112 | 778 821 823 882 848 932 c 113 | 874 983 887 1032 887 1081 c 114 | 887 1160 859 1225 803 1275 c 115 | 748 1325 675 1350 586 1350 c 116 | 523 1350 456 1339 385 1317 c 117 | 315 1295 240 1262 160 1217 c 118 | 160 1421 l 119 | 241 1454 317 1478 388 1495 c 120 | 459 1512 523 1520 582 1520 c 121 | 737 1520 860 1481 952 1404 c 122 | 1044 1327 1090 1223 1090 1094 c 123 | 1090 1033 1078 974 1055 919 c 124 | 1032 864 991 800 930 725 c 125 | 913 706 860 650 771 557 c 126 | 682 465 556 336 393 170 c 127 | 128 | ce} _d 129 | /three{1303 0 156 -29 1139 1520 sc 130 | 831 805 m 131 | 928 784 1003 741 1057 676 c 132 | 1112 611 1139 530 1139 434 c 133 | 1139 287 1088 173 987 92 c 134 | 886 11 742 -29 555 -29 c 135 | 492 -29 428 -23 361 -10 c 136 | 295 2 227 20 156 45 c 137 | 156 240 l 138 | 212 207 273 183 340 166 c 139 | 407 149 476 141 549 141 c 140 | 676 141 772 166 838 216 c 141 | 905 266 938 339 938 434 c 142 | 938 522 907 591 845 640 c 143 | 784 690 698 715 588 715 c 144 | 414 715 l 145 | 414 881 l 146 | 596 881 l 147 | 695 881 771 901 824 940 c 148 | 877 980 903 1037 903 1112 c 149 | 903 1189 876 1247 821 1288 c 150 | 767 1329 689 1350 588 1350 c 151 | 533 1350 
473 1344 410 1332 c 152 | 347 1320 277 1301 201 1276 c 153 | 201 1456 l 154 | 278 1477 349 1493 416 1504 c 155 | 483 1515 547 1520 606 1520 c 156 | 759 1520 881 1485 970 1415 c 157 | 1059 1346 1104 1252 1104 1133 c 158 | 1104 1050 1080 980 1033 923 c 159 | 986 866 918 827 831 805 c 160 | 161 | ce} _d 162 | /four{1303 0 100 0 1188 1493 sc 163 | 774 1317 m 164 | 264 520 l 165 | 774 520 l 166 | 774 1317 l 167 | 168 | 721 1493 m 169 | 975 1493 l 170 | 975 520 l 171 | 1188 520 l 172 | 1188 352 l 173 | 975 352 l 174 | 975 0 l 175 | 774 0 l 176 | 774 352 l 177 | 100 352 l 178 | 100 547 l 179 | 721 1493 l 180 | 181 | ce} _d 182 | /A{1401 0 16 0 1384 1493 sc 183 | 700 1294 m 184 | 426 551 l 185 | 975 551 l 186 | 700 1294 l 187 | 188 | 586 1493 m 189 | 815 1493 l 190 | 1384 0 l 191 | 1174 0 l 192 | 1038 383 l 193 | 365 383 l 194 | 229 0 l 195 | 16 0 l 196 | 586 1493 l 197 | 198 | ce} _d 199 | /B{1405 0 201 0 1260 1493 sc 200 | 403 713 m 201 | 403 166 l 202 | 727 166 l 203 | 836 166 916 188 968 233 c 204 | 1021 278 1047 347 1047 440 c 205 | 1047 533 1021 602 968 646 c 206 | 916 691 836 713 727 713 c 207 | 403 713 l 208 | 209 | 403 1327 m 210 | 403 877 l 211 | 702 877 l 212 | 801 877 874 895 922 932 c 213 | 971 969 995 1026 995 1102 c 214 | 995 1177 971 1234 922 1271 c 215 | 874 1308 801 1327 702 1327 c 216 | 403 1327 l 217 | 218 | 201 1493 m 219 | 717 1493 l 220 | 871 1493 990 1461 1073 1397 c 221 | 1156 1333 1198 1242 1198 1124 c 222 | 1198 1033 1177 960 1134 906 c 223 | 1091 852 1029 818 946 805 c 224 | 1045 784 1122 739 1177 671 c 225 | 1232 604 1260 519 1260 418 c 226 | 1260 285 1215 182 1124 109 c 227 | 1033 36 904 0 737 0 c 228 | 201 0 l 229 | 201 1493 l 230 | 231 | ce} _d 232 | /E{1294 0 201 0 1163 1493 sc 233 | 201 1493 m 234 | 1145 1493 l 235 | 1145 1323 l 236 | 403 1323 l 237 | 403 881 l 238 | 1114 881 l 239 | 1114 711 l 240 | 403 711 l 241 | 403 170 l 242 | 1163 170 l 243 | 1163 0 l 244 | 201 0 l 245 | 201 1493 l 246 | 247 | ce} _d 248 | /H{1540 0 201 0 1339 1493 
sc 249 | 201 1493 m 250 | 403 1493 l 251 | 403 881 l 252 | 1137 881 l 253 | 1137 1493 l 254 | 1339 1493 l 255 | 1339 0 l 256 | 1137 0 l 257 | 1137 711 l 258 | 403 711 l 259 | 403 0 l 260 | 201 0 l 261 | 201 1493 l 262 | 263 | ce} _d 264 | /L{1141 0 201 0 1130 1493 sc 265 | 201 1493 m 266 | 403 1493 l 267 | 403 170 l 268 | 1130 170 l 269 | 1130 0 l 270 | 201 0 l 271 | 201 1493 l 272 | 273 | ce} _d 274 | /O{1612 0 115 -29 1497 1520 sc 275 | 807 1356 m 276 | 660 1356 544 1301 457 1192 c 277 | 371 1083 328 934 328 745 c 278 | 328 557 371 408 457 299 c 279 | 544 190 660 135 807 135 c 280 | 954 135 1070 190 1155 299 c 281 | 1241 408 1284 557 1284 745 c 282 | 1284 934 1241 1083 1155 1192 c 283 | 1070 1301 954 1356 807 1356 c 284 | 285 | 807 1520 m 286 | 1016 1520 1184 1450 1309 1309 c 287 | 1434 1169 1497 981 1497 745 c 288 | 1497 510 1434 322 1309 181 c 289 | 1184 41 1016 -29 807 -29 c 290 | 597 -29 429 41 303 181 c 291 | 178 321 115 509 115 745 c 292 | 115 981 178 1169 303 1309 c 293 | 429 1450 597 1520 807 1520 c 294 | 295 | ce} _d 296 | /P{1235 0 201 0 1165 1493 sc 297 | 403 1327 m 298 | 403 766 l 299 | 657 766 l 300 | 751 766 824 790 875 839 c 301 | 926 888 952 957 952 1047 c 302 | 952 1136 926 1205 875 1254 c 303 | 824 1303 751 1327 657 1327 c 304 | 403 1327 l 305 | 306 | 201 1493 m 307 | 657 1493 l 308 | 824 1493 951 1455 1036 1379 c 309 | 1122 1304 1165 1193 1165 1047 c 310 | 1165 900 1122 788 1036 713 c 311 | 951 638 824 600 657 600 c 312 | 403 600 l 313 | 403 0 l 314 | 201 0 l 315 | 201 1493 l 316 | 317 | ce} _d 318 | /R{1423 0 201 0 1364 1493 sc 319 | 909 700 m 320 | 952 685 994 654 1035 606 c 321 | 1076 558 1118 492 1159 408 c 322 | 1364 0 l 323 | 1147 0 l 324 | 956 383 l 325 | 907 483 859 549 812 582 c 326 | 766 615 703 631 623 631 c 327 | 403 631 l 328 | 403 0 l 329 | 201 0 l 330 | 201 1493 l 331 | 657 1493 l 332 | 828 1493 955 1457 1039 1386 c 333 | 1123 1315 1165 1207 1165 1063 c 334 | 1165 969 1143 891 1099 829 c 335 | 1056 767 992 724 909 700 c 336 | 337 
| 403 1327 m 338 | 403 797 l 339 | 657 797 l 340 | 754 797 828 819 877 864 c 341 | 927 909 952 976 952 1063 c 342 | 952 1150 927 1216 877 1260 c 343 | 828 1305 754 1327 657 1327 c 344 | 403 1327 l 345 | 346 | ce} _d 347 | /S{1300 0 135 -29 1186 1520 sc 348 | 1096 1444 m 349 | 1096 1247 l 350 | 1019 1284 947 1311 879 1329 c 351 | 811 1347 745 1356 682 1356 c 352 | 572 1356 487 1335 427 1292 c 353 | 368 1249 338 1189 338 1110 c 354 | 338 1044 358 994 397 960 c 355 | 437 927 512 900 623 879 c 356 | 745 854 l 357 | 896 825 1007 775 1078 702 c 358 | 1150 630 1186 533 1186 412 c 359 | 1186 267 1137 158 1040 83 c 360 | 943 8 801 -29 614 -29 c 361 | 543 -29 468 -21 388 -5 c 362 | 309 11 226 35 141 66 c 363 | 141 274 l 364 | 223 228 303 193 382 170 c 365 | 461 147 538 135 614 135 c 366 | 729 135 818 158 881 203 c 367 | 944 248 975 313 975 397 c 368 | 975 470 952 528 907 569 c 369 | 862 610 789 641 686 662 c 370 | 563 686 l 371 | 412 716 303 763 236 827 c 372 | 169 891 135 980 135 1094 c 373 | 135 1226 181 1330 274 1406 c 374 | 367 1482 496 1520 659 1520 c 375 | 729 1520 800 1514 873 1501 c 376 | 946 1488 1020 1469 1096 1444 c 377 | 378 | ce} _d 379 | /V{1401 0 16 0 1384 1493 sc 380 | 586 0 m 381 | 16 1493 l 382 | 227 1493 l 383 | 700 236 l 384 | 1174 1493 l 385 | 1384 1493 l 386 | 815 0 l 387 | 586 0 l 388 | 389 | ce} _d 390 | /W{2025 0 68 0 1958 1493 sc 391 | 68 1493 m 392 | 272 1493 l 393 | 586 231 l 394 | 899 1493 l 395 | 1126 1493 l 396 | 1440 231 l 397 | 1753 1493 l 398 | 1958 1493 l 399 | 1583 0 l 400 | 1329 0 l 401 | 1014 1296 l 402 | 696 0 l 403 | 442 0 l 404 | 68 1493 l 405 | 406 | ce} _d 407 | /d{1300 0 113 -29 1114 1556 sc 408 | 930 950 m 409 | 930 1556 l 410 | 1114 1556 l 411 | 1114 0 l 412 | 930 0 l 413 | 930 168 l 414 | 891 101 842 52 783 19 c 415 | 724 -13 654 -29 571 -29 c 416 | 436 -29 325 25 240 133 c 417 | 155 241 113 383 113 559 c 418 | 113 735 155 877 240 985 c 419 | 325 1093 436 1147 571 1147 c 420 | 654 1147 724 1131 783 1098 c 421 | 842 1066 891 1017 
930 950 c 422 | 423 | 303 559 m 424 | 303 424 331 317 386 240 c 425 | 442 163 519 125 616 125 c 426 | 713 125 790 163 846 240 c 427 | 902 317 930 424 930 559 c 428 | 930 694 902 800 846 877 c 429 | 790 954 713 993 616 993 c 430 | 519 993 442 954 386 877 c 431 | 331 800 303 694 303 559 c 432 | 433 | ce} _d 434 | /e{1260 0 113 -29 1151 1147 sc 435 | 1151 606 m 436 | 1151 516 l 437 | 305 516 l 438 | 313 389 351 293 419 226 c 439 | 488 160 583 127 705 127 c 440 | 776 127 844 136 910 153 c 441 | 977 170 1043 196 1108 231 c 442 | 1108 57 l 443 | 1042 29 974 8 905 -7 c 444 | 836 -22 765 -29 694 -29 c 445 | 515 -29 374 23 269 127 c 446 | 165 231 113 372 113 549 c 447 | 113 732 162 878 261 985 c 448 | 360 1093 494 1147 662 1147 c 449 | 813 1147 932 1098 1019 1001 c 450 | 1107 904 1151 773 1151 606 c 451 | 452 | 967 660 m 453 | 966 761 937 841 882 901 c 454 | 827 961 755 991 664 991 c 455 | 561 991 479 962 417 904 c 456 | 356 846 320 764 311 659 c 457 | 967 660 l 458 | 459 | ce} _d 460 | /g{1300 0 113 -426 1114 1147 sc 461 | 930 573 m 462 | 930 706 902 810 847 883 c 463 | 792 956 715 993 616 993 c 464 | 517 993 440 956 385 883 c 465 | 330 810 303 706 303 573 c 466 | 303 440 330 337 385 264 c 467 | 440 191 517 154 616 154 c 468 | 715 154 792 191 847 264 c 469 | 902 337 930 440 930 573 c 470 | 471 | 1114 139 m 472 | 1114 -52 1072 -193 987 -286 c 473 | 902 -379 773 -426 598 -426 c 474 | 533 -426 472 -421 415 -411 c 475 | 358 -402 302 -387 248 -367 c 476 | 248 -188 l 477 | 302 -217 355 -239 408 -253 c 478 | 461 -267 514 -274 569 -274 c 479 | 690 -274 780 -242 840 -179 c 480 | 900 -116 930 -21 930 106 c 481 | 930 197 l 482 | 892 131 843 82 784 49 c 483 | 725 16 654 0 571 0 c 484 | 434 0 323 52 239 157 c 485 | 155 262 113 400 113 573 c 486 | 113 746 155 885 239 990 c 487 | 323 1095 434 1147 571 1147 c 488 | 654 1147 725 1131 784 1098 c 489 | 843 1065 892 1016 930 950 c 490 | 930 1120 l 491 | 1114 1120 l 492 | 1114 139 l 493 | 494 | ce} _d 495 | /h{1298 0 186 0 1124 1556 sc 496 | 
1124 676 m 497 | 1124 0 l 498 | 940 0 l 499 | 940 670 l 500 | 940 776 919 855 878 908 c 501 | 837 961 775 987 692 987 c 502 | 593 987 514 955 457 892 c 503 | 400 829 371 742 371 633 c 504 | 371 0 l 505 | 186 0 l 506 | 186 1556 l 507 | 371 1556 l 508 | 371 946 l 509 | 415 1013 467 1064 526 1097 c 510 | 586 1130 655 1147 733 1147 c 511 | 862 1147 959 1107 1025 1027 c 512 | 1091 948 1124 831 1124 676 c 513 | 514 | ce} _d 515 | /i{569 0 193 0 377 1556 sc 516 | 193 1120 m 517 | 377 1120 l 518 | 377 0 l 519 | 193 0 l 520 | 193 1120 l 521 | 522 | 193 1556 m 523 | 377 1556 l 524 | 377 1323 l 525 | 193 1323 l 526 | 193 1556 l 527 | 528 | ce} _d 529 | /m{1995 0 186 0 1821 1147 sc 530 | 1065 905 m 531 | 1111 988 1166 1049 1230 1088 c 532 | 1294 1127 1369 1147 1456 1147 c 533 | 1573 1147 1663 1106 1726 1024 c 534 | 1789 943 1821 827 1821 676 c 535 | 1821 0 l 536 | 1636 0 l 537 | 1636 670 l 538 | 1636 777 1617 857 1579 909 c 539 | 1541 961 1483 987 1405 987 c 540 | 1310 987 1234 955 1179 892 c 541 | 1124 829 1096 742 1096 633 c 542 | 1096 0 l 543 | 911 0 l 544 | 911 670 l 545 | 911 778 892 858 854 909 c 546 | 816 961 757 987 678 987 c 547 | 584 987 509 955 454 891 c 548 | 399 828 371 742 371 633 c 549 | 371 0 l 550 | 186 0 l 551 | 186 1120 l 552 | 371 1120 l 553 | 371 946 l 554 | 413 1015 463 1065 522 1098 c 555 | 581 1131 650 1147 731 1147 c 556 | 812 1147 881 1126 938 1085 c 557 | 995 1044 1038 984 1065 905 c 558 | 559 | ce} _d 560 | /n{1298 0 186 0 1124 1147 sc 561 | 1124 676 m 562 | 1124 0 l 563 | 940 0 l 564 | 940 670 l 565 | 940 776 919 855 878 908 c 566 | 837 961 775 987 692 987 c 567 | 593 987 514 955 457 892 c 568 | 400 829 371 742 371 633 c 569 | 371 0 l 570 | 186 0 l 571 | 186 1120 l 572 | 371 1120 l 573 | 371 946 l 574 | 415 1013 467 1064 526 1097 c 575 | 586 1130 655 1147 733 1147 c 576 | 862 1147 959 1107 1025 1027 c 577 | 1091 948 1124 831 1124 676 c 578 | 579 | ce} _d 580 | /o{1253 0 113 -29 1141 1147 sc 581 | 627 991 m 582 | 528 991 450 952 393 875 c 583 | 336 
798 307 693 307 559 c 584 | 307 425 335 319 392 242 c 585 | 449 165 528 127 627 127 c 586 | 725 127 803 166 860 243 c 587 | 917 320 946 426 946 559 c 588 | 946 692 917 797 860 874 c 589 | 803 952 725 991 627 991 c 590 | 591 | 627 1147 m 592 | 787 1147 913 1095 1004 991 c 593 | 1095 887 1141 743 1141 559 c 594 | 1141 376 1095 232 1004 127 c 595 | 913 23 787 -29 627 -29 c 596 | 466 -29 340 23 249 127 c 597 | 158 232 113 376 113 559 c 598 | 113 743 158 887 249 991 c 599 | 340 1095 466 1147 627 1147 c 600 | 601 | ce} _d 602 | /s{1067 0 111 -29 967 1147 sc 603 | 907 1087 m 604 | 907 913 l 605 | 855 940 801 960 745 973 c 606 | 689 986 631 993 571 993 c 607 | 480 993 411 979 365 951 c 608 | 320 923 297 881 297 825 c 609 | 297 782 313 749 346 724 c 610 | 379 700 444 677 543 655 c 611 | 606 641 l 612 | 737 613 829 573 884 522 c 613 | 939 471 967 400 967 309 c 614 | 967 205 926 123 843 62 c 615 | 761 1 648 -29 504 -29 c 616 | 444 -29 381 -23 316 -11 c 617 | 251 0 183 18 111 41 c 618 | 111 231 l 619 | 179 196 246 169 312 151 c 620 | 378 134 443 125 508 125 c 621 | 595 125 661 140 708 169 c 622 | 755 199 778 241 778 295 c 623 | 778 345 761 383 727 410 c 624 | 694 437 620 462 506 487 c 625 | 442 502 l 626 | 328 526 246 563 195 612 c 627 | 144 662 119 730 119 817 c 628 | 119 922 156 1004 231 1061 c 629 | 306 1118 412 1147 549 1147 c 630 | 617 1147 681 1142 741 1132 c 631 | 801 1122 856 1107 907 1087 c 632 | 633 | ce} _d 634 | /t{803 0 55 0 754 1438 sc 635 | 375 1438 m 636 | 375 1120 l 637 | 754 1120 l 638 | 754 977 l 639 | 375 977 l 640 | 375 369 l 641 | 375 278 387 219 412 193 c 642 | 437 167 488 154 565 154 c 643 | 754 154 l 644 | 754 0 l 645 | 565 0 l 646 | 423 0 325 26 271 79 c 647 | 217 132 190 229 190 369 c 648 | 190 977 l 649 | 55 977 l 650 | 55 1120 l 651 | 190 1120 l 652 | 190 1438 l 653 | 375 1438 l 654 | 655 | ce} _d 656 | end readonly def 657 | 658 | /BuildGlyph { 659 | exch begin 660 | CharStrings exch 661 | 2 copy known not {pop /.notdef} if 662 | true 3 1 roll get 
exec 663 | end 664 | } _d 665 | 666 | /BuildChar { 667 | 1 index /Encoding get exch get 668 | 1 index /BuildGlyph get exec 669 | } _d 670 | 671 | FontName currentdict end definefont pop 672 | end 673 | %%EndProlog 674 | mpldict begin 675 | 75.6 223.2 translate 676 | 460.8 345.6 0 0 clipbox 677 | gsave 678 | 0 0 m 679 | 460.8 0 l 680 | 460.8 345.6 l 681 | 0 345.6 l 682 | cl 683 | 1.000 setgray 684 | fill 685 | grestore 686 | gsave 687 | 340.3424 140.6464 m 688 | 340.3424 155.083904 337.498527 169.381021 331.973533 182.719535 c 689 | 326.44854 196.058049 318.349873 208.17856 308.141017 218.387417 c 690 | 297.93216 228.596273 285.811649 236.69494 272.473135 242.219933 c 691 | 259.134621 247.744927 244.837504 250.5888 230.4 250.5888 c 692 | 215.962496 250.5888 201.665379 247.744927 188.326865 242.219933 c 693 | 174.988351 236.69494 162.86784 228.596273 152.658983 218.387417 c 694 | 142.450127 208.17856 134.35146 196.058049 128.826467 182.719535 c 695 | 123.301473 169.381021 120.4576 155.083904 120.4576 140.6464 c 696 | 120.4576 126.208896 123.301473 111.911779 128.826467 98.573265 c 697 | 134.35146 85.234751 142.450127 73.11424 152.658983 62.905383 c 698 | 162.86784 52.696527 174.988351 44.59786 188.326865 39.072867 c 699 | 201.665379 33.547873 215.962496 30.704 230.4 30.704 c 700 | 244.837504 30.704 259.134621 33.547873 272.473135 39.072867 c 701 | 285.811649 44.59786 297.93216 52.696527 308.141017 62.905383 c 702 | 318.349873 73.11424 326.44854 85.234751 331.973533 98.573265 c 703 | 337.498527 111.911779 340.3424 126.208896 340.3424 140.6464 c 704 | 230.4 140.6464 m 705 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 706 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 707 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 708 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 709 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 710 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 711 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 712 | 230.4 140.6464 230.4 140.6464 
230.4 140.6464 c 713 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 714 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 715 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 716 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 717 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 718 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 719 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 720 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 721 | cl 722 | 1.000 setgray 723 | fill 724 | grestore 725 | 0.800 setlinewidth 726 | 1 setlinejoin 727 | 0 setlinecap 728 | [2.96 1.28] 0 setdash 729 | 0.690 setgray 730 | gsave 731 | /c0 { 732 | 340.3424 140.6464 m 733 | 340.3424 155.083904 337.498527 169.381021 331.973533 182.719535 c 734 | 326.44854 196.058049 318.349873 208.17856 308.141017 218.387417 c 735 | 297.93216 228.596273 285.811649 236.69494 272.473135 242.219933 c 736 | 259.134621 247.744927 244.837504 250.5888 230.4 250.5888 c 737 | 215.962496 250.5888 201.665379 247.744927 188.326865 242.219933 c 738 | 174.988351 236.69494 162.86784 228.596273 152.658983 218.387417 c 739 | 142.450127 208.17856 134.35146 196.058049 128.826467 182.719535 c 740 | 123.301473 169.381021 120.4576 155.083904 120.4576 140.6464 c 741 | 120.4576 126.208896 123.301473 111.911779 128.826467 98.573265 c 742 | 134.35146 85.234751 142.450127 73.11424 152.658983 62.905383 c 743 | 162.86784 52.696527 174.988351 44.59786 188.326865 39.072867 c 744 | 201.665379 33.547873 215.962496 30.704 230.4 30.704 c 745 | 244.837504 30.704 259.134621 33.547873 272.473135 39.072867 c 746 | 285.811649 44.59786 297.93216 52.696527 308.141017 62.905383 c 747 | 318.349873 73.11424 326.44854 85.234751 331.973533 98.573265 c 748 | 337.498527 111.911779 340.3424 126.208896 340.3424 140.6464 c 749 | 230.4 140.6464 m 750 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 751 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 752 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 753 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 
c 754 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 755 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 756 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 757 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 758 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 759 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 760 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 761 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 762 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 763 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 764 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 765 | 230.4 140.6464 230.4 140.6464 230.4 140.6464 c 766 | cl 767 | 768 | clip 769 | newpath 770 | } bind def 771 | c0 772 | 230.4 140.6464 m 773 | 340.3424 140.6464 l 774 | stroke 775 | grestore 776 | 0.000 setgray 777 | gsave 778 | 348.342 137.725 translate 779 | 0 rotate 780 | /DejaVuSans-Oblique 10.0 selectfont 781 | 0 0.703125 moveto 782 | /A glyphshow 783 | /DejaVuSans 7.0 selectfont 784 | 6.84082 -0.9375 moveto 785 | /one glyphshow 786 | grestore 787 | 0.690 setgray 788 | gsave 789 | c0 790 | 230.4 140.6464 m 791 | 230.4 250.5888 l 792 | stroke 793 | grestore 794 | 0.000 setgray 795 | gsave 796 | 224.4 261.667 translate 797 | 0 rotate 798 | /DejaVuSans-Oblique 10.0 selectfont 799 | 0 0.703125 moveto 800 | /A glyphshow 801 | /DejaVuSans 7.0 selectfont 802 | 6.84082 -0.9375 moveto 803 | /two glyphshow 804 | grestore 805 | 0.690 setgray 806 | gsave 807 | c0 808 | 230.4 140.6464 m 809 | 120.4576 140.6464 l 810 | stroke 811 | grestore 812 | 0.000 setgray 813 | gsave 814 | 100.458 137.725 translate 815 | 0 rotate 816 | /DejaVuSans-Oblique 10.0 selectfont 817 | 0 0.703125 moveto 818 | /A glyphshow 819 | /DejaVuSans 7.0 selectfont 820 | 6.84082 -0.9375 moveto 821 | /three glyphshow 822 | grestore 823 | 0.690 setgray 824 | gsave 825 | c0 826 | 230.4 140.6464 m 827 | 230.4 30.704 l 828 | stroke 829 | grestore 830 | 0.000 setgray 831 | gsave 832 | 224.4 13.7821 translate 833 | 0 rotate 834 | 
/DejaVuSans-Oblique 10.0 selectfont 835 | 0 0.703125 moveto 836 | /A glyphshow 837 | /DejaVuSans 7.0 selectfont 838 | 6.84082 -0.9375 moveto 839 | /four glyphshow 840 | grestore 841 | 0.690 setgray 842 | gsave 843 | c0 844 | 230.4 140.6464 m 845 | 340.3424 140.6464 l 846 | stroke 847 | grestore 848 | 0.000 setgray 849 | gsave 850 | 348.342 137.725 translate 851 | 0 rotate 852 | /DejaVuSans-Oblique 10.0 selectfont 853 | 0 0.703125 moveto 854 | /A glyphshow 855 | /DejaVuSans 7.0 selectfont 856 | 6.84082 -0.9375 moveto 857 | /one glyphshow 858 | grestore 859 | 0.690 setgray 860 | gsave 861 | c0 862 | 256.892145 140.6464 m 863 | 256.892145 144.125317 256.206874 147.570405 254.87555 150.784505 c 864 | 253.544226 153.998605 251.592741 156.91921 249.132775 159.379175 c 865 | 246.67281 161.839141 243.752205 163.790626 240.538105 165.12195 c 866 | 237.324005 166.453274 233.878917 167.138545 230.4 167.138545 c 867 | 226.921083 167.138545 223.475995 166.453274 220.261895 165.12195 c 868 | 217.047795 163.790626 214.12719 161.839141 211.667225 159.379175 c 869 | 209.207259 156.91921 207.255774 153.998605 205.92445 150.784505 c 870 | 204.593126 147.570405 203.907855 144.125317 203.907855 140.6464 c 871 | 203.907855 137.167483 204.593126 133.722395 205.92445 130.508295 c 872 | 207.255774 127.294195 209.207259 124.37359 211.667225 121.913625 c 873 | 214.12719 119.453659 217.047795 117.502174 220.261895 116.17085 c 874 | 223.475995 114.839526 226.921083 114.154255 230.4 114.154255 c 875 | 233.878917 114.154255 237.324005 114.839526 240.538105 116.17085 c 876 | 243.752205 117.502174 246.67281 119.453659 249.132775 121.913625 c 877 | 251.592741 124.37359 253.544226 127.294195 254.87555 130.508295 c 878 | 256.206874 133.722395 256.892145 137.167483 256.892145 140.6464 c 879 | stroke 880 | grestore 881 | 0.000 setgray 882 | /DejaVuSans 10.000 selectfont 883 | gsave 884 | 885 | 254.876 152.863 translate 886 | 0 rotate 887 | 0 0 m /one glyphshow 888 | grestore 889 | 0.690 setgray 890 | 
gsave 891 | c0 892 | 283.384289 140.6464 m 893 | 283.384289 147.604233 282.013748 154.49441 279.3511 160.92261 c 894 | 276.688453 167.350809 272.785481 173.192019 267.86555 178.11195 c 895 | 262.945619 183.031881 257.104409 186.934853 250.67621 189.5975 c 896 | 244.24801 192.260148 237.357833 193.630689 230.4 193.630689 c 897 | 223.442167 193.630689 216.55199 192.260148 210.12379 189.5975 c 898 | 203.695591 186.934853 197.854381 183.031881 192.93445 178.11195 c 899 | 188.014519 173.192019 184.111547 167.350809 181.4489 160.92261 c 900 | 178.786252 154.49441 177.415711 147.604233 177.415711 140.6464 c 901 | 177.415711 133.688567 178.786252 126.79839 181.4489 120.37019 c 902 | 184.111547 113.941991 188.014519 108.100781 192.93445 103.18085 c 903 | 197.854381 98.260919 203.695591 94.357947 210.12379 91.6953 c 904 | 216.55199 89.032652 223.442167 87.662111 230.4 87.662111 c 905 | 237.357833 87.662111 244.24801 89.032652 250.67621 91.6953 c 906 | 257.104409 94.357947 262.945619 98.260919 267.86555 103.18085 c 907 | 272.785481 108.100781 276.688453 113.941991 279.3511 120.37019 c 908 | 282.013748 126.79839 283.384289 133.688567 283.384289 140.6464 c 909 | stroke 910 | grestore 911 | 0.000 setgray 912 | /DejaVuSans 10.000 selectfont 913 | gsave 914 | 915 | 279.351 163.001 translate 916 | 0 rotate 917 | 0 0 m /two glyphshow 918 | grestore 919 | 0.690 setgray 920 | gsave 921 | c0 922 | 309.876434 140.6464 m 923 | 309.876434 151.08315 307.820622 161.418415 303.82665 171.060714 c 924 | 299.832679 180.703014 293.978222 189.464829 286.598325 196.844725 c 925 | 279.218429 204.224622 270.456614 210.079079 260.814314 214.07305 c 926 | 251.172015 218.067022 240.83675 220.122834 230.4 220.122834 c 927 | 219.96325 220.122834 209.627985 218.067022 199.985686 214.07305 c 928 | 190.343386 210.079079 181.581571 204.224622 174.201675 196.844725 c 929 | 166.821778 189.464829 160.967321 180.703014 156.97335 171.060714 c 930 | 152.979378 161.418415 150.923566 151.08315 150.923566 140.6464 c 
931 | 150.923566 130.20965 152.979378 119.874385 156.97335 110.232086 c 932 | 160.967321 100.589786 166.821778 91.827971 174.201675 84.448075 c 933 | 181.581571 77.068178 190.343386 71.213721 199.985686 67.21975 c 934 | 209.627985 63.225778 219.96325 61.169966 230.4 61.169966 c 935 | 240.83675 61.169966 251.172015 63.225778 260.814314 67.21975 c 936 | 270.456614 71.213721 279.218429 77.068178 286.598325 84.448075 c 937 | 293.978222 91.827971 299.832679 100.589786 303.82665 110.232086 c 938 | 307.820622 119.874385 309.876434 130.20965 309.876434 140.6464 c 939 | stroke 940 | grestore 941 | 0.000 setgray 942 | /DejaVuSans 10.000 selectfont 943 | gsave 944 | 945 | 303.827 173.139 translate 946 | 0 rotate 947 | 0 0 m /three glyphshow 948 | grestore 949 | 0.690 setgray 950 | gsave 951 | c0 952 | 336.368578 140.6464 m 953 | 336.368578 154.562066 333.627495 168.34242 328.302201 181.198819 c 954 | 322.976906 194.055218 315.170962 205.737638 305.3311 215.5775 c 955 | 295.491238 225.417362 283.808818 233.223306 270.952419 238.548601 c 956 | 258.09602 243.873895 244.315666 246.614978 230.4 246.614978 c 957 | 216.484334 246.614978 202.70398 243.873895 189.847581 238.548601 c 958 | 176.991182 233.223306 165.308762 225.417362 155.4689 215.5775 c 959 | 145.629038 205.737638 137.823094 194.055218 132.497799 181.198819 c 960 | 127.172505 168.34242 124.431422 154.562066 124.431422 140.6464 c 961 | 124.431422 126.730734 127.172505 112.95038 132.497799 100.093981 c 962 | 137.823094 87.237582 145.629038 75.555162 155.4689 65.7153 c 963 | 165.308762 55.875438 176.991182 48.069494 189.847581 42.744199 c 964 | 202.70398 37.418905 216.484334 34.677822 230.4 34.677822 c 965 | 244.315666 34.677822 258.09602 37.418905 270.952419 42.744199 c 966 | 283.808818 48.069494 295.491238 55.875438 305.3311 65.7153 c 967 | 315.170962 75.555162 322.976906 87.237582 328.302201 100.093981 c 968 | 333.627495 112.95038 336.368578 126.730734 336.368578 140.6464 c 969 | stroke 970 | grestore 971 | 0.000 
setgray 972 | /DejaVuSans 10.000 selectfont 973 | gsave 974 | 975 | 328.302 183.277 translate 976 | 0 rotate 977 | 0 0 m /four glyphshow 978 | grestore 979 | 2.000 setlinewidth 980 | 2 setlinecap 981 | [] 0 setdash 982 | 0.122 0.467 0.706 setrgbcolor 983 | gsave 984 | c0 985 | 336.368578 140.6464 m 986 | 230.4 167.138545 l 987 | 177.415711 140.6464 l 988 | 230.4 61.169966 l 989 | 336.368578 140.6464 l 990 | stroke 991 | grestore 992 | 1.000 setlinewidth 993 | 0 setlinecap 994 | gsave 995 | c0 996 | /o { 997 | gsave 998 | newpath 999 | translate 1000 | 1.0 setlinewidth 1001 | 1 setlinejoin 1002 | 1003 | 0 setlinecap 1004 | 1005 | 0 -3 m 1006 | 0.795609 -3 1.55874 -2.683901 2.12132 -2.12132 c 1007 | 2.683901 -1.55874 3 -0.795609 3 0 c 1008 | 3 0.795609 2.683901 1.55874 2.12132 2.12132 c 1009 | 1.55874 2.683901 0.795609 3 0 3 c 1010 | -0.795609 3 -1.55874 2.683901 -2.12132 2.12132 c 1011 | -2.683901 1.55874 -3 0.795609 -3 0 c 1012 | -3 -0.795609 -2.683901 -1.55874 -2.12132 -2.12132 c 1013 | -1.55874 -2.683901 -0.795609 -3 0 -3 c 1014 | cl 1015 | 1016 | gsave 1017 | 0.122 0.467 0.706 setrgbcolor 1018 | fill 1019 | grestore 1020 | stroke 1021 | grestore 1022 | } bind def 1023 | 336.369 140.646 o 1024 | 230.4 167.139 o 1025 | 177.416 140.646 o 1026 | 230.4 61.17 o 1027 | 336.369 140.646 o 1028 | grestore 1029 | 2.000 setlinewidth 1030 | 2 setlinecap 1031 | 1.000 0.498 0.055 setrgbcolor 1032 | gsave 1033 | c0 1034 | 309.876434 140.6464 m 1035 | 230.4 167.138545 l 1036 | 124.431422 140.6464 l 1037 | 230.4 87.662111 l 1038 | 309.876434 140.6464 l 1039 | stroke 1040 | grestore 1041 | 1.000 setlinewidth 1042 | 0 setlinecap 1043 | gsave 1044 | c0 1045 | /o { 1046 | gsave 1047 | newpath 1048 | translate 1049 | 1.0 setlinewidth 1050 | 1 setlinejoin 1051 | 1052 | 0 setlinecap 1053 | 1054 | 0 -3 m 1055 | 0.795609 -3 1.55874 -2.683901 2.12132 -2.12132 c 1056 | 2.683901 -1.55874 3 -0.795609 3 0 c 1057 | 3 0.795609 2.683901 1.55874 2.12132 2.12132 c 1058 | 1.55874 2.683901 0.795609 3 
0 3 c 1059 | -0.795609 3 -1.55874 2.683901 -2.12132 2.12132 c 1060 | -2.683901 1.55874 -3 0.795609 -3 0 c 1061 | -3 -0.795609 -2.683901 -1.55874 -2.12132 -2.12132 c 1062 | -1.55874 -2.683901 -0.795609 -3 0 -3 c 1063 | cl 1064 | 1065 | gsave 1066 | 1.000 0.498 0.055 setrgbcolor 1067 | fill 1068 | grestore 1069 | stroke 1070 | grestore 1071 | } bind def 1072 | 309.876 140.646 o 1073 | 230.4 167.139 o 1074 | 124.431 140.646 o 1075 | 230.4 87.6621 o 1076 | 309.876 140.646 o 1077 | grestore 1078 | 2.000 setlinewidth 1079 | 2 setlinecap 1080 | 0.173 0.627 0.173 setrgbcolor 1081 | gsave 1082 | c0 1083 | 309.876434 140.6464 m 1084 | 230.4 167.138545 l 1085 | 177.415711 140.6464 l 1086 | 230.4 34.677822 l 1087 | 309.876434 140.6464 l 1088 | stroke 1089 | grestore 1090 | 1.000 setlinewidth 1091 | 0 setlinecap 1092 | gsave 1093 | c0 1094 | /o { 1095 | gsave 1096 | newpath 1097 | translate 1098 | 1.0 setlinewidth 1099 | 1 setlinejoin 1100 | 1101 | 0 setlinecap 1102 | 1103 | 0 -3 m 1104 | 0.795609 -3 1.55874 -2.683901 2.12132 -2.12132 c 1105 | 2.683901 -1.55874 3 -0.795609 3 0 c 1106 | 3 0.795609 2.683901 1.55874 2.12132 2.12132 c 1107 | 1.55874 2.683901 0.795609 3 0 3 c 1108 | -0.795609 3 -1.55874 2.683901 -2.12132 2.12132 c 1109 | -2.683901 1.55874 -3 0.795609 -3 0 c 1110 | -3 -0.795609 -2.683901 -1.55874 -2.12132 -2.12132 c 1111 | -1.55874 -2.683901 -0.795609 -3 0 -3 c 1112 | cl 1113 | 1114 | gsave 1115 | 0.173 0.627 0.173 setrgbcolor 1116 | fill 1117 | grestore 1118 | stroke 1119 | grestore 1120 | } bind def 1121 | 309.876 140.646 o 1122 | 230.4 167.139 o 1123 | 177.416 140.646 o 1124 | 230.4 34.6778 o 1125 | 309.876 140.646 o 1126 | grestore 1127 | 2.000 setlinewidth 1128 | 2 setlinecap 1129 | 0.839 0.153 0.157 setrgbcolor 1130 | gsave 1131 | c0 1132 | 336.368578 140.6464 m 1133 | 230.4 167.138545 l 1134 | 177.415711 140.6464 l 1135 | 230.4 61.169966 l 1136 | 336.368578 140.6464 l 1137 | stroke 1138 | grestore 1139 | 1.000 setlinewidth 1140 | 0 setlinecap 1141 | gsave 1142 
| c0 1143 | /o { 1144 | gsave 1145 | newpath 1146 | translate 1147 | 1.0 setlinewidth 1148 | 1 setlinejoin 1149 | 1150 | 0 setlinecap 1151 | 1152 | 0 -3 m 1153 | 0.795609 -3 1.55874 -2.683901 2.12132 -2.12132 c 1154 | 2.683901 -1.55874 3 -0.795609 3 0 c 1155 | 3 0.795609 2.683901 1.55874 2.12132 2.12132 c 1156 | 1.55874 2.683901 0.795609 3 0 3 c 1157 | -0.795609 3 -1.55874 2.683901 -2.12132 2.12132 c 1158 | -2.683901 1.55874 -3 0.795609 -3 0 c 1159 | -3 -0.795609 -2.683901 -1.55874 -2.12132 -2.12132 c 1160 | -1.55874 -2.683901 -0.795609 -3 0 -3 c 1161 | cl 1162 | 1163 | gsave 1164 | 0.839 0.153 0.157 setrgbcolor 1165 | fill 1166 | grestore 1167 | stroke 1168 | grestore 1169 | } bind def 1170 | 336.369 140.646 o 1171 | 230.4 167.139 o 1172 | 177.416 140.646 o 1173 | 230.4 61.17 o 1174 | 336.369 140.646 o 1175 | grestore 1176 | 0.800 setlinewidth 1177 | 0 setlinejoin 1178 | 2 setlinecap 1179 | 0.000 setgray 1180 | gsave 1181 | 340.3424 140.6464 m 1182 | 340.3424 155.083904 337.498527 169.381021 331.973533 182.719535 c 1183 | 326.44854 196.058049 318.349873 208.17856 308.141017 218.387417 c 1184 | 297.93216 228.596273 285.811649 236.69494 272.473135 242.219933 c 1185 | 259.134621 247.744927 244.837504 250.5888 230.4 250.5888 c 1186 | 215.962496 250.5888 201.665379 247.744927 188.326865 242.219933 c 1187 | 174.988351 236.69494 162.86784 228.596273 152.658983 218.387417 c 1188 | 142.450127 208.17856 134.35146 196.058049 128.826467 182.719535 c 1189 | 123.301473 169.381021 120.4576 155.083904 120.4576 140.6464 c 1190 | 120.4576 126.208896 123.301473 111.911779 128.826467 98.573265 c 1191 | 134.35146 85.234751 142.450127 73.11424 152.658983 62.905383 c 1192 | 162.86784 52.696527 174.988351 44.59786 188.326865 39.072867 c 1193 | 201.665379 33.547873 215.962496 30.704 230.4 30.704 c 1194 | 244.837504 30.704 259.134621 33.547873 272.473135 39.072867 c 1195 | 285.811649 44.59786 297.93216 52.696527 308.141017 62.905383 c 1196 | 318.349873 73.11424 326.44854 85.234751 
331.973533 98.573265 c 1197 | 337.498527 111.911779 340.3424 126.208896 340.3424 140.6464 c 1198 | stroke 1199 | grestore 1200 | 1.000 setlinewidth 1201 | 0 setlinecap 1202 | gsave 1203 | 100.86912 272.57728 m 1204 | 359.93088 272.57728 l 1205 | 361.53088 272.57728 362.33088 273.37728 362.33088 274.97728 c 1206 | 362.33088 324.699155 l 1207 | 362.33088 326.299155 361.53088 327.099155 359.93088 327.099155 c 1208 | 100.86912 327.099155 l 1209 | 99.26912 327.099155 98.46912 326.299155 98.46912 324.699155 c 1210 | 98.46912 274.97728 l 1211 | 98.46912 273.37728 99.26912 272.57728 100.86912 272.57728 c 1212 | cl 1213 | gsave 1214 | 1.000 setgray 1215 | fill 1216 | grestore 1217 | stroke 1218 | grestore 1219 | /DejaVuSans 10.000 selectfont 1220 | gsave 1221 | 1222 | 181.736 314.705 translate 1223 | 0 rotate 1224 | 0 0 m /W glyphshow 1225 | 9.2627 0 m /e glyphshow 1226 | 15.415 0 m /i glyphshow 1227 | 18.1934 0 m /g glyphshow 1228 | 24.541 0 m /h glyphshow 1229 | 30.8789 0 m /t glyphshow 1230 | 34.7998 0 m /i glyphshow 1231 | 37.5781 0 m /n glyphshow 1232 | 43.916 0 m /g glyphshow 1233 | 50.2637 0 m /space glyphshow 1234 | 53.4424 0 m /m glyphshow 1235 | 63.1836 0 m /e glyphshow 1236 | 69.3359 0 m /t glyphshow 1237 | 73.2568 0 m /h glyphshow 1238 | 79.5947 0 m /o glyphshow 1239 | 85.7129 0 m /d glyphshow 1240 | 92.0605 0 m /s glyphshow 1241 | grestore 1242 | 2.000 setlinewidth 1243 | 1 setlinejoin 1244 | 2 setlinecap 1245 | 0.122 0.467 0.706 setrgbcolor 1246 | gsave 1247 | 103.26912 301.70228 m 1248 | 115.26912 301.70228 l 1249 | 127.26912 301.70228 l 1250 | stroke 1251 | grestore 1252 | 1.000 setlinewidth 1253 | 0 setlinecap 1254 | gsave 1255 | /o { 1256 | gsave 1257 | newpath 1258 | translate 1259 | 1.0 setlinewidth 1260 | 1 setlinejoin 1261 | 1262 | 0 setlinecap 1263 | 1264 | 0 -3 m 1265 | 0.795609 -3 1.55874 -2.683901 2.12132 -2.12132 c 1266 | 2.683901 -1.55874 3 -0.795609 3 0 c 1267 | 3 0.795609 2.683901 1.55874 2.12132 2.12132 c 1268 | 1.55874 2.683901 0.795609 3 0 3 
c 1269 | -0.795609 3 -1.55874 2.683901 -2.12132 2.12132 c 1270 | -2.683901 1.55874 -3 0.795609 -3 0 c 1271 | -3 -0.795609 -2.683901 -1.55874 -2.12132 -2.12132 c 1272 | -1.55874 -2.683901 -0.795609 -3 0 -3 c 1273 | cl 1274 | 1275 | gsave 1276 | 0.122 0.467 0.706 setrgbcolor 1277 | fill 1278 | grestore 1279 | stroke 1280 | grestore 1281 | } bind def 1282 | 115.269 301.702 o 1283 | grestore 1284 | 0.000 setgray 1285 | /DejaVuSans 12.000 selectfont 1286 | gsave 1287 | 1288 | 136.869 297.502 translate 1289 | 0 rotate 1290 | 0 0 m /A glyphshow 1291 | 8.20898 0 m /H glyphshow 1292 | 17.2324 0 m /P glyphshow 1293 | grestore 1294 | 2.000 setlinewidth 1295 | 2 setlinecap 1296 | 1.000 0.498 0.055 setrgbcolor 1297 | gsave 1298 | 103.26912 284.07728 m 1299 | 115.26912 284.07728 l 1300 | 127.26912 284.07728 l 1301 | stroke 1302 | grestore 1303 | 1.000 setlinewidth 1304 | 0 setlinecap 1305 | gsave 1306 | /o { 1307 | gsave 1308 | newpath 1309 | translate 1310 | 1.0 setlinewidth 1311 | 1 setlinejoin 1312 | 1313 | 0 setlinecap 1314 | 1315 | 0 -3 m 1316 | 0.795609 -3 1.55874 -2.683901 2.12132 -2.12132 c 1317 | 2.683901 -1.55874 3 -0.795609 3 0 c 1318 | 3 0.795609 2.683901 1.55874 2.12132 2.12132 c 1319 | 1.55874 2.683901 0.795609 3 0 3 c 1320 | -0.795609 3 -1.55874 2.683901 -2.12132 2.12132 c 1321 | -2.683901 1.55874 -3 0.795609 -3 0 c 1322 | -3 -0.795609 -2.683901 -1.55874 -2.12132 -2.12132 c 1323 | -1.55874 -2.683901 -0.795609 -3 0 -3 c 1324 | cl 1325 | 1326 | gsave 1327 | 1.000 0.498 0.055 setrgbcolor 1328 | fill 1329 | grestore 1330 | stroke 1331 | grestore 1332 | } bind def 1333 | 115.269 284.077 o 1334 | grestore 1335 | 0.000 setgray 1336 | /DejaVuSans 12.000 selectfont 1337 | gsave 1338 | 1339 | 136.869 279.877 translate 1340 | 0 rotate 1341 | 0 0 m /S glyphshow 1342 | 7.61719 0 m /W glyphshow 1343 | 18.8574 0 m /A glyphshow 1344 | 27.0664 0 m /R glyphshow 1345 | 34.9043 0 m /A glyphshow 1346 | grestore 1347 | 2.000 setlinewidth 1348 | 2 setlinecap 1349 | 0.173 0.627 0.173 
setrgbcolor 1350 | gsave 1351 | 275.634005 301.70228 m 1352 | 287.634005 301.70228 l 1353 | 299.634005 301.70228 l 1354 | stroke 1355 | grestore 1356 | 1.000 setlinewidth 1357 | 0 setlinecap 1358 | gsave 1359 | /o { 1360 | gsave 1361 | newpath 1362 | translate 1363 | 1.0 setlinewidth 1364 | 1 setlinejoin 1365 | 1366 | 0 setlinecap 1367 | 1368 | 0 -3 m 1369 | 0.795609 -3 1.55874 -2.683901 2.12132 -2.12132 c 1370 | 2.683901 -1.55874 3 -0.795609 3 0 c 1371 | 3 0.795609 2.683901 1.55874 2.12132 2.12132 c 1372 | 1.55874 2.683901 0.795609 3 0 3 c 1373 | -0.795609 3 -1.55874 2.683901 -2.12132 2.12132 c 1374 | -2.683901 1.55874 -3 0.795609 -3 0 c 1375 | -3 -0.795609 -2.683901 -1.55874 -2.12132 -2.12132 c 1376 | -1.55874 -2.683901 -0.795609 -3 0 -3 c 1377 | cl 1378 | 1379 | gsave 1380 | 0.173 0.627 0.173 setrgbcolor 1381 | fill 1382 | grestore 1383 | stroke 1384 | grestore 1385 | } bind def 1386 | 287.634 301.702 o 1387 | grestore 1388 | 0.000 setgray 1389 | /DejaVuSans 12.000 selectfont 1390 | gsave 1391 | 1392 | 309.234 297.502 translate 1393 | 0 rotate 1394 | 0 0 m /L glyphshow 1395 | 6.68555 0 m /B glyphshow 1396 | 14.543 0 m /W glyphshow 1397 | 25.7832 0 m /A glyphshow 1398 | grestore 1399 | 2.000 setlinewidth 1400 | 2 setlinecap 1401 | 0.839 0.153 0.157 setrgbcolor 1402 | gsave 1403 | 275.634005 284.07728 m 1404 | 287.634005 284.07728 l 1405 | 299.634005 284.07728 l 1406 | stroke 1407 | grestore 1408 | 1.000 setlinewidth 1409 | 0 setlinecap 1410 | gsave 1411 | /o { 1412 | gsave 1413 | newpath 1414 | translate 1415 | 1.0 setlinewidth 1416 | 1 setlinejoin 1417 | 1418 | 0 setlinecap 1419 | 1420 | 0 -3 m 1421 | 0.795609 -3 1.55874 -2.683901 2.12132 -2.12132 c 1422 | 2.683901 -1.55874 3 -0.795609 3 0 c 1423 | 3 0.795609 2.683901 1.55874 2.12132 2.12132 c 1424 | 1.55874 2.683901 0.795609 3 0 3 c 1425 | -0.795609 3 -1.55874 2.683901 -2.12132 2.12132 c 1426 | -2.683901 1.55874 -3 0.795609 -3 0 c 1427 | -3 -0.795609 -2.683901 -1.55874 -2.12132 -2.12132 c 1428 | -1.55874 
-2.683901 -0.795609 -3 0 -3 c 1429 | cl 1430 | 1431 | gsave 1432 | 0.839 0.153 0.157 setrgbcolor 1433 | fill 1434 | grestore 1435 | stroke 1436 | grestore 1437 | } bind def 1438 | 287.634 284.077 o 1439 | grestore 1440 | 0.000 setgray 1441 | /DejaVuSans 12.000 selectfont 1442 | gsave 1443 | 1444 | 309.234 279.877 translate 1445 | 0 rotate 1446 | 0 0 m /S glyphshow 1447 | 7.86719 0 m /A glyphshow 1448 | 16.0762 0 m /P glyphshow 1449 | 23.3125 0 m /E glyphshow 1450 | 30.8945 0 m /V glyphshow 1451 | 38.8535 0 m /O glyphshow 1452 | grestore 1453 | 1454 | end 1455 | showpage 1456 | -------------------------------------------------------------------------------- /examples/results_update/radar.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results_update/radar.pdf -------------------------------------------------------------------------------- /examples/results_update/radar_weights.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/energyinpython/crispyn/ff2d3f0ee9bfe28a5fe1b228a3efdaf743805b33/examples/results_update/radar_weights.pdf -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=42"] 3 | build-backend = "setuptools.build_meta" -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib==3.7.1 2 | numpy==1.24.2 3 | pandas==1.5.3 4 | scipy==1.11.2 5 | seaborn==0.12.2 6 | setuptools==65.5.0 7 | -------------------------------------------------------------------------------- /requirements_python_ver_min.txt: 
-------------------------------------------------------------------------------- 1 | Detecting python files.. 2 | Analyzing 9 files using 8 processes.. 3 | ~2, ~3 D:\HUAWEI\CRISPYN 2023\crispyn\src\crispyn\__init__.py 4 | 2.4, 3.0 D:\HUAWEI\CRISPYN 2023\crispyn\src\crispyn\additions.py 5 | 2.3, 3.0 D:\HUAWEI\CRISPYN 2023\crispyn\src\crispyn\correlations.py 6 | ~2, ~3 D:\HUAWEI\CRISPYN 2023\crispyn\src\crispyn\mcda_methods\__init__.py 7 | !2, 3.4 D:\HUAWEI\CRISPYN 2023\crispyn\src\crispyn\mcda_methods\mcda_method.py 8 | 2.4, 3.0 D:\HUAWEI\CRISPYN 2023\crispyn\src\crispyn\mcda_methods\vikor.py 9 | 2.4, 3.0 D:\HUAWEI\CRISPYN 2023\crispyn\src\crispyn\mcda_methods\vikor_smaa.py 10 | ~2, ~3 D:\HUAWEI\CRISPYN 2023\crispyn\src\crispyn\normalizations.py 11 | 2.6, 3.0 D:\HUAWEI\CRISPYN 2023\crispyn\src\crispyn\weighting_methods.py 12 | 13 | Tips: 14 | - Since '# novm' or '# novermin' weren't used, a speedup can be achieved using: --no-parse-comments 15 | (disable using: --no-tips) 16 | 17 | Minimum required versions: 3.4 18 | Incompatible versions: 2 19 | -------------------------------------------------------------------------------- /requirements_src.txt: -------------------------------------------------------------------------------- 1 | numpy==1.24.2 2 | scipy==1.11.2 -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | with open("README.md", "r", encoding="utf-8") as fh: 4 | long_description = fh.read() 5 | 6 | setuptools.setup( 7 | name="crispyn", 8 | version="0.0.6", 9 | author="Aleksandra Bączkiewicz", 10 | author_email="aleksandra.baczkiewicz@phd.usz.edu.pl", 11 | description="CRIteria Significance determining in PYthoN - The Python 3 Library for determining criteria weights for MCDA methods.", 12 | long_description=long_description, 13 | long_description_content_type="text/markdown", 14 | 
# reverse = True: descending order (for example TOPSIS, CODAS), False: ascending order (for example VIKOR, SPOTIS)
def rank_preferences(pref, reverse = True):
    """
    Rank alternatives according to MCDA preference function values. If more than one alternative
    has the same preference function value, they are given the same rank value (tie).

    Parameters
    ------------
    pref : ndarray
        Vector with MCDA preference function values for alternatives
    reverse : bool
        True for MCDA methods that rank alternatives in descending order
        (for example, TOPSIS, CODAS) and False for MCDA methods that rank
        alternatives in ascending order (for example, VIKOR, SPOTIS)

    Returns
    ---------
    ndarray
        Vector with the alternatives ranking. The alternative with value 1 is
        the best and has the first position in the ranking.

    Examples
    ----------
    >>> rank = rank_preferences(pref, reverse = True)
    """

    # Create an array for the ranking values of alternatives
    rank = np.zeros(len(pref))
    # Sorted vector with preference values; the order (descending or ascending)
    # is determined by `reverse` and depends on the MCDA method
    sorted_pref = sorted(pref, reverse = reverse)
    # The best alternative gets rank 1
    pos = 1
    for i in range(len(sorted_pref) - 1):
        # indices in `pref` holding the i-th sorted preference value
        ind = np.where(sorted_pref[i] == pref)[0]
        rank[ind] = pos
        # advance the rank only when the next sorted value differs;
        # equal consecutive values (ties) keep the same rank
        if sorted_pref[i] != sorted_pref[i + 1]:
            pos += 1
    # Assign the final rank to the alternative(s) with the last sorted value.
    # Guarded so that empty input returns an empty ranking instead of raising
    # NameError/IndexError (the loop above never runs for len(pref) < 2).
    if len(sorted_pref):
        ind = np.where(sorted_pref[-1] == pref)[0]
        rank[ind] = pos
    return rank.astype(int)
# weighted Spearman ranking correlation coefficient rw
def weighted_spearman(R, Q):
    """
    Calculate Weighted Spearman rank correlation coefficient between two vectors

    Parameters
    -----------
    R : ndarray
        First vector containing values
    Q : ndarray
        Second vector containing values

    Returns
    --------
    float
        Value of correlation coefficient between two vectors

    Examples
    ---------
    >>> rW = weighted_spearman(R, Q)
    """
    n = len(R)
    # positions near the top of both rankings receive larger weights
    top_weight = (n - R + 1) + (n - Q + 1)
    num = 6 * np.sum((R - Q) ** 2 * top_weight)
    # n*(n**3 + n**2 - n - 1) == n**4 + n**3 - n**2 - n
    den = n * (n ** 3 + n ** 2 - n - 1)
    return 1 - num / den


# Pearson coefficient
def pearson_coeff(R, Q):
    """
    Calculate Pearson correlation coefficient between two vectors

    Parameters
    -----------
    R : ndarray
        First vector containing values
    Q : ndarray
        Second vector containing values

    Returns
    --------
    float
        Value of correlation coefficient between two vectors

    Examples
    ----------
    >>> corr = pearson_coeff(R, Q)
    """
    # center both vectors, then take the normalized dot product
    dr = R - np.mean(R)
    dq = Q - np.mean(Q)
    return np.sum(dr * dq) / np.sqrt(np.sum(dr ** 2) * np.sum(dq ** 2))
class MCDA_method(ABC):

    def __call__(self, matrix, weights, types):
        """
        Score alternatives from decision matrix `matrix` using criteria weights `weights`
        and criteria types `types`.

        Parameters
        ----------
        matrix : ndarray
            Decision matrix with performance values for m alternatives in rows and
            n criteria in columns.
        weights : ndarray
            Vector with criteria weights, or a matrix whose rows are weight vectors;
            its last dimension must equal the number of criteria n of `matrix`.
        types : ndarray
            Vector with criteria types containing values of 1 for profit criteria and
            -1 for cost criteria, with size equal to number of columns n of `matrix`.
        """
        # abstract entry point: concrete MCDA methods provide the scoring logic
        pass

    @staticmethod
    def _verify_input_data(matrix, weights, types):
        """Validate input shapes and criteria type values; raise ValueError on mismatch."""
        m, n = matrix.shape
        # one-dimensional weights: a single weight vector
        if weights.ndim == 1:
            if len(weights) != n:
                raise ValueError('The size of the weight vector must be the same as the number of criteria')
        # two-dimensional weights: several weight vectors stacked in rows
        elif weights.ndim == 2:
            if weights.shape[1] != n:
                raise ValueError('The number of columns of matrix with weight vectors must be the same as the number of criteria')
        if len(types) != n:
            raise ValueError('The size of the types vector must be the same as the number of criteria')
        # only profit (1) and cost (-1) markers are permitted
        if not np.all((types == 1) | (types == -1)):
            raise ValueError('Criteria types can only have a value of 1 for profits and -1 for costs')
class VIKOR(MCDA_method):
    def __init__(self, normalization_method = None, v = 0.5):
        """Create the VIKOR method object.

        Parameters
        -----------
        normalization_method : function
            VIKOR does not use normalization by default, thus `normalization_method` is set
            to None by default. However, you can choose a method for normalization of the
            decision matrix from `normalizations`. It is used as `normalization_method(X, types)`
            where `X` is a decision matrix and `types` is a vector with criteria types
            where 1 means profit and -1 means cost.
        v : float
            parameter that is the weight of strategy of the majority of criteria
            (the maximum group utility)
        """
        self.v = v
        self.normalization_method = normalization_method

    def __call__(self, matrix, weights, types):
        """
        Score alternatives provided in decision matrix `matrix` using criteria `weights`
        and criteria `types`.

        Parameters
        -----------
        matrix : ndarray
            Decision matrix with m alternatives in rows and n criteria in columns.
        weights : ndarray
            Matrix containing vectors with criteria weights in subsequent rows.
            Sum of weights in each vector must be equal to 1.
        types : ndarray
            Vector with criteria types. Profit criteria are represented by 1 and cost by -1.

        Returns
        --------
        ndarray
            Matrix with vectors containing preference values of each alternative.
            The best alternative has the lowest preference value. Vectors are placed
            in subsequent columns of the matrix.

        Examples
        ---------
        >>> vikor = VIKOR(normalization_method = minmax_normalization)
        >>> pref = vikor(matrix, weights, types)
        >>> rank = np.zeros((pref.shape))
        >>> for i in range(pref.shape[1]):
        >>>     rank[:, i] = rank_preferences(pref[:, i], reverse = False)
        """
        VIKOR._verify_input_data(matrix, weights, types)
        return VIKOR._vikor(matrix, weights, types, self.normalization_method, self.v)

    @staticmethod
    def _vikor(matrix, weights, types, normalization_method, v):
        """Core VIKOR computation: one preference column per weight vector."""
        # a single weight vector is treated as a one-row matrix of weight vectors
        if weights.ndim == 1:
            weights = weights.reshape(1, -1)
        # preference vectors are collected in subsequent columns
        pref = np.zeros((matrix.shape[0], weights.shape[0]))
        for el, w in enumerate(weights):
            if normalization_method is None:
                # best `fstar` and worst `fmin` value of each criterion function:
                # maximum for profit criteria, minimum for cost criteria (and vice versa)
                col_max = np.amax(matrix, axis = 0)
                col_min = np.amin(matrix, axis = 0)
                fstar = np.where(types == 1, col_max, col_min)
                fmin = np.where(types == 1, col_min, col_max)
                weighted_matrix = w * ((fstar - matrix) / (fstar - fmin))
            else:
                # with a user-provided normalization the best/worst values come from
                # the normalized matrix directly
                nmatrix = normalization_method(matrix, types)
                fstar = np.amax(nmatrix, axis = 0)
                fmin = np.amin(nmatrix, axis = 0)
                weighted_matrix = w * ((fstar - nmatrix) / (fstar - fmin))

            # group utility S and individual regret R
            S = np.sum(weighted_matrix, axis = 1)
            R = np.amax(weighted_matrix, axis = 1)
            # compromise measure Q blends S and R with the strategy weight v
            Q = v * (S - np.min(S)) / (np.max(S) - np.min(S)) + \
                (1 - v) * (R - np.min(R)) / (np.max(R) - np.min(R))
            pref[:, el] = Q
        return pref
class VIKOR_SMAA():
    def __init__(self, normalization_method = None, v = 0.5):
        """Create the VIKOR SMAA method object.

        Parameters
        -----------
        normalization_method : function
            VIKOR does not use normalization by default, thus `normalization_method` is set
            to None by default. However, you can choose a method for normalization of the
            decision matrix from `normalizations`. It is used as `normalization_method(X, types)`
            where `X` is a decision matrix and `types` is a vector with criteria types
            where 1 means profit and -1 means cost.
        v : float
            parameter that is the weight of strategy of the majority of criteria
            (the maximum group utility)
        """
        self.v = v
        self.normalization_method = normalization_method

    def __call__(self, matrix, weights, types):
        """
        Score alternatives provided in decision matrix `matrix` using criteria `weights`
        and criteria `types`.

        Parameters
        -----------
        matrix : ndarray
            Decision matrix with m alternatives in rows and n criteria in columns.
        weights : ndarray
            Matrix with i vectors in rows of n weights in columns. i means number of
            iterations of SMAA.
        types : ndarray
            Vector with criteria types. Profit criteria are represented by 1 and cost by -1.

        Returns
        --------
        ndarray, ndarray, ndarray
            Matrix with acceptability index values for each alternative in rows in relation
            to each rank in columns,
            Matrix with central weight vectors for each alternative in rows,
            Vector with the final ranking of alternatives.

        Examples
        ---------
        >>> vikor_smaa = VIKOR_SMAA(normalization_method = minmax_normalization)
        >>> rank_acceptability_index, central_weight_vector, rank_scores = vikor_smaa(matrix, weights, types)
        """
        return VIKOR_SMAA._vikor_smaa(self, matrix, weights, types, self.normalization_method, self.v)

    def _generate_weights(self, n, iterations):
        """
        Generate multiple criteria weight vectors, uniformly distributed on the unit simplex.

        Parameters
        -----------
        n : int
            Number of criteria
        iterations : int
            Number of weight vectors to generate

        Returns
        ----------
        ndarray
            Matrix containing in rows vectors with weights for n criteria
        """
        weight_vectors = np.zeros((iterations, n))
        for it in range(iterations):
            # FIX: the previous implementation drew n uniforms (not n - 1 as documented),
            # discarded the last interval and renormalized, which distorts the uniform
            # distribution on the simplex. Drawing n - 1 cut points in [0, 1] and taking
            # the n interval lengths yields weights that sum to 1 exactly.
            cut_points = np.sort(np.random.uniform(0, 1, n - 1))
            # bound the cut points with 0 and 1 -> n consecutive intervals
            bounded = np.concatenate(([0.0], cut_points, [1.0]))
            # interval lengths are the generated weights (w[j] = q[j] - q[j-1])
            weight_vectors[it, :] = np.diff(bounded)
        return weight_vectors

    @staticmethod
    def _vikor_smaa(self, matrix, weights, types, normalization_method, v):
        """Run VIKOR for every weight vector and aggregate SMAA statistics."""
        m, n = matrix.shape

        # Central weight vector for each alternative
        central_weight_vector = np.zeros((m, n))
        # Rank acceptability index of each place for each alternative
        rank_acceptability_index = np.zeros((m, m))
        # Accumulated rank scores (number of outranked alternatives per iteration)
        rank_score = np.zeros(m)

        # FIX: forward the chosen normalization method and v parameter; previously
        # VIKOR() was created with defaults so both arguments were silently ignored.
        vikor = VIKOR(normalization_method = normalization_method, v = v)

        # Preference values for all weight vectors: one column per SMAA iteration
        pref = vikor(matrix, weights, types)

        rank = np.zeros(pref.shape)
        for i in range(pref.shape[1]):
            # in VIKOR better alternatives have lower preference values
            rank[:, i] = rank_preferences(pref[:, i], reverse = False)
            rr = rank[:, i]
            for k, r in enumerate(rr):
                # count how often alternative k lands on rank r
                rank_acceptability_index[k, int(r - 1)] += 1
                # rank score: add 1 for each alternative ranked worse than k-th
                better_ranks = rr[rr > rr[k]]
                rank_score[k] += len(better_ranks)

            # accumulate the weight vector for the best-scored alternative
            ind_min = np.argmin(rr)
            central_weight_vector[ind_min, :] += weights[i, :]

        # normalize accumulators by the number of SMAA iterations
        rank_acceptability_index = rank_acceptability_index / pref.shape[1]

        central_weight_vector = central_weight_vector / pref.shape[1]
        for i in range(m):
            # renormalize only rows that accumulated any weight (avoid 0 / 0)
            if np.sum(central_weight_vector[i, :]):
                central_weight_vector[i, :] = central_weight_vector[i, :] / np.sum(central_weight_vector[i, :])

        # final ranking: more outranked alternatives -> better rank
        rank_score = rank_score / pref.shape[1]
        rank_scores = rank_preferences(rank_score, reverse = True)

        return rank_acceptability_index, central_weight_vector, rank_scores
# Linear normalization
def linear_normalization(matrix, types):
    """
    Normalize decision matrix using linear normalization method.

    Parameters
    -----------
    matrix : ndarray
        Decision matrix with m alternatives in rows and n criteria in columns
    types : ndarray
        Criteria types. Profit criteria are represented by 1 and cost by -1.

    Returns
    --------
    ndarray
        Normalized decision matrix

    Examples
    ----------
    >>> nmatrix = linear_normalization(matrix, types)
    """
    profit = types == 1
    cost = types == -1
    x_norm = np.zeros(np.shape(matrix))
    # profit criteria: divide by the column maximum; cost criteria: column minimum over value
    x_norm[:, profit] = matrix[:, profit] / np.amax(matrix[:, profit], axis = 0)
    x_norm[:, cost] = np.amin(matrix[:, cost], axis = 0) / matrix[:, cost]
    return x_norm


# Minimum-Maximum normalization
def minmax_normalization(matrix, types):
    """
    Normalize decision matrix using minimum-maximum normalization method.

    Parameters
    -----------
    matrix : ndarray
        Decision matrix with m alternatives in rows and n criteria in columns
    types : ndarray
        Criteria types. Profit criteria are represented by 1 and cost by -1.

    Returns
    --------
    ndarray
        Normalized decision matrix

    Examples
    ----------
    >>> nmatrix = minmax_normalization(matrix, types)
    """
    profit = types == 1
    cost = types == -1
    x_norm = np.zeros(matrix.shape)
    # profit criteria: scale each column to [0, 1] relative to its range
    lo_p = np.amin(matrix[:, profit], axis = 0)
    hi_p = np.amax(matrix[:, profit], axis = 0)
    x_norm[:, profit] = (matrix[:, profit] - lo_p) / (hi_p - lo_p)
    # cost criteria: same scaling but reversed so that smaller raw values score higher
    lo_c = np.amin(matrix[:, cost], axis = 0)
    hi_c = np.amax(matrix[:, cost], axis = 0)
    x_norm[:, cost] = (hi_c - matrix[:, cost]) / (hi_c - lo_c)
    return x_norm


# Maximum normalization
def max_normalization(matrix, types):
    """
    Normalize decision matrix using maximum normalization method.

    Parameters
    -----------
    matrix : ndarray
        Decision matrix with m alternatives in rows and n criteria in columns
    types : ndarray
        Criteria types. Profit criteria are represented by 1 and cost by -1.

    Returns
    --------
    ndarray
        Normalized decision matrix

    Examples
    ----------
    >>> nmatrix = max_normalization(matrix, types)
    """
    # divide every column by its maximum, then flip cost columns
    ratio = matrix / np.amax(matrix, axis = 0)
    ratio[:, types == -1] = 1 - ratio[:, types == -1]
    return ratio


# Sum normalization
def sum_normalization(matrix, types):
    """
    Normalize decision matrix using sum normalization method.

    Parameters
    -----------
    matrix : ndarray
        Decision matrix with m alternatives in rows and n criteria in columns
    types : ndarray
        Criteria types. Profit criteria are represented by 1 and cost by -1.

    Returns
    --------
    ndarray
        Normalized decision matrix

    Examples
    ----------
    >>> nmatrix = sum_normalization(matrix, types)
    """
    profit = types == 1
    cost = types == -1
    x_norm = np.zeros(matrix.shape)
    # profit criteria: share of the column sum
    x_norm[:, profit] = matrix[:, profit] / np.sum(matrix[:, profit], axis = 0)
    # cost criteria: share of the sum of reciprocals
    reciprocal = 1 / matrix[:, cost]
    x_norm[:, cost] = reciprocal / np.sum(reciprocal, axis = 0)
    return x_norm


# Vector normalization
def vector_normalization(matrix, types):
    """
    Normalize decision matrix using vector normalization method.

    Parameters
    -----------
    matrix : ndarray
        Decision matrix with m alternatives in rows and n criteria in columns
    types : ndarray
        Criteria types. Profit criteria are represented by 1 and cost by -1.

    Returns
    --------
    ndarray
        Normalized decision matrix

    Examples
    -----------
    >>> nmatrix = vector_normalization(matrix, types)
    """
    profit = types == 1
    cost = types == -1
    x_norm = np.zeros(matrix.shape)
    # divide each column by its Euclidean norm; complement cost columns to 1
    x_norm[:, profit] = matrix[:, profit] / (np.sum(matrix[:, profit] ** 2, axis = 0)) ** 0.5
    x_norm[:, cost] = 1 - (matrix[:, cost] / (np.sum(matrix[:, cost] ** 2, axis = 0)) ** 0.5)
    return x_norm
# Equal weighting
def equal_weighting(matrix):
    """
    Calculate criteria weights using objective Equal weighting method.

    Parameters
    -----------
    matrix : ndarray
        Decision matrix with performance values of m alternatives and n criteria.

    Returns
    --------
    ndarray
        Vector of criteria weights.

    Examples
    ----------
    >>> weights = equal_weighting(matrix)
    """
    n = np.shape(matrix)[1]
    # every criterion gets the same share
    return np.full(n, 1 / n)


# Entropy weighting
def entropy_weighting(matrix):
    """
    Calculate criteria weights using objective Entropy weighting method.

    Parameters
    -----------
    matrix : ndarray
        Decision matrix with performance values of m alternatives and n criteria.

    Returns
    --------
    ndarray
        Vector of criteria weights.

    Examples
    ----------
    >>> weights = entropy_weighting(matrix)
    """
    # sum-normalize the matrix treating every criterion as profit
    types = np.ones(np.shape(matrix)[1])
    pij = sum_normalization(matrix, types)
    # transform negative values to positive before taking logarithms
    pij = np.abs(pij)
    m, n = np.shape(pij)

    # entropy terms: p * ln(p) only where p is non-zero (0 * ln(0) -> 0)
    H = np.zeros((m, n))
    nonzero = pij != 0
    H[nonzero] = pij[nonzero] * np.log(pij[nonzero])

    h = np.sum(H, axis = 0) * (-1 * ((np.log(m)) ** (-1)))

    # degree of diversification of each criterion
    d = 1 - h
    # weights are the normalized diversification degrees
    return d / np.sum(d)


# Standard Deviation weighting
def std_weighting(matrix):
    """
    Calculate criteria weights using objective Standard deviation weighting method.

    Parameters
    -----------
    matrix : ndarray
        Decision matrix with performance values of m alternatives and n criteria.

    Returns
    --------
    ndarray
        Vector of criteria weights.

    Examples
    ----------
    >>> weights = std_weighting(matrix)
    """
    # population standard deviation of each criterion column
    stdv = np.std(matrix, axis = 0)
    # normalize the deviations so that the weights sum to 1
    return stdv / np.sum(stdv)


# CRITIC weighting
def critic_weighting(matrix):
    """
    Calculate criteria weights using objective CRITIC weighting method.

    Parameters
    -----------
    matrix : ndarray
        Decision matrix with performance values of m alternatives and n criteria.

    Returns
    --------
    ndarray
        Vector of criteria weights.

    Examples
    ----------
    >>> weights = critic_weighting(matrix)
    """
    # min-max normalize the matrix treating every criterion as profit
    types = np.ones(np.shape(matrix)[1])
    x_norm = minmax_normalization(matrix, types)
    std = np.std(x_norm, axis = 0)
    n = x_norm.shape[1]

    # pairwise Pearson correlations between normalized criterion columns
    correlations = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            correlations[i, j] = pearson_coeff(x_norm[:, i], x_norm[:, j])

    # amount of information carried by each criterion: std * sum of (1 - r)
    C = std * np.sum(1 - correlations, axis = 0)
    return C / np.sum(C)


# Gini coefficient-based weighting
def gini_weighting(matrix):
    """
    Calculate criteria weights using objective Gini coefficient-based weighting method.

    Parameters
    ----------
    matrix : ndarray
        Decision matrix with performance values of m alternatives and n criteria.

    Returns
    --------
    ndarray
        Vector of criteria weights.

    Examples
    ---------
    >>> weights = gini_weighting(matrix)
    """
    m, n = np.shape(matrix)
    G = np.zeros(n)
    # Gini coefficient of each criterion column j = 1, 2, ..., n
    for j in range(n):
        col = matrix[:, j]
        acc = 0
        if np.mean(col):
            # mean-scaled form of the Gini coefficient
            denom = 2 * m ** 2 * (np.sum(col) / m)
            for value in col:
                acc += np.sum(np.abs(value - col) / denom)
        else:
            # fallback form when the column mean is zero
            denom = m ** 2 - m
            for value in col:
                acc += np.sum(np.abs(value - col) / denom)
        G[j] = acc
    # weights are the normalized Gini coefficients
    return G / np.sum(G)
# MEREC weighting
def merec_weighting(matrix, types):
    """
    Calculate criteria weights using objective MEREC weighting method.

    Parameters
    -----------
    matrix : ndarray
        Decision matrix with performance values of m alternatives and n criteria.
    types : ndarray
        Vector with criteria types.

    Returns
    --------
    ndarray
        Vector of criteria weights.

    Examples
    ---------
    >>> weights = merec_weighting(matrix, types)
    """
    # work on a copy with negative values transformed to positive
    X = np.abs(copy.deepcopy(matrix))
    m, n = X.shape

    # inverse linear normalization: min/x for profit criteria, x/max for cost criteria
    profit = types == 1
    cost = types == -1
    norm_matrix = np.zeros(X.shape)
    norm_matrix[:, profit] = np.min(X[:, profit], axis = 0) / X[:, profit]
    norm_matrix[:, cost] = X[:, cost] / np.max(X[:, cost], axis = 0)

    # overall performance with all criteria (logarithmic measure, equal weights)
    S = np.log(1 + ((1 / n) * np.sum(np.abs(np.log(norm_matrix)), axis = 1)))

    # performance after removing each criterion in turn
    Sp = np.zeros(X.shape)
    for j in range(n):
        reduced = np.delete(norm_matrix, j, axis = 1)
        Sp[:, j] = np.log(1 + ((1 / n) * np.sum(np.abs(np.log(reduced)), axis = 1)))

    # summed absolute deviation caused by removing each criterion
    E = np.sum(np.abs(Sp - S.reshape(-1, 1)), axis = 0)
    # weights are the normalized removal effects
    return E / np.sum(E)


# Statistical Variance weighting
def stat_var_weighting(matrix):
    """
    Calculate criteria weights using objective Statistical variance weighting method.

    Parameters
    ----------
    matrix : ndarray
        Decision matrix with performance values of m alternatives and n criteria.

    Returns
    -------
    ndarray
        Vector of criteria weights.

    Examples
    ---------
    >>> weights = stat_var_weighting(matrix)
    """
    # min-max normalize the matrix treating every criterion as profit
    types = np.ones(np.shape(matrix)[1])
    xn = minmax_normalization(matrix, types)
    # population variance of each normalized column
    v = np.mean(np.square(xn - np.mean(xn, axis = 0)), axis = 0)
    # weights are the normalized variances
    return v / np.sum(v)
# CILOS weighting
def cilos_weighting(matrix, types):
    """
    Calculate criteria weights using objective CILOS weighting method.

    Parameters
    ----------
    matrix : ndarray
        Decision matrix with performance values of m alternatives and n criteria.
    types : ndarray
        Vector with criteria types.

    Returns
    -------
    ndarray
        Vector of criteria weights.

    Examples
    ---------
    >>> weights = cilos_weighting(matrix, types)
    """
    xr = copy.deepcopy(matrix)
    # invert cost criteria so all columns behave like profit criteria
    xr[:, types == -1] = np.min(matrix[:, types == -1], axis = 0) / matrix[:, types == -1]
    # sum-normalize the converted matrix
    xn = xr / np.sum(xr, axis = 0)

    n = matrix.shape[1]
    # square matrix built from the rows that hold each criterion's maximum
    A = xn[np.argmax(xn, axis = 0), :]

    # relative impact loss matrix
    pij = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            pij[i, j] = (A[j, j] - A[i, j]) / A[j, j]

    # weight system matrix
    F = np.diag(-np.sum(pij - np.diag(np.diag(pij)), axis = 0)) + pij

    # criteria weights q solve the homogeneous linear system F q = 0
    q = linalg.null_space(F)
    weights = q / np.sum(q)
    return np.ravel(weights)


# IDOCRIW weighting
def idocriw_weighting(matrix, types):
    """
    Calculate criteria weights using objective IDOCRIW weighting method.

    Parameters
    ----------
    matrix : ndarray
        Decision matrix with performance values of m alternatives and n criteria.
    types : ndarray
        Vector with criteria types.

    Returns
    -------
    ndarray
        Vector of criteria weights.

    Examples
    ---------
    >>> weights = idocriw_weighting(matrix, types)
    """
    # aggregate the Entropy and CILOS weights into a single weight vector
    q = entropy_weighting(matrix)
    w = cilos_weighting(matrix, types)
    combined = q * w
    return combined / np.sum(combined)


# Angle weighting
def angle_weighting(matrix, types):
    """
    Calculate criteria weights using objective Angle weighting method.

    Parameters
    ----------
    matrix : ndarray
        Decision matrix with performance values of m alternatives and n criteria.
    types : ndarray
        Vector with criteria types.

    Returns
    -------
    ndarray
        Vector of criteria weights.

    Examples
    ---------
    >>> weights = angle_weighting(matrix, types)
    """
    m, n = matrix.shape
    # sum-normalize the decision matrix
    X = sum_normalization(matrix, types)
    # the reference attribute: a column where every element equals 1 / m
    B = np.full(m, 1 / m)
    # angle between each attribute column and the reference attribute
    cos_u = np.sum(X / m, axis = 0) / (np.sqrt(np.sum(X ** 2, axis = 0)) * np.sqrt(np.sum(B ** 2)))
    u = np.arccos(cos_u)
    # weights are the normalized angles
    return u / np.sum(u)


# Coefficient of variation weighting
def coeff_var_weighting(matrix):
    """
    Calculate criteria weights using objective Coefficient of variation weighting method.

    Parameters
    ----------
    matrix : ndarray
        Decision matrix with performance values of m alternatives and n criteria.

    Returns
    -------
    ndarray
        Vector of criteria weights.

    Examples
    ---------
    >>> weights = coeff_var_weighting(matrix)
    """
    m, n = matrix.shape
    # sum-normalize the matrix treating every criterion as profit
    types = np.ones(n)
    B = sum_normalization(matrix, types)

    # sample standard deviation (denominator m - 1) of each normalized column
    Bm = np.sum(B, axis = 0) / m
    std = np.sqrt(np.sum((B - Bm) ** 2, axis = 0) / (m - 1))

    # coefficient of variation per criterion, normalized to weights
    ej = std / Bm
    return ej / np.sum(ej)
# AHP weighting
class AHP_WEIGHTING():

    def __init__(self):
        """Create object of the AHP weighting method."""
        pass

    def __call__(self, X, compute_priority_vector_method = None):
        """
        Calculate criteria weights from a pairwise comparison matrix `X` using the
        priority vector method given in `compute_priority_vector_method`
        (the Eigenvector method by default).
        """
        if compute_priority_vector_method is None:
            compute_priority_vector_method = self._eigenvector
        return AHP_WEIGHTING._ahp_weighting(self, X, compute_priority_vector_method)

    def _check_consistency(self, X):
        """
        Consistency check on the pairwise comparison matrix of the criteria or alternatives.
        Prints the consistency ratio and warns when it exceeds the 0.1 threshold.

        Parameters
        -----------
        X : ndarray
            matrix of pairwise comparisons

        Examples
        ----------
        >>> PCcriteria = np.array([[1, 1, 5, 3], [1, 1, 5, 3], [1/5, 1/5, 1, 1/3], [1/3, 1/3, 3, 1]])
        >>> ahp_weighting = AHP_WEIGHTING()
        >>> ahp_weighting._check_consistency(PCcriteria)
        """
        n = X.shape[1]
        # Saaty's Random Index values for matrix sizes 1..10
        RI = [0, 0, 0.58, 0.90, 1.12, 1.24, 1.32, 1.41, 1.45, 1.49]
        # the principal eigenvalue drives the consistency index
        lambdamax = np.amax(np.linalg.eigvals(X).real)
        CI = (lambdamax - n) / (n - 1)
        CR = CI / RI[n - 1]
        print("Inconsistency index: ", CR)
        if CR > 0.1:
            print("The pairwise comparison matrix is inconsistent")

    def _eigenvector(self, X):
        """
        Compute the priority vector of criteria (weights) or alternatives using
        the Eigenvector method.

        Parameters
        -----------
        X : ndarray
            matrix of pairwise comparisons

        Returns
        ---------
        ndarray
            Normalized principal eigenvector

        Examples
        ----------
        >>> ahp = AHP_WEIGHTING()
        >>> S = ahp._eigenvector(PCM1)
        """
        # principal eigenvector of the comparison matrix (ARPACK, k = 1)
        val, vec = eigs(X, k=1)
        principal = np.real(vec)
        # normalizing by the sum also removes the arbitrary sign of the eigenvector
        S = (principal / np.sum(principal)).ravel()
        return S

    def _normalized_column_sum(self, X):
        """
        Compute the priority vector of criteria (weights) or alternatives using
        the normalized column sum method.

        Parameters
        -----------
        X : ndarray
            matrix of pairwise comparisons

        Returns
        ---------
        ndarray
            Vector with weights calculated with the normalized column sum method

        Examples
        ----------
        >>> ahp = AHP_WEIGHTING()
        >>> S = ahp._normalized_column_sum(PCM1)
        """
        # row sums scaled by the grand total
        return np.sum(X, axis = 1) / np.sum(X)

    def _geometric_mean(self, X):
        """
        Compute the priority vector of criteria (weights) or alternatives using
        the geometric mean method.

        Parameters
        -----------
        X : ndarray
            matrix of pairwise comparisons

        Returns
        ---------
        ndarray
            Vector with weights calculated with the geometric mean method

        Examples
        ----------
        >>> ahp = AHP_WEIGHTING()
        >>> S = ahp._geometric_mean(PCM1)
        """
        n = X.shape[1]
        # geometric mean of each row, normalized by the total
        row_gm = (np.prod(X, axis = 1)) ** (1 / n)
        return row_gm / np.sum(row_gm)

    @staticmethod
    def _ahp_weighting(self, X, compute_priority_vector_method):
        """
        Calculate criteria weights using subjective AHP weighting method based on
        the provided pairwise criteria comparison matrix.

        Parameters
        ------------
        X : ndarray
            pairwise criteria comparison matrix
        compute_priority_vector_method : function
            selected function for calculation of the priority vector:
            _eigenvector, _normalized_column_sum or _geometric_mean

        Returns
        -------------
        ndarray
            Vector of criteria weights.

        Examples
        -------------
        >>> PCcriteria = np.array([[1, 1, 5, 3], [1, 1, 5, 3],
        [1/5, 1/5, 1, 1/3], [1/3, 1/3, 3, 1]])
        >>> ahp_weighting = AHP_WEIGHTING()
        >>> weights = ahp_weighting(X = PCcriteria, compute_priority_vector_method=ahp_weighting._normalized_column_sum)
        """
        # report inconsistency before deriving the priority vector
        self._check_consistency(X)
        return compute_priority_vector_method(X)
significant than 590 | criterion j in percentage in range [0, 1] 591 | 592 | Returns 593 | ------------ 594 | ndarray 595 | Vector with criteria weights 596 | 597 | Examples 598 | ----------- 599 | >>> criteria_indexes = np.array([0, 1, 2, 3, 4, 5, 6]) 600 | >>> s = np.array([0, 0.35, 0.2, 0.3, 0, 0.4]) 601 | >>> swara_weights = swara_weighting(criteria_indexes, s) 602 | """ 603 | 604 | # Calculation of SWARA weights for ordered criteria 605 | # First criterion is considered as most important 606 | 607 | # Adding 0 at first index 608 | s = np.insert(s, 0, 0) 609 | 610 | # Determination of the k coefficient 611 | k = np.ones(len(s)) 612 | 613 | # Determination of the recalculated weight q 614 | q = np.ones(len(s)) 615 | for j in range(1, len(s)): 616 | k[j] = s[j] + 1 617 | q[j] = q[j - 1] / k[j] 618 | 619 | # Determination of the relative weights of the evaluation criteria 620 | weights = q / np.sum(q) 621 | # Assigning criteria weights according to their original order of criteria in given decision problem 622 | indexes = np.argsort(criteria_indexes) 623 | weights = weights[indexes] 624 | return weights 625 | 626 | 627 | # LBWA weighting 628 | def lbwa_weighting(criteria_indexes, criteria_values_I): 629 | """ 630 | Calculation of criteria weights using subjective LBWA weighting method. 
# LBWA weighting
def lbwa_weighting(criteria_indexes, criteria_values_I):
    """
    Calculate criteria weights using the subjective LBWA weighting method.

    Parameters
    -------------
    criteria_indexes : list including sublists
        Criteria indexes of the decision problem (C1 to Cn) grouped into
        significance levels and ordered within each level, beginning from the
        most significant criterion. Levels may be empty.

    criteria_values_I : list including sublists
        Influence values of the criteria within each level, in the same order
        and shape as ``criteria_indexes``.

    Returns
    --------------
    ndarray
        Vector of criteria weights.

    Examples
    --------------
    >>> criteria_indexes = [
    [1, 4, 6, 5, 0, 2],
    [7, 3]
    ]
    >>> criteria_values_I = [
    [0, 2, 3, 4, 4, 5],
    [1, 2]
    ]
    >>> weights = lbwa_weighting(criteria_indexes, criteria_values_I)

    >>> criteria_indexes = [
    [4, 7, 8, 0],
    [2, 3],
    [],
    [5],
    [],
    [],
    [1, 6]
    ]
    >>> criteria_values_I = [
    [0, 1, 2, 4],
    [1, 2],
    [],
    [2],
    [],
    [],
    [1, 3]
    ]
    >>> weights = lbwa_weighting(criteria_indexes, criteria_values_I)
    """

    # r coefficient: number of criteria in the largest significance level.
    r = max((len(level) for level in criteria_values_I), default=0)

    # r0 elasticity coefficient.
    r0 = r + 1

    # Influence-function values, preserving the level structure of the input.
    influence = copy.deepcopy(criteria_values_I)

    total_influence = 0
    criteria_count = 0

    # Influence function of a criterion at level i (1-based) with value v:
    # f = r0 / (i * r0 + v). Computed once per criterion and accumulated
    # (the original code evaluated the same expression twice).
    for lvl, level_values in enumerate(criteria_values_I):
        for pos, value in enumerate(level_values):
            f = r0 / ((lvl + 1) * r0 + value)
            influence[lvl][pos] = f
            total_influence += f
            criteria_count += 1

    # Optimum weight coefficient of the most significant criterion.
    best_weight = 1 / total_influence

    # Weight coefficients of the remaining criteria, scattered back to their
    # original indexes in the decision problem.
    weights = np.zeros(criteria_count)
    for lvl, level_values in enumerate(influence):
        for pos, f in enumerate(level_values):
            weights[criteria_indexes[lvl][pos]] = f * best_weight

    # The most significant criterion sits at position [0][0]; pin its weight
    # explicitly (matches original behavior even if its influence value is
    # non-zero, in which case the scatter above stored a smaller value).
    weights[criteria_indexes[0][0]] = best_weight
    return weights


# SAPEVO weighting
def sapevo_weighting(criteria_matrix):
    """
    Calculate criteria weights using the SAPEVO subjective weighting method.

    Parameters
    ------------
    criteria_matrix : ndarray
        Matrix with degrees of pairwise criteria comparison on the scale
        from -3 to 3.

    Returns
    ----------
    ndarray
        Vector of criteria weights.

    Examples
    -----------
    >>> criteria_matrix = np.array([
    [0, 0, 3, 3, 1, 3, 2, 1, 2],
    [0, 0, 3, 3, 1, 3, 2, 1, 2],
    [-3, -3, 0, 0, -1, -2, -2, -1, -2],
    [-3, -3, 0, 0, -2, 2, -2, -2, -2],
    [-1, -1, 1, 2, 0, 2, 0, -1, 1],
    [-3, -3, 2, -2, -2, 0, -2, -1, -2],
    [-3, -2, 2, 2, 0, 2, 0, 3, 0],
    [-1, -1, 1, 2, 1, 1, -3, 0, -1],
    [-2, -2, 2, 2, -1, 2, 0, 1, 0],
    ])

    >>> weights = sapevo_weighting(criteria_matrix)
    """

    # Sum of the degrees of preference for each criterion (row-wise).
    pref_sums = np.sum(criteria_matrix, axis=1)

    # Minimum-Maximum normalization of the sums.
    # NOTE(review): if all row sums are equal, max == min and this divides by
    # zero — callers are assumed to supply at least two distinct sums; confirm.
    lo = np.min(pref_sums)
    hi = np.max(pref_sums)
    normalized = (pref_sums - lo) / (hi - lo)

    # Scale so the weights sum to 1.
    return normalized / np.sum(normalized)
import unittest

import numpy as np
from scipy.stats import pearsonr

from crispyn import correlations as corrs


# Test for the Spearman rank correlation coefficient
class Test_Spearman(unittest.TestCase):

    def test_spearman(self):
        """Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity
        in decision-making problems. In International Conference on Computational Science
        (pp. 632-645). Springer, Cham. DOI: https://doi.org/10.1007/978-3-030-50417-5_47"""

        rank_a = np.array([1, 2, 3, 4, 5])
        rank_b = np.array([1, 3, 2, 4, 5])
        self.assertEqual(corrs.spearman(rank_a, rank_b), 0.9)


# Test for the Weighted Spearman rank correlation coefficient
class Test_Weighted_Spearman(unittest.TestCase):

    def test_weighted_spearman(self):
        """Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity
        in decision-making problems. In International Conference on Computational Science
        (pp. 632-645). Springer, Cham. DOI: https://doi.org/10.1007/978-3-030-50417-5_47"""

        rank_a = np.array([1, 2, 3, 4, 5])
        rank_b = np.array([1, 3, 2, 4, 5])
        coeff = corrs.weighted_spearman(rank_a, rank_b)
        self.assertEqual(np.round(coeff, 4), 0.8833)


# Test for the Pearson correlation coefficient
class Test_Pearson(unittest.TestCase):

    def test_pearson(self):
        """Test based on paper Sałabun, W., & Urbaniak, K. (2020, June). A new coefficient of rankings similarity
        in decision-making problems. In International Conference on Computational Science
        (pp. 632-645). Springer, Cham. DOI: https://doi.org/10.1007/978-3-030-50417-5_47"""

        rank_a = np.array([1, 2, 3, 4, 5])
        rank_b = np.array([1, 3, 2, 4, 5])
        expected, _ = pearsonr(rank_a, rank_b)
        self.assertEqual(corrs.pearson_coeff(rank_a, rank_b), expected)


def main():
    # Run every correlation test case directly.
    Test_Spearman().test_spearman()
    Test_Weighted_Spearman().test_weighted_spearman()
    Test_Pearson().test_pearson()


if __name__ == '__main__':
    main()
31-55). Springer, Cham. DOI: https://doi.org/10.1007/978-3-319-91648-4_2""" 38 | 39 | pref = np.array([0.640, 1.000, 0.693, 0.271, 0.000, 0.694]) 40 | test_result = rank_preferences(pref , reverse = False) 41 | real_result = np.array([3, 6, 4, 2, 1, 5]) 42 | self.assertEqual(list(test_result), list(real_result)) 43 | 44 | 45 | def main(): 46 | test_vikor = Test_VIKOR() 47 | test_vikor.test_vikor() 48 | 49 | test_rank_preferences = Test_Rank_preferences() 50 | test_rank_preferences.test_rank_preferences() 51 | 52 | 53 | if __name__ == '__main__': 54 | main() -------------------------------------------------------------------------------- /tests/test_weighting_methods.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import numpy as np 3 | from crispyn import weighting_methods as mcda_weights 4 | from crispyn import normalizations as norms 5 | 6 | 7 | # Test for CRITIC weighting 8 | class Test_CRITIC(unittest.TestCase): 9 | 10 | def test_critic(self): 11 | """Test based on paper Tuş, A., & Aytaç Adalı, E. (2019). The new combination with CRITIC and WASPAS methods 12 | for the time and attendance software selection problem. Opsearch, 56(2), 528-538. 13 | DOI: https://doi.org/10.1007/s12597-019-00371-6""" 14 | 15 | matrix = np.array([[5000, 3, 3, 4, 3, 2], 16 | [680, 5, 3, 2, 2, 1], 17 | [2000, 3, 2, 3, 4, 3], 18 | [600, 4, 3, 1, 2, 2], 19 | [800, 2, 4, 3, 3, 4]]) 20 | 21 | types = np.array([-1, 1, 1, 1, 1, 1]) 22 | 23 | test_result = mcda_weights.critic_weighting(matrix) 24 | real_result = np.array([0.157, 0.249, 0.168, 0.121, 0.154, 0.151]) 25 | self.assertEqual(list(np.round(test_result, 3)), list(real_result)) 26 | 27 | 28 | # Test for MEREC weighting 29 | class Test_MEREC(unittest.TestCase): 30 | 31 | def test_merec(self): 32 | """Test based on paper Keshavarz-Ghorabaee, M., Amiri, M., Zavadskas, E. K., Turskis, Z., & Antucheviciene, 33 | J. (2021). 
import unittest
import numpy as np
from crispyn import weighting_methods as mcda_weights
from crispyn import normalizations as norms

# NOTE: unused local `types` vectors were removed from the tests of weighting
# methods that do not take a `types` argument (CRITIC, Entropy, coefficient of
# variation, standard deviation, equal, statistical variance, Gini). One of
# them (in test_Entropy3) even had the wrong length — 4 entries for a
# 6-criterion matrix — and was misleading. `types` is kept wherever the
# weighting function actually consumes it.


# Test for CRITIC weighting
class Test_CRITIC(unittest.TestCase):

    def test_critic(self):
        """Test based on paper Tuş, A., & Aytaç Adalı, E. (2019). The new combination with CRITIC and WASPAS methods
        for the time and attendance software selection problem. Opsearch, 56(2), 528-538.
        DOI: https://doi.org/10.1007/s12597-019-00371-6"""

        matrix = np.array([[5000, 3, 3, 4, 3, 2],
                           [680, 5, 3, 2, 2, 1],
                           [2000, 3, 2, 3, 4, 3],
                           [600, 4, 3, 1, 2, 2],
                           [800, 2, 4, 3, 3, 4]])

        test_result = mcda_weights.critic_weighting(matrix)
        real_result = np.array([0.157, 0.249, 0.168, 0.121, 0.154, 0.151])
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))


# Test for MEREC weighting
class Test_MEREC(unittest.TestCase):

    def test_merec(self):
        """Test based on paper Keshavarz-Ghorabaee, M., Amiri, M., Zavadskas, E. K., Turskis, Z., & Antucheviciene,
        J. (2021). Determination of objective weights using a new method based on the removal
        effects of criteria (MEREC). Symmetry, 13(4), 525. DOI: https://doi.org/10.3390/sym13040525"""

        matrix = np.array([[450, 8000, 54, 145],
                           [10, 9100, 2, 160],
                           [100, 8200, 31, 153],
                           [220, 9300, 1, 162],
                           [5, 8400, 23, 158]])

        types = np.array([1, 1, -1, -1])

        test_result = mcda_weights.merec_weighting(matrix, types)
        real_result = np.array([0.5752, 0.0141, 0.4016, 0.0091])
        self.assertEqual(list(np.round(test_result, 4)), list(real_result))


# Test for Entropy weighting
class Test_Entropy(unittest.TestCase):

    def test_Entropy(self):
        """Test based on paper Xu, X. (2004). A note on the subjective and objective integrated approach to
        determine attribute weights. European Journal of Operational Research, 156(2),
        530-532. DOI: https://doi.org/10.1016/S0377-2217(03)00146-2"""

        matrix = np.array([[30, 30, 38, 29],
                           [19, 54, 86, 29],
                           [19, 15, 85, 28.9],
                           [68, 70, 60, 29]])

        test_result = mcda_weights.entropy_weighting(matrix)
        real_result = np.array([0.4630, 0.3992, 0.1378, 0.0000])
        self.assertEqual(list(np.round(test_result, 4)), list(real_result))

    def test_Entropy2(self):
        """Test based on paper Zavadskas, E. K., & Podvezko, V. (2016). Integrated determination of objective
        criteria weights in MCDM. International Journal of Information Technology & Decision
        Making, 15(02), 267-283. DOI: https://EconPapers.repec.org/RePEc:wsi:ijitdm:v:15:y:2016:i:02:n:s0219622016500036"""

        matrix = np.array([[3.0, 100, 10, 7],
                           [2.5, 80, 8, 5],
                           [1.8, 50, 20, 11],
                           [2.2, 70, 12, 9]])

        test_result = mcda_weights.entropy_weighting(matrix)
        real_result = np.array([0.1146, 0.1981, 0.4185, 0.2689])
        self.assertEqual(list(np.round(test_result, 4)), list(real_result))

    def test_Entropy3(self):
        """Test based on paper Ersoy, Y. (2021). Equipment selection for an e-commerce company using entropy-based
        topsis, edas and codas methods during the COVID-19. LogForum, 17(3). DOI: http://doi.org/10.17270/J.LOG.2021.603"""

        matrix = np.array([[256, 8, 41, 1.6, 1.77, 7347.16],
                           [256, 8, 32, 1.0, 1.8, 6919.99],
                           [256, 8, 53, 1.6, 1.9, 8400],
                           [256, 8, 41, 1.0, 1.75, 6808.9],
                           [512, 8, 35, 1.6, 1.7, 8479.99],
                           [256, 4, 35, 1.6, 1.7, 7499.99]])

        test_result = mcda_weights.entropy_weighting(matrix)
        real_result = np.array([0.405, 0.221, 0.134, 0.199, 0.007, 0.034])
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))

    def test_Entropy4(self):
        """Test based on paper Lee, H. C., & Chang, C. T. (2018). Comparative analysis of MCDM
        methods for ranking renewable energy sources in Taiwan. Renewable and Sustainable Energy
        Reviews, 92, 883-896. DOI: https://doi.org/10.1016/j.rser.2018.05.007"""

        matrix = np.array([[4550, 30, 6.74, 20, 15, 5, 85, 150, 0.87, 4.76],
                           [3005, 60.86, 2.4, 35, 27, 4, 26, 200, 0.17, 4.51],
                           [2040, 14.85, 1.7, 90, 25, 5, 26, 500, 0.27, 4.19],
                           [3370, 99.4, 3.25, 25.3, 54, 3, 45, 222, 0.21, 3.78],
                           [3920, 112.6, 4.93, 11.4, 71.7, 2, 50, 100, 0.25, 4.11]])

        test_result = mcda_weights.entropy_weighting(matrix)
        real_result = np.array([0.026, 0.154, 0.089, 0.199, 0.115, 0.04, 0.08, 0.123, 0.172, 0.002])
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))


# Test for CILOS weighting
class Test_CILOS(unittest.TestCase):

    def test_cilos(self):
        """Test based on paper Alinezhad, A., & Khalili, J. (2019). New methods and applications in multiple
        attribute decision making (MADM) (Vol. 277). Cham: Springer. DOI: https://doi.org/10.1007/978-3-030-15009-9"""

        matrix = np.array([[3, 100, 10, 7],
                           [2.500, 80, 8, 5],
                           [1.800, 50, 20, 11],
                           [2.200, 70, 12, 9]])

        types = np.array([-1, 1, -1, 1])

        test_result = mcda_weights.cilos_weighting(matrix, types)
        real_result = np.array([0.334, 0.220, 0.196, 0.250])
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))

    def test_cilos2(self):
        """Test based on paper Zavadskas, E. K., & Podvezko, V. (2016). Integrated determination of objective
        criteria weights in MCDM. International Journal of Information Technology & Decision
        Making, 15(02), 267-283. DOI: https://EconPapers.repec.org/RePEc:wsi:ijitdm:v:15:y:2016:i:02:n:s0219622016500036"""

        matrix = np.array([[0.6, 100, 0.8, 7],
                           [0.72, 80, 1, 5],
                           [1, 50, 0.4, 11],
                           [0.818, 70, 0.667, 9]])

        types = np.array([1, 1, 1, 1])

        test_result = mcda_weights.cilos_weighting(matrix, types)
        real_result = np.array([0.3343, 0.2199, 0.1957, 0.2501])
        self.assertEqual(list(np.round(test_result, 4)), list(real_result))


# Test for IDOCRIW weighting
class Test_IDOCRIW(unittest.TestCase):

    def test_idocriw(self):
        """Test based on paper Zavadskas, E. K., & Podvezko, V. (2016). Integrated determination of objective
        criteria weights in MCDM. International Journal of Information Technology & Decision
        Making, 15(02), 267-283. DOI: https://EconPapers.repec.org/RePEc:wsi:ijitdm:v:15:y:2016:i:02:n:s0219622016500036"""

        matrix = np.array([[3.0, 100, 10, 7],
                           [2.5, 80, 8, 5],
                           [1.8, 50, 20, 11],
                           [2.2, 70, 12, 9]])

        types = np.array([-1, 1, -1, 1])

        test_result = mcda_weights.idocriw_weighting(matrix, types)
        real_result = np.array([0.1658, 0.1886, 0.35455, 0.2911])
        self.assertEqual(list(np.round(test_result, 3)), list(np.round(real_result, 3)))


# Test for Angle weighting
class Test_Angle(unittest.TestCase):

    def test_angle(self):
        """Test based on paper Shuai, D., Zongzhun, Z., Yongji, W., & Lei, L. (2012, May). A new angular method to
        determine the objective weights. In 2012 24th Chinese Control and Decision Conference
        (CCDC) (pp. 3889-3892). IEEE. DOI: https://doi.org/10.1109/CCDC.2012.6244621"""

        matrix = np.array([[30, 30, 38, 29],
                           [19, 54, 86, 29],
                           [19, 15, 85, 28.9],
                           [68, 70, 60, 29]])

        types = np.array([1, 1, 1, 1])

        test_result = mcda_weights.angle_weighting(matrix, types)
        real_result = np.array([0.4150, 0.3612, 0.2227, 0.0012])
        self.assertEqual(list(np.round(test_result, 4)), list(real_result))


# Test for Coefficient of Variation weighting
class Test_Coeff_var(unittest.TestCase):

    def test_coeff_var(self):
        """Test based on paper Shuai, D., Zongzhun, Z., Yongji, W., & Lei, L. (2012, May). A new angular method to
        determine the objective weights. In 2012 24th Chinese Control and Decision Conference
        (CCDC) (pp. 3889-3892). IEEE. DOI: https://doi.org/10.1109/CCDC.2012.6244621"""

        matrix = np.array([[30, 30, 38, 29],
                           [19, 54, 86, 29],
                           [19, 15, 85, 28.9],
                           [68, 70, 60, 29]])

        test_result = mcda_weights.coeff_var_weighting(matrix)
        real_result = np.array([0.4258, 0.3610, 0.2121, 0.0011])
        self.assertEqual(list(np.round(test_result, 4)), list(real_result))


# Test for Standard Deviation weighting
class Test_STD(unittest.TestCase):

    def test_std(self):
        """Test based on paper Sałabun, W., Wątróbski, J., & Shekhovtsov, A. (2020). Are mcda methods benchmarkable?
        a comparative study of topsis, vikor, copras, and promethee ii methods. Symmetry, 12(9),
        1549. DOI: https://doi.org/10.3390/sym12091549"""

        matrix = np.array([[0.619, 0.449, 0.447],
                           [0.862, 0.466, 0.006],
                           [0.458, 0.698, 0.771],
                           [0.777, 0.631, 0.491],
                           [0.567, 0.992, 0.968]])

        test_result = mcda_weights.std_weighting(matrix)
        real_result = np.array([0.217, 0.294, 0.488])
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))


# Test for Equal weighting
class Test_equal(unittest.TestCase):

    def test_equal(self):
        """Test based on paper Sałabun, W., Wątróbski, J., & Shekhovtsov, A. (2020). Are mcda methods benchmarkable?
        a comparative study of topsis, vikor, copras, and promethee ii methods. Symmetry, 12(9),
        1549. DOI: https://doi.org/10.3390/sym12091549"""

        matrix = np.array([[0.619, 0.449, 0.447],
                           [0.862, 0.466, 0.006],
                           [0.458, 0.698, 0.771],
                           [0.777, 0.631, 0.491],
                           [0.567, 0.992, 0.968]])

        test_result = mcda_weights.equal_weighting(matrix)
        real_result = np.array([0.333, 0.333, 0.333])
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))


# Test for Statistical variance weighting
class Test_stat_var(unittest.TestCase):

    def test_stat_var(self):
        """Test based on paper Sałabun, W., Wątróbski, J., & Shekhovtsov, A. (2020). Are mcda methods benchmarkable?
        a comparative study of topsis, vikor, copras, and promethee ii methods. Symmetry, 12(9),
        1549. DOI: https://doi.org/10.3390/sym12091549"""

        matrix = np.array([[0.619, 0.449, 0.447],
                           [0.862, 0.466, 0.006],
                           [0.458, 0.698, 0.771],
                           [0.777, 0.631, 0.491],
                           [0.567, 0.992, 0.968]])

        test_result = mcda_weights.stat_var_weighting(matrix)
        # Reference result computed directly from the definition: variance of
        # the min-max normalized matrix, scaled to sum to 1.
        xn = norms.minmax_normalization(matrix, np.ones(matrix.shape[1]))
        real_result = np.var(xn, axis=0) / np.sum(np.var(xn, axis=0))
        self.assertEqual(list(np.round(test_result, 4)), list(np.round(real_result, 4)))


# Test for Gini coefficient-based weighting
class Test_gini(unittest.TestCase):

    def test_gini(self):
        """Test based on paper Bączkiewicz, A., Wątróbski, J., Kizielewicz, B., & Sałabun, W. (2021). Towards
        Reliable Results-A Comparative Analysis of Selected MCDA Techniques in the Camera
        Selection Problem. In Information Technology for Management: Business and Social Issues
        (pp. 143-165). Springer, Cham. DOI: 10.1007/978-3-030-98997-2_7"""

        matrix = np.array([[29.4, 83, 47, 114, 12, 30, 120, 240, 170, 90, 1717.75],
                           [30, 38.1, 124.7, 117, 16, 60, 60, 60, 93, 70, 2389],
                           [29.28, 59.27, 41.13, 58, 16, 30, 60, 120, 170, 78, 239.99],
                           [33.6, 71, 55, 159, 23.6, 60, 240, 240, 132, 140, 2099],
                           [21, 59, 41, 66, 16, 24, 60, 120, 170, 70, 439],
                           [35, 65, 42, 134, 12, 60, 240, 240, 145, 60, 1087],
                           [47, 79, 54, 158, 19, 60, 120, 120, 360, 72, 2499],
                           [28.3, 62.3, 44.9, 116, 12, 30, 60, 60, 130, 90, 999.99],
                           [36.9, 28.6, 121.6, 130, 12, 60, 120, 120, 80, 80, 1099],
                           [32, 59, 41, 60, 16, 30, 120, 120, 170, 60, 302.96],
                           [28.4, 66.3, 48.6, 126, 12, 60, 240, 240, 132, 135, 1629],
                           [29.8, 46, 113, 47, 18, 50, 50, 50, 360, 72, 2099],
                           [20.2, 64, 80, 70, 8, 24, 60, 120, 166, 480, 699.99],
                           [33, 60, 44, 59, 12, 30, 60, 120, 170, 90, 388],
                           [29, 59, 41, 55, 16, 30, 60, 120, 170, 120, 299],
                           [29, 59, 41, 182, 12, 30, 30, 60, 94, 140, 249],
                           [29.8, 59.2, 41, 65, 16, 30, 60, 120, 160, 90, 219.99],
                           [28.8, 62.5, 41, 70, 12, 60, 120, 120, 170, 138, 1399.99],
                           [24, 40, 59, 60, 12, 10, 30, 30, 140, 78, 269.99],
                           [30, 60, 45, 201, 16, 30, 30, 30, 170, 90, 199.99]])

        test_result = mcda_weights.gini_weighting(matrix)
        real_result = np.array([0.0362, 0.0437, 0.0848, 0.0984, 0.0480, 0.0842, 0.1379, 0.1125, 0.0745, 0.1107, 0.1690])
        self.assertEqual(list(np.round(test_result, 4)), list(np.round(real_result, 4)))


# Test for AHP weighting method
class Test_AHP(unittest.TestCase):

    def test_ahp1(self):
        """Papathanasiou, J., Ploskas, N., Papathanasiou, J., & Ploskas, N. (2018).
        Ahp. Multiple Criteria Decision Aid: Methods, Examples and Python Implementations,
        109-129. Springer Optimization and Its Applications, vol 136. Springer, Cham.
        DOI: https://doi.org/10.1007/978-3-319-91648-4_5"""

        PCcriteria = np.array([[1, 1, 5, 3], [1, 1, 5, 3],
                               [1/5, 1/5, 1, 1/3], [1/3, 1/3, 3, 1]])

        ahp_weighting = mcda_weights.AHP_WEIGHTING()
        test_result = ahp_weighting(X=PCcriteria, compute_priority_vector_method=ahp_weighting._eigenvector)
        real_result = np.array([0.390, 0.390, 0.068, 0.152])
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))

    def test_ahp2(self):
        """Wątróbski, J., Ziemba, P., Jankowski, J., & Zioło, M. (2016). Green energy for a green
        city—A multi-perspective model approach. Sustainability, 8(8), 702.
        DOI: https://doi.org/10.3390/su8080702"""

        PCcriteria = np.array([
            [1, 2, 2, 2, 6, 2, 1, 1, 1/2, 1/2],
            [1/2, 1, 1, 1, 4, 1, 1, 1, 1/2, 1/2],
            [1/2, 1, 1, 1, 3, 1, 1/2, 1/2, 1/3, 1/3],
            [1/2, 1, 1, 1, 3, 1, 1/2, 1/2, 1/3, 1/3],
            [1/6, 1/4, 1/3, 1/3, 1, 1/3, 1/5, 1/5, 1/9, 1/9],
            [1/2, 1, 1, 1, 3, 1, 1/2, 1/2, 1/3, 1/3],
            [1, 1, 2, 2, 5, 2, 1, 1, 1/2, 1/2],
            [1, 1, 2, 2, 5, 2, 1, 1, 1/2, 1/2],
            [2, 2, 3, 3, 9, 3, 2, 2, 1, 1],
            [2, 2, 3, 3, 9, 3, 2, 2, 1, 1]
        ])

        ahp_weighting = mcda_weights.AHP_WEIGHTING()
        test_result = ahp_weighting(X=PCcriteria, compute_priority_vector_method=ahp_weighting._eigenvector)
        real_result = np.array([0.1165, 0.0804, 0.0614, 0.0614, 0.0205, 0.0614, 0.10648, 0.10648, 0.19266, 0.19266])
        self.assertEqual(list(np.round(test_result, 4)), list(np.round(real_result, 4)))


# Test for SWARA weighting method
class Test_SWARA(unittest.TestCase):

    def test_swara(self):
        """Karabasevic, D., Stanujkic, D., Urosevic, S., & Maksimovic, M. (2015).
        Selection of candidates in the mining industry based on the application of the SWARA
        and the MULTIMOORA methods. Acta Montanistica Slovaca, 20(2).
        DOI: 10.3390/ams20020116"""

        criteria_indexes = np.array([0, 1, 2, 3, 4, 5, 6])
        s = np.array([0, 0.35, 0.2, 0.3, 0, 0.4])

        test_result = mcda_weights.swara_weighting(criteria_indexes, s)
        real_result = np.array([0.215, 0.215, 0.159, 0.133, 0.102, 0.102, 0.073])
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))


# Test for LBWA weighting method
class Test_LBWA(unittest.TestCase):

    def test_lbwa1(self):
        """Žižović, M., & Pamucar, D. (2019). New model for determining criteria weights:
        Level Based Weight Assessment (LBWA) model. Decision Making: Applications in Management
        and Engineering, 2(2), 126-137.
        DOI: https://doi.org/10.31181/dmame1902102z"""

        criteria_indexes = [
            [1, 4, 6, 5, 0, 2],
            [7, 3]
        ]

        criteria_values_I = [
            [0, 2, 3, 4, 4, 5],
            [1, 2]
        ]

        test_result = mcda_weights.lbwa_weighting(criteria_indexes, criteria_values_I)
        real_result = np.array([0.121, 0.191, 0.111, 0.084, 0.148, 0.121, 0.134, 0.089])
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))

    def test_lbwa2(self):
        """Žižović, M., & Pamucar, D. (2019). New model for determining criteria weights:
        Level Based Weight Assessment (LBWA) model. Decision Making: Applications in Management
        and Engineering, 2(2), 126-137.
        DOI: https://doi.org/10.31181/dmame1902102z"""

        criteria_indexes = [
            [4, 7, 8, 0],
            [2, 3],
            [],
            [5],
            [],
            [],
            [1, 6]
        ]

        criteria_values_I = [
            [0, 1, 2, 4],
            [1, 2],
            [],
            [2],
            [],
            [],
            [1, 3]
        ]

        test_result = mcda_weights.lbwa_weighting(criteria_indexes, criteria_values_I)
        real_result = np.array([0.124, 0.031, 0.102, 0.093, 0.224, 0.051, 0.029, 0.186, 0.160])
        self.assertEqual(list(np.round(test_result, 3)), list(real_result))


# Test for SAPEVO weighting method
class Test_SAPEVO(unittest.TestCase):

    def test_sapevo1(self):
        """Vitorino, L., Almeida SILVA, F. C., Simões GOMES, C. F., SANTOS, M. D., & Lucas, S. F. (2022).
        SAPEVO-WASPAS-2N-A PROPOSAL.. Economic Computation & Economic Cybernetics Studies & Research,
        56(4).
        DOI: 10.24818/18423264/56.4.22.02"""

        criteria_matrix = np.array([
            [0, 0, 3, 3, 1, 3, 2, 1, 2],
            [0, 0, 3, 3, 1, 3, 2, 1, 2],
            [-3, -3, 0, 0, -1, -2, -2, -1, -2],
            [-3, -3, 0, 0, -2, 2, -2, -2, -2],
            [-1, -1, 1, 2, 0, 2, 0, -1, 1],
            [-3, -3, 2, -2, -2, 0, -2, -1, -2],
            [-3, -2, 2, 2, 0, 2, 0, 3, 0],
            [-1, -1, 1, 2, 1, 1, -3, 0, -1],
            [-2, -2, 2, 2, -1, 2, 0, 1, 0],
        ])

        test_result = mcda_weights.sapevo_weighting(criteria_matrix)
        real_result = np.array([1.0000, 1.0000, 0.0000, 0.0690, 0.5862, 0.0345, 0.6207, 0.4483, 0.5517])
        real_result = real_result / np.sum(real_result)
        self.assertEqual(list(np.round(test_result, 4)), list(np.round(real_result, 4)))

    def test_sapevo2(self):
        """do Nascimento MAÊDA, S. M., Basílio, M. P., Pinheiro, I., de Araújo Costa, M. Â.,
        Moreira, L., dos Santos, M., & Gomes, C. F. S. (2021). The SAPEVO-M-NC Method. Front.
        Artif. Intell. Appl, 341, 89-95.
        doi:10.3233/FAIA210235"""

        criteria_matrix = np.array([
            [0, 1, 3, 2],
            [-1, 0, 2, 1],
            [-3, -2, 0, -1],
            [-2, -1, 1, 0]
        ])

        test_result = mcda_weights.sapevo_weighting(criteria_matrix)
        real_result = np.array([1, 0.666667, 0.003333, 0.333333])
        real_result = real_result / np.sum(real_result)
        self.assertEqual(list(np.round(test_result, 2)), list(np.round(real_result, 2)))


def main():
    # Test of the CRITIC weighting method
    test_critic = Test_CRITIC()
    test_critic.test_critic()

    # Test of the MEREC weighting method
    test_merec = Test_MEREC()
    test_merec.test_merec()

    # Test of the Entropy weighting method
    test_entropy = Test_Entropy()
    test_entropy.test_Entropy()
    test_entropy.test_Entropy2()
    test_entropy.test_Entropy3()
    test_entropy.test_Entropy4()

    # Test of the CILOS weighting method
    test_cilos = Test_CILOS()
    test_cilos.test_cilos()
    test_cilos.test_cilos2()

    # Test of the IDOCRIW weighting method
    test_idocriw = Test_IDOCRIW()
    test_idocriw.test_idocriw()

    # Test of the Angle weighting method
    test_angle = Test_Angle()
    test_angle.test_angle()

    # Test of the Coefficient of variation weighting method
    test_coeff_var = Test_Coeff_var()
    test_coeff_var.test_coeff_var()

    # Test of the Standard deviation weighting method
    test_std = Test_STD()
    test_std.test_std()

    # Test of the Equal weighting method
    test_equal = Test_equal()
    test_equal.test_equal()

    # Test of the Statistical variance weighting method
    test_stat_var = Test_stat_var()
    test_stat_var.test_stat_var()

    # Test of the Gini coefficient-based weighting method
    test_gini = Test_gini()
    test_gini.test_gini()

    # Test of the AHP weighting method
    test_ahp = Test_AHP()
    test_ahp.test_ahp1()
    test_ahp.test_ahp2()

    # Test of the SWARA weighting method
    test_swara = Test_SWARA()
    test_swara.test_swara()

    # Test of the LBWA weighting method
    test_lbwa = Test_LBWA()
    test_lbwa.test_lbwa1()
    test_lbwa.test_lbwa2()

    # Test of the SAPEVO weighting method
    test_sapevo = Test_SAPEVO()
    test_sapevo.test_sapevo1()
    test_sapevo.test_sapevo2()


if __name__ == '__main__':
    main()