├── .editorconfig ├── .gitattributes ├── .github └── workflows │ └── tests.yml ├── .gitignore ├── .isort.cfg ├── .mailmap ├── .prepare-commit-msg.py ├── .readthedocs.yaml ├── CHANGES.md ├── CITATION.cff ├── CITATION.rst ├── CONTRIBUTING.rst ├── LICENSE.txt ├── MANIFEST.in ├── README.rst ├── axelrod ├── __init__.py ├── _strategy_utils.py ├── action.py ├── classifier.py ├── compute_finite_state_machine_memory.py ├── data │ ├── all_classifiers.yml │ ├── ann_weights.csv │ └── pso_gambler.csv ├── deterministic_cache.py ├── ecosystem.py ├── eigen.py ├── evolvable_player.py ├── fingerprint.py ├── game.py ├── graph.py ├── history.py ├── interaction_utils.py ├── load_data_.py ├── makes_use_of.py ├── match.py ├── match_generator.py ├── mock_player.py ├── moran.py ├── player.py ├── plot.py ├── random_.py ├── result_set.py ├── strategies │ ├── __init__.py │ ├── _filters.py │ ├── _strategies.py │ ├── adaptive.py │ ├── adaptor.py │ ├── alternator.py │ ├── ann.py │ ├── apavlov.py │ ├── appeaser.py │ ├── averagecopier.py │ ├── axelrod_first.py │ ├── axelrod_second.py │ ├── backstabber.py │ ├── better_and_better.py │ ├── bush_mosteller.py │ ├── calculator.py │ ├── cooperator.py │ ├── cycler.py │ ├── darwin.py │ ├── dbs.py │ ├── defector.py │ ├── doubler.py │ ├── finite_state_machines.py │ ├── forgiver.py │ ├── frequency_analyzer.py │ ├── gambler.py │ ├── gobymajority.py │ ├── gradualkiller.py │ ├── grudger.py │ ├── grumpy.py │ ├── handshake.py │ ├── hmm.py │ ├── hunter.py │ ├── inverse.py │ ├── lookerup.py │ ├── mathematicalconstants.py │ ├── memoryone.py │ ├── memorytwo.py │ ├── meta.py │ ├── momentum.py │ ├── mutual.py │ ├── negation.py │ ├── oncebitten.py │ ├── prober.py │ ├── punisher.py │ ├── qlearner.py │ ├── rand.py │ ├── resurrection.py │ ├── retaliate.py │ ├── revised_downing.py │ ├── selfsteem.py │ ├── sequence_player.py │ ├── shortmem.py │ ├── stalker.py │ ├── titfortat.py │ ├── verybad.py │ ├── worse_and_worse.py │ └── zero_determinant.py ├── strategy_transformers.py ├── tests │ ├── __init__.py │ ├── integration │ │ ├── __init__.py │ │ ├── test_filtering.py │ │ ├── test_matches.py │ │ ├── test_names.py │ │ ├── test_sample_tournaments.py │ │ └── test_tournament.py │ ├── property.py │ ├── strategies │ │ ├── __init__.py │ │ ├── test_adaptive.py │ │ ├── test_adaptor.py │ │ ├── test_alternator.py │ │ ├── test_ann.py │ │ ├── test_apavlov.py │ │ ├── test_appeaser.py │ │ ├── test_averagecopier.py │ │ ├── test_axelrod_first.py │ │ ├── test_axelrod_second.py │ │ ├── test_backstabber.py │ │ ├── test_better_and_better.py │ │ ├── test_bush_mosteller.py │ │ ├── test_calculator.py │ │ ├── test_cooperator.py │ │ ├── test_cycler.py │ │ ├── test_darwin.py │ │ ├── test_dbs.py │ │ ├── test_defector.py │ │ ├── test_doubler.py │ │ ├── test_evolvable_player.py │ │ ├── test_finite_state_machines.py │ │ ├── test_forgiver.py │ │ ├── test_frequency_analyzer.py │ │ ├── test_gambler.py │ │ ├── test_gobymajority.py │ │ ├── test_gradualkiller.py │ │ ├── test_grudger.py │ │ ├── test_grumpy.py │ │ ├── test_handshake.py │ │ ├── test_headsup.py │ │ ├── test_hmm.py │ │ ├── test_hunter.py │ │ ├── test_inverse.py │ │ ├── test_lookerup.py │ │ ├── test_mathematicalconstants.py │ │ ├── test_memoryone.py │ │ ├── test_memorytwo.py │ │ ├── test_meta.py │ │ ├── test_momentum.py │ │ ├── test_mutual.py │ │ ├── test_negation.py │ │ ├── test_oncebitten.py │ │ ├── test_player.py │ │ ├── test_prober.py │ │ ├── test_punisher.py │ │ ├── test_qlearner.py │ │ ├── test_rand.py │ │ ├── test_resurrection.py │ │ ├── test_retaliate.py │ │ ├── 
test_revised_downing.py │ │ ├── test_selfsteem.py │ │ ├── test_sequence_player.py │ │ ├── test_shortmem.py │ │ ├── test_stalker.py │ │ ├── test_titfortat.py │ │ ├── test_verybad.py │ │ ├── test_worse_and_worse.py │ │ └── test_zero_determinant.py │ └── unit │ │ ├── __init__.py │ │ ├── test_actions.py │ │ ├── test_classification.py │ │ ├── test_compute_finite_state_machine_memory.py │ │ ├── test_deterministic_cache.py │ │ ├── test_ecosystem.py │ │ ├── test_eigen.py │ │ ├── test_filters.py │ │ ├── test_fingerprint.py │ │ ├── test_game.py │ │ ├── test_graph.py │ │ ├── test_history.py │ │ ├── test_interaction_utils.py │ │ ├── test_load_data.py │ │ ├── test_makes_use_of.py │ │ ├── test_match.py │ │ ├── test_match_generator.py │ │ ├── test_mock_player.py │ │ ├── test_moran.py │ │ ├── test_pickling.py │ │ ├── test_plot.py │ │ ├── test_property.py │ │ ├── test_random_.py │ │ ├── test_resultset.py │ │ ├── test_strategy_transformers.py │ │ ├── test_strategy_utils.py │ │ ├── test_tournament.py │ │ └── test_version.py ├── tournament.py └── version.py ├── citations.md ├── docs ├── Makefile ├── _static │ └── favicon.ico ├── conf.py ├── discussion │ ├── axelrods_tournaments.rst │ ├── community │ │ ├── coc.rst │ │ ├── communication.rst │ │ ├── index.rst │ │ └── team.rst │ ├── fortan_keys.rst │ ├── index.rst │ ├── overview_of_past_tournaments.rst │ ├── play_contexts.rst │ └── strategy_archetypes.rst ├── how-to │ ├── _static │ │ ├── ecological_variant │ │ │ └── demo_strategies_stackplot.svg │ │ ├── fingerprinting │ │ │ ├── WSLS_large.png │ │ │ ├── WSLS_large_alt.png │ │ │ ├── WSLS_small.png │ │ │ ├── transitive_TFT.png │ │ │ └── transitive_TFT_against_demo.png │ │ ├── noisy_tournaments │ │ │ ├── demo_strategies_noisy_boxplot.svg │ │ │ └── demo_strategies_noisy_winplot.svg │ │ ├── prob_end_tournaments │ │ │ ├── prob_end_boxplot.svg │ │ │ └── prob_end_lengthplot.svg │ │ └── spatial_tournaments │ │ │ ├── spatial.png │ │ │ └── spatial_results.png │ ├── access_collections_of_strategies.rst │ ├── access_tournament_results.rst │ ├── calculate_morality_metrics.rst │ ├── check_player_equality.rst │ ├── cite_the_library.rst │ ├── classify_strategies.rst │ ├── contributing │ │ ├── guidelines.rst │ │ ├── index.rst │ │ ├── library │ │ │ └── index.rst │ │ ├── running_tests.rst │ │ ├── setting_up_the_environment.rst │ │ └── strategy │ │ │ ├── adding_the_new_strategy.rst │ │ │ ├── classifying_the_new_strategy.rst │ │ │ ├── docstrings.rst │ │ │ ├── index.rst │ │ │ ├── instructions.rst │ │ │ ├── writing_test_for_the_new_strategy.rst │ │ │ └── writing_the_new_strategy.rst │ ├── create_approximate_moran_processes.rst │ ├── create_moran_processes_on_graphs.rst │ ├── create_spatial_tournaments.rst │ ├── evolve_players.rst │ ├── fingerprint.rst │ ├── include_noise.rst │ ├── include_probabilistic_endings.rst │ ├── index.rst │ ├── read_and_write_interactions.rst │ ├── run_axelrods_ecological_variant.rst │ ├── set_a_seed.rst │ ├── set_player_information.rst │ ├── use_a_cache.rst │ ├── use_custom_matches.rst │ ├── use_different_stage_games.rst │ ├── use_parallel_processing.rst │ └── use_strategy_transformers.rst ├── index.rst ├── reference │ ├── bibliography.rst │ ├── glossary.rst │ ├── index.rst │ └── strategy_index.rst ├── requirements.txt └── tutorials │ ├── creating_heterogenous_player_moran_process │ └── index.rst │ ├── implement_new_games │ └── index.rst │ ├── index.rst │ ├── new_to_game_theory_and_or_python │ ├── _static │ │ ├── getting_started │ │ │ ├── demo_deterministic_strategies_boxplot.svg │ │ │ └── moran_example.svg │ │ 
└── visualising_results │ │ │ ├── demo_strategies_boxplot.svg │ │ │ ├── demo_strategies_payoff.svg │ │ │ ├── demo_strategies_winplot.svg │ │ │ └── title_labels_payoff.png │ ├── index.rst │ ├── installation.rst │ ├── match.rst │ ├── moran.rst │ ├── summarising_tournaments.rst │ ├── tournament.rst │ └── visualising_results.rst │ └── running_axelrods_first_tournament │ ├── _static │ └── running_axelrods_first_tournament │ │ ├── boxplot.svg │ │ ├── main.py │ │ └── rank_comparison.svg │ └── index.rst ├── doctests.py ├── matplotlibrc ├── pyproject.toml ├── rebuild_classifier_table.py ├── run_mypy.py ├── run_strategy_indexer.py ├── setup.cfg ├── setup.py ├── test ├── test_outputs ├── README.md ├── classifier_test.yaml ├── expected_test_tournament.csv ├── expected_test_tournament_no_results.csv ├── test_results.csv ├── test_results_spatial.csv ├── test_results_spatial_three.csv └── test_results_spatial_two.csv └── tox.ini /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig helps developers define and maintain consistent 2 | # coding styles between different editors and IDEs 3 | # editorconfig.org 4 | 5 | # top-most EditorConfig file 6 | root = true 7 | 8 | [*] 9 | end_of_line = lf 10 | insert_final_newline = true 11 | charset = utf-8 12 | trim_trailing_whitespace = true 13 | indent_style = space 14 | 15 | [*.{py, md, rst}] 16 | indent_size = 4 17 | 18 | [*.yml] 19 | indent_size = 2 20 | 21 | [Makefile] 22 | indent_style = tab 23 | indent_size = 8 24 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | .hypothesis/examples.db merge=hypothesisdb 2 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | runs-on: ${{ matrix.os }} 8 | strategy: 9 | max-parallel: 4 10 | matrix: 11 | os: [ubuntu-latest, macOS-latest, windows-latest] 12 | python-version: ["3.11", "3.12"] 13 | 14 | steps: 15 | - uses: actions/checkout@v2 16 | - name: Set up Python ${{ matrix.python-version }} 17 | uses: actions/setup-python@v2 18 | with: 19 | python-version: ${{ matrix.python-version }} 20 | 21 | - name: update pip 22 | run: | 23 | python -m pip install --upgrade pip 24 | 25 | - name: install tox 26 | run: | 27 | python -m pip install tox tox-gh-actions 28 | 29 | - name: run tox 30 | run: | 31 | python -m tox 32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Axelrod Specific 2 | basic_strategies.csv 3 | cache.txt 4 | test.csv 5 | summary.csv 6 | basic_tournament.csv 7 | test_outputs/*csv.summary 8 | test_outputs/*svg 9 | test_outputs/*cache 10 | 11 | # Mac Specific 12 | *.DS_Store 13 | 14 | # Byte-compiled / optimized / DLL files 15 | __pycache__/ 16 | *.py[cod] 17 | *$py.class 18 | 19 | # C extensions 20 | *.so 21 | 22 | # Distribution / packaging 23 | .Python 24 | env/ 25 | build/ 26 | develop-eggs/ 27 | dist/ 28 | downloads/ 29 | eggs/ 30 | .eggs/ 31 | lib/ 32 | lib64/ 33 | parts/ 34 | sdist/ 35 | var/ 36 | *.egg-info/ 37 | .installed.cfg 38 | *.egg 39 | 40 | # PyInstaller 41 | # Usually these files are written by a python script from a template 42 | # before PyInstaller builds the 
exe, so as to inject date/other infos into it. 43 | *.manifest 44 | *.spec 45 | 46 | # Installer logs 47 | pip-log.txt 48 | pip-delete-this-directory.txt 49 | 50 | # Unit test / coverage reports 51 | htmlcov/ 52 | .tox/ 53 | .coverage 54 | .coverage.* 55 | .cache 56 | nosetests.xml 57 | coverage.xml 58 | *,cover 59 | .hypothesis/ 60 | 61 | # Translations 62 | *.mo 63 | *.pot 64 | 65 | # Django stuff: 66 | *.log 67 | local_settings.py 68 | 69 | # Flask stuff: 70 | instance/ 71 | .webassets-cache 72 | 73 | # Scrapy stuff: 74 | .scrapy 75 | 76 | # Sphinx documentation 77 | docs/_build/ 78 | 79 | # PyBuilder 80 | target/ 81 | 82 | # Jupyter Notebook 83 | .ipynb_checkpoints 84 | *.ipynb 85 | # pyenv 86 | .python-version 87 | 88 | # celery beat schedule file 89 | celerybeat-schedule 90 | 91 | # dotenv 92 | .env 93 | 94 | # virtualenv 95 | .venv/ 96 | venv/ 97 | ENV/ 98 | 99 | # Spyder project settings 100 | .spyderproject 101 | 102 | # Rope project settings 103 | .ropeproject 104 | 105 | 106 | # Pycharm Files 107 | .idea/ 108 | 109 | # Sublime Text settings 110 | *.sublime-* 111 | 112 | # Jetbrain editor settings 113 | .idea/ 114 | 115 | # Docker files 116 | Dockerfile 117 | docker-compose.yml 118 | 119 | # Mypy files 120 | .mypy_cache/ 121 | 122 | test_outputs/classifier_test.yaml 123 | test_outputs/stochastic_tournament_0.csv 124 | test_outputs/stochastic_tournament_1.csv 125 | test_outputs/test_fingerprint.csv 126 | test_outputs/test_fingerprint_tmp.csv 127 | test_outputs/test_results_from_file.csv 128 | test_outputs/test_results_from_file_tmp.csv 129 | test_outputs/test_tournament.csv 130 | test_outputs/tran_fin.csv 131 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | default_section = THIRDPARTY 3 | multi_line_output = 3 4 | include_trailing_comma = True 5 | force_grid_wrap = 0 6 | combine_as_imports = True 7 | line_length = 80 8 | -------------------------------------------------------------------------------- /.mailmap: -------------------------------------------------------------------------------- 1 | Vincent Knight Vince Knight 2 | Vincent Knight vince 3 | Karol M. Langner Karol M. Langner 4 | Karol M. Langner Karol M. Langner 5 | Jason Young jasonyoung 6 | Marc Harper Marc Harper, PhD 7 | Owen Campbell Owen Campbell 8 | Geraint Palmer geraint 9 | Paul Slavin slavinp 10 | Paul Slavin Paul Slavin 11 | Martin Chorley Martin Chorley 12 | -------------------------------------------------------------------------------- /.prepare-commit-msg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # This script is an optional git hook and will prepend the issue 4 | # number to a commit message in the correct format for Github to parse. 5 | # 6 | # If you wish to use it, create a shortcut to this file in .git/hooks called 7 | # 'prepare-commit-msg' e.g. from top folder of your project: 8 | # ln -s ./.prepare-commit-msg.py .git/hooks/prepare-commit-msg 9 | # 10 | # or, for Windows users: 11 | # mklink .git\hooks\prepare-commit-msg .prepare-commit-msg.py 12 | # 13 | # If you use a graphical git client, you can configure it so that the issue 14 | # numbers become clickable in the log view. e.g. 
for Atlassian SourceTree: 15 | # From the 'Repository Settings' menu, click the 'Advanced' tab 16 | # In the 'Commit Text Replacements', click the 'Add' button 17 | # Select 'Other' as the 'Replacement Type' 18 | # Enter '#(\d{1,})' as the 'Regex Pattern' 19 | # Enter '#$1' as the 'Replace With' 20 | # 21 | # Any issue numbers created by this hook (or entered manually in the correct) 22 | # format will now be clickable links in the log view. 23 | 24 | import re 25 | import sys 26 | from subprocess import check_output 27 | 28 | # By default, the hook will check to see if the branch name starts with 29 | # 'issue-' and will then prepend whatever follows in the commit message. 30 | # e.g. for a branch named 'issue-123', the commit message will start with 31 | # '[#123]' 32 | # If you wish to use a diferent prefix on branch names, change it here. 33 | issue_prefix = "issue-" 34 | 35 | commit_msg_filepath = sys.argv[1] 36 | branch = ( 37 | check_output(["git", "symbolic-ref", "--short", "HEAD"]) 38 | .strip() 39 | .decode(encoding="UTF-8") 40 | ) 41 | 42 | if branch.startswith(issue_prefix): 43 | issue_number = re.match("%s(.*)" % issue_prefix, branch).group(1) 44 | print( 45 | "prepare-commit-msg: Prepending [#%s] to commit message" % issue_number 46 | ) 47 | 48 | with open(commit_msg_filepath, "r+") as f: 49 | content = f.read() 50 | f.seek(0, 0) 51 | f.write("[#%s] %s" % (issue_number, content)) 52 | else: 53 | print("prepare-commit-msg: No changes made to commit message") 54 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-22.04 5 | tools: 6 | python: "3.11" 7 | 8 | sphinx: 9 | configuration: docs/conf.py 10 | 11 | python: 12 | install: 13 | - requirements: docs/requirements.txt 14 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | # This CITATION.cff file was generated with cffinit. 2 | # Visit https://bit.ly/cffinit to generate yours today! 3 | 4 | cff-version: 1.2.0 5 | title: 'Axelrod-Python/Axelrod: v4.12.0' 6 | message: >- 7 | If you use this software, please cite it using the 8 | metadata from this file. 9 | type: software 10 | authors: 11 | - given-names: Vince 12 | family-names: Knight 13 | - given-names: Owen 14 | family-names: Campbell 15 | - given-names: Marc 16 | - given-names: T.J. 17 | family-names: Gaffney 18 | - given-names: Eric 19 | family-names: Shaw 20 | - given-names: VSN 21 | family-names: Reddy Janga 22 | - given-names: Nikoleta 23 | family-names: Glynatsi 24 | - given-names: James 25 | family-names: Campbell 26 | - given-names: Karol M. 
27 | family-names: Langner 28 | - given-names: Sourav 29 | family-names: Singh 30 | - given-names: Julie 31 | family-names: Rymer 32 | - given-names: Thomas 33 | family-names: Campbell 34 | - given-names: Jason 35 | family-names: Young 36 | - given-names: M 37 | family-names: Hakem 38 | - given-names: Geraint 39 | family-names: Palmer 40 | - given-names: Kristian 41 | family-names: Glass 42 | - given-names: Daniel 43 | family-names: Mancia 44 | - given-names: Edouard 45 | family-names: Argenson 46 | - given-names: Jones 47 | family-names: Martin 48 | - family-names: Kjurgielajtis 49 | - given-names: Yohsuke 50 | family-names: Murase 51 | - given-names: Sudarshan 52 | family-names: Parvatikar 53 | - given-names: Melanie 54 | family-names: Beck 55 | - given-names: Cameron 56 | family-names: Davidson-Pilon 57 | - given-names: Marios 58 | family-names: Zoulias 59 | - given-names: Adam 60 | family-names: Pohl 61 | - given-names: Paul 62 | family-names: Slavin 63 | - given-names: Timothy 64 | family-names: Standen 65 | - given-names: Aaron 66 | family-names: Kratz 67 | - given-names: Ahmed 68 | family-names: Areeb 69 | identifiers: 70 | - type: doi 71 | value: 10.5281/zenodo.5616793 72 | repository-code: 'https://github.com/Axelrod-Python/Axelrod' 73 | url: 'http://axelrod.readthedocs.org/' 74 | -------------------------------------------------------------------------------- /CITATION.rst: -------------------------------------------------------------------------------- 1 | Please use the following to cite the latest version of the Axelrod library:: 2 | 3 | @misc{axelrodproject, 4 | author = {{ {The Axelrod project developers} }}, 5 | title = {Axelrod: }, 6 | month = apr, 7 | year = 2016, 8 | doi = {}, 9 | url = {http://dx.doi.org/10.5281/zenodo.} 10 | } 11 | 12 | To check the details (RELEASE TITLE, DOI INFORMATION and DOI NUMBER) please view 13 | the Zenodo page for the project. Click on the badge/link below: 14 | 15 | .. image:: https://zenodo.org/badge/19509/Axelrod-Python/Axelrod.svg 16 | :target: https://zenodo.org/badge/latestdoi/19509/Axelrod-Python/Axelrod 17 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | Detailed contribution guidelines are available as part of our documentation at https://axelrod.readthedocs.io/en/latest/how-to/contributing/index.html 2 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 The Axelrod-Python project team members listed at 4 | https://github.com/Axelrod-Python/Axelrod/graphs/contributors 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 
15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.txt 2 | recursive-include docs *.rst 3 | recursive-include axelrod/data * 4 | -------------------------------------------------------------------------------- /axelrod/__init__.py: -------------------------------------------------------------------------------- 1 | # isort:skip_file 2 | DEFAULT_TURNS = 200 3 | 4 | # The order of imports matters! 5 | from axelrod.version import __version__ 6 | from axelrod.action import Action 7 | from axelrod.random_ import Pdf, RandomGenerator, BulkRandomGenerator 8 | 9 | # Initialize module level Random 10 | # This is initially seeded by the clock / OS entropy pool 11 | # It is not used if user specifies seeds everywhere and should only be 12 | # used internally by the library and in certain tests that need to set 13 | # its seed. 14 | _module_random = RandomGenerator() 15 | 16 | from axelrod.load_data_ import load_pso_tables, load_weights 17 | from axelrod import graph 18 | from axelrod.plot import Plot 19 | from axelrod.game import DefaultGame, AsymmetricGame, Game 20 | from axelrod.history import History, LimitedHistory 21 | from axelrod.player import Player 22 | from axelrod.classifier import Classifiers 23 | from axelrod.evolvable_player import EvolvablePlayer 24 | from axelrod.mock_player import MockPlayer 25 | from axelrod.match import Match 26 | from axelrod.moran import MoranProcess, ApproximateMoranProcess 27 | from axelrod.strategies import * 28 | from axelrod.deterministic_cache import DeterministicCache 29 | from axelrod.match_generator import * 30 | from axelrod.tournament import Tournament 31 | from axelrod.result_set import ResultSet 32 | from axelrod.ecosystem import Ecosystem 33 | from axelrod.fingerprint import AshlockFingerprint, TransitiveFingerprint 34 | -------------------------------------------------------------------------------- /axelrod/_strategy_utils.py: -------------------------------------------------------------------------------- 1 | """Utilities used by various strategies.""" 2 | 3 | import itertools 4 | from functools import lru_cache 5 | 6 | from axelrod.action import Action 7 | from axelrod.strategies.cooperator import Cooperator 8 | from axelrod.strategies.defector import Defector 9 | 10 | C, D = Action.C, Action.D 11 | 12 | 13 | def detect_cycle(history, min_size=1, max_size=12, offset=0): 14 | """Detects cycles in the sequence history. 15 | 16 | Mainly used by hunter strategies. 
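A minimal sketch of the intended behaviour (imports spelled out so the
    example is self-contained):

    >>> from axelrod._strategy_utils import detect_cycle
    >>> from axelrod.action import Action
    >>> C, D = Action.C, Action.D
    >>> detect_cycle([C, D, C, D, C, D])
    (C, D)
    >>> detect_cycle([C, C, D, D, D, D]) is None
    True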
17 | 18 | Parameters 19 | ---------- 20 | history: sequence of C and D 21 | The sequence to look for cycles within 22 | min_size: int, 1 23 | The minimum length of the cycle 24 | max_size: int, 12 25 | The maximum length of the cycle 26 | offset: int, 0 27 | The amount of history to skip initially 28 | 29 | Returns 30 | ------- 31 | Tuple of C and D 32 | The cycle detected in the input history 33 | """ 34 | history_tail = history[offset:] 35 | new_max_size = min(len(history_tail) // 2, max_size) 36 | for i in range(min_size, new_max_size + 1): 37 | has_cycle = True 38 | cycle = tuple(history_tail[:i]) 39 | for j, elem in enumerate(history_tail): 40 | if elem != cycle[j % len(cycle)]: 41 | has_cycle = False 42 | break 43 | if has_cycle: 44 | return cycle 45 | return None 46 | 47 | 48 | @lru_cache() 49 | def recursive_thue_morse(n): 50 | """The recursive definition of the Thue-Morse sequence. 51 | 52 | The first few terms of the Thue-Morse sequence are: 53 | 0 1 1 0 1 0 0 1 1 0 0 1 0 1 1 0 . . . 54 | """ 55 | 56 | if n == 0: 57 | return 0 58 | if n % 2 == 0: 59 | return recursive_thue_morse(n / 2) 60 | if n % 2 == 1: 61 | return 1 - recursive_thue_morse((n - 1) / 2) 62 | 63 | 64 | def thue_morse_generator(start=0): 65 | """A generator for the Thue-Morse sequence.""" 66 | for n in itertools.count(start): 67 | yield recursive_thue_morse(n) 68 | -------------------------------------------------------------------------------- /axelrod/action.py: -------------------------------------------------------------------------------- 1 | """Actions for the Prisoner's Dilemma and related utilities. 2 | 3 | For convenience in other modules you can alias the actions: 4 | 5 | from axelrod import Action 6 | C, D = Action.C, Action.D 7 | """ 8 | 9 | from enum import Enum 10 | from functools import total_ordering 11 | from typing import Iterable, Tuple 12 | 13 | 14 | class UnknownActionError(ValueError): 15 | """Error indicating an unknown action was used.""" 16 | 17 | def __init__(self, *args): 18 | super(UnknownActionError, self).__init__(*args) 19 | 20 | 21 | @total_ordering 22 | class Action(Enum): 23 | """Core actions in the Prisoner's Dilemma. 24 | 25 | There are only two possible actions, namely Cooperate or Defect, 26 | which are called C and D for convenience. 27 | """ 28 | 29 | C = 0 # Cooperate 30 | D = 1 # Defect 31 | 32 | def __lt__(self, other): 33 | return self.value < other.value 34 | 35 | def __repr__(self): 36 | return self.name 37 | 38 | def __str__(self): 39 | return self.name 40 | 41 | def flip(self): 42 | """Returns the opposite Action.""" 43 | if self == Action.C: 44 | return Action.D 45 | return Action.C 46 | 47 | @classmethod 48 | def from_char(cls, character): 49 | """Converts a single character into an Action. 50 | 51 | Parameters 52 | ---------- 53 | character: a string of length one 54 | 55 | Returns 56 | ------- 57 | Action 58 | The action corresponding to the input character 59 | 60 | 61 | Raises 62 | ------ 63 | UnknownActionError 64 | If the input string is not 'C' or 'D' 65 | """ 66 | if character == "C": 67 | return cls.C 68 | if character == "D": 69 | return cls.D 70 | raise UnknownActionError('Character must be "C" or "D".') 71 | 72 | 73 | def str_to_actions(actions: str) -> Tuple[Action, ...]: 74 | """Converts a string to a tuple of actions. 75 | 76 | Parameters 77 | ---------- 78 | actions: string consisting of 'C's and 'D's 79 | 80 | Returns 81 | ------- 82 | tuple 83 | Each element corresponds to a letter from the input string. 
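A short illustrative doctest:

    >>> from axelrod.action import str_to_actions
    >>> str_to_actions("CCD")
    (C, C, D)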
84 | """ 85 | return tuple(Action.from_char(element) for element in actions) 86 | 87 | 88 | def actions_to_str(actions: Iterable[Action]) -> str: 89 | """Converts an iterable of actions into a string. 90 | 91 | Example: (D, D, C) would be converted to 'DDC' 92 | 93 | Parameters 94 | ----------- 95 | actions: iterable of Action 96 | 97 | Returns 98 | ------- 99 | str 100 | A string of 'C's and 'D's. 101 | """ 102 | return "".join(map(str, actions)) 103 | -------------------------------------------------------------------------------- /axelrod/data/pso_gambler.csv: -------------------------------------------------------------------------------- 1 | # Name (string), plays (int), opp_plays(int), starting_plays(int), weights (floats) 2 | PSO Gambler Mem1, 1, 1, 0, 1.0, 0.52173487, 0.0, 0.12050939 3 | PSO Gambler 1_1_1, 1, 1, 1, 1.0, 1.0, 0.12304797, 0.57740178, 0.0, 0.0, 0.13581423, 0.11886807 4 | # , 2, 2, 2, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.93, 0.0, 1.0, 0.67, 0.42, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.48, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.19, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.36, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 5 | PSO Gambler 2_2_2, 2, 2, 2, 1.0, 1.0, 1.0, 0.0, 1.0, 0.95280465, 0.0, 0.0, 0.0, 0.80897541, 0.0, 0.0, 0.02126434, 0.0, 0.43278586, 0.0, 0.0, 0.0, 1.0, 0.15140743, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.23563137, 0.0, 0.0, 0.65147565, 1.0, 0.0, 0.0, 0.15412392, 1.0, 0.0, 0.0, 0.24922166, 1.0, 0.0, 0.0, 0.0, 0.00227615, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.77344942, 1.0, 0.24523149, 1.0, 0.0 6 | PSO Gambler 2_2_2 Noise 05, 2, 2, 2, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.98603825, 1.0, 1.0, 0.0, 0.0, 0.16240799, 0.63548102, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.13863175, 0.06434619, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.7724137, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.50999729, 1.0, 0.0, 0.0, 0.00524508, 0.87463905, 0.0, 0.07127653, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.28124022, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0 7 | -------------------------------------------------------------------------------- /axelrod/eigen.py: -------------------------------------------------------------------------------- 1 | """ 2 | Compute the principal eigenvector of a matrix using power iteration. 3 | 4 | See also numpy.linalg.eig which calculates all the eigenvalues and 5 | eigenvectors. 6 | """ 7 | 8 | from typing import Tuple 9 | 10 | import numpy as np 11 | 12 | 13 | def _normalise(nvec: np.ndarray) -> np.ndarray: 14 | """Normalises the given numpy array.""" 15 | with np.errstate(invalid="ignore"): 16 | result = nvec / np.sqrt((nvec @ nvec)) 17 | return result 18 | 19 | 20 | def _squared_error(vector_1: np.ndarray, vector_2: np.ndarray) -> float: 21 | """Computes the squared error between two numpy arrays.""" 22 | diff = vector_1 - vector_2 23 | s = diff @ diff 24 | return np.sqrt(s) 25 | 26 | 27 | def _power_iteration(mat: np.array, initial: np.ndarray) -> np.ndarray: 28 | """ 29 | Generator of successive approximations. 30 | 31 | Params 32 | ------ 33 | mat: numpy.array 34 | The matrix to use for multiplication iteration 35 | initial: numpy.array, None 36 | The initial state. 
Will be set to np.array([1, 1, ...]) if None 37 | 38 | Yields 39 | ------ 40 | Successive powers (mat ^ k) * initial 41 | """ 42 | 43 | vec = initial 44 | while True: 45 | vec = _normalise(np.dot(mat, vec)) 46 | yield vec 47 | 48 | 49 | def principal_eigenvector( 50 | mat: np.array, maximum_iterations=1000, max_error=1e-3 51 | ) -> Tuple[np.ndarray, float]: 52 | """ 53 | Computes the (normalised) principal eigenvector of the given matrix. 54 | 55 | Params 56 | ------ 57 | mat: numpy.array 58 | The matrix to use for multiplication iteration 59 | maximum_iterations: int, None 60 | The maximum number of iterations of the approximation 61 | max_error: float, 1e-8 62 | Exit criterion -- error threshold of the difference of successive steps 63 | 64 | Returns 65 | ------- 66 | ndarray 67 | Eigenvector estimate for the input matrix 68 | float 69 | Eigenvalue corresponding to the returned eigenvector 70 | """ 71 | 72 | mat_ = np.array(mat) 73 | size = mat_.shape[0] 74 | initial = np.ones(size) 75 | 76 | # Power iteration 77 | if not maximum_iterations: 78 | maximum_iterations = float("inf") 79 | last = initial 80 | for i, vector in enumerate(_power_iteration(mat, initial=initial)): 81 | if i > maximum_iterations: 82 | break 83 | if _squared_error(vector, last) < max_error: 84 | break 85 | last = vector 86 | # Compute the eigenvalue (Rayleigh quotient) 87 | eigenvalue = ((mat_ @ vector) @ vector) / (vector @ vector) 88 | # Liberate the eigenvalue from numpy 89 | eigenvalue = float(eigenvalue) 90 | return vector, eigenvalue 91 | -------------------------------------------------------------------------------- /axelrod/load_data_.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import pkgutil 3 | from typing import Callable, Dict, List, Optional, Tuple 4 | 5 | 6 | def axl_filename(path: pathlib.Path) -> pathlib.Path: 7 | """Given a path under Axelrod/, return absolute filepath. 8 | 9 | Parameters 10 | ---------- 11 | axl_path 12 | A pathlib.Path object with the relative directory under Axelrod/ 13 | 14 | Returns 15 | ------- 16 | A pathlib.Path object with the absolute directory. 17 | """ 18 | # We go up a dir because this code is located in Axelrod/axelrod. 
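    # A sketch with a hypothetical checkout location: if this file sits at
    # /home/user/Axelrod/axelrod/load_data_.py, then parent.parent is
    # /home/user/Axelrod, so axl_filename(pathlib.Path("test_outputs/test_results.csv"))
    # resolves to /home/user/Axelrod/test_outputs/test_results.csv.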
19 | axl_path = pathlib.Path(__file__).resolve().parent.parent 20 | return axl_path / path 21 | 22 | 23 | def load_file( 24 | filename: str, 25 | directory: str, 26 | get_data: Callable[[str, str], Optional[bytes]] = pkgutil.get_data, 27 | ) -> List[List[str]]: 28 | """Loads a data file stored in the Axelrod library's data subdirectory, 29 | likely for parameters for a strategy.""" 30 | 31 | path = str(pathlib.Path(directory) / filename) 32 | data_bytes = get_data(__name__, path) 33 | if data_bytes is None: 34 | raise FileNotFoundError(f"Some loader issue for path {path}") 35 | data = data_bytes.decode("UTF-8", "replace") 36 | 37 | rows = [] 38 | for line in data.split("\n"): 39 | if line.startswith("#") or len(line) == 0: 40 | continue 41 | s = line.split(", ") 42 | rows.append(s) 43 | return rows 44 | 45 | 46 | def load_weights( 47 | filename: str = "ann_weights.csv", directory: str = "data" 48 | ) -> Dict[str, Tuple[int, int, List[float]]]: 49 | """Load Neural Network Weights.""" 50 | rows = load_file(filename, directory) 51 | d = dict() 52 | for row in rows: 53 | name = str(row[0]) 54 | num_features = int(row[1]) 55 | num_hidden = int(row[2]) 56 | weights = list(map(float, row[3:])) 57 | d[name] = (num_features, num_hidden, weights) 58 | return d 59 | 60 | 61 | def load_pso_tables(filename="pso_gambler.csv", directory="data"): 62 | """Load lookup tables.""" 63 | rows = load_file(filename, directory) 64 | d = dict() 65 | for row in rows: 66 | ( 67 | name, 68 | a, 69 | b, 70 | c, 71 | ) = ( 72 | str(row[0]), 73 | int(row[1]), 74 | int(row[2]), 75 | int(row[3]), 76 | ) 77 | values = list(map(float, row[4:])) 78 | d[(name, int(a), int(b), int(c))] = values 79 | return d 80 | -------------------------------------------------------------------------------- /axelrod/makes_use_of.py: -------------------------------------------------------------------------------- 1 | import inspect 2 | import re 3 | from typing import Callable, Set, Text, Type, Union 4 | 5 | from axelrod.player import Player 6 | 7 | 8 | def method_makes_use_of(method: Callable) -> Set[Text]: 9 | result = set() 10 | method_code = inspect.getsource(method) 11 | attr_string = r".match_attributes\[\"(\w+)\"\]" 12 | all_attrs = re.findall(attr_string, method_code) 13 | for attr in all_attrs: 14 | result.add(attr) 15 | return result 16 | 17 | 18 | def class_makes_use_of(cls) -> Set[Text]: 19 | try: 20 | result = cls.classifier["makes_use_of"] 21 | except (AttributeError, KeyError): 22 | result = set() 23 | 24 | for method in inspect.getmembers(cls, inspect.ismethod): 25 | if method[0] == "__init__": 26 | continue 27 | result.update(method_makes_use_of(method[1])) 28 | return result 29 | 30 | 31 | def makes_use_of(player: Type[Player]) -> Set[Text]: 32 | if not isinstance(player, Player): # pragma: no cover 33 | player = player() 34 | 35 | return class_makes_use_of(player) 36 | 37 | 38 | def makes_use_of_variant( 39 | player_or_method: Union[Callable, Type[Player]], 40 | ) -> Set[Text]: 41 | """A version of makes_use_of that works on functions or player classes.""" 42 | try: 43 | return method_makes_use_of(player_or_method) 44 | # OSError catches the case of a transformed player, which has a dynamically 45 | # created class. 
46 | # TypeError is the case in which we have a class rather than a method 47 | except (OSError, TypeError): 48 | return class_makes_use_of(player_or_method) 49 | -------------------------------------------------------------------------------- /axelrod/mock_player.py: -------------------------------------------------------------------------------- 1 | from itertools import cycle 2 | from typing import List, Optional 3 | 4 | from axelrod.action import Action 5 | from axelrod.player import Player 6 | 7 | C, D = Action.C, Action.D 8 | 9 | 10 | class MockPlayer(Player): 11 | """Creates a mock player that plays a given sequence of actions. If 12 | no actions are given, plays like Cooperator. Used for testing. 13 | """ 14 | 15 | name = "Mock Player" 16 | 17 | def __init__(self, actions: Optional[List[Action]] = None) -> None: 18 | super().__init__() 19 | if not actions: 20 | actions = [] 21 | self.actions = cycle(actions) 22 | 23 | def strategy(self, opponent: Player) -> Action: 24 | # Return the next saved action, if present. 25 | try: 26 | action = self.actions.__next__() 27 | return action 28 | except StopIteration: 29 | return C 30 | -------------------------------------------------------------------------------- /axelrod/strategies/adaptive.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | 3 | from axelrod.action import Action 4 | from axelrod.player import Player 5 | 6 | C, D = Action.C, Action.D 7 | 8 | 9 | class Adaptive(Player): 10 | """Start with a specific sequence of C and D, then play the strategy that 11 | has worked best, recalculated each turn. 12 | 13 | Names: 14 | 15 | - Adaptive: [Li2011]_ 16 | 17 | """ 18 | 19 | name = "Adaptive" 20 | classifier = { 21 | "memory_depth": float("inf"), # Long memory 22 | "stochastic": False, 23 | "long_run_time": False, 24 | "inspects_source": False, 25 | "manipulates_source": False, 26 | "manipulates_state": False, 27 | } 28 | 29 | def __init__(self, initial_plays: Optional[List[Action]] = None) -> None: 30 | super().__init__() 31 | if not initial_plays: 32 | initial_plays = [C] * 6 + [D] * 5 33 | self.initial_plays = initial_plays 34 | self.scores = {C: 0, D: 0} 35 | 36 | def score_last_round(self, opponent: Player): 37 | # Load the default game if not supplied by a tournament. 
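        # For example, with the library's usual default game
        # (R, S, T, P) = (3, 0, 5, 1), a (C, C) last round adds 3 to
        # self.scores[C], a (C, D) round adds 0 to self.scores[C], and a
        # (D, C) round adds 5 to self.scores[D].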
38 | game = self.match_attributes["game"] 39 | if len(self.history): 40 | last_round = (self.history[-1], opponent.history[-1]) 41 | scores = game.score(last_round) 42 | self.scores[last_round[0]] += scores[0] 43 | 44 | def strategy(self, opponent: Player) -> Action: 45 | """Actual strategy definition that determines player's action.""" 46 | # Update scores from the last play 47 | self.score_last_round(opponent) 48 | # Begin by playing the sequence C,C,C,C,C,C,D,D,D,D,D 49 | index = len(self.history) 50 | if index < len(self.initial_plays): 51 | return self.initial_plays[index] 52 | # Play the strategy with the highest average score so far 53 | if self.scores[C] > self.scores[D]: 54 | return C 55 | return D 56 | -------------------------------------------------------------------------------- /axelrod/strategies/adaptor.py: -------------------------------------------------------------------------------- 1 | from typing import Dict, Tuple 2 | 3 | from numpy import heaviside 4 | 5 | from axelrod.action import Action 6 | from axelrod.player import Player 7 | 8 | C, D = Action.C, Action.D 9 | 10 | 11 | class AbstractAdaptor(Player): 12 | """ 13 | An adaptive strategy that updates an internal state based on the last 14 | round of play. Using this state the player Cooperates with a probability 15 | derived from the state. 16 | 17 | s, float: 18 | the internal state, initially 0 19 | perr, float: 20 | an error threshold for misinterpreted moves 21 | delta, a dictionary of floats: 22 | additive update values for s depending on the last round's outcome 23 | 24 | Names: 25 | 26 | - Adaptor: [Hauert2002]_ 27 | 28 | """ 29 | 30 | name = "AbstractAdaptor" 31 | classifier = { 32 | "memory_depth": float("inf"), # Long memory 33 | "stochastic": True, 34 | "long_run_time": False, 35 | "inspects_source": False, 36 | "manipulates_source": False, 37 | "manipulates_state": False, 38 | } 39 | 40 | def __init__( 41 | self, delta: Dict[Tuple[Action, Action], float], perr: float = 0.01 42 | ) -> None: 43 | super().__init__() 44 | self.perr = perr 45 | self.delta = delta 46 | self.s = 0.0 47 | 48 | def strategy(self, opponent: Player) -> Action: 49 | """Actual strategy definition that determines player's action.""" 50 | if self.history: 51 | # Update internal state from the last play 52 | last_round = (self.history[-1], opponent.history[-1]) 53 | self.s += self.delta[last_round] 54 | 55 | # Compute probability of Cooperation 56 | p = self.perr + (1.0 - 2 * self.perr) * ( 57 | heaviside(self.s + 1, 1) - heaviside(self.s - 1, 1) 58 | ) 59 | # Draw action 60 | action = self._random.random_choice(p) 61 | return action 62 | 63 | 64 | class AdaptorBrief(AbstractAdaptor): 65 | """ 66 | An Adaptor trained on short interactions. 67 | 68 | Names: 69 | 70 | - AdaptorBrief: [Hauert2002]_ 71 | 72 | """ 73 | 74 | name = "AdaptorBrief" 75 | 76 | def __init__(self) -> None: 77 | delta = { 78 | (C, C): 0.0, # R 79 | (C, D): -1.001505, # S 80 | (D, C): 0.992107, # T 81 | (D, D): -0.638734, # P 82 | } 83 | super().__init__(delta=delta) 84 | 85 | 86 | class AdaptorLong(AbstractAdaptor): 87 | """ 88 | An Adaptor trained on long interactions. 
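    As a rough worked example of the shared AbstractAdaptor rule above: with
    perr = 0.01 the cooperation probability is 1 - perr = 0.99 while the
    internal state s stays inside [-1, 1), and it collapses to perr = 0.01 once
    the accumulated deltas push s outside that band.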
89 | 90 | Names: 91 | 92 | - AdaptorLong: [Hauert2002]_ 93 | 94 | """ 95 | 96 | name = "AdaptorLong" 97 | 98 | def __init__(self) -> None: 99 | delta = { 100 | (C, C): 0.0, # R 101 | (C, D): 1.888159, # S 102 | (D, C): 1.858883, # T 103 | (D, D): -0.995703, # P 104 | } 105 | super().__init__(delta=delta) 106 | -------------------------------------------------------------------------------- /axelrod/strategies/alternator.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Alternator(Player): 8 | """ 9 | A player who alternates between cooperating and defecting. 10 | 11 | Names 12 | 13 | - Alternator: [Axelrod1984]_ 14 | - Periodic player CD: [Mittal2009]_ 15 | """ 16 | 17 | name = "Alternator" 18 | classifier = { 19 | "memory_depth": 1, 20 | "stochastic": False, 21 | "long_run_time": False, 22 | "inspects_source": False, 23 | "manipulates_source": False, 24 | "manipulates_state": False, 25 | } 26 | 27 | def strategy(self, opponent: Player) -> Action: 28 | """Actual strategy definition that determines player's action.""" 29 | if len(self.history) == 0: 30 | return C 31 | if self.history[-1] == C: 32 | return D 33 | return C 34 | -------------------------------------------------------------------------------- /axelrod/strategies/appeaser.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Appeaser(Player): 8 | """A player who tries to guess what the opponent wants. 9 | 10 | Switch the classifier every time the opponent plays D. 11 | Start with C, switch between C and D when opponent plays D. 12 | 13 | Names: 14 | 15 | - Appeaser: Original Name by Jochen Müller 16 | """ 17 | 18 | name = "Appeaser" 19 | classifier = { 20 | "memory_depth": float("inf"), # Depends on internal memory. 21 | "stochastic": False, 22 | "long_run_time": False, 23 | "inspects_source": False, 24 | "manipulates_source": False, 25 | "manipulates_state": False, 26 | } 27 | 28 | def strategy(self, opponent: Player) -> Action: 29 | """Actual strategy definition that determines player's action.""" 30 | if not len(opponent.history): 31 | return C 32 | else: 33 | if opponent.history[-1] == D: 34 | if self.history[-1] == C: 35 | return D 36 | else: 37 | return C 38 | return self.history[-1] 39 | -------------------------------------------------------------------------------- /axelrod/strategies/averagecopier.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class AverageCopier(Player): 8 | """ 9 | The player will cooperate with probability p if the opponent's cooperation 10 | ratio is p. Starts with random decision. 
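    A small worked example: if the opponent's history so far is C, C, D, the
    cooperation ratio is 2/3, so on the next turn this player cooperates with
    probability p = 2/3.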
11 | 12 | Names: 13 | 14 | - Average Copier: Original name by Geraint Palmer 15 | """ 16 | 17 | name = "Average Copier" 18 | classifier = { 19 | "memory_depth": float("inf"), # Long memory 20 | "stochastic": True, 21 | "long_run_time": False, 22 | "inspects_source": False, 23 | "manipulates_source": False, 24 | "manipulates_state": False, 25 | } 26 | 27 | def strategy(self, opponent: Player) -> Action: 28 | """Actual strategy definition that determines player's action.""" 29 | if len(opponent.history) == 0: 30 | # Randomly picks a strategy (not affected by history). 31 | return self._random.random_choice(0.5) 32 | p = opponent.cooperations / len(opponent.history) 33 | return self._random.random_choice(p) 34 | 35 | 36 | class NiceAverageCopier(Player): 37 | """ 38 | Same as Average Copier, but always starts by cooperating. 39 | 40 | Names: 41 | 42 | - Average Copier: Original name by Owen Campbell 43 | """ 44 | 45 | name = "Nice Average Copier" 46 | classifier = { 47 | "memory_depth": float("inf"), # Long memory 48 | "stochastic": True, 49 | "long_run_time": False, 50 | "inspects_source": False, 51 | "manipulates_source": False, 52 | "manipulates_state": False, 53 | } 54 | 55 | def strategy(self, opponent: Player) -> Action: 56 | """Actual strategy definition that determines player's action.""" 57 | if len(opponent.history) == 0: 58 | return C 59 | p = opponent.cooperations / len(opponent.history) 60 | return self._random.random_choice(p) 61 | -------------------------------------------------------------------------------- /axelrod/strategies/better_and_better.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class BetterAndBetter(Player): 8 | """ 9 | Defects with probability of '(1000 - current turn) / 1000'. 10 | Therefore it is less and less likely to defect as the round goes on. 11 | 12 | Names: 13 | - Better and Better: [Prison1998]_ 14 | 15 | """ 16 | 17 | name = "Better and Better" 18 | classifier = { 19 | "memory_depth": float("inf"), 20 | "stochastic": True, 21 | "long_run_time": False, 22 | "inspects_source": False, 23 | "manipulates_source": False, 24 | "manipulates_state": False, 25 | } 26 | 27 | def strategy(self, opponent: Player) -> Action: 28 | """Actual strategy definition that determines player's action.""" 29 | current_round = len(self.history) + 1 30 | probability = current_round / 1000 31 | return self._random.random_choice(probability) 32 | -------------------------------------------------------------------------------- /axelrod/strategies/calculator.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from axelrod._strategy_utils import detect_cycle 4 | from axelrod.action import Action 5 | from axelrod.player import Player 6 | 7 | from .axelrod_first import FirstByJoss as Joss 8 | 9 | C, D = Action.C, Action.D 10 | 11 | 12 | class Calculator(Player): 13 | """ 14 | Plays like (Hard) Joss for the first 20 rounds. If periodic behavior is 15 | detected, defect forever. Otherwise play TFT. 
16 | 17 | 18 | Names: 19 | 20 | - Calculator: [Prison1998]_ 21 | """ 22 | 23 | name = "Calculator" 24 | classifier = { 25 | "memory_depth": float("inf"), 26 | "stochastic": True, 27 | "long_run_time": False, 28 | "inspects_source": False, 29 | "manipulates_source": False, 30 | "manipulates_state": False, 31 | } 32 | 33 | def __init__(self) -> None: 34 | self.joss_instance = Joss() 35 | super().__init__() 36 | 37 | def set_seed(self, seed: Optional[int] = None): 38 | super().set_seed(seed) 39 | self.joss_instance.set_seed(seed) 40 | 41 | def strategy(self, opponent: Player) -> Action: 42 | """Actual strategy definition that determines player's action.""" 43 | turn = len(self.history) 44 | if turn > 0: 45 | self.joss_instance.history.append( 46 | self.history[-1], opponent.history[-1] 47 | ) 48 | if turn == 20: 49 | self.cycle = detect_cycle(opponent.history) 50 | return self.extended_strategy(opponent) 51 | if turn > 20: 52 | return self.extended_strategy(opponent) 53 | else: 54 | play = self.joss_instance.strategy(opponent) 55 | return play 56 | 57 | def extended_strategy(self, opponent: Player) -> Action: 58 | if self.cycle: 59 | return D 60 | else: 61 | # TFT 62 | return D if opponent.history[-1:] == [D] else C 63 | -------------------------------------------------------------------------------- /axelrod/strategies/cooperator.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Cooperator(Player): 8 | """A player who only ever cooperates. 9 | 10 | Names: 11 | 12 | - Cooperator: [Axelrod1984]_ 13 | - ALLC: [Press2012]_ 14 | - Always cooperate: [Mittal2009]_ 15 | """ 16 | 17 | name = "Cooperator" 18 | classifier = { 19 | "memory_depth": 0, 20 | "stochastic": False, 21 | "long_run_time": False, 22 | "inspects_source": False, 23 | "manipulates_source": False, 24 | "manipulates_state": False, 25 | } 26 | 27 | @staticmethod 28 | def strategy(opponent: Player) -> Action: 29 | """Actual strategy definition that determines player's action.""" 30 | return C 31 | 32 | 33 | class TrickyCooperator(Player): 34 | """ 35 | A cooperator that is trying to be tricky. 36 | 37 | Names: 38 | 39 | - Tricky Cooperator: Original name by Karol Langner 40 | """ 41 | 42 | name = "Tricky Cooperator" 43 | classifier = { 44 | "memory_depth": 10, 45 | "stochastic": False, 46 | "long_run_time": False, 47 | "inspects_source": False, 48 | "manipulates_source": False, 49 | "manipulates_state": False, 50 | } 51 | 52 | _min_history_required_to_try_trickiness = 3 53 | _max_history_depth_for_trickiness = -10 54 | 55 | def strategy(self, opponent: Player) -> Action: 56 | """Almost always cooperates, but will try to trick the opponent by 57 | defecting. 58 | 59 | Defect once in a while in order to get a better payout. 60 | After 3 rounds, if opponent has not defected to a max history depth of 61 | 10, defect. 
62 | """ 63 | if ( 64 | self._has_played_enough_rounds_to_be_tricky() 65 | and self._opponents_has_cooperated_enough_to_be_tricky(opponent) 66 | ): 67 | return D 68 | return C 69 | 70 | def _has_played_enough_rounds_to_be_tricky(self): 71 | return len(self.history) >= self._min_history_required_to_try_trickiness 72 | 73 | def _opponents_has_cooperated_enough_to_be_tricky(self, opponent): 74 | rounds_to_be_checked = opponent.history[ 75 | self._max_history_depth_for_trickiness : 76 | ] 77 | return D not in rounds_to_be_checked 78 | -------------------------------------------------------------------------------- /axelrod/strategies/defector.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Defector(Player): 8 | """A player who only ever defects. 9 | 10 | Names: 11 | 12 | - Defector: [Axelrod1984]_ 13 | - ALLD: [Press2012]_ 14 | - Always defect: [Mittal2009]_ 15 | """ 16 | 17 | name = "Defector" 18 | classifier = { 19 | "memory_depth": 0, 20 | "stochastic": False, 21 | "long_run_time": False, 22 | "inspects_source": False, 23 | "manipulates_source": False, 24 | "manipulates_state": False, 25 | } 26 | 27 | @staticmethod 28 | def strategy(opponent: Player) -> Action: 29 | """Actual strategy definition that determines player's action.""" 30 | return D 31 | 32 | 33 | class TrickyDefector(Player): 34 | """A defector that is trying to be tricky. 35 | 36 | Names: 37 | 38 | - Tricky Defector: Original name by Karol Langner 39 | """ 40 | 41 | name = "Tricky Defector" 42 | classifier = { 43 | "memory_depth": float("inf"), # Long memory 44 | "stochastic": False, 45 | "long_run_time": False, 46 | "inspects_source": False, 47 | "manipulates_source": False, 48 | "manipulates_state": False, 49 | } 50 | 51 | def strategy(self, opponent: Player) -> Action: 52 | """Almost always defects, but will try to trick the opponent into 53 | cooperating. 54 | 55 | Defect if opponent has cooperated at least once in the past and has 56 | defected for the last 3 turns in a row. 57 | """ 58 | if ( 59 | opponent.history.cooperations > 0 60 | and opponent.history[-3:] == [D] * 3 61 | ): 62 | return C 63 | return D 64 | -------------------------------------------------------------------------------- /axelrod/strategies/doubler.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Doubler(Player): 8 | """ 9 | Cooperates except when the opponent has defected and 10 | the opponent's cooperation count is less than twice their defection count. 
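    For instance, if the opponent defected last turn and has 6 cooperations
    against 3 defections (6 <= 2 * 3), Doubler defects; with 7 cooperations it
    would cooperate instead.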
11 | 12 | Names: 13 | 14 | - Doubler: [Prison1998]_ 15 | """ 16 | 17 | name = "Doubler" 18 | classifier = { 19 | "stochastic": False, 20 | "memory_depth": float("inf"), 21 | "long_run_time": False, 22 | "inspects_source": False, 23 | "manipulates_source": False, 24 | "manipulates_state": False, 25 | } 26 | 27 | def strategy(self, opponent: Player) -> Action: 28 | """Actual strategy definition that determines player's action.""" 29 | if not self.history: 30 | return C 31 | if ( 32 | opponent.history[-1] == D 33 | and opponent.cooperations <= opponent.defections * 2 34 | ): 35 | return D 36 | return C 37 | -------------------------------------------------------------------------------- /axelrod/strategies/forgiver.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Forgiver(Player): 8 | """ 9 | A player starts by cooperating however will defect if at any point 10 | the opponent has defected more than 10 percent of the time 11 | 12 | Names: 13 | 14 | - Forgiver: Original name by Thomas Campbell 15 | """ 16 | 17 | name = "Forgiver" 18 | classifier = { 19 | "memory_depth": float("inf"), # Long memory 20 | "stochastic": False, 21 | "long_run_time": False, 22 | "inspects_source": False, 23 | "manipulates_source": False, 24 | "manipulates_state": False, 25 | } 26 | 27 | def strategy(self, opponent: Player) -> Action: 28 | """ 29 | Begins by playing C, then plays D if the opponent has defected more 30 | than 10 percent of the time. 31 | """ 32 | if opponent.defections > len(opponent.history) / 10.0: 33 | return D 34 | return C 35 | 36 | 37 | class ForgivingTitForTat(Player): 38 | """ 39 | A player starts by cooperating however will defect if at any point, the 40 | opponent has defected more than 10 percent of the time, and their most 41 | recent decision was defect. 42 | 43 | Names: 44 | 45 | - Forgiving Tit For Tat: Original name by Thomas Campbell 46 | """ 47 | 48 | name = "Forgiving Tit For Tat" 49 | classifier = { 50 | "memory_depth": float("inf"), # Long memory 51 | "stochastic": False, 52 | "long_run_time": False, 53 | "inspects_source": False, 54 | "manipulates_source": False, 55 | "manipulates_state": False, 56 | } 57 | 58 | def strategy(self, opponent: Player) -> Action: 59 | """ 60 | Begins by playing C, then plays D if the opponent has defected more than 61 | 10 percent of the time and their most recent decision was defect. 62 | """ 63 | if opponent.defections > len(opponent.history) / 10: 64 | return opponent.history[-1] 65 | return C 66 | -------------------------------------------------------------------------------- /axelrod/strategies/gradualkiller.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | from axelrod.strategy_transformers import InitialTransformer 4 | 5 | C, D = Action.C, Action.D 6 | 7 | 8 | @InitialTransformer((D, D, D, D, D, C, C), name_prefix=None) 9 | class GradualKiller(Player): 10 | """ 11 | It begins by defecting in the first five moves, then cooperates two times. 12 | It then defects all the time if the opponent has defected in move 6 and 7, 13 | else cooperates all the time. 14 | Initially designed to stop Gradual from defeating TitForTat in a 3 Player 15 | tournament. 
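    A quick sketch of the opening against an always-defecting opponent
    (assuming the top-level axelrod import):

    >>> import axelrod as axl
    >>> match = axl.Match((axl.GradualKiller(), axl.Defector()), turns=9)
    >>> [pair[0] for pair in match.play()]
    [D, D, D, D, D, C, C, D, D]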
16 | 17 | Names 18 | 19 | - Gradual Killer: [Prison1998]_ 20 | """ 21 | 22 | # These are various properties for the strategy 23 | name = "Gradual Killer" 24 | classifier = { 25 | "memory_depth": float("Inf"), 26 | "stochastic": False, 27 | "long_run_time": False, 28 | "inspects_source": False, 29 | "manipulates_source": False, 30 | "manipulates_state": False, 31 | } 32 | 33 | def strategy(self, opponent: Player) -> Action: 34 | """Actual strategy definition that determines player's action.""" 35 | if opponent.history[5:7] == [D, D]: 36 | return D 37 | return C 38 | -------------------------------------------------------------------------------- /axelrod/strategies/grumpy.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Grumpy(Player): 8 | """ 9 | A player that defects after a certain level of grumpiness. 10 | Grumpiness increases when the opponent defects and decreases 11 | when the opponent co-operates. 12 | 13 | Names: 14 | 15 | - Grumpy: Original name by Jason Young 16 | """ 17 | 18 | name = "Grumpy" 19 | classifier = { 20 | "memory_depth": float("inf"), # Long memory 21 | "stochastic": False, 22 | "long_run_time": False, 23 | "inspects_source": False, 24 | "manipulates_source": False, 25 | "manipulates_state": False, 26 | } 27 | 28 | def __init__( 29 | self, 30 | starting_state: str = "Nice", 31 | grumpy_threshold: int = 10, 32 | nice_threshold: int = -10, 33 | ) -> None: 34 | """ 35 | Parameters 36 | ---------- 37 | starting_state: str 38 | 'Nice' or 'Grumpy' 39 | grumpy_threshold: int 40 | The threshold of opponent defections - cooperations to become 41 | grumpy 42 | nice_threshold: int 43 | The threshold of opponent defections - cooperations to become 44 | nice 45 | """ 46 | super().__init__() 47 | self.state = starting_state 48 | self.grumpy_threshold = grumpy_threshold 49 | self.nice_threshold = nice_threshold 50 | 51 | def strategy(self, opponent: Player) -> Action: 52 | """A player that gets grumpier the more the opposition defects, 53 | and nicer the more they cooperate. 54 | 55 | Starts off Nice, but becomes grumpy once the grumpiness threshold is 56 | hit. Won't become nice once that grumpy threshold is hit, but must 57 | reach a much lower threshold before it becomes nice again. 58 | """ 59 | 60 | grumpiness = opponent.defections - opponent.cooperations 61 | 62 | if self.state == "Nice": 63 | if grumpiness > self.grumpy_threshold: 64 | self.state = "Grumpy" 65 | return D 66 | return C 67 | 68 | if self.state == "Grumpy": 69 | if grumpiness < self.nice_threshold: 70 | self.state = "Nice" 71 | return C 72 | return D 73 | -------------------------------------------------------------------------------- /axelrod/strategies/handshake.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional 2 | 3 | from axelrod.action import Action 4 | from axelrod.player import Player 5 | 6 | C, D = Action.C, Action.D 7 | 8 | 9 | class Handshake(Player): 10 | """Starts with C, D. If the opponent plays the same way, cooperate forever, 11 | else defect forever. 
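    A quick sketch against an always-cooperating opponent (assuming the
    top-level axelrod import): the opponent's C, C reply fails the C, D
    handshake, so the player defects from the third turn onwards.

    >>> import axelrod as axl
    >>> match = axl.Match((axl.Handshake(), axl.Cooperator()), turns=5)
    >>> match.play()
    [(C, C), (D, C), (D, C), (D, C), (D, C)]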
12 | 13 | Names: 14 | 15 | - Handshake: [Robson1990]_ 16 | """ 17 | 18 | name = "Handshake" 19 | classifier = { 20 | "memory_depth": float("inf"), # Long memory 21 | "stochastic": False, 22 | "long_run_time": False, 23 | "inspects_source": False, 24 | "manipulates_source": False, 25 | "manipulates_state": False, 26 | } 27 | 28 | def __init__(self, initial_plays: Optional[List[Action]] = None) -> None: 29 | super().__init__() 30 | if not initial_plays: 31 | initial_plays = [C, D] 32 | self.initial_plays = initial_plays 33 | 34 | def strategy(self, opponent: Player) -> Action: 35 | """Actual strategy definition that determines player's action.""" 36 | # Begin by playing the sequence C, D 37 | index = len(self.history) 38 | if index < len(self.initial_plays): 39 | return self.initial_plays[index] 40 | # If our opponent played [C, D] on the first two moves, cooperate 41 | # forever. Otherwise defect forever. 42 | if opponent.history[0 : len(self.initial_plays)] == self.initial_plays: 43 | return C 44 | return D 45 | -------------------------------------------------------------------------------- /axelrod/strategies/inverse.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Inverse(Player): 8 | """A player who defects with a probability that diminishes relative to how 9 | long ago the opponent defected. 10 | 11 | Names: 12 | 13 | - Inverse: Original Name by Karol Langner 14 | """ 15 | 16 | name = "Inverse" 17 | classifier = { 18 | "memory_depth": float("inf"), # Long memory 19 | "stochastic": True, 20 | "long_run_time": False, 21 | "inspects_source": False, 22 | "manipulates_source": False, 23 | "manipulates_state": False, 24 | } 25 | 26 | def strategy(self, opponent: Player) -> Action: 27 | """Looks at opponent history to see if they have defected. 28 | 29 | If so, player defection is inversely proportional to when this occurred. 
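        Concretely, if the opponent's most recent defection was index turns
        ago, the player defects with probability 1 / index, so a defection on
        the previous turn is met with certain defection while older
        defections are increasingly forgiven.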
30 | """ 31 | 32 | # calculate how many turns ago the opponent defected 33 | index = next( 34 | ( 35 | index 36 | for index, value in enumerate(opponent.history[::-1], start=1) 37 | if value == D 38 | ), 39 | None, 40 | ) 41 | 42 | if index is None: 43 | return C 44 | 45 | return self._random.random_choice(1 - 1 / abs(index)) 46 | -------------------------------------------------------------------------------- /axelrod/strategies/mathematicalconstants.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | from axelrod.action import Action 4 | from axelrod.player import Player 5 | 6 | C, D = Action.C, Action.D 7 | 8 | 9 | class CotoDeRatio(Player): 10 | """The player will always aim to bring the ratio of co-operations to 11 | defections closer to the ratio as given in a sub class 12 | 13 | Names: 14 | 15 | - Co to Do Ratio: Original Name by Timothy Standen 16 | """ 17 | 18 | classifier = { 19 | "stochastic": False, 20 | "memory_depth": float("inf"), # Long memory 21 | "long_run_time": False, 22 | "inspects_source": False, 23 | "manipulates_source": False, 24 | "manipulates_state": False, 25 | } 26 | 27 | def strategy(self, opponent: Player) -> Action: 28 | """Actual strategy definition that determines player's action.""" 29 | # Initially cooperate 30 | if len(opponent.history) == 0: 31 | return C 32 | # Avoid initial division by zero 33 | if not opponent.defections: 34 | return D 35 | # Otherwise compare ratio to golden mean 36 | cooperations = opponent.cooperations + self.cooperations 37 | defections = opponent.defections + self.defections 38 | if cooperations / defections > self.ratio: 39 | return D 40 | return C 41 | 42 | 43 | class Golden(CotoDeRatio): 44 | """The player will always aim to bring the ratio of co-operations to 45 | defections closer to the golden mean 46 | 47 | Names: 48 | 49 | - Golden: Original Name by Timothy Standen 50 | """ 51 | 52 | name = "$\phi$" 53 | ratio = (1 + math.sqrt(5)) / 2 54 | 55 | 56 | class Pi(CotoDeRatio): 57 | """The player will always aim to bring the ratio of co-operations to 58 | defections closer to the pi 59 | 60 | Names: 61 | 62 | - Pi: Original Name by Timothy Standen 63 | """ 64 | 65 | name = "$\pi$" 66 | ratio = math.pi 67 | 68 | 69 | class e(CotoDeRatio): 70 | """The player will always aim to bring the ratio of co-operations to 71 | defections closer to the e 72 | 73 | Names: 74 | 75 | - e: Original Name by Timothy Standen 76 | """ 77 | 78 | name = "$e$" 79 | ratio = math.e 80 | -------------------------------------------------------------------------------- /axelrod/strategies/momentum.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Momentum(Player): 8 | """ 9 | This strategy is inspired by the concept of Gradual and the mathematical foundation of 10 | the Momentum optimizer used in deep learning. 11 | 12 | The idea is that trust (or cooperation) evolves dynamically. A shift in trust can 13 | create significant and rapid changes in the player's behavior, much like how momentum 14 | responds to gradients in optimization. 15 | 16 | Parameters: 17 | - alpha: Momentum decay factor that determines the rate of trust reduction. A higher value leads to slower decay, and the opponent's Defect acts as a trigger. (Optimized by Genetic Algorithm) 18 | - threshold: The minimum momentum required to continue cooperation. 
If momentum falls below this value, the strategy switches to Defect as punishment. (Optimized by Genetic Algorithm) 19 | - momentum: Represents the inertia of trust, dynamically changing based on past cooperation. 20 | 21 | Names: 22 | - Momentum: Original name by Dong Won Moon 23 | 24 | """ 25 | 26 | name = "Momentum" 27 | classifier = { 28 | "memory_depth": float("inf"), 29 | "stochastic": False, 30 | "long_run_time": False, 31 | "inspects_source": False, 32 | "manipulates_source": False, 33 | "manipulates_state": False, 34 | } 35 | 36 | def __init__( 37 | self, 38 | alpha=0.9914655399877477, 39 | threshold=0.9676595613724907, 40 | ) -> None: 41 | super().__init__() 42 | self.alpha = alpha 43 | self.threshold = threshold 44 | self.momentum = 1.0 45 | 46 | def __repr__(self): 47 | return f"Momentum: {self.momentum}, Alpha: {self.alpha}, Threshold: {self.threshold}" 48 | 49 | def update_momentum(self, opponent_action): 50 | # If the opponent defects, the momentum decreases, reflecting a loss of trust. 51 | action_value = 1 if opponent_action == C else 0 52 | self.momentum = ( 53 | self.alpha * self.momentum + (1 - self.alpha) * action_value 54 | ) 55 | 56 | def strategy(self, opponent: Player) -> Action: 57 | if len(self.history) == 0: 58 | self.momentum = 1.0 59 | return C 60 | 61 | else: 62 | self.update_momentum(opponent.history[-1]) 63 | return C if self.momentum >= self.threshold else D 64 | -------------------------------------------------------------------------------- /axelrod/strategies/mutual.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Desperate(Player): 8 | """A player that only cooperates after mutual defection. 9 | 10 | Names: 11 | 12 | - Desperate: [Berg2015]_""" 13 | 14 | name = "Desperate" 15 | classifier = { 16 | "memory_depth": 1, 17 | "long_run_time": False, 18 | "stochastic": True, 19 | "inspects_source": False, 20 | "manipulates_source": False, 21 | "manipulates_state": False, 22 | } 23 | 24 | def strategy(self, opponent: Player) -> Action: 25 | """Actual strategy definition that determines player's action.""" 26 | if not opponent.history: 27 | return self._random.random_choice() 28 | if self.history[-1] == D and opponent.history[-1] == D: 29 | return C 30 | return D 31 | 32 | 33 | class Hopeless(Player): 34 | """A player that only defects after mutual cooperation. 35 | 36 | Names: 37 | 38 | - Hopeless: [Berg2015]_""" 39 | 40 | name = "Hopeless" 41 | classifier = { 42 | "memory_depth": 1, 43 | "long_run_time": False, 44 | "stochastic": True, 45 | "inspects_source": False, 46 | "manipulates_source": False, 47 | "manipulates_state": False, 48 | } 49 | 50 | def strategy(self, opponent: Player) -> Action: 51 | """Actual strategy definition that determines player's action.""" 52 | if not opponent.history: 53 | return self._random.random_choice() 54 | if self.history[-1] == C and opponent.history[-1] == C: 55 | return D 56 | return C 57 | 58 | 59 | class Willing(Player): 60 | """A player that only defects after mutual defection. 
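    As with Desperate and Hopeless above, the first move is chosen uniformly
    at random; thereafter the rule is deterministic.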
61 | 62 | Names: 63 | 64 | - Willing: [Berg2015]_""" 65 | 66 | name = "Willing" 67 | classifier = { 68 | "memory_depth": 1, 69 | "long_run_time": False, 70 | "stochastic": True, 71 | "inspects_source": False, 72 | "manipulates_source": False, 73 | "manipulates_state": False, 74 | } 75 | 76 | def strategy(self, opponent: Player) -> Action: 77 | """Actual strategy definition that determines player's action.""" 78 | if not opponent.history: 79 | return self._random.random_choice() 80 | if self.history[-1] == D and opponent.history[-1] == D: 81 | return D 82 | return C 83 | -------------------------------------------------------------------------------- /axelrod/strategies/negation.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Negation(Player): 8 | """ 9 | A player starts by cooperating or defecting randomly if it's their first move, 10 | then simply doing the opposite of the opponents last move thereafter. 11 | 12 | Names: 13 | 14 | - Negation: [PD2017]_ 15 | """ 16 | 17 | name = "Negation" 18 | classifier = { 19 | "memory_depth": 1, 20 | "stochastic": True, 21 | "long_run_time": False, 22 | "inspects_source": False, 23 | "manipulates_source": False, 24 | "manipulates_state": False, 25 | } 26 | 27 | def strategy(self, opponent: Player) -> Action: 28 | """Actual strategy definition that determines player's action.""" 29 | # Random first move 30 | if not self.history: 31 | return self._random.random_choice() 32 | # Act opposite of opponent otherwise 33 | return opponent.history[-1].flip() 34 | -------------------------------------------------------------------------------- /axelrod/strategies/rand.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Random(Player): 8 | """A player who randomly chooses between cooperating and defecting. 9 | 10 | This strategy came 15th in Axelrod's original tournament. 11 | 12 | Names: 13 | 14 | - Random: [Axelrod1980]_ 15 | - Lunatic: [Tzafestas2000]_ 16 | """ 17 | 18 | name = "Random" 19 | classifier = { 20 | "memory_depth": 0, # Memory-one Four-Vector = (p, p, p, p) 21 | "stochastic": True, 22 | "long_run_time": False, 23 | "inspects_source": False, 24 | "manipulates_source": False, 25 | "manipulates_state": False, 26 | } 27 | 28 | def __init__(self, p: float = 0.5) -> None: 29 | """ 30 | Parameters 31 | ---------- 32 | p, float 33 | The probability to cooperate 34 | 35 | Special Cases 36 | ------------- 37 | Random(0) is equivalent to Defector 38 | Random(1) is equivalent to Cooperator 39 | """ 40 | super().__init__() 41 | self.p = p 42 | 43 | def strategy(self, opponent: Player) -> Action: 44 | """Actual strategy definition that determines player's action.""" 45 | return self._random.random_choice(self.p) 46 | 47 | def _post_init(self): 48 | super()._post_init() 49 | if self.p in [0, 1]: 50 | self.classifier["stochastic"] = False 51 | # Avoid calls to _random, if strategy is deterministic 52 | # by overwriting the strategy function. 
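            # For example, Random(p=0) then plays exactly like Defector and
            # Random(p=1) exactly like Cooperator, without drawing any
            # random numbers.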
53 | if self.p <= 0: 54 | self.strategy = self.defect 55 | if self.p >= 1: 56 | self.strategy = self.cooperate 57 | 58 | @classmethod 59 | def cooperate(cls, opponent: Player) -> Action: 60 | return C 61 | 62 | @classmethod 63 | def defect(cls, opponent: Player) -> Action: 64 | return D 65 | -------------------------------------------------------------------------------- /axelrod/strategies/resurrection.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class Resurrection(Player): 8 | """ 9 | A player starts by cooperating and defects if the number of rounds 10 | played by the player is greater than five and the last five rounds 11 | are defections. 12 | 13 | Otherwise, the strategy plays like Tit-for-tat. 14 | 15 | Names: 16 | 17 | - Resurrection: [Eckhart2015]_ 18 | """ 19 | 20 | # These are various properties for the strategy 21 | name = "Resurrection" 22 | classifier = { 23 | "memory_depth": 5, 24 | "stochastic": False, 25 | "long_run_time": False, 26 | "inspects_source": False, 27 | "manipulates_source": False, 28 | "manipulates_state": False, 29 | } 30 | 31 | def strategy(self, opponent: Player) -> Action: 32 | """Actual strategy definition that determines player's action.""" 33 | if len(self.history) == 0: 34 | return C 35 | if len(self.history) >= 5 and self.history[-5:] == [D, D, D, D, D]: 36 | return D 37 | else: 38 | return opponent.history[-1] 39 | 40 | 41 | class DoubleResurrection(Player): 42 | """ 43 | A player starts by cooperating and defects if the number of rounds 44 | played by the player is greater than five and the last five rounds 45 | are cooperations. 46 | 47 | If the last five rounds were defections, the player cooperates. 48 | 49 | Names: 50 | 51 | - DoubleResurrection: [Eckhart2015]_ 52 | """ 53 | 54 | name = "DoubleResurrection" 55 | classifier = { 56 | "memory_depth": 5, 57 | "stochastic": False, 58 | "long_run_time": False, 59 | "inspects_source": False, 60 | "manipulates_source": False, 61 | "manipulates_state": False, 62 | } 63 | 64 | def strategy(self, opponent: Player) -> Action: 65 | """Actual strategy definition that determines player's action.""" 66 | if len(self.history) == 0: 67 | return C 68 | if len(self.history) >= 5 and self.history[-5:] == [C, C, C, C, C]: 69 | return D 70 | elif len(self.history) >= 5 and self.history[-5:] == [D, D, D, D, D]: 71 | return C 72 | else: 73 | return opponent.history[-1] 74 | -------------------------------------------------------------------------------- /axelrod/strategies/revised_downing.py: -------------------------------------------------------------------------------- 1 | """ 2 | Revised Downing implemented from the Fortran source code for the second of 3 | Axelrod's tournaments. 4 | """ 5 | 6 | from axelrod.action import Action 7 | from axelrod.player import Player 8 | 9 | C, D = Action.C, Action.D 10 | 11 | 12 | class RevisedDowning(Player): 13 | """ 14 | Strategy submitted to Axelrod's second tournament by Leslie Downing. 15 | (K59R). 16 | 17 | Revised Downing attempts to determine if players are cooperative or not. 18 | If so, it cooperates with them. 19 | 20 | This strategy is a revision of the strategy submitted by Downing to 21 | Axelrod's first tournament. 
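    Concretely, it keeps running estimates of how often the opponent
    cooperates after this player's own cooperations ("good") and after its
    own defections ("bad"), and each round uses these estimates to choose
    between cooperating, flipping its previous move, or defecting.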
22 | 23 | 24 | Names: 25 | - Revised Downing: [Axelrod1980]_ 26 | """ 27 | 28 | name = "Revised Downing" 29 | 30 | classifier = { 31 | "memory_depth": float("inf"), 32 | "stochastic": False, 33 | "long_run_time": False, 34 | "inspects_source": False, 35 | "manipulates_source": False, 36 | "manipulates_state": False, 37 | } 38 | 39 | def __init__(self) -> None: 40 | super().__init__() 41 | self.good = 1.0 42 | self.bad = 0.0 43 | self.nice1 = 0 44 | self.nice2 = 0 45 | self.total_C = 0 # note the same as self.cooperations 46 | self.total_D = 0 # note the same as self.defections 47 | 48 | def strategy(self, opponent: Player) -> Action: 49 | round_number = len(self.history) + 1 50 | 51 | if round_number == 1: 52 | return C 53 | 54 | # Update various counts 55 | if round_number > 2: 56 | if self.history[-2] == D: 57 | if opponent.history[-1] == C: 58 | self.nice2 += 1 59 | self.total_D += 1 60 | self.bad = self.nice2 / self.total_D 61 | else: 62 | if opponent.history[-1] == C: 63 | self.nice1 += 1 64 | self.total_C += 1 65 | self.good = self.nice1 / self.total_C 66 | # Make a decision based on the accrued counts 67 | c = 6.0 * self.good - 8.0 * self.bad - 2 68 | alt = 4.0 * self.good - 5.0 * self.bad - 1 69 | if c >= 0 and c >= alt: 70 | move = C 71 | elif (0 <= c < alt) or (alt >= 0): 72 | move = self.history[-1].flip() 73 | else: 74 | move = D 75 | return move 76 | -------------------------------------------------------------------------------- /axelrod/strategies/selfsteem.py: -------------------------------------------------------------------------------- 1 | from math import pi, sin 2 | 3 | from axelrod.action import Action 4 | from axelrod.player import Player 5 | 6 | C, D = Action.C, Action.D 7 | 8 | 9 | class SelfSteem(Player): 10 | """ 11 | This strategy is based on the feeling with the same name. 12 | It is modeled on the sine curve(f = sin( 2* pi * n / 10 )), which varies 13 | with the current iteration. 14 | 15 | If f > 0.95, 'ego' of the algorithm is inflated; always defects. 16 | If 0.95 > abs(f) > 0.3, rational behavior; follows TitForTat algortithm. 17 | If 0.3 > f > -0.3; random behavior. 18 | If f < -0.95, algorithm is at rock bottom; always cooperates. 19 | 20 | Futhermore, the algorithm implements a retaliation policy, if the opponent 21 | defects; the sin curve is shifted. But due to lack of further information, 22 | this implementation does not include a sin phase change. 
23 | Names: 24 | 25 | - SelfSteem: [Andre2013]_ 26 | """ 27 | 28 | name = "SelfSteem" 29 | classifier = { 30 | "memory_depth": float("inf"), 31 | "stochastic": True, 32 | "long_run_time": False, 33 | "inspects_source": False, 34 | "manipulates_source": False, 35 | "manipulates_state": False, 36 | } 37 | 38 | def strategy(self, opponent: Player) -> Action: 39 | """Actual strategy definition that determines player's action.""" 40 | turns_number = len(self.history) 41 | sine_value = sin(2 * pi * turns_number / 10) 42 | 43 | if sine_value > 0.95: 44 | return D 45 | 46 | if 0.95 > abs(sine_value) > 0.3: 47 | return opponent.history[-1] 48 | 49 | if 0.3 > sine_value > -0.3: 50 | return self._random.random_choice() 51 | 52 | return C 53 | -------------------------------------------------------------------------------- /axelrod/strategies/shortmem.py: -------------------------------------------------------------------------------- 1 | from axelrod import Player 2 | from axelrod.action import Action 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class ShortMem(Player): 8 | """ 9 | A player starts by always cooperating for the first 10 moves. 10 | 11 | From the tenth round on, the player analyzes the last ten actions, and 12 | compare the number of defects and cooperates of the opponent, based in 13 | percentage. If cooperation occurs 30% more than defection, it will 14 | cooperate. 15 | If defection occurs 30% more than cooperation, the program will defect. 16 | Otherwise, the program follows the TitForTat algorithm. 17 | 18 | Names: 19 | 20 | - ShortMem: [Andre2013]_ 21 | """ 22 | 23 | name = "ShortMem" 24 | classifier = { 25 | "memory_depth": float("inf"), 26 | "stochastic": False, 27 | "long_run_time": False, 28 | "inspects_source": False, 29 | "manipulates_source": False, 30 | "manipulates_state": False, 31 | } 32 | 33 | @staticmethod 34 | def strategy(opponent: Player) -> Action: 35 | """Actual strategy definition that determines player's action.""" 36 | if len(opponent.history) <= 10: 37 | return C 38 | 39 | array = opponent.history[-10:] 40 | C_counts = array.count(C) 41 | D_counts = array.count(D) 42 | 43 | if C_counts - D_counts >= 3: 44 | return C 45 | elif D_counts - C_counts >= 3: 46 | return D 47 | else: 48 | return opponent.history[-1] 49 | -------------------------------------------------------------------------------- /axelrod/strategies/stalker.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | from axelrod.strategy_transformers import FinalTransformer 4 | 5 | C, D = Action.C, Action.D 6 | 7 | 8 | @FinalTransformer((D,), name_prefix=None) # End with defection 9 | class Stalker(Player): 10 | """ 11 | 12 | This is a strategy which is only influenced by the score. 13 | Its behavior is based on three values: 14 | the very_bad_score (all rounds in defection) 15 | very_good_score (all rounds in cooperation) 16 | wish_score (average between bad and very_good score) 17 | 18 | It starts with cooperation. 19 | 20 | - If current_average_score > very_good_score, it defects 21 | - If current_average_score lies in (wish_score, very_good_score) it 22 | cooperates 23 | - If current_average_score > 2, it cooperates 24 | - If current_average_score lies in (1, 2) 25 | - The remaining case, current_average_score < 1, it behaves randomly. 
26 | - It defects in the last round 27 | 28 | Names: 29 | 30 | - Stalker: [Andre2013]_ 31 | """ 32 | 33 | name = "Stalker" 34 | classifier = { 35 | "memory_depth": float("inf"), 36 | "stochastic": True, 37 | "long_run_time": False, 38 | "inspects_source": False, 39 | "manipulates_source": False, 40 | "manipulates_state": False, 41 | } 42 | 43 | def receive_match_attributes(self): 44 | R, P, S, T = self.match_attributes["game"].RPST() 45 | self.very_good_score = R 46 | self.very_bad_score = P 47 | self.wish_score = (R + P) / 2 48 | self.current_score = 0 49 | 50 | def score_last_round(self, opponent: Player): 51 | # Load the default game if not supplied by a tournament. 52 | game = self.match_attributes["game"] 53 | last_round = (self.history[-1], opponent.history[-1]) 54 | scores = game.score(last_round) 55 | self.current_score += scores[0] 56 | 57 | def strategy(self, opponent: Player) -> Action: 58 | """Actual strategy definition that determines player's action.""" 59 | 60 | if len(self.history) == 0: 61 | return C 62 | 63 | self.score_last_round(opponent) 64 | 65 | current_average_score = self.current_score / len(self.history) 66 | 67 | if current_average_score > self.very_good_score: 68 | return D 69 | if (current_average_score > self.wish_score) and ( 70 | current_average_score < self.very_good_score 71 | ): 72 | return C 73 | if current_average_score > 2: 74 | return C 75 | if (current_average_score < 2) and (current_average_score > 1): 76 | return D 77 | return self._random.random_choice() 78 | -------------------------------------------------------------------------------- /axelrod/strategies/verybad.py: -------------------------------------------------------------------------------- 1 | from axelrod.action import Action 2 | from axelrod.player import Player 3 | 4 | C, D = Action.C, Action.D 5 | 6 | 7 | class VeryBad(Player): 8 | """ 9 | It cooperates in the first three rounds, and uses probability 10 | (it implements a memory, which stores the opponent’s moves) to decide for 11 | cooperating or defecting. 
12 | Due to a lack of information as to what that probability refers to in this 13 | context, probability(P(X)) refers to (Count(X)/Total_Moves) in this 14 | implementation 15 | P(C) = Cooperations / Total_Moves 16 | P(D) = Defections / Total_Moves = 1 - P(C) 17 | 18 | Names: 19 | 20 | - VeryBad: [Andre2013]_ 21 | """ 22 | 23 | name = "VeryBad" 24 | classifier = { 25 | "memory_depth": float("inf"), 26 | "stochastic": False, 27 | "long_run_time": False, 28 | "inspects_source": False, 29 | "manipulates_source": False, 30 | "manipulates_state": False, 31 | } 32 | 33 | @staticmethod 34 | def strategy(opponent: Player) -> Action: 35 | """Actual strategy definition that determines player's action.""" 36 | total_moves = len(opponent.history) 37 | 38 | if total_moves < 3: 39 | return C 40 | 41 | cooperations = opponent.cooperations 42 | 43 | cooperation_probability = cooperations / total_moves 44 | 45 | if cooperation_probability > 0.5: 46 | return C 47 | 48 | elif cooperation_probability < 0.5: 49 | return D 50 | 51 | else: 52 | return opponent.history[-1] 53 | -------------------------------------------------------------------------------- /axelrod/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/axelrod/tests/__init__.py -------------------------------------------------------------------------------- /axelrod/tests/integration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/axelrod/tests/integration/__init__.py -------------------------------------------------------------------------------- /axelrod/tests/integration/test_matches.py: -------------------------------------------------------------------------------- 1 | """Tests for some expected match behaviours""" 2 | 3 | import unittest 4 | 5 | from hypothesis import given, settings 6 | from hypothesis.strategies import integers 7 | 8 | import axelrod as axl 9 | from axelrod.tests.property import strategy_lists 10 | 11 | C, D = axl.Action.C, axl.Action.D 12 | 13 | deterministic_strategies = [ 14 | s 15 | for s in axl.short_run_time_strategies 16 | if not axl.Classifiers["stochastic"](s()) 17 | ] 18 | stochastic_strategies = [ 19 | s 20 | for s in axl.short_run_time_strategies 21 | if axl.Classifiers["stochastic"](s()) 22 | ] 23 | 24 | 25 | class TestMatchOutcomes(unittest.TestCase): 26 | @given( 27 | strategies=strategy_lists( 28 | strategies=deterministic_strategies, min_size=2, max_size=2 29 | ), 30 | turns=integers(min_value=1, max_value=20), 31 | ) 32 | @settings(max_examples=5) 33 | def test_outcome_repeats(self, strategies, turns): 34 | """A test that if we repeat 3 matches with deterministic and well 35 | behaved strategies then we get the same result""" 36 | players = [s() for s in strategies] 37 | matches = [axl.Match(players, turns) for _ in range(3)] 38 | self.assertEqual(matches[0].play(), matches[1].play()) 39 | self.assertEqual(matches[1].play(), matches[2].play()) 40 | 41 | @given( 42 | strategies=strategy_lists( 43 | strategies=stochastic_strategies, min_size=2, max_size=2 44 | ), 45 | turns=integers(min_value=1, max_value=20), 46 | seed=integers(min_value=0, max_value=4294967295), 47 | ) 48 | @settings(max_examples=5, deadline=None) 49 | def test_outcome_repeats_stochastic(self, strategies, turns, seed): 50 | """a test to check that if a 
seed is set stochastic strategies give the 51 | same result""" 52 | results = [] 53 | for _ in range(3): 54 | players = [s() for s in strategies] 55 | results.append(axl.Match(players, turns=turns, seed=seed).play()) 56 | 57 | self.assertEqual(results[0], results[1]) 58 | self.assertEqual(results[1], results[2]) 59 | 60 | def test_matches_with_det_player_for_stochastic_classes(self): 61 | """A test based on a bug found in the cache. 62 | 63 | See: https://github.com/Axelrod-Python/Axelrod/issues/779""" 64 | p1 = axl.MemoryOnePlayer(four_vector=(0, 0, 0, 0)) 65 | p2 = axl.MemoryOnePlayer(four_vector=(1, 0, 1, 0)) 66 | p3 = axl.MemoryOnePlayer(four_vector=(1, 1, 1, 0)) 67 | 68 | m = axl.Match((p1, p2), turns=3) 69 | self.assertEqual(m.play(), [(C, C), (D, C), (D, D)]) 70 | 71 | m = axl.Match((p2, p3), turns=3) 72 | self.assertEqual(m.play(), [(C, C), (C, C), (C, C)]) 73 | 74 | m = axl.Match((p1, p3), turns=3) 75 | self.assertEqual(m.play(), [(C, C), (D, C), (D, C)]) 76 | -------------------------------------------------------------------------------- /axelrod/tests/integration/test_names.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import axelrod as axl 4 | 5 | 6 | class TestNames(unittest.TestCase): 7 | def test_all_strategies_have_names(self): 8 | names = [s.name for s in axl.all_strategies if s.name != "Player"] 9 | self.assertEqual(len(names), len(axl.all_strategies)) 10 | 11 | def test_all_names_are_unique(self): 12 | names = set(s.name for s in axl.all_strategies) 13 | self.assertEqual(len(names), len(axl.all_strategies)) 14 | -------------------------------------------------------------------------------- /axelrod/tests/integration/test_sample_tournaments.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import axelrod as axl 4 | 5 | C, D = axl.Action.C, axl.Action.D 6 | 7 | 8 | class TestSampleTournaments(unittest.TestCase): 9 | @classmethod 10 | def setUpClass(cls): 11 | cls.game = axl.Game() 12 | 13 | @classmethod 14 | def get_test_outcome(cls, outcome, turns=10): 15 | # Extract the name of players from the outcome tuples, 16 | # and initiate the players by getting the classes from axelrod. 17 | names = [out[0] for out in outcome] 18 | players = [getattr(axl, n)() for n in names] 19 | 20 | # Play the tournament and build the actual outcome tuples. 
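        # A single-repetition round robin is sufficient here because every
        # player used in these tests is deterministic, so the scores are
        # reproducible.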
21 | tournament = axl.Tournament( 22 | players=players, game=cls.game, turns=turns, repetitions=1 23 | ) 24 | results = tournament.play(progress_bar=False) 25 | scores = [score[0] for score in results.scores] 26 | outcome = zip(names, scores) 27 | 28 | # Return the outcome sorted by score 29 | return sorted(outcome, key=lambda k: k[1]) 30 | 31 | def test_defector_v_cooperator(self): 32 | """Test: the defector viciously punishes the cooperator.""" 33 | outcome = [("Cooperator", 0), ("Defector", 50)] 34 | self.assertEqual(self.get_test_outcome(outcome), outcome) 35 | 36 | def test_defector_v_titfortat(self): 37 | """Test: the defector does well against tit for tat.""" 38 | outcome = [("TitForTat", 9), ("Defector", 14)] 39 | self.assertEqual(self.get_test_outcome(outcome), outcome) 40 | 41 | def test_cooperator_v_titfortat(self): 42 | """Test: the cooperator does very well WITH tit for tat.""" 43 | outcome = [("Cooperator", 30), ("TitForTat", 30)] 44 | self.assertEqual(self.get_test_outcome(outcome), outcome) 45 | 46 | def test_cooperator_v_titfortat_v_defector(self): 47 | """Test: the defector dominates in this population.""" 48 | outcome = [("Cooperator", 30), ("TitForTat", 39), ("Defector", 64)] 49 | self.assertEqual(self.get_test_outcome(outcome), outcome) 50 | 51 | def test_cooperator_v_titfortat_v_defector_v_grudger(self): 52 | """Test: tit for tat does better this time around.""" 53 | outcome = [ 54 | ("Cooperator", 60), 55 | ("TitForTat", 69), 56 | ("Grudger", 69), 57 | ("Defector", 78), 58 | ] 59 | self.assertEqual(self.get_test_outcome(outcome), outcome) 60 | 61 | def test_cooperator_v_titfortat_v_defector_v_grudger_v_go_by_majority(self): 62 | """Test: Tit for tat is doing a lot better.""" 63 | outcome = [ 64 | ("Cooperator", 90), 65 | ("Defector", 92), 66 | ("Grudger", 99), 67 | ("GoByMajority", 99), 68 | ("TitForTat", 99), 69 | ] 70 | self.assertEqual(self.get_test_outcome(outcome), outcome) 71 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/axelrod/tests/strategies/__init__.py -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_adaptive.py: -------------------------------------------------------------------------------- 1 | """Tests for the Adaptive strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestAdaptive(TestPlayer): 11 | 12 | name = "Adaptive" 13 | player = axl.Adaptive 14 | expected_classifier = { 15 | "memory_depth": float("inf"), 16 | "stochastic": False, 17 | "makes_use_of": {"game"}, 18 | "long_run_time": False, 19 | "inspects_source": False, 20 | "manipulates_source": False, 21 | "manipulates_state": False, 22 | } 23 | 24 | def test_default_initial_actions_against_cooperator(self): 25 | coplayer = axl.Cooperator() 26 | player_actions = [C] * 6 + [D] * 8 27 | coplayer_actions = [C] * 14 28 | expected_actions = list(zip(player_actions, coplayer_actions)) 29 | self.versus_test(coplayer, expected_actions=expected_actions) 30 | 31 | def test_default_initial_actions_against_defector(self): 32 | coplayer = axl.Defector() 33 | player_actions = [C] * 6 + [D] * 8 34 | coplayer_actions = [D] * 14 35 | expected_actions = list(zip(player_actions, coplayer_actions)) 36 | 
self.versus_test(coplayer, expected_actions=expected_actions) 37 | 38 | def test_default_initial_actions_against_alternator(self): 39 | coplayer = axl.Alternator() 40 | player_actions = [C] * 6 + [D] * 8 41 | coplayer_actions = [C, D] * 7 42 | expected_actions = list(zip(player_actions, coplayer_actions)) 43 | self.versus_test(coplayer, expected_actions=expected_actions) 44 | 45 | def test_default_initial_actions_against_tft(self): 46 | coplayer = axl.TitForTat() 47 | player_actions = [C] * 6 + [D] * 5 + [C, C] 48 | coplayer_actions = [C] * 7 + [D] * 5 + [C] 49 | expected_actions = list(zip(player_actions, coplayer_actions)) 50 | self.versus_test(coplayer, expected_actions=expected_actions) 51 | 52 | def test_scoring_with_default_game(self): 53 | """Tests that the default game is used in scoring.""" 54 | opponent = axl.Cooperator() 55 | attrs = {"scores": {C: 3, D: 0}} 56 | expected_actions = list(zip([C, C], [C, C])) 57 | self.versus_test( 58 | opponent, expected_actions, turns=2, attrs=attrs, seed=9 59 | ) 60 | 61 | def test_scoring_with_alternate_game(self): 62 | """Tests that the alternate game is used in scoring.""" 63 | opponent = axl.Alternator() 64 | expected_actions = list(zip([C, C, C], [C, D, C])) 65 | attrs = {"scores": {C: 7, D: 0}} 66 | match_attributes = {"game": axl.Game(-3, 10, 10, 10)} 67 | self.versus_test( 68 | opponent, 69 | expected_actions, 70 | turns=3, 71 | attrs=attrs, 72 | seed=9, 73 | match_attributes=match_attributes, 74 | ) 75 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_alternator.py: -------------------------------------------------------------------------------- 1 | """Tests for the Alternator strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestAlternator(TestPlayer): 11 | 12 | name = "Alternator" 13 | player = axl.Alternator 14 | expected_classifier = { 15 | "memory_depth": 1, 16 | "stochastic": False, 17 | "makes_use_of": set(), 18 | "long_run_time": False, 19 | "inspects_source": False, 20 | "manipulates_source": False, 21 | "manipulates_state": False, 22 | } 23 | 24 | def test_versus_cooperator(self): 25 | actions = [(C, C), (D, C)] * 5 26 | self.versus_test(axl.Cooperator(), expected_actions=actions) 27 | 28 | def test_versus_defector(self): 29 | actions = [(C, D), (D, D)] * 5 30 | self.versus_test(axl.Defector(), expected_actions=actions) 31 | 32 | def test_versus_cycler_DC(self): 33 | opponent = axl.CyclerDC() 34 | actions = [(C, D), (D, C)] * 5 35 | self.versus_test(opponent, expected_actions=actions) 36 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_appeaser.py: -------------------------------------------------------------------------------- 1 | """Tests for the Appeaser strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestAppeaser(TestPlayer): 11 | 12 | name = "Appeaser" 13 | player = axl.Appeaser 14 | expected_classifier = { 15 | "memory_depth": float("inf"), # Depends on internal memory. 
16 | "stochastic": False, 17 | "makes_use_of": set(), 18 | "long_run_time": False, 19 | "inspects_source": False, 20 | "manipulates_source": False, 21 | "manipulates_state": False, 22 | } 23 | 24 | def test_strategy_versus_cooperator(self): 25 | actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] 26 | self.versus_test(axl.Cooperator(), expected_actions=actions) 27 | 28 | def test_strategy_versus_defector(self): 29 | actions = [(C, D), (D, D), (C, D), (D, D), (C, D)] 30 | self.versus_test(axl.Defector(), expected_actions=actions) 31 | 32 | def test_cooperate_on_opponent_defect(self): 33 | opponent = axl.MockPlayer(actions=[C, C, D, D]) 34 | actions = [(C, C), (C, C), (C, D), (D, D), (C, C), (C, C)] 35 | self.versus_test(opponent, expected_actions=actions) 36 | 37 | def test_cooperate_then_defect_on_opponent_defect(self): 38 | opponent = axl.MockPlayer(actions=[C, C, D, D, D]) 39 | actions = [(C, C), (C, C), (C, D), (D, D), (C, D), (D, C), (D, C)] 40 | self.versus_test(opponent, expected_actions=actions) 41 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_better_and_better.py: -------------------------------------------------------------------------------- 1 | """Tests for the BetterAndBetter strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestBetterAndBetter(TestPlayer): 11 | 12 | name = "Better and Better" 13 | player = axl.BetterAndBetter 14 | expected_classifier = { 15 | "memory_depth": float("inf"), 16 | "stochastic": True, 17 | "makes_use_of": set(), 18 | "long_run_time": False, 19 | "inspects_source": False, 20 | "manipulates_source": False, 21 | "manipulates_state": False, 22 | } 23 | 24 | def test_strategy(self): 25 | """Tests that the strategy gives expected behaviour.""" 26 | expected_actions = [(D, D)] * 90 + [(C, D)] 27 | self.versus_test( 28 | axl.Defector(), 29 | expected_actions=expected_actions, 30 | seed=6, 31 | ) 32 | expected_actions = [(D, C)] * 10 33 | self.versus_test( 34 | axl.Cooperator(), 35 | expected_actions=expected_actions, 36 | seed=8, 37 | ) 38 | expected_actions = [(D, D)] * 41 + [(C, D)] 39 | self.versus_test( 40 | axl.Defector(), 41 | expected_actions=expected_actions, 42 | seed=13, 43 | ) 44 | expected_indices = [18, 39, 49, 67, 77, 116, 139, 142, 149] 45 | m = axl.Match((self.player(), axl.Defector()), turns=150, seed=111) 46 | result = m.play() 47 | indices = [] 48 | for index, actions in enumerate(result): 49 | if actions == (C, D): 50 | indices.append(index) 51 | self.assertEqual(expected_indices, indices) 52 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_bush_mosteller.py: -------------------------------------------------------------------------------- 1 | import axelrod as axl 2 | 3 | from .test_player import TestPlayer 4 | 5 | C, D = axl.Action.C, axl.Action.D 6 | 7 | 8 | class TestBushMostellar(TestPlayer): 9 | 10 | name = "Bush Mosteller: 0.5, 0.5, 3.0, 0.5" 11 | player = axl.BushMosteller 12 | expected_classifier = { 13 | "memory_depth": float("inf"), 14 | "stochastic": True, 15 | "makes_use_of": {"game"}, 16 | "long_run_time": False, 17 | "inspects_source": False, 18 | "manipulates_source": False, 19 | "manipulates_state": False, 20 | } 21 | 22 | def test_strategy(self): 23 | actions = [(C, C), (D, C), (D, C)] 24 | self.versus_test( 25 | axl.Cooperator(), 26 | expected_actions=actions, 27 | attrs={"_stimulus": 1}, 28 | 
seed=1, 29 | ) 30 | 31 | # Making sure probabilities changes following payoffs 32 | actions = [(C, C), (D, D)] 33 | self.versus_test( 34 | axl.Alternator(), 35 | expected_actions=actions, 36 | attrs={"_stimulus": 0.4, "_c_prob": 0.6, "_d_prob": 0.5}, 37 | seed=1, 38 | ) 39 | 40 | actions = [(C, D), (D, D), (D, D)] 41 | self.versus_test( 42 | axl.Defector(), 43 | expected_actions=actions, 44 | attrs={ 45 | "_stimulus": -0.20000000000000004, 46 | "_c_prob": 0.375, 47 | "_d_prob": 0.45, 48 | }, 49 | seed=1, 50 | ) 51 | 52 | # Testing that stimulus never goes under -1 53 | actions = [(C, C), (D, C), (D, C)] 54 | self.versus_test( 55 | axl.Cooperator(), 56 | expected_actions=actions, 57 | attrs={"_stimulus": -1}, 58 | init_kwargs={"aspiration_level_divider": 0.1}, 59 | seed=1, 60 | ) 61 | 62 | # Ensures that the player will never play C or D if his probability is equal to 0 63 | actions = [(C, C)] * 100 64 | self.versus_test( 65 | axl.Cooperator(), 66 | expected_actions=actions, 67 | init_kwargs={"d_prob": 0.0}, 68 | seed=1, 69 | ) 70 | 71 | actions = [(D, C)] * 100 72 | self.versus_test( 73 | axl.Cooperator(), 74 | expected_actions=actions, 75 | init_kwargs={"c_prob": 0.0}, 76 | seed=1, 77 | ) 78 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_cooperator.py: -------------------------------------------------------------------------------- 1 | """Tests for the Cooperator strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestCooperator(TestPlayer): 11 | 12 | name = "Cooperator" 13 | player = axl.Cooperator 14 | expected_classifier = { 15 | "memory_depth": 0, 16 | "stochastic": False, 17 | "makes_use_of": set(), 18 | "inspects_source": False, 19 | "manipulates_source": False, 20 | "manipulates_state": False, 21 | } 22 | 23 | def test_strategy(self): 24 | # Cooperates always. 25 | actions = [(C, C)] + [(C, D), (C, C)] * 9 26 | self.versus_test(opponent=axl.Alternator(), expected_actions=actions) 27 | 28 | 29 | class TestTrickyCooperator(TestPlayer): 30 | 31 | name = "Tricky Cooperator" 32 | player = axl.TrickyCooperator 33 | expected_classifier = { 34 | "memory_depth": 10, 35 | "stochastic": False, 36 | "makes_use_of": set(), 37 | "inspects_source": False, 38 | "manipulates_source": False, 39 | "manipulates_state": False, 40 | } 41 | 42 | def test_strategy(self): 43 | # Test if it tries to trick opponent. 
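        # Against an unconditional cooperator it cooperates for the first
        # three rounds and then starts defecting to exploit the opponent.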
44 | self.versus_test( 45 | axl.Cooperator(), [(C, C), (C, C), (C, C), (D, C), (D, C)] 46 | ) 47 | 48 | opponent_actions = [C, C, C, C, D, D] 49 | expected_actions = [(C, C), (C, C), (C, C), (D, C), (D, D), (C, D)] 50 | self.versus_test( 51 | axl.MockPlayer(actions=opponent_actions), 52 | expected_actions=expected_actions, 53 | ) 54 | 55 | opponent_actions = [C, C, C, C] + [D, D] + [C] * 10 56 | expected_actions = ( 57 | [(C, C), (C, C), (C, C), (D, C)] + [(D, D), (C, D)] + [(C, C)] * 10 58 | ) 59 | self.versus_test( 60 | axl.MockPlayer(actions=opponent_actions), 61 | expected_actions=expected_actions, 62 | ) 63 | 64 | def test_cooperates_in_first_three_rounds(self): 65 | against_defector = [(C, D)] * 3 66 | against_cooperator = [(C, C)] * 3 67 | against_alternator = [(C, C), (C, D), (C, C)] 68 | self.versus_test(axl.Defector(), expected_actions=against_defector) 69 | self.versus_test(axl.Cooperator(), expected_actions=against_cooperator) 70 | self.versus_test(axl.Alternator(), expected_actions=against_alternator) 71 | 72 | def test_defects_after_three_rounds_if_opponent_only_cooperated_in_max_history_depth_ten( 73 | self, 74 | ): 75 | against_cooperator = [(C, C)] * 3 + [(D, C)] * 20 76 | self.versus_test(axl.Cooperator(), expected_actions=against_cooperator) 77 | 78 | def test_defects_when_opponent_has_no_defections_to_history_depth_ten(self): 79 | opponent_actions = [D] + [C] * 10 + [D, C] 80 | expected_actions = [(C, D)] + [(C, C)] * 10 + [(D, D), (C, C)] 81 | self.versus_test( 82 | axl.MockPlayer(actions=opponent_actions), expected_actions 83 | ) 84 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_darwin.py: -------------------------------------------------------------------------------- 1 | """Tests for the Darwin PD strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestDarwin(TestPlayer): 11 | 12 | name = "Darwin" 13 | player = axl.Darwin 14 | expected_classifier = { 15 | "memory_depth": float("inf"), 16 | "stochastic": False, 17 | "makes_use_of": {"game"}, 18 | "long_run_time": False, 19 | "inspects_source": True, 20 | "manipulates_source": False, 21 | "manipulates_state": True, 22 | } 23 | 24 | @classmethod 25 | def tearDownClass(cls): 26 | """After all tests have run, makes sure the Darwin genome is reset.""" 27 | cls.player.reset_genome() 28 | super(TestDarwin, cls).tearDownClass() 29 | 30 | def setUp(self): 31 | """Each test starts with a fresh genome.""" 32 | self.player.reset_genome() 33 | super(TestDarwin, self).setUp() 34 | 35 | def test_setup(self): 36 | player = self.player() 37 | self.assertEqual(player.genome, [C]) 38 | self.assertEqual(player.history, []) 39 | 40 | def test_strategy(self): 41 | p1 = self.player() 42 | p1.reset() 43 | 44 | self.versus_test( 45 | axl.Cooperator(), 46 | expected_actions=[(C, C)] * 5, 47 | attrs={"genome": [C] * 5}, 48 | ) 49 | 50 | expected_genome = [D] * 4 + [C] 51 | self.versus_test( 52 | axl.Defector(), 53 | expected_actions=[(C, D)] * 5, 54 | attrs={"genome": expected_genome}, 55 | ) 56 | 57 | # uses genome 58 | expected_actions = [(C, C)] + [(D, C)] * 3 + [(C, C)] * 2 59 | self.versus_test(axl.Cooperator(), expected_actions) 60 | 61 | def test_reset_history_and_attributes(self): 62 | # Overwrite this method because Darwin does not reset 63 | self.versus_test( 64 | axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4 65 | ) 66 | 67 | p1 = self.player() 68 | 
self.assertEqual(p1.genome, [D, C, C, C, D]) 69 | p1.reset() 70 | self.assertEqual(len(p1.history), 0) 71 | self.assertEqual(p1.genome, [C, C, C, C, D]) 72 | 73 | def test_all_darwin_instances_share_one_genome(self): 74 | p1 = self.player() 75 | p2 = self.player() 76 | self.assertIs(p1.genome, p2.genome) 77 | 78 | self.versus_test( 79 | axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4 80 | ) 81 | 82 | self.assertEqual(p2.genome, [D, C, C, C, D]) 83 | self.assertIs(p1.genome, p2.genome) 84 | p3 = self.player() 85 | self.assertIs(p3.genome, p2.genome) 86 | 87 | def test_reset_genome(self): 88 | self.versus_test( 89 | axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 4 90 | ) 91 | self.player.reset_genome() 92 | self.assertEqual(self.player().genome, [C]) 93 | 94 | def equality_of_players_test(self, p1, p2, seed, opponent): 95 | return True 96 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_defector.py: -------------------------------------------------------------------------------- 1 | """Tests for the Defector strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestDefector(TestPlayer): 11 | 12 | name = "Defector" 13 | player = axl.Defector 14 | expected_classifier = { 15 | "memory_depth": 0, 16 | "stochastic": False, 17 | "makes_use_of": set(), 18 | "long_run_time": False, 19 | "inspects_source": False, 20 | "manipulates_state": False, 21 | "manipulates_source": False, 22 | } 23 | 24 | def test_strategy(self): 25 | # Test that always defects. 26 | actions = [(D, C)] + [(D, D), (D, C)] * 9 27 | self.versus_test(opponent=axl.Alternator(), expected_actions=actions) 28 | 29 | 30 | class TestTrickyDefector(TestPlayer): 31 | 32 | name = "Tricky Defector" 33 | player = axl.TrickyDefector 34 | expected_classifier = { 35 | "memory_depth": float("inf"), # Long memory 36 | "stochastic": False, 37 | "makes_use_of": set(), 38 | "long_run_time": False, 39 | "inspects_source": False, 40 | "manipulates_source": False, 41 | "manipulates_state": False, 42 | } 43 | 44 | def test_cooperates_if_opponent_history_has_C_and_last_three_are_D(self): 45 | opponent_actions = [D, C] + [D] * 5 46 | actions = [(D, D), (D, C)] + [(D, D)] * 3 + [(C, D)] * 2 47 | self.versus_test( 48 | axl.MockPlayer(actions=opponent_actions), expected_actions=actions 49 | ) 50 | 51 | def test_defects_if_opponent_never_cooperated(self): 52 | opponent_actions = [D] * 7 53 | actions = [(D, D)] * 7 54 | self.versus_test( 55 | axl.MockPlayer(actions=opponent_actions), expected_actions=actions 56 | ) 57 | 58 | def test_defects_if_opponent_last_three_are_not_D(self): 59 | opponent_actions = [C] + [D] * 3 + [C, D] 60 | actions = [(D, C)] + [(D, D)] * 3 + [(C, C), (D, D)] 61 | self.versus_test( 62 | axl.MockPlayer(actions=opponent_actions), expected_actions=actions 63 | ) 64 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_doubler.py: -------------------------------------------------------------------------------- 1 | """Tests for the Doubler strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestDoubler(TestPlayer): 11 | 12 | name = "Doubler" 13 | player = axl.Doubler 14 | expected_classifier = { 15 | "memory_depth": float("inf"), 16 | "stochastic": False, 17 | "makes_use_of": set(), 18 | "long_run_time": False, 19 
| "inspects_source": False, 20 | "manipulates_source": False, 21 | "manipulates_state": False, 22 | } 23 | 24 | def test_defects_if_opponent_last_play_is_D_and_defections_gt_two_times_cooperations( 25 | self, 26 | ): 27 | opponent_plays = [C] * 7 + [D] * 4 + [C] 28 | actions = [(C, C)] * 7 + [(C, D)] * 4 + [(D, C)] 29 | self.versus_test( 30 | axl.MockPlayer(actions=opponent_plays), expected_actions=actions 31 | ) 32 | 33 | def test_defects_if_opponent_last_play_D_and_defections_equal_two_times_cooperations( 34 | self, 35 | ): 36 | opponent_plays = [C] * 8 + [D] * 4 + [C] 37 | actions = [(C, C)] * 8 + [(C, D)] * 4 + [(D, C)] 38 | self.versus_test( 39 | axl.MockPlayer(actions=opponent_plays), expected_actions=actions 40 | ) 41 | 42 | def test_cooperates_if_opponent_last_play_is_C(self): 43 | opponent_first_five = [D] * 5 44 | actions_first_five = [(C, D)] + [(D, D)] * 4 45 | opponent_plays = opponent_first_five + [C] + [D] 46 | actions = actions_first_five + [(D, C)] + [(C, D)] 47 | self.versus_test( 48 | axl.MockPlayer(actions=opponent_plays), expected_actions=actions 49 | ) 50 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_gradualkiller.py: -------------------------------------------------------------------------------- 1 | """Tests for the Gradual Killer strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestGradualKiller(TestPlayer): 11 | 12 | name = "Gradual Killer: (D, D, D, D, D, C, C)" 13 | player = axl.GradualKiller 14 | expected_classifier = { 15 | "memory_depth": float("Inf"), 16 | "stochastic": False, 17 | "makes_use_of": set(), 18 | "inspects_source": False, 19 | "manipulates_source": False, 20 | "manipulates_state": False, 21 | } 22 | 23 | first_seven = [D, D, D, D, D, C, C] 24 | 25 | def test_first_seven_moves_always_the_same(self): 26 | opponent = axl.Cooperator() 27 | actions = list(zip(self.first_seven, [C] * 7)) 28 | self.versus_test(opponent, expected_actions=actions) 29 | 30 | opponent = axl.Defector() 31 | actions = list(zip(self.first_seven, [D] * 7)) 32 | self.versus_test(opponent, expected_actions=actions) 33 | 34 | opponent = axl.Alternator() 35 | actions = list(zip(self.first_seven, [C, D] * 4)) 36 | self.versus_test(opponent, expected_actions=actions) 37 | 38 | def test_effect_of_strategy_with_history_CC(self): 39 | """Continues with C if opponent played CC on 6 and 7.""" 40 | opponent_actions = [D] * 5 + [C, C] + [D, C] * 20 41 | opponent = axl.MockPlayer(actions=opponent_actions) 42 | 43 | start = list(zip(self.first_seven, opponent_actions[:7])) 44 | actions = start + [(C, D), (C, C)] * 20 45 | 46 | self.versus_test(opponent, expected_actions=actions) 47 | 48 | def test_effect_of_strategy_with_history_CD(self): 49 | """Continues with C if opponent played CD on 6 and 7.""" 50 | opponent_actions = [D] * 5 + [C, D] + [D, C] * 20 51 | opponent = axl.MockPlayer(actions=opponent_actions) 52 | 53 | start = list(zip(self.first_seven, opponent_actions[:7])) 54 | actions = start + [(C, D), (C, C)] * 20 55 | 56 | self.versus_test(opponent, expected_actions=actions) 57 | 58 | def test_effect_of_strategy_with_history_DC(self): 59 | """Continues with C if opponent played DC on 6 and 7.""" 60 | opponent_actions = [D] * 5 + [D, C] + [D, C] * 20 61 | opponent = axl.MockPlayer(actions=opponent_actions) 62 | 63 | start = list(zip(self.first_seven, opponent_actions[:7])) 64 | actions = start + [(C, D), (C, C)] * 20 65 | 
66 | self.versus_test(opponent, expected_actions=actions) 67 | 68 | def test_effect_of_strategy_with_history_DD(self): 69 | """Continues with D if opponent played DD on 6 and 7.""" 70 | opponent_actions = [C] * 5 + [D, D] + [D, C] * 20 71 | opponent = axl.MockPlayer(actions=opponent_actions) 72 | 73 | start = list(zip(self.first_seven, opponent_actions[:7])) 74 | actions = start + [(D, D), (D, C)] * 20 75 | 76 | self.versus_test(opponent, expected_actions=actions) 77 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_handshake.py: -------------------------------------------------------------------------------- 1 | """Tests for the Handshake strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestHandshake(TestPlayer): 11 | 12 | name = "Handshake" 13 | player = axl.Handshake 14 | expected_classifier = { 15 | "memory_depth": float("inf"), 16 | "stochastic": False, 17 | "makes_use_of": set(), 18 | "long_run_time": False, 19 | "inspects_source": False, 20 | "manipulates_source": False, 21 | "manipulates_state": False, 22 | } 23 | 24 | def test_strategy(self): 25 | actions = [(C, C), (D, D)] + [(C, C), (C, D)] * 10 26 | self.versus_test(axl.Alternator(), expected_actions=actions) 27 | 28 | actions = [(C, C), (D, C)] + [(D, C)] * 20 29 | self.versus_test(axl.Cooperator(), expected_actions=actions) 30 | 31 | opponent = axl.MockPlayer([D, C]) 32 | actions = [(C, D), (D, C)] + [(D, D), (D, C)] * 10 33 | self.versus_test(opponent, expected_actions=actions) 34 | 35 | actions = [(C, D), (D, D)] + [(D, D)] * 20 36 | self.versus_test(axl.Defector(), expected_actions=actions) 37 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_inverse.py: -------------------------------------------------------------------------------- 1 | """Tests for the inverse strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestInverse(TestPlayer): 11 | 12 | name = "Inverse" 13 | player = axl.Inverse 14 | expected_classifier = { 15 | "memory_depth": float("inf"), # Long memory 16 | "stochastic": True, 17 | "makes_use_of": set(), 18 | "long_run_time": False, 19 | "inspects_source": False, 20 | "manipulates_source": False, 21 | "manipulates_state": False, 22 | } 23 | 24 | def test_strategy(self): 25 | # Test that as long as the opponent has not defected the player will 26 | # cooperate, regardless of the random seed. 27 | self.versus_test(axl.Cooperator(), expected_actions=[(C, C)], seed=None) 28 | 29 | # Tests that if opponent has played all D then player chooses D, 30 | # regardless of the random seed. 
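        # With an always-defecting opponent the most recent defection is
        # always one turn ago, so the cooperation probability 1 - 1/1 is
        # zero and the expected actions hold for any seed.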
31 | self.versus_test( 32 | axl.Defector(), expected_actions=[(C, D)] + [(D, D)] * 9, seed=None 33 | ) 34 | 35 | expected_actions = [ 36 | (C, D), 37 | (D, C), 38 | (D, C), 39 | (C, D), 40 | (D, C), 41 | (C, C), 42 | (D, C), 43 | (C, C), 44 | (C, D), 45 | (D, D), 46 | ] 47 | self.versus_test( 48 | axl.MockPlayer(actions=[a[1] for a in expected_actions]), 49 | expected_actions=expected_actions, 50 | seed=0, 51 | ) 52 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_mathematicalconstants.py: -------------------------------------------------------------------------------- 1 | """Tests for the golden and other mathematical strategies.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestGolden(TestPlayer): 11 | 12 | name = "$\phi$" 13 | player = axl.Golden 14 | expected_classifier = { 15 | "memory_depth": float("inf"), # Long memory 16 | "stochastic": False, 17 | "makes_use_of": set(), 18 | "long_run_time": False, 19 | "inspects_source": False, 20 | "manipulates_source": False, 21 | "manipulates_state": False, 22 | } 23 | 24 | def test_strategy(self): 25 | actions = [(C, C), (D, D), (C, C), (D, D), (C, C)] 26 | self.versus_test(opponent=axl.Alternator(), expected_actions=actions) 27 | 28 | actions = [(C, C), (D, C), (D, C), (D, C), (D, C)] 29 | self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) 30 | 31 | actions = [(C, D), (C, D), (C, D), (C, D), (C, D)] 32 | self.versus_test(opponent=axl.Defector(), expected_actions=actions) 33 | 34 | 35 | class TestPi(TestPlayer): 36 | 37 | name = "$\pi$" 38 | player = axl.Pi 39 | expected_classifier = { 40 | "memory_depth": float("inf"), # Long memory 41 | "stochastic": False, 42 | "makes_use_of": set(), 43 | "long_run_time": False, 44 | "inspects_source": False, 45 | "manipulates_source": False, 46 | "manipulates_state": False, 47 | } 48 | 49 | def test_strategy(self): 50 | actions = [(C, C), (D, D), (C, C), (C, D), (C, C)] 51 | self.versus_test(opponent=axl.Alternator(), expected_actions=actions) 52 | 53 | actions = [(C, C), (D, C), (D, C), (D, C), (D, C)] 54 | self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) 55 | 56 | actions = [(C, D), (C, D), (C, D), (C, D), (C, D)] 57 | self.versus_test(opponent=axl.Defector(), expected_actions=actions) 58 | 59 | 60 | class Teste(TestPlayer): 61 | 62 | name = "$e$" 63 | player = axl.e 64 | expected_classifier = { 65 | "memory_depth": float("inf"), # Long memory 66 | "stochastic": False, 67 | "makes_use_of": set(), 68 | "long_run_time": False, 69 | "inspects_source": False, 70 | "manipulates_source": False, 71 | "manipulates_state": False, 72 | } 73 | 74 | def test_strategy(self): 75 | actions = [(C, C), (D, D), (C, C), (C, D), (C, C)] 76 | self.versus_test(opponent=axl.Alternator(), expected_actions=actions) 77 | 78 | actions = [(C, C), (D, C), (D, C), (D, C), (D, C)] 79 | self.versus_test(opponent=axl.Cooperator(), expected_actions=actions) 80 | 81 | actions = [(C, D), (C, D), (C, D), (C, D), (C, D)] 82 | self.versus_test(opponent=axl.Defector(), expected_actions=actions) 83 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_momentum.py: -------------------------------------------------------------------------------- 1 | import axelrod as axl 2 | from axelrod import Action 3 | from axelrod.strategies.momentum import Momentum 4 | from 
axelrod.tests.strategies.test_player import TestPlayer 5 | 6 | C, D = Action.C, Action.D 7 | 8 | 9 | class TestMomentum(TestPlayer): 10 | name = "Momentum" 11 | player = Momentum 12 | expected_classifier = { 13 | "memory_depth": float("inf"), 14 | "stochastic": False, 15 | "long_run_time": False, 16 | "inspects_source": False, 17 | "manipulates_source": False, 18 | "manipulates_state": False, 19 | } 20 | 21 | def test_initialisation(self): 22 | player = self.player(alpha=0.9, threshold=0.8) 23 | self.assertEqual(player.alpha, 0.9) 24 | self.assertEqual(player.threshold, 0.8) 25 | self.assertEqual(player.momentum, 1.0) 26 | 27 | def test_repr(self): 28 | player = self.player(alpha=0.9, threshold=0.8) 29 | self.assertEqual( 30 | repr(player), "Momentum: 1.0, Alpha: 0.9, Threshold: 0.8" 31 | ) 32 | 33 | def test_strategy(self): 34 | actions = [(C, C)] 35 | self.versus_test( 36 | axl.MockPlayer(actions=[C]), 37 | expected_actions=actions, 38 | init_kwargs={"alpha": 0.5, "threshold": 0.5}, 39 | attrs={"momentum": 1.0}, 40 | ) 41 | 42 | actions = [(C, D), (C, D), (D, D)] 43 | self.versus_test( 44 | axl.MockPlayer(actions=[D]), 45 | expected_actions=actions, 46 | init_kwargs={"alpha": 0.5, "threshold": 0.5}, 47 | attrs={"momentum": 0.25}, 48 | ) 49 | 50 | def test_vs_alternator(self): 51 | actions = [(C, C), (C, D), (C, C), (C, D), (D, C)] 52 | self.versus_test( 53 | axl.Alternator(), 54 | expected_actions=actions, 55 | init_kwargs={"alpha": 0.5, "threshold": 0.5}, 56 | ) 57 | 58 | def test_vs_cooperator(self): 59 | actions = [(C, C), (C, C), (C, C), (C, C), (C, C)] 60 | self.versus_test( 61 | axl.Cooperator(), 62 | expected_actions=actions, 63 | init_kwargs={"alpha": 0.5, "threshold": 0.5}, 64 | ) 65 | 66 | def test_vs_defector(self): 67 | actions = [(C, D), (C, D), (D, D), (D, D), (D, D)] 68 | self.versus_test( 69 | axl.Defector(), 70 | expected_actions=actions, 71 | init_kwargs={"alpha": 0.5, "threshold": 0.5}, 72 | ) 73 | 74 | def test_vs_random(self): 75 | actions = [(C, D), (C, C), (C, C), (C, D), (D, D)] 76 | self.versus_test( 77 | axl.Random(), 78 | expected_actions=actions, 79 | seed=17, 80 | init_kwargs={"alpha": 0.5, "threshold": 0.5}, 81 | ) 82 | 83 | def test_vs_random2(self): 84 | actions = [(C, C), (C, C), (C, C), (C, C)] 85 | self.versus_test( 86 | axl.Random(), 87 | expected_actions=actions, 88 | seed=3, 89 | init_kwargs={"alpha": 0.5, "threshold": 0.5}, 90 | ) 91 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_negation.py: -------------------------------------------------------------------------------- 1 | """Tests for the Neg Strategy""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestNegation(TestPlayer): 11 | 12 | name = "Negation" 13 | player = axl.Negation 14 | expected_classifier = { 15 | "memory_depth": 1, 16 | "stochastic": True, 17 | "makes_use_of": set(), 18 | "long_run_time": False, 19 | "inspects_source": False, 20 | "manipulates_source": False, 21 | "manipulates_state": False, 22 | } 23 | 24 | def test_strategy(self): 25 | # First move is random. 
26 | actions = [(C, C), (D, D), (C, C)] 27 | self.versus_test( 28 | opponent=axl.Alternator(), expected_actions=actions, seed=1 29 | ) 30 | actions = [(D, C), (D, D), (C, C)] 31 | self.versus_test( 32 | opponent=axl.Alternator(), expected_actions=actions, seed=2 33 | ) 34 | actions = [(C, C), (D, C), (D, C)] 35 | self.versus_test( 36 | opponent=axl.Cooperator(), expected_actions=actions, seed=1 37 | ) 38 | actions = [(D, D), (C, D), (C, D)] 39 | self.versus_test( 40 | opponent=axl.Defector(), expected_actions=actions, seed=2 41 | ) 42 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_rand.py: -------------------------------------------------------------------------------- 1 | """Tests for the random strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestRandom(TestPlayer): 11 | 12 | name = "Random: 0.5" 13 | player = axl.Random 14 | expected_classifier = { 15 | "memory_depth": 0, 16 | "stochastic": True, 17 | "makes_use_of": set(), 18 | "long_run_time": False, 19 | "inspects_source": False, 20 | "manipulates_source": False, 21 | "manipulates_state": False, 22 | } 23 | 24 | def test_deterministic(self): 25 | actions = [(D, C), (D, C), (D, C)] 26 | self.versus_test( 27 | axl.Cooperator(), expected_actions=actions, init_kwargs={"p": 0} 28 | ) 29 | 30 | actions = [(C, C), (C, C), (C, C)] 31 | self.versus_test( 32 | axl.Cooperator(), expected_actions=actions, init_kwargs={"p": 1} 33 | ) 34 | 35 | def test_stochastic_behavior1(self): 36 | """Test that strategy is randomly picked (not affected by history).""" 37 | opponent = axl.MockPlayer() 38 | actions = [(C, C), (D, C), (D, C), (C, C)] 39 | self.versus_test(opponent, expected_actions=actions, seed=1) 40 | 41 | def test_stochastic_behavior2(self): 42 | opponent = axl.MockPlayer() 43 | actions = [(D, C), (C, C), (D, C)] 44 | self.versus_test(opponent, expected_actions=actions, seed=2) 45 | 46 | def test_deterministic_classification(self): 47 | """Test classification when p is 0 or 1""" 48 | for p in [0, 1]: 49 | player = axl.Random(p=p) 50 | self.assertFalse(axl.Classifiers["stochastic"](player)) 51 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_resurrection.py: -------------------------------------------------------------------------------- 1 | """Test for the Resurrection strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class Resurrection(TestPlayer): 11 | 12 | name = "Resurrection" 13 | player = axl.Resurrection 14 | expected_classifier = { 15 | "memory_depth": 5, 16 | "stochastic": False, 17 | "makes_use_of": set(), 18 | "long_run_time": False, 19 | "inspects_source": False, 20 | "manipulates_source": False, 21 | "manipulates_state": False, 22 | } 23 | 24 | def test_strategy(self): 25 | # Check if the turns played are greater than 5 26 | actions = [(C, C), (C, C), (C, C), (C, C), (C, C), (C, C), (C, C)] 27 | self.versus_test(axl.Cooperator(), expected_actions=actions) 28 | 29 | actions = [(C, D), (D, D), (D, D), (D, D), (D, D), (D, D), (D, D)] 30 | self.versus_test(axl.Defector(), expected_actions=actions) 31 | 32 | # Check for TFT behavior after 5 rounds 33 | actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D), (D, C)] 34 | self.versus_test(axl.Alternator(), expected_actions=actions) 35 | 36 | 37 | class 
TestDoubleResurrection(TestPlayer): 38 | 39 | name = "DoubleResurrection" 40 | player = axl.DoubleResurrection 41 | expected_classifier = { 42 | "memory_depth": 5, 43 | "stochastic": False, 44 | "makes_use_of": set(), 45 | "long_run_time": False, 46 | "inspects_source": False, 47 | "manipulates_source": False, 48 | "manipulates_state": False, 49 | } 50 | 51 | def test_strategy(self): 52 | actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)] 53 | self.versus_test(axl.Alternator(), expected_actions=actions) 54 | 55 | actions = [(C, C), (C, C), (C, C), (C, C), (C, C), (D, C)] 56 | self.versus_test(axl.Cooperator(), expected_actions=actions) 57 | 58 | actions = [(C, D), (D, D), (D, D), (D, D), (D, D), (D, D), (C, D)] 59 | self.versus_test(axl.Defector(), expected_actions=actions) 60 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_revised_downing.py: -------------------------------------------------------------------------------- 1 | import axelrod as axl 2 | 3 | from .test_player import TestPlayer 4 | 5 | C, D = axl.Action.C, axl.Action.D 6 | 7 | 8 | class TestRevisedDowning(TestPlayer): 9 | 10 | name = "Revised Downing" 11 | player = axl.RevisedDowning 12 | expected_classifier = { 13 | "memory_depth": float("inf"), 14 | "stochastic": False, 15 | "makes_use_of": set(), 16 | "long_run_time": False, 17 | "inspects_source": False, 18 | "manipulates_source": False, 19 | "manipulates_state": False, 20 | } 21 | 22 | def test_strategy(self): 23 | actions = [(C, C), (C, C), (C, C)] 24 | self.versus_test(axl.Cooperator(), expected_actions=actions) 25 | 26 | actions = [(C, D), (C, D), (D, D)] 27 | self.versus_test(axl.Defector(), expected_actions=actions) 28 | 29 | opponent = axl.MockPlayer(actions=[D, C, C]) 30 | actions = [(C, D), (C, C), (C, C), (C, D)] 31 | self.versus_test(opponent, expected_actions=actions) 32 | 33 | opponent = axl.MockPlayer(actions=[D, D, C]) 34 | actions = [(C, D), (C, D), (D, C), (C, D)] 35 | self.versus_test(opponent, expected_actions=actions) 36 | 37 | opponent = axl.MockPlayer(actions=[C, C, D, D, C, C]) 38 | actions = [(C, C), (C, C), (C, D), (C, D), (D, C), (C, C), (D, C)] 39 | self.versus_test(opponent, expected_actions=actions) 40 | 41 | opponent = axl.MockPlayer(actions=[C, C, C, C, D, D]) 42 | actions = [(C, C), (C, C), (C, C), (C, C), (C, D), (C, D), (C, C)] 43 | self.versus_test(opponent, expected_actions=actions) 44 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_sequence_player.py: -------------------------------------------------------------------------------- 1 | """Tests for the Thue-Morse strategies.""" 2 | 3 | import unittest 4 | 5 | import axelrod as axl 6 | from axelrod._strategy_utils import recursive_thue_morse 7 | from axelrod.strategies.sequence_player import SequencePlayer 8 | 9 | from .test_player import TestOpponent, TestPlayer 10 | 11 | C, D = axl.Action.C, axl.Action.D 12 | 13 | 14 | class TestThueMoreGenerator(unittest.TestCase): 15 | def test_sequence(self): 16 | expected = [0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0] 17 | for i, e in enumerate(expected): 18 | self.assertEqual(recursive_thue_morse(i), e) 19 | 20 | 21 | class TestSequencePlayer(unittest.TestCase): 22 | def test_sequence_player(self): 23 | """Basic test for SequencePlayer.""" 24 | 25 | def cooperate_gen(): 26 | yield 1 27 | 28 | player = SequencePlayer(generator_function=cooperate_gen) 29 | opponent = TestOpponent() 30 | self.assertEqual(C, 
player.strategy(opponent)) 31 | 32 | 33 | class TestThueMorse(TestPlayer): 34 | 35 | name = "ThueMorse" 36 | player = axl.ThueMorse 37 | expected_classifier = { 38 | "memory_depth": float("inf"), 39 | "stochastic": False, 40 | "makes_use_of": set(), 41 | "long_run_time": False, 42 | "inspects_source": False, 43 | "manipulates_source": False, 44 | "manipulates_state": False, 45 | } 46 | 47 | def test_strategy(self): 48 | 49 | thue_morse_seq = [D, C, C, D, C, D, D, C, C, D, D, C, D, C, C, D, C] 50 | n = len(thue_morse_seq) 51 | 52 | actions = list(zip(thue_morse_seq, [C] * n)) 53 | self.versus_test(axl.Cooperator(), expected_actions=actions) 54 | 55 | actions = list(zip(thue_morse_seq, [D] * n)) 56 | self.versus_test(axl.Defector(), expected_actions=actions) 57 | 58 | 59 | class TestThueMorseInverse(TestPlayer): 60 | 61 | name = "ThueMorseInverse" 62 | player = axl.ThueMorseInverse 63 | expected_classifier = { 64 | "memory_depth": float("inf"), 65 | "stochastic": False, 66 | "makes_use_of": set(), 67 | "long_run_time": False, 68 | "inspects_source": False, 69 | "manipulates_source": False, 70 | "manipulates_state": False, 71 | } 72 | 73 | def test_strategy(self): 74 | inv_thue_morse_seq = [C, D, D, C, D, C, C, D, D, C, C, D, C, D, D, C, D] 75 | n = len(inv_thue_morse_seq) 76 | 77 | actions = list(zip(inv_thue_morse_seq, [C] * n)) 78 | self.versus_test(axl.Cooperator(), expected_actions=actions) 79 | 80 | actions = list(zip(inv_thue_morse_seq, [D] * n)) 81 | self.versus_test(axl.Defector(), expected_actions=actions) 82 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_shortmem.py: -------------------------------------------------------------------------------- 1 | """Tests for the ShortMem strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestShortMem(TestPlayer): 11 | 12 | name = "ShortMem" 13 | player = axl.ShortMem 14 | expected_classifier = { 15 | "memory_depth": float("inf"), 16 | "stochastic": False, 17 | "makes_use_of": set(), 18 | "inspects_source": False, 19 | "manipulates_source": False, 20 | "manipulates_state": False, 21 | } 22 | 23 | def test_strategy(self): 24 | 25 | # Starts by cooperating for the first ten moves. 
26 | actions = [(C, C)] * 10 27 | self.versus_test(axl.Cooperator(), expected_actions=actions) 28 | 29 | actions = [(C, D)] * 10 30 | self.versus_test(axl.Defector(), expected_actions=actions) 31 | 32 | # Cooperate if in the last ten moves, Cooperations - Defections >= 3 33 | actions = [(C, C)] * 11 + [(C, D)] * 4 34 | self.versus_test( 35 | opponent=axl.MockPlayer(actions=[C] * 11 + [D] * 4), 36 | expected_actions=actions, 37 | ) 38 | 39 | # Defect if in the last ten moves, Defections - Cooperations >= 3 40 | actions = [(C, D)] * 11 + [(D, C)] * 4 41 | self.versus_test( 42 | opponent=axl.MockPlayer(actions=[D] * 11 + [C] * 4), 43 | expected_actions=actions, 44 | ) 45 | 46 | # If neither of the above conditions are met, apply TitForTat 47 | actions = ( 48 | [(C, D)] * 5 49 | + [(C, C)] * 6 50 | + [(C, D), (D, D), (D, D), (D, C), (C, C)] 51 | ) 52 | self.versus_test( 53 | opponent=axl.MockPlayer( 54 | actions=[D] * 5 + [C] * 6 + [D, D, D, C, C] 55 | ), 56 | expected_actions=actions, 57 | ) 58 | 59 | actions = ( 60 | [(C, C)] * 5 61 | + [(C, D)] * 6 62 | + [(D, C), (C, C), (C, C), (C, D), (D, D)] 63 | ) 64 | self.versus_test( 65 | opponent=axl.MockPlayer( 66 | actions=[C] * 5 + [D] * 6 + [C, C, C, D, D] 67 | ), 68 | expected_actions=actions, 69 | ) 70 | -------------------------------------------------------------------------------- /axelrod/tests/strategies/test_verybad.py: -------------------------------------------------------------------------------- 1 | """Tests for the VeryBad strategy.""" 2 | 3 | import axelrod as axl 4 | 5 | from .test_player import TestPlayer 6 | 7 | C, D = axl.Action.C, axl.Action.D 8 | 9 | 10 | class TestVeryBad(TestPlayer): 11 | 12 | name = "VeryBad" 13 | player = axl.VeryBad 14 | expected_classifier = { 15 | "memory_depth": float("inf"), 16 | "stochastic": False, 17 | "makes_use_of": set(), 18 | "inspects_source": False, 19 | "manipulates_source": False, 20 | "manipulates_state": False, 21 | } 22 | 23 | def test_strategy(self): 24 | # axelrod.Defector - 25 | # cooperates for the first three, defects for the rest P(C) < .5 26 | self.versus_test( 27 | axl.Defector(), expected_actions=([(C, D)] * 3 + [(D, D)] * 7) 28 | ) 29 | 30 | # axelrod.Cooperator - 31 | # cooperate for all, P(C) == 1 32 | self.versus_test(axl.Cooperator(), expected_actions=[(C, C)]) 33 | 34 | expected_actions = [ 35 | (C, C), # first three cooperate 36 | (C, D), 37 | (C, D), 38 | (D, C), # P(C) = .33 39 | (C, C), # P(C) = .5 (last move C) 40 | (C, D), # P(C) = .6 41 | (D, D), # P(C) = .5 (last move D) 42 | (D, D), # P(C) = .43 43 | (D, C), # P(C) = .375 44 | (D, D), # P(C) = .4 45 | ] 46 | mock_player = axl.MockPlayer(actions=[a[1] for a in expected_actions]) 47 | self.versus_test(mock_player, expected_actions=expected_actions) 48 | -------------------------------------------------------------------------------- /axelrod/tests/unit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/axelrod/tests/unit/__init__.py -------------------------------------------------------------------------------- /axelrod/tests/unit/test_actions.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import axelrod as axl 4 | from axelrod.action import UnknownActionError, actions_to_str, str_to_actions 5 | 6 | C, D = axl.Action.C, axl.Action.D 7 | 8 | 9 | class TestAction(unittest.TestCase): 10 | def test_lt(self): 11 | 
self.assertLess(C, D) 12 | 13 | def test_repr(self): 14 | self.assertEqual(repr(C), "C") 15 | self.assertEqual(repr(D), "D") 16 | 17 | def test_str(self): 18 | self.assertEqual(str(C), "C") 19 | self.assertEqual(str(D), "D") 20 | 21 | def test__eq__(self): 22 | self.assertTrue(C == C) 23 | self.assertTrue(D == D) 24 | self.assertFalse(C == D) 25 | self.assertFalse(D == C) 26 | 27 | def test_total_order(self): 28 | actions = [C, D, D, C, C, C, D] 29 | actions.sort() 30 | self.assertEqual(actions, [C, C, C, C, D, D, D]) 31 | 32 | def test_flip(self): 33 | self.assertEqual(C.flip(), D) 34 | self.assertEqual(D.flip(), C) 35 | 36 | def test_from_char(self): 37 | self.assertEqual(axl.Action.from_char("C"), C) 38 | self.assertEqual(axl.Action.from_char("D"), D) 39 | 40 | def test_from_char_error(self): 41 | self.assertRaises(UnknownActionError, axl.Action.from_char, "") 42 | self.assertRaises(UnknownActionError, axl.Action.from_char, "c") 43 | self.assertRaises(UnknownActionError, axl.Action.from_char, "d") 44 | self.assertRaises(UnknownActionError, axl.Action.from_char, "A") 45 | self.assertRaises(UnknownActionError, axl.Action.from_char, "CC") 46 | 47 | def test_str_to_actions(self): 48 | self.assertEqual(str_to_actions(""), ()) 49 | self.assertEqual(str_to_actions("C"), (C,)) 50 | self.assertEqual(str_to_actions("CDDC"), (C, D, D, C)) 51 | 52 | def test_str_to_actions_fails_fast_and_raises_value_error(self): 53 | self.assertRaises(UnknownActionError, str_to_actions, "Cc") 54 | 55 | def test_actions_to_str(self): 56 | self.assertEqual(actions_to_str([]), "") 57 | self.assertEqual(actions_to_str([C]), "C") 58 | self.assertEqual(actions_to_str([C, D, C]), "CDC") 59 | self.assertEqual(actions_to_str((C, C, D)), "CCD") 60 | 61 | def test_actions_to_str_with_iterable(self): 62 | self.assertEqual(actions_to_str(iter([C, D, C])), "CDC") 63 | generator = (action for action in [C, D, C]) 64 | self.assertEqual(actions_to_str(generator), "CDC") 65 | -------------------------------------------------------------------------------- /axelrod/tests/unit/test_eigen.py: -------------------------------------------------------------------------------- 1 | """Test for eigen.py.""" 2 | 3 | import unittest 4 | 5 | import numpy as np 6 | from numpy.testing import assert_array_almost_equal 7 | 8 | from axelrod.eigen import _normalise, principal_eigenvector 9 | 10 | 11 | class FunctionCases(unittest.TestCase): 12 | def test_identity_matrices(self): 13 | for size in range(2, 6): 14 | mat = np.identity(size) 15 | evector, evalue = principal_eigenvector(mat) 16 | self.assertAlmostEqual(evalue, 1) 17 | assert_array_almost_equal(evector, _normalise(np.ones(size))) 18 | 19 | def test_zero_matrix(self): 20 | mat = np.array([[0, 0], [0, 0]]) 21 | evector, evalue = principal_eigenvector(mat) 22 | self.assertTrue(np.isnan(evalue)) 23 | self.assertTrue(np.isnan(evector[0])) 24 | self.assertTrue(np.isnan(evector[1])) 25 | 26 | def test_2x2_matrix(self): 27 | mat = np.array([[2, 1], [1, 2]]) 28 | evector, evalue = principal_eigenvector(mat) 29 | self.assertAlmostEqual(evalue, 3) 30 | assert_array_almost_equal(evector, np.dot(mat, evector) / evalue) 31 | assert_array_almost_equal(evector, _normalise(np.array([1, 1]))) 32 | 33 | def test_3x3_matrix(self): 34 | mat = np.array([[1, 2, 0], [-2, 1, 2], [1, 3, 1]]) 35 | evector, evalue = principal_eigenvector( 36 | mat, maximum_iterations=None, max_error=1e-10 37 | ) 38 | self.assertAlmostEqual(evalue, 3) 39 | assert_array_almost_equal(evector, np.dot(mat, evector) / evalue) 40 | 
assert_array_almost_equal(evector, _normalise(np.array([0.5, 0.5, 1]))) 41 | 42 | def test_4x4_matrix(self): 43 | mat = np.array([[2, 0, 0, 0], [1, 2, 0, 0], [0, 1, 3, 0], [0, 0, 1, 3]]) 44 | evector, evalue = principal_eigenvector( 45 | mat, maximum_iterations=None, max_error=1e-10 46 | ) 47 | self.assertAlmostEqual(evalue, 3, places=3) 48 | assert_array_almost_equal(evector, np.dot(mat, evector) / evalue) 49 | assert_array_almost_equal( 50 | evector, _normalise(np.array([0, 0, 0, 1])), decimal=4 51 | ) 52 | -------------------------------------------------------------------------------- /axelrod/tests/unit/test_load_data.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pathlib 3 | import unittest 4 | 5 | from axelrod.load_data_ import axl_filename, load_file 6 | 7 | 8 | class TestLoadData(unittest.TestCase): 9 | def test_axl_filename(self): 10 | path = pathlib.Path("axelrod/strategies/titfortat.py") 11 | actual_fn = axl_filename(path) 12 | 13 | # First go from "unit" up to "tests", then up to "axelrod" 14 | dirname = os.path.dirname(__file__) 15 | expected_fn = os.path.join(dirname, "../../strategies/titfortat.py") 16 | 17 | self.assertTrue(os.path.samefile(actual_fn, expected_fn)) 18 | 19 | def test_raise_error_if_file_empty(self): 20 | path = pathlib.Path("not/a/file.py") 21 | with self.assertRaises(FileNotFoundError): 22 | load_file(path, ".") 23 | 24 | def test_raise_error_if_something(self): 25 | dirname = os.path.dirname(__file__) 26 | path = os.path.join(dirname, "../../strategies/titfortat.py") 27 | bad_loader = lambda _, __: None 28 | with self.assertRaises(FileNotFoundError): 29 | load_file(path, ".", bad_loader) 30 | -------------------------------------------------------------------------------- /axelrod/tests/unit/test_makes_use_of.py: -------------------------------------------------------------------------------- 1 | """Tests for makes_use_of.""" 2 | 3 | import unittest 4 | 5 | import axelrod as axl 6 | from axelrod.makes_use_of import ( 7 | class_makes_use_of, 8 | makes_use_of, 9 | makes_use_of_variant, 10 | method_makes_use_of, 11 | ) 12 | from axelrod.strategy_transformers import final_sequence 13 | 14 | 15 | class TestMakesUseOfLengthAndGamePlayer(axl.Player): 16 | """ 17 | Should have some function that uses length 18 | """ 19 | 20 | def first_function(self): # pragma: no cover 21 | x = 1 + 2 22 | x * 5 23 | 24 | def second_function(self): # pragma: no cover 25 | # We put this in the second function to make sure both are checked. 26 | x = 1 + self.match_attributes["length"] 27 | 28 | # Should only add once. 29 | y = 2 + self.match_attributes["length"] 30 | 31 | # Should also add game. 
32 | self.match_attributes["game"] 33 | 34 | 35 | class TestMakesUseOfNothingPlayer(axl.Player): 36 | """ 37 | Doesn't use match_attributes 38 | """ 39 | 40 | def only_function(self): # pragma: no cover 41 | 1 + 2 + 3 42 | print("=6") 43 | 44 | 45 | class TestMakesUseOf(unittest.TestCase): 46 | def test_makes_use_of_length_and_game(self): 47 | self.assertEqual( 48 | makes_use_of(TestMakesUseOfLengthAndGamePlayer()), 49 | {"length", "game"}, 50 | ) 51 | 52 | def test_makes_use_of_empty(self): 53 | self.assertEqual(makes_use_of(TestMakesUseOfNothingPlayer()), set()) 54 | 55 | def test_untransformed_class(self): 56 | for player in [axl.Cooperator(), axl.Random()]: 57 | self.assertEqual(class_makes_use_of(player), set()) 58 | self.assertEqual(makes_use_of_variant(player), set()) 59 | self.assertEqual(method_makes_use_of(player.strategy), set()) 60 | 61 | def test_transformer_wrapper(self): 62 | # Test that the final transformer wrapper makes use of length 63 | self.assertEqual(method_makes_use_of(final_sequence), {"length"}) 64 | 65 | def test_makes_use_of_transformed(self): 66 | # These players use match length via Final transformer 67 | for player in [axl.BackStabber(), axl.FirstBySteinAndRapoport()]: 68 | self.assertEqual(makes_use_of(player), {"length"}) 69 | self.assertEqual(makes_use_of_variant(player), {"length"}) 70 | -------------------------------------------------------------------------------- /axelrod/tests/unit/test_mock_player.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import axelrod as axl 4 | 5 | C, D = axl.Action.C, axl.Action.D 6 | 7 | 8 | class TestMockPlayer(unittest.TestCase): 9 | def test_strategy(self): 10 | for action in [C, D]: 11 | m = axl.MockPlayer(actions=[action]) 12 | p2 = axl.Player() 13 | self.assertEqual(action, m.strategy(p2)) 14 | 15 | actions = [C, C, D, D, C, C] 16 | m = axl.MockPlayer(actions=actions) 17 | p2 = axl.Player() 18 | for action in actions: 19 | self.assertEqual(action, m.strategy(p2)) 20 | -------------------------------------------------------------------------------- /axelrod/tests/unit/test_version.py: -------------------------------------------------------------------------------- 1 | """Tests the version number.""" 2 | 3 | import unittest 4 | 5 | import axelrod as axl 6 | 7 | 8 | class TestVersion(unittest.TestCase): 9 | def test_version(self): 10 | self.assertIsInstance(axl.__version__, str) 11 | -------------------------------------------------------------------------------- /axelrod/version.py: -------------------------------------------------------------------------------- 1 | __version__ = "4.13.1" 2 | -------------------------------------------------------------------------------- /docs/_static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/docs/_static/favicon.ico -------------------------------------------------------------------------------- /docs/discussion/axelrods_tournaments.rst: -------------------------------------------------------------------------------- 1 | Background to Axelrod's Tournament 2 | ================================== 3 | 4 | `In the 1980s, professor of Political Science Robert Axelrod ran a tournament inviting strategies from collaborators all over the world for the Iterated Prisoner's Dilemma `_. 
5 | 6 | Another nice write up of Axelrod's work and this tournament on github was put together by `Artem Kaznatcheev `_ `here `_. 7 | 8 | The Prisoner's Dilemma 9 | ---------------------- 10 | 11 | The `Prisoner's dilemma `_ is the simple two player game shown below: 12 | 13 | +----------+---------------+---------------+ 14 | | | Cooperate | Defect | 15 | +==========+===============+===============+ 16 | |Cooperate | (3,3) | (0,5) | 17 | +----------+---------------+---------------+ 18 | |Defect | (5,0) | (1,1) | 19 | +----------+---------------+---------------+ 20 | 21 | If both players cooperate they will each go to prison for 2 years and receive an 22 | equivalent utility of 3. 23 | If one cooperates and the other defects: the defector does not go to prison and the cooperator goes to prison for 5 years, the cooperator receives a utility of 0 and the defector a utility of 5. 24 | If both defect: they both go to prison for 4 years and receive an equivalent 25 | utility of 1. 26 | 27 | .. note:: Years in prison do not translate directly into utility. The formula is U = 5 - Y for Y in [0, 5], where ``U`` is the utility and ``Y`` is the number of years in prison. This follows Axelrod's original scoring. 28 | 29 | By simply investigating the best responses against both possible actions of each player it is immediately clear that the Nash equilibrium for this game is for both players to defect. 30 | 31 | The Iterated Prisoner's Dilemma 32 | ------------------------------- 33 | 34 | We can use the basic Prisoner's Dilemma as a *stage* game in a repeated game. 35 | Players now aim to maximise their total utility (equivalently, to minimise their total years in prison) over the repetitions of the game. 36 | Strategies can take into account both players' histories and so can take the form: 37 | 38 | "I will cooperate unless you defect 3 times in a row at which point I will defect forever." 39 | 40 | Axelrod ran such a tournament (twice) and invited strategies from anyone who would contribute. 41 | The tournament was a round robin and the winner was the strategy that had the lowest total amount of time in prison. 42 | 43 | This tournament has been used to study how cooperation can evolve from a very simple set of rules. 44 | This is mainly because the winner of both tournaments was 'tit for tat': a strategy that would never defect first (referred to as a 'nice' strategy). 45 | -------------------------------------------------------------------------------- /docs/discussion/community/communication.rst: -------------------------------------------------------------------------------- 1 | .. _communication: 2 | 3 | Communication 4 | ------------- 5 | 6 | There are various ways of communicating with the team: 7 | 8 | - `Gitter: a web based chat client, you can talk directly to the users and 9 | maintainers of the library. `_ 10 | - Irc: we have an irc channel. It's #axelrod-python on freenode. 11 | - `Email forum. `_ 12 | - `Issues: you are also very welcome to open an issue on 13 | github `_ 14 | - `Twitter. `_ This account periodically 15 | tweets out random match and tournament results; you're welcome to get in 16 | touch through twitter as well. 17 | -------------------------------------------------------------------------------- /docs/discussion/community/index.rst: -------------------------------------------------------------------------------- 1 | .. _community: 2 | 3 | Community 4 | ========= 5 | 6 | Contents: 7 | 8 | ..
toctree:: 9 | :maxdepth: 2 10 | 11 | team.rst 12 | communication.rst 13 | coc.rst 14 | -------------------------------------------------------------------------------- /docs/discussion/community/team.rst: -------------------------------------------------------------------------------- 1 | .. _team: 2 | 3 | Part of the team 4 | ---------------- 5 | 6 | If you’re reading this you’re probably interested in contributing to and/or 7 | using the Axelrod library! Firstly: **thank you** and **welcome**! 8 | 9 | We are proud of the library and the environment that surrounds it. A primary 10 | goal of the project is to cultivate an open and welcoming community, considerate 11 | and respectful to newcomers to python and game theory. 12 | 13 | The Axelrod library has been a first contribution to open source software for 14 | many, and this is in large part due to the fact that we all aim to help and 15 | encourage all levels of contribution. If you're a beginner, that's awesome! 16 | You're very welcome and don't hesitate to ask for help. 17 | 18 | **With regards to any contribution**, please do not feel the need to wait until 19 | your contribution is perfectly polished and complete: we're happy to offer 20 | early feedback, help with git, and anything else that you need to have a 21 | positive experience. 22 | 23 | **If you are using the library for your own work** and there's anything in the 24 | documentation that is unclear: we want to know so that we can fix it. We also 25 | want to help so please don't hesitate to get in touch. 26 | -------------------------------------------------------------------------------- /docs/discussion/index.rst: -------------------------------------------------------------------------------- 1 | Discussion 2 | ========== 3 | 4 | This section is a discussion of various aspects of Game Theory related to the 5 | Axelrod library. 6 | 7 | Contents: 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | axelrods_tournaments.rst 13 | strategy_archetypes.rst 14 | overview_of_past_tournaments.rst 15 | play_contexts.rst 16 | community/index.rst 17 | -------------------------------------------------------------------------------- /docs/discussion/play_contexts.rst: -------------------------------------------------------------------------------- 1 | .. _play_contexts: 2 | 3 | Play Contexts and Generic Prisoner's Dilemma 4 | ============================================ 5 | 6 | There are four possible round outcomes: 7 | 8 | - Mutual cooperation: :math:`(C, C)` 9 | - Defection: :math:`(C, D)` or :math:`(D, C)` 10 | - Mutual defection: :math:`(D, D)` 11 | 12 | Each of these corresponds to one particular set of payoffs in the following 13 | generic Prisoner's dilemma: 14 | 15 | 16 | +----------+---------------+---------------+ 17 | | | Cooperate | Defect | 18 | +==========+===============+===============+ 19 | |Cooperate | (R,R) | (S,T) | 20 | +----------+---------------+---------------+ 21 | |Defect | (T,S) | (P,P) | 22 | +----------+---------------+---------------+ 23 | 24 | For the above to constitute a Prisoner's dilemma, the following must hold: 25 | :math:`T>R>P>S`. 
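As a quick sketch, this condition can be checked against the library's default payoff values using the :code:`Game` class (this uses only the default game object; the defaults themselves are listed below)::

    >>> import axelrod as axl
    >>> R, P, S, T = axl.Game().RPST()
    >>> T > R > P > S
    True
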
26 | 27 | These payoffs are commonly referred to as: 28 | 29 | - :math:`R`: the **Reward** payoff (default value in the library: 3) 30 | - :math:`P`: the **Punishment** payoff (default value in the library: 1) 31 | - :math:`S`: the **Sucker** payoff (default value in the library: 0) 32 | - :math:`T`: the **Temptation** payoff (default value in the library: 5) 33 | 34 | A particular Prisoner's Dilemma is often described by the 4-tuple: :math:`(R, P, 35 | S, T)`:: 36 | 37 | >>> import axelrod 38 | >>> axelrod.game.DefaultGame.RPST() 39 | (3, 1, 0, 5) 40 | -------------------------------------------------------------------------------- /docs/how-to/_static/fingerprinting/WSLS_large.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/docs/how-to/_static/fingerprinting/WSLS_large.png -------------------------------------------------------------------------------- /docs/how-to/_static/fingerprinting/WSLS_large_alt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/docs/how-to/_static/fingerprinting/WSLS_large_alt.png -------------------------------------------------------------------------------- /docs/how-to/_static/fingerprinting/WSLS_small.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/docs/how-to/_static/fingerprinting/WSLS_small.png -------------------------------------------------------------------------------- /docs/how-to/_static/fingerprinting/transitive_TFT.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/docs/how-to/_static/fingerprinting/transitive_TFT.png -------------------------------------------------------------------------------- /docs/how-to/_static/fingerprinting/transitive_TFT_against_demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/docs/how-to/_static/fingerprinting/transitive_TFT_against_demo.png -------------------------------------------------------------------------------- /docs/how-to/_static/spatial_tournaments/spatial.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/docs/how-to/_static/spatial_tournaments/spatial.png -------------------------------------------------------------------------------- /docs/how-to/_static/spatial_tournaments/spatial_results.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/docs/how-to/_static/spatial_tournaments/spatial_results.png -------------------------------------------------------------------------------- /docs/how-to/access_collections_of_strategies.rst: -------------------------------------------------------------------------------- 1 | .. 
_strategies: 2 | 3 | Access collections of strategies 4 | ================================ 5 | 6 | All of the strategies are accessible from the main name space of the library. 7 | For example:: 8 | 9 | >>> import axelrod as axl 10 | >>> axl.TitForTat() 11 | Tit For Tat 12 | >>> axl.Cooperator() 13 | Cooperator 14 | 15 | The **main strategies** which obey the rules of Axelrod's original tournament 16 | can be found in a list: `axelrod.strategies`:: 17 | 18 | >>> axl.strategies 19 | [... 20 | 21 | This makes creating a full 22 | tournament very straightforward:: 23 | 24 | >>> players = [s() for s in axl.strategies] 25 | >>> tournament = axl.Tournament(players) 26 | 27 | There are a list of various other strategies in the library to make it 28 | easier to create a variety of tournaments:: 29 | 30 | >>> axl.demo_strategies # 5 simple strategies useful for demonstration. 31 | [... 32 | >>> axl.basic_strategies # A set of basic strategies. 33 | [... 34 | >>> axl.long_run_time_strategies # These have a high computational cost 35 | [... 36 | 37 | Furthermore there are some strategies that 'cheat' (for example by modifying 38 | their opponents source code). These can be found in 39 | :code:`axelrod.cheating_strategies`:: 40 | 41 | >>> axl.cheating_strategies 42 | [... 43 | 44 | All of the strategies in the library are contained in: 45 | :code:`axelrod.all_strategies`:: 46 | 47 | >>> axl.all_strategies 48 | [... 49 | 50 | All strategies are also classified, you can read more about that in 51 | :ref:`classification-of-strategies`. 52 | -------------------------------------------------------------------------------- /docs/how-to/calculate_morality_metrics.rst: -------------------------------------------------------------------------------- 1 | .. _morality-metrics: 2 | 3 | Calculate Morality Metrics 4 | ========================== 5 | 6 | Tyler Singer-Clark's June 2014 paper, "Morality Metrics On Iterated Prisoner’s 7 | Dilemma Players" [Singer-Clark2014]_), describes several interesting metrics which 8 | may be used to analyse IPD tournaments all of which are available within the 9 | ResultSet class. (Tyler's paper is available here: 10 | http://www.scottaaronson.com/morality.pdf). 11 | 12 | Each metric depends upon the cooperation rate of the players, defined by Tyler 13 | Singer-Clark as: 14 | 15 | .. math:: 16 | 17 | CR(b) = \frac{C(b)}{TT} 18 | 19 | where C(b) is the total number of turns where a player chose to cooperate and TT 20 | is the total number of turns played. 21 | 22 | A matrix of cooperation rates is available within a tournament's ResultSet:: 23 | 24 | >>> import axelrod as axl 25 | >>> players = [axl.Cooperator(), axl.Defector(), 26 | ... axl.TitForTat(), axl.Grudger()] 27 | >>> tournament = axl.Tournament(players) 28 | >>> results = tournament.play() 29 | >>> [[round(float(ele), 3) for ele in row] for row in results.normalised_cooperation] 30 | [[1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0], [1.0, 0.005, 1.0, 1.0], [1.0, 0.005, 1.0, 1.0]] 31 | 32 | There is also a 'good partner' matrix showing how often a player cooperated at 33 | least as much as its opponent:: 34 | 35 | >>> results.good_partner_matrix 36 | [[0, 10, 10, 10], [0, 0, 0, 0], [10, 10, 0, 10], [10, 10, 10, 0]] 37 | 38 | Each of the metrics described in Tyler's paper is available as follows (here they are rounded to 2 digits):: 39 | 40 | >>> [round(ele, 2) for ele in results.cooperating_rating] 41 | [1.0, 0.0, 0.67..., 0.67...] 
42 | >>> [round(ele, 2) for ele in results.good_partner_rating] 43 | [1.0, 0.0, 1.0, 1.0] 44 | >>> [round(ele, 2) for ele in results.eigenjesus_rating] 45 | [0.58, 0.0, 0.58, 0.58] 46 | >>> [round(ele, 2) for ele in results.eigenmoses_rating] 47 | [0.37, -0.37, 0.6, 0.6] 48 | -------------------------------------------------------------------------------- /docs/how-to/check_player_equality.rst: -------------------------------------------------------------------------------- 1 | Check Player equality 2 | ===================== 3 | 4 | It is possible to test for player equality using :code:`==`:: 5 | 6 | >>> import axelrod as axl 7 | >>> p1, p2, p3 = axl.Alternator(), axl.Alternator(), axl.TitForTat() 8 | >>> p1 == p2 9 | True 10 | >>> p1 == p3 11 | False 12 | 13 | Note that this checks all the attributes of an instance:: 14 | 15 | >>> p1.name = "John Nash" 16 | >>> p1 == p2 17 | False 18 | 19 | This however does not check if the players will behave in the same way. For 20 | example here are two equivalent players:: 21 | 22 | >>> p1 = axl.Alternator() 23 | >>> p2 = axl.Cycler("CD") 24 | >>> p1 == p2 25 | False 26 | 27 | To check if player strategies are equivalent you can use :ref:`fingerprinting`. 28 | -------------------------------------------------------------------------------- /docs/how-to/cite_the_library.rst: -------------------------------------------------------------------------------- 1 | Cite the library 2 | ================ 3 | 4 | We would be delighted if anyone wanted to use and/or reference this library for 5 | their own research. 6 | 7 | If you do please let us know and reference the library: as described in the 8 | `CITATION.rst file on the library 9 | repository 10 | `_. 11 | -------------------------------------------------------------------------------- /docs/how-to/contributing/guidelines.rst: -------------------------------------------------------------------------------- 1 | Guidelines 2 | ========== 3 | 4 | All contributions to this repository are welcome via pull request on the `github repository `_. 5 | 6 | The project follows the following guidelines: 7 | 8 | 1. Try as best as possible to follow `PEP8 9 | `_ which includes **using 10 | descriptive variable names**. 11 | 2. Code Format: Use the `Black formatter `_ to format 12 | all code and the `isort utility `_ to 13 | sort import statements. You can run black on all code with:: 14 | 15 | $ python -m black -l 80 . 16 | 17 | 3. Commits: Please try to use commit messages that give a meaningful history 18 | for anyone using git's log features. Try to use messages that complete sentence, 19 | "This commit will..." There is some excellent guidance on the subject 20 | from `Chris Beams `_ 21 | 4. Testing: the project uses the `unittest 22 | `_ library and has a nice 23 | testing suite that makes some things easy to write tests for. Please try 24 | to increase the test coverage on pull requests. 25 | 5. Merging pull-requests: We require two of the (currently three) core-team 26 | maintainers to merge. Opening a PR for early 27 | feedback or to check test coverage is OK, just indicate that the PR is not ready 28 | to merge (and update when it is). 29 | 30 | By submitting a pull request, you are agreeing that your work may be distributed 31 | under the terms of the project's `licence `_ and you will become one of the project's joint copyright holders. 
32 | -------------------------------------------------------------------------------- /docs/how-to/contributing/index.rst: -------------------------------------------------------------------------------- 1 | Contribute 2 | ========== 3 | 4 | This section contains a variety of tutorials that should help you contribute to 5 | the library. 6 | 7 | Contents: 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | guidelines.rst 13 | setting_up_the_environment.rst 14 | strategy/index.rst 15 | library/index.rst 16 | running_tests.rst 17 | -------------------------------------------------------------------------------- /docs/how-to/contributing/library/index.rst: -------------------------------------------------------------------------------- 1 | Contributing to the library 2 | =========================== 3 | 4 | All contributions (docs, tests, etc) are very welcome, if there is a specific 5 | functionality that you would like to add then please open an issue (or indeed 6 | take a look at the ones already there and jump in the conversation!). 7 | 8 | If you want to work on documentation please keep in mind that doctests are 9 | encouraged to help keep the documentation up to date. 10 | -------------------------------------------------------------------------------- /docs/how-to/contributing/running_tests.rst: -------------------------------------------------------------------------------- 1 | Running tests 2 | ============= 3 | 4 | Basic test runners 5 | ------------------ 6 | 7 | The project has an extensive test suite which is run each time a new 8 | contribution is made to the repository. If you want to check that all the tests 9 | pass before you submit a pull request you can run the tests yourself:: 10 | 11 | $ python -m tox 12 | 13 | This will run the entire test suite in an isolated environment for all currently 14 | supported versions of Python. 15 | 16 | If you are developing new tests for the suite, it is useful to run a single test 17 | file so that you don't have to wait for the entire suite each time. For 18 | example, to run only the tests for the Grudger strategy:: 19 | 20 | $ python -m pytest axelrod/tests/strategies/test_grudger.py 21 | 22 | The test suite is divided into three categories: strategy tests, unit tests and integration tests. 23 | Each can be run individually:: 24 | 25 | $ python -m pytest axelrod/tests/strategies/ 26 | $ python -m pytest axelrod/tests/unit/ 27 | $ python -m pytest axelrod/tests/integration/ 28 | 29 | 30 | Testing coverage of tests 31 | ------------------------- 32 | 33 | The library has 100% test coverage. This can be tested using the Python 34 | :code:`coverage` package. Once installed (:code:`pip install coverage`), to run 35 | the tests and check the coverage for the entire library:: 36 | 37 | $ coverage run --source=axelrod -m pytest . 38 | 39 | You can then view a report of the coverage:: 40 | 41 | $ coverage report -m 42 | 43 | You can also run the coverage on a subset of the tests. For example, to run the 44 | tests with coverage for the Grudger strategy:: 45 | 46 | $ coverage run --source=axelrod -m pytest axelrod/tests/strategies/test_grudger.py 47 | 48 | 49 | Testing the documentation 50 | ------------------------- 51 | 52 | The documentation is doctested, to run those tests you can run 53 | the script:: 54 | 55 | $ python doctests.py 56 | 57 | You can also run the doctests on any given file. 
For example, to run the 58 | doctests for the :code:`docs/tutorials/getting_started/match.rst` file:: 59 | 60 | $ python -m doctest docs/tutorials/getting_started/match.rst 61 | -------------------------------------------------------------------------------- /docs/how-to/contributing/setting_up_the_environment.rst: -------------------------------------------------------------------------------- 1 | Setting up the environment 2 | ========================== 3 | 4 | Installing all dependencies 5 | --------------------------- 6 | 7 | All development dependencies can be installed by running:: 8 | 9 | $ pip install -r requirements/development.txt 10 | 11 | It is recommended to do this using a virtual environment tool of your choice. 12 | 13 | For example, when using the virtual environment library :code:`venv`:: 14 | 15 | $ python -m venv axelrod_development 16 | $ source axelrod_development/bin/activate 17 | $ pip install -r requirements/development.txt 18 | 19 | Alternatively, you can specify the development variant rather than the path:: 20 | 21 | $ python -m venv axelrod_development 22 | $ source axelrod_development/bin/activate 23 | $ pip install .[development] 24 | 25 | The git workflow 26 | ---------------- 27 | 28 | There are two important branches in this repository: 29 | 30 | - :code:`dev`: The most up to date branch with no failing tests. 31 | This is the default branch on github. 32 | - :code:`release`: The latest release. 33 | 34 | When working on a new contribution, branch from the latest :code:`dev` branch and 35 | open a Pull Request on github from your branch to the :code:`dev` branch. 36 | 37 | The procedure for a new release (this is carried out by one of the core maintainers): 38 | 39 | 1. Create a Pull Request from :code:`dev` to :code:`release` which should 40 | include an update to :code:`axelrod/version.py` and :code:`CHANGES.md` 41 | 2. Create a git tag. 42 | 3. Push to github. 43 | 4. Create a release on github. 44 | 5. Push to PyPi: :code:`python setup.py sdist bdist_wheel upload` 45 | -------------------------------------------------------------------------------- /docs/how-to/contributing/strategy/adding_the_new_strategy.rst: -------------------------------------------------------------------------------- 1 | Adding the new strategy 2 | ======================= 3 | 4 | To get the strategy to be recognised by the library we need to add it to the 5 | files that initialise when someone types :code:`import axelrod`. This is done 6 | in the :code:`axelrod/strategies/_strategies.py` file. 7 | 8 | To classify the new strategy, run rebuild_classifier_table:: 9 | 10 | python rebuild_classifier_table.py 11 | 12 | This will update :code:`axelrod/strategies/_strategies.py`. Check that the 13 | recorded classifications for the strategies are what you expected. 14 | 15 | If you have added your strategy to a file that already existed (perhaps you 16 | added a new variant of :code:`titfortat` to the :code:`titfortat.py` file), 17 | simply add your strategy to the list of strategies already imported from 18 | :code:`<file_name>.py`:: 19 | 20 | from <file_name> import <strategy_name> 21 | 22 | If you have added your strategy to a new file then simply add a line similar to 23 | above with your new strategy. 24 | 25 | Once you have done that, you need to add the class itself to the 26 | :code:`all_strategies` list (in :code:`axelrod/strategies/_strategies.py`). 27 | You will also need to increment the doctest in 28 | :code:`axelrod/docs/index.rst`.
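That doctest keeps a running count of the strategies available in the library. A minimal sketch of the kind of check involved (treat this as illustrative rather than the exact contents of that file; the expected count is omitted since it changes whenever a strategy is added) is::

    >>> import axelrod as axl
    >>> len(axl.strategies)  # doctest: +SKIP
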
29 | 30 | Finally, if you have created a new module (a new :code:`<file_name>.py` file) 31 | please add it to the :code:`docs/references/all_strategies.rst` file so that it 32 | will automatically be documented. 33 | -------------------------------------------------------------------------------- /docs/how-to/contributing/strategy/classifying_the_new_strategy.rst: -------------------------------------------------------------------------------- 1 | Classifying the new strategy 2 | ============================ 3 | 4 | Every strategy class has a classifier dictionary that gives some classification 5 | of the strategy according to certain dimensions. Some of the classifiers have 6 | formulas that try to compute the value for different strategies. Where these 7 | exist, they're overridden by the values defined in this dictionary. When 8 | creating a new strategy, you should try to fill out all of the dictionary. 9 | 10 | Let us take a look at the dimensions available by looking at :code:`TitForTat`:: 11 | 12 | >>> import axelrod 13 | >>> classifier = axelrod.TitForTat.classifier 14 | >>> for key in sorted(classifier.keys()): 15 | ... print(key) 16 | inspects_source 17 | long_run_time 18 | manipulates_source 19 | manipulates_state 20 | memory_depth 21 | stochastic 22 | 23 | You can read more about this in the :ref:`classification-of-strategies` section 24 | but here are some tips about filling this part in correctly. 25 | 26 | Note that when an instance of a class is created it gets its own copy of the 27 | default classifier dictionary from the class. This might sometimes be modified by 28 | the initialisation depending on input parameters. A good example of this is the 29 | :code:`Joss` strategy:: 30 | 31 | >>> joss = axelrod.FirstByJoss() 32 | >>> boring_joss = axelrod.FirstByJoss(p=1) 33 | >>> axelrod.Classifiers["stochastic"](joss) 34 | True 35 | >>> axelrod.Classifiers["stochastic"](boring_joss) 36 | False 37 | 38 | A classifier value defined on the instance overrides the value defined for the 39 | class. 40 | 41 | There are currently three important dimensions that help identify if a strategy 42 | obeys Axelrod's original tournament rules. 43 | 44 | 1. :code:`inspects_source` - does the strategy 'read' any source code that 45 | it would not normally have access to. An example of this is :code:`Geller`. 46 | 2. :code:`manipulates_source` - does the strategy 'write' any source code that 47 | it would not normally be able to. An example of this is :code:`Mind Bender`. 48 | 3. :code:`manipulates_state` - does the strategy 'change' any attributes that 49 | it would not normally be able to. An example of this is :code:`Mind Reader`. 50 | 51 | These dimensions are currently relevant to the `obey_axelrod` function which 52 | checks if a strategy obeys Axelrod's original rules. 53 | -------------------------------------------------------------------------------- /docs/how-to/contributing/strategy/docstrings.rst: -------------------------------------------------------------------------------- 1 | Writing docstrings 2 | ================== 3 | 4 | The project takes pride in its documentation for the strategies 5 | and its corresponding bibliography. The docstring is a string 6 | which describes a method, module or class. The docstrings help 7 | the user understand how the strategy works 8 | and where the strategy comes from. The docstring must be written in 9 | the following way:: 10 | 11 | """This is a docstring. 12 | 13 | It can be written over multiple lines.
14 | 15 | """ 16 | 17 | Sections 18 | -------- 19 | 20 | The sections of the docstring are: 21 | 22 | 1. **Working of the strategy** 23 | 24 | A brief summary of how the strategy works, e.g.:: 25 | 26 | class TitForTat(Player): 27 | """ 28 | A player starts by cooperating and then mimics the 29 | previous action of the opponent. 30 | """ 31 | 32 | 2. **Bibliography/Source of the strategy** 33 | 34 | A section to mention the source of the strategy 35 | or the paper from which the strategy was taken. 36 | The section must start with the Names section. 37 | For example:: 38 | 39 | class TitForTat(Player): 40 | """ 41 | A player starts by cooperating and then mimics the 42 | previous action of the opponent. 43 | 44 | Names: 45 | 46 | - Rapoport's strategy: [Axelrod1980]_ 47 | - TitForTat: [Axelrod1980]_ 48 | """ 49 | 50 | Here, the information written under the Names section 51 | gives the source of the TitForTat strategy. 52 | :code:`[Axelrod1980]_` corresponds to the bibliographic item in 53 | :code:`docs/reference/bibliography.rst`. If you are using a source 54 | that is not in the bibliography please add it. 55 | -------------------------------------------------------------------------------- /docs/how-to/contributing/strategy/index.rst: -------------------------------------------------------------------------------- 1 | Contributing a strategy 2 | ======================= 3 | 4 | This section contains a variety of tutorials that should help you contribute a 5 | new strategy to the library. 6 | 7 | Contents: 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | instructions.rst 13 | writing_the_new_strategy.rst 14 | docstrings.rst 15 | adding_the_new_strategy.rst 16 | classifying_the_new_strategy.rst 17 | writing_test_for_the_new_strategy.rst 18 | -------------------------------------------------------------------------------- /docs/how-to/contributing/strategy/instructions.rst: -------------------------------------------------------------------------------- 1 | Instructions 2 | ============ 3 | 4 | Here is the file structure for the Axelrod repository:: 5 | 6 | . 7 | ├── axelrod 8 | │ └── __init__.py 9 | │ └── ecosystem.py 10 | │ └── game.py 11 | │ └── player.py 12 | │ └── plot.py 13 | │ └── result_set.py 14 | │ └── round_robin.py 15 | │ └── tournament.py 16 | │ └── /strategies/ 17 | │ └── __init__.py 18 | │ └── _strategies.py 19 | │ └── cooperator.py 20 | │ └── defector.py 21 | │ └── grudger.py 22 | │ └── titfortat.py 23 | │ └── gobymajority.py 24 | │ └── ... 25 | │ └── /tests/ 26 | │ └── integration 27 | │ └── strategies 28 | │ └── unit 29 | │ └── test_*.py 30 | └── README.md 31 | 32 | To contribute a strategy you need to follow as many of the following steps as possible: 33 | 34 | 1. Fork the `github repository `_. 35 | 2. Add a :code:`.py` file to the strategies directory or add a 36 | strategy to a pre-existing :code:`.py` file. 37 | 3. Update the :code:`./axelrod/strategies/_strategies.py` file. 38 | 4. If you created a new :code:`.py` file add it to 39 | :code:`.docs/reference/all_strategies.rst`. 40 | 5. Write some unit tests in the :code:`./axelrod/tests/strategies/` directory. 41 | 6. This one is also optional: ping us a message and we'll add you to the 42 | Contributors team. This would add an Axelrod-Python organisation badge to 43 | your profile. 44 | 7. Send us a pull request.
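As a rough illustration of step 2 above, a strategy module contains a :code:`Player` subclass along the lines of the following sketch (the class name and classifier values here are purely illustrative; the pages that follow describe each part in detail)::

    import axelrod as axl


    class MyNewStrategy(axl.Player):
        """An illustrative player that always cooperates."""

        name = "My New Strategy"
        classifier = {
            "memory_depth": 0,
            "stochastic": False,
            "makes_use_of": set(),
            "long_run_time": False,
            "inspects_source": False,
            "manipulates_source": False,
            "manipulates_state": False,
        }

        def strategy(self, opponent):
            # Ignore the opponent's history and always cooperate.
            return axl.Action.C
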
45 | 46 | **If you would like a hand with any of the above please do get in touch: we're 47 | always delighted to have new strategies.** 48 | -------------------------------------------------------------------------------- /docs/how-to/create_approximate_moran_processes.rst: -------------------------------------------------------------------------------- 1 | .. _approximate-moran-process: 2 | 3 | Create Approximate Moran Process 4 | ================================ 5 | 6 | Due to the high computational cost of a single Moran process, an approximate 7 | Moran process is implemented that can make use of cached outcomes of games. The 8 | following code snippet will generate a Moran process in which the outcomes of 9 | the matches played by a :code:`Random: 0.5` are sampled from one possible 10 | outcome against each opponent (:code:`Defector` and :code:`Random: 0.5`). First 11 | the cache is built by passing counter objects of outcomes:: 12 | 13 | >>> import axelrod as axl 14 | >>> from collections import Counter 15 | >>> cached_outcomes = {} 16 | >>> cached_outcomes[("Random: 0.5", "Defector")] = axl.Pdf(Counter([(1, 1)])) 17 | >>> cached_outcomes[("Random: 0.5", "Random: 0.5")] = axl.Pdf(Counter([(3, 3)])) 18 | >>> cached_outcomes[("Defector", "Defector")] = axl.Pdf(Counter([(1, 1)])) 19 | 20 | Now let us create an Approximate Moran Process:: 21 | 22 | >>> players = [axl.Defector(), axl.Random(), axl.Random()] 23 | >>> amp = axl.ApproximateMoranProcess(players, cached_outcomes, seed=5) 24 | >>> results = amp.play() 25 | >>> amp.population_distribution() 26 | Counter({'Random: 0.5': 3}) 27 | 28 | Note that by nature of being an approximation, it's possible that the results of an 29 | `ApproximateMoranProcess` may not always match the results of a standard `MoranProcess`, 30 | even for the same random seed. We see that, for this random seed, the :code:`Random: 0.5` 31 | won this Moran process. This is not what happens in a standard Moran process where the 32 | :code:`Random: 0.5` player will not win:: 33 | 34 | >>> mp = axl.MoranProcess(players, seed=5) 35 | >>> results = mp.play() 36 | >>> mp.population_distribution() 37 | Counter({'Defector': 3}) 38 | -------------------------------------------------------------------------------- /docs/how-to/create_moran_processes_on_graphs.rst: -------------------------------------------------------------------------------- 1 | .. _moran-process-on-graphs: 2 | 3 | Create Moran Processes On Graphs 4 | ================================ 5 | 6 | The library also provides a graph-based Moran process [Shakarian2013]_ with 7 | :code:`MoranProcess`. To use this feature you must supply at least one 8 | :code:`Axelrod.graph.Graph` object, which can be initialized with just a list of 9 | edges:: 10 | 11 | edges = [(source_1, target1), (source2, target2), ...] 12 | 13 | The nodes can be any hashable object (integers, strings, etc.). For example:: 14 | 15 | >>> import axelrod as axl 16 | >>> from axelrod.graph import Graph 17 | >>> edges = [(0, 1), (1, 2), (2, 3), (3, 1)] 18 | >>> graph = Graph(edges) 19 | 20 | Graphs are undirected by default but you can pass :code:`directed=True` to 21 | create a directed graph. Various intermediates such as the list of neighbors 22 | are cached for efficiency by the graph object. 23 | 24 | A Moran process can be invoked with one or two graphs. The first graph, the 25 | *interaction graph*, dictates how players are matched up in the scoring phase. 26 | Each player plays a match with each neighbor. 
The second graph dictates how 27 | players replace one another during reproduction. When an individual is selected to 28 | reproduce, it replaces one of its neighbors in the *reproduction graph*. If only 29 | one graph is supplied to the process, the two graphs are assumed to be the same. 30 | 31 | To create a graph-based Moran process, use a graph as follows:: 32 | 33 | >>> from axelrod.graph import Graph 34 | >>> edges = [(0, 1), (1, 2), (2, 3), (3, 1)] 35 | >>> graph = Graph(edges) 36 | >>> players = [axl.Cooperator(), axl.Cooperator(), axl.Cooperator(), axl.Defector()] 37 | >>> mp = axl.MoranProcess(players, interaction_graph=graph, seed=40) 38 | >>> results = mp.play() 39 | >>> mp.population_distribution() 40 | Counter({'Defector': 4}) 41 | 42 | You can supply the :code:`reproduction_graph` as a keyword argument. The 43 | standard Moran process is equivalent to using a complete graph with no loops 44 | for the :code:`interaction_graph` and with loops for the 45 | :code:`reproduction_graph`. 46 |
-------------------------------------------------------------------------------- /docs/how-to/create_spatial_tournaments.rst: -------------------------------------------------------------------------------- 1 | Create spatial tournaments 2 | ========================== 3 | 4 | A spatial tournament is defined on a graph where the nodes correspond to players 5 | and edges define whether or not a given player pair will have a match. 6 | 7 | The initial work on spatial tournaments was done by Nowak and May in a 1992 8 | paper: [Nowak1992]_. 9 | 10 | Additionally, Szabó and Fáth in their 2007 paper [Szabo2007]_ consider a variety 11 | of graphs, such as lattices, small world, scale-free graphs and evolving 12 | networks. 13 | 14 | Let's create a tournament where :code:`Cooperator` and :code:`Defector` do not 15 | play each other and neither do :code:`TitForTat` and :code:`Grudger`: 16 | 17 | .. image:: _static/spatial_tournaments/spatial.png 18 | :width: 80% 19 | :align: center 20 | 21 | Note that the edges have to be given as a list of tuples of player 22 | indices:: 23 | 24 | >>> import axelrod as axl 25 | >>> players = [axl.Cooperator(), axl.Defector(), 26 | ... axl.TitForTat(), axl.Grudger()] 27 | >>> edges = [(0, 2), (0, 3), (1, 2), (1, 3)] 28 | 29 | To create a spatial tournament you pass the :code:`edges` to the 30 | :code:`Tournament` class:: 31 | 32 | >>> spatial_tournament = axl.Tournament(players, edges=edges) 33 | >>> results = spatial_tournament.play() 34 | 35 | We can plot the results:: 36 | 37 | >>> plot = axl.Plot(results) 38 | >>> p = plot.boxplot() 39 | >>> p.show() 40 | 41 | .. image:: _static/spatial_tournaments/spatial_results.png 42 | :width: 50% 43 | :align: center 44 | 45 | We can, like any other tournament, obtain the ranks for our players:: 46 | 47 | >>> results.ranked_names 48 | ['Cooperator', 'Tit For Tat', 'Grudger', 'Defector'] 49 | 50 | Let's run a small tournament of 2 :code:`turns` and 2 :code:`repetitions` 51 | and obtain the interactions:: 52 | 53 | >>> spatial_tournament = axl.Tournament(players, turns=2, repetitions=2, edges=edges) 54 | >>> results = spatial_tournament.play() 55 | >>> results.payoffs 56 | [[[], [], [3.0, 3.0], [3.0, 3.0]], [[], [], [3.0, 3.0], [3.0, 3.0]], [[3.0, 3.0], [0.5, 0.5], [], []], [[3.0, 3.0], [0.5, 0.5], [], []]] 57 | 58 | As anticipated, not all players interact with each other.
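If the topology already exists as a graph object, for example one built with the networkx library (networkx is not a dependency of the Axelrod library and is used here purely as an illustrative assumption), its edge list can be converted into the list of index tuples that :code:`Tournament` expects. A minimal sketch::

    >>> import networkx as nx  # doctest: +SKIP
    >>> cycle = nx.cycle_graph(4)  # hypothetical graph: players 0-1-2-3 joined in a cycle  # doctest: +SKIP
    >>> cycle_edges = [(i, j) for i, j in cycle.edges()]  # doctest: +SKIP
    >>> cycle_tournament = axl.Tournament(players, turns=2, repetitions=2, edges=cycle_edges)  # doctest: +SKIP

Any graph can be used in this way, provided each node corresponds to the index of a player in the :code:`players` list.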
59 | 60 | It is also possible to create a probabilistic ending spatial tournament:: 61 | 62 | >>> prob_end_spatial_tournament = axl.Tournament(players, edges=edges, prob_end=.1, repetitions=1, seed=10) 63 | >>> prob_end_results = prob_end_spatial_tournament.play() 64 | 65 | We see that the match lengths are no longer all equal:: 66 | 67 | >>> prob_end_results.match_lengths 68 | [[[0, 0, 20.0, 1.0], [0, 0, 46.0, 13.0], [20.0, 46.0, 0, 0], [1.0, 13.0, 0, 0]]] 69 | -------------------------------------------------------------------------------- /docs/how-to/evolve_players.rst: -------------------------------------------------------------------------------- 1 | .. _evolvable_players: 2 | 3 | Evolve Players 4 | ============== 5 | 6 | Several strategies in the library derive from :code:`EvolvablePlayer` which specifies methods 7 | allowing evolutionary or particle swarm algorithms to be used with these strategies. The 8 | `Axelrod Dojo library `_ [Axelrod1980]_ 9 | contains implementations of both algorithms for use with the Axelrod library. Examples include 10 | FSMPlayers, ANN (neural networks), and LookerUp and Gambler (lookup tables). 11 | 12 | New :code:`EvolvablePlayer` subclasses can be added to the library. Any strategy that can 13 | define :code:`mutation` and :code:`crossover` methods can be used with the evolutionary algorithm 14 | and the atomic mutation version of the Moran process. To use the particle swarm algorithms, methods 15 | to serialize the strategy to and from a vector of floats must be defined. 16 | 17 | Moran Process: Atomic Mutation for Evolvable Players 18 | ---------------------------------------------------- 19 | 20 | Additionally, the Moran process implementation supports a second style of mutation suitable for 21 | evolving new strategies utilizing the :code:`EvolvablePlayer` class via its :code:`mutate` method. 22 | This is in contrast to the transitional mutation that selects one of the other player types rather than (possibly) 23 | generating a new player variant. To use this mutation style set `mutation_method=atomic` in the initialisation 24 | of the Moran process:: 25 | 26 | >>> import axelrod as axl 27 | >>> C = axl.Action.C 28 | >>> players = [axl.EvolvableFSMPlayer(num_states=2, initial_state=1, initial_action=C) for _ in range(5)] 29 | >>> mp = axl.MoranProcess(players, turns=10, mutation_method="atomic", seed=1) 30 | >>> population = mp.play() # doctest: +SKIP 31 | 32 | Note that this may cause the Moran process to fail to converge, if the mutation rates are very high or the 33 | population size very large. See :ref:`moran-process` for more information. 34 | 35 | Reproducible Seeding 36 | -------------------- 37 | 38 | :code:`EvolvablePlayers` are inherently stochastic. For reproducibility of results, they can be seeded. When 39 | using the Moran process, a process level seed is sufficient. Child seeds will be created and propagated 40 | in a reproducible way. If initialized without a seed, an :code:`EvolvablePlayer` will be given a 41 | random seed in a non-reproducible way. 42 | -------------------------------------------------------------------------------- /docs/how-to/include_noise.rst: -------------------------------------------------------------------------------- 1 | Include noise 2 | ============= 3 | 4 | A common variation on iterated prisoner’s dilemma tournaments is to add 5 | stochasticity in the choice of actions, simply called noise. 
This noise is 6 | introduced by flipping plays between C and D with some probability that is 7 | applied to all plays after they are delivered by the player [Bendor1993]_. 8 | 9 | The presence of this persistent background noise causes some strategies to 10 | behave substantially differently. For example, :code:`TitForTat` can fall into 11 | defection loops with itself when there is noise. While :code:`TitForTat` would 12 | usually cooperate well with itself:: 13 | 14 | C C C C C ... 15 | C C C C C ... 16 | 17 | Noise can cause a C to flip to a D (or vice versa), disrupting the cooperative 18 | chain:: 19 | 20 | C C C D C D C D D D ... 21 | C C C C D C D D D D ... 22 | 23 | To create a noisy tournament you simply need to add the `noise` argument:: 24 | 25 | >>> import axelrod as axl 26 | >>> players = [axl.Cooperator(), axl.Defector(), 27 | ... axl.TitForTat(), axl.Grudger()] 28 | >>> noise = 0.1 29 | >>> tournament = axl.Tournament(players, noise=noise) 30 | >>> results = tournament.play() 31 | >>> plot = axl.Plot(results) 32 | >>> p = plot.boxplot() 33 | >>> p.show() 34 | 35 | .. image:: _static/noisy_tournaments/demo_strategies_noisy_boxplot.svg 36 | :width: 50% 37 | :align: center 38 | 39 | Here is how the distribution of wins now looks:: 40 | 41 | >>> p = plot.winplot() 42 | >>> p.show() 43 | 44 | .. image:: _static/noisy_tournaments/demo_strategies_noisy_winplot.svg 45 | :width: 50% 46 | :align: center 47 |
-------------------------------------------------------------------------------- /docs/how-to/include_probabilistic_endings.rst: -------------------------------------------------------------------------------- 1 | Include probabilistic endings 2 | ============================= 3 | 4 | It is possible to create a tournament where the length of each Match is not 5 | constant for all encounters: after each turn the Match ends with a given 6 | probability, [Axelrod1980b]_:: 7 | 8 | >>> import axelrod as axl 9 | >>> players = [axl.Cooperator(), axl.Defector(), 10 | ... axl.TitForTat(), axl.Grudger()] 11 | >>> tournament = axl.Tournament(players, prob_end=0.5) 12 | 13 | 14 | We can view the results in a similar way as described in 15 | :ref:`tournament-results`:: 16 | 17 | >>> results = tournament.play() 18 | >>> m = results.payoff_matrix 19 | >>> for row in m: # doctest: +SKIP 20 | ... print([round(ele, 1) for ele in row]) # Rounding output # doctest: +SKIP 21 | 22 | [3.0, 0.0, 3.0, 3.0] 23 | [5.0, 1.0, 3.7, 3.6] 24 | [3.0, 0.3, 3.0, 3.0] 25 | [3.0, 0.4, 3.0, 3.0] 26 | 27 | 28 | We see that :code:`Cooperator` always scores 0 against :code:`Defector` but 29 | other scores seem variable as they are affected by the length of each match. 30 | 31 | We can (as before) obtain the ranks for our players:: 32 | 33 | >>> results.ranked_names # doctest: +SKIP 34 | ['Defector', 'Tit For Tat', 'Grudger', 'Cooperator'] 35 | 36 | We can plot the results:: 37 | 38 | >>> plot = axl.Plot(results) 39 | >>> p = plot.boxplot() 40 | >>> p.show() 41 | 42 | .. image:: _static/prob_end_tournaments/prob_end_boxplot.svg 43 | :width: 50% 44 | :align: center 45 | 46 | We can also view the length of the matches played by each player. The plot shows 47 | that the length of each match (for each player) is not the same. The median 48 | length is 4, which is the expected value with the probability of a match ending 49 | being :code:`0.5`. 50 | 51 | >>> p = plot.lengthplot() 52 | >>> p.show() 53 | 54 | ..
image:: _static/prob_end_tournaments/prob_end_lengthplot.svg 55 | :width: 50% 56 | :align: center 57 | -------------------------------------------------------------------------------- /docs/how-to/index.rst: -------------------------------------------------------------------------------- 1 | How to 2 | ====== 3 | 4 | This section contains short descriptions of how to accomplish specific tasks 5 | with the Axelrod library. 6 | 7 | .. toctree:: 8 | :maxdepth: 1 9 | 10 | include_noise.rst 11 | include_probabilistic_endings.rst 12 | create_spatial_tournaments.rst 13 | create_moran_processes_on_graphs.rst 14 | create_approximate_moran_processes.rst 15 | calculate_morality_metrics.rst 16 | run_axelrods_ecological_variant.rst 17 | fingerprint.rst 18 | evolve_players.rst 19 | access_collections_of_strategies.rst 20 | classify_strategies.rst 21 | use_strategy_transformers.rst 22 | access_tournament_results.rst 23 | read_and_write_interactions.rst 24 | use_parallel_processing.rst 25 | use_a_cache.rst 26 | use_different_stage_games.rst 27 | use_custom_matches.rst 28 | set_a_seed.rst 29 | set_player_information.rst 30 | check_player_equality.rst 31 | cite_the_library.rst 32 | contributing/index.rst 33 | -------------------------------------------------------------------------------- /docs/how-to/run_axelrods_ecological_variant.rst: -------------------------------------------------------------------------------- 1 | .. _ecological-variant: 2 | 3 | Run Axelrod's Ecological Variant 4 | ================================ 5 | 6 | In Axelrod's original work an ecological approach based on the payoff matrix of 7 | the tournament was used to study the evolutionary stability of each strategy. 8 | Whilst this bears some comparison to the :ref:`moran-process`, the latter is 9 | much more widely used in the literature. 10 | 11 | To study the evolutionary stability of each strategy it is possible to create an 12 | ecosystem based on the payoff matrix of a tournament:: 13 | 14 | >>> import axelrod as axl 15 | >>> players = [axl.Cooperator(), axl.Defector(), 16 | ... axl.TitForTat(), axl.Grudger(), 17 | ... axl.Random()] 18 | >>> tournament = axl.Tournament(players) 19 | >>> results = tournament.play() 20 | >>> eco = axl.Ecosystem(results) 21 | >>> eco.reproduce(100) # Evolve the population over 100 time steps 22 | 23 | Here is how we obtain a nice stackplot of the system evolving over time:: 24 | 25 | >>> plot = axl.Plot(results) 26 | >>> p = plot.stackplot(eco) 27 | >>> p.show() 28 | 29 | .. image:: _static/ecological_variant/demo_strategies_stackplot.svg 30 | :width: 50% 31 | :align: center 32 | -------------------------------------------------------------------------------- /docs/how-to/set_player_information.rst: -------------------------------------------------------------------------------- 1 | .. _player_information: 2 | 3 | Set Player information 4 | ====================== 5 | 6 | It is possible to determine what information players know about their matches. 7 | By default all known information is given. For example let us create a match 8 | with 5 turns between :code:`FirstBySteinAndRapoport` and :code:`Alternator`. 
The 9 | latter of these two always defects on the last 2 turns:: 10 | 11 | >>> import axelrod as axl 12 | >>> players = (axl.Alternator(), axl.FirstBySteinAndRapoport()) 13 | >>> axl.Match(players, turns=5).play() 14 | [(C, C), (D, C), (C, C), (D, D), (C, D)] 15 | 16 | We can play the same match but let us tell the players that the match lasts 6 17 | turns:: 18 | 19 | >>> axl.Match(players, turns=5, match_attributes={"length": 6}).play() 20 | [(C, C), (D, C), (C, C), (D, C), (C, D)] 21 | 22 | We can also pass this information to a tournament. Let us create a 23 | tournament with 5 turns but ensure the players believe the match length is 24 | infinite (unknown):: 25 | 26 | >>> tournament = axl.Tournament(players, turns=5, 27 | ... match_attributes={"length": float('inf')}) 28 | 29 | The :code:`match_attributes` dictionary can also be used to pass :code:`game` 30 | and :code:`noise`. 31 | -------------------------------------------------------------------------------- /docs/how-to/use_custom_matches.rst: -------------------------------------------------------------------------------- 1 | .. _use-custom-matches: 2 | 3 | Use custom matches 4 | ================== 5 | 6 | The Moran process supports custom match classes. Below 7 | creates a new class of a match where both players end with a score of 2:: 8 | 9 | 10 | >>> import axelrod as axl 11 | >>> class MassBaseMatch(axl.Match): 12 | ... """Axelrod Match object with a modified final score function to enable mass to influence the final score as a multiplier""" 13 | ... def final_score_per_turn(self): 14 | ... return 2, 2 15 | 16 | We then create a Moran process with the custom match class by passing our custom 17 | :code:`MassBaseMatch` to the Moran process with the :code:`match_class` keyword 18 | argument:: 19 | 20 | >>> players = [axl.Cooperator(), axl.Defector(), axl.TitForTat(), axl.Grudger()] 21 | >>> mp = axl.MoranProcess(players=players, match_class=MassBaseMatch, seed=0) 22 | >>> population = mp.play() 23 | >>> print(mp.winning_strategy_name) 24 | Defector 25 | -------------------------------------------------------------------------------- /docs/how-to/use_parallel_processing.rst: -------------------------------------------------------------------------------- 1 | Use Parallel processing 2 | ======================= 3 | 4 | When dealing with large tournaments on a multi core machine it is possible to 5 | run the tournament in parallel **although this is not currently supported on 6 | Windows**. Using :code:`processes=0` will simply use all available cores:: 7 | 8 | >>> import axelrod as axl 9 | >>> players = [s() for s in axl.basic_strategies] 10 | >>> tournament = axl.Tournament(players, turns=4, repetitions=2) 11 | >>> results = tournament.play(processes=0) 12 | -------------------------------------------------------------------------------- /docs/reference/index.rst: -------------------------------------------------------------------------------- 1 | Reference 2 | ========= 3 | 4 | This section is the reference guide for the various components of the library. 5 | 6 | .. 
toctree:: 7 | :maxdepth: 1 8 | 9 | bibliography.rst 10 | strategy_index.rst 11 | glossary.rst 12 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | docutils>=0.18.1 2 | numpy==1.24.3 # numpy isn't mocked due to complex use in doctests 3 | mock>=5.1.0 4 | -------------------------------------------------------------------------------- /docs/tutorials/index.rst: -------------------------------------------------------------------------------- 1 | .. _tutorials: 2 | 3 | Tutorials 4 | ========= 5 | 6 | This section contains a variety of tutorials related to the Axelrod library. 7 | 8 | Contents: 9 | 10 | .. toctree:: 11 | :maxdepth: 2 12 | 13 | new_to_game_theory_and_or_python/index.rst 14 | running_axelrods_first_tournament/index.rst 15 | creating_heterogenous_player_moran_process/index.rst 16 | implement_new_games/index.rst 17 | -------------------------------------------------------------------------------- /docs/tutorials/new_to_game_theory_and_or_python/_static/visualising_results/title_labels_payoff.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Axelrod-Python/Axelrod/8fa56d229a1790787e88ba2b89a9fea276dea3d9/docs/tutorials/new_to_game_theory_and_or_python/_static/visualising_results/title_labels_payoff.png -------------------------------------------------------------------------------- /docs/tutorials/new_to_game_theory_and_or_python/index.rst: -------------------------------------------------------------------------------- 1 | New to Game Theory and/or Python 2 | ================================ 3 | 4 | This section contains a variety of tutorials that should help get you started 5 | with the Axelrod library. 6 | 7 | Contents: 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | installation.rst 13 | match.rst 14 | tournament.rst 15 | summarising_tournaments.rst 16 | visualising_results.rst 17 | moran.rst 18 | -------------------------------------------------------------------------------- /docs/tutorials/new_to_game_theory_and_or_python/installation.rst: -------------------------------------------------------------------------------- 1 | .. _getting-started: 2 | 3 | Installation 4 | ============ 5 | 6 | The library requires Python 3.5. 7 | 8 | The simplest way to install the package is to obtain it from the PyPi 9 | repository:: 10 | 11 | $ pip install axelrod 12 | 13 | If you want to have access to the manual Human strategy for interactive play, use the following command to also install `prompt_toolkit`:: 14 | 15 | $ pip install axelrod[Human] 16 | 17 | You can also build it from source if you would like to:: 18 | 19 | $ git clone https://github.com/Axelrod-Python/Axelrod.git 20 | $ cd Axelrod 21 | $ python setup.py install 22 | -------------------------------------------------------------------------------- /docs/tutorials/new_to_game_theory_and_or_python/summarising_tournaments.rst: -------------------------------------------------------------------------------- 1 | .. _tournament-results-summary: 2 | 3 | Summarising tournament results 4 | ============================== 5 | 6 | 7 | As shown in :ref:`creating_tournaments` let us create a tournament:: 8 | 9 | >>> import axelrod as axl 10 | >>> players = [axl.Cooperator(), axl.Defector(), 11 | ... 
axl.TitForTat(), axl.Grudger()] 12 | >>> tournament = axl.Tournament(players, turns=10, repetitions=3) 13 | >>> results = tournament.play() 14 | 15 | The results set can return a list of named tuples, ordered by strategy rank 16 | that summarises the results of the tournament:: 17 | 18 | >>> summary = results.summarise() 19 | >>> import pprint 20 | >>> pprint.pprint(summary) 21 | [Player(Rank=0, Name='Defector', Median_score=2.6..., Cooperation_rating=0.0, Wins=3.0, Initial_C_rate=0.0, Original_index=1, CC_rate=...), 22 | Player(Rank=1, Name='Tit For Tat', Median_score=2.3..., Cooperation_rating=0..., Wins=0.0, Initial_C_rate=1.0, Original_index=2, CC_rate=...), 23 | Player(Rank=2, Name='Grudger', Median_score=2.3..., Cooperation_rating=0..., Wins=0.0, Initial_C_rate=1.0, Original_index=3, CC_rate=...), 24 | Player(Rank=3, Name='Cooperator', Median_score=2.0..., Cooperation_rating=1.0, Wins=0.0, Initial_C_rate=1.0, Original_index=0, CC_rate=...)] 25 | 26 | It is also possible to write this data directly to a csv file using the 27 | `write_summary` method:: 28 | 29 | >>> results.write_summary('summary.csv') 30 | >>> import csv 31 | >>> with open('summary.csv', 'r') as outfile: 32 | ... csvreader = csv.reader(outfile) 33 | ... for row in csvreader: 34 | ... print(row) 35 | ['Rank', 'Name', 'Median_score', 'Cooperation_rating', 'Wins', 'Initial_C_rate', 'Original_index', 'CC_rate', 'CD_rate', 'DC_rate', 'DD_rate', 'CC_to_C_rate', 'CD_to_C_rate', 'DC_to_C_rate', 'DD_to_C_rate'] 36 | ['0', 'Defector', ...] 37 | ['1', 'Tit For Tat', ...] 38 | ['2', 'Grudger', ...] 39 | ['3', 'Cooperator', ...] 40 | 41 | 42 | The result set class computes a large number of detailed outcomes read about 43 | those in :ref:`tournament-results`. 44 | -------------------------------------------------------------------------------- /docs/tutorials/new_to_game_theory_and_or_python/tournament.rst: -------------------------------------------------------------------------------- 1 | .. _creating_tournaments: 2 | 3 | Creating and running a simple tournament 4 | ======================================== 5 | 6 | The following lines of code creates a list players playing simple 7 | strategies:: 8 | 9 | >>> import axelrod as axl 10 | >>> players = [axl.Cooperator(), axl.Defector(), 11 | ... axl.TitForTat(), axl.Grudger()] 12 | >>> players 13 | [Cooperator, Defector, Tit For Tat, Grudger] 14 | 15 | We can now create a tournament, play it, save the results and view the rank of 16 | each player:: 17 | 18 | >>> tournament = axl.Tournament(players) 19 | >>> results = tournament.play() 20 | >>> results.ranked_names 21 | ['Defector', 'Tit For Tat', 'Grudger', 'Cooperator'] 22 | 23 | We can also plot these results:: 24 | 25 | >>> plot = axl.Plot(results) 26 | >>> p = plot.boxplot() 27 | >>> p.show() 28 | 29 | .. image:: _static/getting_started/demo_deterministic_strategies_boxplot.svg 30 | :width: 50% 31 | :align: center 32 | 33 | Note that in this case none of our strategies are stochastic so the boxplot 34 | shows that there is no variation. Take a look at the :ref:`visualising-results` 35 | section to see plots showing a stochastic effect. 36 | -------------------------------------------------------------------------------- /docs/tutorials/new_to_game_theory_and_or_python/visualising_results.rst: -------------------------------------------------------------------------------- 1 | .. 
_visualising-results: 2 | 3 | Visualising results 4 | =================== 5 | 6 | This tutorial will show you briefly how to visualise some basic results 7 | 8 | Visualising the results of the tournament 9 | ----------------------------------------- 10 | 11 | As shown in :ref:`creating_tournaments`, let us create a tournament, but this 12 | time we will include a player that acts randomly:: 13 | 14 | >>> import axelrod as axl 15 | >>> players = [axl.Cooperator(), axl.Defector(), 16 | ... axl.TitForTat(), axl.Grudger()] 17 | >>> players.append(axl.Random()) 18 | >>> tournament = axl.Tournament(players) 19 | >>> results = tournament.play() 20 | 21 | We can view these results (which helps visualise the stochastic effects):: 22 | 23 | >>> plot = axl.Plot(results) 24 | >>> p = plot.boxplot() 25 | >>> p.show() 26 | 27 | .. image:: _static/visualising_results/demo_strategies_boxplot.svg 28 | :width: 50% 29 | :align: center 30 | 31 | Visualising the distributions of wins 32 | ------------------------------------- 33 | 34 | We can view the distributions of wins for each strategy:: 35 | 36 | >>> p = plot.winplot() 37 | >>> p.show() 38 | 39 | .. image:: _static/visualising_results/demo_strategies_winplot.svg 40 | :width: 50% 41 | :align: center 42 | 43 | Visualising the payoff matrix 44 | ----------------------------- 45 | 46 | We can also easily view the payoff matrix described in 47 | :ref:`tournament-results`, this becomes particularly useful when viewing the 48 | outputs of tournaments with a large number of strategies:: 49 | 50 | >>> p = plot.payoff() 51 | >>> p.show() 52 | 53 | .. image:: _static/visualising_results/demo_strategies_payoff.svg 54 | :width: 50% 55 | :align: center 56 | 57 | Saving all plots 58 | ---------------- 59 | 60 | The :code:`axelrod.Plot` class has a method: :code:`save_all_plots` that will 61 | save all the above plots to file. 62 | 63 | Passing various objects to plot 64 | ------------------------------- 65 | 66 | The library give access to underlying matplotlib axes objects of each plot, thus 67 | the user can easily modify various aspects of a plot:: 68 | 69 | >>> import matplotlib.pyplot as plt 70 | >>> _, ax = plt.subplots() 71 | >>> title = ax.set_title('Payoff') 72 | >>> xlabel = ax.set_xlabel('Strategies') 73 | >>> p = plot.boxplot(ax=ax) 74 | >>> p.show() 75 | 76 | .. image:: _static/visualising_results/title_labels_payoff.png 77 | :width: 50% 78 | :align: center 79 | -------------------------------------------------------------------------------- /docs/tutorials/running_axelrods_first_tournament/_static/running_axelrods_first_tournament/main.py: -------------------------------------------------------------------------------- 1 | """ 2 | Script to obtain plots for the running axelrod tournament tutorial. 
3 | """ 4 | 5 | import axelrod as axl 6 | import matplotlib.pyplot as plt 7 | 8 | first_tournament_participants_ordered_by_reported_rank = [ 9 | s() for s in axl.axelrod_first_strategies 10 | ] 11 | number_of_strategies = len( 12 | first_tournament_participants_ordered_by_reported_rank 13 | ) 14 | tournament = axl.Tournament( 15 | players=first_tournament_participants_ordered_by_reported_rank, 16 | turns=200, 17 | repetitions=5, 18 | seed=0, 19 | ) 20 | results = tournament.play() 21 | 22 | plt.figure(figsize=(15, 6)) 23 | plt.plot((0, 15), (0, 15), color="grey", linestyle="--") 24 | for original_rank, strategy in enumerate( 25 | first_tournament_participants_ordered_by_reported_rank 26 | ): 27 | rank = results.ranked_names.index(str(strategy)) 28 | if rank == original_rank: 29 | symbol = "+" 30 | plt.plot((rank, rank), (rank, 0), color="grey") 31 | else: 32 | symbol = "o" 33 | plt.scatter([rank], [original_rank], marker=symbol, color="black", s=50) 34 | plt.xticks(range(number_of_strategies), results.ranked_names, rotation=90) 35 | plt.ylabel("Reported rank") 36 | plt.xlabel("Reproduced rank") 37 | plt.savefig("rank_comparison.svg") 38 | 39 | plot = axl.Plot(results) 40 | p = plot.boxplot() 41 | p.savefig("boxplot.svg") 42 | -------------------------------------------------------------------------------- /doctests.py: -------------------------------------------------------------------------------- 1 | import doctest 2 | import os 3 | import unittest 4 | import warnings 5 | 6 | 7 | # Note loader and ignore are required arguments for unittest even if unused. 8 | def load_tests(loader, tests, ignore): 9 | """ 10 | Locates and returns a collection of unittests in a TestSuite object 11 | Parameters 12 | ---------- 13 | loader : 14 | A required but unused parameter. 15 | tests : 16 | A unittest TestSuite object for collecting the needed test cases. 17 | ignore : 18 | A required but unused parameter. 19 | Returns 20 | ------- 21 | tests : 22 | A unittest TestSuite object that holds test cases. 23 | """ 24 | for root, dirs, files in os.walk("."): 25 | for f in files: 26 | if f.endswith(".rst"): 27 | tests.addTests( 28 | doctest.DocFileSuite( 29 | # ELLIPSIS option tells doctest to ignore portions of the verification value. 
30 | os.path.join(root, f), 31 | optionflags=doctest.ELLIPSIS, 32 | ) 33 | ) 34 | 35 | return tests 36 | 37 | 38 | if __name__ == "__main__": 39 | warnings.simplefilter("ignore") 40 | unittest.main() 41 | -------------------------------------------------------------------------------- /matplotlibrc: -------------------------------------------------------------------------------- 1 | # This is to ensure the matplotlib backend is set appropriately for CI 2 | # See https://github.com/Axelrod-Python/Axelrod/issues/1187 3 | backend : Agg 4 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "Axelrod" 7 | dynamic = ["version"] 8 | description = "Reproduce the Axelrod iterated prisoners dilemma tournament" 9 | readme = "README.rst" 10 | license = {file = "LICENSE.txt"} 11 | requires-python = ">=3.6" 12 | authors = [ 13 | { name = "Vince Knight", email = "axelrod-python@googlegroups.com" }, 14 | { name = "Owen Campbell" }, 15 | { name = "Karol Langner" }, 16 | { name = "Marc Harper" }, 17 | ] 18 | classifiers = [ 19 | "Programming Language :: Python :: 3 :: Only", 20 | "Programming Language :: Python :: 3.11", 21 | "Programming Language :: Python :: 3.12", 22 | "License :: OSI Approved :: MIT License", 23 | ] 24 | dependencies = [ 25 | "cloudpickle>=0.2.2", 26 | "dask[dataframe]>=2.9.2", 27 | "fsspec>=0.6.0", 28 | "matplotlib>=3.0.3", 29 | "numpy>=1.26.4", 30 | "pandas>=1.0.0", 31 | "pyyaml>=5.1", 32 | "scipy>=1.3.3", 33 | "toolz>=0.8.2", 34 | "tqdm>=4.39.0", 35 | ] 36 | 37 | [project.optional-dependencies] 38 | development = [ 39 | "hypothesis==5.19.3", 40 | ] 41 | human = [ 42 | "prompt-toolkit>=3.0", 43 | ] 44 | 45 | [project.urls] 46 | Homepage = "http://axelrod.readthedocs.org/" 47 | 48 | [tool.hatch.version] 49 | path = "axelrod/version.py" 50 | 51 | [tool.hatch.build.targets.sdist] 52 | include = [ 53 | "axelrod", 54 | ] 55 | -------------------------------------------------------------------------------- /rebuild_classifier_table.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from axelrod import all_strategies 4 | from axelrod.classifier import all_classifiers, rebuild_classifier_table 5 | 6 | if __name__ == "__main__": 7 | # Change to relative path inside axelrod folder 8 | rebuild_classifier_table(all_classifiers, all_strategies) 9 | -------------------------------------------------------------------------------- /run_mypy.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import sys 3 | 4 | modules = [ 5 | "run_strategy_indexer.py", 6 | "axelrod/action.py", 7 | "axelrod/deterministic_cache.py", 8 | "axelrod/ecosystem.py", 9 | "axelrod/evolvable_player.py", 10 | "axelrod/fingerprint.py", 11 | "axelrod/game.py", 12 | "axelrod/load_data_.py", 13 | "axelrod/mock_player.py", 14 | "axelrod/moran.py", 15 | "axelrod/plot.py", 16 | "axelrod/random_.py", 17 | "axelrod/tournament.py", 18 | "axelrod/strategies/adaptive.py", 19 | "axelrod/strategies/alternator.py", 20 | "axelrod/strategies/ann.py", 21 | "axelrod/strategies/apavlov.py", 22 | "axelrod/strategies/appeaser.py", 23 | "axelrod/strategies/averagecopier.py", 24 | "axelrod/strategies/axelrod_first.py", 25 | "axelrod/strategies/axelrod_second.py", 26 | 
"axelrod/strategies/backstabber.py", 27 | "axelrod/strategies/better_and_better.py", 28 | "axelrod/strategies/calculator.py", 29 | "axelrod/strategies/cooperator.py", 30 | "axelrod/strategies/cycler.py", 31 | "axelrod/strategies/darwin.py", 32 | "axelrod/strategies/defector.py", 33 | "axelrod/strategies/forgiver.py", 34 | "axelrod/strategies/frequency_analyzer.py", 35 | "axelrod/strategies/gradualkiller.py", 36 | "axelrod/strategies/grudger.py", 37 | "axelrod/strategies/grumpy.py", 38 | "axelrod/strategies/handshake.py", 39 | "axelrod/strategies/hunter.py", 40 | "axelrod/strategies/inverse.py", 41 | "axelrod/strategies/mathematicalconstants.py", 42 | "axelrod/strategies/memoryone.py", 43 | "axelrod/strategies/memorytwo.py", 44 | "axelrod/strategies/momentum.py", 45 | "axelrod/strategies/mutual.py", 46 | "axelrod/strategies/negation.py", 47 | "axelrod/strategies/oncebitten.py", 48 | "axelrod/strategies/prober.py", 49 | "axelrod/strategies/punisher.py", 50 | "axelrod/strategies/qlearner.py", 51 | "axelrod/strategies/rand.py", 52 | "axelrod/strategies/titfortat.py", 53 | "axelrod/strategies/hmm.py", 54 | "axelrod/strategies/finite_state_machines.py", 55 | "axelrod/strategies/worse_and_worse.py", 56 | ] 57 | 58 | exit_codes = [] 59 | for module in modules: 60 | rc = subprocess.call( 61 | ["mypy", "--ignore-missing-imports", "--follow-imports", "skip", module] 62 | ) 63 | exit_codes.append(rc) 64 | sys.exit(max(exit_codes)) 65 | -------------------------------------------------------------------------------- /run_strategy_indexer.py: -------------------------------------------------------------------------------- 1 | """ 2 | A script to check that all strategy modules have been included in 3 | `./docs/reference/all_strategies.rst` 4 | """ 5 | 6 | import pathlib 7 | import sys 8 | 9 | default_index_path = pathlib.Path("./docs/reference/strategy_index.rst") 10 | excluded_modules = ("_strategies", "__init__", "_filters") 11 | 12 | 13 | def check_module( 14 | module_path: pathlib.Path, 15 | index_path: pathlib.Path = default_index_path, 16 | excluded: tuple = excluded_modules, 17 | ) -> bool: 18 | """ 19 | Check if a module name is written in the index of strategies. 20 | 21 | Parameters 22 | ---------- 23 | module_path : 24 | A file path for a module file. 25 | index_path : 26 | A file path for the index file where all strategies are auto documented 27 | excluded : 28 | A collection of module names to be ignored 29 | 30 | Returns 31 | ------- 32 | boolean : 33 | True/False if module is referenced. 
34 | 35 | """ 36 | strategies_index = index_path.read_text() 37 | module_name = module_path.stem 38 | if module_name not in excluded and module_name not in strategies_index: 39 | print("{} not in index".format(module_name)) 40 | return False 41 | return True 42 | 43 | 44 | if __name__ == "__main__": 45 | 46 | p = pathlib.Path(".") 47 | modules = p.glob("./axelrod/strategies/*.py") 48 | exit_codes = [] 49 | 50 | for module_path in modules: 51 | exit_codes.append(int(not check_module(module_path))) 52 | 53 | sys.exit(max(exit_codes)) 54 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal = 1 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | import os 3 | import pathlib 4 | from setuptools import setup 5 | 6 | # Read in the requirements files. 7 | requirements = defaultdict(list) 8 | 9 | requirements_directory = pathlib.Path.cwd() / "requirements" 10 | for filename in requirements_directory.glob("*.txt"): 11 | variant = filename.stem 12 | with filename.open() as libraries: 13 | for library in libraries: 14 | if len(library) > 0 and (not library.startswith("-r")): 15 | requirements[variant].append(library.strip()) 16 | 17 | # Grab the default requirements 18 | install_requires = requirements["requirements"] 19 | # Delete the default from the dictionary for the extra variants. 20 | del requirements["requirements"] 21 | extras_require = dict(requirements) 22 | 23 | # Read in long description 24 | with open("README.rst", "r") as f: 25 | long_description = f.read() 26 | 27 | # Read in the version number 28 | exec(open("axelrod/version.py", "r").read()) 29 | 30 | setup( 31 | name="Axelrod", 32 | version=__version__, 33 | install_requires=install_requires, 34 | author="Vince Knight, Owen Campbell, Karol Langner, Marc Harper", 35 | author_email=("axelrod-python@googlegroups.com"), 36 | packages=["axelrod", "axelrod.strategies", "axelrod.data"], 37 | url="http://axelrod.readthedocs.org/", 38 | license="The MIT License (MIT)", 39 | description="Reproduce the Axelrod iterated prisoners dilemma tournament", 40 | long_description=long_description, 41 | long_description_content_type="text/x-rst", 42 | include_package_data=True, 43 | package_data={"": ["axelrod/data/*"]}, 44 | classifiers=[ 45 | "Programming Language :: Python :: 3.9", 46 | "Programming Language :: Python :: 3.10", 47 | "Programming Language :: Python :: 3.11", 48 | "Programming Language :: Python :: 3 :: Only", 49 | ], 50 | python_requires=">=3.6", 51 | extras_require=extras_require, 52 | ) 53 | -------------------------------------------------------------------------------- /test: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python -m unittest discover axelrod/tests/ 3 | python doctests.py 4 | -------------------------------------------------------------------------------- /test_outputs/README.md: -------------------------------------------------------------------------------- 1 | This directory contains output csv files from the test suite. 
2 | -------------------------------------------------------------------------------- /test_outputs/classifier_test.yaml: -------------------------------------------------------------------------------- 1 | Cooperator: 2 | name: Cooperator 3 | Defector: 4 | name: Defector 5 | -------------------------------------------------------------------------------- /test_outputs/expected_test_tournament_no_results.csv: -------------------------------------------------------------------------------- 1 | Interaction index,Player index,Opponent index,Repetition,Player name,Opponent name,Actions 2 | 0,0,0,0,Cooperator,Cooperator,CC 3 | 0,0,0,0,Cooperator,Cooperator,CC 4 | 1,0,0,1,Cooperator,Cooperator,CC 5 | 1,0,0,1,Cooperator,Cooperator,CC 6 | 2,0,1,0,Cooperator,Tit For Tat,CC 7 | 2,1,0,0,Tit For Tat,Cooperator,CC 8 | 3,0,1,1,Cooperator,Tit For Tat,CC 9 | 3,1,0,1,Tit For Tat,Cooperator,CC 10 | 4,0,2,0,Cooperator,Defector,CC 11 | 4,2,0,0,Defector,Cooperator,DD 12 | 5,0,2,1,Cooperator,Defector,CC 13 | 5,2,0,1,Defector,Cooperator,DD 14 | 6,0,3,0,Cooperator,Grudger,CC 15 | 6,3,0,0,Grudger,Cooperator,CC 16 | 7,0,3,1,Cooperator,Grudger,CC 17 | 7,3,0,1,Grudger,Cooperator,CC 18 | 8,0,4,0,Cooperator,Soft Go By Majority,CC 19 | 8,4,0,0,Soft Go By Majority,Cooperator,CC 20 | 9,0,4,1,Cooperator,Soft Go By Majority,CC 21 | 9,4,0,1,Soft Go By Majority,Cooperator,CC 22 | 10,1,1,0,Tit For Tat,Tit For Tat,CC 23 | 10,1,1,0,Tit For Tat,Tit For Tat,CC 24 | 11,1,1,1,Tit For Tat,Tit For Tat,CC 25 | 11,1,1,1,Tit For Tat,Tit For Tat,CC 26 | 12,1,2,0,Tit For Tat,Defector,CD 27 | 12,2,1,0,Defector,Tit For Tat,DD 28 | 13,1,2,1,Tit For Tat,Defector,CD 29 | 13,2,1,1,Defector,Tit For Tat,DD 30 | 14,1,3,0,Tit For Tat,Grudger,CC 31 | 14,3,1,0,Grudger,Tit For Tat,CC 32 | 15,1,3,1,Tit For Tat,Grudger,CC 33 | 15,3,1,1,Grudger,Tit For Tat,CC 34 | 16,1,4,0,Tit For Tat,Soft Go By Majority,CC 35 | 16,4,1,0,Soft Go By Majority,Tit For Tat,CC 36 | 17,1,4,1,Tit For Tat,Soft Go By Majority,CC 37 | 17,4,1,1,Soft Go By Majority,Tit For Tat,CC 38 | 18,2,2,0,Defector,Defector,DD 39 | 18,2,2,0,Defector,Defector,DD 40 | 19,2,2,1,Defector,Defector,DD 41 | 19,2,2,1,Defector,Defector,DD 42 | 20,2,3,0,Defector,Grudger,DD 43 | 20,3,2,0,Grudger,Defector,CD 44 | 21,2,3,1,Defector,Grudger,DD 45 | 21,3,2,1,Grudger,Defector,CD 46 | 22,2,4,0,Defector,Soft Go By Majority,DD 47 | 22,4,2,0,Soft Go By Majority,Defector,CD 48 | 23,2,4,1,Defector,Soft Go By Majority,DD 49 | 23,4,2,1,Soft Go By Majority,Defector,CD 50 | 24,3,3,0,Grudger,Grudger,CC 51 | 24,3,3,0,Grudger,Grudger,CC 52 | 25,3,3,1,Grudger,Grudger,CC 53 | 25,3,3,1,Grudger,Grudger,CC 54 | 26,3,4,0,Grudger,Soft Go By Majority,CC 55 | 26,4,3,0,Soft Go By Majority,Grudger,CC 56 | 27,3,4,1,Grudger,Soft Go By Majority,CC 57 | 27,4,3,1,Soft Go By Majority,Grudger,CC 58 | 28,4,4,0,Soft Go By Majority,Soft Go By Majority,CC 59 | 28,4,4,0,Soft Go By Majority,Soft Go By Majority,CC 60 | 29,4,4,1,Soft Go By Majority,Soft Go By Majority,CC 61 | 29,4,4,1,Soft Go By Majority,Soft Go By Majority,CC 62 | -------------------------------------------------------------------------------- /test_outputs/test_results.csv: -------------------------------------------------------------------------------- 1 | Interaction index,Player index,Opponent index,Repetition,Player name,Opponent name,Actions,Score,Score difference,Turns,Score per turn,Score difference per turn,Win,Initial cooperation,Cooperation count,CC count,CD count,DC count,DD count,CC to C count,CC to D count,CD to C count,CD to D count,DC to C count,DC to D count,DD to C 
count,DD to D count,Good partner 2 | 0,0,1,0,Alternator,Tit For Tat,CDCDC,13,0,5,2.6,0.0,0,True,3,1,2,2,0,0,1,0,1,2,0,0,0,1 3 | 0,1,0,0,Tit For Tat,Alternator,CCDCD,13,0,5,2.6,0.0,0,True,3,1,2,2,0,1,0,0,2,1,0,0,0,1 4 | 1,0,1,1,Alternator,Tit For Tat,CDCDC,13,0,5,2.6,0.0,0,True,3,1,2,2,0,0,1,0,1,2,0,0,0,1 5 | 1,1,0,1,Tit For Tat,Alternator,CCDCD,13,0,5,2.6,0.0,0,True,3,1,2,2,0,1,0,0,2,1,0,0,0,1 6 | 2,0,1,2,Alternator,Tit For Tat,CDCDC,13,0,5,2.6,0.0,0,True,3,1,2,2,0,0,1,0,1,2,0,0,0,1 7 | 2,1,0,2,Tit For Tat,Alternator,CCDCD,13,0,5,2.6,0.0,0,True,3,1,2,2,0,1,0,0,2,1,0,0,0,1 8 | 3,0,2,0,Alternator,Defector,CDCDC,2,-15,5,0.4,-3.0,0,True,3,0,3,0,2,0,0,0,2,0,0,2,0,1 9 | 3,2,0,0,Defector,Alternator,DDDDD,17,15,5,3.4,3.0,1,False,0,0,0,3,2,0,0,0,0,0,2,0,2,0 10 | 4,0,2,1,Alternator,Defector,CDCDC,2,-15,5,0.4,-3.0,0,True,3,0,3,0,2,0,0,0,2,0,0,2,0,1 11 | 4,2,0,1,Defector,Alternator,DDDDD,17,15,5,3.4,3.0,1,False,0,0,0,3,2,0,0,0,0,0,2,0,2,0 12 | 5,0,2,2,Alternator,Defector,CDCDC,2,-15,5,0.4,-3.0,0,True,3,0,3,0,2,0,0,0,2,0,0,2,0,1 13 | 5,2,0,2,Defector,Alternator,DDDDD,17,15,5,3.4,3.0,1,False,0,0,0,3,2,0,0,0,0,0,2,0,2,0 14 | 6,1,2,0,Tit For Tat,Defector,CDDDD,4,-5,5,0.8,-1.0,0,True,1,0,1,0,4,0,0,0,1,0,0,0,3,1 15 | 6,2,1,0,Defector,Tit For Tat,DDDDD,9,5,5,1.8,1.0,1,False,0,0,0,1,4,0,0,0,0,0,1,0,3,0 16 | 7,1,2,1,Tit For Tat,Defector,CDDDD,4,-5,5,0.8,-1.0,0,True,1,0,1,0,4,0,0,0,1,0,0,0,3,1 17 | 7,2,1,1,Defector,Tit For Tat,DDDDD,9,5,5,1.8,1.0,1,False,0,0,0,1,4,0,0,0,0,0,1,0,3,0 18 | 8,1,2,2,Tit For Tat,Defector,CDDDD,4,-5,5,0.8,-1.0,0,True,1,0,1,0,4,0,0,0,1,0,0,0,3,1 19 | 8,2,1,2,Defector,Tit For Tat,DDDDD,9,5,5,1.8,1.0,1,False,0,0,0,1,4,0,0,0,0,0,1,0,3,0 20 | -------------------------------------------------------------------------------- /test_outputs/test_results_spatial.csv: -------------------------------------------------------------------------------- 1 | Interaction index,Player index,Opponent index,Repetition,Player name,Opponent name,Actions,Score,Score difference,Turns,Score per turn,Score difference per turn,Win,Initial cooperation,Cooperation count,CC count,CD count,DC count,DD count,CC to C count,CC to D count,CD to C count,CD to D count,DC to C count,DC to D count,DD to C count,DD to D count,Good partner 2 | 0,0,1,0,Alternator,Tit For Tat,CDCDC,13,0,5,2.6,0.0,0,True,3,1,2,2,0,0,1,0,1,2,0,0,0,1 3 | 0,1,0,0,Tit For Tat,Alternator,CCDCD,13,0,5,2.6,0.0,0,True,3,1,2,2,0,1,0,0,2,1,0,0,0,1 4 | 1,0,1,1,Alternator,Tit For Tat,CDCDC,13,0,5,2.6,0.0,0,True,3,1,2,2,0,0,1,0,1,2,0,0,0,1 5 | 1,1,0,1,Tit For Tat,Alternator,CCDCD,13,0,5,2.6,0.0,0,True,3,1,2,2,0,1,0,0,2,1,0,0,0,1 6 | 2,0,1,2,Alternator,Tit For Tat,CDCDC,13,0,5,2.6,0.0,0,True,3,1,2,2,0,0,1,0,1,2,0,0,0,1 7 | 2,1,0,2,Tit For Tat,Alternator,CCDCD,13,0,5,2.6,0.0,0,True,3,1,2,2,0,1,0,0,2,1,0,0,0,1 8 | 3,0,2,0,Alternator,Defector,CDCDC,2,-15,5,0.4,-3.0,0,True,3,0,3,0,2,0,0,0,2,0,0,2,0,1 9 | 3,2,0,0,Defector,Alternator,DDDDD,17,15,5,3.4,3.0,1,False,0,0,0,3,2,0,0,0,0,0,2,0,2,0 10 | 4,0,2,1,Alternator,Defector,CDCDC,2,-15,5,0.4,-3.0,0,True,3,0,3,0,2,0,0,0,2,0,0,2,0,1 11 | 4,2,0,1,Defector,Alternator,DDDDD,17,15,5,3.4,3.0,1,False,0,0,0,3,2,0,0,0,0,0,2,0,2,0 12 | 5,0,2,2,Alternator,Defector,CDCDC,2,-15,5,0.4,-3.0,0,True,3,0,3,0,2,0,0,0,2,0,0,2,0,1 13 | 5,2,0,2,Defector,Alternator,DDDDD,17,15,5,3.4,3.0,1,False,0,0,0,3,2,0,0,0,0,0,2,0,2,0 14 | -------------------------------------------------------------------------------- /test_outputs/test_results_spatial_three.csv: -------------------------------------------------------------------------------- 1 | 
Interaction index,Player index,Opponent index,Repetition,Player name,Opponent name,Actions,Score,Score difference,Turns,Score per turn,Score difference per turn,Win,Initial cooperation,Cooperation count,CC count,CD count,DC count,DD count,CC to C count,CC to D count,CD to C count,CD to D count,DC to C count,DC to D count,DD to C count,DD to D count,Good partner 2 | 0,0,0,0,Alternator,Alternator,CDCDC,11,0,5,2.2,0.0,0,True,3,3,0,0,2,0,2,0,0,0,0,2,0,1 3 | 0,0,0,0,Alternator,Alternator,CDCDC,11,0,5,2.2,0.0,0,True,3,3,0,0,2,0,2,0,0,0,0,2,0,1 4 | 1,0,0,1,Alternator,Alternator,CDCDC,11,0,5,2.2,0.0,0,True,3,3,0,0,2,0,2,0,0,0,0,2,0,1 5 | 1,0,0,1,Alternator,Alternator,CDCDC,11,0,5,2.2,0.0,0,True,3,3,0,0,2,0,2,0,0,0,0,2,0,1 6 | 2,0,0,2,Alternator,Alternator,CDCDC,11,0,5,2.2,0.0,0,True,3,3,0,0,2,0,2,0,0,0,0,2,0,1 7 | 2,0,0,2,Alternator,Alternator,CDCDC,11,0,5,2.2,0.0,0,True,3,3,0,0,2,0,2,0,0,0,0,2,0,1 8 | 3,1,1,0,Tit For Tat,Tit For Tat,CCCCC,15,0,5,3.0,0.0,0,True,5,5,0,0,0,4,0,0,0,0,0,0,0,1 9 | 3,1,1,0,Tit For Tat,Tit For Tat,CCCCC,15,0,5,3.0,0.0,0,True,5,5,0,0,0,4,0,0,0,0,0,0,0,1 10 | 4,1,1,1,Tit For Tat,Tit For Tat,CCCCC,15,0,5,3.0,0.0,0,True,5,5,0,0,0,4,0,0,0,0,0,0,0,1 11 | 4,1,1,1,Tit For Tat,Tit For Tat,CCCCC,15,0,5,3.0,0.0,0,True,5,5,0,0,0,4,0,0,0,0,0,0,0,1 12 | 5,1,1,2,Tit For Tat,Tit For Tat,CCCCC,15,0,5,3.0,0.0,0,True,5,5,0,0,0,4,0,0,0,0,0,0,0,1 13 | 5,1,1,2,Tit For Tat,Tit For Tat,CCCCC,15,0,5,3.0,0.0,0,True,5,5,0,0,0,4,0,0,0,0,0,0,0,1 14 | 6,2,2,0,Defector,Defector,DDDDD,5,0,5,1.0,0.0,0,False,0,0,0,0,5,0,0,0,0,0,0,0,4,1 15 | 6,2,2,0,Defector,Defector,DDDDD,5,0,5,1.0,0.0,0,False,0,0,0,0,5,0,0,0,0,0,0,0,4,1 16 | 7,2,2,1,Defector,Defector,DDDDD,5,0,5,1.0,0.0,0,False,0,0,0,0,5,0,0,0,0,0,0,0,4,1 17 | 7,2,2,1,Defector,Defector,DDDDD,5,0,5,1.0,0.0,0,False,0,0,0,0,5,0,0,0,0,0,0,0,4,1 18 | 8,2,2,2,Defector,Defector,DDDDD,5,0,5,1.0,0.0,0,False,0,0,0,0,5,0,0,0,0,0,0,0,4,1 19 | 8,2,2,2,Defector,Defector,DDDDD,5,0,5,1.0,0.0,0,False,0,0,0,0,5,0,0,0,0,0,0,0,4,1 20 | 9,3,3,0,Cooperator,Cooperator,CCCCC,15,0,5,3.0,0.0,0,True,5,5,0,0,0,4,0,0,0,0,0,0,0,1 21 | 9,3,3,0,Cooperator,Cooperator,CCCCC,15,0,5,3.0,0.0,0,True,5,5,0,0,0,4,0,0,0,0,0,0,0,1 22 | 10,3,3,1,Cooperator,Cooperator,CCCCC,15,0,5,3.0,0.0,0,True,5,5,0,0,0,4,0,0,0,0,0,0,0,1 23 | 10,3,3,1,Cooperator,Cooperator,CCCCC,15,0,5,3.0,0.0,0,True,5,5,0,0,0,4,0,0,0,0,0,0,0,1 24 | 11,3,3,2,Cooperator,Cooperator,CCCCC,15,0,5,3.0,0.0,0,True,5,5,0,0,0,4,0,0,0,0,0,0,0,1 25 | 11,3,3,2,Cooperator,Cooperator,CCCCC,15,0,5,3.0,0.0,0,True,5,5,0,0,0,4,0,0,0,0,0,0,0,1 26 | -------------------------------------------------------------------------------- /test_outputs/test_results_spatial_two.csv: -------------------------------------------------------------------------------- 1 | Interaction index,Player index,Opponent index,Repetition,Player name,Opponent name,Actions,Score,Score difference,Turns,Score per turn,Score difference per turn,Win,Initial cooperation,Cooperation count,CC count,CD count,DC count,DD count,CC to C count,CC to D count,CD to C count,CD to D count,DC to C count,DC to D count,DD to C count,DD to D count,Good partner 2 | 0,0,1,0,Alternator,Tit For Tat,CDCDC,13,0,5,2.6,0.0,0,True,3,1,2,2,0,0,1,0,1,2,0,0,0,1 3 | 0,1,0,0,Tit For Tat,Alternator,CCDCD,13,0,5,2.6,0.0,0,True,3,1,2,2,0,1,0,0,2,1,0,0,0,1 4 | 1,0,1,1,Alternator,Tit For Tat,CDCDC,13,0,5,2.6,0.0,0,True,3,1,2,2,0,0,1,0,1,2,0,0,0,1 5 | 1,1,0,1,Tit For Tat,Alternator,CCDCD,13,0,5,2.6,0.0,0,True,3,1,2,2,0,1,0,0,2,1,0,0,0,1 6 | 2,0,1,2,Alternator,Tit For 
Tat,CDCDC,13,0,5,2.6,0.0,0,True,3,1,2,2,0,0,1,0,1,2,0,0,0,1 7 | 2,1,0,2,Tit For Tat,Alternator,CCDCD,13,0,5,2.6,0.0,0,True,3,1,2,2,0,1,0,0,2,1,0,0,0,1 8 | 3,2,3,0,Defector,Cooperator,DDDDD,25,25,5,5.0,5.0,1,False,0,0,0,5,0,0,0,0,0,0,4,0,0,0 9 | 3,3,2,0,Cooperator,Defector,CCCCC,0,-25,5,0.0,-5.0,0,True,5,0,5,0,0,0,0,4,0,0,0,0,0,1 10 | 4,2,3,1,Defector,Cooperator,DDDDD,25,25,5,5.0,5.0,1,False,0,0,0,5,0,0,0,0,0,0,4,0,0,0 11 | 4,3,2,1,Cooperator,Defector,CCCCC,0,-25,5,0.0,-5.0,0,True,5,0,5,0,0,0,0,4,0,0,0,0,0,1 12 | 5,2,3,2,Defector,Cooperator,DDDDD,25,25,5,5.0,5.0,1,False,0,0,0,5,0,0,0,0,0,0,4,0,0,0 13 | 5,3,2,2,Cooperator,Defector,CCCCC,0,-25,5,0.0,-5.0,0,True,5,0,5,0,0,0,0,4,0,0,0,0,0,1 14 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | isolated_build = True 3 | envlist = py311, py312 4 | 5 | [gh-actions] 6 | python = 7 | 3.11: py311 8 | 3.12: py312 9 | 10 | [flake8] 11 | per-file-ignores = 12 | setup.py: F821 13 | docs/_build/*: ALL 14 | docs/conf.py: E402 15 | **/__init__.py: F401 F403 16 | ignore = 17 | E203 18 | E501 19 | W291 20 | W503 21 | 22 | [testenv] 23 | deps = 24 | hypothesis 25 | pytest-cov 26 | pytest-randomly 27 | pytest-sugar 28 | isort 29 | black 30 | numpy==1.26.4 31 | mypy 32 | types-setuptools 33 | commands = 34 | python -m pytest --cov-report term-missing --cov=axelrod --cov-fail-under=100 . --doctest-glob="*.md" --doctest-glob="*.rst" 35 | python -m black -l 80 . --check 36 | python -m isort --check-only axelrod/. 37 | python run_mypy.py 38 | python run_strategy_indexer.py 39 | --------------------------------------------------------------------------------