├── .coveragerc ├── .flake8 ├── .gitignore ├── .gitmodules ├── .travis.yml ├── BackwardChaining.ipynb ├── BasicConceptsSearch.ipynb ├── CONTRIBUTING.md ├── DecisionTreeLearning.ipynb ├── ForwardChaining.ipynb ├── HMM.ipynb ├── InferencePropLogic.ipynb ├── InformedSearch.ipynb ├── LICENSE ├── MarkovDecisionProcess.ipynb ├── README.md ├── SUBMODULE.md ├── SyntaxLogic.ipynb ├── UninformedSearch.ipynb ├── agents.py ├── agents4e.py ├── bayesian_networks.ipynb ├── csp.py ├── deep_learning4e.py ├── games.py ├── games4e.py ├── gui ├── eight_puzzle.py ├── genetic_algorithm_example.py ├── grid_mdp.py ├── romania_problem.py ├── tic-tac-toe.py ├── tsp.py ├── vacuum_agent.py └── xy_vacuum_environment.py ├── hmm.ipynb ├── homework └── .gitkeep ├── images ├── -0.04.jpg ├── -0.4.jpg ├── -4.jpg ├── 4.jpg ├── IMAGE-CREDITS ├── aima3e_big.jpg ├── aima_logo.png ├── bayesnet.png ├── broxrevised.png ├── cake_graph.jpg ├── decisiontree_fruit.jpg ├── dirt.svg ├── dirt05-icon.jpg ├── ensemble_learner.jpg ├── fig_5_2.png ├── ge0.jpg ├── ge1.jpg ├── ge2.jpg ├── ge4.jpg ├── general_learning_agent.jpg ├── grid_mdp.jpg ├── grid_mdp_agent.jpg ├── hillclimb-tsp.png ├── knn_plot.png ├── knowledge_FOIL_grandparent.png ├── knowledge_foil_family.png ├── makefile ├── maze.png ├── mdp-a.png ├── mdp-b.png ├── mdp-c.png ├── mdp-d.png ├── mdp.png ├── model_based_reflex_agent.jpg ├── model_goal_based_agent.jpg ├── model_utility_based_agent.jpg ├── neural_net.png ├── parse_tree.png ├── perceptron.png ├── pluralityLearner_plot.png ├── point_crossover.png ├── pop.jpg ├── queen_s.png ├── random_forest.png ├── refinement.png ├── restaurant.png ├── romania_map.png ├── search_animal.svg ├── simple_problem_solving_agent.jpg ├── simple_reflex_agent.jpg ├── sprinklernet.jpg ├── stapler1-test.png ├── uniform_crossover.png ├── vacuum-icon.jpg ├── vacuum.svg └── wall-icon.jpg ├── img ├── 19.png ├── 23_1.png ├── 23_2.png ├── 25_1.png ├── 25_2.png ├── 27.png ├── 28.png ├── 6.png ├── 7.png ├── 
decision_tree_learner_restaurant.png ├── decision_tree_restaurant_result.png └── reward_neg_0.02.png ├── ipyviews.py ├── js ├── canvas.js ├── continuousworld.js └── gridworld.js ├── knowledge.py ├── learning.py ├── learning4e.py ├── logic.py ├── logic4e.py ├── making_simple_decision4e.py ├── mdp.py ├── mdp4e.py ├── nlp.py ├── nlp4e.py ├── notebook.py ├── notebook4e.py ├── notebooks ├── chapter19 │ ├── Learners.ipynb │ ├── Loss Functions and Layers.ipynb │ ├── Optimizer and Backpropagation.ipynb │ ├── RNN.ipynb │ └── images │ │ ├── autoencoder.png │ │ ├── backprop.png │ │ ├── corss_entropy_plot.png │ │ ├── mse_plot.png │ │ ├── nn.png │ │ ├── nn_steps.png │ │ ├── perceptron.png │ │ ├── rnn_connections.png │ │ ├── rnn_unit.png │ │ ├── rnn_units.png │ │ └── vanilla.png ├── chapter21 │ ├── Active Reinforcement Learning.ipynb │ ├── Passive Reinforcement Learning.ipynb │ └── images │ │ └── mdp.png ├── chapter22 │ ├── Grammar.ipynb │ ├── Introduction.ipynb │ ├── Parsing.ipynb │ ├── images │ │ └── parse_tree.png │ └── nlp_apps.ipynb ├── chapter24 │ ├── Image Edge Detection.ipynb │ ├── Image Segmentation.ipynb │ ├── Objects in Images.ipynb │ └── images │ │ ├── RCNN.png │ │ ├── derivative_of_gaussian.png │ │ ├── gradients.png │ │ ├── laplacian.png │ │ ├── laplacian_kernels.png │ │ ├── stapler.png │ │ └── stapler_bbox.png └── old notebooks │ ├── agents.ipynb │ ├── arc_consistency_heuristics.ipynb │ ├── classical_planning_approaches.ipynb │ ├── csp.ipynb │ ├── games.ipynb │ ├── games4e.ipynb │ ├── improving_sat_algorithms.ipynb │ ├── index.ipynb │ ├── intro.ipynb │ ├── knowledge_FOIL.ipynb │ ├── knowledge_current_best.ipynb │ ├── knowledge_version_space.ipynb │ ├── learning.ipynb │ ├── learning_apps.ipynb │ ├── logic.ipynb │ ├── mdp.ipynb │ ├── mdp_apps.ipynb │ ├── neural_nets.ipynb │ ├── nlp.ipynb │ ├── nlp_apps.ipynb │ ├── obsolete_search4e.ipynb │ ├── planning.ipynb │ ├── planning_angelic_search.ipynb │ ├── planning_graphPlan.ipynb │ ├── planning_hierarchical_search.ipynb │ 
├── planning_partial_order_planner.ipynb │ ├── planning_total_order_planner.ipynb │ ├── probability.ipynb │ ├── probability4e.ipynb │ ├── reinforcement_learning.ipynb │ ├── search.ipynb │ ├── search4e.ipynb │ ├── text.ipynb │ ├── vacuum_world.ipynb │ └── viterbi_algorithm.ipynb ├── perception4e.py ├── planning.py ├── probabilistic_learning.py ├── probability.py ├── probability4e.py ├── pytest.ini ├── reinforcement_learning.py ├── reinforcement_learning4e.py ├── requirements.txt ├── search.py ├── search_helpers.py ├── tests ├── __init__.py ├── pytest.ini ├── test_agents.py ├── test_agents4e.py ├── test_csp.py ├── test_games.py ├── test_games4e.py ├── test_knowledge.py ├── test_learning.py ├── test_logic.py ├── test_logic4e.py ├── test_mdp.py ├── test_mdp4e.py ├── test_nlp.py ├── test_nlp4e.py ├── test_planning.py ├── test_probabilistic_learning.py ├── test_probability.py ├── test_probability4e.py ├── test_reinforcement_learning.py ├── test_reinforcement_learning4e.py ├── test_search.py ├── test_text.py └── test_utils.py ├── text.py ├── utils.py └── utils4e.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | omit = 3 | tests/* -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 100 3 | ignore = E121,E123,E126,E221,E222,E225,E226,E242,E701,E702,E704,E731,W503,F405,F841 4 | exclude = tests 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 
21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | *.pytest_cache 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask instance folder 58 | instance/ 59 | 60 | # Sphinx documentation 61 | docs/_build/ 62 | 63 | # PyBuilder 64 | target/ 65 | 66 | # IPython Notebook 67 | .ipynb_checkpoints 68 | 69 | # pyenv 70 | .python-version 71 | 72 | # dotenv 73 | .env 74 | .idea 75 | 76 | # for macOS 77 | .DS_Store 78 | ._.DS_Store 79 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "aima-data"] 2 | path = aima-data 3 | url = https://github.com/aimacode/aima-data.git 4 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | python: 4 | - 3.5 5 | - 3.6 6 | - 3.7 7 | - 3.8 8 | 9 | before_install: 10 | - git submodule update --remote 11 | 12 | install: 13 | - pip install --upgrade -r requirements.txt 14 | 15 | script: 16 | - py.test --cov=./ 17 | - python -m doctest -v *.py 18 | 19 | after_success: 20 | - flake8 --max-line-length 100 --ignore=E121,E123,E126,E221,E222,E225,E226,E242,E701,E702,E704,E731,W503 . 
21 | 22 | notifications: 23 | email: false 24 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | How to Contribute to aima-python 2 | ========================== 3 | 4 | Thanks for considering contributing to `aima-python`! Whether you are an aspiring [Google Summer of Code](https://summerofcode.withgoogle.com/organizations/5431334980288512/) student, or an independent contributor, here is a guide on how you can help. 5 | 6 | First of all, you can read these write-ups from past GSoC students to get an idea about what you can do for the project. [Chipe1](https://github.com/aimacode/aima-python/issues/641) - [MrDupin](https://github.com/aimacode/aima-python/issues/632) 7 | 8 | In general, the main ways you can contribute to the repository are the following: 9 | 10 | 1. Implement algorithms from the [list of algorithms](https://github.com/aimacode/aima-python/blob/master/README.md#index-of-algorithms). 11 | 1. Add tests for algorithms. 12 | 1. Take care of [issues](https://github.com/aimacode/aima-python/issues). 13 | 1. Write on the notebooks (`.ipynb` files). 14 | 1. Add and edit documentation (the docstrings in `.py` files). 15 | 16 | In more detail: 17 | 18 | ## Read the Code and Start on an Issue 19 | 20 | - First, read and understand the code to get a feel for the extent and the style. 21 | - Look at the [issues](https://github.com/aimacode/aima-python/issues) and pick one to work on. 22 | - One of the issues is that some algorithms are missing from the [list of algorithms](https://github.com/aimacode/aima-python/blob/master/README.md#index-of-algorithms) and that some don't have tests. 
23 | 24 | ## Port to Python 3; Pythonic Idioms 25 | 26 | - Check for common problems in [porting to Python 3](http://python3porting.com/problems.html), such as: `print` is now a function; `range` and `map` and other functions no longer produce `list`; objects of different types can no longer be compared with `<`; strings are now Unicode; it would be nice to move `%` string formatting to `.format`; there is a new `next` function for generators; integer division now returns a float; we can now use set literals. 27 | - Replace old Lisp-based idioms with proper Python idioms. For example, we have many functions that were taken directly from Common Lisp, such as the `every` function: `every(callable, items)` returns true if every element of `items` is callable. This is good Lisp style, but good Python style would be to use `all` and a generator expression: `all(callable(f) for f in items)`. Eventually, fix all calls to these legacy Lisp functions and then remove the functions. 28 | 29 | ## New and Improved Algorithms 30 | 31 | - Implement functions that were in the third edition of the book but were not yet implemented in the code. Check the [list of pseudocode algorithms (pdf)](https://github.com/aimacode/pseudocode/blob/master/aima3e-algorithms.pdf) to see what's missing. 32 | - As we finish chapters for the new fourth edition, we will share the new pseudocode in the [`aima-pseudocode`](https://github.com/aimacode/aima-pseudocode) repository, and describe what changes are necessary. 33 | We hope to have an `algorithm-name.md` file for each algorithm, eventually; it would be great if contributors could add some for the existing algorithms. 34 | 35 | ## Jupyter Notebooks 36 | 37 | In this project we use Jupyter/IPython Notebooks to showcase the algorithms in the book. They serve as short tutorials on what the algorithms do, how they are implemented and how one can use them. To install Jupyter, you can follow the instructions [here](https://jupyter.org/install.html). 
These are some ways you can contribute to the notebooks: 38 | 39 | - Proofread the notebooks for grammar mistakes, typos, or general errors. 40 | - Move visualization and unrelated to the algorithm code from notebooks to `notebook.py` (a file used to store code for the notebooks, like visualization and other miscellaneous stuff). Make sure the notebooks still work and have their outputs showing! 41 | - Replace the `%psource` magic notebook command with the function `psource` from `notebook.py` where needed. Examples where this is useful are a) when we want to show code for algorithm implementation and b) when we have consecutive cells with the magic keyword (in this case, if the code is large, it's best to leave the output hidden). 42 | - Add the function `pseudocode(algorithm_name)` in algorithm sections. The function prints the pseudocode of the algorithm. You can see some example usage in [`knowledge.ipynb`](https://github.com/aimacode/aima-python/blob/master/knowledge.ipynb). 43 | - Edit existing sections for algorithms to add more information and/or examples. 44 | - Add visualizations for algorithms. The visualization code should go in `notebook.py` to keep things clean. 45 | - Add new sections for algorithms not yet covered. The general format we use in the notebooks is the following: First start with an overview of the algorithm, printing the pseudocode and explaining how it works. Then, add some implementation details, including showing the code (using `psource`). Finally, add examples for the implementations, showing how the algorithms work. Don't fret with adding complex, real-world examples; the project is meant for educational purposes. You can of course choose another format if something better suits an algorithm. 46 | 47 | Apart from the notebooks explaining how the algorithms work, we also have notebooks showcasing some indicative applications of the algorithms. These notebooks are in the `*_apps.ipynb` format. 
We aim to have an `apps` notebook for each module, so if you don't see one for the module you would like to contribute to, feel free to create it from scratch! In these notebooks we are looking for applications showing what the algorithms can do. The general format of these sections is this: Add a description of the problem you are trying to solve, then explain how you are going to solve it and finally provide your solution with examples. Note that any code you write should not require any external libraries apart from the ones already provided (like `matplotlib`). 48 | 49 | # Style Guide 50 | 51 | There are a few style rules that are unique to this project: 52 | 53 | - The first rule is that the code should correspond directly to the pseudocode in the book. When possible this will be almost one-to-one, just allowing for the syntactic differences between Python and pseudocode, and for different library functions. 54 | - Don't make a function more complicated than the pseudocode in the book, even if the complication would add a nice feature, or give an efficiency gain. Instead, remain faithful to the pseudocode, and if you must, add a new function (not in the book) with the added feature. 55 | - I use functional programming (functions with no side effects) in many cases, but not exclusively (sometimes classes and/or functions with side effects are used). Let the book's pseudocode be the guide. 56 | 57 | Beyond the above rules, we use [Pep 8](https://www.python.org/dev/peps/pep-0008), with a few minor exceptions: 58 | 59 | - I have set `--max-line-length 100`, not 79. 60 | - You don't need two spaces after a sentence-ending period. 61 | - Strunk and White is [not a good guide for English](http://chronicle.com/article/50-Years-of-Stupid-Grammar/25497). 62 | - I prefer more concise docstrings; I don't follow [Pep 257](https://www.python.org/dev/peps/pep-0257/). In most cases, 63 | a one-line docstring suffices. 
It is rarely necessary to list what each argument does; the name of the argument usually is enough. 64 | - Not all constants have to be UPPERCASE. 65 | - At some point I may add [Pep 484](https://www.python.org/dev/peps/pep-0484/) type annotations, but I think I'll hold off for now; 66 | I want to get more experience with them, and some people may still be in Python 3.4. 67 | 68 | Reporting Issues 69 | ================ 70 | 71 | - Under which versions of Python does this happen? 72 | 73 | - Provide an example of the issue occurring. 74 | 75 | - Is anybody working on this? 76 | 77 | Patch Rules 78 | =========== 79 | 80 | - Ensure that the patch is Python 3.4 compliant. 81 | 82 | - Include tests if your patch is supposed to solve a bug, and explain 83 | clearly under which circumstances the bug happens. Make sure the test fails 84 | without your patch. 85 | 86 | - Follow the style guidelines described above. 87 | - Refer the issue you have fixed. 88 | - Explain in brief what changes you have made with affected files name. 89 | 90 | # Choice of Programming Languages 91 | 92 | Are we right to concentrate on Java and Python versions of the code? I think so; both languages are popular; Java is 93 | fast enough for our purposes, and has reasonable type declarations (but can be verbose); Python is popular and has a very direct mapping to the pseudocode in the book (but lacks type declarations and can be slow). The [TIOBE Index](http://www.tiobe.com/tiobe_index) says the top seven most popular languages, in order, are: 94 | 95 | Java, C, C++, C#, Python, PHP, Javascript 96 | 97 | So it might be reasonable to also support C++/C# at some point in the future. It might also be reasonable to support a language that combines the terse readability of Python with the type safety and speed of Java; perhaps Go or Julia. I see no reason to support PHP. 
Javascript is the language of the browser; it would be nice to have code that runs in the browser without need for any downloads; this would be in Javascript or a variant such as Typescript. 98 | 99 | There is also a `aima-lisp` project; in 1995 when we wrote the first edition of the book, Lisp was the right choice, but today it is less popular (currently #31 on the TIOBE index). 100 | 101 | What languages are instructors recommending for their AI class? To get an approximate idea, I gave the query [\[norvig russell "Modern Approach"\]](https://www.google.com/webhp#q=russell%20norvig%20%22modern%20approach%22%20java) along with the names of various languages and looked at the estimated counts of results on 102 | various dates. However, I don't have much confidence in these figures... 103 | 104 | |Language |2004 |2005 |2007 |2010 |2016 | 105 | |-------- |----: |----: |----: |----: |----: | 106 | |[none](http://www.google.com/search?q=norvig+russell+%22Modern+Approach%22)|8,080|20,100|75,200|150,000|132,000| 107 | |[java](http://www.google.com/search?q=java+norvig+russell+%22Modern+Approach%22)|1,990|4,930|44,200|37,000|50,000| 108 | |[c++](http://www.google.com/search?q=c%2B%2B+norvig+russell+%22Modern+Approach%22)|875|1,820|35,300|105,000|35,000| 109 | |[lisp](http://www.google.com/search?q=lisp+norvig+russell+%22Modern+Approach%22)|844|974|30,100|19,000|14,000| 110 | |[prolog](http://www.google.com/search?q=prolog+norvig+russell+%22Modern+Approach%22)|789|2,010|23,200|17,000|16,000| 111 | |[python](http://www.google.com/search?q=python+norvig+russell+%22Modern+Approach%22)|785|1,240|18,400|11,000|12,000| 112 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 aima-python contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and 
associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 6 | 7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 10 | -------------------------------------------------------------------------------- /SUBMODULE.md: -------------------------------------------------------------------------------- 1 | This is a guide on how to update the `aima-data` submodule to the latest version. This needs to be done every time something changes in the [aima-data](https://github.com/aimacode/aima-data) repository. All the below commands should be executed from the local directory of the `aima-python` repository, using `git`. 2 | 3 | ``` 4 | git submodule deinit aima-data 5 | git rm aima-data 6 | git submodule add https://github.com/aimacode/aima-data.git aima-data 7 | git commit 8 | git push origin 9 | ``` 10 | 11 | Then you need to pull request the changes (unless you are a collaborator, in which case you can commit directly to the master). 
12 | -------------------------------------------------------------------------------- /SyntaxLogic.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "collapsed": true 7 | }, 8 | "source": [ 9 | "# Syntax of Propositional Logic" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": {}, 15 | "source": [ 16 | "This Jupyter notebook demonstrate how proposition symbols and logic sentences can be created using the **AIMAcode** library." 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "metadata": {}, 22 | "source": [ 23 | "Import modules:" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": 1, 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "from utils import *\n", 33 | "from logic import *\n", 34 | "from notebook import psource" 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "metadata": {}, 40 | "source": [ 41 | "## Proposition Symbols and Sentences" 42 | ] 43 | }, 44 | { 45 | "cell_type": "markdown", 46 | "metadata": {}, 47 | "source": [ 48 | "A single proposition symbol can be created with the function `Symbol`. 
For example, we can create a proposition symbol with the name **x**:" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": 2, 54 | "metadata": {}, 55 | "outputs": [ 56 | { 57 | "data": { 58 | "text/plain": [ 59 | "x" 60 | ] 61 | }, 62 | "execution_count": 2, 63 | "metadata": {}, 64 | "output_type": "execute_result" 65 | } 66 | ], 67 | "source": [ 68 | "Symbol('x')" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "metadata": {}, 74 | "source": [ 75 | "Or we can define multiple symbols at the same time with the function `symbols`:" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": 3, 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "(x, y, P, Q, f) = symbols('x, y, P, Q, f')" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [ 91 | "The previously defined proposition symbols can be used to formulate logical sentences. Here's how we would form the logical sentence \"P and not Q\":" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 9, 97 | "metadata": {}, 98 | "outputs": [ 99 | { 100 | "data": { 101 | "text/plain": [ 102 | "(P & ~Q)" 103 | ] 104 | }, 105 | "execution_count": 9, 106 | "metadata": {}, 107 | "output_type": "execute_result" 108 | } 109 | ], 110 | "source": [ 111 | "P & ~Q" 112 | ] 113 | }, 114 | { 115 | "cell_type": "markdown", 116 | "metadata": {}, 117 | "source": [ 118 | "## Operators for Constructing Logical Sentences\n", 119 | "\n", 120 | "Here is a table of the operators that can be used to form sentences. Note that we have a problem: we want to use Python operators to make sentences, so that our programs (and our interactive sessions like the one here) will show simple code. But Python does not allow implication arrows as operators, so for now we have to use a more verbose notation that Python does allow: `|'==>'|` instead of just `==>`. 
Alternately, you can also use the constructor of the `Expr` class to construct logical sentences:\n", 121 | "\n", 122 | "| Operation | Book | Python Infix Input | Python Output | Python `Expr` Input\n", 123 | "|--------------------------|----------------------|-------------------------|---|---|\n", 124 | "| Negation | ¬ P | `~P` | `~P` | `Expr('~', P)`\n", 125 | "| And | P ∧ Q | `P & Q` | `P & Q` | `Expr('&', P, Q)`\n", 126 | "| Or | P ∨ Q | `P` | `Q`| `P` | `Q` | `Expr('`|`', P, Q)`\n", 127 | "| Inequality (Xor) | P ≠ Q | `P ^ Q` | `P ^ Q` | `Expr('^', P, Q)`\n", 128 | "| Implication | P → Q | `P` |`'==>'`| `Q` | `P ==> Q` | `Expr('==>', P, Q)`\n", 129 | "| Reverse Implication | Q ← P | `Q` |`'<=='`| `P` |`Q <== P` | `Expr('<==', Q, P)`\n", 130 | "| Equivalence | P ↔ Q | `P` |`'<=>'`| `Q` |`P <=> Q` | `Expr('<=>', P, Q)`\n", 131 | "\n", 132 | "Here's an example of defining a sentence with an implication arrow:" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 6, 138 | "metadata": {}, 139 | "outputs": [ 140 | { 141 | "data": { 142 | "text/plain": [ 143 | "(~(P & Q) ==> (~P | ~Q))" 144 | ] 145 | }, 146 | "execution_count": 6, 147 | "metadata": {}, 148 | "output_type": "execute_result" 149 | } 150 | ], 151 | "source": [ 152 | "~(P & Q) |'==>'| (~P | ~Q)" 153 | ] 154 | }, 155 | { 156 | "cell_type": "markdown", 157 | "metadata": {}, 158 | "source": [ 159 | "If the `|'==>'|` notation looks ugly to you, you can use the function `expr` instead:" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 7, 165 | "metadata": {}, 166 | "outputs": [ 167 | { 168 | "data": { 169 | "text/plain": [ 170 | "(~(P & Q) ==> (~P | ~Q))" 171 | ] 172 | }, 173 | "execution_count": 7, 174 | "metadata": {}, 175 | "output_type": "execute_result" 176 | } 177 | ], 178 | "source": [ 179 | "expr('~(P & Q) ==> (~P | ~Q)')" 180 | ] 181 | } 182 | ], 183 | "metadata": { 184 | "kernelspec": { 185 | "display_name": "Python 3", 186 | "language": "python", 
187 | "name": "python3" 188 | }, 189 | "language_info": { 190 | "codemirror_mode": { 191 | "name": "ipython", 192 | "version": 3 193 | }, 194 | "file_extension": ".py", 195 | "mimetype": "text/x-python", 196 | "name": "python", 197 | "nbconvert_exporter": "python", 198 | "pygments_lexer": "ipython3", 199 | "version": "3.6.9" 200 | } 201 | }, 202 | "nbformat": 4, 203 | "nbformat_minor": 1 204 | } 205 | -------------------------------------------------------------------------------- /gui/eight_puzzle.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import random 3 | import time 4 | from functools import partial 5 | from tkinter import * 6 | 7 | from search import astar_search, EightPuzzle 8 | 9 | sys.path.append(os.path.join(os.path.dirname(__file__), '..')) 10 | 11 | root = Tk() 12 | 13 | state = [1, 2, 3, 4, 5, 6, 7, 8, 0] 14 | puzzle = EightPuzzle(tuple(state)) 15 | solution = None 16 | 17 | b = [None] * 9 18 | 19 | 20 | # TODO: refactor into OOP, remove global variables 21 | 22 | def scramble(): 23 | """Scrambles the puzzle starting from the goal state""" 24 | 25 | global state 26 | global puzzle 27 | possible_actions = ['UP', 'DOWN', 'LEFT', 'RIGHT'] 28 | scramble = [] 29 | for _ in range(60): 30 | scramble.append(random.choice(possible_actions)) 31 | 32 | for move in scramble: 33 | if move in puzzle.actions(state): 34 | state = list(puzzle.result(state, move)) 35 | puzzle = EightPuzzle(tuple(state)) 36 | create_buttons() 37 | 38 | 39 | def solve(): 40 | """Solves the puzzle using astar_search""" 41 | 42 | return astar_search(puzzle).solution() 43 | 44 | 45 | def solve_steps(): 46 | """Solves the puzzle step by step""" 47 | 48 | global puzzle 49 | global solution 50 | global state 51 | solution = solve() 52 | print(solution) 53 | 54 | for move in solution: 55 | state = puzzle.result(state, move) 56 | create_buttons() 57 | root.update() 58 | root.after(1, time.sleep(0.75)) 59 | 60 | 61 | def 
exchange(index): 62 | """Interchanges the position of the selected tile with the zero tile under certain conditions""" 63 | 64 | global state 65 | global solution 66 | global puzzle 67 | zero_ix = list(state).index(0) 68 | actions = puzzle.actions(state) 69 | current_action = '' 70 | i_diff = index // 3 - zero_ix // 3 71 | j_diff = index % 3 - zero_ix % 3 72 | if i_diff == 1: 73 | current_action += 'DOWN' 74 | elif i_diff == -1: 75 | current_action += 'UP' 76 | 77 | if j_diff == 1: 78 | current_action += 'RIGHT' 79 | elif j_diff == -1: 80 | current_action += 'LEFT' 81 | 82 | if abs(i_diff) + abs(j_diff) != 1: 83 | current_action = '' 84 | 85 | if current_action in actions: 86 | b[zero_ix].grid_forget() 87 | b[zero_ix] = Button(root, text=f'{state[index]}', width=6, font=('Helvetica', 40, 'bold'), 88 | command=partial(exchange, zero_ix)) 89 | b[zero_ix].grid(row=zero_ix // 3, column=zero_ix % 3, ipady=40) 90 | b[index].grid_forget() 91 | b[index] = Button(root, text=None, width=6, font=('Helvetica', 40, 'bold'), command=partial(exchange, index)) 92 | b[index].grid(row=index // 3, column=index % 3, ipady=40) 93 | state[zero_ix], state[index] = state[index], state[zero_ix] 94 | puzzle = EightPuzzle(tuple(state)) 95 | 96 | 97 | def create_buttons(): 98 | """Creates dynamic buttons""" 99 | 100 | # TODO: Find a way to use grid_forget() with a for loop for initialization 101 | b[0] = Button(root, text=f'{state[0]}' if state[0] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), 102 | command=partial(exchange, 0)) 103 | b[0].grid(row=0, column=0, ipady=40) 104 | b[1] = Button(root, text=f'{state[1]}' if state[1] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), 105 | command=partial(exchange, 1)) 106 | b[1].grid(row=0, column=1, ipady=40) 107 | b[2] = Button(root, text=f'{state[2]}' if state[2] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), 108 | command=partial(exchange, 2)) 109 | b[2].grid(row=0, column=2, ipady=40) 110 | b[3] = Button(root, 
text=f'{state[3]}' if state[3] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), 111 | command=partial(exchange, 3)) 112 | b[3].grid(row=1, column=0, ipady=40) 113 | b[4] = Button(root, text=f'{state[4]}' if state[4] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), 114 | command=partial(exchange, 4)) 115 | b[4].grid(row=1, column=1, ipady=40) 116 | b[5] = Button(root, text=f'{state[5]}' if state[5] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), 117 | command=partial(exchange, 5)) 118 | b[5].grid(row=1, column=2, ipady=40) 119 | b[6] = Button(root, text=f'{state[6]}' if state[6] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), 120 | command=partial(exchange, 6)) 121 | b[6].grid(row=2, column=0, ipady=40) 122 | b[7] = Button(root, text=f'{state[7]}' if state[7] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), 123 | command=partial(exchange, 7)) 124 | b[7].grid(row=2, column=1, ipady=40) 125 | b[8] = Button(root, text=f'{state[8]}' if state[8] != 0 else None, width=6, font=('Helvetica', 40, 'bold'), 126 | command=partial(exchange, 8)) 127 | b[8].grid(row=2, column=2, ipady=40) 128 | 129 | 130 | def create_static_buttons(): 131 | """Creates scramble and solve buttons""" 132 | 133 | scramble_btn = Button(root, text='Scramble', font=('Helvetica', 30, 'bold'), width=8, command=partial(init)) 134 | scramble_btn.grid(row=3, column=0, ipady=10) 135 | solve_btn = Button(root, text='Solve', font=('Helvetica', 30, 'bold'), width=8, command=partial(solve_steps)) 136 | solve_btn.grid(row=3, column=2, ipady=10) 137 | 138 | 139 | def init(): 140 | """Calls necessary functions""" 141 | 142 | global state 143 | global solution 144 | state = [1, 2, 3, 4, 5, 6, 7, 8, 0] 145 | scramble() 146 | create_buttons() 147 | create_static_buttons() 148 | 149 | 150 | init() 151 | root.mainloop() 152 | -------------------------------------------------------------------------------- /gui/genetic_algorithm_example.py: 
# /gui/genetic_algorithm_example.py
# A simple program that implements the solution to the phrase generation problem using
# genetic algorithms as given in the search.ipynb notebook.
#
# Type on the home screen to change the target phrase
# Click on the slider to change genetic algorithm parameters
# Click 'GO' to run the algorithm with the specified variables
# Displays best individual of the current generation
# Displays a progress bar that indicates the amount of completion of the algorithm
# Displays the first few individuals of the current generation

import os.path
import sys
from tkinter import *
from tkinter import ttk

# Make the repository root importable *before* importing the project module.
# BUGFIX: the original appended to sys.path *after* `import search` (so the
# tweak could never help that import) and never imported `sys` at all, which
# raised NameError as soon as the script ran.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import search

LARGE_FONT = ('Verdana', 12)
EXTRA_LARGE_FONT = ('Consolas', 36, 'bold')

canvas_width = 800
canvas_height = 600

black = '#000000'
white = '#ffffff'
p_blue = '#042533'
lp_blue = '#0c394c'

# genetic algorithm variables
# feel free to play around with these
target = 'Genetic Algorithm'  # the phrase to be generated
max_population = 100  # number of samples in each population
mutation_rate = 0.1  # probability of mutation
f_thres = len(target)  # fitness threshold
ngen = 1200  # max number of generations to run the genetic algorithm

generation = 0  # counter to keep track of generation number

u_case = [chr(x) for x in range(65, 91)]  # list containing all uppercase characters
l_case = [chr(x) for x in range(97, 123)]  # list containing all lowercase characters
punctuations1 = [chr(x) for x in range(33, 48)]  # lists containing punctuation symbols
punctuations2 = [chr(x) for x in range(58, 65)]
punctuations3 = [chr(x) for x in range(91, 97)]
numerals = [chr(x) for x in range(48, 58)]  # list containing numbers

# extend the gene pool with the required lists and append the space character
gene_pool = []
gene_pool.extend(u_case)
gene_pool.extend(l_case)
gene_pool.append(' ')


# callbacks to update global variables from the slider values
def update_max_population(slider_value):
    """Slider callback: set the population size for the next run."""
    global max_population
    max_population = slider_value


def update_mutation_rate(slider_value):
    """Slider callback: set the per-individual mutation probability."""
    global mutation_rate
    mutation_rate = slider_value


def update_f_thres(slider_value):
    """Slider callback: set the fitness threshold that stops the run."""
    global f_thres
    f_thres = slider_value


def update_ngen(slider_value):
    """Slider callback: set the maximum number of generations."""
    global ngen
    ngen = slider_value


def fitness_fn(_list):
    """Return the number of characters of the candidate matching `target`
    position-wise (higher is fitter).

    `_list` is a sequence of single characters (a genome); a plain string
    also works since ''.join accepts any iterable of str.
    """
    phrase = ''.join(_list)
    # BUGFIX: zip stops at the shorter sequence, so a population whose
    # genomes differ in length from the current target (e.g. after the user
    # edits the target mid-session) can no longer raise IndexError, as the
    # original `target[i]` loop over range(len(phrase)) could.
    return sum(1 for t, p in zip(target, phrase) if t == p)


def raise_frame(frame, init=False, update_target=False, target_entry=None, f_thres_slider=None):
    """Bring `frame` to the top; optionally refresh the target phrase from
    the entry widget and/or start a fresh genetic-algorithm run."""
    frame.tkraise()
    global target
    if update_target and target_entry is not None:
        target = target_entry.get()
        # the maximum achievable fitness equals the target length
        f_thres_slider.config(to=len(target))
    if init:
        population = search.init_population(max_population, gene_pool, len(target))
        genetic_algorithm_stepwise(population)


# defining root and child frames
root = Tk()
f1 = Frame(root)
f2 = Frame(root)

# pack frames on top of one another
for frame in (f1, f2):
    frame.grid(row=0, column=0, sticky='news')

# Home Screen (f1) widgets
target_entry = Entry(f1, font=('Consolas 46 bold'), exportselection=0, foreground=p_blue, justify=CENTER)
target_entry.insert(0, target)
target_entry.pack(expand=YES, side=TOP, fill=X, padx=50)
target_entry.focus_force()

max_population_slider = Scale(f1, from_=3, to=1000, orient=HORIZONTAL, label='Max population',
                              command=lambda value: update_max_population(int(value)))
max_population_slider.set(max_population)
max_population_slider.pack(expand=YES, side=TOP, fill=X, padx=40)

mutation_rate_slider = Scale(f1, from_=0, to=1, orient=HORIZONTAL, label='Mutation rate', resolution=0.0001,
                             command=lambda value: update_mutation_rate(float(value)))
mutation_rate_slider.set(mutation_rate)
mutation_rate_slider.pack(expand=YES, side=TOP, fill=X, padx=40)

f_thres_slider = Scale(f1, from_=0, to=len(target), orient=HORIZONTAL, label='Fitness threshold',
                       command=lambda value: update_f_thres(int(value)))
f_thres_slider.set(f_thres)
f_thres_slider.pack(expand=YES, side=TOP, fill=X, padx=40)

ngen_slider = Scale(f1, from_=1, to=5000, orient=HORIZONTAL, label='Max number of generations',
                    command=lambda value: update_ngen(int(value)))
ngen_slider.set(ngen)
ngen_slider.pack(expand=YES, side=TOP, fill=X, padx=40)

# NOTE: pack() returns None, so keeping the original `button = ...pack(...)`
# assignment was misleading; the widget references are not needed afterwards.
ttk.Button(f1, text='RUN',
           command=lambda: raise_frame(f2, init=True, update_target=True, target_entry=target_entry,
                                       f_thres_slider=f_thres_slider)).pack(side=BOTTOM, pady=50)

# f2 widgets
canvas = Canvas(f2, width=canvas_width, height=canvas_height)
canvas.pack(expand=YES, fill=BOTH, padx=20, pady=15)
ttk.Button(f2, text='EXIT', command=lambda: raise_frame(f1)).pack(side=BOTTOM, pady=15)


def genetic_algorithm_stepwise(population):
    """Run the genetic algorithm, redrawing the canvas once per generation.

    Stops after `ngen` generations or as soon as some individual reaches
    the fitness threshold `f_thres`.
    """
    root.title('Genetic Algorithm')
    for generation in range(ngen):
        # generating new population after selecting, recombining and mutating the existing population
        population = [
            search.mutate(search.recombine(*search.select(2, population, fitness_fn)), gene_pool, mutation_rate)
            for _ in range(len(population))]
        # genome with the highest fitness in the current generation
        current_best = ''.join(max(population, key=fitness_fn))
        # collecting first few examples from the current population
        members = [''.join(x) for x in population][:48]

        # clear the canvas
        canvas.delete('all')
        # displays current best on top of the screen
        canvas.create_text(canvas_width / 2, 40, fill=p_blue, font='Consolas 46 bold', text=current_best)

        # displaying a part of the population on the screen, three columns per row
        for i in range(len(members) // 3):
            canvas.create_text((canvas_width * .175), (canvas_height * .25 + (25 * i)), fill=lp_blue,
                               font='Consolas 16', text=members[3 * i])
            canvas.create_text((canvas_width * .500), (canvas_height * .25 + (25 * i)), fill=lp_blue,
                               font='Consolas 16', text=members[3 * i + 1])
            canvas.create_text((canvas_width * .825), (canvas_height * .25 + (25 * i)), fill=lp_blue,
                               font='Consolas 16', text=members[3 * i + 2])

        # displays current generation number
        canvas.create_text((canvas_width * .5), (canvas_height * 0.95), fill=p_blue, font='Consolas 18 bold',
                           text=f'Generation {generation}')

        # displays blue bar that indicates current maximum fitness compared to maximum possible fitness
        scaling_factor = fitness_fn(current_best) / len(target)
        canvas.create_rectangle(canvas_width * 0.1, 90, canvas_width * 0.9, 100, outline=p_blue)
        canvas.create_rectangle(canvas_width * 0.1, 90, canvas_width * 0.1 + scaling_factor * canvas_width * 0.8, 100,
                                fill=lp_blue)
        canvas.update()

        # checks for completion
        fittest_individual = search.fitness_threshold(fitness_fn, f_thres, population)
        if fittest_individual:
            break


raise_frame(f1)
root.mainloop()
-------------------------------------------------------------------------------- 1 | import os.path 2 | from tkinter import * 3 | 4 | from games import minmax_decision, alpha_beta_player, random_player, TicTacToe 5 | # "gen_state" can be used to generate a game state to apply the algorithm 6 | from tests.test_games import gen_state 7 | 8 | sys.path.append(os.path.join(os.path.dirname(__file__), '..')) 9 | 10 | ttt = TicTacToe() 11 | root = None 12 | buttons = [] 13 | frames = [] 14 | x_pos = [] 15 | o_pos = [] 16 | count = 0 17 | sym = "" 18 | result = None 19 | choices = None 20 | 21 | 22 | def create_frames(root): 23 | """ 24 | This function creates the necessary structure of the game. 25 | """ 26 | frame1 = Frame(root) 27 | frame2 = Frame(root) 28 | frame3 = Frame(root) 29 | frame4 = Frame(root) 30 | create_buttons(frame1) 31 | create_buttons(frame2) 32 | create_buttons(frame3) 33 | buttonExit = Button( 34 | frame4, height=1, width=2, 35 | text="Exit", 36 | command=lambda: exit_game(root)) 37 | buttonExit.pack(side=LEFT) 38 | frame4.pack(side=BOTTOM) 39 | frame3.pack(side=BOTTOM) 40 | frame2.pack(side=BOTTOM) 41 | frame1.pack(side=BOTTOM) 42 | frames.append(frame1) 43 | frames.append(frame2) 44 | frames.append(frame3) 45 | for x in frames: 46 | buttons_in_frame = [] 47 | for y in x.winfo_children(): 48 | buttons_in_frame.append(y) 49 | buttons.append(buttons_in_frame) 50 | buttonReset = Button(frame4, height=1, width=2, 51 | text="Reset", command=lambda: reset_game()) 52 | buttonReset.pack(side=LEFT) 53 | 54 | 55 | def create_buttons(frame): 56 | """ 57 | This function creates the buttons to be pressed/clicked during the game. 
58 | """ 59 | button0 = Button(frame, height=2, width=2, text=" ", 60 | command=lambda: on_click(button0)) 61 | button0.pack(side=LEFT) 62 | button1 = Button(frame, height=2, width=2, text=" ", 63 | command=lambda: on_click(button1)) 64 | button1.pack(side=LEFT) 65 | button2 = Button(frame, height=2, width=2, text=" ", 66 | command=lambda: on_click(button2)) 67 | button2.pack(side=LEFT) 68 | 69 | 70 | # TODO: Add a choice option for the user. 71 | def on_click(button): 72 | """ 73 | This function determines the action of any button. 74 | """ 75 | global ttt, choices, count, sym, result, x_pos, o_pos 76 | 77 | if count % 2 == 0: 78 | sym = "X" 79 | else: 80 | sym = "O" 81 | count += 1 82 | 83 | button.config( 84 | text=sym, 85 | state='disabled', 86 | disabledforeground="red") # For cross 87 | 88 | x, y = get_coordinates(button) 89 | x += 1 90 | y += 1 91 | x_pos.append((x, y)) 92 | state = gen_state(to_move='O', x_positions=x_pos, 93 | o_positions=o_pos) 94 | try: 95 | choice = choices.get() 96 | if "Random" in choice: 97 | a, b = random_player(ttt, state) 98 | elif "Pro" in choice: 99 | a, b = minmax_decision(state, ttt) 100 | else: 101 | a, b = alpha_beta_player(ttt, state) 102 | except (ValueError, IndexError, TypeError) as e: 103 | disable_game() 104 | result.set("It's a draw :|") 105 | return 106 | if 1 <= a <= 3 and 1 <= b <= 3: 107 | o_pos.append((a, b)) 108 | button_to_change = get_button(a - 1, b - 1) 109 | if count % 2 == 0: # Used again, will become handy when user is given the choice of turn. 110 | sym = "X" 111 | else: 112 | sym = "O" 113 | count += 1 114 | 115 | if check_victory(button): 116 | result.set("You win :)") 117 | disable_game() 118 | else: 119 | button_to_change.config(text=sym, state='disabled', 120 | disabledforeground="black") 121 | if check_victory(button_to_change): 122 | result.set("You lose :(") 123 | disable_game() 124 | 125 | 126 | # TODO: Replace "check_victory" by "k_in_row" function. 
127 | def check_victory(button): 128 | """ 129 | This function checks various winning conditions of the game. 130 | """ 131 | # check if previous move caused a win on vertical line 132 | global buttons 133 | x, y = get_coordinates(button) 134 | tt = button['text'] 135 | if buttons[0][y]['text'] == buttons[1][y]['text'] == buttons[2][y]['text'] != " ": 136 | buttons[0][y].config(text="|" + tt + "|") 137 | buttons[1][y].config(text="|" + tt + "|") 138 | buttons[2][y].config(text="|" + tt + "|") 139 | return True 140 | 141 | # check if previous move caused a win on horizontal line 142 | if buttons[x][0]['text'] == buttons[x][1]['text'] == buttons[x][2]['text'] != " ": 143 | buttons[x][0].config(text="--" + tt + "--") 144 | buttons[x][1].config(text="--" + tt + "--") 145 | buttons[x][2].config(text="--" + tt + "--") 146 | return True 147 | 148 | # check if previous move was on the main diagonal and caused a win 149 | if x == y and buttons[0][0]['text'] == buttons[1][1]['text'] == buttons[2][2]['text'] != " ": 150 | buttons[0][0].config(text="\\" + tt + "\\") 151 | buttons[1][1].config(text="\\" + tt + "\\") 152 | buttons[2][2].config(text="\\" + tt + "\\") 153 | return True 154 | 155 | # check if previous move was on the secondary diagonal and caused a win 156 | if x + y == 2 and buttons[0][2]['text'] == buttons[1][1]['text'] == buttons[2][0]['text'] != " ": 157 | buttons[0][2].config(text="/" + tt + "/") 158 | buttons[1][1].config(text="/" + tt + "/") 159 | buttons[2][0].config(text="/" + tt + "/") 160 | return True 161 | 162 | return False 163 | 164 | 165 | def get_coordinates(button): 166 | """ 167 | This function returns the coordinates of the button clicked. 168 | """ 169 | global buttons 170 | for x in range(len(buttons)): 171 | for y in range(len(buttons[x])): 172 | if buttons[x][y] == button: 173 | return x, y 174 | 175 | 176 | def get_button(x, y): 177 | """ 178 | This function returns the button memory location corresponding to a coordinate. 
179 | """ 180 | global buttons 181 | return buttons[x][y] 182 | 183 | 184 | def reset_game(): 185 | """ 186 | This function will reset all the tiles to the initial null value. 187 | """ 188 | global x_pos, o_pos, frames, count 189 | 190 | count = 0 191 | x_pos = [] 192 | o_pos = [] 193 | result.set("Your Turn!") 194 | for x in frames: 195 | for y in x.winfo_children(): 196 | y.config(text=" ", state='normal') 197 | 198 | 199 | def disable_game(): 200 | """ 201 | This function deactivates the game after a win, loss or draw. 202 | """ 203 | global frames 204 | for x in frames: 205 | for y in x.winfo_children(): 206 | y.config(state='disabled') 207 | 208 | 209 | def exit_game(root): 210 | """ 211 | This function will exit the game by killing the root. 212 | """ 213 | root.destroy() 214 | 215 | 216 | if __name__ == "__main__": 217 | global result, choices 218 | 219 | root = Tk() 220 | root.title("TicTacToe") 221 | root.geometry("150x200") # Improved the window geometry 222 | root.resizable(0, 0) # To remove the maximize window option 223 | result = StringVar() 224 | result.set("Your Turn!") 225 | w = Label(root, textvariable=result) 226 | w.pack(side=BOTTOM) 227 | create_frames(root) 228 | choices = StringVar(root) 229 | choices.set("Vs Pro") 230 | menu = OptionMenu(root, choices, "Vs Random", "Vs Pro", "Vs Legend") 231 | menu.pack() 232 | root.mainloop() 233 | -------------------------------------------------------------------------------- /gui/vacuum_agent.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | from tkinter import * 3 | 4 | from agents import * 5 | 6 | sys.path.append(os.path.join(os.path.dirname(__file__), '..')) 7 | 8 | loc_A, loc_B = (0, 0), (1, 0) # The two locations for the Vacuum world 9 | 10 | 11 | class Gui(Environment): 12 | """This GUI environment has two locations, A and B. Each can be Dirty 13 | or Clean. 
The agent perceives its location and the location's 14 | status.""" 15 | 16 | def __init__(self, root, height=300, width=380): 17 | super().__init__() 18 | self.status = {loc_A: 'Clean', 19 | loc_B: 'Clean'} 20 | self.root = root 21 | self.height = height 22 | self.width = width 23 | self.canvas = None 24 | self.buttons = [] 25 | self.create_canvas() 26 | self.create_buttons() 27 | 28 | def thing_classes(self): 29 | """The list of things which can be used in the environment.""" 30 | return [Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent, 31 | TableDrivenVacuumAgent, ModelBasedVacuumAgent] 32 | 33 | def percept(self, agent): 34 | """Returns the agent's location, and the location status (Dirty/Clean).""" 35 | return agent.location, self.status[agent.location] 36 | 37 | def execute_action(self, agent, action): 38 | """Change the location status (Dirty/Clean); track performance. 39 | Score 10 for each dirt cleaned; -1 for each move.""" 40 | if action == 'Right': 41 | agent.location = loc_B 42 | agent.performance -= 1 43 | elif action == 'Left': 44 | agent.location = loc_A 45 | agent.performance -= 1 46 | elif action == 'Suck': 47 | if self.status[agent.location] == 'Dirty': 48 | if agent.location == loc_A: 49 | self.buttons[0].config(bg='white', activebackground='light grey') 50 | else: 51 | self.buttons[1].config(bg='white', activebackground='light grey') 52 | agent.performance += 10 53 | self.status[agent.location] = 'Clean' 54 | 55 | def default_location(self, thing): 56 | """Agents start in either location at random.""" 57 | return random.choice([loc_A, loc_B]) 58 | 59 | def create_canvas(self): 60 | """Creates Canvas element in the GUI.""" 61 | self.canvas = Canvas( 62 | self.root, 63 | width=self.width, 64 | height=self.height, 65 | background='powder blue') 66 | self.canvas.pack(side='bottom') 67 | 68 | def create_buttons(self): 69 | """Creates the buttons required in the GUI.""" 70 | button_left = Button(self.root, height=4, width=12, padx=2, pady=2, 
bg='white') 71 | button_left.config(command=lambda btn=button_left: self.dirt_switch(btn)) 72 | self.buttons.append(button_left) 73 | button_left_window = self.canvas.create_window(130, 200, anchor=N, window=button_left) 74 | button_right = Button(self.root, height=4, width=12, padx=2, pady=2, bg='white') 75 | button_right.config(command=lambda btn=button_right: self.dirt_switch(btn)) 76 | self.buttons.append(button_right) 77 | button_right_window = self.canvas.create_window(250, 200, anchor=N, window=button_right) 78 | 79 | def dirt_switch(self, button): 80 | """Gives user the option to put dirt in any tile.""" 81 | bg_color = button['bg'] 82 | if bg_color == 'saddle brown': 83 | button.config(bg='white', activebackground='light grey') 84 | elif bg_color == 'white': 85 | button.config(bg='saddle brown', activebackground='light goldenrod') 86 | 87 | def read_env(self): 88 | """Reads the current state of the GUI.""" 89 | for i, btn in enumerate(self.buttons): 90 | if i == 0: 91 | if btn['bg'] == 'white': 92 | self.status[loc_A] = 'Clean' 93 | else: 94 | self.status[loc_A] = 'Dirty' 95 | else: 96 | if btn['bg'] == 'white': 97 | self.status[loc_B] = 'Clean' 98 | else: 99 | self.status[loc_B] = 'Dirty' 100 | 101 | def update_env(self, agent): 102 | """Updates the GUI according to the agent's action.""" 103 | self.read_env() 104 | # print(self.status) 105 | before_step = agent.location 106 | self.step() 107 | # print(self.status) 108 | # print(agent.location) 109 | move_agent(self, agent, before_step) 110 | 111 | 112 | def create_agent(env, agent): 113 | """Creates the agent in the GUI and is kept independent of the environment.""" 114 | env.add_thing(agent) 115 | # print(agent.location) 116 | if agent.location == (0, 0): 117 | env.agent_rect = env.canvas.create_rectangle(80, 100, 175, 180, fill='lime green') 118 | env.text = env.canvas.create_text(128, 140, font="Helvetica 10 bold italic", text="Agent") 119 | else: 120 | env.agent_rect = 
env.canvas.create_rectangle(200, 100, 295, 180, fill='lime green') 121 | env.text = env.canvas.create_text(248, 140, font="Helvetica 10 bold italic", text="Agent") 122 | 123 | 124 | def move_agent(env, agent, before_step): 125 | """Moves the agent in the GUI when 'next' button is pressed.""" 126 | if agent.location == before_step: 127 | pass 128 | else: 129 | if agent.location == (1, 0): 130 | env.canvas.move(env.text, 120, 0) 131 | env.canvas.move(env.agent_rect, 120, 0) 132 | elif agent.location == (0, 0): 133 | env.canvas.move(env.text, -120, 0) 134 | env.canvas.move(env.agent_rect, -120, 0) 135 | 136 | 137 | # TODO: Add more agents to the environment. 138 | # TODO: Expand the environment to XYEnvironment. 139 | if __name__ == "__main__": 140 | root = Tk() 141 | root.title("Vacuum Environment") 142 | root.geometry("420x380") 143 | root.resizable(0, 0) 144 | frame = Frame(root, bg='black') 145 | # reset_button = Button(frame, text='Reset', height=2, width=6, padx=2, pady=2, command=None) 146 | # reset_button.pack(side='left') 147 | next_button = Button(frame, text='Next', height=2, width=6, padx=2, pady=2) 148 | next_button.pack(side='left') 149 | frame.pack(side='bottom') 150 | env = Gui(root) 151 | agent = ReflexVacuumAgent() 152 | create_agent(env, agent) 153 | next_button.config(command=lambda: env.update_env(agent)) 154 | root.mainloop() 155 | -------------------------------------------------------------------------------- /gui/xy_vacuum_environment.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | from tkinter import * 3 | 4 | from agents import * 5 | 6 | sys.path.append(os.path.join(os.path.dirname(__file__), '..')) 7 | 8 | 9 | class Gui(VacuumEnvironment): 10 | """This is a two-dimensional GUI environment. Each location may be 11 | dirty, clean or can have a wall. The user can change these at each step. 
12 | """ 13 | xi, yi = (0, 0) 14 | perceptible_distance = 1 15 | 16 | def __init__(self, root, width=7, height=7, elements=None): 17 | super().__init__(width, height) 18 | if elements is None: 19 | elements = ['D', 'W'] 20 | self.root = root 21 | self.create_frames() 22 | self.create_buttons() 23 | self.create_walls() 24 | self.elements = elements 25 | 26 | def create_frames(self): 27 | """Adds frames to the GUI environment.""" 28 | self.frames = [] 29 | for _ in range(7): 30 | frame = Frame(self.root, bg='grey') 31 | frame.pack(side='bottom') 32 | self.frames.append(frame) 33 | 34 | def create_buttons(self): 35 | """Adds buttons to the respective frames in the GUI.""" 36 | self.buttons = [] 37 | for frame in self.frames: 38 | button_row = [] 39 | for _ in range(7): 40 | button = Button(frame, height=3, width=5, padx=2, pady=2) 41 | button.config( 42 | command=lambda btn=button: self.display_element(btn)) 43 | button.pack(side='left') 44 | button_row.append(button) 45 | self.buttons.append(button_row) 46 | 47 | def create_walls(self): 48 | """Creates the outer boundary walls which do not move.""" 49 | for row, button_row in enumerate(self.buttons): 50 | if row == 0 or row == len(self.buttons) - 1: 51 | for button in button_row: 52 | button.config(text='W', state='disabled', 53 | disabledforeground='black') 54 | else: 55 | button_row[0].config( 56 | text='W', state='disabled', disabledforeground='black') 57 | button_row[len(button_row) - 1].config(text='W', 58 | state='disabled', disabledforeground='black') 59 | # Place the agent in the centre of the grid. 
60 | self.buttons[3][3].config( 61 | text='A', state='disabled', disabledforeground='black') 62 | 63 | def display_element(self, button): 64 | """Show the things on the GUI.""" 65 | txt = button['text'] 66 | if txt != 'A': 67 | if txt == 'W': 68 | button.config(text='D') 69 | elif txt == 'D': 70 | button.config(text='') 71 | elif txt == '': 72 | button.config(text='W') 73 | 74 | def execute_action(self, agent, action): 75 | """Determines the action the agent performs.""" 76 | xi, yi = (self.xi, self.yi) 77 | if action == 'Suck': 78 | dirt_list = self.list_things_at(agent.location, Dirt) 79 | if dirt_list: 80 | dirt = dirt_list[0] 81 | agent.performance += 100 82 | self.delete_thing(dirt) 83 | self.buttons[xi][yi].config(text='', state='normal') 84 | xf, yf = agent.location 85 | self.buttons[xf][yf].config( 86 | text='A', state='disabled', disabledforeground='black') 87 | 88 | else: 89 | agent.bump = False 90 | if action == 'TurnRight': 91 | agent.direction += Direction.R 92 | elif action == 'TurnLeft': 93 | agent.direction += Direction.L 94 | elif action == 'Forward': 95 | agent.bump = self.move_to(agent, agent.direction.move_forward(agent.location)) 96 | if not agent.bump: 97 | self.buttons[xi][yi].config(text='', state='normal') 98 | xf, yf = agent.location 99 | self.buttons[xf][yf].config( 100 | text='A', state='disabled', disabledforeground='black') 101 | 102 | if action != 'NoOp': 103 | agent.performance -= 1 104 | 105 | def read_env(self): 106 | """Reads the current state of the GUI environment.""" 107 | for i, btn_row in enumerate(self.buttons): 108 | for j, btn in enumerate(btn_row): 109 | if (i != 0 and i != len(self.buttons) - 1) and (j != 0 and j != len(btn_row) - 1): 110 | agt_loc = self.agents[0].location 111 | if self.some_things_at((i, j)) and (i, j) != agt_loc: 112 | for thing in self.list_things_at((i, j)): 113 | self.delete_thing(thing) 114 | if btn['text'] == self.elements[0]: 115 | self.add_thing(Dirt(), (i, j)) 116 | elif btn['text'] == 
self.elements[1]: 117 | self.add_thing(Wall(), (i, j)) 118 | 119 | def update_env(self): 120 | """Updates the GUI environment according to the current state.""" 121 | self.read_env() 122 | agt = self.agents[0] 123 | previous_agent_location = agt.location 124 | self.xi, self.yi = previous_agent_location 125 | self.step() 126 | xf, yf = agt.location 127 | 128 | def reset_env(self, agt): 129 | """Resets the GUI environment to the initial state.""" 130 | self.read_env() 131 | for i, btn_row in enumerate(self.buttons): 132 | for j, btn in enumerate(btn_row): 133 | if (i != 0 and i != len(self.buttons) - 1) and (j != 0 and j != len(btn_row) - 1): 134 | if self.some_things_at((i, j)): 135 | for thing in self.list_things_at((i, j)): 136 | self.delete_thing(thing) 137 | btn.config(text='', state='normal') 138 | self.add_thing(agt, location=(3, 3)) 139 | self.buttons[3][3].config( 140 | text='A', state='disabled', disabledforeground='black') 141 | 142 | 143 | def XYReflexAgentProgram(percept): 144 | """The modified SimpleReflexAgentProgram for the GUI environment.""" 145 | status, bump = percept 146 | if status == 'Dirty': 147 | return 'Suck' 148 | 149 | if bump == 'Bump': 150 | value = random.choice((1, 2)) 151 | else: 152 | value = random.choice((1, 2, 3, 4)) # 1-right, 2-left, others-forward 153 | 154 | if value == 1: 155 | return 'TurnRight' 156 | elif value == 2: 157 | return 'TurnLeft' 158 | else: 159 | return 'Forward' 160 | 161 | 162 | class XYReflexAgent(Agent): 163 | """The modified SimpleReflexAgent for the GUI environment.""" 164 | 165 | def __init__(self, program=None): 166 | super().__init__(program) 167 | self.location = (3, 3) 168 | self.direction = Direction("up") 169 | 170 | 171 | # TODO: Check the coordinate system. 172 | # TODO: Give manual choice for agent's location. 
173 | if __name__ == "__main__": 174 | root = Tk() 175 | root.title("Vacuum Environment") 176 | root.geometry("420x440") 177 | root.resizable(0, 0) 178 | frame = Frame(root, bg='black') 179 | reset_button = Button(frame, text='Reset', height=2, 180 | width=6, padx=2, pady=2) 181 | reset_button.pack(side='left') 182 | next_button = Button(frame, text='Next', height=2, 183 | width=6, padx=2, pady=2) 184 | next_button.pack(side='left') 185 | frame.pack(side='bottom') 186 | env = Gui(root) 187 | agt = XYReflexAgent(program=XYReflexAgentProgram) 188 | env.add_thing(agt, location=(3, 3)) 189 | next_button.config(command=env.update_env) 190 | reset_button.config(command=lambda: env.reset_env(agt)) 191 | root.mainloop() 192 | -------------------------------------------------------------------------------- /homework/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/homework/.gitkeep -------------------------------------------------------------------------------- /images/-0.04.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/-0.04.jpg -------------------------------------------------------------------------------- /images/-0.4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/-0.4.jpg -------------------------------------------------------------------------------- /images/-4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/-4.jpg -------------------------------------------------------------------------------- /images/4.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/4.jpg -------------------------------------------------------------------------------- /images/IMAGE-CREDITS: -------------------------------------------------------------------------------- 1 | PHOTO CREDITS 2 | 3 | Image After http://www.imageafter.com/ 4 | 5 | b15woods003.jpg 6 | (Cropped to 764x764 and scaled to 50x50 to make wall-icon.jpg 7 | by Gregory Weber) 8 | 9 | Noctua Graphics, http://www.noctua-graphics.de/english/fraset_e.htm 10 | 11 | dirt05.jpg 512x512 12 | (Scaled to 50x50 to make dirt05-icon.jpg by Gregory Weber) 13 | 14 | Gregory Weber 15 | 16 | dirt.svg, dirt.png 17 | vacuum.svg, vacuum.png 18 | -------------------------------------------------------------------------------- /images/aima3e_big.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/aima3e_big.jpg -------------------------------------------------------------------------------- /images/aima_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/aima_logo.png -------------------------------------------------------------------------------- /images/bayesnet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/bayesnet.png -------------------------------------------------------------------------------- /images/broxrevised.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/broxrevised.png -------------------------------------------------------------------------------- /images/cake_graph.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/cake_graph.jpg -------------------------------------------------------------------------------- /images/decisiontree_fruit.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/decisiontree_fruit.jpg -------------------------------------------------------------------------------- /images/dirt05-icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/dirt05-icon.jpg -------------------------------------------------------------------------------- /images/ensemble_learner.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/ensemble_learner.jpg -------------------------------------------------------------------------------- /images/fig_5_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/fig_5_2.png -------------------------------------------------------------------------------- /images/ge0.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/ge0.jpg 
-------------------------------------------------------------------------------- /images/ge1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/ge1.jpg -------------------------------------------------------------------------------- /images/ge2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/ge2.jpg -------------------------------------------------------------------------------- /images/ge4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/ge4.jpg -------------------------------------------------------------------------------- /images/general_learning_agent.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/general_learning_agent.jpg -------------------------------------------------------------------------------- /images/grid_mdp.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/grid_mdp.jpg -------------------------------------------------------------------------------- /images/grid_mdp_agent.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/grid_mdp_agent.jpg -------------------------------------------------------------------------------- /images/hillclimb-tsp.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/hillclimb-tsp.png -------------------------------------------------------------------------------- /images/knn_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/knn_plot.png -------------------------------------------------------------------------------- /images/knowledge_FOIL_grandparent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/knowledge_FOIL_grandparent.png -------------------------------------------------------------------------------- /images/knowledge_foil_family.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/knowledge_foil_family.png -------------------------------------------------------------------------------- /images/makefile: -------------------------------------------------------------------------------- 1 | # makefile for images 2 | 3 | Sources = dirt.svg vacuum.svg 4 | 5 | Targets = $(Sources:.svg=.png) 6 | 7 | ImageScale = 50x50 8 | 9 | Temporary = tmp.jpg 10 | 11 | .PHONY: all 12 | 13 | all: $(Targets) 14 | 15 | .PHONY: clean 16 | 17 | clean: 18 | rm -f $(Targets) $(Temporary) 19 | 20 | %.png: %.svg 21 | convert -scale $(ImageScale) $< $@ 22 | 23 | %-icon.jpg: %.svg 24 | convert -scale $(ImageScale) $< $@ 25 | 26 | %-icon.jpg: %.jpg 27 | convert -scale $(ImageScale) $< $@ 28 | 29 | wall-icon.jpg: b15woods003.jpg 30 | convert -crop 764x764+0+0 $< tmp.jpg 31 | convert -resize 50x50+0+0 tmp.jpg $@ 32 | 33 | vacuum-icon.jpg: vacuum.svg 34 | convert -scale $(ImageScale) -transparent white $< $@ 35 | 
-------------------------------------------------------------------------------- /images/maze.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/maze.png -------------------------------------------------------------------------------- /images/mdp-a.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/mdp-a.png -------------------------------------------------------------------------------- /images/mdp-b.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/mdp-b.png -------------------------------------------------------------------------------- /images/mdp-c.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/mdp-c.png -------------------------------------------------------------------------------- /images/mdp-d.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/mdp-d.png -------------------------------------------------------------------------------- /images/mdp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/mdp.png -------------------------------------------------------------------------------- /images/model_based_reflex_agent.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/model_based_reflex_agent.jpg -------------------------------------------------------------------------------- /images/model_goal_based_agent.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/model_goal_based_agent.jpg -------------------------------------------------------------------------------- /images/model_utility_based_agent.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/model_utility_based_agent.jpg -------------------------------------------------------------------------------- /images/neural_net.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/neural_net.png -------------------------------------------------------------------------------- /images/parse_tree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/parse_tree.png -------------------------------------------------------------------------------- /images/perceptron.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/perceptron.png -------------------------------------------------------------------------------- /images/pluralityLearner_plot.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/pluralityLearner_plot.png -------------------------------------------------------------------------------- /images/point_crossover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/point_crossover.png -------------------------------------------------------------------------------- /images/pop.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/pop.jpg -------------------------------------------------------------------------------- /images/queen_s.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/queen_s.png -------------------------------------------------------------------------------- /images/random_forest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/random_forest.png -------------------------------------------------------------------------------- /images/refinement.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/refinement.png -------------------------------------------------------------------------------- /images/restaurant.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/restaurant.png 
-------------------------------------------------------------------------------- /images/romania_map.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/romania_map.png -------------------------------------------------------------------------------- /images/simple_problem_solving_agent.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/simple_problem_solving_agent.jpg -------------------------------------------------------------------------------- /images/simple_reflex_agent.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/simple_reflex_agent.jpg -------------------------------------------------------------------------------- /images/sprinklernet.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/sprinklernet.jpg -------------------------------------------------------------------------------- /images/stapler1-test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/stapler1-test.png -------------------------------------------------------------------------------- /images/uniform_crossover.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/uniform_crossover.png -------------------------------------------------------------------------------- 
/images/vacuum-icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/vacuum-icon.jpg -------------------------------------------------------------------------------- /images/vacuum.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 22 | 24 | 42 | 44 | 45 | 47 | image/svg+xml 48 | 50 | 51 | 52 | 53 | 57 | 67 | 79 | 93 | 108 | 124 | 137 | 149 | 150 | 151 | -------------------------------------------------------------------------------- /images/wall-icon.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/images/wall-icon.jpg -------------------------------------------------------------------------------- /img/19.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/img/19.png -------------------------------------------------------------------------------- /img/23_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/img/23_1.png -------------------------------------------------------------------------------- /img/23_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/img/23_2.png -------------------------------------------------------------------------------- /img/25_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/img/25_1.png 
-------------------------------------------------------------------------------- /img/25_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/img/25_2.png -------------------------------------------------------------------------------- /img/27.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/img/27.png -------------------------------------------------------------------------------- /img/28.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/img/28.png -------------------------------------------------------------------------------- /img/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/img/6.png -------------------------------------------------------------------------------- /img/7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/img/7.png -------------------------------------------------------------------------------- /img/decision_tree_learner_restaurant.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/img/decision_tree_learner_restaurant.png -------------------------------------------------------------------------------- /img/decision_tree_restaurant_result.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/img/decision_tree_restaurant_result.png -------------------------------------------------------------------------------- /img/reward_neg_0.02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/img/reward_neg_0.02.png -------------------------------------------------------------------------------- /ipyviews.py: -------------------------------------------------------------------------------- 1 | from IPython.display import HTML, display, clear_output 2 | from collections import defaultdict 3 | from agents import PolygonObstacle 4 | import time 5 | import json 6 | import copy 7 | import __main__ 8 | 9 | # ______________________________________________________________________________ 10 | # Continuous environment 11 | 12 | 13 | _CONTINUOUS_WORLD_HTML = ''' 14 |
15 | 16 |
17 | 18 | 22 | ''' # noqa 23 | 24 | with open('js/continuousworld.js', 'r') as js_file: 25 | _JS_CONTINUOUS_WORLD = js_file.read() 26 | 27 | 28 | class ContinuousWorldView: 29 | """ View for continuousworld Implementation in agents.py """ 30 | 31 | def __init__(self, world, fill="#AAA"): 32 | self.time = time.time() 33 | self.world = world 34 | self.width = world.width 35 | self.height = world.height 36 | 37 | def object_name(self): 38 | globals_in_main = {x: getattr(__main__, x) for x in dir(__main__)} 39 | for x in globals_in_main: 40 | if isinstance(globals_in_main[x], type(self)): 41 | if globals_in_main[x].time == self.time: 42 | return x 43 | 44 | def handle_add_obstacle(self, vertices): 45 | """ Vertices must be a nestedtuple. This method 46 | is called from kernel.execute on completion of 47 | a polygon. """ 48 | self.world.add_obstacle(vertices) 49 | self.show() 50 | 51 | def handle_remove_obstacle(self): 52 | return NotImplementedError 53 | 54 | def get_polygon_obstacles_coordinates(self): 55 | obstacle_coordiantes = [] 56 | for thing in self.world.things: 57 | if isinstance(thing, PolygonObstacle): 58 | obstacle_coordiantes.append(thing.coordinates) 59 | return obstacle_coordiantes 60 | 61 | def show(self): 62 | clear_output() 63 | total_html = _CONTINUOUS_WORLD_HTML.format(self.width, self.height, self.object_name(), 64 | str(self.get_polygon_obstacles_coordinates()), 65 | _JS_CONTINUOUS_WORLD) 66 | display(HTML(total_html)) 67 | 68 | 69 | # ______________________________________________________________________________ 70 | # Grid environment 71 | 72 | _GRID_WORLD_HTML = ''' 73 |
74 | 75 |
76 | 77 |
78 |
79 | 83 | ''' 84 | 85 | with open('js/gridworld.js', 'r') as js_file: 86 | _JS_GRID_WORLD = js_file.read() 87 | 88 | 89 | class GridWorldView: 90 | """ View for grid world. Uses XYEnviornment in agents.py as model. 91 | world: an instance of XYEnviornment. 92 | block_size: size of individual blocks in pixes. 93 | default_fill: color of blocks. A hex value or name should be passed. 94 | """ 95 | 96 | def __init__(self, world, block_size=30, default_fill="white"): 97 | self.time = time.time() 98 | self.world = world 99 | self.labels = defaultdict(str) # locations as keys 100 | self.representation = {"default": {"type": "color", "source": default_fill}} 101 | self.block_size = block_size 102 | 103 | def object_name(self): 104 | globals_in_main = {x: getattr(__main__, x) for x in dir(__main__)} 105 | for x in globals_in_main: 106 | if isinstance(globals_in_main[x], type(self)): 107 | if globals_in_main[x].time == self.time: 108 | return x 109 | 110 | def set_label(self, coordinates, label): 111 | """ Add lables to a particular block of grid. 112 | coordinates: a tuple of (row, column). 113 | rows and columns are 0 indexed. 114 | """ 115 | self.labels[coordinates] = label 116 | 117 | def set_representation(self, thing, repr_type, source): 118 | """ Set the representation of different things in the 119 | environment. 120 | thing: a thing object. 121 | repr_type : type of representation can be either "color" or "img" 122 | source: Hex value in case of color. Image path in case of image. 123 | """ 124 | thing_class_name = thing.__class__.__name__ 125 | if repr_type not in ("img", "color"): 126 | raise ValueError('Invalid repr_type passed. Possible types are img/color') 127 | self.representation[thing_class_name] = {"type": repr_type, "source": source} 128 | 129 | def handle_click(self, coordinates): 130 | """ This method needs to be overidden. Make sure to include a 131 | self.show() call at the end. 
""" 132 | self.show() 133 | 134 | def map_to_render(self): 135 | default_representation = {"val": "default", "tooltip": ""} 136 | world_map = [[copy.deepcopy(default_representation) for _ in range(self.world.width)] 137 | for _ in range(self.world.height)] 138 | 139 | for thing in self.world.things: 140 | row, column = thing.location 141 | thing_class_name = thing.__class__.__name__ 142 | if thing_class_name not in self.representation: 143 | raise KeyError('Representation not found for {}'.format(thing_class_name)) 144 | world_map[row][column]["val"] = thing.__class__.__name__ 145 | 146 | for location, label in self.labels.items(): 147 | row, column = location 148 | world_map[row][column]["tooltip"] = label 149 | 150 | return json.dumps(world_map) 151 | 152 | def show(self): 153 | clear_output() 154 | total_html = _GRID_WORLD_HTML.format( 155 | self.object_name(), self.map_to_render(), 156 | self.block_size, json.dumps(self.representation), _JS_GRID_WORLD) 157 | display(HTML(total_html)) 158 | -------------------------------------------------------------------------------- /js/canvas.js: -------------------------------------------------------------------------------- 1 | /* 2 | JavaScript functions that are executed by running the corresponding methods of a Canvas object 3 | Donot use these functions by making a js file. Instead use the python Canvas class. 
4 | See canvas.py for help on how to use the Canvas class to draw on the HTML Canvas 5 | */ 6 | 7 | 8 | //Manages the output of code executed in IPython kernel 9 | function output_callback(out, block){ 10 | console.log(out); 11 | //Handle error in python 12 | if(out.msg_type == "error"){ 13 | console.log("Error in python script!"); 14 | console.log(out.content); 15 | return ; 16 | } 17 | script = out.content.data['text/html']; 18 | script = script.substr(8, script.length - 17); 19 | eval(script) 20 | } 21 | 22 | //Handles mouse click by calling mouse_click of Canvas object with the co-ordinates as arguments 23 | function click_callback(element, event, varname){ 24 | var rect = element.getBoundingClientRect(); 25 | var x = event.clientX - rect.left; 26 | var y = event.clientY - rect.top; 27 | var kernel = IPython.notebook.kernel; 28 | var exec_str = varname + ".mouse_click(" + String(x) + ", " + String(y) + ")"; 29 | console.log(exec_str); 30 | kernel.execute(exec_str,{'iopub': {'output': output_callback}}, {silent: false}); 31 | } 32 | 33 | function rgbToHex(r,g,b){ 34 | var hexValue=(r<<16) + (g<<8) + (b<<0); 35 | var hexString=hexValue.toString(16); 36 | hexString ='#' + Array(7-hexString.length).join('0') + hexString; //Add 0 padding 37 | return hexString; 38 | } 39 | 40 | function toRad(x){ 41 | return x*Math.PI/180; 42 | } 43 | 44 | //Canvas class to store variables 45 | function Canvas(id){ 46 | this.canvas = document.getElementById(id); 47 | this.ctx = this.canvas.getContext("2d"); 48 | this.WIDTH = this.canvas.width; 49 | this.HEIGHT = this.canvas.height; 50 | this.MOUSE = {x:0,y:0}; 51 | } 52 | 53 | //Sets the fill color with which shapes are filled 54 | Canvas.prototype.fill = function(r, g, b){ 55 | this.ctx.fillStyle = rgbToHex(r,g,b); 56 | } 57 | 58 | //Set the stroke color 59 | Canvas.prototype.stroke = function(r, g, b){ 60 | this.ctx.strokeStyle = rgbToHex(r,g,b); 61 | } 62 | 63 | //Set width of the lines/strokes 64 | Canvas.prototype.strokeWidth = 
function(w){ 65 | this.ctx.lineWidth = w; 66 | } 67 | 68 | //Draw a rectangle with top left at (x,y) with 'w' width and 'h' height 69 | Canvas.prototype.rect = function(x, y, w, h){ 70 | this.ctx.fillRect(x,y,w,h); 71 | } 72 | 73 | //Draw a line with (x1, y1) and (x2, y2) as end points 74 | Canvas.prototype.line = function(x1, y1, x2, y2){ 75 | this.ctx.beginPath(); 76 | this.ctx.moveTo(x1, y1); 77 | this.ctx.lineTo(x2, y2); 78 | this.ctx.stroke(); 79 | } 80 | 81 | //Draw an arc with (x, y) as centre, 'r' as radius from angles start to stop 82 | Canvas.prototype.arc = function(x, y, r, start, stop){ 83 | this.ctx.beginPath(); 84 | this.ctx.arc(x, y, r, toRad(start), toRad(stop)); 85 | this.ctx.stroke(); 86 | } 87 | 88 | //Clear the HTML canvas 89 | Canvas.prototype.clear = function(){ 90 | this.ctx.clearRect(0, 0, this.WIDTH, this.HEIGHT); 91 | } 92 | 93 | //Change font, size and style 94 | Canvas.prototype.font = function(font_str){ 95 | this.ctx.font = font_str; 96 | } 97 | 98 | //Draws "filled" text on the canvas 99 | Canvas.prototype.fill_text = function(text, x, y){ 100 | this.ctx.fillText(text, x, y); 101 | } 102 | 103 | //Write text on the canvas 104 | Canvas.prototype.stroke_text = function(text, x, y){ 105 | this.ctx.strokeText(text, x, y); 106 | } 107 | 108 | 109 | //Test if the canvas functions are working 110 | Canvas.prototype.test_run = function(){ 111 | var dbg = false; 112 | if(dbg) 113 | alert("1"); 114 | this.clear(); 115 | if(dbg) 116 | alert("2"); 117 | this.fill(0, 200, 0); 118 | if(dbg) 119 | alert("3"); 120 | this.rect(this.MOUSE.x, this.MOUSE.y, 100, 200); 121 | if(dbg) 122 | alert("4"); 123 | this.stroke(0, 0, 50); 124 | if(dbg) 125 | alert("5"); 126 | this.line(0, 0, 100, 100); 127 | if(dbg) 128 | alert("6"); 129 | this.stroke(200, 200, 200); 130 | if(dbg) 131 | alert("7"); 132 | this.arc(200, 100, 50, 0, 360); 133 | if(dbg) 134 | alert("8"); 135 | } 136 | -------------------------------------------------------------------------------- 
/js/continuousworld.js: -------------------------------------------------------------------------------- 1 | var latest_output_area ="NONE"; // Jquery object for the DOM element of output area which was used most recently 2 | function handle_output(out, block){ 3 | var output = out.content.data["text/html"]; 4 | latest_output_area.html(output); 5 | } 6 | function polygon_complete(canvas, vertices){ 7 | latest_output_area = $(canvas).parents('.output_subarea'); 8 | var world_object_name = canvas.dataset.world_name; 9 | var command = world_object_name + ".handle_add_obstacle(" + JSON.stringify(vertices) + ")"; 10 | console.log("Executing Command: " + command); 11 | var kernel = IPython.notebook.kernel; 12 | var callbacks = { 'iopub' : {'output' : handle_output}}; 13 | kernel.execute(command,callbacks); 14 | } 15 | var canvas , ctx; 16 | function drawPolygon(array) { 17 | ctx.fillStyle = '#f00'; 18 | ctx.beginPath(); 19 | ctx.moveTo(array[0][0],array[0][1]); 20 | for(var i = 1;i1) 40 | { 41 | drawPoint(pArray[0][0],pArray[0][1]); 42 | } 43 | //check overlap 44 | if(ctx.isPointInPath(x, y) && (pArray.length>1)) { 45 | //Do something 46 | drawPolygon(pArray); 47 | polygon_complete(canvas,pArray); 48 | } 49 | else { 50 | var point = new Array(); 51 | point.push(x,y); 52 | pArray.push(point); 53 | } 54 | } 55 | function drawPoint(x, y) { 56 | ctx.beginPath(); 57 | ctx.arc(x, y, 5, 0, Math.PI*2); 58 | ctx.fillStyle = '#00f'; 59 | ctx.fill(); 60 | ctx.closePath(); 61 | } 62 | function initalizeObstacles(objects) { 63 | canvas = $('canvas.main-robo-world').get(0); 64 | ctx = canvas.getContext('2d'); 65 | $('canvas.main-robo-world').removeClass('main-robo-world'); 66 | for(var i=0;i').attr({height:size,width:size,src:val["source"]}).data({name:i,loaded:false}).load(function(){ 32 | // Check for all image loaded 33 | var execute=true; 34 | $(this).data("loaded",true); 35 | $.each($imgArray, function(i, val) { 36 | if(!$(this).data("loaded")) { 37 | execute=false; 38 | // exit 
on unloaded image 39 | return false; 40 | } 41 | }); 42 | if (execute) { 43 | // Converting loaded image to canvas covering block size. 44 | $.each($imgArray, function(i, val) { 45 | $imgArray[i] = $('').attr({width:size,height:size}).get(0); 46 | $imgArray[i].getContext('2d').drawImage(val.get(0),0,0,size,size); 47 | }); 48 | // initialize the world 49 | initializeWorld(); 50 | } 51 | }); 52 | } 53 | }); 54 | 55 | if(!hasImg) { 56 | initializeWorld(); 57 | } 58 | 59 | function initializeWorld(){ 60 | var $parentDiv = $('div.map-grid-world'); 61 | // remove object reference 62 | $('div.map-grid-world').removeClass('map-grid-world'); 63 | // get some info about the canvas 64 | var row = state.length; 65 | var column = state[0].length; 66 | var canvas = $parentDiv.find('canvas').get(0); 67 | var ctx = canvas.getContext('2d'); 68 | canvas.width = size * column; 69 | canvas.height = size * row; 70 | 71 | //Initialize previous positions 72 | for(var i=0;i=0 && gx=0 && gy self.cost(variable): 67 | return self.request(variable) 68 | 69 | return self.decnet.best_action() 70 | 71 | def request(self, variable): 72 | """Return the value of the given random variable as the next percept""" 73 | raise NotImplementedError 74 | 75 | def cost(self, var): 76 | """Return the cost of obtaining evidence through tests, consultants or questions""" 77 | raise NotImplementedError 78 | 79 | def vpi_cost_ratio(self, variables): 80 | """Return the VPI to cost ratio for the given variables""" 81 | v_by_c = [] 82 | for var in variables: 83 | v_by_c.append(self.vpi(var) / self.cost(var)) 84 | return v_by_c 85 | 86 | def vpi(self, variable): 87 | """Return VPI for a given variable""" 88 | vpi = 0.0 89 | prob_dist = self.infer(variable, self.observation, self.decnet).prob 90 | for item, _ in prob_dist.items(): 91 | post_prob = prob_dist[item] 92 | new_observation = list(self.observation) 93 | new_observation.append(item) 94 | expected_utility = self.decnet.get_expected_utility(variable, 
new_observation) 95 | vpi += post_prob * expected_utility 96 | 97 | vpi -= self.decnet.get_expected_utility(variable, self.observation) 98 | return vpi 99 | 100 | 101 | # _________________________________________________________________________ 102 | # chapter 25 Robotics 103 | # TODO: Implement continuous map for MonteCarlo similar to Fig25.10 from the book 104 | 105 | 106 | class MCLmap: 107 | """Map which provides probability distributions and sensor readings. 108 | Consists of discrete cells which are either an obstacle or empty""" 109 | 110 | def __init__(self, m): 111 | self.m = m 112 | self.nrows = len(m) 113 | self.ncols = len(m[0]) 114 | # list of empty spaces in the map 115 | self.empty = [(i, j) for i in range(self.nrows) for j in range(self.ncols) if not m[i][j]] 116 | 117 | def sample(self): 118 | """Returns a random kinematic state possible in the map""" 119 | pos = random.choice(self.empty) 120 | # 0N 1E 2S 3W 121 | orient = random.choice(range(4)) 122 | kin_state = pos + (orient,) 123 | return kin_state 124 | 125 | def ray_cast(self, sensor_num, kin_state): 126 | """Returns distace to nearest obstacle or map boundary in the direction of sensor""" 127 | pos = kin_state[:2] 128 | orient = kin_state[2] 129 | # sensor layout when orientation is 0 (towards North) 130 | # 0 131 | # 3R1 132 | # 2 133 | delta = ((sensor_num % 2 == 0) * (sensor_num - 1), (sensor_num % 2 == 1) * (2 - sensor_num)) 134 | # sensor direction changes based on orientation 135 | for _ in range(orient): 136 | delta = (delta[1], -delta[0]) 137 | range_count = 0 138 | while (0 <= pos[0] < self.nrows) and (0 <= pos[1] < self.nrows) and (not self.m[pos[0]][pos[1]]): 139 | pos = vector_add(pos, delta) 140 | range_count += 1 141 | return range_count 142 | 143 | 144 | def monte_carlo_localization(a, z, N, P_motion_sample, P_sensor, m, S=None): 145 | """Monte Carlo localization algorithm from Fig 25.9""" 146 | 147 | def ray_cast(sensor_num, kin_state, m): 148 | return m.ray_cast(sensor_num, 
kin_state) 149 | 150 | M = len(z) 151 | W = [0] * N 152 | S_ = [0] * N 153 | W_ = [0] * N 154 | v = a['v'] 155 | w = a['w'] 156 | 157 | if S is None: 158 | S = [m.sample() for _ in range(N)] 159 | 160 | for i in range(N): 161 | S_[i] = P_motion_sample(S[i], v, w) 162 | W_[i] = 1 163 | for j in range(M): 164 | z_ = ray_cast(j, S_[i], m) 165 | W_[i] = W_[i] * P_sensor(z[j], z_) 166 | 167 | S = weighted_sample_with_replacement(N, S_, W_) 168 | return S 169 | -------------------------------------------------------------------------------- /notebooks/chapter19/images/autoencoder.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter19/images/autoencoder.png -------------------------------------------------------------------------------- /notebooks/chapter19/images/backprop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter19/images/backprop.png -------------------------------------------------------------------------------- /notebooks/chapter19/images/corss_entropy_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter19/images/corss_entropy_plot.png -------------------------------------------------------------------------------- /notebooks/chapter19/images/mse_plot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter19/images/mse_plot.png -------------------------------------------------------------------------------- /notebooks/chapter19/images/nn.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter19/images/nn.png -------------------------------------------------------------------------------- /notebooks/chapter19/images/nn_steps.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter19/images/nn_steps.png -------------------------------------------------------------------------------- /notebooks/chapter19/images/perceptron.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter19/images/perceptron.png -------------------------------------------------------------------------------- /notebooks/chapter19/images/rnn_connections.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter19/images/rnn_connections.png -------------------------------------------------------------------------------- /notebooks/chapter19/images/rnn_unit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter19/images/rnn_unit.png -------------------------------------------------------------------------------- /notebooks/chapter19/images/rnn_units.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter19/images/rnn_units.png 
-------------------------------------------------------------------------------- /notebooks/chapter19/images/vanilla.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter19/images/vanilla.png -------------------------------------------------------------------------------- /notebooks/chapter21/Active Reinforcement Learning.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# ACTIVE REINFORCEMENT LEARNING\n", 8 | "\n", 9 | "This notebook mainly focuses on active reinforce learning algorithms. For a general introduction to reinforcement learning and passive algorithms, please refer to the notebook of **[Passive Reinforcement Learning](./Passive%20Reinforcement%20Learning.ipynb)**.\n", 10 | "\n", 11 | "Unlike Passive Reinforcement Learning in Active Reinforcement Learning, we are not bound by a policy pi and we need to select our actions. In other words, the agent needs to learn an optimal policy. The fundamental tradeoff the agent needs to face is that of exploration vs. exploitation. \n", 12 | "\n", 13 | "## QLearning Agent\n", 14 | "\n", 15 | "The QLearningAgent class in the rl module implements the Agent Program described in **Fig 21.8** of the AIMA Book. In Q-Learning the agent learns an action-value function Q which gives the utility of taking a given action in a particular state. Q-Learning does not require a transition model and hence is a model-free method. Let us look into the source before we see some usage examples." 
16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": null, 21 | "metadata": {}, 22 | "outputs": [], 23 | "source": [ 24 | "%psource QLearningAgent" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": {}, 30 | "source": [ 31 | "The Agent Program can be obtained by creating the instance of the class by passing the appropriate parameters. Because of the __ call __ method the object that is created behaves like a callable and returns an appropriate action as most Agent Programs do. To instantiate the object we need a `mdp` object similar to the `PassiveTDAgent`.\n", 32 | "\n", 33 | " Let us use the same `GridMDP` object we used above. **Figure 17.1 (sequential_decision_environment)** is similar to **Figure 21.1** but has some discounting parameter as **gamma = 0.9**. The enviroment also implements an exploration function **f** which returns fixed **Rplus** until agent has visited state, action **Ne** number of times. The method **actions_in_state** returns actions possible in given state. It is useful when applying max and argmax operations." 34 | ] 35 | }, 36 | { 37 | "cell_type": "markdown", 38 | "metadata": {}, 39 | "source": [ 40 | "Let us create our object now. We also use the **same alpha** as given in the footnote of the book on **page 769**: $\\alpha(n)=60/(59+n)$ We use **Rplus = 2** and **Ne = 5** as defined in the book. The pseudocode can be referred from **Fig 21.7** in the book." 
41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 12, 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [ 49 | "import os, sys\n", 50 | "sys.path = [os.path.abspath(\"../../\")] + sys.path\n", 51 | "from rl4e import *\n", 52 | "from mdp import sequential_decision_environment, value_iteration" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 6, 58 | "metadata": {}, 59 | "outputs": [], 60 | "source": [ 61 | "q_agent = QLearningAgent(sequential_decision_environment, Ne=5, Rplus=2, \n", 62 | " alpha=lambda n: 60./(59+n))" 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "Now to try out the q_agent we make use of the **run_single_trial** function in rl.py (which was also used above). Let us use **200** iterations." 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": 7, 75 | "metadata": {}, 76 | "outputs": [], 77 | "source": [ 78 | "for i in range(200):\n", 79 | " run_single_trial(q_agent,sequential_decision_environment)" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "metadata": {}, 85 | "source": [ 86 | "Now let us see the Q Values. The keys are state-action pairs. 
Where different actions correspond according to:\n", 87 | "\n", 88 | "north = (0, 1) \n", 89 | "south = (0,-1) \n", 90 | "west = (-1, 0) \n", 91 | "east = (1, 0)" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "q_agent.Q" 101 | ] 102 | }, 103 | { 104 | "cell_type": "markdown", 105 | "metadata": {}, 106 | "source": [ 107 | "The Utility U of each state is related to Q by the following equation.\n", 108 | "\n", 109 | "$$U (s) = max_a Q(s, a)$$\n", 110 | "\n", 111 | "Let us convert the Q Values above into U estimates.\n", 112 | "\n" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": 9, 118 | "metadata": {}, 119 | "outputs": [], 120 | "source": [ 121 | "U = defaultdict(lambda: -1000.) # Very Large Negative Value for Comparison see below.\n", 122 | "for state_action, value in q_agent.Q.items():\n", 123 | " state, action = state_action\n", 124 | " if U[state] < value:\n", 125 | " U[state] = value" 126 | ] 127 | }, 128 | { 129 | "cell_type": "markdown", 130 | "metadata": {}, 131 | "source": [ 132 | "Now we can output the estimated utility values at each state:" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 10, 138 | "metadata": {}, 139 | "outputs": [ 140 | { 141 | "data": { 142 | "text/plain": [ 143 | "defaultdict(()>,\n", 144 | " {(0, 0): -0.0036556430391564178,\n", 145 | " (1, 0): -0.04862675963288682,\n", 146 | " (2, 0): 0.03384490363100474,\n", 147 | " (3, 0): -0.16618771401113092,\n", 148 | " (3, 1): -0.6015323978614368,\n", 149 | " (0, 1): 0.09161077177913537,\n", 150 | " (0, 2): 0.1834607974581678,\n", 151 | " (1, 2): 0.26393277962204903,\n", 152 | " (2, 2): 0.32369726495311274,\n", 153 | " (3, 2): 0.38898341569576245,\n", 154 | " (2, 1): -0.044858154562400485})" 155 | ] 156 | }, 157 | "execution_count": 10, 158 | "metadata": {}, 159 | "output_type": "execute_result" 160 | } 161 | ], 162 | "source": [ 163 | "U" 164 | 
] 165 | }, 166 | { 167 | "cell_type": "markdown", 168 | "metadata": {}, 169 | "source": [ 170 | "Let us finally compare these estimates to value_iteration results." 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": 13, 176 | "metadata": {}, 177 | "outputs": [ 178 | { 179 | "name": "stdout", 180 | "output_type": "stream", 181 | "text": [ 182 | "{(0, 1): 0.3984432178350045, (1, 2): 0.649585681261095, (3, 2): 1.0, (0, 0): 0.2962883154554812, (3, 0): 0.12987274656746342, (3, 1): -1.0, (2, 1): 0.48644001739269643, (2, 0): 0.3447542300124158, (2, 2): 0.7953620878466678, (1, 0): 0.25386699846479516, (0, 2): 0.5093943765842497}\n" 183 | ] 184 | } 185 | ], 186 | "source": [ 187 | "print(value_iteration(sequential_decision_environment))" 188 | ] 189 | } 190 | ], 191 | "metadata": { 192 | "kernelspec": { 193 | "display_name": "Python 3", 194 | "language": "python", 195 | "name": "python3" 196 | }, 197 | "language_info": { 198 | "codemirror_mode": { 199 | "name": "ipython", 200 | "version": 3 201 | }, 202 | "file_extension": ".py", 203 | "mimetype": "text/x-python", 204 | "name": "python", 205 | "nbconvert_exporter": "python", 206 | "pygments_lexer": "ipython3", 207 | "version": "3.7.2" 208 | } 209 | }, 210 | "nbformat": 4, 211 | "nbformat_minor": 2 212 | } 213 | -------------------------------------------------------------------------------- /notebooks/chapter21/images/mdp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter21/images/mdp.png -------------------------------------------------------------------------------- /notebooks/chapter22/Introduction.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# NATURAL LANGUAGE PROCESSING\n", 8 | "\n", 9 | "The notebooks in this 
folder cover chapter 23 of the book
39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": [ 45 | "## CONTENTS\n", 46 | "\n", 47 | "- Introduction: Introduction to the field of nlp and the table of contents.\n", 48 | "- Grammars: Introduction to grammar rules and lexicon of words of a language.\n", 49 | " - Context-free Grammar\n", 50 | " - Probabilistic Context-Free Grammar\n", 51 | " - Chomsky Normal Form\n", 52 | " - Lexicon\n", 53 | " - Grammar Rules\n", 54 | " - Implementation of Different Grammars\n", 55 | "- Parsing: The algorithms parsing sentences according to a certain kind of grammar.\n", 56 | " - Chart Parsing\n", 57 | " - CYK Parsing\n", 58 | " - A-star Parsing\n", 59 | " - Beam Search Parsing\n", 60 | " " 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [] 69 | } 70 | ], 71 | "metadata": { 72 | "kernelspec": { 73 | "display_name": "Python 3", 74 | "language": "python", 75 | "name": "python3" 76 | }, 77 | "language_info": { 78 | "codemirror_mode": { 79 | "name": "ipython", 80 | "version": 3 81 | }, 82 | "file_extension": ".py", 83 | "mimetype": "text/x-python", 84 | "name": "python", 85 | "nbconvert_exporter": "python", 86 | "pygments_lexer": "ipython3", 87 | "version": "3.7.2" 88 | } 89 | }, 90 | "nbformat": 4, 91 | "nbformat_minor": 2 92 | } 93 | -------------------------------------------------------------------------------- /notebooks/chapter22/images/parse_tree.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter22/images/parse_tree.png -------------------------------------------------------------------------------- /notebooks/chapter24/images/RCNN.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter24/images/RCNN.png -------------------------------------------------------------------------------- /notebooks/chapter24/images/derivative_of_gaussian.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter24/images/derivative_of_gaussian.png -------------------------------------------------------------------------------- /notebooks/chapter24/images/gradients.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter24/images/gradients.png -------------------------------------------------------------------------------- /notebooks/chapter24/images/laplacian.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter24/images/laplacian.png -------------------------------------------------------------------------------- /notebooks/chapter24/images/laplacian_kernels.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter24/images/laplacian_kernels.png -------------------------------------------------------------------------------- /notebooks/chapter24/images/stapler.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter24/images/stapler.png -------------------------------------------------------------------------------- 
/notebooks/chapter24/images/stapler_bbox.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/notebooks/chapter24/images/stapler_bbox.png -------------------------------------------------------------------------------- /notebooks/old notebooks/index.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# AIMA Python Binder Index\n", 8 | "\n", 9 | "Welcome to the AIMA Python Code Repository. You should be seeing this index notebook if you clicked on the **Launch Binder** button on the [repository](https://github.com/aimacode/aima-python). If you are viewing this notebook directly on Github we suggest that you use the **Launch Binder** button instead. Binder allows you to experiment with all the code in the browser itself without the need of installing anything on your local machine. Below is the list of notebooks that should assist you in navigating the different notebooks available. \n", 10 | "\n", 11 | "If you are completely new to AIMA Python or Jupyter Notebooks we suggest that you start with the Introduction Notebook.\n", 12 | "\n", 13 | "# List of Notebooks\n", 14 | "\n", 15 | "1. [**Introduction**](./intro.ipynb)\n", 16 | "\n", 17 | "2. [**Agents**](./agents.ipynb)\n", 18 | "\n", 19 | "3. [**Search**](./search.ipynb)\n", 20 | "\n", 21 | "4. [**Search - 4th edition**](./search4e.ipynb)\n", 22 | "\n", 23 | "4. [**Games**](./games.ipynb)\n", 24 | "\n", 25 | "5. [**Constraint Satisfaction Problems**](./csp.ipynb)\n", 26 | "\n", 27 | "6. [**Logic**](./logic.ipynb)\n", 28 | "\n", 29 | "7. [**Planning**](./planning.ipynb)\n", 30 | "\n", 31 | "8. [**Probability**](./probability.ipynb)\n", 32 | "\n", 33 | "9. [**Markov Decision Processes**](./mdp.ipynb)\n", 34 | "\n", 35 | "10. 
[**Learning**](./learning.ipynb)\n", 36 | "\n", 37 | "11. [**Reinforcement Learning**](./rl.ipynb)\n", 38 | "\n", 39 | "12. [**Statistical Language Processing Tools**](./text.ipynb)\n", 40 | "\n", 41 | "13. [**Natural Language Processing**](./nlp.ipynb)\n", 42 | "\n", 43 | "Besides the notebooks it is also possible to make direct modifications to the Python/JS code. To view/modify the complete set of files [click here](.) to view the Directory structure." 44 | ] 45 | } 46 | ], 47 | "metadata": { 48 | "kernelspec": { 49 | "display_name": "Python 3", 50 | "language": "python", 51 | "name": "python3" 52 | }, 53 | "language_info": { 54 | "codemirror_mode": { 55 | "name": "ipython", 56 | "version": 3 57 | }, 58 | "file_extension": ".py", 59 | "mimetype": "text/x-python", 60 | "name": "python", 61 | "nbconvert_exporter": "python", 62 | "pygments_lexer": "ipython3", 63 | "version": "3.5.1" 64 | } 65 | }, 66 | "nbformat": 4, 67 | "nbformat_minor": 0 68 | } 69 | -------------------------------------------------------------------------------- /notebooks/old notebooks/intro.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# An Introduction To `aima-python` \n", 8 | " \n", 9 | "The [aima-python](https://github.com/aimacode/aima-python) repository implements, in Python code, the algorithms in the textbook *[Artificial Intelligence: A Modern Approach](http://aima.cs.berkeley.edu)*. A typical module in the repository has the code for a single chapter in the book, but some modules combine several chapters. See [the index](https://github.com/aimacode/aima-python#index-of-code) if you can't find the algorithm you want. 
The code in this repository attempts to mirror the pseudocode in the textbook as closely as possible and to stress readability foremost; if you are looking for high-performance code with advanced features, there are other repositories for you. For each module, there are three/four files, for example:\n", 10 | "\n", 11 | "- [**`nlp.py`**](https://github.com/aimacode/aima-python/blob/master/nlp.py): Source code with data types and algorithms for natural language processing; functions have docstrings explaining their use.\n", 12 | "- [**`nlp.ipynb`**](https://github.com/aimacode/aima-python/blob/master/nlp.ipynb): A notebook like this one; gives more detailed examples and explanations of use.\n", 13 | "- [**`nlp_apps.ipynb`**](https://github.com/aimacode/aima-python/blob/master/nlp_apps.ipynb): A Jupyter notebook that gives example applications of the code.\n", 14 | "- [**`tests/test_nlp.py`**](https://github.com/aimacode/aima-python/blob/master/tests/test_nlp.py): Test cases, used to verify the code is correct, and also useful to see examples of use.\n", 15 | "\n", 16 | "There is also an [aima-java](https://github.com/aimacode/aima-java) repository, if you prefer Java.\n", 17 | " \n", 18 | "## What version of Python?\n", 19 | " \n", 20 | "The code is tested in Python [3.4](https://www.python.org/download/releases/3.4.3/) and [3.5](https://www.python.org/downloads/release/python-351/). If you try a different version of Python 3 and find a problem, please report it as an [Issue](https://github.com/aimacode/aima-python/issues).\n", 21 | " \n", 22 | "We recommend the [Anaconda](https://www.anaconda.com/download/) distribution of Python 3.5. It comes with additional tools like the powerful IPython interpreter, the Jupyter Notebook and many helpful packages for scientific computing. After installing Anaconda, you will be good to go to run all the code and all the IPython notebooks. 
\n", 23 | "\n", 24 | "## IPython notebooks \n", 25 | " \n", 26 | "The IPython notebooks in this repository explain how to use the modules, and give examples of usage. \n", 27 | "You can use them in three ways: \n", 28 | "\n", 29 | "1. View static HTML pages. (Just browse to the [repository](https://github.com/aimacode/aima-python) and click on a `.ipynb` file link.)\n", 30 | "2. Run, modify, and re-run code, live. (Download the repository (by [zip file](https://github.com/aimacode/aima-python/archive/master.zip) or by `git` commands), start a Jupyter notebook server with the shell command \"`jupyter notebook`\" (issued from the directory where the files are), and click on the notebook you want to interact with.)\n", 31 | "3. Binder - Click on the binder badge on the [repository](https://github.com/aimacode/aima-python) main page to open the notebooks in an executable environment, online. This method does not require any extra installation. The code can be executed and modified from the browser itself. Note that this is an unstable option; there is a chance the notebooks will never load.\n", 32 | "\n", 33 | " \n", 34 | "You can [read about notebooks](https://jupyter-notebook-beginner-guide.readthedocs.org/en/latest/) and then [get started](https://nbviewer.jupyter.org/github/jupyter/notebook/blob/master/docs/source/examples/Notebook/Running%20Code.ipynb)." 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "metadata": { 40 | "collapsed": true 41 | }, 42 | "source": [ 43 | "# Helpful Tips\n", 44 | "\n", 45 | "Most of these notebooks start by importing all the symbols in a module:" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": 1, 51 | "metadata": { 52 | "collapsed": true 53 | }, 54 | "outputs": [], 55 | "source": [ 56 | "from logic import *" 57 | ] 58 | }, 59 | { 60 | "cell_type": "markdown", 61 | "metadata": {}, 62 | "source": [ 63 | "From there, the notebook alternates explanations with examples of use. 
You can run the examples as they are, and you can modify the code cells (or add new cells) and run your own examples. If you have some really good examples to add, you can make a github pull request.\n", 64 | "\n", 65 | "If you want to see the source code of a function, you can open a browser or editor and see it in another window, or from within the notebook you can use the IPython magic function `%psource` (for \"print source\") or the function `psource` from `notebook.py`. Also, if the algorithm has pseudocode available, you can read it by calling the `pseudocode` function with the name of the algorithm passed as a parameter." 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": 2, 71 | "metadata": { 72 | "collapsed": true 73 | }, 74 | "outputs": [], 75 | "source": [ 76 | "%psource WalkSAT" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "metadata": {}, 83 | "outputs": [], 84 | "source": [ 85 | "from notebook import psource, pseudocode\n", 86 | "\n", 87 | "psource(WalkSAT)\n", 88 | "pseudocode(\"WalkSAT\")" 89 | ] 90 | }, 91 | { 92 | "cell_type": "markdown", 93 | "metadata": {}, 94 | "source": [ 95 | "Or see an abbreviated description of an object with a trailing question mark:" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 3, 101 | "metadata": { 102 | "collapsed": true 103 | }, 104 | "outputs": [], 105 | "source": [ 106 | "WalkSAT?" 107 | ] 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "metadata": {}, 112 | "source": [ 113 | "# Authors\n", 114 | "\n", 115 | "This notebook is written by [Chirag Vertak](https://github.com/chiragvartak) and [Peter Norvig](https://github.com/norvig)." 
116 | ] 117 | } 118 | ], 119 | "metadata": { 120 | "kernelspec": { 121 | "display_name": "Python 3", 122 | "language": "python", 123 | "name": "python3" 124 | }, 125 | "language_info": { 126 | "codemirror_mode": { 127 | "name": "ipython", 128 | "version": 3 129 | }, 130 | "file_extension": ".py", 131 | "mimetype": "text/x-python", 132 | "name": "python", 133 | "nbconvert_exporter": "python", 134 | "pygments_lexer": "ipython3", 135 | "version": "3.5.3" 136 | } 137 | }, 138 | "nbformat": 4, 139 | "nbformat_minor": 1 140 | } 141 | -------------------------------------------------------------------------------- /probabilistic_learning.py: -------------------------------------------------------------------------------- 1 | """Learning probabilistic models. (Chapters 20)""" 2 | 3 | import heapq 4 | 5 | from utils import weighted_sampler, product, gaussian 6 | 7 | 8 | class CountingProbDist: 9 | """ 10 | A probability distribution formed by observing and counting examples. 11 | If p is an instance of this class and o is an observed value, then 12 | there are 3 main operations: 13 | p.add(o) increments the count for observation o by 1. 14 | p.sample() returns a random element from the distribution. 15 | p[o] returns the probability for o (as in a regular ProbDist). 16 | """ 17 | 18 | def __init__(self, observations=None, default=0): 19 | """ 20 | Create a distribution, and optionally add in some observations. 21 | By default this is an unsmoothed distribution, but saying default=1, 22 | for example, gives you add-one smoothing. 
23 | """ 24 | if observations is None: 25 | observations = [] 26 | self.dictionary = {} 27 | self.n_obs = 0 28 | self.default = default 29 | self.sampler = None 30 | 31 | for o in observations: 32 | self.add(o) 33 | 34 | def add(self, o): 35 | """Add an observation o to the distribution.""" 36 | self.smooth_for(o) 37 | self.dictionary[o] += 1 38 | self.n_obs += 1 39 | self.sampler = None 40 | 41 | def smooth_for(self, o): 42 | """ 43 | Include o among the possible observations, whether or not 44 | it's been observed yet. 45 | """ 46 | if o not in self.dictionary: 47 | self.dictionary[o] = self.default 48 | self.n_obs += self.default 49 | self.sampler = None 50 | 51 | def __getitem__(self, item): 52 | """Return an estimate of the probability of item.""" 53 | self.smooth_for(item) 54 | return self.dictionary[item] / self.n_obs 55 | 56 | # (top() and sample() are not used in this module, but elsewhere.) 57 | 58 | def top(self, n): 59 | """Return (count, obs) tuples for the n most frequent observations.""" 60 | return heapq.nlargest(n, [(v, k) for (k, v) in self.dictionary.items()]) 61 | 62 | def sample(self): 63 | """Return a random sample from the distribution.""" 64 | if self.sampler is None: 65 | self.sampler = weighted_sampler(list(self.dictionary.keys()), list(self.dictionary.values())) 66 | return self.sampler() 67 | 68 | 69 | def NaiveBayesLearner(dataset, continuous=True, simple=False): 70 | if simple: 71 | return NaiveBayesSimple(dataset) 72 | if continuous: 73 | return NaiveBayesContinuous(dataset) 74 | else: 75 | return NaiveBayesDiscrete(dataset) 76 | 77 | 78 | def NaiveBayesSimple(distribution): 79 | """ 80 | A simple naive bayes classifier that takes as input a dictionary of 81 | CountingProbDist objects and classifies items according to these distributions. 
def NaiveBayesDiscrete(dataset):
    """
    Just count how many times each value of each input attribute
    occurs, conditional on the target value. Count the different
    target values too.
    """
    target_vals = dataset.values[dataset.target]
    # prior distribution over the target values
    target_dist = CountingProbDist(target_vals)
    # one conditional distribution per (target value, input attribute) pair
    attr_dists = {}
    for t_val in target_vals:
        for attr in dataset.inputs:
            attr_dists[t_val, attr] = CountingProbDist(dataset.values[attr])
    # tally every training example into the prior and the conditionals
    for example in dataset.examples:
        t_val = example[dataset.target]
        target_dist.add(t_val)
        for attr in dataset.inputs:
            attr_dists[t_val, attr].add(example[attr])

    def predict(example):
        """
        Predict the target value for example. Consider each possible value,
        and pick the most likely by looking at each attribute independently.
        """

        def class_probability(target_val):
            conditionals = (attr_dists[target_val, attr][example[attr]]
                            for attr in dataset.inputs)
            return target_dist[target_val] * product(conditionals)

        return max(target_vals, key=class_probability)

    return predict
136 | """ 137 | means, deviations = dataset.find_means_and_deviations() 138 | 139 | target_vals = dataset.values[dataset.target] 140 | target_dist = CountingProbDist(target_vals) 141 | 142 | def predict(example): 143 | """Predict the target value for example. Consider each possible value, 144 | and pick the most likely by looking at each attribute independently.""" 145 | 146 | def class_probability(target_val): 147 | prob = target_dist[target_val] 148 | for attr in dataset.inputs: 149 | prob *= gaussian(means[target_val][attr], deviations[target_val][attr], example[attr]) 150 | return prob 151 | 152 | return max(target_vals, key=class_probability) 153 | 154 | return predict 155 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | filterwarnings = 3 | ignore::DeprecationWarning 4 | ignore::UserWarning 5 | ignore::RuntimeWarning 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | cvxopt==1.2.5 2 | image==1.5.32 3 | ipython==7.18.1 4 | ipythonblocks==1.9.0 5 | ipywidgets==7.5.1 6 | jupyter==1.0.0 7 | keras==2.4.3 8 | matplotlib==3.3.2 9 | networkx==2.5 10 | numpy==1.18.5 11 | opencv-python==4.4.0.44 12 | pandas==1.1.3 13 | pillow==7.2.0 14 | pytest-cov==2.10.1 15 | qpsolvers==1.4 16 | scipy==1.5.2 17 | sortedcontainers==2.2.2 18 | -------------------------------------------------------------------------------- /search_helpers.py: -------------------------------------------------------------------------------- 1 | from inspect import getsource 2 | import heapq 3 | 4 | import matplotlib.pyplot as plt 5 | import networkx as nx 6 | from matplotlib import lines 7 | import ipywidgets as widgets 8 | 9 | from search import GraphProblem, romania_map 10 | from notebook import final_path_colors 11 | 12 | 
13 | def show_tree(graph_data, node_colors = None): 14 | G = nx.Graph(graph_data['graph_dict']) 15 | node_colors = node_colors or graph_data['node_colors'] 16 | node_positions = graph_data['node_positions'] 17 | node_label_pos = graph_data['node_label_positions'] 18 | edge_weights= graph_data['edge_weights'] 19 | 20 | # set the size of the plot 21 | plt.figure(figsize=(8,5)) 22 | # draw the graph (both nodes and edges) with locations 23 | nx.draw(G, pos={k: node_positions[k] for k in G.nodes()}, 24 | node_color=[node_colors[node] for node in G.nodes()], linewidths=0.3, edgecolors='k') 25 | 26 | # draw labels for nodes 27 | node_label_handles = nx.draw_networkx_labels(G, pos=node_label_pos, font_size=14) 28 | 29 | # add a white bounding box behind the node labels 30 | [label.set_bbox(dict(facecolor='white', edgecolor='none')) for label in node_label_handles.values()] 31 | 32 | # add edge labels to the graph (for displaying the edge_weights) 33 | if next(iter(edge_weights.values())) != None: 34 | nx.draw_networkx_edge_labels(G, pos=node_positions, edge_labels=edge_weights, font_size=14) 35 | 36 | # add a legend 37 | white_circle = lines.Line2D([], [], color="white", marker='o', markersize=15, markerfacecolor="white") 38 | orange_circle = lines.Line2D([], [], color="orange", marker='o', markersize=15, markerfacecolor="orange") 39 | red_circle = lines.Line2D([], [], color="red", marker='o', markersize=15, markerfacecolor="red") 40 | gray_circle = lines.Line2D([], [], color="gray", marker='o', markersize=15, markerfacecolor="gray") 41 | green_circle = lines.Line2D([], [], color="green", marker='o', markersize=15, markerfacecolor="green") 42 | plt.legend((white_circle, orange_circle, red_circle, gray_circle, green_circle), 43 | ('Un-explored', 'Frontier', 'Currently Exploring', 'Explored', 'Final Solution'), 44 | numpoints=1, prop={'size':14}, loc=(.8,.75)) 45 | 46 | # show the plot. No need to use in notebooks. nx.draw will show the graph itself. 
47 | plt.show() 48 | 49 | ## helper functions for visualisations 50 | 51 | 52 | def display_steps(graph_data, user_input, algorithm=None, problem=None): 53 | initial_node_colors = graph_data['node_colors'] 54 | if user_input == False: 55 | def slider_callback(iteration): 56 | # don't show graph for the first time running the cell calling this function 57 | try: 58 | show_tree(graph_data, node_colors=all_node_colors[iteration]) 59 | except: 60 | pass 61 | def visualize_callback(Visualize): 62 | if Visualize is True: 63 | button.value = False 64 | 65 | global all_node_colors 66 | 67 | iterations, all_node_colors, node = algorithm(problem) 68 | solution = node.solution() 69 | all_node_colors.append(final_path_colors(all_node_colors[0], problem, solution)) 70 | 71 | slider.max = len(all_node_colors) - 1 72 | 73 | for i in range(slider.max + 1): 74 | slider.value = i 75 | #time.sleep(.5) 76 | 77 | slider = widgets.IntSlider(min=0, max=1, step=1, value=0) 78 | slider_visual = widgets.interactive(slider_callback, iteration=slider) 79 | display(slider_visual) 80 | 81 | button = widgets.ToggleButton(value=False) 82 | button_visual = widgets.interactive(visualize_callback, Visualize=button) 83 | display(button_visual) 84 | 85 | if user_input == True: 86 | node_colors = dict(initial_node_colors) 87 | if isinstance(algorithm, dict): 88 | assert set(algorithm.keys()).issubset({"Breadth First Tree Search", 89 | "Depth First Tree Search", 90 | "Breadth First Search", 91 | "Depth First Graph Search", 92 | "Best First Graph Search", 93 | "Uniform Cost Search", 94 | "Depth Limited Search", 95 | "Iterative Deepening Search", 96 | "Greedy Best First Search", 97 | "A-star Search", 98 | "Recursive Best First Search"}) 99 | 100 | algo_dropdown = widgets.Dropdown(description="Search algorithm: ", 101 | options=sorted(list(algorithm.keys())), 102 | value="Breadth First Tree Search") 103 | display(algo_dropdown) 104 | elif algorithm is None: 105 | print("No algorithm to run.") 106 | return 
0 107 | 108 | def slider_callback(iteration): 109 | # don't show graph for the first time running the cell calling this function 110 | try: 111 | show_tree(graph_data, node_colors=all_node_colors[iteration]) 112 | except: 113 | pass 114 | 115 | def visualize_callback(Visualize): 116 | if Visualize is True: 117 | button.value = False 118 | 119 | problem = GraphProblem(start_dropdown.value, end_dropdown.value, romania_map) 120 | global all_node_colors 121 | 122 | user_algorithm = algorithm[algo_dropdown.value] 123 | 124 | iterations, all_node_colors, node = user_algorithm(problem) 125 | solution = node.solution() 126 | all_node_colors.append(final_path_colors(all_node_colors[0], problem, solution)) 127 | 128 | slider.max = len(all_node_colors) - 1 129 | 130 | for i in range(slider.max + 1): 131 | slider.value = i 132 | #time.sleep(.5) 133 | 134 | start_dropdown = widgets.Dropdown(description="Start city: ", 135 | options=sorted(list(node_colors.keys())), value="Arad") 136 | display(start_dropdown) 137 | 138 | end_dropdown = widgets.Dropdown(description="Goal city: ", 139 | options=sorted(list(node_colors.keys())), value="Fagaras") 140 | display(end_dropdown) 141 | 142 | button = widgets.ToggleButton(value=False) 143 | button_visual = widgets.interactive(visualize_callback, Visualize=button) 144 | display(button_visual) 145 | 146 | slider = widgets.IntSlider(min=0, max=1, step=1, value=0) 147 | slider_visual = widgets.interactive(slider_callback, iteration=slider) 148 | display(slider_visual) 149 | 150 | class PriorityQueue: 151 | """A Queue in which the minimum (or maximum) element (as determined by f and 152 | order) is returned first. 153 | If order is 'min', the item with minimum f(x) is 154 | returned first; if order is 'max', then it is the item with maximum f(x). 
155 | Also supports dict-like lookup.""" 156 | 157 | def __init__(self, order='min', f=lambda x: x): 158 | self.heap = [] 159 | 160 | if order == 'min': 161 | self.f = f 162 | elif order == 'max': # now item with max f(x) 163 | self.f = lambda x: -f(x) # will be popped first 164 | else: 165 | raise ValueError("order must be either 'min' or 'max'.") 166 | 167 | def append(self, item): 168 | """Insert item at its correct position.""" 169 | heapq.heappush(self.heap, (self.f(item), item)) 170 | 171 | def extend(self, items): 172 | """Insert each item in items at its correct position.""" 173 | for item in items: 174 | self.append(item) 175 | 176 | def pop(self): 177 | """Pop and return the item (with min or max f(x) value) 178 | depending on the order.""" 179 | if self.heap: 180 | return heapq.heappop(self.heap)[1] 181 | else: 182 | raise Exception('Trying to pop from empty PriorityQueue.') 183 | 184 | def getvalue(self, key): 185 | """Returns the first value associated with key in PriorityQueue. 186 | Raises KeyError if key is not present.""" 187 | for value, item in self.heap: 188 | if item == key: 189 | return value, item 190 | raise KeyError(str(key) + " is not in the priority queue") 191 | 192 | def __len__(self): 193 | """Return current capacity of PriorityQueue.""" 194 | return len(self.heap) 195 | 196 | def __contains__(self, key): 197 | """Return True if the key is in PriorityQueue.""" 198 | return any([item == key for _, item in self.heap]) 199 | 200 | def __getitem__(self, key): 201 | """Returns the first value associated with key in PriorityQueue. 
202 | Raises KeyError if key is not present.""" 203 | for value, item in self.heap: 204 | if item == key: 205 | return item 206 | raise KeyError(str(key) + " is not in the priority queue") 207 | 208 | def __delitem__(self, key): 209 | """Delete the first occurrence of key.""" 210 | try: 211 | del self.heap[[item == key for _, item in self.heap].index(True)] 212 | except ValueError: 213 | raise KeyError(str(key) + " is not in the priority queue") 214 | heapq.heapify(self.heap) 215 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aimaTUM/aima-python/087a29eeda72dd940ed2c41060308b505581a97c/tests/__init__.py -------------------------------------------------------------------------------- /tests/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | filterwarnings = 3 | ignore::ResourceWarning -------------------------------------------------------------------------------- /tests/test_games.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from games import * 4 | 5 | # Creating the game instances 6 | f52 = Fig52Game() 7 | ttt = TicTacToe() 8 | 9 | random.seed("aima-python") 10 | 11 | 12 | def gen_state(to_move='X', x_positions=[], o_positions=[], h=3, v=3): 13 | """Given whose turn it is to move, the positions of X's on the board, the 14 | positions of O's on the board, and, (optionally) number of rows, columns 15 | and how many consecutive X's or O's required to win, return the corresponding 16 | game state""" 17 | 18 | moves = set([(x, y) for x in range(1, h + 1) for y in range(1, v + 1)]) - set(x_positions) - set(o_positions) 19 | moves = list(moves) 20 | board = {} 21 | for pos in x_positions: 22 | board[pos] = 'X' 23 | for pos in o_positions: 24 | board[pos] = 'O' 25 | return 
GameState(to_move=to_move, utility=0, board=board, moves=moves) 26 | 27 | 28 | def test_minmax_decision(): 29 | assert minmax_decision('A', f52) == 'a1' 30 | assert minmax_decision('B', f52) == 'b1' 31 | assert minmax_decision('C', f52) == 'c1' 32 | assert minmax_decision('D', f52) == 'd3' 33 | 34 | 35 | def test_alpha_beta_search(): 36 | assert alpha_beta_search('A', f52) == 'a1' 37 | assert alpha_beta_search('B', f52) == 'b1' 38 | assert alpha_beta_search('C', f52) == 'c1' 39 | assert alpha_beta_search('D', f52) == 'd3' 40 | 41 | state = gen_state(to_move='X', x_positions=[(1, 1), (3, 3)], 42 | o_positions=[(1, 2), (3, 2)]) 43 | assert alpha_beta_search(state, ttt) == (2, 2) 44 | 45 | state = gen_state(to_move='O', x_positions=[(1, 1), (3, 1), (3, 3)], 46 | o_positions=[(1, 2), (3, 2)]) 47 | assert alpha_beta_search(state, ttt) == (2, 2) 48 | 49 | state = gen_state(to_move='O', x_positions=[(1, 1)], 50 | o_positions=[]) 51 | assert alpha_beta_search(state, ttt) == (2, 2) 52 | 53 | state = gen_state(to_move='X', x_positions=[(1, 1), (3, 1)], 54 | o_positions=[(2, 2), (3, 1)]) 55 | assert alpha_beta_search(state, ttt) == (1, 3) 56 | 57 | 58 | def test_random_tests(): 59 | assert Fig52Game().play_game(alpha_beta_player, alpha_beta_player) == 3 60 | 61 | # The player 'X' (one who plays first) in TicTacToe never loses: 62 | assert ttt.play_game(alpha_beta_player, alpha_beta_player) >= 0 63 | 64 | # The player 'X' (one who plays first) in TicTacToe never loses: 65 | assert ttt.play_game(alpha_beta_player, random_player) >= 0 66 | 67 | 68 | if __name__ == "__main__": 69 | pytest.main() 70 | -------------------------------------------------------------------------------- /tests/test_games4e.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from games4e import * 4 | 5 | # Creating the game instances 6 | f52 = Fig52Game() 7 | ttt = TicTacToe() 8 | con4 = ConnectFour() 9 | 10 | random.seed("aima-python") 11 | 12 | 
13 | def gen_state(to_move='X', x_positions=[], o_positions=[], h=3, v=3): 14 | """Given whose turn it is to move, the positions of X's on the board, the 15 | positions of O's on the board, and, (optionally) number of rows, columns 16 | and how many consecutive X's or O's required to win, return the corresponding 17 | game state""" 18 | 19 | moves = set([(x, y) for x in range(1, h + 1) for y in range(1, v + 1)]) - set(x_positions) - set(o_positions) 20 | moves = list(moves) 21 | board = {} 22 | for pos in x_positions: 23 | board[pos] = 'X' 24 | for pos in o_positions: 25 | board[pos] = 'O' 26 | return GameState(to_move=to_move, utility=0, board=board, moves=moves) 27 | 28 | 29 | def test_minmax_decision(): 30 | assert minmax_decision('A', f52) == 'a1' 31 | assert minmax_decision('B', f52) == 'b1' 32 | assert minmax_decision('C', f52) == 'c1' 33 | assert minmax_decision('D', f52) == 'd3' 34 | 35 | 36 | def test_alpha_beta_search(): 37 | assert alpha_beta_search('A', f52) == 'a1' 38 | assert alpha_beta_search('B', f52) == 'b1' 39 | assert alpha_beta_search('C', f52) == 'c1' 40 | assert alpha_beta_search('D', f52) == 'd3' 41 | 42 | state = gen_state(to_move='X', x_positions=[(1, 1), (3, 3)], 43 | o_positions=[(1, 2), (3, 2)]) 44 | assert alpha_beta_search(state, ttt) == (2, 2) 45 | 46 | state = gen_state(to_move='O', x_positions=[(1, 1), (3, 1), (3, 3)], 47 | o_positions=[(1, 2), (3, 2)]) 48 | assert alpha_beta_search(state, ttt) == (2, 2) 49 | 50 | state = gen_state(to_move='O', x_positions=[(1, 1)], 51 | o_positions=[]) 52 | assert alpha_beta_search(state, ttt) == (2, 2) 53 | 54 | state = gen_state(to_move='X', x_positions=[(1, 1), (3, 1)], 55 | o_positions=[(2, 2), (3, 1)]) 56 | assert alpha_beta_search(state, ttt) == (1, 3) 57 | 58 | 59 | def test_monte_carlo_tree_search(): 60 | state = gen_state(to_move='X', x_positions=[(1, 1), (3, 3)], 61 | o_positions=[(1, 2), (3, 2)]) 62 | assert monte_carlo_tree_search(state, ttt) == (2, 2) 63 | 64 | state = 
gen_state(to_move='O', x_positions=[(1, 1), (3, 1), (3, 3)], 65 | o_positions=[(1, 2), (3, 2)]) 66 | assert monte_carlo_tree_search(state, ttt) == (2, 2) 67 | 68 | # uncomment the following when removing the 3rd edition 69 | # state = gen_state(to_move='O', x_positions=[(1, 1)], 70 | # o_positions=[]) 71 | # assert monte_carlo_tree_search(state, ttt) == (2, 2) 72 | 73 | state = gen_state(to_move='X', x_positions=[(1, 1), (3, 1)], 74 | o_positions=[(2, 2), (3, 1)]) 75 | assert monte_carlo_tree_search(state, ttt) == (1, 3) 76 | 77 | # should never lose to a random or alpha_beta player in a ttt game 78 | assert ttt.play_game(mcts_player, random_player) >= 0 79 | assert ttt.play_game(mcts_player, alpha_beta_player) >= 0 80 | 81 | # should never lose to a random player in a connect four game 82 | assert con4.play_game(mcts_player, random_player) >= 0 83 | 84 | 85 | def test_random_tests(): 86 | assert Fig52Game().play_game(alpha_beta_player, alpha_beta_player) == 3 87 | 88 | # The player 'X' (one who plays first) in TicTacToe never loses: 89 | assert ttt.play_game(alpha_beta_player, alpha_beta_player) >= 0 90 | 91 | # The player 'X' (one who plays first) in TicTacToe never loses: 92 | assert ttt.play_game(alpha_beta_player, random_player) >= 0 93 | 94 | 95 | if __name__ == "__main__": 96 | pytest.main() 97 | -------------------------------------------------------------------------------- /tests/test_learning.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from learning import * 4 | 5 | random.seed("aima-python") 6 | 7 | 8 | def test_exclude(): 9 | iris = DataSet(name='iris', exclude=[3]) 10 | assert iris.inputs == [0, 1, 2] 11 | 12 | 13 | def test_parse_csv(): 14 | iris = open_data('iris.csv').read() 15 | assert parse_csv(iris)[0] == [5.1, 3.5, 1.4, 0.2, 'setosa'] 16 | 17 | 18 | def test_weighted_mode(): 19 | assert weighted_mode('abbaa', [1, 2, 3, 1, 2]) == 'b' 20 | 21 | 22 | def test_weighted_replicate(): 
23 | assert weighted_replicate('ABC', [1, 2, 1], 4) == ['A', 'B', 'B', 'C'] 24 | 25 | 26 | def test_means_and_deviation(): 27 | iris = DataSet(name='iris') 28 | means, deviations = iris.find_means_and_deviations() 29 | assert round(means['setosa'][0], 3) == 5.006 30 | assert round(means['versicolor'][0], 3) == 5.936 31 | assert round(means['virginica'][0], 3) == 6.588 32 | assert round(deviations['setosa'][0], 3) == 0.352 33 | assert round(deviations['versicolor'][0], 3) == 0.516 34 | assert round(deviations['virginica'][0], 3) == 0.636 35 | 36 | 37 | def test_plurality_learner(): 38 | zoo = DataSet(name='zoo') 39 | pl = PluralityLearner(zoo) 40 | assert pl([1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 4, 1, 0, 1]) == 'mammal' 41 | 42 | 43 | def test_k_nearest_neighbors(): 44 | iris = DataSet(name='iris') 45 | knn = NearestNeighborLearner(iris, k=3) 46 | assert knn([5, 3, 1, 0.1]) == 'setosa' 47 | assert knn([6, 5, 3, 1.5]) == 'versicolor' 48 | assert knn([7.5, 4, 6, 2]) == 'virginica' 49 | 50 | 51 | def test_decision_tree_learner(): 52 | iris = DataSet(name='iris') 53 | dtl = DecisionTreeLearner(iris) 54 | assert dtl([5, 3, 1, 0.1]) == 'setosa' 55 | assert dtl([6, 5, 3, 1.5]) == 'versicolor' 56 | assert dtl([7.5, 4, 6, 2]) == 'virginica' 57 | 58 | 59 | def test_svc(): 60 | iris = DataSet(name='iris') 61 | classes = ['setosa', 'versicolor', 'virginica'] 62 | iris.classes_to_numbers(classes) 63 | n_samples, n_features = len(iris.examples), iris.target 64 | X, y = (np.array([x[:n_features] for x in iris.examples]), 65 | np.array([x[n_features] for x in iris.examples])) 66 | svm = MultiClassLearner(SVC()).fit(X, y) 67 | assert svm.predict([[5.0, 3.1, 0.9, 0.1]]) == 0 68 | assert svm.predict([[5.1, 3.5, 1.0, 0.0]]) == 0 69 | assert svm.predict([[4.9, 3.3, 1.1, 0.1]]) == 0 70 | assert svm.predict([[6.0, 3.0, 4.0, 1.1]]) == 1 71 | assert svm.predict([[6.1, 2.2, 3.5, 1.0]]) == 1 72 | assert svm.predict([[5.9, 2.5, 3.3, 1.1]]) == 1 73 | assert svm.predict([[7.5, 4.1, 6.2, 2.3]]) == 
2 74 | assert svm.predict([[7.3, 4.0, 6.1, 2.4]]) == 2 75 | assert svm.predict([[7.0, 3.3, 6.1, 2.5]]) == 2 76 | 77 | 78 | def test_information_content(): 79 | assert information_content([]) == 0 80 | assert information_content([4]) == 0 81 | assert information_content([5, 4, 0, 2, 5, 0]) > 1.9 82 | assert information_content([5, 4, 0, 2, 5, 0]) < 2 83 | assert information_content([1.5, 2.5]) > 0.9 84 | assert information_content([1.5, 2.5]) < 1.0 85 | 86 | 87 | def test_random_forest(): 88 | iris = DataSet(name='iris') 89 | rf = RandomForest(iris) 90 | tests = [([5.0, 3.0, 1.0, 0.1], 'setosa'), 91 | ([5.1, 3.3, 1.1, 0.1], 'setosa'), 92 | ([6.0, 5.0, 3.0, 1.0], 'versicolor'), 93 | ([6.1, 2.2, 3.5, 1.0], 'versicolor'), 94 | ([7.5, 4.1, 6.2, 2.3], 'virginica'), 95 | ([7.3, 3.7, 6.1, 2.5], 'virginica')] 96 | assert grade_learner(rf, tests) >= 1 / 3 97 | 98 | 99 | def test_neural_network_learner(): 100 | iris = DataSet(name='iris') 101 | classes = ['setosa', 'versicolor', 'virginica'] 102 | iris.classes_to_numbers(classes) 103 | nnl = NeuralNetLearner(iris, [5], 0.15, 75) 104 | tests = [([5.0, 3.1, 0.9, 0.1], 0), 105 | ([5.1, 3.5, 1.0, 0.0], 0), 106 | ([4.9, 3.3, 1.1, 0.1], 0), 107 | ([6.0, 3.0, 4.0, 1.1], 1), 108 | ([6.1, 2.2, 3.5, 1.0], 1), 109 | ([5.9, 2.5, 3.3, 1.1], 1), 110 | ([7.5, 4.1, 6.2, 2.3], 2), 111 | ([7.3, 4.0, 6.1, 2.4], 2), 112 | ([7.0, 3.3, 6.1, 2.5], 2)] 113 | assert grade_learner(nnl, tests) >= 1 / 3 114 | assert err_ratio(nnl, iris) < 0.21 115 | 116 | 117 | def test_perceptron(): 118 | iris = DataSet(name='iris') 119 | iris.classes_to_numbers() 120 | pl = PerceptronLearner(iris) 121 | tests = [([5, 3, 1, 0.1], 0), 122 | ([5, 3.5, 1, 0], 0), 123 | ([6, 3, 4, 1.1], 1), 124 | ([6, 2, 3.5, 1], 1), 125 | ([7.5, 4, 6, 2], 2), 126 | ([7, 3, 6, 2.5], 2)] 127 | assert grade_learner(pl, tests) > 1 / 2 128 | assert err_ratio(pl, iris) < 0.4 129 | 130 | 131 | def test_random_weights(): 132 | min_value = -0.5 133 | max_value = 0.5 134 | num_weights = 10 135 | 
test_weights = random_weights(min_value, max_value, num_weights) 136 | assert len(test_weights) == num_weights 137 | for weight in test_weights: 138 | assert min_value <= weight <= max_value 139 | 140 | 141 | def test_ada_boost(): 142 | iris = DataSet(name='iris') 143 | iris.classes_to_numbers() 144 | wl = WeightedLearner(PerceptronLearner) 145 | ab = ada_boost(iris, wl, 5) 146 | tests = [([5, 3, 1, 0.1], 0), 147 | ([5, 3.5, 1, 0], 0), 148 | ([6, 3, 4, 1.1], 1), 149 | ([6, 2, 3.5, 1], 1), 150 | ([7.5, 4, 6, 2], 2), 151 | ([7, 3, 6, 2.5], 2)] 152 | assert grade_learner(ab, tests) > 2 / 3 153 | assert err_ratio(ab, iris) < 0.25 154 | 155 | 156 | if __name__ == "__main__": 157 | pytest.main() 158 | -------------------------------------------------------------------------------- /tests/test_mdp.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from mdp import * 4 | 5 | random.seed("aima-python") 6 | 7 | sequential_decision_environment_1 = GridMDP([[-0.1, -0.1, -0.1, +1], 8 | [-0.1, None, -0.1, -1], 9 | [-0.1, -0.1, -0.1, -0.1]], 10 | terminals=[(3, 2), (3, 1)]) 11 | 12 | sequential_decision_environment_2 = GridMDP([[-2, -2, -2, +1], 13 | [-2, None, -2, -1], 14 | [-2, -2, -2, -2]], 15 | terminals=[(3, 2), (3, 1)]) 16 | 17 | sequential_decision_environment_3 = GridMDP([[-1.0, -0.1, -0.1, -0.1, -0.1, 0.5], 18 | [-0.1, None, None, -0.5, -0.1, -0.1], 19 | [-0.1, None, 1.0, 3.0, None, -0.1], 20 | [-0.1, -0.1, -0.1, None, None, -0.1], 21 | [0.5, -0.1, -0.1, -0.1, -0.1, -1.0]], 22 | terminals=[(2, 2), (3, 2), (0, 4), (5, 0)]) 23 | 24 | 25 | def test_value_iteration(): 26 | assert value_iteration(sequential_decision_environment, .01) == { 27 | (3, 2): 1.0, (3, 1): -1.0, 28 | (3, 0): 0.12958868267972745, (0, 1): 0.39810203830605462, 29 | (0, 2): 0.50928545646220924, (1, 0): 0.25348746162470537, 30 | (0, 0): 0.29543540628363629, (1, 2): 0.64958064617168676, 31 | (2, 0): 0.34461306281476806, (2, 1): 0.48643676237737926, 
32 | (2, 2): 0.79536093684710951} 33 | 34 | assert value_iteration(sequential_decision_environment_1, .01) == { 35 | (3, 2): 1.0, (3, 1): -1.0, 36 | (3, 0): -0.0897388258468311, (0, 1): 0.146419707398967840, 37 | (0, 2): 0.30596200514385086, (1, 0): 0.010092796415625799, 38 | (0, 0): 0.00633408092008296, (1, 2): 0.507390193380827400, 39 | (2, 0): 0.15072242145212010, (2, 1): 0.358309043654212570, 40 | (2, 2): 0.71675493618997840} 41 | 42 | assert value_iteration(sequential_decision_environment_2, .01) == { 43 | (3, 2): 1.0, (3, 1): -1.0, 44 | (3, 0): -3.5141584808407855, (0, 1): -7.8000009574737180, 45 | (0, 2): -6.1064293596058830, (1, 0): -7.1012549580376760, 46 | (0, 0): -8.5872244532783200, (1, 2): -3.9653547121245810, 47 | (2, 0): -5.3099468802901630, (2, 1): -3.3543366255753995, 48 | (2, 2): -1.7383376462930498} 49 | 50 | assert value_iteration(sequential_decision_environment_3, .01) == { 51 | (0, 0): 4.350592130345558, (0, 1): 3.640700980321895, (0, 2): 3.0734806370346943, (0, 3): 2.5754335063434937, 52 | (0, 4): -1.0, 53 | (1, 0): 3.640700980321895, (1, 1): 3.129579352304856, (1, 4): 2.0787517066719916, 54 | (2, 0): 3.0259220379893352, (2, 1): 2.5926103577982897, (2, 2): 1.0, (2, 4): 2.507774181360808, 55 | (3, 0): 2.5336747364500076, (3, 2): 3.0, (3, 3): 2.292172805400873, (3, 4): 2.996383110867515, 56 | (4, 0): 2.1014575936349886, (4, 3): 3.1297590518608907, (4, 4): 3.6408806798779287, 57 | (5, 0): -1.0, (5, 1): 2.5756132058995282, (5, 2): 3.0736603365907276, (5, 3): 3.6408806798779287, 58 | (5, 4): 4.350771829901593} 59 | 60 | 61 | def test_policy_iteration(): 62 | assert policy_iteration(sequential_decision_environment) == { 63 | (0, 0): (0, 1), (0, 1): (0, 1), (0, 2): (1, 0), 64 | (1, 0): (1, 0), (1, 2): (1, 0), (2, 0): (0, 1), 65 | (2, 1): (0, 1), (2, 2): (1, 0), (3, 0): (-1, 0), 66 | (3, 1): None, (3, 2): None} 67 | 68 | assert policy_iteration(sequential_decision_environment_1) == { 69 | (0, 0): (0, 1), (0, 1): (0, 1), (0, 2): (1, 0), 70 | (1, 0): 
(1, 0), (1, 2): (1, 0), (2, 0): (0, 1), 71 | (2, 1): (0, 1), (2, 2): (1, 0), (3, 0): (-1, 0), 72 | (3, 1): None, (3, 2): None} 73 | 74 | assert policy_iteration(sequential_decision_environment_2) == { 75 | (0, 0): (1, 0), (0, 1): (0, 1), (0, 2): (1, 0), 76 | (1, 0): (1, 0), (1, 2): (1, 0), (2, 0): (1, 0), 77 | (2, 1): (1, 0), (2, 2): (1, 0), (3, 0): (0, 1), 78 | (3, 1): None, (3, 2): None} 79 | 80 | 81 | def test_best_policy(): 82 | pi = best_policy(sequential_decision_environment, value_iteration(sequential_decision_environment, .01)) 83 | assert sequential_decision_environment.to_arrows(pi) == [['>', '>', '>', '.'], 84 | ['^', None, '^', '.'], 85 | ['^', '>', '^', '<']] 86 | 87 | pi_1 = best_policy(sequential_decision_environment_1, value_iteration(sequential_decision_environment_1, .01)) 88 | assert sequential_decision_environment_1.to_arrows(pi_1) == [['>', '>', '>', '.'], 89 | ['^', None, '^', '.'], 90 | ['^', '>', '^', '<']] 91 | 92 | pi_2 = best_policy(sequential_decision_environment_2, value_iteration(sequential_decision_environment_2, .01)) 93 | assert sequential_decision_environment_2.to_arrows(pi_2) == [['>', '>', '>', '.'], 94 | ['^', None, '>', '.'], 95 | ['>', '>', '>', '^']] 96 | 97 | pi_3 = best_policy(sequential_decision_environment_3, value_iteration(sequential_decision_environment_3, .01)) 98 | assert sequential_decision_environment_3.to_arrows(pi_3) == [['.', '>', '>', '>', '>', '>'], 99 | ['v', None, None, '>', '>', '^'], 100 | ['v', None, '.', '.', None, '^'], 101 | ['v', '<', 'v', None, None, '^'], 102 | ['<', '<', '<', '<', '<', '.']] 103 | 104 | 105 | def test_transition_model(): 106 | transition_model = {'a': {'plan1': [(0.2, 'a'), (0.3, 'b'), (0.3, 'c'), (0.2, 'd')], 107 | 'plan2': [(0.4, 'a'), (0.15, 'b'), (0.45, 'c')], 108 | 'plan3': [(0.2, 'a'), (0.5, 'b'), (0.3, 'c')], 109 | }, 110 | 'b': {'plan1': [(0.2, 'a'), (0.6, 'b'), (0.2, 'c'), (0.1, 'd')], 111 | 'plan2': [(0.6, 'a'), (0.2, 'b'), (0.1, 'c'), (0.1, 'd')], 112 | 'plan3': [(0.3, 
'a'), (0.3, 'b'), (0.4, 'c')], 113 | }, 114 | 'c': {'plan1': [(0.3, 'a'), (0.5, 'b'), (0.1, 'c'), (0.1, 'd')], 115 | 'plan2': [(0.5, 'a'), (0.3, 'b'), (0.1, 'c'), (0.1, 'd')], 116 | 'plan3': [(0.1, 'a'), (0.3, 'b'), (0.1, 'c'), (0.5, 'd')], 117 | }} 118 | 119 | mdp = MDP(init="a", actlist={"plan1", "plan2", "plan3"}, terminals={"d"}, states={"a", "b", "c", "d"}, 120 | transitions=transition_model) 121 | 122 | assert mdp.T("a", "plan3") == [(0.2, 'a'), (0.5, 'b'), (0.3, 'c')] 123 | assert mdp.T("b", "plan2") == [(0.6, 'a'), (0.2, 'b'), (0.1, 'c'), (0.1, 'd')] 124 | assert mdp.T("c", "plan1") == [(0.3, 'a'), (0.5, 'b'), (0.1, 'c'), (0.1, 'd')] 125 | 126 | 127 | def test_pomdp_value_iteration(): 128 | t_prob = [[[0.65, 0.35], [0.65, 0.35]], [[0.65, 0.35], [0.65, 0.35]], [[1.0, 0.0], [0.0, 1.0]]] 129 | e_prob = [[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]], [[0.8, 0.2], [0.3, 0.7]]] 130 | rewards = [[5, -10], [-20, 5], [-1, -1]] 131 | 132 | gamma = 0.95 133 | actions = ('0', '1', '2') 134 | states = ('0', '1') 135 | 136 | pomdp = POMDP(actions, t_prob, e_prob, rewards, states, gamma) 137 | utility = pomdp_value_iteration(pomdp, epsilon=5) 138 | 139 | for _, v in utility.items(): 140 | sum_ = 0 141 | for element in v: 142 | sum_ += sum(element) 143 | 144 | assert -9.76 < sum_ < -9.70 or 246.5 < sum_ < 248.5 or 0 < sum_ < 1 145 | 146 | 147 | def test_pomdp_value_iteration2(): 148 | t_prob = [[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]], [[1.0, 0.0], [0.0, 1.0]]] 149 | e_prob = [[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]], [[0.85, 0.15], [0.15, 0.85]]] 150 | rewards = [[-100, 10], [10, -100], [-1, -1]] 151 | 152 | gamma = 0.95 153 | actions = ('0', '1', '2') 154 | states = ('0', '1') 155 | 156 | pomdp = POMDP(actions, t_prob, e_prob, rewards, states, gamma) 157 | utility = pomdp_value_iteration(pomdp, epsilon=100) 158 | 159 | for _, v in utility.items(): 160 | sum_ = 0 161 | for element in v: 162 | sum_ += sum(element) 163 | 164 | assert -77.31 < sum_ < 
-77.25 or 799 < sum_ < 800 165 | 166 | 167 | if __name__ == "__main__": 168 | pytest.main() 169 | -------------------------------------------------------------------------------- /tests/test_mdp4e.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from mdp4e import * 4 | 5 | random.seed("aima-python") 6 | 7 | sequential_decision_environment_1 = GridMDP([[-0.1, -0.1, -0.1, +1], 8 | [-0.1, None, -0.1, -1], 9 | [-0.1, -0.1, -0.1, -0.1]], 10 | terminals=[(3, 2), (3, 1)]) 11 | 12 | sequential_decision_environment_2 = GridMDP([[-2, -2, -2, +1], 13 | [-2, None, -2, -1], 14 | [-2, -2, -2, -2]], 15 | terminals=[(3, 2), (3, 1)]) 16 | 17 | sequential_decision_environment_3 = GridMDP([[-1.0, -0.1, -0.1, -0.1, -0.1, 0.5], 18 | [-0.1, None, None, -0.5, -0.1, -0.1], 19 | [-0.1, None, 1.0, 3.0, None, -0.1], 20 | [-0.1, -0.1, -0.1, None, None, -0.1], 21 | [0.5, -0.1, -0.1, -0.1, -0.1, -1.0]], 22 | terminals=[(2, 2), (3, 2), (0, 4), (5, 0)]) 23 | 24 | 25 | def test_value_iteration(): 26 | ref1 = { 27 | (3, 2): 1.0, (3, 1): -1.0, 28 | (3, 0): 0.12958868267972745, (0, 1): 0.39810203830605462, 29 | (0, 2): 0.50928545646220924, (1, 0): 0.25348746162470537, 30 | (0, 0): 0.29543540628363629, (1, 2): 0.64958064617168676, 31 | (2, 0): 0.34461306281476806, (2, 1): 0.48643676237737926, 32 | (2, 2): 0.79536093684710951} 33 | assert sum(value_iteration(sequential_decision_environment, .01).values()) - sum(ref1.values()) < 0.0001 34 | 35 | ref2 = { 36 | (3, 2): 1.0, (3, 1): -1.0, 37 | (3, 0): -0.0897388258468311, (0, 1): 0.146419707398967840, 38 | (0, 2): 0.30596200514385086, (1, 0): 0.010092796415625799, 39 | (0, 0): 0.00633408092008296, (1, 2): 0.507390193380827400, 40 | (2, 0): 0.15072242145212010, (2, 1): 0.358309043654212570, 41 | (2, 2): 0.71675493618997840} 42 | assert sum(value_iteration(sequential_decision_environment_1, .01).values()) - sum(ref2.values()) < 0.0001 43 | 44 | ref3 = { 45 | (3, 2): 1.0, (3, 1): -1.0, 46 | (3, 0): 
-3.5141584808407855, (0, 1): -7.8000009574737180, 47 | (0, 2): -6.1064293596058830, (1, 0): -7.1012549580376760, 48 | (0, 0): -8.5872244532783200, (1, 2): -3.9653547121245810, 49 | (2, 0): -5.3099468802901630, (2, 1): -3.3543366255753995, 50 | (2, 2): -1.7383376462930498} 51 | assert sum(value_iteration(sequential_decision_environment_2, .01).values()) - sum(ref3.values()) < 0.0001 52 | 53 | ref4 = { 54 | (0, 0): 4.350592130345558, (0, 1): 3.640700980321895, (0, 2): 3.0734806370346943, (0, 3): 2.5754335063434937, 55 | (0, 4): -1.0, 56 | (1, 0): 3.640700980321895, (1, 1): 3.129579352304856, (1, 4): 2.0787517066719916, 57 | (2, 0): 3.0259220379893352, (2, 1): 2.5926103577982897, (2, 2): 1.0, (2, 4): 2.507774181360808, 58 | (3, 0): 2.5336747364500076, (3, 2): 3.0, (3, 3): 2.292172805400873, (3, 4): 2.996383110867515, 59 | (4, 0): 2.1014575936349886, (4, 3): 3.1297590518608907, (4, 4): 3.6408806798779287, 60 | (5, 0): -1.0, (5, 1): 2.5756132058995282, (5, 2): 3.0736603365907276, (5, 3): 3.6408806798779287, 61 | (5, 4): 4.350771829901593} 62 | assert sum(value_iteration(sequential_decision_environment_3, .01).values()) - sum(ref4.values()) < 0.001 63 | 64 | 65 | def test_policy_iteration(): 66 | assert policy_iteration(sequential_decision_environment) == { 67 | (0, 0): (0, 1), (0, 1): (0, 1), (0, 2): (1, 0), 68 | (1, 0): (1, 0), (1, 2): (1, 0), (2, 0): (0, 1), 69 | (2, 1): (0, 1), (2, 2): (1, 0), (3, 0): (-1, 0), 70 | (3, 1): None, (3, 2): None} 71 | 72 | assert policy_iteration(sequential_decision_environment_1) == { 73 | (0, 0): (0, 1), (0, 1): (0, 1), (0, 2): (1, 0), 74 | (1, 0): (1, 0), (1, 2): (1, 0), (2, 0): (0, 1), 75 | (2, 1): (0, 1), (2, 2): (1, 0), (3, 0): (-1, 0), 76 | (3, 1): None, (3, 2): None} 77 | 78 | assert policy_iteration(sequential_decision_environment_2) == { 79 | (0, 0): (1, 0), (0, 1): (0, 1), (0, 2): (1, 0), 80 | (1, 0): (1, 0), (1, 2): (1, 0), (2, 0): (1, 0), 81 | (2, 1): (1, 0), (2, 2): (1, 0), (3, 0): (0, 1), 82 | (3, 1): None, (3, 2): None} 
83 | 84 | 85 | def test_best_policy(): 86 | pi = best_policy(sequential_decision_environment, 87 | value_iteration(sequential_decision_environment, .01)) 88 | assert sequential_decision_environment.to_arrows(pi) == [['>', '>', '>', '.'], 89 | ['^', None, '^', '.'], 90 | ['^', '>', '^', '<']] 91 | 92 | pi_1 = best_policy(sequential_decision_environment_1, 93 | value_iteration(sequential_decision_environment_1, .01)) 94 | assert sequential_decision_environment_1.to_arrows(pi_1) == [['>', '>', '>', '.'], 95 | ['^', None, '^', '.'], 96 | ['^', '>', '^', '<']] 97 | 98 | pi_2 = best_policy(sequential_decision_environment_2, 99 | value_iteration(sequential_decision_environment_2, .01)) 100 | assert sequential_decision_environment_2.to_arrows(pi_2) == [['>', '>', '>', '.'], 101 | ['^', None, '>', '.'], 102 | ['>', '>', '>', '^']] 103 | 104 | pi_3 = best_policy(sequential_decision_environment_3, 105 | value_iteration(sequential_decision_environment_3, .01)) 106 | assert sequential_decision_environment_3.to_arrows(pi_3) == [['.', '>', '>', '>', '>', '>'], 107 | ['v', None, None, '>', '>', '^'], 108 | ['v', None, '.', '.', None, '^'], 109 | ['v', '<', 'v', None, None, '^'], 110 | ['<', '<', '<', '<', '<', '.']] 111 | 112 | 113 | def test_transition_model(): 114 | transition_model = {'a': {'plan1': [(0.2, 'a'), (0.3, 'b'), (0.3, 'c'), (0.2, 'd')], 115 | 'plan2': [(0.4, 'a'), (0.15, 'b'), (0.45, 'c')], 116 | 'plan3': [(0.2, 'a'), (0.5, 'b'), (0.3, 'c')], 117 | }, 118 | 'b': {'plan1': [(0.2, 'a'), (0.6, 'b'), (0.2, 'c'), (0.1, 'd')], 119 | 'plan2': [(0.6, 'a'), (0.2, 'b'), (0.1, 'c'), (0.1, 'd')], 120 | 'plan3': [(0.3, 'a'), (0.3, 'b'), (0.4, 'c')], 121 | }, 122 | 'c': {'plan1': [(0.3, 'a'), (0.5, 'b'), (0.1, 'c'), (0.1, 'd')], 123 | 'plan2': [(0.5, 'a'), (0.3, 'b'), (0.1, 'c'), (0.1, 'd')], 124 | 'plan3': [(0.1, 'a'), (0.3, 'b'), (0.1, 'c'), (0.5, 'd')], 125 | }} 126 | 127 | mdp = MDP(init="a", actlist={"plan1", "plan2", "plan3"}, terminals={"d"}, states={"a", "b", "c", "d"}, 
128 | transitions=transition_model) 129 | 130 | assert mdp.T("a", "plan3") == [(0.2, 'a'), (0.5, 'b'), (0.3, 'c')] 131 | assert mdp.T("b", "plan2") == [(0.6, 'a'), (0.2, 'b'), (0.1, 'c'), (0.1, 'd')] 132 | assert mdp.T("c", "plan1") == [(0.3, 'a'), (0.5, 'b'), (0.1, 'c'), (0.1, 'd')] 133 | 134 | 135 | def test_pomdp_value_iteration(): 136 | t_prob = [[[0.65, 0.35], [0.65, 0.35]], [[0.65, 0.35], [0.65, 0.35]], [[1.0, 0.0], [0.0, 1.0]]] 137 | e_prob = [[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]], [[0.8, 0.2], [0.3, 0.7]]] 138 | rewards = [[5, -10], [-20, 5], [-1, -1]] 139 | 140 | gamma = 0.95 141 | actions = ('0', '1', '2') 142 | states = ('0', '1') 143 | 144 | pomdp = POMDP(actions, t_prob, e_prob, rewards, states, gamma) 145 | utility = pomdp_value_iteration(pomdp, epsilon=5) 146 | 147 | for _, v in utility.items(): 148 | sum_ = 0 149 | for element in v: 150 | sum_ += sum(element) 151 | 152 | assert -9.76 < sum_ < -9.70 or 246.5 < sum_ < 248.5 or 0 < sum_ < 1 153 | 154 | 155 | def test_pomdp_value_iteration2(): 156 | t_prob = [[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]], [[1.0, 0.0], [0.0, 1.0]]] 157 | e_prob = [[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]], [[0.85, 0.15], [0.15, 0.85]]] 158 | rewards = [[-100, 10], [10, -100], [-1, -1]] 159 | 160 | gamma = 0.95 161 | actions = ('0', '1', '2') 162 | states = ('0', '1') 163 | 164 | pomdp = POMDP(actions, t_prob, e_prob, rewards, states, gamma) 165 | utility = pomdp_value_iteration(pomdp, epsilon=100) 166 | 167 | for _, v in utility.items(): 168 | sum_ = 0 169 | for element in v: 170 | sum_ += sum(element) 171 | 172 | assert -77.31 < sum_ < -77.25 or 799 < sum_ < 800 173 | 174 | 175 | if __name__ == "__main__": 176 | pytest.main() 177 | -------------------------------------------------------------------------------- /tests/test_nlp.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import pytest 4 | import nlp 5 | 6 | from nlp import 
loadPageHTML, stripRawHTML, findOutlinks, onlyWikipediaURLS
from nlp import expand_pages, relevant_pages, normalize, ConvergenceDetector, getInLinks
from nlp import getOutLinks, Page, determineInlinks, HITS
from nlp import Rules, Lexicon, Grammar, ProbRules, ProbLexicon, ProbGrammar
from nlp import Chart, CYK_parse
# Clumsy imports because we want to access certain nlp.py globals explicitly, because
# they are accessed by functions within nlp.py

from unittest.mock import patch
from io import BytesIO

random.seed("aima-python")


def test_rules():
    """Rules parses 'A | B c' alternative syntax into lists of symbol lists."""
    check = {'A': [['B', 'C'], ['D', 'E']], 'B': [['E'], ['a'], ['b', 'c']]}
    assert Rules(A="B C | D E", B="E | a | b c") == check


def test_lexicon():
    check = {'Article': ['the', 'a', 'an'], 'Pronoun': ['i', 'you', 'he']}
    lexicon = Lexicon(Article="the | a | an", Pronoun="i | you | he")
    assert lexicon == check


def test_grammar():
    rules = Rules(A="B C | D E", B="E | a | b c")
    lexicon = Lexicon(Article="the | a | an", Pronoun="i | you | he")
    grammar = Grammar("Simplegram", rules, lexicon)

    assert grammar.rewrites_for('A') == [['B', 'C'], ['D', 'E']]
    assert grammar.isa('the', 'Article')

    grammar = nlp.E_Chomsky
    for rule in grammar.cnf_rules():
        assert len(rule) == 3


def test_generation():
    """Every token of a random sentence must come from some lexicon category."""
    lexicon = Lexicon(Article="the | a | an",
                      Pronoun="i | you | he")

    rules = Rules(
        S="Article | More | Pronoun",
        More="Article Pronoun | Pronoun Pronoun"
    )

    grammar = Grammar("Simplegram", rules, lexicon)

    sentence = grammar.generate_random('S')
    for token in sentence.split():
        found = False
        for non_terminal, terminals in grammar.lexicon.items():
            if token in terminals:
                found = True
        assert found


def test_prob_rules():
    check = {'A': [(['B', 'C'], 0.3), (['D', 'E'], 0.7)],
             'B': [(['E'], 0.1), (['a'], 0.2), (['b', 'c'], 0.7)]}
    rules = ProbRules(A="B C [0.3] | D E [0.7]", B="E [0.1] | a [0.2] | b c [0.7]")
    assert rules == check


def test_prob_lexicon():
    check = {'Article': [('the', 0.5), ('a', 0.25), ('an', 0.25)],
             'Pronoun': [('i', 0.4), ('you', 0.3), ('he', 0.3)]}
    lexicon = ProbLexicon(Article="the [0.5] | a [0.25] | an [0.25]",
                          Pronoun="i [0.4] | you [0.3] | he [0.3]")
    assert lexicon == check


def test_prob_grammar():
    rules = ProbRules(A="B C [0.3] | D E [0.7]", B="E [0.1] | a [0.2] | b c [0.7]")
    lexicon = ProbLexicon(Article="the [0.5] | a [0.25] | an [0.25]",
                          Pronoun="i [0.4] | you [0.3] | he [0.3]")
    grammar = ProbGrammar("Simplegram", rules, lexicon)

    assert grammar.rewrites_for('A') == [(['B', 'C'], 0.3), (['D', 'E'], 0.7)]
    assert grammar.isa('the', 'Article')

    grammar = nlp.E_Prob_Chomsky
    for rule in grammar.cnf_rules():
        assert len(rule) == 4


def test_prob_generation():
    lexicon = ProbLexicon(Verb="am [0.5] | are [0.25] | is [0.25]",
                          Pronoun="i [0.4] | you [0.3] | he [0.3]")

    rules = ProbRules(
        S="Verb [0.5] | More [0.3] | Pronoun [0.1] | nobody is here [0.1]",
        More="Pronoun Verb [0.7] | Pronoun Pronoun [0.3]"
    )

    grammar = ProbGrammar("Simplegram", rules, lexicon)

    sentence = grammar.generate_random('S')
    assert len(sentence) == 2


def test_chart_parsing():
    chart = Chart(nlp.E0)
    parses = chart.parses('the stench is in 2 2')
    assert len(parses) == 1


def test_CYK_parse():
    grammar = nlp.E_Prob_Chomsky
    words = ['the', 'robot', 'is', 'good']
    P = CYK_parse(words, grammar)
    assert len(P) == 52

    grammar = nlp.E_Prob_Chomsky_
    words = ['astronomers', 'saw', 'stars']
    P = CYK_parse(words, grammar)
    assert len(P) == 32


# ______________________________________________________________________________
# Data Setup

testHTML = """Keyword String 1: A man is a male human.
Keyword String 2: Like most other male mammals, a man inherits an
X from his mom and a Y from his dad.
Links:
href="https://google.com.au"
< href="/wiki/TestThing" > href="/wiki/TestBoy"
href="/wiki/TestLiving" href="/wiki/TestMan" >"""
testHTML2 = "a mom and a dad"
# NOTE(review): the HTML tags below were stripped by text extraction and are
# reconstructed around the surviving "Page Title" / "AIMA book" text — verify upstream.
testHTML3 = """
<!DOCTYPE html>
<html>
<head>
<title>Page Title</title>
</head>
<body>

<p>AIMA book</p>

</body>
</html>
"""

pA = Page("A", ["B", "C", "E"], ["D"], 1, 6)
pB = Page("B", ["E"], ["A", "C", "D"], 2, 5)
pC = Page("C", ["B", "E"], ["A", "D"], 3, 4)
pD = Page("D", ["A", "B", "C", "E"], [], 4, 3)
pE = Page("E", [], ["A", "B", "C", "D", "F"], 5, 2)
pF = Page("F", ["E"], [], 6, 1)
pageDict = {pA.address: pA, pB.address: pB, pC.address: pC,
            pD.address: pD, pE.address: pE, pF.address: pF}
nlp.pagesIndex = pageDict
nlp.pagesContent = {pA.address: testHTML, pB.address: testHTML2,
                    pC.address: testHTML, pD.address: testHTML2,
                    pE.address: testHTML, pF.address: testHTML2}


# This test takes a long time (> 60 secs)
# def test_loadPageHTML():
#     # first format all the relative URLs with the base URL
#     addresses = [examplePagesSet[0] + x for x in examplePagesSet[1:]]
#     loadedPages = loadPageHTML(addresses)
#     relURLs = ['Ancient_Greek', 'Ethics', 'Plato', 'Theology']
#     fullURLs = ["https://en.wikipedia.org/wiki/" + x for x in relURLs]
#     assert all(x in loadedPages for x in fullURLs)
#     assert all(loadedPages.get(key, "") != "" for key in addresses)


@patch('urllib.request.urlopen', return_value=BytesIO(testHTML3.encode()))
def test_stripRawHTML(html_mock):
    """stripRawHTML must drop markup but keep the visible page text."""
    addr = "https://en.wikipedia.org/wiki/Ethics"
    aPage = loadPageHTML([addr])
    someHTML = aPage[addr]
    strippedHTML = stripRawHTML(someHTML)
    # NOTE(review): tag literals below reconstructed (stripped by extraction) — verify upstream.
    assert "<head>" not in strippedHTML and "</head>" not in strippedHTML
    assert "AIMA book" in someHTML and "AIMA book" in strippedHTML


def test_determineInlinks():
    assert set(determineInlinks(pA)) == set(['B', 'C', 'E'])
    assert set(determineInlinks(pE)) == set([])
    assert set(determineInlinks(pF)) == set(['E'])


def test_findOutlinks_wiki():
    testPage = pageDict[pA.address]
    outlinks = findOutlinks(testPage, handleURLs=onlyWikipediaURLS)
assert "https://en.wikipedia.org/wiki/TestThing" in outlinks 196 | assert "https://en.wikipedia.org/wiki/TestThing" in outlinks 197 | assert "https://google.com.au" not in outlinks 198 | 199 | 200 | # ______________________________________________________________________________ 201 | # HITS Helper Functions 202 | 203 | 204 | def test_expand_pages(): 205 | pages = {k: pageDict[k] for k in ('F')} 206 | pagesTwo = {k: pageDict[k] for k in ('A', 'E')} 207 | expanded_pages = expand_pages(pages) 208 | assert all(x in expanded_pages for x in ['F', 'E']) 209 | assert all(x not in expanded_pages for x in ['A', 'B', 'C', 'D']) 210 | expanded_pages = expand_pages(pagesTwo) 211 | print(expanded_pages) 212 | assert all(x in expanded_pages for x in ['A', 'B', 'C', 'D', 'E', 'F']) 213 | 214 | 215 | def test_relevant_pages(): 216 | pages = relevant_pages("his dad") 217 | assert all((x in pages) for x in ['A', 'C', 'E']) 218 | assert all((x not in pages) for x in ['B', 'D', 'F']) 219 | pages = relevant_pages("mom and dad") 220 | assert all((x in pages) for x in ['A', 'B', 'C', 'D', 'E', 'F']) 221 | pages = relevant_pages("philosophy") 222 | assert all((x not in pages) for x in ['A', 'B', 'C', 'D', 'E', 'F']) 223 | 224 | 225 | def test_normalize(): 226 | normalize(pageDict) 227 | print(page.hub for addr, page in nlp.pagesIndex.items()) 228 | expected_hub = [1 / 91 ** 0.5, 2 / 91 ** 0.5, 3 / 91 ** 0.5, 4 / 91 ** 0.5, 5 / 91 ** 0.5, 229 | 6 / 91 ** 0.5] # Works only for sample data above 230 | expected_auth = list(reversed(expected_hub)) 231 | assert len(expected_hub) == len(expected_auth) == len(nlp.pagesIndex) 232 | assert expected_hub == [page.hub for addr, page in sorted(nlp.pagesIndex.items())] 233 | assert expected_auth == [page.authority for addr, page in sorted(nlp.pagesIndex.items())] 234 | 235 | 236 | def test_detectConvergence(): 237 | # run detectConvergence once to initialise history 238 | convergence = ConvergenceDetector() 239 | convergence() 240 | assert convergence() 
# values haven't changed so should return True 241 | # make tiny increase/decrease to all values 242 | for _, page in nlp.pagesIndex.items(): 243 | page.hub += 0.0003 244 | page.authority += 0.0004 245 | # retest function with values. Should still return True 246 | assert convergence() 247 | for _, page in nlp.pagesIndex.items(): 248 | page.hub += 3000000 249 | page.authority += 3000000 250 | # retest function with values. Should now return false 251 | assert not convergence() 252 | 253 | 254 | def test_getInlinks(): 255 | inlnks = getInLinks(pageDict['A']) 256 | assert sorted(inlnks) == pageDict['A'].inlinks 257 | 258 | 259 | def test_getOutlinks(): 260 | outlnks = getOutLinks(pageDict['A']) 261 | assert sorted(outlnks) == pageDict['A'].outlinks 262 | 263 | 264 | def test_HITS(): 265 | HITS('inherit') 266 | auth_list = [pA.authority, pB.authority, pC.authority, pD.authority, pE.authority, pF.authority] 267 | hub_list = [pA.hub, pB.hub, pC.hub, pD.hub, pE.hub, pF.hub] 268 | assert max(auth_list) == pD.authority 269 | assert max(hub_list) == pE.hub 270 | 271 | 272 | if __name__ == '__main__': 273 | pytest.main() 274 | -------------------------------------------------------------------------------- /tests/test_nlp4e.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import pytest 4 | import nlp 5 | 6 | from nlp4e import Rules, Lexicon, Grammar, ProbRules, ProbLexicon, ProbGrammar, E0 7 | from nlp4e import Chart, CYK_parse, subspan, astar_search_parsing, beam_search_parsing 8 | 9 | # Clumsy imports because we want to access certain nlp.py globals explicitly, because 10 | # they are accessed by functions within nlp.py 11 | 12 | random.seed("aima-python") 13 | 14 | 15 | def test_rules(): 16 | check = {'A': [['B', 'C'], ['D', 'E']], 'B': [['E'], ['a'], ['b', 'c']]} 17 | assert Rules(A="B C | D E", B="E | a | b c") == check 18 | 19 | 20 | def test_lexicon(): 21 | check = {'Article': ['the', 'a', 'an'], 'Pronoun': 
['i', 'you', 'he']} 22 | lexicon = Lexicon(Article="the | a | an", Pronoun="i | you | he") 23 | assert lexicon == check 24 | 25 | 26 | def test_grammar(): 27 | rules = Rules(A="B C | D E", B="E | a | b c") 28 | lexicon = Lexicon(Article="the | a | an", Pronoun="i | you | he") 29 | grammar = Grammar("Simplegram", rules, lexicon) 30 | 31 | assert grammar.rewrites_for('A') == [['B', 'C'], ['D', 'E']] 32 | assert grammar.isa('the', 'Article') 33 | 34 | grammar = nlp.E_Chomsky 35 | for rule in grammar.cnf_rules(): 36 | assert len(rule) == 3 37 | 38 | 39 | def test_generation(): 40 | lexicon = Lexicon(Article="the | a | an", 41 | Pronoun="i | you | he") 42 | 43 | rules = Rules( 44 | S="Article | More | Pronoun", 45 | More="Article Pronoun | Pronoun Pronoun" 46 | ) 47 | 48 | grammar = Grammar("Simplegram", rules, lexicon) 49 | 50 | sentence = grammar.generate_random('S') 51 | for token in sentence.split(): 52 | found = False 53 | for non_terminal, terminals in grammar.lexicon.items(): 54 | if token in terminals: 55 | found = True 56 | assert found 57 | 58 | 59 | def test_prob_rules(): 60 | check = {'A': [(['B', 'C'], 0.3), (['D', 'E'], 0.7)], 61 | 'B': [(['E'], 0.1), (['a'], 0.2), (['b', 'c'], 0.7)]} 62 | rules = ProbRules(A="B C [0.3] | D E [0.7]", B="E [0.1] | a [0.2] | b c [0.7]") 63 | assert rules == check 64 | 65 | 66 | def test_prob_lexicon(): 67 | check = {'Article': [('the', 0.5), ('a', 0.25), ('an', 0.25)], 68 | 'Pronoun': [('i', 0.4), ('you', 0.3), ('he', 0.3)]} 69 | lexicon = ProbLexicon(Article="the [0.5] | a [0.25] | an [0.25]", 70 | Pronoun="i [0.4] | you [0.3] | he [0.3]") 71 | assert lexicon == check 72 | 73 | 74 | def test_prob_grammar(): 75 | rules = ProbRules(A="B C [0.3] | D E [0.7]", B="E [0.1] | a [0.2] | b c [0.7]") 76 | lexicon = ProbLexicon(Article="the [0.5] | a [0.25] | an [0.25]", 77 | Pronoun="i [0.4] | you [0.3] | he [0.3]") 78 | grammar = ProbGrammar("Simplegram", rules, lexicon) 79 | 80 | assert grammar.rewrites_for('A') == [(['B', 'C'], 
0.3), (['D', 'E'], 0.7)] 81 | assert grammar.isa('the', 'Article') 82 | 83 | grammar = nlp.E_Prob_Chomsky 84 | for rule in grammar.cnf_rules(): 85 | assert len(rule) == 4 86 | 87 | 88 | def test_prob_generation(): 89 | lexicon = ProbLexicon(Verb="am [0.5] | are [0.25] | is [0.25]", 90 | Pronoun="i [0.4] | you [0.3] | he [0.3]") 91 | 92 | rules = ProbRules( 93 | S="Verb [0.5] | More [0.3] | Pronoun [0.1] | nobody is here [0.1]", 94 | More="Pronoun Verb [0.7] | Pronoun Pronoun [0.3]") 95 | 96 | grammar = ProbGrammar("Simplegram", rules, lexicon) 97 | 98 | sentence = grammar.generate_random('S') 99 | assert len(sentence) == 2 100 | 101 | 102 | def test_chart_parsing(): 103 | chart = Chart(nlp.E0) 104 | parses = chart.parses('the stench is in 2 2') 105 | assert len(parses) == 1 106 | 107 | 108 | def test_CYK_parse(): 109 | grammar = nlp.E_Prob_Chomsky 110 | words = ['the', 'robot', 'is', 'good'] 111 | P = CYK_parse(words, grammar) 112 | assert len(P) == 5 113 | 114 | grammar = nlp.E_Prob_Chomsky_ 115 | words = ['astronomers', 'saw', 'stars'] 116 | P = CYK_parse(words, grammar) 117 | assert len(P) == 3 118 | 119 | 120 | def test_subspan(): 121 | spans = subspan(3) 122 | assert spans.__next__() == (1, 1, 2) 123 | assert spans.__next__() == (2, 2, 3) 124 | assert spans.__next__() == (1, 1, 3) 125 | assert spans.__next__() == (1, 2, 3) 126 | 127 | 128 | def test_text_parsing(): 129 | words = ["the", "wumpus", "is", "dead"] 130 | grammer = E0 131 | assert astar_search_parsing(words, grammer) == 'S' 132 | assert beam_search_parsing(words, grammer) == 'S' 133 | words = ["the", "is", "wupus", "dead"] 134 | assert astar_search_parsing(words, grammer) is False 135 | assert beam_search_parsing(words, grammer) is False 136 | 137 | 138 | if __name__ == '__main__': 139 | pytest.main() 140 | -------------------------------------------------------------------------------- /tests/test_probabilistic_learning.py: 
-------------------------------------------------------------------------------- 1 | import random 2 | 3 | import pytest 4 | 5 | from learning import DataSet 6 | from probabilistic_learning import * 7 | 8 | random.seed("aima-python") 9 | 10 | 11 | def test_naive_bayes(): 12 | iris = DataSet(name='iris') 13 | # discrete 14 | nbd = NaiveBayesLearner(iris, continuous=False) 15 | assert nbd([5, 3, 1, 0.1]) == 'setosa' 16 | assert nbd([6, 3, 4, 1.1]) == 'versicolor' 17 | assert nbd([7.7, 3, 6, 2]) == 'virginica' 18 | # continuous 19 | nbc = NaiveBayesLearner(iris, continuous=True) 20 | assert nbc([5, 3, 1, 0.1]) == 'setosa' 21 | assert nbc([6, 5, 3, 1.5]) == 'versicolor' 22 | assert nbc([7, 3, 6.5, 2]) == 'virginica' 23 | # simple 24 | data1 = 'a' * 50 + 'b' * 30 + 'c' * 15 25 | dist1 = CountingProbDist(data1) 26 | data2 = 'a' * 30 + 'b' * 45 + 'c' * 20 27 | dist2 = CountingProbDist(data2) 28 | data3 = 'a' * 20 + 'b' * 20 + 'c' * 35 29 | dist3 = CountingProbDist(data3) 30 | dist = {('First', 0.5): dist1, ('Second', 0.3): dist2, ('Third', 0.2): dist3} 31 | nbs = NaiveBayesLearner(dist, simple=True) 32 | assert nbs('aab') == 'First' 33 | assert nbs(['b', 'b']) == 'Second' 34 | assert nbs('ccbcc') == 'Third' 35 | 36 | 37 | if __name__ == "__main__": 38 | pytest.main() 39 | -------------------------------------------------------------------------------- /tests/test_reinforcement_learning.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from reinforcement_learning import * 4 | from mdp import sequential_decision_environment 5 | 6 | random.seed("aima-python") 7 | 8 | north = (0, 1) 9 | south = (0, -1) 10 | west = (-1, 0) 11 | east = (1, 0) 12 | 13 | policy = { 14 | (0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None, 15 | (0, 1): north, (2, 1): north, (3, 1): None, 16 | (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west, 17 | } 18 | 19 | 20 | def test_PassiveDUEAgent(): 21 | agent = PassiveDUEAgent(policy, 
sequential_decision_environment) 22 | for i in range(200): 23 | run_single_trial(agent, sequential_decision_environment) 24 | agent.estimate_U() 25 | # Agent does not always produce same results. 26 | # Check if results are good enough. 27 | # print(agent.U[(0, 0)], agent.U[(0,1)], agent.U[(1,0)]) 28 | assert agent.U[(0, 0)] > 0.15 # In reality around 0.3 29 | assert agent.U[(0, 1)] > 0.15 # In reality around 0.4 30 | assert agent.U[(1, 0)] > 0 # In reality around 0.2 31 | 32 | 33 | def test_PassiveADPAgent(): 34 | agent = PassiveADPAgent(policy, sequential_decision_environment) 35 | for i in range(100): 36 | run_single_trial(agent, sequential_decision_environment) 37 | 38 | # Agent does not always produce same results. 39 | # Check if results are good enough. 40 | # print(agent.U[(0, 0)], agent.U[(0,1)], agent.U[(1,0)]) 41 | assert agent.U[(0, 0)] > 0.15 # In reality around 0.3 42 | assert agent.U[(0, 1)] > 0.15 # In reality around 0.4 43 | assert agent.U[(1, 0)] > 0 # In reality around 0.2 44 | 45 | 46 | def test_PassiveTDAgent(): 47 | agent = PassiveTDAgent(policy, sequential_decision_environment, alpha=lambda n: 60. / (59 + n)) 48 | for i in range(200): 49 | run_single_trial(agent, sequential_decision_environment) 50 | 51 | # Agent does not always produce same results. 52 | # Check if results are good enough. 53 | assert agent.U[(0, 0)] > 0.15 # In reality around 0.3 54 | assert agent.U[(0, 1)] > 0.15 # In reality around 0.35 55 | assert agent.U[(1, 0)] > 0.15 # In reality around 0.25 56 | 57 | 58 | def test_QLearning(): 59 | q_agent = QLearningAgent(sequential_decision_environment, Ne=5, Rplus=2, alpha=lambda n: 60. / (59 + n)) 60 | 61 | for i in range(200): 62 | run_single_trial(q_agent, sequential_decision_environment) 63 | 64 | # Agent does not always produce same results. 65 | # Check if results are good enough. 
66 | assert q_agent.Q[((0, 1), (0, 1))] >= -0.5 # In reality around 0.1 67 | assert q_agent.Q[((1, 0), (0, -1))] <= 0.5 # In reality around -0.1 68 | 69 | 70 | if __name__ == '__main__': 71 | pytest.main() 72 | -------------------------------------------------------------------------------- /tests/test_reinforcement_learning4e.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from mdp4e import sequential_decision_environment 4 | from reinforcement_learning4e import * 5 | 6 | random.seed("aima-python") 7 | 8 | north = (0, 1) 9 | south = (0, -1) 10 | west = (-1, 0) 11 | east = (1, 0) 12 | 13 | policy = {(0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None, 14 | (0, 1): north, (2, 1): north, (3, 1): None, 15 | (0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west} 16 | 17 | 18 | def test_PassiveDUEAgent(): 19 | agent = PassiveDUEAgent(policy, sequential_decision_environment) 20 | for i in range(200): 21 | run_single_trial(agent, sequential_decision_environment) 22 | agent.estimate_U() 23 | # Agent does not always produce same results. 24 | # Check if results are good enough. 25 | # print(agent.U[(0, 0)], agent.U[(0,1)], agent.U[(1,0)]) 26 | assert agent.U[(0, 0)] > 0.15 # In reality around 0.3 27 | assert agent.U[(0, 1)] > 0.15 # In reality around 0.4 28 | assert agent.U[(1, 0)] > 0 # In reality around 0.2 29 | 30 | 31 | def test_PassiveADPAgent(): 32 | agent = PassiveADPAgent(policy, sequential_decision_environment) 33 | for i in range(100): 34 | run_single_trial(agent, sequential_decision_environment) 35 | 36 | # Agent does not always produce same results. 37 | # Check if results are good enough. 
38 | # print(agent.U[(0, 0)], agent.U[(0,1)], agent.U[(1,0)]) 39 | assert agent.U[(0, 0)] > 0.15 # In reality around 0.3 40 | assert agent.U[(0, 1)] > 0.15 # In reality around 0.4 41 | assert agent.U[(1, 0)] > 0 # In reality around 0.2 42 | 43 | 44 | def test_PassiveTDAgent(): 45 | agent = PassiveTDAgent(policy, sequential_decision_environment, alpha=lambda n: 60. / (59 + n)) 46 | for i in range(200): 47 | run_single_trial(agent, sequential_decision_environment) 48 | 49 | # Agent does not always produce same results. 50 | # Check if results are good enough. 51 | assert agent.U[(0, 0)] > 0.15 # In reality around 0.3 52 | assert agent.U[(0, 1)] > 0.15 # In reality around 0.35 53 | assert agent.U[(1, 0)] > 0.15 # In reality around 0.25 54 | 55 | 56 | def test_QLearning(): 57 | q_agent = QLearningAgent(sequential_decision_environment, Ne=5, Rplus=2, alpha=lambda n: 60. / (59 + n)) 58 | 59 | for i in range(200): 60 | run_single_trial(q_agent, sequential_decision_environment) 61 | 62 | # Agent does not always produce same results. 63 | # Check if results are good enough. 
64 | assert q_agent.Q[((0, 1), (0, 1))] >= -0.5 # In reality around 0.1 65 | assert q_agent.Q[((1, 0), (0, -1))] <= 0.5 # In reality around -0.1 66 | 67 | 68 | if __name__ == '__main__': 69 | pytest.main() 70 | -------------------------------------------------------------------------------- /tests/test_text.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from text import * 7 | from utils import open_data 8 | 9 | random.seed("aima-python") 10 | 11 | 12 | def test_text_models(): 13 | flatland = open_data("EN-text/flatland.txt").read() 14 | wordseq = words(flatland) 15 | P1 = UnigramWordModel(wordseq) 16 | P2 = NgramWordModel(2, wordseq) 17 | P3 = NgramWordModel(3, wordseq) 18 | 19 | # Test top 20 | assert P1.top(5) == [(2081, 'the'), (1479, 'of'), 21 | (1021, 'and'), (1008, 'to'), 22 | (850, 'a')] 23 | 24 | assert P2.top(5) == [(368, ('of', 'the')), (152, ('to', 'the')), 25 | (152, ('in', 'the')), (86, ('of', 'a')), 26 | (80, ('it', 'is'))] 27 | 28 | assert P3.top(5) == [(30, ('a', 'straight', 'line')), 29 | (19, ('of', 'three', 'dimensions')), 30 | (16, ('the', 'sense', 'of')), 31 | (13, ('by', 'the', 'sense')), 32 | (13, ('as', 'well', 'as'))] 33 | 34 | # Test isclose 35 | assert np.isclose(P1['the'], 0.0611, rtol=0.001) 36 | assert np.isclose(P2['of', 'the'], 0.0108, rtol=0.01) 37 | assert np.isclose(P3['so', 'as', 'to'], 0.000323, rtol=0.001) 38 | 39 | # Test cond_prob.get 40 | assert P2.cond_prob.get(('went',)) is None 41 | assert P3.cond_prob['in', 'order'].dictionary == {'to': 6} 42 | 43 | # Test dictionary 44 | test_string = 'unigram' 45 | wordseq = words(test_string) 46 | P1 = UnigramWordModel(wordseq) 47 | assert P1.dictionary == {('unigram'): 1} 48 | 49 | test_string = 'bigram text' 50 | wordseq = words(test_string) 51 | P2 = NgramWordModel(2, wordseq) 52 | assert P2.dictionary == {('bigram', 'text'): 1} 53 | 54 | test_string = 'test trigram text 
here' 55 | wordseq = words(test_string) 56 | P3 = NgramWordModel(3, wordseq) 57 | assert ('test', 'trigram', 'text') in P3.dictionary 58 | assert ('trigram', 'text', 'here') in P3.dictionary 59 | 60 | 61 | def test_char_models(): 62 | test_string = 'test unigram' 63 | wordseq = words(test_string) 64 | P1 = UnigramCharModel(wordseq) 65 | 66 | expected_unigrams = {'n': 1, 's': 1, 'e': 1, 'i': 1, 'm': 1, 'g': 1, 'r': 1, 'a': 1, 't': 2, 'u': 1} 67 | assert len(P1.dictionary) == len(expected_unigrams) 68 | for char in test_string.replace(' ', ''): 69 | assert char in P1.dictionary 70 | 71 | test_string = 'alpha beta' 72 | wordseq = words(test_string) 73 | P1 = NgramCharModel(1, wordseq) 74 | 75 | assert len(P1.dictionary) == len(set(test_string)) 76 | for char in set(test_string): 77 | assert tuple(char) in P1.dictionary 78 | 79 | test_string = 'bigram' 80 | wordseq = words(test_string) 81 | P2 = NgramCharModel(2, wordseq) 82 | 83 | expected_bigrams = {(' ', 'b'): 1, ('b', 'i'): 1, ('i', 'g'): 1, ('g', 'r'): 1, ('r', 'a'): 1, ('a', 'm'): 1} 84 | 85 | assert len(P2.dictionary) == len(expected_bigrams) 86 | for bigram, count in expected_bigrams.items(): 87 | assert bigram in P2.dictionary 88 | assert P2.dictionary[bigram] == count 89 | 90 | test_string = 'bigram bigram' 91 | wordseq = words(test_string) 92 | P2 = NgramCharModel(2, wordseq) 93 | 94 | expected_bigrams = {(' ', 'b'): 2, ('b', 'i'): 2, ('i', 'g'): 2, ('g', 'r'): 2, ('r', 'a'): 2, ('a', 'm'): 2} 95 | 96 | assert len(P2.dictionary) == len(expected_bigrams) 97 | for bigram, count in expected_bigrams.items(): 98 | assert bigram in P2.dictionary 99 | assert P2.dictionary[bigram] == count 100 | 101 | test_string = 'trigram' 102 | wordseq = words(test_string) 103 | P3 = NgramCharModel(3, wordseq) 104 | expected_trigrams = {(' ', 't', 'r'): 1, ('t', 'r', 'i'): 1, 105 | ('r', 'i', 'g'): 1, ('i', 'g', 'r'): 1, 106 | ('g', 'r', 'a'): 1, ('r', 'a', 'm'): 1} 107 | 108 | assert len(P3.dictionary) == len(expected_trigrams) 
109 | for bigram, count in expected_trigrams.items(): 110 | assert bigram in P3.dictionary 111 | assert P3.dictionary[bigram] == count 112 | 113 | test_string = 'trigram trigram trigram' 114 | wordseq = words(test_string) 115 | P3 = NgramCharModel(3, wordseq) 116 | expected_trigrams = {(' ', 't', 'r'): 3, ('t', 'r', 'i'): 3, 117 | ('r', 'i', 'g'): 3, ('i', 'g', 'r'): 3, 118 | ('g', 'r', 'a'): 3, ('r', 'a', 'm'): 3} 119 | 120 | assert len(P3.dictionary) == len(expected_trigrams) 121 | for bigram, count in expected_trigrams.items(): 122 | assert bigram in P3.dictionary 123 | assert P3.dictionary[bigram] == count 124 | 125 | 126 | def test_samples(): 127 | story = open_data("EN-text/flatland.txt").read() 128 | story += open_data("gutenberg.txt").read() 129 | wordseq = words(story) 130 | P1 = UnigramWordModel(wordseq) 131 | P2 = NgramWordModel(2, wordseq) 132 | P3 = NgramWordModel(3, wordseq) 133 | 134 | s1 = P1.samples(10) 135 | s2 = P3.samples(10) 136 | s3 = P3.samples(10) 137 | 138 | assert len(s1.split(' ')) == 10 139 | assert len(s2.split(' ')) == 10 140 | assert len(s3.split(' ')) == 10 141 | 142 | 143 | def test_viterbi_segmentation(): 144 | flatland = open_data("EN-text/flatland.txt").read() 145 | wordseq = words(flatland) 146 | P = UnigramWordModel(wordseq) 147 | text = "itiseasytoreadwordswithoutspaces" 148 | 149 | s, p = viterbi_segment(text, P) 150 | assert s == [ 151 | 'it', 'is', 'easy', 'to', 'read', 'words', 'without', 'spaces'] 152 | 153 | 154 | def test_shift_encoding(): 155 | code = shift_encode("This is a secret message.", 17) 156 | 157 | assert code == 'Kyzj zj r jvtivk dvjjrxv.' 158 | 159 | 160 | def test_shift_decoding(): 161 | flatland = open_data("EN-text/flatland.txt").read() 162 | ring = ShiftDecoder(flatland) 163 | msg = ring.decode('Kyzj zj r jvtivk dvjjrxv.') 164 | 165 | assert msg == 'This is a secret message.' 
166 | 167 | 168 | def test_permutation_decoder(): 169 | gutenberg = open_data("gutenberg.txt").read() 170 | flatland = open_data("EN-text/flatland.txt").read() 171 | 172 | pd = PermutationDecoder(canonicalize(gutenberg)) 173 | assert pd.decode('aba') in ('ece', 'ete', 'tat', 'tit', 'txt') 174 | 175 | pd = PermutationDecoder(canonicalize(flatland)) 176 | assert pd.decode('aba') in ( 177 | 'ded', 'did', 'ece', 'ele', 'eme', 'ere', 'eve', 'eye', 'iti', 'mom', 'ses', 'tat', 'tit') 178 | 179 | 180 | def test_rot13_encoding(): 181 | code = rot13('Hello, world!') 182 | 183 | assert code == 'Uryyb, jbeyq!' 184 | 185 | 186 | def test_rot13_decoding(): 187 | flatland = open_data("EN-text/flatland.txt").read() 188 | ring = ShiftDecoder(flatland) 189 | msg = ring.decode(rot13('Hello, world!')) 190 | 191 | assert msg == 'Hello, world!' 192 | 193 | 194 | def test_counting_probability_distribution(): 195 | D = CountingProbDist() 196 | 197 | for i in range(10000): 198 | D.add(random.choice('123456')) 199 | 200 | ps = [D[n] for n in '123456'] 201 | 202 | assert 1 / 7 <= min(ps) <= max(ps) <= 1 / 5 203 | 204 | 205 | def test_ir_system(): 206 | from collections import namedtuple 207 | Results = namedtuple('IRResults', ['score', 'url']) 208 | 209 | uc = UnixConsultant() 210 | 211 | def verify_query(query, expected): 212 | assert len(expected) == len(query) 213 | 214 | for expected, (score, d) in zip(expected, query): 215 | doc = uc.documents[d] 216 | assert "{0:.2f}".format( 217 | expected.score) == "{0:.2f}".format(score * 100) 218 | assert os.path.basename(expected.url) == os.path.basename(doc.url) 219 | 220 | return True 221 | 222 | q1 = uc.query("how do I remove a file") 223 | assert verify_query(q1, [ 224 | Results(76.83, "aima-data/MAN/rm.txt"), 225 | Results(67.83, "aima-data/MAN/tar.txt"), 226 | Results(67.79, "aima-data/MAN/cp.txt"), 227 | Results(66.58, "aima-data/MAN/zip.txt"), 228 | Results(64.58, "aima-data/MAN/gzip.txt"), 229 | Results(63.74, "aima-data/MAN/pine.txt"), 
230 | Results(62.95, "aima-data/MAN/shred.txt"), 231 | Results(57.46, "aima-data/MAN/pico.txt"), 232 | Results(43.38, "aima-data/MAN/login.txt"), 233 | Results(41.93, "aima-data/MAN/ln.txt")]) 234 | 235 | q2 = uc.query("how do I delete a file") 236 | assert verify_query(q2, [ 237 | Results(75.47, "aima-data/MAN/diff.txt"), 238 | Results(69.12, "aima-data/MAN/pine.txt"), 239 | Results(63.56, "aima-data/MAN/tar.txt"), 240 | Results(60.63, "aima-data/MAN/zip.txt"), 241 | Results(57.46, "aima-data/MAN/pico.txt"), 242 | Results(51.28, "aima-data/MAN/shred.txt"), 243 | Results(26.72, "aima-data/MAN/tr.txt")]) 244 | 245 | q3 = uc.query("email") 246 | assert verify_query(q3, [ 247 | Results(18.39, "aima-data/MAN/pine.txt"), 248 | Results(12.01, "aima-data/MAN/info.txt"), 249 | Results(9.89, "aima-data/MAN/pico.txt"), 250 | Results(8.73, "aima-data/MAN/grep.txt"), 251 | Results(8.07, "aima-data/MAN/zip.txt")]) 252 | 253 | q4 = uc.query("word count for files") 254 | assert verify_query(q4, [ 255 | Results(128.15, "aima-data/MAN/grep.txt"), 256 | Results(94.20, "aima-data/MAN/find.txt"), 257 | Results(81.71, "aima-data/MAN/du.txt"), 258 | Results(55.45, "aima-data/MAN/ps.txt"), 259 | Results(53.42, "aima-data/MAN/more.txt"), 260 | Results(42.00, "aima-data/MAN/dd.txt"), 261 | Results(12.85, "aima-data/MAN/who.txt")]) 262 | 263 | q5 = uc.query("learn: date") 264 | assert verify_query(q5, []) 265 | 266 | q6 = uc.query("2003") 267 | assert verify_query(q6, [ 268 | Results(14.58, "aima-data/MAN/pine.txt"), 269 | Results(11.62, "aima-data/MAN/jar.txt")]) 270 | 271 | 272 | def test_words(): 273 | assert words("``EGAD!'' Edgar cried.") == ['egad', 'edgar', 'cried'] 274 | 275 | 276 | def test_canonicalize(): 277 | assert canonicalize("``EGAD!'' Edgar cried.") == 'egad edgar cried' 278 | 279 | 280 | def test_translate(): 281 | text = 'orange apple lemon ' 282 | func = lambda x: ('s ' + x) if x == ' ' else x 283 | 284 | assert translate(text, func) == 'oranges apples lemons ' 285 | 286 
| 287 | def test_bigrams(): 288 | assert bigrams('this') == ['th', 'hi', 'is'] 289 | assert bigrams(['this', 'is', 'a', 'test']) == [['this', 'is'], ['is', 'a'], ['a', 'test']] 290 | 291 | 292 | if __name__ == '__main__': 293 | pytest.main() 294 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from utils import * 3 | import random 4 | 5 | random.seed("aima-python") 6 | 7 | 8 | def test_sequence(): 9 | assert sequence(1) == (1,) 10 | assert sequence("helloworld") == "helloworld" 11 | assert sequence({"hello": 4, "world": 5}) == ({"hello": 4, "world": 5},) 12 | assert sequence([1, 2, 3]) == [1, 2, 3] 13 | assert sequence((4, 5, 6)) == (4, 5, 6) 14 | assert sequence([(1, 2), (2, 3), (4, 5)]) == [(1, 2), (2, 3), (4, 5)] 15 | assert sequence(([1, 2], [3, 4], [5, 6])) == ([1, 2], [3, 4], [5, 6]) 16 | 17 | 18 | def test_remove_all_list(): 19 | assert remove_all(4, []) == [] 20 | assert remove_all(4, [1, 2, 3, 4]) == [1, 2, 3] 21 | assert remove_all(4, [4, 1, 4, 2, 3, 4, 4]) == [1, 2, 3] 22 | assert remove_all(1, [2, 3, 4, 5, 6]) == [2, 3, 4, 5, 6] 23 | 24 | 25 | def test_remove_all_string(): 26 | assert remove_all('s', '') == '' 27 | assert remove_all('s', 'This is a test. Was a test.') == 'Thi i a tet. Wa a tet.' 
28 | assert remove_all('a', 'artificial intelligence: a modern approach') == 'rtificil intelligence: modern pproch' 29 | 30 | 31 | def test_unique(): 32 | assert unique([1, 2, 3, 2, 1]) == [1, 2, 3] 33 | assert unique([1, 5, 6, 7, 6, 5]) == [1, 5, 6, 7] 34 | assert unique([1, 2, 3, 4, 5]) == [1, 2, 3, 4, 5] 35 | 36 | 37 | def test_count(): 38 | assert count([1, 2, 3, 4, 2, 3, 4]) == 7 39 | assert count("aldpeofmhngvia") == 14 40 | assert count([True, False, True, True, False]) == 3 41 | assert count([5 > 1, len("abc") == 3, 3 + 1 == 5]) == 2 42 | assert count("aima") == 4 43 | 44 | 45 | def test_multimap(): 46 | assert multimap([(1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (4, 5)]) == \ 47 | {1: [2, 3, 4], 2: [3, 4], 4: [5]} 48 | assert multimap([("a", 2), ("a", 3), ("a", 4), ("b", 3), ("b", 4), ("c", 5)]) == \ 49 | {'a': [2, 3, 4], 'b': [3, 4], 'c': [5]} 50 | 51 | 52 | def test_product(): 53 | assert product([1, 2, 3, 4]) == 24 54 | assert product(list(range(1, 11))) == 3628800 55 | 56 | 57 | def test_first(): 58 | assert first('word') == 'w' 59 | assert first('') is None 60 | assert first('', 'empty') == 'empty' 61 | assert first([1, 2, 3, 4, 5]) == 1 62 | assert first([]) is None 63 | assert first(range(10)) == 0 64 | assert first(x for x in range(10) if x > 3) == 4 65 | assert first(x for x in range(10) if x > 100) is None 66 | assert first((1, 2, 3)) == 1 67 | assert first(range(2, 10)) == 2 68 | assert first([(1, 2), (1, 3), (1, 4)]) == (1, 2) 69 | assert first({1: "one", 2: "two", 3: "three"}) == 1 70 | 71 | 72 | def test_is_in(): 73 | e = [] 74 | assert is_in(e, [1, e, 3]) is True 75 | assert is_in(e, [1, [], 3]) is False 76 | 77 | 78 | def test_mode(): 79 | assert mode([12, 32, 2, 1, 2, 3, 2, 3, 2, 3, 44, 3, 12, 4, 9, 0, 3, 45, 3]) == 3 80 | assert mode("absndkwoajfkalwpdlsdlfllalsflfdslgflal") == 'l' 81 | assert mode("artificialintelligence") == 'i' 82 | 83 | 84 | def test_power_set(): 85 | assert power_set([1, 2, 3]) == [(1,), (2,), (3,), (1, 2), (1, 3), (2, 
3), (1, 2, 3)] 86 | 87 | 88 | def test_histogram(): 89 | assert histogram([1, 2, 4, 2, 4, 5, 7, 9, 2, 1]) == [(1, 2), (2, 3), (4, 2), (5, 1), (7, 1), (9, 1)] 90 | assert histogram([1, 2, 4, 2, 4, 5, 7, 9, 2, 1], 0, lambda x: x * x) == \ 91 | [(1, 2), (4, 3), (16, 2), (25, 1), (49, 1), (81, 1)] 92 | assert histogram([1, 2, 4, 2, 4, 5, 7, 9, 2, 1], 1) == [(2, 3), (4, 2), (1, 2), (9, 1), (7, 1), (5, 1)] 93 | 94 | 95 | def test_euclidean(): 96 | distance = euclidean_distance([1, 2], [3, 4]) 97 | assert round(distance, 2) == 2.83 98 | 99 | distance = euclidean_distance([1, 2, 3], [4, 5, 6]) 100 | assert round(distance, 2) == 5.2 101 | 102 | distance = euclidean_distance([0, 0, 0], [0, 0, 0]) 103 | assert distance == 0 104 | 105 | 106 | def test_cross_entropy(): 107 | loss = cross_entropy_loss([1, 0], [0.9, 0.3]) 108 | assert round(loss, 2) == 0.23 109 | 110 | loss = cross_entropy_loss([1, 0, 0, 1], [0.9, 0.3, 0.5, 0.75]) 111 | assert round(loss, 2) == 0.36 112 | 113 | loss = cross_entropy_loss([1, 0, 0, 1, 1, 0, 1, 1], [0.9, 0.3, 0.5, 0.75, 0.85, 0.14, 0.93, 0.79]) 114 | assert round(loss, 2) == 0.26 115 | 116 | 117 | def test_rms_error(): 118 | assert rms_error([2, 2], [2, 2]) == 0 119 | assert rms_error((0, 0), (0, 1)) == np.sqrt(0.5) 120 | assert rms_error((1, 0), (0, 1)) == 1 121 | assert rms_error((0, 0), (0, -1)) == np.sqrt(0.5) 122 | assert rms_error((0, 0.5), (0, -0.5)) == np.sqrt(0.5) 123 | 124 | 125 | def test_manhattan_distance(): 126 | assert manhattan_distance([2, 2], [2, 2]) == 0 127 | assert manhattan_distance([0, 0], [0, 1]) == 1 128 | assert manhattan_distance([1, 0], [0, 1]) == 2 129 | assert manhattan_distance([0, 0], [0, -1]) == 1 130 | assert manhattan_distance([0, 0.5], [0, -0.5]) == 1 131 | 132 | 133 | def test_mean_boolean_error(): 134 | assert mean_boolean_error([1, 1], [0, 0]) == 1 135 | assert mean_boolean_error([0, 1], [1, 0]) == 1 136 | assert mean_boolean_error([1, 1], [0, 1]) == 0.5 137 | assert mean_boolean_error([0, 0], [0, 0]) == 0 138 
| assert mean_boolean_error([1, 1], [1, 1]) == 0 139 | 140 | 141 | def test_mean_error(): 142 | assert mean_error([2, 2], [2, 2]) == 0 143 | assert mean_error([0, 0], [0, 1]) == 0.5 144 | assert mean_error([1, 0], [0, 1]) == 1 145 | assert mean_error([0, 0], [0, -1]) == 0.5 146 | assert mean_error([0, 0.5], [0, -0.5]) == 0.5 147 | 148 | 149 | def test_dot_product(): 150 | assert dot_product([1, 2, 3], [1000, 100, 10]) == 1230 151 | assert dot_product([1, 2, 3], [0, 0, 0]) == 0 152 | 153 | 154 | def test_vector_add(): 155 | assert vector_add((0, 1), (8, 9)) == (8, 10) 156 | assert vector_add((1, 1, 1), (2, 2, 2)) == (3, 3, 3) 157 | 158 | 159 | def test_rounder(): 160 | assert rounder(5.3330000300330) == 5.3330 161 | assert rounder(10.234566) == 10.2346 162 | assert rounder([1.234566, 0.555555, 6.010101]) == [1.2346, 0.5556, 6.0101] 163 | assert rounder([[1.234566, 0.555555, 6.010101], 164 | [10.505050, 12.121212, 6.030303]]) == [[1.2346, 0.5556, 6.0101], [10.5051, 12.1212, 6.0303]] 165 | 166 | 167 | def test_num_or_str(): 168 | assert num_or_str('42') == 42 169 | assert num_or_str(' 42x ') == '42x' 170 | 171 | 172 | def test_normalize(): 173 | assert normalize([1, 2, 1]) == [0.25, 0.5, 0.25] 174 | 175 | 176 | def test_gaussian(): 177 | assert gaussian(1, 0.5, 0.7) == 0.6664492057835993 178 | assert gaussian(5, 2, 4.5) == 0.19333405840142462 179 | assert gaussian(3, 1, 3) == 0.3989422804014327 180 | 181 | 182 | def test_weighted_choice(): 183 | choices = [('a', 0.5), ('b', 0.3), ('c', 0.2)] 184 | choice = weighted_choice(choices) 185 | assert choice in choices 186 | 187 | 188 | def compare_list(x, y): 189 | return all([elm_x == y[i] for i, elm_x in enumerate(x)]) 190 | 191 | 192 | def test_distance(): 193 | assert distance((1, 2), (5, 5)) == 5.0 194 | 195 | 196 | def test_distance_squared(): 197 | assert distance_squared((1, 2), (5, 5)) == 25.0 198 | 199 | 200 | def test_turn_heading(): 201 | assert turn_heading((0, 1), 1) == (-1, 0) 202 | assert turn_heading((0, 1), 
-1) == (1, 0) 203 | assert turn_heading((1, 0), 1) == (0, 1) 204 | assert turn_heading((1, 0), -1) == (0, -1) 205 | assert turn_heading((0, -1), 1) == (1, 0) 206 | assert turn_heading((0, -1), -1) == (-1, 0) 207 | assert turn_heading((-1, 0), 1) == (0, -1) 208 | assert turn_heading((-1, 0), -1) == (0, 1) 209 | 210 | 211 | def test_turn_left(): 212 | assert turn_left((0, 1)) == (-1, 0) 213 | 214 | 215 | def test_turn_right(): 216 | assert turn_right((0, 1)) == (1, 0) 217 | 218 | 219 | def test_step(): 220 | assert step(1) == step(0.5) == 1 221 | assert step(0) == 1 222 | assert step(-1) == step(-0.5) == 0 223 | 224 | 225 | def test_Expr(): 226 | A, B, C = symbols('A, B, C') 227 | assert symbols('A, B, C') == (Symbol('A'), Symbol('B'), Symbol('C')) 228 | assert A.op == repr(A) == 'A' 229 | assert arity(A) == 0 and A.args == () 230 | 231 | b = Expr('+', A, 1) 232 | assert arity(b) == 2 and b.op == '+' and b.args == (A, 1) 233 | 234 | u = Expr('-', b) 235 | assert arity(u) == 1 and u.op == '-' and u.args == (b,) 236 | 237 | assert (b ** u) == (b ** u) 238 | assert (b ** u) != (u ** b) 239 | 240 | assert A + b * C ** 2 == A + (b * (C ** 2)) 241 | 242 | ex = C + 1 / (A % 1) 243 | assert list(subexpressions(ex)) == [(C + (1 / (A % 1))), C, (1 / (A % 1)), 1, (A % 1), A, 1] 244 | assert A in subexpressions(ex) 245 | assert B not in subexpressions(ex) 246 | 247 | 248 | def test_expr(): 249 | P, Q, x, y, z, GP = symbols('P, Q, x, y, z, GP') 250 | assert (expr(y + 2 * x) 251 | == expr('y + 2 * x') 252 | == Expr('+', y, Expr('*', 2, x))) 253 | assert expr('P & Q ==> P') == Expr('==>', P & Q, P) 254 | assert expr('P & Q <=> Q & P') == Expr('<=>', (P & Q), (Q & P)) 255 | assert expr('P(x) | P(y) & Q(z)') == (P(x) | (P(y) & Q(z))) 256 | # x is grandparent of z if x is parent of y and y is parent of z: 257 | assert (expr('GP(x, z) <== P(x, y) & P(y, z)') == Expr('<==', GP(x, z), P(x, y) & P(y, z))) 258 | 259 | 260 | def test_min_priority_queue(): 261 | queue = 
PriorityQueue(f=lambda x: x[1]) 262 | queue.append((1, 100)) 263 | queue.append((2, 30)) 264 | queue.append((3, 50)) 265 | assert queue.pop() == (2, 30) 266 | assert len(queue) == 2 267 | assert queue[(3, 50)] == 50 268 | assert (1, 100) in queue 269 | del queue[(1, 100)] 270 | assert (1, 100) not in queue 271 | queue.extend([(1, 100), (4, 10)]) 272 | assert queue.pop() == (4, 10) 273 | assert len(queue) == 2 274 | 275 | 276 | def test_max_priority_queue(): 277 | queue = PriorityQueue(order='max', f=lambda x: x[1]) 278 | queue.append((1, 100)) 279 | queue.append((2, 30)) 280 | queue.append((3, 50)) 281 | assert queue.pop() == (1, 100) 282 | 283 | 284 | def test_priority_queue_with_objects(): 285 | class Test: 286 | def __init__(self, a, b): 287 | self.a = a 288 | self.b = b 289 | 290 | def __eq__(self, other): 291 | return self.a == other.a 292 | 293 | queue = PriorityQueue(f=lambda x: x.b) 294 | queue.append(Test(1, 100)) 295 | other = Test(1, 10) 296 | assert queue[other] == 100 297 | assert other in queue 298 | del queue[other] 299 | assert len(queue) == 0 300 | 301 | 302 | if __name__ == '__main__': 303 | pytest.main() 304 | --------------------------------------------------------------------------------