├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE.md ├── README.md ├── competitive ├── __init__.py ├── other │ ├── __init__.py │ ├── backup_discs.py │ └── maze_solver.py └── t3chfest │ ├── 2017 │ ├── __init__.py │ ├── task-1.py │ ├── task-2.py │ ├── task-3.py │ ├── task-4.py │ ├── task-5.py │ └── task-6.py │ ├── 2018 │ ├── __init__.py │ ├── task_1.py │ ├── task_2.py │ ├── task_3.py │ ├── task_4.py │ ├── task_5.py │ └── task_6.py │ └── __init__.py ├── environment ├── __init__.py ├── concurrency │ └── asyncio │ │ └── hello_world.py ├── containerizing │ ├── __init__.py │ └── docker │ │ ├── __init__.py │ │ ├── hello_docker_container.py │ │ └── hello_docker_image.py ├── logging │ ├── __init__.py │ └── dynamic_handler.py └── scripting │ ├── argparse │ ├── echo.py │ ├── integer_operation.py │ └── integers.py │ ├── fabric │ └── hello_world │ │ └── fabfile.py │ ├── fire │ └── calculator.py │ └── torrent_crawler.py ├── iot └── pybluez │ └── initial-example.py ├── miscellaneous ├── __init__.py ├── hello_worlds │ ├── 3dplot.py │ ├── __init__.py │ ├── bucles.py │ ├── bucles1.py │ ├── control.py │ ├── control1.py │ ├── edad.py │ ├── entrada.py │ ├── euclides.py │ ├── fibonacci.py │ ├── funcion.py │ ├── matrixExample.py │ ├── prueba.py │ └── sesion_1_1.py ├── maze_print.py └── wolframExample.py ├── notebooks ├── bokeh-datetime-hover.ipynb ├── getting-started │ ├── getting-started.ipynb │ └── tensorflow-getting-started.ipynb └── metropolis-hastings.ipynb ├── numerical ├── __init__.py ├── arrow │ ├── __init__.py │ ├── hello_world.py │ ├── modin_read.py │ └── pandas_read.py ├── data_science │ ├── __init__.py │ ├── cumlist.py │ ├── logic_function_tree_learning.py │ ├── models │ │ ├── __init__.py │ │ └── trees │ │ │ ├── __init__.py │ │ │ ├── gain_ranking_continous.py │ │ │ ├── gainranking.py │ │ │ ├── id3.py │ │ │ └── j48.py │ ├── res │ │ ├── ThoraricSurgery.csv │ │ ├── ThoraricSurgery_discrete.csv │ │ ├── __init__.py │ │ ├── careval.csv │ │ ├── credit.csv │ │ ├── 
data_sets.py │ │ ├── followers.csv │ │ ├── logic_data_set.py │ │ ├── logic_function.csv │ │ ├── main │ │ │ ├── DiscretizeThoraricSurgery.py │ │ │ ├── __init__.py │ │ │ └── distance_functions.py │ │ ├── users.csv │ │ ├── weather.csv │ │ ├── weather_nominal.csv │ │ ├── weather_semi_nominal.csv │ │ └── wiki-Vote.csv │ └── weka_relative_error.py ├── math │ ├── algebra │ │ ├── matrix-det-eigen.py │ │ └── matrix_decomposition │ │ │ └── qr │ │ │ ├── givens_rotations.py │ │ │ └── householder_reflections.py │ ├── combinatorics │ │ ├── k_elections_with_reemplacement_with_order.py │ │ ├── k_elections_without_reemplacement_with_order.py │ │ ├── k_elections_without_reemplacement_without_order.py │ │ └── k_elections_without_reemplacement_without_order_fixed_groups.py │ ├── integration │ │ └── double-integral-exercise-19.py │ ├── interpreter.py │ ├── numeric_methods │ │ └── polinomial_evaluation.py │ ├── stats │ │ ├── distributions │ │ │ ├── chi-square-inverse-mean.py │ │ │ ├── lognorm_percentiles.py │ │ │ ├── normal-percentage.py │ │ │ └── probability-exercise-generator.py │ │ ├── stats_template.py │ │ ├── stochastic_processes │ │ │ ├── entrega-01.ipynb │ │ │ └── markov_chains.ipynb │ │ └── transformations │ │ │ ├── normal-power2-transformation.py │ │ │ └── uniform-cos-transformation.py │ └── template.py ├── modin │ ├── __init__.py │ ├── hello_world.py │ └── multi_file_dataset.py ├── tensorflow │ ├── __init__.py │ ├── hello_world.py │ ├── naive_pagerank.py │ ├── pagerank.py │ ├── pagerank_wiki_vote.py │ └── sparse_from_file.py ├── utils │ ├── __init__.py │ ├── constants.py │ ├── csv_to_hdf.py │ ├── csv_to_parquet.py │ └── csv_to_sqlite.py └── vaex │ ├── __init__.py │ ├── csv_to_arrow.py │ └── csv_to_columnar_hdf.py ├── operations_research ├── ortools │ ├── introduction.py │ └── pickup_and_delivery.py └── pulp │ └── getting_started.py ├── poetry.lock ├── pyproject.toml ├── text ├── __init__.py └── regex │ ├── __init__.py │ ├── regex_distance.py │ └── string_matching.py ├── utils 
├── collections │ ├── cycled_set.py │ ├── labeled_tree.py │ ├── oscar_dict.py │ └── tree.py └── json.py ├── visualization ├── __init__.py └── bokeh │ ├── __init__.py │ ├── grouped_bar_plot_example_01.py │ ├── stacked_plot_example_01.py │ ├── stacked_plot_example_02.py │ └── time_series_plot_example.py └── web ├── __init__.py └── django ├── __init__.py ├── django_first_app ├── README.md ├── __init__.py ├── db.sqlite3 ├── django_first_app │ ├── __init__.py │ ├── settings.py │ ├── urls.py │ └── wsgi.py ├── manage.py └── polls │ ├── __init__.py │ ├── admin.py │ ├── apps.py │ ├── migrations │ ├── 0001_initial.py │ └── __init__.py │ ├── models.py │ ├── templates │ └── polls │ │ ├── detail.html │ │ ├── index.html │ │ ├── results.html │ │ └── style.css │ ├── tests.py │ ├── urls.py │ └── views.py └── graphene ├── __init__.py ├── graphene-django-tutorial ├── __init__.py ├── cookbook │ ├── __init__.py │ ├── ingredients │ │ ├── __init__.py │ │ ├── admin.py │ │ ├── apps.py │ │ ├── fixtures │ │ │ └── ingredients.json │ │ ├── migrations │ │ │ ├── 0001_initial.py │ │ │ └── __init__.py │ │ ├── models.py │ │ ├── schema.py │ │ ├── tests.py │ │ └── views.py │ ├── schema.py │ ├── settings.py │ ├── urls.py │ └── wsgi.py ├── db.sqlite3 └── manage.py └── graphene-quickstart ├── __init__.py ├── lesson-01.py ├── lesson-02-enums.py ├── lesson-03-scalars.py ├── lesson-04-interfaces.py ├── lesson-05-abstract-types.py ├── lesson-06-object-types.py ├── lesson-07-schema.py ├── lesson-08-mutations.py ├── lesson-09-context.py └── lesson-10-middleware.py /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Created by https://www.gitignore.io/api/python,pycharm,macos,git,windows,linux,jupyternotebook 3 | 4 | logs/* 5 | 6 | ### Python ### 7 | # Byte-compiled / optimized / DLL files 8 | __pycache__/ 9 | *.py[cod] 10 | *$py.class 11 | 12 | # C extensions 13 | *.so 14 | 15 | # Distribution / packaging 16 | .Python 17 | env/ 18 | build/ 19 | develop-eggs/ 
20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | wheels/ 30 | *.egg-info/ 31 | .installed.cfg 32 | *.egg 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *,cover 53 | .hypothesis/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | 73 | # PyBuilder 74 | target/ 75 | 76 | # Jupyter Notebook 77 | .ipynb_checkpoints 78 | 79 | # pyenv 80 | .python-version 81 | 82 | # celery beat schedule file 83 | celerybeat-schedule 84 | 85 | # dotenv 86 | .env 87 | 88 | # virtualenv 89 | .venv/ 90 | venv/ 91 | ENV/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | 96 | # Rope project settings 97 | .ropeproject 98 | 99 | 100 | ### PyCharm ### 101 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 102 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 103 | 104 | # User-specific stuff: 105 | .idea/workspace.xml 106 | .idea/tasks.xml 107 | 108 | # Sensitive or high-churn files: 109 | .idea/dataSources/ 110 | .idea/dataSources.ids 111 | .idea/dataSources.xml 112 | .idea/dataSources.local.xml 113 | .idea/sqlDataSources.xml 114 | .idea/dynamic.xml 115 | .idea/uiDesigner.xml 116 | 117 | # Gradle: 118 | .idea/gradle.xml 119 | .idea/libraries 120 | 121 | # Mongo Explorer plugin: 122 | .idea/mongoSettings.xml 123 | 124 | ## File-based project 
format: 125 | *.iws 126 | 127 | ## Plugin-specific files: 128 | 129 | # IntelliJ 130 | /out/ 131 | 132 | # mpeltonen/sbt-idea plugin 133 | .idea_modules/ 134 | 135 | # JIRA plugin 136 | atlassian-ide-plugin.xml 137 | 138 | # Crashlytics plugin (for Android Studio and IntelliJ) 139 | com_crashlytics_export_strings.xml 140 | crashlytics.properties 141 | crashlytics-build.properties 142 | fabric.properties 143 | 144 | ### PyCharm Patch ### 145 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 146 | 147 | # *.iml 148 | # modules.xml 149 | # .idea/misc.xml 150 | # *.ipr 151 | 152 | 153 | ### macOS ### 154 | *.DS_Store 155 | .AppleDouble 156 | .LSOverride 157 | 158 | # Icon must end with two \r 159 | Icon 160 | # Thumbnails 161 | ._* 162 | # Files that might appear in the root of a volume 163 | .DocumentRevisions-V100 164 | .fseventsd 165 | .Spotlight-V100 166 | .TemporaryItems 167 | .Trashes 168 | .VolumeIcon.icns 169 | .com.apple.timemachine.donotpresent 170 | # Directories potentially created on remote AFP share 171 | .AppleDB 172 | .AppleDesktop 173 | Network Trash Folder 174 | Temporary Items 175 | .apdisk 176 | 177 | 178 | ### Git ### 179 | *.orig 180 | 181 | ### JupyterNotebook ### 182 | .ipynb_checkpoints 183 | */.ipynb_checkpoints/* 184 | 185 | # Remove previous ipynb_checkpoints 186 | # git rm -r .ipynb_checkpoints/ 187 | # 188 | 189 | ### Windows ### 190 | # Windows thumbnail cache files 191 | Thumbs.db 192 | ehthumbs.db 193 | ehthumbs_vista.db 194 | 195 | # Folder config file 196 | Desktop.ini 197 | 198 | # Recycle Bin used on file shares 199 | $RECYCLE.BIN/ 200 | 201 | # Windows Installer files 202 | *.cab 203 | *.msi 204 | *.msm 205 | *.msp 206 | 207 | # Windows shortcuts 208 | *.lnk 209 | 210 | 211 | ### Linux ### 212 | *~ 213 | 214 | # temporary files which can be created if a process still has a handle open of a deleted file 215 | .fuse_hidden* 216 | 217 | # KDE directory preferences 218 | .directory 219 | 220 
| # Linux trash folder which might appear on any partition or disk 221 | .Trash-* 222 | 223 | # .nfs files are created when an open file is removed but is still being accessed 224 | .nfs* 225 | 226 | # End of https://www.gitignore.io/api/python,pycharm,macos,git,windows,linux,jupyternotebook 227 | 228 | # Input / Output files 229 | *.output 230 | *.out 231 | *.in 232 | *.input 233 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at sergio@garciparedes.me. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 44 | 45 | [homepage]: http://contributor-covenant.org 46 | [version]: http://contributor-covenant.org/version/1/4/ 47 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | We love contributions from everyone. 4 | By participating in this project, 5 | you agree to abide by the thoughtbot [code of conduct]. 6 | 7 | [code of conduct]: CODE_OF_CONDUCT.md 8 | 9 | We expect everyone to follow the code of conduct 10 | anywhere in thoughtbot's project codebases, 11 | issue trackers, chatrooms, and mailing lists. 12 | 13 | ## Contributing Code 14 | 15 | 1. Fork it! 16 | 2. Create an issue. 17 | 3. Create your feature branch identified by your issue id number: `git checkout -b issue-###` 18 | 4. Commit your changes: `git commit -am 'Add some feature'` 19 | 5. Push to the branch: `git push origin issue-###` 20 | 6. 
Submit a pull request 🙂 21 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Mozilla Public License Version 2.0 2 | ================================== 3 | 4 | 1. Definitions 5 | -------------- 6 | 7 | 1.1. "Contributor" 8 | means each individual or legal entity that creates, contributes to 9 | the creation of, or owns Covered Software. 10 | 11 | 1.2. "Contributor Version" 12 | means the combination of the Contributions of others (if any) used 13 | by a Contributor and that particular Contributor's Contribution. 14 | 15 | 1.3. "Contribution" 16 | means Covered Software of a particular Contributor. 17 | 18 | 1.4. "Covered Software" 19 | means Source Code Form to which the initial Contributor has attached 20 | the notice in Exhibit A, the Executable Form of such Source Code 21 | Form, and Modifications of such Source Code Form, in each case 22 | including portions thereof. 23 | 24 | 1.5. "Incompatible With Secondary Licenses" 25 | means 26 | 27 | (a) that the initial Contributor has attached the notice described 28 | in Exhibit B to the Covered Software; or 29 | 30 | (b) that the Covered Software was made available under the terms of 31 | version 1.1 or earlier of the License, but not also under the 32 | terms of a Secondary License. 33 | 34 | 1.6. "Executable Form" 35 | means any form of the work other than Source Code Form. 36 | 37 | 1.7. "Larger Work" 38 | means a work that combines Covered Software with other material, in 39 | a separate file or files, that is not Covered Software. 40 | 41 | 1.8. "License" 42 | means this document. 43 | 44 | 1.9. "Licensable" 45 | means having the right to grant, to the maximum extent possible, 46 | whether at the time of the initial grant or subsequently, any and 47 | all of the rights conveyed by this License. 48 | 49 | 1.10. 
"Modifications" 50 | means any of the following: 51 | 52 | (a) any file in Source Code Form that results from an addition to, 53 | deletion from, or modification of the contents of Covered 54 | Software; or 55 | 56 | (b) any new file in Source Code Form that contains any Covered 57 | Software. 58 | 59 | 1.11. "Patent Claims" of a Contributor 60 | means any patent claim(s), including without limitation, method, 61 | process, and apparatus claims, in any patent Licensable by such 62 | Contributor that would be infringed, but for the grant of the 63 | License, by the making, using, selling, offering for sale, having 64 | made, import, or transfer of either its Contributions or its 65 | Contributor Version. 66 | 67 | 1.12. "Secondary License" 68 | means either the GNU General Public License, Version 2.0, the GNU 69 | Lesser General Public License, Version 2.1, the GNU Affero General 70 | Public License, Version 3.0, or any later versions of those 71 | licenses. 72 | 73 | 1.13. "Source Code Form" 74 | means the form of the work preferred for making modifications. 75 | 76 | 1.14. "You" (or "Your") 77 | means an individual or a legal entity exercising rights under this 78 | License. For legal entities, "You" includes any entity that 79 | controls, is controlled by, or is under common control with You. For 80 | purposes of this definition, "control" means (a) the power, direct 81 | or indirect, to cause the direction or management of such entity, 82 | whether by contract or otherwise, or (b) ownership of more than 83 | fifty percent (50%) of the outstanding shares or beneficial 84 | ownership of such entity. 85 | 86 | 2. License Grants and Conditions 87 | -------------------------------- 88 | 89 | 2.1. 
Grants 90 | 91 | Each Contributor hereby grants You a world-wide, royalty-free, 92 | non-exclusive license: 93 | 94 | (a) under intellectual property rights (other than patent or trademark) 95 | Licensable by such Contributor to use, reproduce, make available, 96 | modify, display, perform, distribute, and otherwise exploit its 97 | Contributions, either on an unmodified basis, with Modifications, or 98 | as part of a Larger Work; and 99 | 100 | (b) under Patent Claims of such Contributor to make, use, sell, offer 101 | for sale, have made, import, and otherwise transfer either its 102 | Contributions or its Contributor Version. 103 | 104 | 2.2. Effective Date 105 | 106 | The licenses granted in Section 2.1 with respect to any Contribution 107 | become effective for each Contribution on the date the Contributor first 108 | distributes such Contribution. 109 | 110 | 2.3. Limitations on Grant Scope 111 | 112 | The licenses granted in this Section 2 are the only rights granted under 113 | this License. No additional rights or licenses will be implied from the 114 | distribution or licensing of Covered Software under this License. 115 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 116 | Contributor: 117 | 118 | (a) for any code that a Contributor has removed from Covered Software; 119 | or 120 | 121 | (b) for infringements caused by: (i) Your and any other third party's 122 | modifications of Covered Software, or (ii) the combination of its 123 | Contributions with other software (except as part of its Contributor 124 | Version); or 125 | 126 | (c) under Patent Claims infringed by Covered Software in the absence of 127 | its Contributions. 128 | 129 | This License does not grant any rights in the trademarks, service marks, 130 | or logos of any Contributor (except as may be necessary to comply with 131 | the notice requirements in Section 3.4). 132 | 133 | 2.4. 
Subsequent Licenses 134 | 135 | No Contributor makes additional grants as a result of Your choice to 136 | distribute the Covered Software under a subsequent version of this 137 | License (see Section 10.2) or under the terms of a Secondary License (if 138 | permitted under the terms of Section 3.3). 139 | 140 | 2.5. Representation 141 | 142 | Each Contributor represents that the Contributor believes its 143 | Contributions are its original creation(s) or it has sufficient rights 144 | to grant the rights to its Contributions conveyed by this License. 145 | 146 | 2.6. Fair Use 147 | 148 | This License is not intended to limit any rights You have under 149 | applicable copyright doctrines of fair use, fair dealing, or other 150 | equivalents. 151 | 152 | 2.7. Conditions 153 | 154 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 155 | in Section 2.1. 156 | 157 | 3. Responsibilities 158 | ------------------- 159 | 160 | 3.1. Distribution of Source Form 161 | 162 | All distribution of Covered Software in Source Code Form, including any 163 | Modifications that You create or to which You contribute, must be under 164 | the terms of this License. You must inform recipients that the Source 165 | Code Form of the Covered Software is governed by the terms of this 166 | License, and how they can obtain a copy of this License. You may not 167 | attempt to alter or restrict the recipients' rights in the Source Code 168 | Form. 169 | 170 | 3.2. 
Distribution of Executable Form 171 | 172 | If You distribute Covered Software in Executable Form then: 173 | 174 | (a) such Covered Software must also be made available in Source Code 175 | Form, as described in Section 3.1, and You must inform recipients of 176 | the Executable Form how they can obtain a copy of such Source Code 177 | Form by reasonable means in a timely manner, at a charge no more 178 | than the cost of distribution to the recipient; and 179 | 180 | (b) You may distribute such Executable Form under the terms of this 181 | License, or sublicense it under different terms, provided that the 182 | license for the Executable Form does not attempt to limit or alter 183 | the recipients' rights in the Source Code Form under this License. 184 | 185 | 3.3. Distribution of a Larger Work 186 | 187 | You may create and distribute a Larger Work under terms of Your choice, 188 | provided that You also comply with the requirements of this License for 189 | the Covered Software. If the Larger Work is a combination of Covered 190 | Software with a work governed by one or more Secondary Licenses, and the 191 | Covered Software is not Incompatible With Secondary Licenses, this 192 | License permits You to additionally distribute such Covered Software 193 | under the terms of such Secondary License(s), so that the recipient of 194 | the Larger Work may, at their option, further distribute the Covered 195 | Software under the terms of either this License or such Secondary 196 | License(s). 197 | 198 | 3.4. Notices 199 | 200 | You may not remove or alter the substance of any license notices 201 | (including copyright notices, patent notices, disclaimers of warranty, 202 | or limitations of liability) contained within the Source Code Form of 203 | the Covered Software, except that You may alter any license notices to 204 | the extent required to remedy known factual inaccuracies. 205 | 206 | 3.5. 
Application of Additional Terms 207 | 208 | You may choose to offer, and to charge a fee for, warranty, support, 209 | indemnity or liability obligations to one or more recipients of Covered 210 | Software. However, You may do so only on Your own behalf, and not on 211 | behalf of any Contributor. You must make it absolutely clear that any 212 | such warranty, support, indemnity, or liability obligation is offered by 213 | You alone, and You hereby agree to indemnify every Contributor for any 214 | liability incurred by such Contributor as a result of warranty, support, 215 | indemnity or liability terms You offer. You may include additional 216 | disclaimers of warranty and limitations of liability specific to any 217 | jurisdiction. 218 | 219 | 4. Inability to Comply Due to Statute or Regulation 220 | --------------------------------------------------- 221 | 222 | If it is impossible for You to comply with any of the terms of this 223 | License with respect to some or all of the Covered Software due to 224 | statute, judicial order, or regulation then You must: (a) comply with 225 | the terms of this License to the maximum extent possible; and (b) 226 | describe the limitations and the code they affect. Such description must 227 | be placed in a text file included with all distributions of the Covered 228 | Software under this License. Except to the extent prohibited by statute 229 | or regulation, such description must be sufficiently detailed for a 230 | recipient of ordinary skill to be able to understand it. 231 | 232 | 5. Termination 233 | -------------- 234 | 235 | 5.1. The rights granted under this License will terminate automatically 236 | if You fail to comply with any of its terms. 
However, if You become 237 | compliant, then the rights granted under this License from a particular 238 | Contributor are reinstated (a) provisionally, unless and until such 239 | Contributor explicitly and finally terminates Your grants, and (b) on an 240 | ongoing basis, if such Contributor fails to notify You of the 241 | non-compliance by some reasonable means prior to 60 days after You have 242 | come back into compliance. Moreover, Your grants from a particular 243 | Contributor are reinstated on an ongoing basis if such Contributor 244 | notifies You of the non-compliance by some reasonable means, this is the 245 | first time You have received notice of non-compliance with this License 246 | from such Contributor, and You become compliant prior to 30 days after 247 | Your receipt of the notice. 248 | 249 | 5.2. If You initiate litigation against any entity by asserting a patent 250 | infringement claim (excluding declaratory judgment actions, 251 | counter-claims, and cross-claims) alleging that a Contributor Version 252 | directly or indirectly infringes any patent, then the rights granted to 253 | You by any and all Contributors for the Covered Software under Section 254 | 2.1 of this License shall terminate. 255 | 256 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all 257 | end user license agreements (excluding distributors and resellers) which 258 | have been validly granted by You or Your distributors under this License 259 | prior to termination shall survive termination. 260 | 261 | ************************************************************************ 262 | * * 263 | * 6. 
Disclaimer of Warranty * 264 | * ------------------------- * 265 | * * 266 | * Covered Software is provided under this License on an "as is" * 267 | * basis, without warranty of any kind, either expressed, implied, or * 268 | * statutory, including, without limitation, warranties that the * 269 | * Covered Software is free of defects, merchantable, fit for a * 270 | * particular purpose or non-infringing. The entire risk as to the * 271 | * quality and performance of the Covered Software is with You. * 272 | * Should any Covered Software prove defective in any respect, You * 273 | * (not any Contributor) assume the cost of any necessary servicing, * 274 | * repair, or correction. This disclaimer of warranty constitutes an * 275 | * essential part of this License. No use of any Covered Software is * 276 | * authorized under this License except under this disclaimer. * 277 | * * 278 | ************************************************************************ 279 | 280 | ************************************************************************ 281 | * * 282 | * 7. Limitation of Liability * 283 | * -------------------------- * 284 | * * 285 | * Under no circumstances and under no legal theory, whether tort * 286 | * (including negligence), contract, or otherwise, shall any * 287 | * Contributor, or anyone who distributes Covered Software as * 288 | * permitted above, be liable to You for any direct, indirect, * 289 | * special, incidental, or consequential damages of any character * 290 | * including, without limitation, damages for lost profits, loss of * 291 | * goodwill, work stoppage, computer failure or malfunction, or any * 292 | * and all other commercial damages or losses, even if such party * 293 | * shall have been informed of the possibility of such damages. 
This * 294 | * limitation of liability shall not apply to liability for death or * 295 | * personal injury resulting from such party's negligence to the * 296 | * extent applicable law prohibits such limitation. Some * 297 | * jurisdictions do not allow the exclusion or limitation of * 298 | * incidental or consequential damages, so this exclusion and * 299 | * limitation may not apply to You. * 300 | * * 301 | ************************************************************************ 302 | 303 | 8. Litigation 304 | ------------- 305 | 306 | Any litigation relating to this License may be brought only in the 307 | courts of a jurisdiction where the defendant maintains its principal 308 | place of business and such litigation shall be governed by laws of that 309 | jurisdiction, without reference to its conflict-of-law provisions. 310 | Nothing in this Section shall prevent a party's ability to bring 311 | cross-claims or counter-claims. 312 | 313 | 9. Miscellaneous 314 | ---------------- 315 | 316 | This License represents the complete agreement concerning the subject 317 | matter hereof. If any provision of this License is held to be 318 | unenforceable, such provision shall be reformed only to the extent 319 | necessary to make it enforceable. Any law or regulation which provides 320 | that the language of a contract shall be construed against the drafter 321 | shall not be used to construe this License against a Contributor. 322 | 323 | 10. Versions of the License 324 | --------------------------- 325 | 326 | 10.1. New Versions 327 | 328 | Mozilla Foundation is the license steward. Except as provided in Section 329 | 10.3, no one other than the license steward has the right to modify or 330 | publish new versions of this License. Each version will be given a 331 | distinguishing version number. 332 | 333 | 10.2. 
Effect of New Versions 334 | 335 | You may distribute the Covered Software under the terms of the version 336 | of the License under which You originally received the Covered Software, 337 | or under the terms of any subsequent version published by the license 338 | steward. 339 | 340 | 10.3. Modified Versions 341 | 342 | If you create software not governed by this License, and you want to 343 | create a new license for such software, you may create and use a 344 | modified version of this License if you rename the license and remove 345 | any references to the name of the license steward (except to note that 346 | such modified license differs from this License). 347 | 348 | 10.4. Distributing Source Code Form that is Incompatible With Secondary 349 | Licenses 350 | 351 | If You choose to distribute Source Code Form that is Incompatible With 352 | Secondary Licenses under the terms of this version of the License, the 353 | notice described in Exhibit B of this License must be attached. 354 | 355 | Exhibit A - Source Code Form License Notice 356 | ------------------------------------------- 357 | 358 | This Source Code Form is subject to the terms of the Mozilla Public 359 | License, v. 2.0. If a copy of the MPL was not distributed with this 360 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 361 | 362 | If it is not possible or desirable to put the notice in a particular 363 | file, then You may include the notice in a location (such as a LICENSE 364 | file in a relevant directory) where a recipient would be likely to look 365 | for such a notice. 366 | 367 | You may add additional accurate notices of copyright ownership. 368 | 369 | Exhibit B - "Incompatible With Secondary Licenses" Notice 370 | --------------------------------------------------------- 371 | 372 | This Source Code Form is "Incompatible With Secondary Licenses", as 373 | defined by the Mozilla Public License, v. 2.0. 
374 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Python Examples 2 | 3 | ## Description 4 | 5 | Set of awesome Python Examples. 6 | 7 | ## Contents 8 | 9 | * TODO: Write contents 10 | 11 | ## Contributors 12 | 13 | * Sergio García Prado - [@garciparedes](http://garciparedes.me) 14 | 15 | ## Contributing 16 | 17 | See [CONTRIBUTING.md](CONTRIBUTING.md). 18 | 19 | ## License 20 | 21 | This repository is licensed under [Mozilla Public License Version 2.0](LICENSE). 22 | -------------------------------------------------------------------------------- /competitive/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/competitive/__init__.py -------------------------------------------------------------------------------- /competitive/other/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/competitive/other/__init__.py -------------------------------------------------------------------------------- /competitive/other/backup_discs.py: -------------------------------------------------------------------------------- 1 | from itertools import combinations 2 | from typing import List, Tuple, Generator 3 | 4 | 5 | def packs(elements: List[float], bin_size: float) -> Generator[List[float], None, None]: 6 | elements = sorted(elements) 7 | for s in reversed(range(1, len(elements) + 1)): 8 | for group in combinations(elements, s): 9 | if bin_size < sum(group): 10 | break 11 | yield group 12 | 13 | 14 | def improves_result(best: List[float], new: List[float]) -> bool: 15 | if len(best) < len(new): 16 | return True 17 | if len(best) == len(new) and sum(best) < 
def planificar(elements: List[float], bins: int, bin_size: float) -> List[Tuple[float]]:
    """Greedily fill up to `bins` bins from `elements`.

    Repeatedly picks, among the candidate groups produced by `packs`, the one
    `improves_result` prefers; stops early on an exact fit.  Consumes the
    chosen values from `elements` (mutates the argument, as the original did).
    """
    result = []
    while elements and len(result) < bins:
        chosen = tuple()
        for candidate in packs(elements, bin_size):
            if improves_result(chosen, candidate):
                chosen = candidate
                # A perfect fit cannot be improved upon — stop scanning.
                if sum(chosen) == bin_size:
                    break
        result.append(chosen)
        for value in chosen:
            elements.remove(value)
    return result


def main():
    """Small demo run printing the plan and the number of packed items."""
    result = planificar([10, 15, 20, 5], 2, 25)
    print(result)

    mark = sum(len(group) for group in result)
    print(mark)
def solution(N):
    """Return the length of the shortest prefix of bin(N) whose repetition
    (possibly with a truncated final copy matching the suffix) covers the
    whole binary string, or -1 if no such period exists.

    Bug fix: the original was Python 2 code; `l / 2` is float division under
    Python 3 and makes every slice index a float, raising TypeError.  Integer
    division (`//`) restores the intended behavior.
    """
    b = bin(N)[2:]  # binary digits without the '0b' prefix
    l = len(b)
    i = l // 2  # candidate period length; a period longer than l/2 is useless
    while i > 0:
        j = 1
        # Count how many whole copies of the length-i prefix tile the string.
        while b[0:i] == b[i * j:i * (j + 1)]:
            j += 1
        if l == i * j:
            return i
        # Partial final copy: the leftover suffix must match the prefix start.
        if i * j < l < i * (j + 1) and b[:l % i] == b[-(l % i):]:
            return i
        i -= 1
    return -1
def solution(N, S, T):
    """Score a battleship game: ships described in S ('r1c1 r2c2' pairs,
    comma-separated), shots in T (space-separated 'rC' cells).  Returns
    'sunk,hit' counts as a string.
    """
    ships = []
    for a_row, a_col, b_row, b_col in re.findall('([0-9]+)([A-Z]) ([0-9]+)([A-Z])', S):
        height = abs(int(b_row) - int(a_row)) + 1
        width = abs(ord(b_col) - ord(a_col)) + 1
        tiles = height * width
        # [corners, remaining tiles, total tiles, longest side]
        ships.append([(a_row, a_col, b_row, b_col), tiles, tiles, max(height, width)])

    for shot in re.findall('([0-9]+)([A-Z])', T):
        for ship in ships:
            if check_hit(ship, shot):
                ship[1] -= 1
                break

    sunked = sum(1 for ship in ships if ship[1] == 0)
    hits = sum(1 for ship in ships if ship[1] != 0 and ship[1] < ship[2])
    return str(sunked) + "," + str(hits)


def check_hit(ship, hit):
    """Approximate containment test used by the original: the shot counts as
    a hit when its summed distance to both corners, on the dominant axis,
    stays below the ship's longest side.
    """
    a_row, a_col, b_row, b_col = ship[0]
    row_span = abs(int(a_row) - int(hit[0])) + abs(int(b_row) - int(hit[0]))
    col_span = abs(ord(a_col) - ord(hit[1])) + abs(ord(b_col) - ord(hit[1]))
    return max(row_span, col_span) < ship[3]
def solution(A, B):
    """Decimal expansion of A / B as a string, wrapping the repeating part
    of the fraction in parentheses, e.g. 1/3 -> '0.(3)'.

    Long division: each remainder determines all following digits, so the
    first repeated remainder marks the start of the cycle.
    """
    digits = [str(A // B)]
    remainders = [A % B]
    remainder = A % B
    repeating = False
    while remainder != 0 and not repeating:
        quotient, remainder = divmod(remainder * 10, B)
        digits.append(str(quotient))
        if remainder in remainders:
            # Cycle detected: open the parenthesis where it first appeared.
            digits.insert(remainders.index(remainder) + 1, '(')
            digits.append(')')
            repeating = True
        else:
            remainders.append(remainder)
    if len(digits) == 1:
        return digits[0]
    digits.insert(1, '.')
    return ''.join(digits)
def solution(A):
    """Sum of (-2)**a over the elements of A, i.e. 2**a for even a and
    -(2**a) for odd a, memoising the power for repeated exponents so each
    distinct value is computed only once.
    """
    cache = {}
    total = 0
    for exponent in A:
        if exponent not in cache:
            term = 2 ** exponent
            if exponent % 2 == 1:
                term = -term
            cache[exponent] = term
        total += cache[exponent]
    return total
def check_vector(A):
    """True when every element of A has the same parity as A[0]."""
    parity = A[0] % 2
    return all(value % 2 == parity for value in A[1:])


def equal_vector(A):
    """True when all elements of A are equal (vacuously true for len <= 1)."""
    return all(A[k] == A[k + 1] for k in range(len(A) - 1))


def solution(A):
    """Number of rounds needed to equalise A by moving every element one
    step towards the mean per round; -1 when parities differ (then the
    elements can never meet), 0 for an empty list.  Mutates A in place,
    exactly as the original did.
    """
    if len(A) == 0:
        return 0
    if not check_vector(A):
        return -1
    steps = 0
    while not equal_vector(A):
        steps += 1
        mean = sum(A) / len(A)
        for k in range(len(A)):
            if A[k] < mean:
                A[k] += 1
            else:
                A[k] -= 1
    return steps
def solution(K, A):
    """Count index pairs of A summing to K, with the original's convention:
    each unordered pair i < j counts twice and each single index with
    2 * A[i] == K counts once.

    That convention is exactly the number of *ordered* pairs (i, j),
    including i == j, with A[i] + A[j] == K, which a frequency table
    computes in O(n) instead of the original's O(n^2) double scan:
    sum over distinct values v of count[v] * count[K - v].
    """
    from collections import Counter  # local import keeps the script's header untouched

    counts = Counter(A)
    return sum(freq * counts[K - value] for value, freq in counts.items())
def bit(x):
    """Low bit of x; Python's % keeps the result in {0, 1} even for x < 0."""
    return x % 2


def carry(x):
    """Carry propagated to the next position: 1 below 0, -1 above 1, else 0."""
    if x < 0:
        return 1
    if x > 1:
        return -1
    return 0


def solution(A):
    """Digit-wise sum of A with itself shifted by one position (LSB first),
    using the bit/carry rules above, with trailing zeros stripped from the
    result.  Returns [] for an all-zero outcome or empty input.
    """
    result = []
    if A:
        result.append(bit(A[0]))
        pending = carry(A[0])
        for pos in range(1, len(A)):
            total = A[pos] + A[pos - 1] + pending
            result.append(bit(total))
            pending = carry(total)
        total = A[-1] + pending
        result.append(bit(total))
    while result and result[-1] == 0:
        result.pop()
    return result
World!') 8 | 9 | 10 | if __name__ == '__main__': 11 | asyncio.run(main()) 12 | -------------------------------------------------------------------------------- /environment/containerizing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/environment/containerizing/__init__.py -------------------------------------------------------------------------------- /environment/containerizing/docker/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/environment/containerizing/docker/__init__.py -------------------------------------------------------------------------------- /environment/containerizing/docker/hello_docker_container.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | import docker 4 | 5 | 6 | def main(): 7 | start = datetime.now() 8 | 9 | client = docker.from_env() 10 | 11 | apt_dependencies = ' '.join([ 12 | 'vim', 13 | ]) 14 | 15 | pip_dependencies = ' '.join([ 16 | 'numpy', 17 | 'pandas', 18 | ]) 19 | 20 | code = ';'.join([ 21 | 'import numpy as np', 22 | 'size = 100', 23 | 'random_numbers = np.random.uniform(size=size)', 24 | 'print(random_numbers)' 25 | ]) 26 | 27 | container = client.containers.run("python", 'tail -f /dev/null', detach=True) 28 | result = container.exec_run(f"apt-get update") 29 | print(result.output.decode()) 30 | 31 | result = container.exec_run(f"apt-get install -y {apt_dependencies}") 32 | print(result.output.decode()) 33 | 34 | result = container.exec_run(f"pip install {pip_dependencies}") 35 | print(result.output.decode()) 36 | 37 | result = container.exec_run(f"python -c '{code}'") 38 | print(result.output.decode()) 39 | 40 | container.stop() 41 | end = datetime.now() 
def main():
    """Build a Docker image with baked-in dependencies, then run code in it.

    Everything here is side-effecting I/O against a Docker daemon reachable
    via ``docker.from_env()`` — requires the third-party ``docker`` package
    and a running daemon.
    """

    client = docker.from_env()

    # Packages installed via apt while building the image.
    apt_dependencies = ' '.join([
        'vim',
    ])

    # Packages installed via pip while building the image.
    pip_dependencies = ' '.join([
        'numpy',
    ])

    # One-liner later executed with `python -c` inside the container.
    code = ';'.join([
        'import numpy as np',
        'size = 100',
        'random_numbers = np.random.uniform(size=size)',
        'print(random_numbers)'
    ])

    version = 'latest'
    tag = 'hello-docker-py'

    # Dockerfile is generated in memory and handed to the builder as a
    # byte stream — no file is written to disk.
    output = BytesIO('\n'.join([
        f'FROM python:{version}',
        f'RUN apt-get update && apt-get install -y {apt_dependencies}',
        f'RUN pip install {pip_dependencies}',
    ]).encode())
    client.images.build(fileobj=output, tag=tag)

    # Only the execution phase is timed; the image build above is excluded.
    start = datetime.now()

    # `tail -f /dev/null` keeps the container alive so we can exec into it.
    container = client.containers.run(tag, 'tail -f /dev/null', detach=True)

    result = container.exec_run(f"python -c '{code}'")
    print(result.output.decode())

    result = container.exec_run(f"python -c '{code}'")
    print(result.output.decode())

    end = datetime.now()

    print(f'Elapsed Time: {end - start}')
    container.stop()


if __name__ == '__main__':
    main()
class DynamicHandler(logging.StreamHandler):
    """Stream handler that captures log records in memory while attached.

    Call :meth:`activate` to start capturing on a logger and
    :meth:`deactivate` to stop; :attr:`rows` returns everything captured
    so far as stripped lines.
    """

    def __init__(self):
        # Records are written to an in-memory buffer instead of a real stream.
        super().__init__(stream=StringIO())
        self.level = logging.INFO
        self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    def activate(self, target: logging.Logger = logging.getLogger()) -> None:
        """Attach this handler to *target* (the root logger by default)."""
        # The target must hand records down to handlers, so its level is
        # lowered to DEBUG; this handler still filters at INFO itself.
        target.setLevel(logging.DEBUG)  # "Necessary" condition.
        target.addHandler(self)

    def deactivate(self, target: logging.Logger = logging.getLogger()) -> None:
        """Detach this handler from *target* and flush pending output."""
        target.removeHandler(self)
        self.flush()

    @property
    def rows(self) -> List[str]:
        """Captured lines so far, whitespace-stripped.

        Bug fix: the original restored the position with
        ``stream.seek(os.SEEK_END)``, which seeks to absolute offset 2
        (``os.SEEK_END`` is a *whence* constant, not an offset), so any
        write after a read would overwrite the buffer from byte 2.
        ``seek(0, os.SEEK_END)`` really moves to the end of the buffer.
        """
        stream = self.stream
        stream.seek(os.SEEK_SET)
        results = [row.strip() for row in stream.readlines()]
        stream.seek(0, os.SEEK_END)
        return results
def main():
    """Demo: capture log output only while the dynamic handler is active."""
    handler = DynamicHandler()

    # Emitted before activation: the handler is not attached yet, so these
    # are not captured.
    logger.info('A info message.')
    logger.debug('A debug message.')

    # Everything logged between activate() and deactivate() lands in the
    # handler's in-memory buffer.
    handler.activate()
    function()
    handler.deactivate()

    # Emitted after deactivation: not captured either.
    logger.info('Another info message.')
    logger.debug('Another debug message.')

    print(f'Handler results:\n{handler.rows}')


if __name__ == '__main__':
    main()
class Calculator(object):
    """A simple calculator class."""

    def double(self, number):
        """Return twice *number*."""
        return number * 2
'''
URL: https://github.com/karulis/pybluez/blob/master/examples/simple/inquiry.py
'''

# simple inquiry example
import bluetooth

# Scans for nearby discoverable Bluetooth devices; blocks for the duration
# of the inquiry and needs local Bluetooth hardware plus the third-party
# `pybluez` package.
nearby_devices = bluetooth.discover_devices(lookup_names=True)
print("found %d devices" % len(nearby_devices))

# Each entry is an (address, friendly-name) pair because lookup_names=True.
for addr, name in nearby_devices:
    print(" %s - %s" % (addr, name))
# Reads an integer from stdin and reports whether it is >= 5.
#
# Bug fix: this file was Python 2 (`print` statements, `raw_input`), which
# is a SyntaxError under Python 3 — the interpreter the rest of this
# repository targets.  Ported to Python 3; all output strings (Spanish)
# are preserved byte-for-byte.
a = int(input())

# Writes "Es un numero" ("it is a number")
print("Es un numero")

if a >= 5:
    print("Mayor")
    print("o igual que 5")
else:
    print("Menor que 5")
def euclides(m, n):
    """Greatest common divisor of m and n by Euclid's algorithm.

    Iterates (m, n) -> (n mod m, m) until m reaches 0; n then holds the GCD.
    """
    while m > 0:
        m, n = n % m, m
    return n


def main():
    """Prompt for two integers and print their GCD."""
    print("GCD Euclides Algorithm.")

    num_a = input("Number A: ")
    num_b = input("Number B: ")

    # Bug fix: the original always computed euclides(3, 6) and ignored the
    # numbers the user typed in; now the actual inputs are used.
    print("GCD of %s and %s is %s" % (num_a, num_b, euclides(int(num_a), int(num_b))))


if __name__ == "__main__":
    main()
6 | """ 7 | 8 | import time 9 | 10 | from pip._vendor.distlib.compat import raw_input 11 | 12 | """ Funtions """ 13 | 14 | """ 15 | Recursive Version 16 | """ 17 | 18 | 19 | def recursiveFib(n): 20 | if (n < 2): 21 | return n 22 | else: 23 | return (recursiveFib(n - 1) + recursiveFib(n - 2)) 24 | 25 | 26 | """ 27 | Iterative Version 28 | """ 29 | 30 | 31 | def iterativeFib(n): 32 | i = 1 33 | j = 0 34 | for k in range(n): 35 | j = i + j 36 | i = j - i 37 | return j 38 | 39 | 40 | """ MAIN """ 41 | 42 | print("""Fibonacci Example""") 43 | 44 | number = int(raw_input("Insert Integer Number: ")) 45 | print('') 46 | print("""Iterative Version""") 47 | start = time.time() 48 | print(iterativeFib(number)) 49 | print("it took", time.time() - start, "seconds.") 50 | 51 | print('') 52 | 53 | print("""Recursive Version""") 54 | start = time.time() 55 | print(recursiveFib(number)) 56 | print("it took", time.time() - start, "seconds.") 57 | -------------------------------------------------------------------------------- /miscellaneous/hello_worlds/funcion.py: -------------------------------------------------------------------------------- 1 | def f(p, *otros): 2 | """Esta función imprime en pantalla 3 | el primer valor, el los siguientes en forma de Tupla 4 | y la tupla separada.""" 5 | 6 | print("Numero") 7 | print(p) 8 | 9 | print("Tupla junta") 10 | print(otros) 11 | 12 | print("Tupla separada") 13 | for i in otros: 14 | print(i) 15 | 16 | 17 | f(8) 18 | f(8, 1) 19 | f(8, 1, 2) 20 | f(8, 1, 2, 3) 21 | 22 | 23 | def sumar(x, y): 24 | # Suma dos números y los devuelve como una función 25 | return x + y 26 | 27 | 28 | suma = sumar(3, 4) 29 | print(suma) 30 | -------------------------------------------------------------------------------- /miscellaneous/hello_worlds/matrixExample.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: Sergio Garcia Prado 3 | www.garciparedes.me 4 | """ 5 | 6 | from random import randint 7 | 8 | 
import random

U_SYMBOL = '_'
S_SYMBOL = ' '
V_SYMBOL = '|'
B_SYMBOL = '\n'


def generate_random_maze(rows, columns):
    """Build a rows x columns grid of 0/1 cells.

    A cell is 1 with probability 1/4 (three 0-slots against one 1-slot
    in the choice pool).
    """
    pool = [0, 0, 0, 1]
    return [[random.choice(pool) for _ in range(columns)]
            for _ in range(rows)]


def stringify_list_2d(list_2d):
    """Render the raw grid: each cell followed by a space, one row per line."""
    return ''.join(
        ''.join(str(cell) + S_SYMBOL for cell in row) + B_SYMBOL
        for row in list_2d
    )


def stringify_maze(maze):
    """Draw the maze with walls separating cells of different values.

    A floor ('_') is placed under a cell on the last row or when the
    cell below differs; a wall ('|') is placed after a cell on the last
    column or when the cell to the right differs.
    """
    height = len(maze)
    parts = []
    for row_idx, row in enumerate(maze):
        width = len(row)
        if row_idx == 0:
            # Top border: one roof segment per column.
            parts.append(S_SYMBOL)
            parts.extend(U_SYMBOL + S_SYMBOL for _ in range(width))
            parts.append(B_SYMBOL)
        parts.append(V_SYMBOL)
        for col_idx, cell in enumerate(row):
            below_differs = (row_idx == height - 1
                             or cell != maze[row_idx + 1][col_idx])
            parts.append(U_SYMBOL if below_differs else S_SYMBOL)
            right_differs = (col_idx == width - 1
                             or cell != row[col_idx + 1])
            parts.append(V_SYMBOL if right_differs else S_SYMBOL)
        parts.append(B_SYMBOL)
    return ''.join(parts)


def print_maze(maze):
    """Print the walled rendering of *maze*."""
    print(stringify_maze(maze))


def print_list_2d(list_2d):
    """Print the raw 0/1 rendering of *list_2d*."""
    print(stringify_list_2d(list_2d))


if __name__ == '__main__':
    rows, columns = 20, 30
    maze = generate_random_maze(rows, columns)
    print_list_2d(maze)
    print_maze(maze)
-------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "source": [ 6 | "# Tensorflow Getting Started Notebook" 7 | ], 8 | "metadata": {} 9 | }, 10 | { 11 | "cell_type": "code", 12 | "source": [ 13 | "import tensorflow as tf\n", 14 | "import numpy as np" 15 | ], 16 | "outputs": [], 17 | "execution_count": 2, 18 | "metadata": { 19 | "collapsed": false, 20 | "outputHidden": false, 21 | "inputHidden": false 22 | } 23 | }, 24 | { 25 | "cell_type": "code", 26 | "source": [ 27 | "print(\"tensorflow version: \" + tf.__version__)\n", 28 | "print(\"numpy version: \" + np.__version__)\n" 29 | ], 30 | "outputs": [ 31 | { 32 | "output_type": "stream", 33 | "name": "stdout", 34 | "text": [ 35 | "tensorflow version: 1.3.0\n", 36 | "numpy version: 1.13.3\n" 37 | ] 38 | } 39 | ], 40 | "execution_count": 3, 41 | "metadata": { 42 | "collapsed": false, 43 | "outputHidden": false, 44 | "inputHidden": false 45 | } 46 | }, 47 | { 48 | "cell_type": "code", 49 | "source": [ 50 | "x = np.array([[1, 2],[3, 4]])\n", 51 | "print(x)" 52 | ], 53 | "outputs": [ 54 | { 55 | "output_type": "stream", 56 | "name": "stdout", 57 | "text": [ 58 | "[[1 2]\n", 59 | " [3 4]]\n" 60 | ] 61 | } 62 | ], 63 | "execution_count": 4, 64 | "metadata": { 65 | "collapsed": false, 66 | "outputHidden": false, 67 | "inputHidden": false 68 | } 69 | }, 70 | { 71 | "cell_type": "code", 72 | "source": [ 73 | "y = np.array([[5, 6],[7, 8]])\n", 74 | "print(y)" 75 | ], 76 | "outputs": [ 77 | { 78 | "output_type": "stream", 79 | "name": "stdout", 80 | "text": [ 81 | "[[5 6]\n", 82 | " [7 8]]\n" 83 | ] 84 | } 85 | ], 86 | "execution_count": 5, 87 | "metadata": { 88 | "collapsed": false, 89 | "outputHidden": false, 90 | "inputHidden": false 91 | } 92 | }, 93 | { 94 | "cell_type": "code", 95 | "source": [ 96 | "with tf.Session() as sess:\n", 97 | " \n", 98 | " tf_x = tf.Variable(x)\n", 99 | " tf_y = tf.Variable(y)\n", 100 | " \n", 101 | " tf_init 
import logging

import pandas as pd
from pathlib import Path

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Sibling data files one directory above this script.
CSV_FILE_PATH = Path(__file__).parents[1] / 'data.csv'
PARQUET_FILE_PATH = Path(__file__).parents[1] / 'data.parquet'


def main():
    """Round-trip a small DataFrame through a Parquet file and log it."""
    logger.info('Starting...')

    # BUG FIX: the original wrote Parquet-formatted bytes to
    # CSV_FILE_PATH ('data.csv'); the Parquet output belongs in
    # PARQUET_FILE_PATH, which was defined but never used.
    df = pd.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 10]})
    df.to_parquet(PARQUET_FILE_PATH)

    df = pd.read_parquet(PARQUET_FILE_PATH)
    logger.info(df)

    logger.info('Finished!')


if __name__ == '__main__':
    main()
3:.3f} GB') 16 | 17 | df = pd.read_parquet(PARQUET_FILE_PATH) 18 | logger.info(f'In memory Size: {df.memory_usage(deep=True).sum() / 1024 ** 3:.3f} GB') 19 | 20 | logger.info(f'Finished!') 21 | 22 | 23 | if __name__ == '__main__': 24 | main() 25 | -------------------------------------------------------------------------------- /numerical/data_science/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/numerical/data_science/__init__.py -------------------------------------------------------------------------------- /numerical/data_science/cumlist.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | 3 | 4 | def main(): 5 | base = pd.DataFrame({ 6 | 'value': ['hola', 'adios', 'lalala', 'hasta luego'], 7 | 'flag': [False, True, True, True], 8 | 'label': ['A', 'A', 'B', 'B'] 9 | }) 10 | base['value'] = base['value'].apply(lambda value: [value]) 11 | base['index'] = base.index 12 | 13 | transformed = base.groupby('label').apply(lambda df: df['value'][df['flag']].cumsum()).reset_index() 14 | 15 | result = base.drop(columns={'value'}).merge( 16 | transformed.drop(columns={'label'}), 17 | left_on='index', 18 | right_on='level_1', 19 | how='left', 20 | ) 21 | result = result.drop(columns={'index', 'level_1'}) 22 | result['value'] = result['value'].apply(lambda value: value if isinstance(value, list) else []) 23 | print(result) 24 | 25 | 26 | if __name__ == '__main__': 27 | main() 28 | -------------------------------------------------------------------------------- /numerical/data_science/logic_function_tree_learning.py: -------------------------------------------------------------------------------- 1 | from numerical.data_science.res import DataSets as ds 2 | from sklearn.model_selection import train_test_split 3 | from sklearn import tree 4 | from sklearn.tree import 
DecisionTreeClassifier 5 | 6 | 7 | def learn_function(a, b, c, d, e): 8 | return bool(not (a and b) or not (c and d)) != bool(e) 9 | 10 | 11 | if __name__ == '__main__': 12 | np_data = ds.generate_from_logic_method(learn_function).data 13 | clf = tree.DecisionTreeClassifier() 14 | 15 | X_train, X_test, y_train, y_test = train_test_split(np_data[:,:-1],np_data[:,-1], test_size=0.33, 16 | random_state=42) 17 | clf = DecisionTreeClassifier() 18 | clf = clf.fit(X_train, y_train) 19 | -------------------------------------------------------------------------------- /numerical/data_science/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/numerical/data_science/models/__init__.py -------------------------------------------------------------------------------- /numerical/data_science/models/trees/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/numerical/data_science/models/trees/__init__.py -------------------------------------------------------------------------------- /numerical/data_science/models/trees/gain_ranking_continous.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | from numerical.data_science.res import DataSets 4 | from numerical.data_science import GainRanking 5 | 6 | 7 | class GainRankingContinous(GainRanking): 8 | """ 9 | Class GainRankingContinous 10 | """ 11 | 12 | def __init__(self, data_input, class_name, debug=False): 13 | GainRanking.__init__(self, data_input, class_name, debug) 14 | 15 | def gain(self, subdata, h_S): 16 | result = pd.Series(index=subdata.columns) 17 | for column in subdata.columns: 18 | if subdata[column].dtype.name != 'category': 19 | if 
class GainRanking:
    """Rank a DataFrame's feature columns by information gain.

    For every column other than the class column, computes the
    reduction in class entropy obtained by partitioning on that
    column's values (ID3-style information gain).
    """

    def __init__(self, data_input, class_name, debug=False):
        self.data = data_input          # training DataFrame
        self.class_name = class_name    # name of the class/label column
        self.debug = debug
        self.h_S = self.entropy(self.data)  # entropy of the full data set
        self._gain_list = None          # lazily computed gain per column

    @property
    def gain_list(self):
        """Series of information gains, indexed by feature column name."""
        if self._gain_list is None:
            columns = self.data.columns[self.data.columns != self.class_name]
            # BUG FIX: `.ix` was deprecated in pandas 0.20 and removed in
            # 1.0; label-based `.loc` is the direct replacement here.
            self._gain_list, self.data.loc[:, columns] = self.gain(
                self.data.loc[:, columns], self.h_S)
        return self._gain_list

    @property
    def gain_winner(self):
        """(best column name, that data column), by highest gain."""
        return self.gain_list.idxmax(), self.data[self.gain_list.idxmax()]

    def gain(self, subdata, h_S):
        """Return (gain per column, possibly transformed subdata)."""
        result = pd.Series(index=subdata.columns, dtype=float)
        for column in subdata.columns:
            a = self.sub_entropy(subdata[column])
            counts = subdata[column].value_counts()
            p = counts / counts.sum()
            # Information gain: H(S) minus the weighted entropy of the
            # partition induced by this column.
            result[column] = h_S - (p * a).sum()
        return result, subdata

    def entropy(self, subdata):
        """Shannon entropy (bits) of the class column within *subdata*."""
        counts = subdata[self.class_name].value_counts()
        p = counts / counts.sum()
        return (p * np.log2(1 / p)).sum()

    def sub_entropy(self, subdata):
        """Entropy of the class column within each category of *subdata*."""
        result = pd.Series(index=subdata.unique(), dtype=float)
        cross = pd.concat([subdata, self.data[self.class_name]], axis=1)
        for cat in subdata.unique():
            result[cat] = self.entropy(cross[subdata == cat])
        return result

    def __str__(self):
        return str(self.gain_list)


if __name__ == '__main__':
    data_pd = DataSets.get_weber_nominal()
    print(GainRanking(data_pd, data_pd.columns[-1]))
self.tree = self.generate_tree(training_set) 11 | 12 | def generate_tree(self, data_pd): 13 | win, categories = self.ranking(data_pd, self.class_name).gain_winner 14 | tree = LabeledTree(win) 15 | for v1 in categories: 16 | d = data_pd.ix[data_pd[win] == v1, data_pd.columns != win] 17 | 18 | if len(d[self.class_name].unique()) == 1: 19 | tree.add_child(v1, d[self.class_name].unique()[0]) 20 | 21 | elif d.shape[1] == 1: 22 | tree.add_child(v1, " ".join(d[self.class_name].unique())) 23 | 24 | elif d.shape[0] == 0: 25 | tree.add_child(v1, None) 26 | 27 | else: 28 | tree.add_child(v1, self.generate_tree(d)) 29 | 30 | return tree 31 | 32 | def __str__(self): 33 | return str(self.tree) 34 | 35 | 36 | if __name__ == '__main__': 37 | data_pd = DataSets.get_weber_nominal() 38 | id3_tennis = ID3(data_pd, data_pd.columns[-1]) 39 | print(id3_tennis) 40 | 41 | ''' 42 | pd_careval = DataSets.get_car_eval() 43 | id3_careval = ID3(pd_careval, pd_careval.columns[-1]) 44 | print(id3_careval) 45 | ''' 46 | 47 | pd_credit = DataSets.get_credit().ix[:, 1:] 48 | id3_credit = ID3(pd_credit, pd_credit.columns[0]) 49 | print(id3_credit) 50 | -------------------------------------------------------------------------------- /numerical/data_science/models/trees/j48.py: -------------------------------------------------------------------------------- 1 | from numerical.data_science.res import DataSets 2 | from numerical.data_science import GainRankingContinous 3 | from numerical.data_science import ID3 4 | 5 | 6 | class J48(ID3): 7 | def __init__(self, training_set, class_name): 8 | ID3.__init__(self, training_set, class_name, ranking=GainRankingContinous) 9 | 10 | 11 | if __name__ == '__main__': 12 | data_pd_2 = DataSets.get_weather() 13 | j48_tennis = J48(data_pd_2, data_pd_2.columns[-1]) 14 | print(j48_tennis) 15 | 16 | -------------------------------------------------------------------------------- /numerical/data_science/res/__init__.py: 
-------------------------------------------------------------------------------- 1 | from .data_sets import ( 2 | DataSets 3 | ) 4 | -------------------------------------------------------------------------------- /numerical/data_science/res/credit.csv: -------------------------------------------------------------------------------- 1 | Nº,Riesgo, Historia,Deuda,Avales,Ingresos 2 | 1,alto,mala,alta,no,0 a 2M 3 | 2,alto,desconocida,alta,no,2 a 5M 4 | 3,moderado,desconocida,baja,no,2 a 5M 5 | 4,alto,desconocida,baja,no,0 a 2M 6 | 5,bajo,desconocida,baja,no,más de 5M 7 | 6,bajo,desconocida,baja,adecuados,más de 5M 8 | 7,alto,mala,baja,no,0 a 2M 9 | 8,moderado,mala,baja,adecuados,más de 5M 10 | 9,bajo,buena,baja,no,más de 5M 11 | 10,bajo,buena,alta,adecuados,más de 5M 12 | 11,alto,buena,alta,no,0 a 2M 13 | 12,moderado,buena,alta,no,2 a 5M 14 | 13,bajo,buena,alta,no,más de 5M 15 | 14,alto,mala,alta,no,2 a 5M -------------------------------------------------------------------------------- /numerical/data_science/res/data_sets.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | from .logic_data_set import LogicDataSet 3 | 4 | 5 | class DataSets: 6 | @staticmethod 7 | def _get_path(): 8 | return "./res" 9 | 10 | @staticmethod 11 | def get_weber_nominal(): 12 | outLook = pd.Series(["Sunny", "Overcast", "Rain"], dtype="category") 13 | temp = pd.Series(["Hot", "Mild", "Cold"], dtype="category") 14 | humidity = pd.Series(["High", "Normal"], dtype="category") 15 | wind = pd.Series(["Weak", "Strong"], dtype="category") 16 | playTennis = pd.Series(["Yes", "No"], dtype="category") 17 | 18 | columns = pd.Index(["Outlook", "Temperature", "Humidity", "Wind", "PlayTennis"]) 19 | 20 | data = [ 21 | [outLook[0], temp[0], humidity[0], wind[0], playTennis[1]], 22 | [outLook[0], temp[0], humidity[0], wind[1], playTennis[1]], 23 | [outLook[1], temp[0], humidity[0], wind[0], playTennis[0]], 24 | [outLook[2], temp[1], humidity[0], 
wind[0], playTennis[0]], 25 | [outLook[2], temp[2], humidity[1], wind[0], playTennis[0]], 26 | [outLook[2], temp[2], humidity[1], wind[1], playTennis[1]], 27 | [outLook[1], temp[2], humidity[1], wind[1], playTennis[0]], 28 | [outLook[0], temp[1], humidity[0], wind[0], playTennis[1]], 29 | [outLook[0], temp[2], humidity[1], wind[0], playTennis[0]], 30 | [outLook[2], temp[1], humidity[1], wind[0], playTennis[0]], 31 | [outLook[0], temp[1], humidity[1], wind[1], playTennis[0]], 32 | [outLook[1], temp[1], humidity[0], wind[1], playTennis[0]], 33 | [outLook[1], temp[0], humidity[1], wind[0], playTennis[0]], 34 | [outLook[2], temp[1], humidity[0], wind[1], playTennis[1]], 35 | ] 36 | 37 | return pd.DataFrame(data, columns=columns, dtype="category") 38 | 39 | @staticmethod 40 | def get_weather(): 41 | types = { 42 | 'outlook': 'category', 43 | 'temperature': 'int32', 'humidity': 'int32', 44 | 'windy': 'category', 'play': 'category', 45 | } 46 | 47 | return pd.read_csv(DataSets._get_path() + '/weather.csv', dtype=types) 48 | 49 | @staticmethod 50 | def get_weather_semi_nominal(): 51 | types = { 52 | 'Nº': 'int32', 'Outlook': 'category', 53 | 'Temperature': 'int32', 'Humidity': 'category', 54 | 'Wind': 'category', 'PlayTennis': 'category', 55 | } 56 | 57 | return pd.read_csv(DataSets._get_path() + '/weather_semi_nominal.csv', dtype=types) 58 | 59 | @staticmethod 60 | def get_car_eval(): 61 | return pd.read_csv(DataSets._get_path() + '/careval.csv') 62 | 63 | @staticmethod 64 | def get_credit(): 65 | return pd.read_csv(DataSets._get_path() + '/credit.csv') 66 | 67 | @staticmethod 68 | def get_presion(): 69 | pd_class = pd.Series(["-", "+"], dtype="category") 70 | 71 | columns = pd.Index(["Presion", "Clase"]) 72 | 73 | data = [ 74 | 40, pd_class[0], 75 | 48, pd_class[0], 76 | 60, pd_class[0], 77 | 72, pd_class[0], 78 | 80, pd_class[0], 79 | 90, pd_class[0], 80 | ] 81 | 82 | return pd.DataFrame(data, columns=columns) 83 | 84 | @staticmethod 85 | def 
generate_from_logic_method(logic_m): 86 | return LogicDataSet(logic_m) 87 | 88 | @staticmethod 89 | def get_wiki_vote(): 90 | return pd.read_csv(DataSets._get_path() + '/wiki-Vote.csv') 91 | 92 | @staticmethod 93 | def get_followers(): 94 | return pd.read_csv(DataSets._get_path() + '/followers.csv') 95 | 96 | @staticmethod 97 | def get_users(): 98 | return pd.read_csv(DataSets._get_path() + '/users.csv') 99 | 100 | @staticmethod 101 | def get_thoraric_surgery(): 102 | types = { 103 | 'DGN': 'category', 'PRE4': 'float64', 'PRE5': 'float64', 'PRE6': 'category', 'PRE7': 'category', 104 | 'PRE8': 'category', 'PRE9': 'category', 'PRE10': 'category', 'PRE11': 'category', 'PRE14': 'category', 105 | 'PRE17': 'category', 'PRE19': 'category', 'PRE25': 'category', 'PRE30': 'category', 'PRE32': 'category', 106 | 'AGE': 'int32', 'Risk1Yr': 'category' 107 | } 108 | 109 | return pd.read_csv(DataSets._get_path() + '/ThoraricSurgery.csv', dtype=types) 110 | -------------------------------------------------------------------------------- /numerical/data_science/res/followers.csv: -------------------------------------------------------------------------------- 1 | src,dst 2 | 2,1 3 | 4,1 4 | 1,2 5 | 6,3 6 | 7,3 7 | 7,6 8 | 6,7 9 | 3,7 10 | -------------------------------------------------------------------------------- /numerical/data_science/res/logic_data_set.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import string as st 4 | from inspect import signature 5 | 6 | 7 | class LogicDataSet(): 8 | def __init__(self, logic_function): 9 | self.logic_function = logic_function 10 | self.num_vars = len(signature(self.logic_function).parameters) 11 | self._data = None 12 | pass 13 | 14 | @property 15 | def data(self): 16 | if self._data is None: 17 | self.generate_data() 18 | return self._data 19 | 20 | @property 21 | def data_as_pandas(self): 22 | return pd.DataFrame(self.data, 
columns=list(st.ascii_uppercase)[:self.num_vars] + ['Result']) 23 | 24 | @staticmethod 25 | def _array_logic_function(input_data, logic_f): 26 | input_data[-1] = logic_f(*input_data[:-1]) 27 | return input_data 28 | 29 | def __str__(self): 30 | return str(self.data) 31 | 32 | def generate_data(self): 33 | self._data = np.zeros([2 ** self.num_vars, self.num_vars + 1], dtype=bool) 34 | for i in range(self.num_vars): 35 | self._data[:, i] = np.tile([0] * 2 ** i + [1] * 2 ** i, [2 ** (self.num_vars - i - 1)]) 36 | self._data = np.apply_along_axis(LogicDataSet._array_logic_function, 1, self._data, self.logic_function) 37 | 38 | 39 | def l(a, b, c, d, e): 40 | return bool(not (a and b) or not (c and d)) != bool(e) 41 | 42 | 43 | if __name__ == '__main__': 44 | pd_l = LogicDataSet(l).data_as_pandas 45 | # pd_l.to_csv('logic_function.csv', index=False) 46 | print(pd_l) 47 | -------------------------------------------------------------------------------- /numerical/data_science/res/logic_function.csv: -------------------------------------------------------------------------------- 1 | A,B,C,D,E,Result 2 | False,False,False,False,False,True 3 | True,False,False,False,False,True 4 | False,True,False,False,False,True 5 | True,True,False,False,False,True 6 | False,False,True,False,False,True 7 | True,False,True,False,False,True 8 | False,True,True,False,False,True 9 | True,True,True,False,False,True 10 | False,False,False,True,False,True 11 | True,False,False,True,False,True 12 | False,True,False,True,False,True 13 | True,True,False,True,False,True 14 | False,False,True,True,False,True 15 | True,False,True,True,False,True 16 | False,True,True,True,False,True 17 | True,True,True,True,False,False 18 | False,False,False,False,True,False 19 | True,False,False,False,True,False 20 | False,True,False,False,True,False 21 | True,True,False,False,True,False 22 | False,False,True,False,True,False 23 | True,False,True,False,True,False 24 | False,True,True,False,True,False 25 | 
True,True,True,False,True,False 26 | False,False,False,True,True,False 27 | True,False,False,True,True,False 28 | False,True,False,True,True,False 29 | True,True,False,True,True,False 30 | False,False,True,True,True,False 31 | True,False,True,True,True,False 32 | False,True,True,True,True,False 33 | True,True,True,True,True,True 34 | -------------------------------------------------------------------------------- /numerical/data_science/res/main/DiscretizeThoraricSurgery.py: -------------------------------------------------------------------------------- 1 | from numerical.data_science.res import DataSets 2 | 3 | 4 | def discretize_AGE(value): 5 | if value <= 62: 6 | return 'AGE_1' 7 | elif 62 < value <= 65: 8 | return 'AGE_3' 9 | elif 65 < value <= 70: 10 | return 'AGE_2' 11 | else: 12 | return 'AGE_4' 13 | 14 | def discretize_PRE4(value): 15 | if value <= 2.66: 16 | return 'PRE4_1' 17 | elif 2.66 < value <= 2.88: 18 | return 'PRE4_2' 19 | else: 20 | return 'PRE4_3' 21 | 22 | 23 | def discretize_PRE5(value): 24 | if value <= 2.05: 25 | return 'PRE5_1' 26 | else: 27 | return 'PRE5_2' 28 | 29 | 30 | 31 | if __name__ == '__main__': 32 | pd_data = DataSets.get_thoraric_surgery() 33 | 34 | pd_data['PRE4'] = pd_data['PRE4'].apply(discretize_PRE4,1).astype('category') 35 | pd_data['PRE5'] = pd_data['PRE5'].apply(discretize_PRE5,1).astype('category') 36 | pd_data['AGE'] = pd_data['AGE'].apply(discretize_AGE,1).astype('category') 37 | 38 | print(pd_data) 39 | pd_data.to_csv('ThoraricSurgery_discrete.csv', index=False) 40 | -------------------------------------------------------------------------------- /numerical/data_science/res/main/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/numerical/data_science/res/main/__init__.py -------------------------------------------------------------------------------- 
import math
import numpy as np


def euclidean_distance(a, b):
    """Euclidean (L2) distance between points *a* and *b*.

    Generalized from the original hard-coded 2-D formula to any number
    of coordinates; backward compatible for 2-D inputs.
    """
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))


def weighted_euclidean_distance(a, b, w=(0.2, 0.8)):
    """Euclidean distance with a per-axis weight vector *w*.

    The default weight is an immutable tuple (a mutable default list is
    a well-known Python pitfall); its values match the original
    [0.2, 0.8] default, and the formula is generalized to n dimensions.
    """
    return math.sqrt(sum(wi * (x - y) ** 2 for wi, x, y in zip(w, a, b)))


def manhattan_distance(a, b):
    """Manhattan (L1) distance, generalized to any dimension."""
    return sum(abs(x - y) for x, y in zip(a, b))


def hamming_distance(a, b):
    """Number of coordinate positions in which *a* and *b* differ."""
    return sum(x != y for x, y in zip(a, b))


def distance_to_set(instance, set, function):
    """Distances from *instance* to every item of *set* using *function*.

    NOTE(review): the parameter name `set` shadows the builtin; it is
    kept unchanged for backward compatibility with keyword callers.
    """
    return [function(instance, item) for item in set]


def normalize(x):
    """Min-max normalize a numpy array column-wise into [0, 1]."""
    return (x - x.min(axis=0)) / (x.max(axis=0) - x.min(axis=0))


if __name__ == '__main__':
    estrellas = [
        [1, 1],
        [1, 4],
        [3, 1],
        [5, 3],
    ]

    circulos = [
        [2, 1],
        [5, 2],
        [6, 1],
    ]

    instancia = [3, 3]

    x = np.array(estrellas + circulos)
    y = np.array(instancia)

    x_normed = normalize(x)
    y_normed = (y - x.min(axis=0)) / (x.max(axis=0) - x.min(axis=0))

    print(distance_to_set(y_normed, x_normed, euclidean_distance))
    print(distance_to_set(y_normed, x_normed, weighted_euclidean_distance))
    print(distance_to_set(instancia, estrellas + circulos, manhattan_distance))
    print(distance_to_set(instancia, estrellas + circulos, hamming_distance))
4,justinbieber,Justin Bieber 6 | 6,matei_zaharia,Matei Zaharia 7 | 7,odersky,Martin Odersky 8 | 8,anonsys 9 | -------------------------------------------------------------------------------- /numerical/data_science/res/weather.csv: -------------------------------------------------------------------------------- 1 | outlook,temperature,humidity,windy,play 2 | sunny,85,85,FALSE,no 3 | sunny,80,90,TRUE,no 4 | overcast,83,86,FALSE,yes 5 | rainy,70,96,FALSE,yes 6 | rainy,68,80,FALSE,yes 7 | rainy,65,70,TRUE,no 8 | overcast,64,65,TRUE,yes 9 | sunny,72,95,FALSE,no 10 | sunny,69,70,FALSE,yes 11 | rainy,75,80,FALSE,yes 12 | sunny,75,70,TRUE,yes 13 | overcast,72,90,TRUE,yes 14 | overcast,81,75,FALSE,yes 15 | rainy,71,91,TRUE,no 16 | -------------------------------------------------------------------------------- /numerical/data_science/res/weather_nominal.csv: -------------------------------------------------------------------------------- 1 | ,Outlook,Temperature,Humidity,Wind,PlayTennis 2 | 0,Sunny,Hot,High,Weak,No 3 | 1,Sunny,Hot,High,Strong,No 4 | 2,Overcast,Hot,High,Weak,Yes 5 | 3,Rain,Mild,High,Weak,Yes 6 | 4,Rain,Cold,Normal,Weak,Yes 7 | 5,Rain,Cold,Normal,Strong,No 8 | 6,Overcast,Cold,Normal,Strong,Yes 9 | 7,Sunny,Mild,High,Weak,No 10 | 8,Sunny,Cold,Normal,Weak,Yes 11 | 9,Rain,Mild,Normal,Weak,Yes 12 | 10,Sunny,Mild,Normal,Strong,Yes 13 | 11,Overcast,Mild,High,Strong,Yes 14 | 12,Overcast,Hot,Normal,Weak,Yes 15 | 13,Rain,Mild,High,Strong,No 16 | -------------------------------------------------------------------------------- /numerical/data_science/res/weather_semi_nominal.csv: -------------------------------------------------------------------------------- 1 | Nº,Outlook,Temperature,Humidity,Wind,PlayTennis 2 | 0,Sunny,23,High,Weak,No 3 | 1,Sunny,24,High,Strong,No 4 | 2,Overcast,25,High,Weak,Yes 5 | 3,Rain,15,High,Weak,Yes 6 | 4,Rain,5,Normal,Weak,Yes 7 | 5,Rain,4,Normal,Strong,No 8 | 6,Overcast,3,Normal,Strong,Yes 9 | 7,Sunny,12,High,Weak,No 10 | 
#!/usr/bin/env python3
"""Report, for each requested tolerance, the fraction of Weka predictions
whose relative error exceeds that tolerance.

Usage: weka_relative_error.py CSV_FILE TOLERANCE [TOLERANCE ...]
The CSV must contain numeric 'error' and 'actual' columns.
"""

import sys
import pandas as pd


def error_ratio(predictions, max_relative_error):
    """Return the fraction of rows with |error| / actual above the tolerance.

    predictions -- DataFrame with numeric 'error' and 'actual' columns.
    max_relative_error -- relative-error tolerance (e.g. 0.1 for 10%).
    """
    within = (predictions['error'].abs() / predictions['actual']
              <= max_relative_error).sum()
    return 1 - within / float(predictions.shape[0])


if __name__ == "__main__":
    file_name = sys.argv[1]
    tolerances = [float(arg) for arg in sys.argv[2:]]
    predictions = pd.read_csv(file_name)

    for tolerance in tolerances:
        print(f'Max Rel. Error: {tolerance}\t-> Error Ratio:\t'
              f'{error_ratio(predictions, tolerance)}')
#!/usr/bin/env python3
"""QR decomposition via Givens rotations, applied to a least-squares solve."""

from typing import Tuple

import numpy as np


def rotator(A: np.ndarray, i: int, j: int, k: int) -> np.ndarray:
    """Return the Givens rotation that zeroes A[j, k] against pivot A[i, k].

    The rotation differs from the identity only in rows/columns i and j;
    when A[j, k] is already zero the identity is returned unchanged.
    """
    G = np.eye(A.shape[0])
    # Norm of the 2-vector (A[i, k], A[j, k]); denominator of cos/sin terms.
    r = np.sqrt(A[i, k] ** 2 + A[j, k] ** 2)
    if A[j, k] != 0:
        G[i, i] = A[i, k] / r
        G[i, j] = A[j, k] / r
        G[j, i] = - A[j, k] / r
        G[j, j] = A[i, k] / r
    return G


def qr_givens(A: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Decompose A into (Q, R): Q orthogonal, R upper triangular.

    NOTE(review): the inner loop pairs row i with row i + 1, so A appears to
    require strictly more rows than columns (as in main()'s 4x3 input); a
    square matrix would index one row past the end — confirm before reuse.
    """
    Q = np.eye(A.shape[0])
    for j in range(A.shape[1]):
        G = np.eye(A.shape[0])
        # Zero column j below the diagonal, bottom-up, accumulating the
        # rotations into G (each rotator sees the partially rotated G @ A).
        for i in reversed(range(j, A.shape[1])):
            G = rotator(G @ A, i, i + 1, j) @ G
        Q = Q @ np.transpose(G)  # Q collects the transposed (inverse) rotations.
        A = G @ A
    return (Q, A)


def main() -> None:
    # Overdetermined system: least squares via A = QR, then R x = Q^T b.
    A = np.array([[1,-1,1],
                  [1,1,1],
                  [1,1,-1],
                  [1,1,1]])

    b = np.array([1, 1, -1, 0])

    [Q, R] = qr_givens(A)
    c = (np.transpose(Q) @ b).flatten()
    # Solve using only the square upper block of R (remaining rows are zero).
    x = np.linalg.solve(R[0:R.shape[1], :], c[0:R.shape[1]])

    print("A =", np.round(A, decimals=2), '\n', sep='\n')
    print("Q =", np.round(Q, decimals=2), '\n', sep='\n')
    print("R =", np.round(R, decimals=2), '\n', sep='\n')
    print("Q @ R =", np.round(Q @ R, decimals=2), '\n', sep='\n')
    print("b =", np.round(b, decimals=2), '\n', sep='\n')
    print("c =", np.round(c, decimals=2), '\n', sep='\n')
    print("x =", np.round(x, decimals=2), '\n', sep='\n')

if __name__ == '__main__':
    main()
#!/usr/bin/env python3
"""QR decomposition via Householder reflections, applied to a least-squares solve."""

from typing import Tuple

import numpy as np


def reflector(A: np.ndarray, i: int) -> np.ndarray:
    """Return the Householder reflection zeroing column i of A below row i.

    x is column i of A; y keeps x unchanged above row i, places ||x[i:]||
    at row i and zeros below, so reflecting along u = x - y maps x onto y.
    NOTE(review): if u is the zero vector (column already in target form)
    the division by u^T u fails — confirm inputs avoid that case.
    """
    x = np.take(A, [i], 1)
    y = np.concatenate([x[:i], [[np.sqrt(np.sum(np.power(x[i:],2)))]],
                        np.zeros([A.shape[0] - i - 1, 1])])
    u = (x - y)
    # H = I - 2 * u u^T / (u^T u)
    return np.eye(A.shape[0]) - (2 / np.squeeze(u.T @ u)) * (u @ u.T)


def qr_householder(A: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Decompose A into (Q, R): Q orthogonal, R upper triangular."""
    Q = np.eye(A.shape[0])
    for i in range(A.shape[1]):
        R = reflector(A, i)
        Q = Q @ R  # reflections are symmetric orthogonal, so this builds Q.
        A = R @ A
    return (Q, A)


def main() -> None:
    # Overdetermined system: least squares via A = QR, then R x = Q^T b.
    A = np.array([[1,-1,1],
                  [1,1,1],
                  [1,1,-1],
                  [1,1,1]])

    b = np.array([1, 1, -1, 0])

    [Q, R] = qr_householder(A)
    c = (np.transpose(Q) @ b).flatten()
    # Solve using only the square upper block of R (remaining rows are zero).
    x = np.linalg.solve(R[0:R.shape[1], :], c[0:R.shape[1]])

    print("A =", np.round(A, decimals=2), '\n', sep='\n')
    print("Q =", np.round(Q, decimals=2), '\n', sep='\n')
    print("R =", np.round(R, decimals=2), '\n', sep='\n')
    print("Q @ R =", np.round(Q @ R, decimals=2), '\n', sep='\n')
    print("b =", np.round(b, decimals=2), '\n', sep='\n')
    print("c =", np.round(c, decimals=2), '\n', sep='\n')
    print("x =", np.round(x, decimals=2), '\n', sep='\n')

if __name__ == '__main__':
    main()
#!/usr/bin/env python3
"""Count ordered k-selections without replacement: n! / (n - k)!."""

from functools import reduce


def combs(n, k):
    """Return the number of ordered selections of k items out of n.

    Computes the falling factorial n * (n-1) * ... * (n-k+1). The explicit
    initializer 1 makes the empty product well defined, so combs(n, 0)
    returns 1 instead of raising TypeError on the empty range (bug fix).
    A def replaces the original lambda assignment (PEP 8 E731); unused
    scipy/numpy/math/matplotlib imports were dropped (nothing else in this
    file used them).
    """
    return reduce(lambda acc, factor: acc * factor, range(n - k + 1, n + 1), 1)


def main():
    n = 4
    k = 3
    print("n =", n, "k =", k,
          "k elections without replacement =", combs(n, k))


if __name__ == '__main__':
    main()
sp_special.comb(n, k, exact=True) 13 | 14 | 15 | def main(): 16 | n = 4 17 | k = 3 18 | print("n =", n, "k =", k, 19 | "k elections without replacement =", combs(n, k)) 20 | 21 | 22 | if __name__ == '__main__': 23 | main() 24 | -------------------------------------------------------------------------------- /numerical/math/integration/double-integral-exercise-19.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import numpy as np 4 | from scipy import integrate 5 | import cmath 6 | 7 | 8 | def integrate_function(f, a1, a2, b1, b2): 9 | return integrate.dblquad(f, a1, a2, b1, b2)[0] 10 | 11 | 12 | def main(): 13 | 14 | e_19_1 = integrate_function(lambda y, x: x * y ** 2, -1, 1, 15 | lambda x: -x, lambda x: x) 16 | 17 | print("E19.1:\t" + str(e_19_1)) 18 | 19 | e_19_2 = integrate_function(lambda y, x: x ** 2 + y ** 2, 0, 1, 20 | lambda x: x ** 3, lambda x: x ** 2) 21 | print("E19.2:\t" + str(e_19_2)) 22 | 23 | e_19_3 = integrate_function(lambda y, x: (x * y - y ** 2) ** (1 / 2), 0, 1, 24 | lambda x: 0, lambda x: x) 25 | print("E19.3:\t" + str(e_19_3)) 26 | 27 | e_19_4 = integrate_function(lambda x, y: 12 - 3 * x - 4 * y, 0, 1, 28 | lambda b: b ** 2, lambda b: 3 - 2 * b) 29 | print("E19.4:\t" + str(e_19_4)) 30 | 31 | e_19_5 = integrate_function(lambda y, x: x ** 2 + y ** 2, - np.sqrt(2), 1, 32 | lambda b1: b1 ** 2, lambda b2: 2) 33 | print("E19.5:\t" + str(e_19_5)) 34 | 35 | e_19_6 = integrate_function(lambda y, x: y ** 2, -2, 6, 36 | lambda b1: np.abs(b1), 37 | lambda b2: np.divide(b2, 2) + 3) 38 | print("E19.6:\t" + str(e_19_6)) 39 | 40 | e_19_7 = integrate_function(lambda y, x: 2 * 41 | cmath.sqrt(1 - x ** 2).real + 1, 42 | - np.arccos(0.5), np.arccos(0.5), 43 | lambda b1: 1 - cmath.sqrt(1 - b1 ** 2).real, 44 | lambda b2: cmath.sqrt(1 - b2 ** 2).real) 45 | print("E19.7:\t" + str(e_19_7)) 46 | 47 | e24_1 = integrate_function(lambda y, x: 1, 0, 1, 48 | lambda b1: 0, lambda b2: b2 ** 2) 49 | 
#!/usr/bin/env python3
"""Compare direct vs. nested (Horner) polynomial evaluation under a
simulated finite-precision arithmetic that truncates intermediate results."""

import numpy as np

def truncate_digits(value, digits=None):
    """Round value to roughly `digits` significant digits; identity if None.

    The loop scales |value| down by 10 until it is <= 1 (at most `digits`
    times), rounds to `digits` decimals, then scales back up.
    NOTE(review): the initial `abs(value) % 10 ** digits` also discards any
    magnitude at or above 10**digits; for the inputs used by main() values
    never reach that bound — confirm intent before reusing elsewhere.
    """
    if digits is not None:
        sign = value < 0  # remember the sign; operate on the magnitude
        value = abs(value) % 10 ** digits
        shift = 0
        while digits > shift and value > 1:
            shift += 1
            value = value / 10
        value = np.round(value, decimals=digits) * 10 ** shift
        if sign == True:
            value *= - 1
    return value


def direct_eval(polinomial, x_0, digits=None):
    """Evaluate sum(c_i * x_0**i), coefficients in ascending power order,
    truncating every intermediate power, product and partial sum."""
    r = 0.0
    # cached[i] holds the (truncated) power x_0 ** i; cached[0] == 1.
    cached = np.ones(polinomial.shape[0])
    for i in range(1, polinomial.shape[0]):
        cached[i] = truncate_digits(cached[i - 1] * x_0, digits=digits)
    for i in range(polinomial.shape[0]):
        r = truncate_digits(r + truncate_digits(cached[i] * polinomial[i],
                                                digits=digits), digits=digits)
    return r


def nested_eval(polinomial, x_0, digits=None):
    """Evaluate the same polynomial with Horner's scheme (fewer operations),
    truncating every intermediate result for comparison with direct_eval."""
    r = 0.0
    # Fold from the leading coefficient down to c_1, then add c_0 last.
    for i in reversed(range(1, polinomial.shape[0])):
        r = truncate_digits(x_0 * truncate_digits(r + polinomial[i],
                                                  digits=digits), digits=digits)
    return truncate_digits(r + truncate_digits(polinomial[0], digits=digits),
                           digits=digits)


def main():
    # p(x) = x**3 - 6*x**2 + 3*x - 0.149, evaluated at x = 4.71 with 3 digits.
    p_x = np.array([- 0.149, 3, - 6, 1])
    x_0 = 4.71
    print(direct_eval(p_x, x_0, 3))
    print(nested_eval(p_x, x_0, 3))

if __name__ == '__main__':
    main()
#!/usr/bin/env python3
"""Pick a random probability exercise from four topic lists."""

import numpy as np
import pandas as pd


def exercise_table():
    """Return the exercise catalogue: one DataFrame column per topic.

    Topics have different lengths, so shorter columns are padded with NaN
    (hence the `count()` per column in main()).
    """
    t1 = np.array(['1.1','1.2','1.3','1.4','1.5',
                   '1.6', '1.7', '1.8', '1.9', '1.10',
                   '1.11', '1.12', '1.13', '1.15', '1.16',
                   '1.17', '1.18', '1.19', '1.20'])

    t2 = np.array(['1.14', '1.21', '1.22', '1.23', '1.24',
                   '1.25', '1.26', '1.27', '1.28', '1.29',
                   '1.31', '1.32'])

    t3 = np.array(['2.1','2.2','2.3','2.4','2.6',
                   '2.7','2.8','2.9ab','2.13','2.18',
                   '2.19','2.21','2.22','2.23','2.24',
                   '2.25','2.35'])

    # BUG FIX: a missing comma between '2.33' and '2.37' previously fused
    # them (implicit string concatenation) into the bogus id '2.332.37'.
    t4 = np.array(['2.5','2.9c','2.15','2.16','2.17','lavadoras',
                   '2.26','2.27','2.28','2.29', '2.31', '2.33',
                   '2.37','2.38','2.39'])

    return pd.DataFrame([t1, t2, t3, t4]).T


def main():
    exercises = exercise_table()

    # Pick a random topic, then a random exercise among its non-NaN entries.
    t_rand = np.random.randint(0, exercises.shape[1])
    print(t_rand)
    e_rand = np.random.randint(0, exercises.count()[t_rand])

    print(exercises.iloc[e_rand, t_rand])


if __name__ == '__main__':
    main()
from scipy import stats 7 | 8 | 9 | def main(): 10 | 11 | # Code Here 12 | 13 | pass 14 | 15 | if __name__ == '__main__': 16 | main() 17 | -------------------------------------------------------------------------------- /numerical/math/stats/stochastic_processes/entrega-01.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 2, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "def stationary_distribution(P: np.array) -> np.array:\n", 26 | " A = P - np.eye(len(P))\n", 27 | " A[:, (len(P) - 1)] = np.ones([len(P)])\n", 28 | " \n", 29 | " p_stationary = np.linalg.pinv(A)[len(P) - 1, :]\n", 30 | " return p_stationary" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": {}, 36 | "source": [ 37 | "## Exercise: Ruiz Family\n", 38 | "\n", 39 | "La familia Ruiz recibe el periódico todas las mañanas, y lo coloca en el revistero después de leerlo. Cada tarde, con probabilidad 0.3, alguien coge todos los periódicos del revistero y los lleva al contenedor de papel. Por otro lado, si hay al menos 5 periódicos en el montón, el señor Ruiz los lleva al contenedor." 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": {}, 45 | "source": [ 46 | "##### a) Construye una cadena de Markov que cuente el número de periódicos que hay en el revistero cada noche. ¿Cómo son los estados?" 
47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 3, 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [ 55 | "transition_ruiz = np.array([[0.0, 1.0, 0.0, 0.0, 0.0],\n", 56 | " [0.3, 0.0, 0.7, 0.0, 0.0],\n", 57 | " [0.3, 0.0, 0.0, 0.7, 0.0],\n", 58 | " [0.3, 0.0, 0.0, 0.0, 0.7],\n", 59 | " [1.0, 0.0, 0.0, 0.0, 0.0]])" 60 | ] 61 | }, 62 | { 63 | "cell_type": "markdown", 64 | "metadata": {}, 65 | "source": [ 66 | "##### b) Si el domingo por la noche está vacío el revistero, ¿Cuál es la probabilidad de que haya 1 periódico el miércoles por la noche?" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": 4, 72 | "metadata": {}, 73 | "outputs": [ 74 | { 75 | "data": { 76 | "text/plain": [ 77 | "array([[0.237, 0.21 , 0.21 , 0. , 0.343],\n", 78 | " [0.469, 0.237, 0.147, 0.147, 0. ],\n", 79 | " [0.126, 0.58 , 0.147, 0.147, 0. ],\n", 80 | " [0.273, 0.09 , 0.49 , 0.147, 0. ],\n", 81 | " [0.21 , 0.3 , 0. , 0.49 , 0. ]])" 82 | ] 83 | }, 84 | "execution_count": 4, 85 | "metadata": {}, 86 | "output_type": "execute_result" 87 | } 88 | ], 89 | "source": [ 90 | "np.linalg.matrix_power(transition_ruiz, 4)" 91 | ] 92 | }, 93 | { 94 | "cell_type": "markdown", 95 | "metadata": {}, 96 | "source": [ 97 | "##### c) Calcula la probabilidad, a largo plazo, de que el revistero esté vacío una noche cualquiera." 
98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": 5, 103 | "metadata": {}, 104 | "outputs": [ 105 | { 106 | "data": { 107 | "text/plain": [ 108 | "array([0.28304557, 0.28304557, 0.1981319 , 0.13869233, 0.09708463])" 109 | ] 110 | }, 111 | "execution_count": 5, 112 | "metadata": {}, 113 | "output_type": "execute_result" 114 | } 115 | ], 116 | "source": [ 117 | "stationary_distribution(transition_ruiz)" 118 | ] 119 | }, 120 | { 121 | "cell_type": "markdown", 122 | "metadata": {}, 123 | "source": [ 124 | "### Exercise 1.36" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": 6, 130 | "metadata": {}, 131 | "outputs": [], 132 | "source": [ 133 | "transition_36 = np.array([[ 0, 0, 1],\n", 134 | " [0.05, 0.95, 0],\n", 135 | " [ 0, 0.02, 0.98]])" 136 | ] 137 | }, 138 | { 139 | "cell_type": "markdown", 140 | "metadata": {}, 141 | "source": [ 142 | "#### Exercise 1.36 a)" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": 7, 148 | "metadata": {}, 149 | "outputs": [ 150 | { 151 | "data": { 152 | "text/plain": [ 153 | "array([0.01408451, 0.28169014, 0.70422535])" 154 | ] 155 | }, 156 | "execution_count": 7, 157 | "metadata": {}, 158 | "output_type": "execute_result" 159 | } 160 | ], 161 | "source": [ 162 | "stationary_distribution(transition_36)" 163 | ] 164 | }, 165 | { 166 | "cell_type": "markdown", 167 | "metadata": {}, 168 | "source": [ 169 | "#### Exercise 1.36 b)" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": 8, 175 | "metadata": {}, 176 | "outputs": [ 177 | { 178 | "data": { 179 | "text/plain": [ 180 | "array([71. 
, 3.55, 1.42])" 181 | ] 182 | }, 183 | "execution_count": 8, 184 | "metadata": {}, 185 | "output_type": "execute_result" 186 | } 187 | ], 188 | "source": [ 189 | "1 / stationary_distribution(transition_36)" 190 | ] 191 | }, 192 | { 193 | "cell_type": "markdown", 194 | "metadata": {}, 195 | "source": [ 196 | "### Exercise 1.48" 197 | ] 198 | }, 199 | { 200 | "cell_type": "code", 201 | "execution_count": 9, 202 | "metadata": {}, 203 | "outputs": [ 204 | { 205 | "data": { 206 | "text/plain": [ 207 | "array([[0. , 0.5, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.5],\n", 208 | " [0.5, 0. , 0.5, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n", 209 | " [0. , 0.5, 0. , 0.5, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n", 210 | " [0. , 0. , 0.5, 0. , 0.5, 0. , 0. , 0. , 0. , 0. , 0. , 0. ],\n", 211 | " [0. , 0. , 0. , 0.5, 0. , 0.5, 0. , 0. , 0. , 0. , 0. , 0. ],\n", 212 | " [0. , 0. , 0. , 0. , 0.5, 0. , 0.5, 0. , 0. , 0. , 0. , 0. ],\n", 213 | " [0. , 0. , 0. , 0. , 0. , 0.5, 0. , 0.5, 0. , 0. , 0. , 0. ],\n", 214 | " [0. , 0. , 0. , 0. , 0. , 0. , 0.5, 0. , 0.5, 0. , 0. , 0. ],\n", 215 | " [0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.5, 0. , 0.5, 0. , 0. ],\n", 216 | " [0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.5, 0. , 0.5, 0. ],\n", 217 | " [0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.5, 0. , 0.5],\n", 218 | " [0.5, 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0. , 0.5, 0. 
]])" 219 | ] 220 | }, 221 | "execution_count": 9, 222 | "metadata": {}, 223 | "output_type": "execute_result" 224 | } 225 | ], 226 | "source": [ 227 | "n = 12\n", 228 | "transition_48 = np.zeros([n, n])\n", 229 | "for i in range(n):\n", 230 | " transition_48[i, [(i - 1) % n, (i + 1) % n]] = [0.5] * 2\n", 231 | "transition_48" 232 | ] 233 | }, 234 | { 235 | "cell_type": "markdown", 236 | "metadata": {}, 237 | "source": [ 238 | "#### Exercise 1.48 a)" 239 | ] 240 | }, 241 | { 242 | "cell_type": "markdown", 243 | "metadata": {}, 244 | "source": [ 245 | "La distribución estacionaria será: \n", 246 | "$$\\pi_{i} = 1 / 12 \\ \\forall i \\in {1, 2, ..., 12}$$\n", 247 | "Por cumplir la matriz de transición la propiedad de ser *doblemente estocástica* (tanto filas como columnas suman la unidad). Por tanto, dado que se pide el número medio de pasos:\n", 248 | "$$ E_i(T_i) = \\frac{1}{\\pi_i} = \\frac{1}{1 / 12} = 12 \\ \\forall i \\in {1, 2, ..., 12}$$\n", 249 | "\n", 250 | "El mismo resultado se obtiene al ejecutar las operaciones:" 251 | ] 252 | }, 253 | { 254 | "cell_type": "code", 255 | "execution_count": 10, 256 | "metadata": {}, 257 | "outputs": [ 258 | { 259 | "data": { 260 | "text/plain": [ 261 | "array([12., 12., 12., 12., 12., 12., 12., 12., 12., 12., 12., 12.])" 262 | ] 263 | }, 264 | "execution_count": 10, 265 | "metadata": {}, 266 | "output_type": "execute_result" 267 | } 268 | ], 269 | "source": [ 270 | "1 / stationary_distribution(transition_48)" 271 | ] 272 | }, 273 | { 274 | "cell_type": "markdown", 275 | "metadata": {}, 276 | "source": [ 277 | "#### Exercise 1.48 b)" 278 | ] 279 | }, 280 | { 281 | "cell_type": "code", 282 | "execution_count": 11, 283 | "metadata": {}, 284 | "outputs": [ 285 | { 286 | "name": "stdout", 287 | "output_type": "stream", 288 | "text": [ 289 | "0.0881\n", 290 | "0.08825\n", 291 | "0.09096666666666667\n", 292 | "0.0914\n", 293 | "0.09116\n", 294 | "0.09143333333333334\n", 295 | "0.0909\n", 296 | "0.0906625\n", 297 | 
"0.09092222222222222\n", 298 | "0.09117\n", 299 | "0.09117\n" 300 | ] 301 | } 302 | ], 303 | "source": [ 304 | "import numpy as np\n", 305 | "\n", 306 | "n = 100000\n", 307 | "y = 0\n", 308 | "d = 12\n", 309 | "for n_temp in range(1, n + 1):\n", 310 | " visited = set()\n", 311 | " k = np.random.choice(range(d))\n", 312 | " \n", 313 | " position = k\n", 314 | " s = str(position % d) + ' '\n", 315 | " visited.add(position % d)\n", 316 | " \n", 317 | " position += np.random.choice([-1, 1])\n", 318 | " s += str(position % d) + ' '\n", 319 | " visited.add(position % d)\n", 320 | " \n", 321 | " while(position % d != k):\n", 322 | " position += np.random.choice([-1, 1])\n", 323 | " s += str(position % d) + ' '\n", 324 | " visited.add(position % d)\n", 325 | " \n", 326 | " y += (len(visited) == d)\n", 327 | " \n", 328 | " # print(y, s, sep=', ')\n", 329 | " if n_temp % 10000 == 0:\n", 330 | " print(y / n_temp)\n", 331 | "print(y / n)" 332 | ] 333 | } 334 | ], 335 | "metadata": { 336 | "kernelspec": { 337 | "display_name": "Python 3", 338 | "language": "python", 339 | "name": "python3" 340 | }, 341 | "language_info": { 342 | "codemirror_mode": { 343 | "name": "ipython", 344 | "version": 3 345 | }, 346 | "file_extension": ".py", 347 | "mimetype": "text/x-python", 348 | "name": "python", 349 | "nbconvert_exporter": "python", 350 | "pygments_lexer": "ipython3", 351 | "version": "3.7.0" 352 | } 353 | }, 354 | "nbformat": 4, 355 | "nbformat_minor": 2 356 | } 357 | -------------------------------------------------------------------------------- /numerical/math/stats/transformations/normal-power2-transformation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import numpy as np 4 | import matplotlib.pyplot as plt 5 | from scipy import stats 6 | 7 | def main(): 8 | 9 | bins = 100 10 | 11 | elems_per_bin = 10 ** 5 12 | 13 | x = np.random.normal(0,1, bins * elems_per_bin) 14 | y = np.power(x,2) 15 | 16 | f_x = 
#!/usr/bin/env python3

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

def main():
    """Plot empirical densities of X ~ Uniform(0, 2*pi) and of Y = cos(X)."""

    bins = 100

    elems_per_bin = 10 ** 5  # average sample count per histogram bin

    # Total sample size N = bins * elems_per_bin.
    x = np.random.uniform(0,2*np.pi, bins * elems_per_bin)
    y = np.cos(x)

    # Density estimate: counts / (N * bin_width). With bin_width =
    # (max - min) / bins and N = bins * elems_per_bin, the denominator
    # simplifies to elems_per_bin * (max - min) as written below.
    f_x = np.histogram(x, bins)[0] / (elems_per_bin*(np.max(x)-np.min(x)))
    f_y = np.histogram(y, bins)[0]/ (elems_per_bin*(np.max(y)-np.min(y)))

    plt.plot(np.linspace(np.min(x),np.max(x),bins),f_x)
    plt.plot(np.linspace(np.min(y),np.max(y),bins),f_y)

    # Fix the y axis so both curves are comparable; NOTE: the density of
    # cos(X) concentrates sharply near +/-1, which this limit clips.
    plt.gca().set_ylim([0,1])
    plt.yticks(np.arange(0,1,0.1))
    plt.grid(True)

    plt.show()

if __name__ == '__main__':
    main()
from datetime import datetime
from pathlib import Path

import modin.pandas as pd


def main():
    """Concatenate every CSV under data/ and report a column mean plus timing."""
    directory_path = Path('data/')
    pattern = '*.csv'

    start = datetime.now()

    # Generator feeds files lazily into concat; sort=False keeps the
    # existing column order, copy=False avoids an extra copy on concat.
    df = pd.concat(
        (file_to_dataframe(file_path) for file_path in directory_path.glob(pattern)),
        sort=False,
        copy=False,
    )

    end = datetime.now()

    # NOTE(review): assumes every CSV shares a numeric 'KWMENG_C' column —
    # confirm against the actual data set.
    print(f'Units mean: "{df["KWMENG_C"].mean()}"')
    print(f'Elapsed "{end - start}" seconds')


def file_to_dataframe(file_path: Path) -> pd.DataFrame:
    """Read one pipe-separated CSV with comma decimals and latin-1 encoding."""
    print(f'Reading "{file_path}"...')
    return pd.read_csv(file_path, sep='|', decimal=',', encoding='latin-1')


if __name__ == '__main__':
    main()
import tensorflow as tf


def main():
    """Walk through basic TensorFlow graph-mode concepts step by step.

    NOTE(review): uses tf.Session / tf.placeholder, i.e. the TF 1.x API —
    running this requires TF 1.x (or tf.compat.v1).
    """
    # Constant nodes: printing shows symbolic tensors, not values.
    node1 = tf.constant(3.0, tf.float32)
    node2 = tf.constant(4.0)  # also tf.float32 implicitly
    print(node1, node2)

    # Values only materialize inside a session run.
    sess = tf.Session()
    print(sess.run([node1, node2]))

    node3 = tf.add(node1, node2)
    print("node3: ", node3)
    print("sess.run(node3): ", sess.run(node3))

    # Placeholders are fed concrete values at run time via feed_dict.
    a = tf.placeholder(tf.float32)
    b = tf.placeholder(tf.float32)
    adder_node = a + b  # + provides a shortcut for tf.add(a, b)
    print(sess.run(adder_node, {a: 3, b: 4.5}))
    print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))

    add_and_triple = adder_node * 3.
    print(sess.run(add_and_triple, {a: 3, b: 4.5}))

    # Variables hold mutable state and must be explicitly initialized.
    W = tf.Variable([.3], tf.float32)
    b = tf.Variable([-.3], tf.float32)
    x = tf.placeholder(tf.float32)
    linear_model = W * x + b

    init = tf.global_variables_initializer()
    sess.run(init)

    print(sess.run(linear_model, {x: [1, 2, 3, 4]}))

    # Sum-of-squared-errors loss against the target placeholder y.
    y = tf.placeholder(tf.float32)
    squared_deltas = tf.square(linear_model - y)
    loss = tf.reduce_sum(squared_deltas)
    print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))

    # Assign the exact-fit parameters W=-1, b=1 (model -x+1 matches y
    # at every sample point), so the re-evaluated loss is zero.
    fixW = tf.assign(W, [-1.])
    fixb = tf.assign(b, [1.])
    sess.run([fixW, fixb])
    print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))


if __name__ == '__main__':
    main()
import tensorflow as tf


def main():
    """Power-iteration PageRank with damping on a 4-node adjacency matrix.

    NOTE(review): TF 1.x graph-mode API (tf.Session, tf.div, tf.assign).
    """
    # Adjacency matrix; columns are treated as source nodes (they are the
    # axis normalized by the column sums below).
    a_raw = [
        [0.0, 0.0, 1.0, 1.0],
        [1.0, 0.0, 0.0, 0.0],
        [1.0, 1.0, 0.0, 1.0],
        [1.0, 1.0, 0.0, 0.0]
    ]
    beta = 0.85  # damping factor: probability of following a link
    steps = 7

    a = tf.Variable(a_raw, tf.float32)
    n = int(a.get_shape()[0])

    # Start from the uniform distribution 1/n over all nodes.
    v = tf.Variable(tf.fill([n, 1], 1 / n), tf.float32)

    # Column sums = out-degree of each source node.
    o_degree = tf.reduce_sum(a, 0)

    condition = tf.not_equal(o_degree, 0)

    # Damped column-stochastic transition: beta * (link probabilities) plus
    # (1 - beta)/n teleportation; columns with zero out-degree (dangling
    # nodes) are replaced by the uniform column 1/n via tf.where.
    transition = tf.transpose(
        tf.where(condition,
                 tf.transpose(beta * tf.div(a, o_degree) + (1 - beta) / n),
                 tf.fill([n, n], 1 / n)))

    page_rank = tf.matmul(transition, v)

    # One power-iteration step: v <- transition @ v.
    run_iteration = tf.assign(v, page_rank)

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        print(sess.run(transition))

        for step in range(steps):
            sess.run(run_iteration)

        print(sess.run(v))

        # Dump the graph for TensorBoard inspection.
        tf.summary.FileWriter('logs/.', sess.graph)
    pass


if __name__ == '__main__':
    main()
import tensorflow as tf
import numpy as np
from numerical.data_science.res import DataSets


def ranked(x):
    """Return the index order that sorts ``x`` along axis 0."""
    # x will be a numpy array with the contents of the placeholder below
    return np.argsort(x, axis=0)


def main():
    """Run PageRank over the wiki-vote edge list with TensorFlow 1.x.

    The edge list is scattered into a dense adjacency matrix, the damped
    transition matrix is derived from it, and a fixed number of power
    iterations is applied; the final ranking is written to ``logs/test.csv``.

    NOTE(review): uses TF 1.x APIs (``tf.Session``, ``tf.py_func``,
    ``tf.div``) — confirm the intended TensorFlow version.
    """
    steps = 20  # number of power-iteration steps

    data_set = DataSets.get_wiki_vote()
    data_set -= 1  # make node ids 0-based
    n_raw = data_set.max(axis=0).max() + 1  # node count

    beta = tf.constant(0.85, tf.float32, name="Beta")
    n = tf.constant(n_raw, tf.float32, name="NodeCounts")

    # Scatter the edge pairs into a dense 0/1 adjacency matrix.
    a = tf.Variable(tf.transpose(
        tf.scatter_nd(data_set.values.tolist(), data_set.shape[0] * [1.0],
                      [n_raw, n_raw])), tf.float64, name="AdjacencyMatrix")

    # Uniform initial distribution: every entry is 1/n.
    v = tf.Variable(tf.fill([n_raw, 1], tf.pow(n, -1)), name="PageRankVector")

    # Column sums of the adjacency matrix.
    o_degree = tf.reduce_sum(a, 0)

    # Columns summing to zero (dangling nodes) get a uniform column instead.
    condition = tf.not_equal(o_degree, 0)

    transition = tf.transpose(
        tf.where(condition,
                 tf.transpose(beta * tf.div(a, o_degree) + (1 - beta) / n),
                 tf.fill([n_raw, n_raw], tf.pow(n, -1))))

    page_rank = tf.matmul(transition, v, a_is_sparse=True)

    # One power-iteration step: v <- transition @ v.
    run_iteration = tf.assign(v, page_rank)

    # ``-v`` so that np.argsort produces a descending ranking.
    ranks = tf.transpose(tf.py_func(ranked, [-v], tf.int64))[0]
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        for step in range(steps):
            sess.run(run_iteration)

        print(sess.run(v))
        print(sess.run(ranks))
        np.savetxt('logs/test.csv', sess.run(ranks), fmt='%i')
        tf.summary.FileWriter('logs/.', sess.graph)
        pass


if __name__ == '__main__':
    main()
def csv_to_hdf():
    """Stream the CSV data set into an HDF5 store, chunk by chunk.

    Reads ``CSV_FILE_PATH`` in ``CHUNK_SIZE``-row chunks and appends each one
    under the ``TABLE_NAME`` key of ``HDF_FILE_PATH``, logging the stored
    sizes before and after the conversion.
    """
    logger.info('Starting...')

    logger.info(f'CSV Stored Size: {CSV_FILE_PATH.stat().st_size / 1024 ** 3:.3f} GB')

    # Start from scratch so reruns do not append onto stale data.
    if HDF_FILE_PATH.exists():
        HDF_FILE_PATH.unlink()

    stream = pd.read_csv(
        CSV_FILE_PATH,
        chunksize=CHUNK_SIZE,
        low_memory=False,
        sep=',',
        encoding='latin-1',
    )
    for i, chunk in enumerate(stream, 1):
        # Use the logger (not print) for consistency with the sibling
        # csv_to_sqlite/csv_to_parquet converters.
        logger.info(f'{i}-th iteration\tInserting "{len(chunk)}" rows on "{TABLE_NAME}"...')
        # BUG FIX: the original call used the default fixed format without
        # ``append=True``, so each chunk silently overwrote the previous one
        # and only the last chunk survived.  The ``table`` format with
        # ``append=True`` accumulates all chunks under the same key.
        chunk.to_hdf(HDF_FILE_PATH, key=TABLE_NAME, format='table', append=True)

    logger.info(f'HDF Stored Size: {HDF_FILE_PATH.stat().st_size / 1024 ** 3:.3f} GB')

    logger.info('Finished!')


def main():
    """Configure logging and run the CSV -> HDF5 conversion."""
    logging.basicConfig(level=logging.INFO)

    csv_to_hdf()


if __name__ == '__main__':
    main()
def csv_to_sqlite():
    """Stream the CSV data set into a SQLite table, chunk by chunk."""
    logger.info(f'Starting...')

    logger.info(f'CSV Stored Size: {CSV_FILE_PATH.stat().st_size / 1024 ** 3:.3f} GB')

    connection = sqlite3.connect(SQLITE_FILE_PATH)

    # Drop any previous run's table so rows are not duplicated.
    logger.info(f'Dropping "{TABLE_NAME}" table...')
    connection.execute(f'DROP TABLE IF EXISTS {TABLE_NAME};')

    reader_options = dict(
        chunksize=CHUNK_SIZE,
        low_memory=False,
        sep=',',
        encoding='latin-1',
    )
    for i, chunk in enumerate(pd.read_csv(CSV_FILE_PATH, **reader_options), 1):
        logger.info(f'{i}-th iteration\tInserting "{len(chunk)}" rows on "{TABLE_NAME}"...')
        chunk.to_sql(TABLE_NAME, connection, if_exists='append', chunksize=10_000)

    logger.info(f'SQLITE Stored Size: {SQLITE_FILE_PATH.stat().st_size / 1024 ** 3:.3f} GB')

    logger.info(f'Finished!')


def main():
    """Configure logging and run the CSV -> SQLite conversion."""
    logging.basicConfig(level=logging.INFO)
    csv_to_sqlite()


if __name__ == '__main__':
    main()
import logging

import vaex

from numerical.utils.constants import (
    CSV_FILE_PATH,
    ARROW_FILE_PATH,
    HDF_FILE_PATH,
)

# Module-level side effect: configures the root logger on import.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def main():
    """Convert the CSV data set to Arrow via an intermediate HDF5 file.

    ``vaex.open(..., convert=...)`` first materialises the CSV as an HDF5
    file, then the resulting dataframe is exported to Arrow; the stored
    size is logged after each stage.
    """
    logger.info(f'Starting...')

    logger.info(f'CSV Stored Size: {CSV_FILE_PATH.stat().st_size / 1024 ** 3:.3f} GB')

    # ``convert`` writes the HDF5 conversion to disk as a side effect.
    df = vaex.open(str(CSV_FILE_PATH), convert=str(HDF_FILE_PATH))
    logger.info(f'HDF5 Stored Size: {HDF_FILE_PATH.stat().st_size / 1024 ** 3:.3f} GB')

    # Output format is presumably inferred from the ``.arrow`` extension —
    # TODO confirm against the vaex export documentation.
    df.export(str(ARROW_FILE_PATH))
    logger.info(f'ARROW Stored Size: {ARROW_FILE_PATH.stat().st_size / 1024 ** 3:.3f} GB')

    logger.info(f'Finished!')


if __name__ == '__main__':
    main()
"{len(chunk)}" rows of data...') 28 | df_chunk = vaex.from_pandas(chunk, copy_index=False) 29 | export_path = TMP_PATH / f'part_{i}.hdf5' 30 | df_chunk.export_hdf5(str(export_path)) 31 | 32 | df = vaex.open(str(TMP_PATH / 'part*')) 33 | 34 | df.export_hdf5(str(COLUMNAR_HDF_FILE_PATH)) 35 | print(f'HDF5 Stored Size: {COLUMNAR_HDF_FILE_PATH.stat().st_size / 1024 ** 3:.3f} GB') 36 | 37 | rmtree(TMP_PATH) 38 | 39 | 40 | if __name__ == '__main__': 41 | main() 42 | -------------------------------------------------------------------------------- /operations_research/ortools/introduction.py: -------------------------------------------------------------------------------- 1 | from ortools.linear_solver.pywraplp import Solver 2 | 3 | 4 | def main(): 5 | # Create the linear solver with the GLOP backend. 6 | solver = Solver('simple_lp_program', Solver.GLOP_LINEAR_PROGRAMMING) 7 | 8 | # Create the variables x and y. 9 | x = solver.NumVar(0, 1, 'x') 10 | y = solver.NumVar(0, 2, 'y') 11 | 12 | print('Number of variables =', solver.NumVariables()) 13 | 14 | # Create a linear constraint, 0 <= x + y <= 2. 15 | ct = solver.Constraint(0, 2, 'ct') 16 | ct.SetCoefficient(x, 1) 17 | ct.SetCoefficient(y, 1) 18 | 19 | print('Number of constraints =', solver.NumConstraints()) 20 | 21 | # Create the objective function, 3 * x + y. 
22 | objective = solver.Objective() 23 | objective.SetCoefficient(x, 3) 24 | objective.SetCoefficient(y, 1) 25 | objective.SetMaximization() 26 | 27 | solver.Solve() 28 | 29 | print('Solution:') 30 | print('Objective value =', objective.Value()) 31 | print('x =', x.solution_value()) 32 | print('y =', y.solution_value()) 33 | 34 | 35 | if __name__ == '__main__': 36 | main() 37 | -------------------------------------------------------------------------------- /operations_research/ortools/pickup_and_delivery.py: -------------------------------------------------------------------------------- 1 | """Simple Pickup Delivery Problem (PDP).""" 2 | 3 | from ortools.constraint_solver import ( 4 | routing_enums_pb2, 5 | pywrapcp, 6 | ) 7 | 8 | 9 | def create_data_model(): 10 | """Stores the data for the problem.""" 11 | data = dict() 12 | data['distance_matrix'] = [ 13 | [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662], 14 | [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210], 15 | [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754], 16 | [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358], 17 | [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244], 18 | [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708], 19 | [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480], 20 | [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856], 21 | [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514], 22 | [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468], 23 | [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354], 24 | [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844], 25 | [388, 480, 1164, 628, 
def print_solution(data, manager, routing, assignment):
    """Print every vehicle's route and distance, then the grand total."""
    total_distance = 0
    for vehicle_id in range(data['num_vehicles']):
        node = routing.Start(vehicle_id)
        # Accumulate the route description and join once at the end.
        parts = ['Route for vehicle {}:\n'.format(vehicle_id)]
        route_distance = 0
        while not routing.IsEnd(node):
            parts.append(' {} -> '.format(manager.IndexToNode(node)))
            hop_start = node
            node = assignment.Value(routing.NextVar(node))
            route_distance += routing.GetArcCostForVehicle(hop_start, node, vehicle_id)
        parts.append('{}\n'.format(manager.IndexToNode(node)))
        parts.append('Distance of the route: {}m\n'.format(route_distance))
        print(''.join(parts))
        total_distance += route_distance
    print('Total Distance of all routes: {}m'.format(total_distance))
78 | def distance_callback(from_index, to_index): 79 | """Returns the manhattan distance between the two nodes.""" 80 | # Convert from routing variable Index to distance matrix NodeIndex. 81 | from_node = manager.IndexToNode(from_index) 82 | to_node = manager.IndexToNode(to_index) 83 | return data['distance_matrix'][from_node][to_node] 84 | 85 | transit_callback_index = routing.RegisterTransitCallback(distance_callback) 86 | routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index) 87 | 88 | # Add Distance constraint. 89 | dimension_name = 'Distance' 90 | routing.AddDimension( 91 | transit_callback_index, 92 | 0, # no slack 93 | 3000, # vehicle maximum travel distance 94 | True, # start cumul to zero 95 | dimension_name, 96 | ) 97 | distance_dimension = routing.GetDimensionOrDie(dimension_name) 98 | distance_dimension.SetGlobalSpanCostCoefficient(100) 99 | 100 | solver = routing.solver() 101 | # Define Transportation Requests. 102 | for request in data['pickups_deliveries']: 103 | pickup_index = manager.NodeToIndex(request[0]) 104 | delivery_index = manager.NodeToIndex(request[1]) 105 | routing.AddPickupAndDelivery(pickup_index, delivery_index) 106 | solver.Add(routing.VehicleVar(pickup_index) == routing.VehicleVar(delivery_index)) 107 | solver.Add(distance_dimension.CumulVar(pickup_index) <= distance_dimension.CumulVar(delivery_index)) 108 | 109 | # Setting first solution heuristic. 110 | search_parameters = pywrapcp.DefaultRoutingSearchParameters() 111 | search_parameters.first_solution_strategy = routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION 112 | 113 | # Solve the problem. 114 | assignment = routing.SolveWithParameters(search_parameters) 115 | 116 | # Print solution on console. 
from pulp import (
    LpVariable,
    LpProblem,
    LpMinimize,
    LpStatus,
)


def main():
    """Build and solve a two-variable LP with PuLP, then report the result."""
    first = LpVariable("x", 0, 3)
    second = LpVariable("y", 0, 1)

    problem = LpProblem("myProblem", LpMinimize)
    # A bare relational expression adds a constraint...
    problem += first + second <= 2
    # ...while a bare affine expression sets the objective.
    problem += -4 * first + second

    solve_status = problem.solve()

    print(f'Status: {LpStatus[solve_status]}')
    for decision in problem.variables():
        print(f'{decision.name} = {decision.value()}')
    print(f'objective = {problem.objective.value()}')


if __name__ == '__main__':
    main()
13 | mpmath = "^1.0" 14 | sympy = "^1.2" 15 | wolframalpha = "^3.0" 16 | graphene-django = "^2.2" 17 | scikit-learn = "^0.21" 18 | fabric = "*" 19 | docker = "*" 20 | ortools = "^7.4" 21 | modin = {version = "^0.6", extras = ["all"]} 22 | deap = "^1.3" 23 | pyarrow = "^0.15" 24 | dask = {extras = ["dataframe"], version = "^2.9.2"} 25 | vaex = "^2.5.0" 26 | jupyterlab = "^1.2.5" 27 | jupyter = "^1.0.0" 28 | tables = "^3.6.1" 29 | [tool.poetry.dev-dependencies] 30 | 31 | [build-system] 32 | requires = ["poetry^1.0"] 33 | build-backend = "poetry.masonry.api" 34 | -------------------------------------------------------------------------------- /text/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/text/__init__.py -------------------------------------------------------------------------------- /text/regex/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/text/regex/__init__.py -------------------------------------------------------------------------------- /text/regex/regex_distance.py: -------------------------------------------------------------------------------- 1 | # This code is based on "https://gist.github.com/wil3/1671fbde4c698565040a" gist. 
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def regex_dist(regex: str, target: str) -> int:
    """Edit distance between ``target`` and the language of ``regex``.

    Supports a restricted regex alphabet: literal characters, ``.`` and the
    quantifiers ``?``, ``*`` and ``+`` applied to the preceding character.
    Implemented as a memoised Levenshtein-style recursion over the index
    pair ``(r_i, t_i)``: the last regex token and the last target character
    still to be matched.
    """
    logger.info(f'Computing distance between "{regex}" and "{target}"...')

    def regex_dist_aux(r_i, t_i):
        # Both strings consumed: perfect match, zero cost.
        if r_i == -1 and t_i == -1:
            return 0
        # Regex exhausted: every remaining target character is an insertion.
        if r_i == -1:
            return t_i + 1
        # Target exhausted: count the regex tokens that *must* consume a
        # character ('?' and '*' can match empty; literals and '+' cannot).
        if t_i == -1:
            i, counter = r_i, 0
            while i >= 0:
                char = regex[i]
                if char in ('?', '*', '+'):
                    i -= 2  # skip the quantifier together with its operand
                else:
                    i -= 1
                if char not in ('?', '*'):
                    counter += 1
            return counter

        if memo[r_i][t_i] is not None:
            return memo[r_i][t_i]

        # Regex special cases
        if regex[r_i] == '.':
            # '.' matches any single character for free.
            memo[r_i][t_i] = regex_dist_aux(r_i - 1, t_i - 1)
            return memo[r_i][t_i]

        if regex[r_i] == '+' or regex[r_i] == '*' or regex[r_i] == '?':
            if regex[r_i - 1] == target[t_i]:
                # Operand matches: either close the quantifier, or (for
                # '+'/'*') let it absorb one more target character.
                if regex[r_i] == '?':
                    memo[r_i][t_i] = regex_dist_aux(r_i - 2, t_i - 1)
                else:
                    memo[r_i][t_i] = min(regex_dist_aux(r_i - 2, t_i - 1), regex_dist_aux(r_i, t_i - 1))
            else:
                # Mismatch: substitute, insert, or drop the quantified pair
                # ('+' must consume at least one character, hence its cost).
                additional_cost = 1 if (regex[r_i] == '+') else 0
                memo[r_i][t_i] = min(regex_dist_aux(r_i - 2, t_i - 1) + 1,
                                     regex_dist_aux(r_i, t_i - 1) + 1,
                                     regex_dist_aux(r_i - 2, t_i) + additional_cost)
            return memo[r_i][t_i]

        # Other characters
        if regex[r_i] == target[t_i]:
            memo[r_i][t_i] = regex_dist_aux(r_i - 1, t_i - 1)
        else:
            # Classic Levenshtein step: substitution / insertion / deletion.
            memo[r_i][t_i] = min(regex_dist_aux(r_i - 1, t_i - 1) + 1,
                                 regex_dist_aux(r_i, t_i - 1) + 1,
                                 regex_dist_aux(r_i - 1, t_i) + 1)
        return memo[r_i][t_i]

    # memo[r_i][t_i] caches results for non-negative index pairs.
    memo = [[None] * (len(target) + 1) for _ in range(len(regex) + 1)]
    distance = regex_dist_aux(len(regex) - 1, len(target) - 1)
    logger.info(f'The distance between "{regex}" and "{target}" is "{distance}"...')
    return distance
def evaluate(individual: List[chr]) -> Tuple[float]:
    """Fitness: the word's length when it matches PATTERN, else +inf.

    Lower is better (the algorithm minimises), so non-matching words are
    maximally penalised.
    """
    candidate = ''.join(individual)
    if re.fullmatch(PATTERN, candidate):
        return len(individual),
    return float('inf'),
def mate(ind1: List[chr], ind2: List[chr]) -> Tuple[List[chr], ...]:
    """Uniform crossover: probabilistically swap genes between both parents.

    Mutates both individuals in place over their overlapping prefix and
    returns them as a pair.
    """
    overlap = min(len(ind1), len(ind2))
    for position in range(overlap):
        if not random() < MATE_RATIO:
            continue
        ind1[position], ind2[position] = ind2[position], ind1[position]
    return ind1, ind2
individual.fitness.values = toolbox.evaluate(individual) 111 | 112 | hall_of_fame.update(population) 113 | record = stats.compile(population) 114 | logbook.record(gen=0, evals=len(population), **record) 115 | print(logbook.stream) 116 | 117 | generated_cases = list 118 | last_fitness = float('inf') 119 | current_fitness = None 120 | generation_count = 1 121 | while generation_count <= MAX_GENERATIONS and (last_fitness != current_fitness or current_fitness == float('inf')): 122 | last_fitness = current_fitness 123 | 124 | # Select the next generation individuals 125 | offspring = toolbox.select(population, floor(POPULATION_SIZE * 0.9)) 126 | 127 | # Clone the selected individuals 128 | offspring = list(toolbox.map(toolbox.clone, offspring)) 129 | 130 | # Add new individuals from the population 131 | offspring += toolbox.population(n=POPULATION_SIZE - len(offspring)) 132 | 133 | # Apply crossover and mutation on the offspring 134 | for child1, child2 in zip(offspring[::2], offspring[1::2]): 135 | if not random() < MATE_RATIO: 136 | continue 137 | toolbox.mate(child1, child2) 138 | del child1.fitness.values 139 | del child2.fitness.values 140 | 141 | for mutant in offspring: 142 | if not random() < MUTATION_RATIO: 143 | continue 144 | toolbox.mutate(mutant) 145 | del mutant.fitness.values 146 | 147 | # Evaluate the individuals with an invalid fitness 148 | invalid_ind = [individual for individual in offspring if not individual.fitness.valid] 149 | for individual in offspring: 150 | individual.fitness.values = toolbox.evaluate(individual) 151 | 152 | generated_cases = tools.selBest(population, k=cases) 153 | current_fitness = sum(toolbox.map(op.itemgetter(0), toolbox.map(toolbox.evaluate, generated_cases))) 154 | best = choice(generated_cases) 155 | word = "".join(best) 156 | 157 | # Select the next generation population 158 | population = toolbox.select(population + offspring, POPULATION_SIZE) 159 | record = stats.compile(population) 160 | 
from collections.abc import MutableSet


class CycledSet(MutableSet):
    """A mutable set whose iterator cycles over the elements forever.

    Iteration never stops: once every current element has been produced,
    the cycle starts over from the live contents of the set.  While the set
    is empty the iterator yields ``None`` instead of stopping.  Elements
    added or removed between cycles are picked up on the next pass.
    """

    def __init__(self, iterable=()):
        # Backing store; all MutableSet behaviour delegates to this set.
        self.data = set(iterable)

    def __contains__(self, value):
        return value in self.data

    def __iter__(self):
        """Yield elements in endless cycles; yield ``None`` while empty."""
        while True:
            # BUG FIX: the original tested ``any(self.data)``, which checks
            # the *truthiness of the elements*, not emptiness — a set such
            # as {0} or {''} was treated as empty and yielded None forever.
            # Emptiness is the set's own truth value.
            if self.data:
                yield from self._iter_data()
            else:
                yield None

    def _iter_data(self):
        # Iterate over a snapshot so concurrent add/discard cannot raise
        # "set changed size during iteration"; elements removed mid-cycle
        # are skipped.
        copied_data = set(self.data)
        for element in copied_data:
            if element not in self.data:
                continue
            yield element

    def __len__(self):
        return len(self.data)

    def __repr__(self):
        return repr(self.data)

    def add(self, item):
        self.data.add(item)

    def discard(self, item):
        self.data.discard(item)
from collections import UserDict


class OscarDict(UserDict):
    """Dictionary that lazily materialises callable values.

    A value stored as a zero-argument callable is invoked on first access;
    the produced result then replaces the callable in the mapping, so later
    lookups return the cached value directly.
    """

    def __getitem__(self, key):
        stored = self.data[key]
        if not callable(stored):
            return stored
        # First access: evaluate the factory and memoise its result.
        produced = stored()
        self.data[key] = produced
        return produced


def main():
    base = {'a': 3, 'b': lambda: 'Oscar'}
    fancy = OscarDict(base)

    print(fancy['a'])
    print(fancy['b'])

    fancy['c'] = lambda: 'Sergio'

    print(fancy['c'])


if __name__ == '__main__':
    main()
json.dumps(dict_data, indent=2, sort_keys=True) 14 | -------------------------------------------------------------------------------- /visualization/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/visualization/__init__.py -------------------------------------------------------------------------------- /visualization/bokeh/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/visualization/bokeh/__init__.py -------------------------------------------------------------------------------- /visualization/bokeh/grouped_bar_plot_example_01.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | 4 | # EXAMPLE FROM: https://bokeh.pydata.org/en/latest/docs/user_guide/categorical.html#nested-categories 5 | 6 | from bokeh.io import show 7 | from bokeh.models import ColumnDataSource, FactorRange 8 | from bokeh.plotting import figure 9 | from bokeh.transform import factor_cmap 10 | from bokeh.palettes import Spectral6 11 | 12 | def main() -> None: 13 | fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries'] 14 | years = ['2015', '2016', '2017'] 15 | 16 | data = {'fruits' : fruits, 17 | '2015' : [2, 1, 4, 3, 2, 4], 18 | '2016' : [5, 3, 3, 2, 4, 6], 19 | '2017' : [3, 2, 4, 4, 5, 3]} 20 | 21 | # this creates [ ("Apples", "2015"), ("Apples", "2016"), ("Apples", "2017"), ("Pears", "2015), ... 
] 22 | x = [ (fruit, year) for fruit in fruits for year in years ] 23 | 24 | counts = sum(zip(data['2015'], data['2016'], data['2017']), tuple()) # like an hstack 25 | 26 | source = ColumnDataSource(data=dict(x=x, counts=counts)) 27 | 28 | p = figure(x_range=FactorRange(*x)) 29 | 30 | p.vbar(x='x', top='counts', width=0.9, source=source, line_color="white", 31 | # use the palette to colormap based on the the x[1:2] values 32 | fill_color=factor_cmap('x', palette=Spectral6, factors=years, start=1, end=2)) 33 | 34 | p.y_range.start = 0 35 | p.x_range.range_padding = 0.1 36 | p.xaxis.major_label_orientation = 1 37 | p.xgrid.grid_line_color = None 38 | 39 | show(p) 40 | 41 | if __name__ == '__main__': 42 | main() 43 | -------------------------------------------------------------------------------- /visualization/bokeh/stacked_plot_example_01.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | 4 | # EXAMPLE FROM: https://stackoverflow.com/a/43936905/3921457 5 | 6 | import pandas as pd 7 | from bokeh.models import ColumnDataSource 8 | from bokeh.plotting import show, output_notebook, figure as bf 9 | 10 | 11 | 12 | def main() -> None: 13 | df = pd.DataFrame({'S': [34,23, 12, 9], 14 | 'P':[65, 44, 81,23]}) 15 | 16 | df_comb = df.join(df.divide(df.sum(axis=1), axis=0), rsuffix='_w').join(df.divide(df.sum(axis=1) * 2, axis=0), rsuffix='_w_labelheights') 17 | df_comb['P_w_labelheights'] += df_comb['S_w'] 18 | df_comb 19 | 20 | f = bf() 21 | source = ColumnDataSource(df_comb) 22 | 23 | s = f.vbar(x='index', bottom=0, top='S_w', width=0.5, source=source) 24 | p = f.vbar(x='index', bottom='S_w', top=1, width=0.5, source=source, color='orange') 25 | 26 | s_label = f.text(x='index', y='S_w_labelheights', source=source, text='S') 27 | p_label = f.text(x='index', y='P_w_labelheights', source=source, text='P') 28 | 29 | show(f) 30 | 31 | 32 | if __name__ == '__main__': 33 | main() 34 | 
-------------------------------------------------------------------------------- /visualization/bokeh/stacked_plot_example_02.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | 4 | # EXAMPLE FROM: https://bokeh.pydata.org/en/latest/docs/user_guide/categorical.html#stacked 5 | 6 | from bokeh.core.properties import value 7 | from bokeh.io import show 8 | from bokeh.plotting import figure 9 | 10 | 11 | def main() -> None: 12 | 13 | fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries'] 14 | years = ["2015", "2016", "2017"] 15 | colors = ["#c9d9d3", "#718dbf", "#e84d60"] 16 | 17 | data = {'fruits' : fruits, 18 | '2015' : [2, 1, 4, 3, 2, 4], 19 | '2016' : [5, 3, 4, 2, 4, 6], 20 | '2017' : [3, 2, 4, 4, 5, 3]} 21 | 22 | p = figure(x_range=fruits, plot_height=250, title="Fruit Counts by Year", 23 | toolbar_location=None, tools="") 24 | 25 | p.vbar_stack(years, x='fruits', width=0.9, color=colors, source=data, 26 | legend=[value(x) for x in years]) 27 | 28 | p.y_range.start = 0 29 | p.x_range.range_padding = 0.1 30 | p.xgrid.grid_line_color = None 31 | p.axis.minor_tick_line_color = None 32 | p.outline_line_color = None 33 | p.legend.location = "top_left" 34 | p.legend.orientation = "horizontal" 35 | 36 | show(p) 37 | 38 | if __name__ == '__main__': 39 | main() 40 | -------------------------------------------------------------------------------- /visualization/bokeh/time_series_plot_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | 4 | # EXAMPLE FROM: https://stackoverflow.com/a/45984782/3921457 5 | 6 | 7 | import pandas as pd 8 | 9 | from bokeh.models import ColumnDataSource 10 | from bokeh.plotting import figure, show 11 | 12 | 13 | def main() -> None: 14 | dic = { 15 | '2017-08-11': {'Yes': 157, 'Not sure': 2, 'No': 1}, 16 | '2017-08-22': {'Yes': 142, 'Not sure': 12}, 17 | '2017-08-01': {'Yes': 112, 
'Others': 10, 'Not sure': 4, 'No': 9}, 18 | '2017-08-17': {'Yes': 117, 'No': 12, 'Not sure': 11, 'Others': 2}, 19 | '2017-08-25': {'Yes': 61, 'Not sure': 9}, 20 | '2017-08-23': {'Yes': 268, 'Not sure': 20, 'No': 1}, 21 | '2017-07-10': {'Yes': 123, 'Not sure': 4, 'No': 1}, 22 | '2017-08-10': {'Yes': 343, 'Not sure': 20}, 23 | '2017-07-13': {'Yes': 116, 'Others': 1, 'Not sure': 14, 'No': 2}, 24 | '2017-07-14': {'Yes': 255, 'Not sure': 22, 'No': 6}, 25 | '2017-08-07': {'Yes': 73, 'Others': 3, 'Not sure': 4, 'No': 5}, 26 | '2017-08-04': {'Not sure': 11, 'Others': 8, 'Yes': 178, 'No': 10}, 27 | '2017-08-16': {'Not sure': 10, 'Yes': 219}, 28 | '2017-07-18': {'Yes': 1, 'No': 1}, 29 | '2017-08-15': {'Yes': 301, 'Others': 4, 'Not sure': 37, 'No': 31}, 30 | '2017-08-08': {'Yes': 38, 'No': 2, 'Others': 1}, 31 | '2017-08-09': {'Yes': 120, 'Not sure': 3}, 32 | '2017-08-28': {'Yes': 206, 'Others': 2, 'Not sure': 18, 'No': 24}, 33 | '2017-08-14': {'Yes': 46, 'No': 3, 'Not sure': 5, 'Others': 7} 34 | } 35 | 36 | df = pd.DataFrame.from_dict(dic, orient="index") 37 | df = df.fillna(0) 38 | df.index = pd.to_datetime(df.index) 39 | df.index.name = 'Date' 40 | df.sort_index(inplace=True) 41 | 42 | df['Total'] = df.Yes + df['Not sure'] + df.No + df.Others 43 | df['Precision'] = round(df.Yes/df.Total, 2) 44 | df 45 | source = ColumnDataSource(df) 46 | 47 | p = figure(x_axis_type="datetime", plot_width=800, plot_height=350) 48 | p.line('Date', 'Precision', source=source) 49 | 50 | show(p) 51 | 52 | 53 | if __name__ == '__main__': 54 | main() 55 | -------------------------------------------------------------------------------- /web/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/web/__init__.py -------------------------------------------------------------------------------- /web/django/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/web/django/__init__.py -------------------------------------------------------------------------------- /web/django/django_first_app/README.md: -------------------------------------------------------------------------------- 1 | # Django-first-app 2 | 3 | Username: admin 4 | Password: passwordadmin 5 | 6 | ## Like it? Awesome! 7 | If you find this tool useful, consider supporting or [buying me a beer](https://www.paypal.me/garciparedes/2):) -------------------------------------------------------------------------------- /web/django/django_first_app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/web/django/django_first_app/__init__.py -------------------------------------------------------------------------------- /web/django/django_first_app/db.sqlite3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/web/django/django_first_app/db.sqlite3 -------------------------------------------------------------------------------- /web/django/django_first_app/django_first_app/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/web/django/django_first_app/django_first_app/__init__.py -------------------------------------------------------------------------------- /web/django/django_first_app/django_first_app/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for django_first_app project. 
3 | 4 | Generated by 'django-admin startproject' using Django 1.9.6. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/1.9/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/1.9/ref/settings/ 11 | """ 12 | 13 | import os 14 | 15 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 16 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 17 | 18 | 19 | # Quick-start development settings - unsuitable for production 20 | # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ 21 | 22 | # SECURITY WARNING: keep the secret key used in production secret! 23 | SECRET_KEY = '6jf-1s7m*9a#@kcc7&&!d(10=c9kp&$d)o)=4qtj@*ch04a_#z' 24 | 25 | # SECURITY WARNING: don't run with debug turned on in production! 26 | DEBUG = True 27 | 28 | ALLOWED_HOSTS = [] 29 | 30 | 31 | # Application definition 32 | 33 | INSTALLED_APPS = [ 34 | 'polls.apps.PollsConfig', 35 | 'django.contrib.admin', 36 | 'django.contrib.auth', 37 | 'django.contrib.contenttypes', 38 | 'django.contrib.sessions', 39 | 'django.contrib.messages', 40 | 'django.contrib.staticfiles', 41 | ] 42 | 43 | MIDDLEWARE_CLASSES = [ 44 | 'django.middleware.security.SecurityMiddleware', 45 | 'django.contrib.sessions.middleware.SessionMiddleware', 46 | 'django.middleware.common.CommonMiddleware', 47 | 'django.middleware.csrf.CsrfViewMiddleware', 48 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 49 | 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 50 | 'django.contrib.messages.middleware.MessageMiddleware', 51 | 'django.middleware.clickjacking.XFrameOptionsMiddleware', 52 | ] 53 | 54 | ROOT_URLCONF = 'django_first_app.urls' 55 | 56 | TEMPLATES = [ 57 | { 58 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 59 | 'DIRS': [], 60 | 'APP_DIRS': True, 61 | 'OPTIONS': { 62 | 'context_processors': [ 63 | 
'django.template.context_processors.debug', 64 | 'django.template.context_processors.request', 65 | 'django.contrib.auth.context_processors.auth', 66 | 'django.contrib.messages.context_processors.messages', 67 | ], 68 | }, 69 | }, 70 | ] 71 | 72 | WSGI_APPLICATION = 'django_first_app.wsgi.application' 73 | 74 | 75 | # Database 76 | # https://docs.djangoproject.com/en/1.9/ref/settings/#databases 77 | 78 | DATABASES = { 79 | 'default': { 80 | 'ENGINE': 'django.db.backends.sqlite3', 81 | 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), 82 | } 83 | } 84 | 85 | 86 | # Password validation 87 | # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators 88 | 89 | AUTH_PASSWORD_VALIDATORS = [ 90 | { 91 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 92 | }, 93 | { 94 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 95 | }, 96 | { 97 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 98 | }, 99 | { 100 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 101 | }, 102 | ] 103 | 104 | 105 | # Internationalization 106 | # https://docs.djangoproject.com/en/1.9/topics/i18n/ 107 | 108 | LANGUAGE_CODE = 'en-us' 109 | 110 | TIME_ZONE = 'UTC' 111 | 112 | USE_I18N = True 113 | 114 | USE_L10N = True 115 | 116 | USE_TZ = True 117 | 118 | 119 | # Static files (CSS, JavaScript, Images) 120 | # https://docs.djangoproject.com/en/1.9/howto/static-files/ 121 | 122 | STATIC_URL = '/static/' 123 | -------------------------------------------------------------------------------- /web/django/django_first_app/django_first_app/urls.py: -------------------------------------------------------------------------------- 1 | """django_first_app URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/1.9/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. 
Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.conf.urls import url, include 14 | 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) 15 | """ 16 | from web.django import include, url 17 | from web.django import admin 18 | 19 | urlpatterns = [ 20 | url(r'^polls/', include('polls.urls')), 21 | url(r'^admin/', admin.site.urls), 22 | ] 23 | -------------------------------------------------------------------------------- /web/django/django_first_app/django_first_app/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for django_first_app project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 
5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from web.django import get_wsgi_application 13 | 14 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_first_app.settings") 15 | 16 | application = get_wsgi_application() 17 | -------------------------------------------------------------------------------- /web/django/django_first_app/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import sys 4 | 5 | if __name__ == "__main__": 6 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_first_app.settings") 7 | 8 | from web.django import execute_from_command_line 9 | 10 | execute_from_command_line(sys.argv) 11 | -------------------------------------------------------------------------------- /web/django/django_first_app/polls/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/web/django/django_first_app/polls/__init__.py -------------------------------------------------------------------------------- /web/django/django_first_app/polls/admin.py: -------------------------------------------------------------------------------- 1 | # Register your models here. 
2 | 3 | from web.django import admin 4 | 5 | from .models import Question, Choice 6 | 7 | 8 | class ChoiceInline(admin.TabularInline): 9 | model = Choice 10 | extra = 3 11 | 12 | 13 | class QuestionAdmin(admin.ModelAdmin): 14 | fieldsets = [ 15 | (None, {'fields': ['question_text']}), 16 | ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}), 17 | ] 18 | inlines = [ChoiceInline] 19 | list_display = ('question_text', 'pub_date', 'was_published_recently') 20 | list_filter = ['pub_date'] 21 | search_fields = ['question_text'] 22 | 23 | 24 | admin.site.register(Question, QuestionAdmin) 25 | -------------------------------------------------------------------------------- /web/django/django_first_app/polls/apps.py: -------------------------------------------------------------------------------- 1 | from web.django import AppConfig 2 | 3 | 4 | class PollsConfig(AppConfig): 5 | name = 'polls' 6 | -------------------------------------------------------------------------------- /web/django/django_first_app/polls/migrations/0001_initial.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by Django 1.9.6 on 2017-01-31 17:51 3 | from __future__ import unicode_literals 4 | 5 | from web.django import migrations, models 6 | from web import django 7 | 8 | 9 | class Migration(migrations.Migration): 10 | 11 | initial = True 12 | 13 | dependencies = [ 14 | ] 15 | 16 | operations = [ 17 | migrations.CreateModel( 18 | name='Choice', 19 | fields=[ 20 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 21 | ('choice_text', models.CharField(max_length=200)), 22 | ('votes', models.IntegerField(default=0)), 23 | ], 24 | ), 25 | migrations.CreateModel( 26 | name='Question', 27 | fields=[ 28 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 29 | ('question_text', models.CharField(max_length=200)), 30 
| ('pub_date', models.DateTimeField(verbose_name='date published')), 31 | ], 32 | ), 33 | migrations.AddField( 34 | model_name='choice', 35 | name='question', 36 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'), 37 | ), 38 | ] 39 | -------------------------------------------------------------------------------- /web/django/django_first_app/polls/migrations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/web/django/django_first_app/polls/migrations/__init__.py -------------------------------------------------------------------------------- /web/django/django_first_app/polls/models.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | from web.django import models 4 | from web.django import timezone 5 | 6 | 7 | # Create your models here. 8 | 9 | 10 | class Question(models.Model): 11 | question_text = models.CharField(max_length=200) 12 | pub_date = models.DateTimeField('date published') 13 | 14 | def __str__(self): 15 | return self.question_text 16 | 17 | def was_published_recently(self): 18 | now = timezone.now() 19 | return now - datetime.timedelta(days=1) <= self.pub_date <= now 20 | 21 | was_published_recently.admin_order_field = 'pub_date' 22 | was_published_recently.boolean = True 23 | was_published_recently.short_description = 'Published recently?' 
24 | 25 | 26 | class Choice(models.Model): 27 | question = models.ForeignKey(Question, on_delete=models.CASCADE) 28 | choice_text = models.CharField(max_length=200) 29 | votes = models.IntegerField(default=0) 30 | 31 | def __str__(self): 32 | return self.choice_text 33 | -------------------------------------------------------------------------------- /web/django/django_first_app/polls/templates/polls/detail.html: -------------------------------------------------------------------------------- 1 |

{{ question.question_text }}

2 | 3 | {% if error_message %}

{{ error_message }}

{% endif %} 4 | 5 |
6 | {% csrf_token %} 7 | {% for choice in question.choice_set.all %} 8 | 9 |
10 | {% endfor %} 11 | 12 |
-------------------------------------------------------------------------------- /web/django/django_first_app/polls/templates/polls/index.html: -------------------------------------------------------------------------------- 1 | {% load static %} 2 | 3 | 4 | 5 | 6 | {% if latest_question_list %} 7 | 12 | {% else %} 13 |

No polls are available.

14 | {% endif %} -------------------------------------------------------------------------------- /web/django/django_first_app/polls/templates/polls/results.html: -------------------------------------------------------------------------------- 1 |

{{ question.question_text }}

2 | 3 | 8 | 9 | Vote again? -------------------------------------------------------------------------------- /web/django/django_first_app/polls/templates/polls/style.css: -------------------------------------------------------------------------------- 1 | li a { 2 | color: green; 3 | } -------------------------------------------------------------------------------- /web/django/django_first_app/polls/tests.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | from web.django import timezone 4 | from web.django import TestCase 5 | from web.django import reverse 6 | 7 | from .models import Question 8 | 9 | 10 | class QuestionMethodTests(TestCase): 11 | def test_was_published_recently_with_future_question(self): 12 | """ 13 | was_published_recently() should return False for questions whose 14 | pub_date is in the future. 15 | """ 16 | time = timezone.now() + datetime.timedelta(days=30) 17 | future_question = Question(pub_date=time) 18 | self.assertIs(future_question.was_published_recently(), False) 19 | 20 | def test_was_published_recently_with_old_question(self): 21 | """ 22 | was_published_recently() should return False for questions whose 23 | pub_date is older than 1 day. 24 | """ 25 | time = timezone.now() - datetime.timedelta(days=30) 26 | old_question = Question(pub_date=time) 27 | self.assertIs(old_question.was_published_recently(), False) 28 | 29 | def test_was_published_recently_with_recent_question(self): 30 | """ 31 | was_published_recently() should return True for questions whose 32 | pub_date is within the last day. 
33 | """ 34 | time = timezone.now() - datetime.timedelta(hours=1) 35 | recent_question = Question(pub_date=time) 36 | self.assertIs(recent_question.was_published_recently(), True) 37 | 38 | 39 | def create_question(question_text, days): 40 | """ 41 | Creates a question with the given `question_text` and published the 42 | given number of `days` offset to now (negative for questions published 43 | in the past, positive for questions that have yet to be published). 44 | """ 45 | time = timezone.now() + datetime.timedelta(days=days) 46 | return Question.objects.create(question_text=question_text, pub_date=time) 47 | 48 | 49 | class QuestionViewTests(TestCase): 50 | def test_index_view_with_no_questions(self): 51 | """ 52 | If no questions exist, an appropriate message should be displayed. 53 | """ 54 | response = self.client.get(reverse('polls:index')) 55 | self.assertEqual(response.status_code, 200) 56 | self.assertContains(response, "No polls are available.") 57 | self.assertQuerysetEqual(response.context['latest_question_list'], []) 58 | 59 | def test_index_view_with_a_past_question(self): 60 | """ 61 | Questions with a pub_date in the past should be displayed on the 62 | index page. 63 | """ 64 | create_question(question_text="Past question.", days=-30) 65 | response = self.client.get(reverse('polls:index')) 66 | self.assertQuerysetEqual( 67 | response.context['latest_question_list'], 68 | [''] 69 | ) 70 | 71 | def test_index_view_with_a_future_question(self): 72 | """ 73 | Questions with a pub_date in the future should not be displayed on 74 | the index page. 
75 | """ 76 | create_question(question_text="Future question.", days=30) 77 | response = self.client.get(reverse('polls:index')) 78 | self.assertContains(response, "No polls are available.") 79 | self.assertQuerysetEqual(response.context['latest_question_list'], []) 80 | 81 | def test_index_view_with_future_question_and_past_question(self): 82 | """ 83 | Even if both past and future questions exist, only past questions 84 | should be displayed. 85 | """ 86 | create_question(question_text="Past question.", days=-30) 87 | create_question(question_text="Future question.", days=30) 88 | response = self.client.get(reverse('polls:index')) 89 | self.assertQuerysetEqual( 90 | response.context['latest_question_list'], 91 | [''] 92 | ) 93 | 94 | def test_index_view_with_two_past_questions(self): 95 | """ 96 | The questions index page may display multiple questions. 97 | """ 98 | create_question(question_text="Past question 1.", days=-30) 99 | create_question(question_text="Past question 2.", days=-5) 100 | response = self.client.get(reverse('polls:index')) 101 | self.assertQuerysetEqual( 102 | response.context['latest_question_list'], 103 | ['', ''] 104 | ) 105 | 106 | 107 | class QuestionIndexDetailTests(TestCase): 108 | def test_detail_view_with_a_future_question(self): 109 | """ 110 | The detail view of a question with a pub_date in the future should 111 | return a 404 not found. 112 | """ 113 | future_question = create_question(question_text='Future question.', days=5) 114 | url = reverse('polls:detail', args=(future_question.id,)) 115 | response = self.client.get(url) 116 | self.assertEqual(response.status_code, 404) 117 | 118 | def test_detail_view_with_a_past_question(self): 119 | """ 120 | The detail view of a question with a pub_date in the past should 121 | display the question's text. 
122 | """ 123 | past_question = create_question(question_text='Past Question.', days=-5) 124 | url = reverse('polls:detail', args=(past_question.id,)) 125 | response = self.client.get(url) 126 | self.assertContains(response, past_question.question_text) 127 | -------------------------------------------------------------------------------- /web/django/django_first_app/polls/urls.py: -------------------------------------------------------------------------------- 1 | from web.django import url 2 | 3 | from . import views 4 | 5 | app_name = 'polls' 6 | urlpatterns = [ 7 | url(r'^$', views.IndexView.as_view(), name='index'), 8 | url(r'^(?P[0-9]+)/$', views.DetailView.as_view(), name='detail'), 9 | url(r'^(?P[0-9]+)/results/$', views.ResultsView.as_view(), name='results'), 10 | url(r'^(?P[0-9]+)/vote/$', views.vote, name='vote'), 11 | ] 12 | -------------------------------------------------------------------------------- /web/django/django_first_app/polls/views.py: -------------------------------------------------------------------------------- 1 | from web.django import get_object_or_404, render 2 | from web.django import HttpResponseRedirect 3 | from web.django import reverse 4 | from web.django import generic 5 | from web.django import timezone 6 | 7 | from .models import Choice, Question 8 | 9 | 10 | # Create your views here. 11 | 12 | class IndexView(generic.ListView): 13 | template_name = 'polls/index.html' 14 | context_object_name = 'latest_question_list' 15 | 16 | def get_queryset(self): 17 | """ 18 | Return the last five published questions (not including those set to be 19 | published in the future). 20 | """ 21 | return Question.objects.filter( 22 | pub_date__lte=timezone.now() 23 | ).order_by('-pub_date')[:5] 24 | 25 | 26 | class DetailView(generic.DetailView): 27 | model = Question 28 | template_name = 'polls/detail.html' 29 | 30 | def get_queryset(self): 31 | """ 32 | Excludes any questions that aren't published yet. 
33 | """ 34 | return Question.objects.filter(pub_date__lte=timezone.now()) 35 | 36 | 37 | class ResultsView(generic.DetailView): 38 | model = Question 39 | template_name = 'polls/results.html' 40 | 41 | 42 | def vote(request, question_id): 43 | question = get_object_or_404(Question, pk=question_id) 44 | try: 45 | selected_choice = question.choice_set.get(pk=request.POST['choice']) 46 | except (KeyError, Choice.DoesNotExist): 47 | # Redisplay the question voting form. 48 | return render(request, 'polls/detail.html', { 49 | 'question': question, 50 | 'error_message': "You didn't select a choice.", 51 | }) 52 | else: 53 | selected_choice.votes += 1 54 | selected_choice.save() 55 | # Always return an HttpResponseRedirect after successfully dealing 56 | # with POST data. This prevents data from being posted twice if a 57 | # user hits the Back button. 58 | return HttpResponseRedirect(reverse('polls:results', args=(question.id,))) 59 | -------------------------------------------------------------------------------- /web/django/graphene/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/web/django/graphene/__init__.py -------------------------------------------------------------------------------- /web/django/graphene/graphene-django-tutorial/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/web/django/graphene/graphene-django-tutorial/__init__.py -------------------------------------------------------------------------------- /web/django/graphene/graphene-django-tutorial/cookbook/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/web/django/graphene/graphene-django-tutorial/cookbook/__init__.py -------------------------------------------------------------------------------- /web/django/graphene/graphene-django-tutorial/cookbook/ingredients/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/web/django/graphene/graphene-django-tutorial/cookbook/ingredients/__init__.py -------------------------------------------------------------------------------- /web/django/graphene/graphene-django-tutorial/cookbook/ingredients/admin.py: -------------------------------------------------------------------------------- 1 | # Register your models here. 2 | -------------------------------------------------------------------------------- /web/django/graphene/graphene-django-tutorial/cookbook/ingredients/apps.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from web.django import AppConfig 4 | 5 | 6 | class IngredientsConfig(AppConfig): 7 | name = 'cookbook.ingredients' 8 | -------------------------------------------------------------------------------- /web/django/graphene/graphene-django-tutorial/cookbook/ingredients/fixtures/ingredients.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "model": "ingredients.category", 4 | "pk": 1, 5 | "fields": { 6 | "name": "Dairy" 7 | } 8 | }, 9 | { 10 | "model": "ingredients.category", 11 | "pk": 2, 12 | "fields": { 13 | "name": "Meat" 14 | } 15 | }, 16 | { 17 | "model": "ingredients.ingredient", 18 | "pk": 1, 19 | "fields": { 20 | "name": "Eggs", 21 | "notes": "Good old eggs", 22 | "category": 1 23 | } 24 | }, 25 | { 26 | "model": "ingredients.ingredient", 27 | "pk": 2, 28 | "fields": { 
29 | "name": "Milk", 30 | "notes": "Comes from a cow", 31 | "category": 1 32 | } 33 | }, 34 | { 35 | "model": "ingredients.ingredient", 36 | "pk": 3, 37 | "fields": { 38 | "name": "Beef", 39 | "notes": "Much like milk, this comes from a cow", 40 | "category": 2 41 | } 42 | }, 43 | { 44 | "model": "ingredients.ingredient", 45 | "pk": 4, 46 | "fields": { 47 | "name": "Chicken", 48 | "notes": "Definitely doesn't come from a cow", 49 | "category": 2 50 | } 51 | } 52 | ] -------------------------------------------------------------------------------- /web/django/graphene/graphene-django-tutorial/cookbook/ingredients/migrations/0001_initial.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Generated by Django 1.9.6 on 2017-02-01 10:36 3 | from __future__ import unicode_literals 4 | 5 | from web.django import migrations, models 6 | from web import django 7 | 8 | 9 | class Migration(migrations.Migration): 10 | 11 | initial = True 12 | 13 | dependencies = [ 14 | ] 15 | 16 | operations = [ 17 | migrations.CreateModel( 18 | name='Category', 19 | fields=[ 20 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 21 | ('name', models.CharField(max_length=100)), 22 | ], 23 | ), 24 | migrations.CreateModel( 25 | name='Ingredient', 26 | fields=[ 27 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), 28 | ('name', models.CharField(max_length=100)), 29 | ('notes', models.TextField()), 30 | ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ingredients', to='ingredients.Category')), 31 | ], 32 | ), 33 | ] 34 | -------------------------------------------------------------------------------- /web/django/graphene/graphene-django-tutorial/cookbook/ingredients/migrations/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/garciparedes/python-examples/a6ed431643b45b910423d3fc4ba27625501a01e5/web/django/graphene/graphene-django-tutorial/cookbook/ingredients/migrations/__init__.py -------------------------------------------------------------------------------- /web/django/graphene/graphene-django-tutorial/cookbook/ingredients/models.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | from web.django import models 4 | 5 | 6 | # Create your models here. 7 | 8 | 9 | class Category(models.Model): 10 | name = models.CharField(max_length=100) 11 | 12 | def __str__(self): 13 | return self.name 14 | 15 | 16 | class Ingredient(models.Model): 17 | name = models.CharField(max_length=100) 18 | notes = models.TextField() 19 | category = models.ForeignKey(Category, related_name='ingredients') 20 | 21 | def __str__(self): 22 | return self.name 23 | -------------------------------------------------------------------------------- /web/django/graphene/graphene-django-tutorial/cookbook/ingredients/schema.py: -------------------------------------------------------------------------------- 1 | from graphene import relay, ObjectType, AbstractType 2 | from graphene_django import DjangoObjectType 3 | from graphene_django.filter import DjangoFilterConnectionField 4 | 5 | from .models import Category, Ingredient 6 | 7 | 8 | # Graphene will automatically map the Category model's fields onto the CategoryNode. 
from graphene import relay, ObjectType, AbstractType
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField

from .models import Category, Ingredient


# Graphene will automatically map the Category model's fields onto the CategoryNode.
# This is configured in the CategoryNode's Meta class (as you can see below)
class CategoryNode(DjangoObjectType):
    """Relay node type exposing the Category model."""

    class Meta:
        model = Category
        # Plain list form: exact-match filters on these fields only.
        filter_fields = ['name', 'ingredients']
        interfaces = (relay.Node, )


class IngredientNode(DjangoObjectType):
    """Relay node type exposing the Ingredient model."""

    class Meta:
        model = Ingredient
        # Allow for some more advanced filtering here
        filter_fields = {
            'name': ['exact', 'icontains', 'istartswith'],
            'notes': ['exact', 'icontains'],
            'category': ['exact'],
            # Filter by the related Category's name across the FK join.
            'category__name': ['exact'],
        }
        interfaces = (relay.Node, )


class Query(AbstractType):
    # Single-object lookup by Relay global ID.
    category = relay.Node.Field(CategoryNode)
    # Filterable Relay connection over all rows.
    all_categories = DjangoFilterConnectionField(CategoryNode)

    ingredient = relay.Node.Field(IngredientNode)
    all_ingredients = DjangoFilterConnectionField(IngredientNode)
import graphene

from .ingredients import schema as i_schema


class Query(i_schema.Query, graphene.ObjectType):
    # This class will inherit from multiple Queries
    # as we begin to add more apps to our project
    pass


# Root schema object; settings.GRAPHENE['SCHEMA'] points at this attribute
# via the dotted path 'cookbook.schema.schema'.
schema = graphene.Schema(query=Query)
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # GraphQL integration plus this tutorial's own app.
    'graphene_django',
    'cookbook.ingredients',
]

# graphene_django resolves the root schema through this dotted path
# (the `schema` object defined in cookbook/schema.py).
GRAPHENE = {
    'SCHEMA': 'cookbook.schema.schema'
}

# Django 1.9 setting name; renamed to MIDDLEWARE in Django 1.10+.
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'cookbook.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'cookbook.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # File-based SQLite DB in the project root (fine for the tutorial).
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

STATIC_URL = '/static/'
# =====================================================================
# cookbook/urls.py — URL routing for the cookbook project.
# =====================================================================
# NOTE(review): the dump carried these as "from web.django import url/admin";
# in Django 1.9 url() lives in django.conf.urls and the admin site object in
# django.contrib.admin.
from django.conf.urls import url
from django.contrib import admin

from graphene_django.views import GraphQLView

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Single GraphQL endpoint; graphiql=True also serves the in-browser IDE.
    url(r'^graphql', GraphQLView.as_view(graphiql=True)),
]


# =====================================================================
# cookbook/wsgi.py — exposes the WSGI callable as ``application``.
# See https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
# =====================================================================
import os

# NOTE(review): was "from web.django import get_wsgi_application" in the dump;
# the canonical location is django.core.wsgi.
from django.core.wsgi import get_wsgi_application

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cookbook.settings")

application = get_wsgi_application()


# =====================================================================
# manage.py — Django's command-line entry point for this project.
# =====================================================================
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cookbook.settings")

    # Imported after DJANGO_SETTINGS_MODULE is set, as in the stock template;
    # was "from web.django import execute_from_command_line" in the dump.
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
#!/usr/bin/env python3

"""
URL: http://docs.graphene-python.org/en/latest/quickstart/
"""

import graphene
import utils.json as uj


class Query(graphene.ObjectType):
    # Scalar field; graphene resolves it through resolve_hello below.
    hello = graphene.String()

    def resolve_hello(self, args, context, info):
        # graphene 1.x resolver signature: (self, args, context, info).
        return 'World'


schema = graphene.Schema(query=Query)

# Execute a raw GraphQL query string directly against the schema.
result = schema.execute('{ hello }')

# Pretty-print result.data ({'hello': 'World'}) via the project JSON helper.
print(uj.dict_to_json(result.data))
from graphql.language import ast


class DateTime(Scalar):
    """DateTime Scalar Description"""

    @staticmethod
    def serialize(dt):
        # Outbound: datetime -> ISO-8601 string.
        # NOTE(review): isoformat() omits the ".%f" part when microsecond == 0,
        # so such values would not round-trip through parse_value — confirm
        # inputs always carry microseconds.
        return dt.isoformat()

    @staticmethod
    def parse_literal(node):
        # Inbound from a query document: only string AST literals are
        # accepted; any other node type falls through and yields None.
        if isinstance(node, ast.StringValue):
            return datetime.datetime.strptime(
                node.value, "%Y-%m-%dT%H:%M:%S.%f")

    @staticmethod
    def parse_value(value):
        # Inbound from query variables: ISO string, microseconds required.
        return datetime.datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f")


class Person(graphene.ObjectType):
    # Minimal object type using the default String scalar for contrast.
    name = graphene.String()
#!/usr/bin/env python3

"""
URL: http://docs.graphene-python.org/en/latest/types/objecttypes/
"""

import graphene


class Person(graphene.ObjectType):
    """Object type whose full_name field is computed from the other two."""

    first_name = graphene.String()
    last_name = graphene.String()
    full_name = graphene.String()

    def resolve_full_name(self, args, context, info):
        # Join the two name parts with a single space.
        parts = (self.first_name, self.last_name)
        return '{0} {1}'.format(*parts)
    def mutate(self, args, context, info):
        """Build a Person from the ``person_data`` input and report success."""
        p_data = args.get('person_data')

        name = p_data.get('name')
        age = p_data.get('age')

        person = Person(name=name, age=age)
        # This demo never fails, so ok is unconditionally True.
        ok = True
        return CreatePerson(person=person, ok=ok)


class LatLngInput(graphene.InputObjectType):
    lat = graphene.Float()
    lng = graphene.Float()


# A location has a latlng associated to it
class LocationInput(graphene.InputObjectType):
    name = graphene.String()
    latlng = graphene.InputField(LatLngInput)


class MyMutations(graphene.ObjectType):
    create_person = CreatePerson.Field()


# Mutation-only schema (no query root), executed directly below.
schema = graphene.Schema(mutation=MyMutations)

# Inline GraphQL mutation document built by string continuation.
query_string = 'mutation myFirstMutation {' \
               ' createPerson(personData: {name:"Peter", age: 24}) {' \
               ' person {' \
               ' name,' \
               ' age' \
               ' }' \
               ' ok' \
               ' }' \
               '}'

result = schema.execute(query_string)

print(uj.dict_to_json(result.data))
| """ 4 | URL: http://docs.graphene-python.org/en/latest/execution/middleware/ 5 | """ 6 | 7 | import graphene 8 | import utils.json as uj 9 | 10 | 11 | class AuthorizationMiddleware(object): 12 | def resolve(self, next, root, args, context, info): 13 | if info.field_name == 'user': 14 | return None 15 | return next(root, args, context, info) 16 | 17 | 18 | class Query(graphene.ObjectType): 19 | name = graphene.String() 20 | 21 | def resolve_name(self, args, context, info): 22 | return context.get('name') 23 | 24 | 25 | schema = graphene.Schema(Query) 26 | result = schema.execute('{ name }', context_value={'name': 'Syrus'}, middleware=[AuthorizationMiddleware()]) 27 | 28 | print(uj.dict_to_json(result.data)) 29 | --------------------------------------------------------------------------------