├── .gitignore
├── LICENSE
├── Parquet Examples
│   ├── parquet_pandas_example.ipynb
│   ├── parquet_pyspark_example.ipynb
│   └── parquet_text_analyis.ipynb
├── PySpark Examples
│   ├── PySpark - Extract Popular Images.ipynb
│   ├── PySpark - Find Images Shared Between Domains.ipynb
│   ├── PySpark - Finding Hyperlinks within Collection on Pages with Certain Keyword.ipynb
│   ├── PySpark - aut standard derivatives.ipynb
│   ├── PySpark - saveToDisk.ipynb
│   ├── arch-derivatives.ipynb
│   ├── aut-pyspark-documentation-examples.ipynb
│   └── aut_pyspark.ipynb
├── README.md
├── arch
│   ├── arch_example.ipynb
│   ├── audio-information.ipynb
│   ├── css-file-information.ipynb
│   ├── domain-frequency.ipynb
│   ├── domain-graph.ipynb
│   ├── filtering_examples.ipynb
│   ├── html-file-information.ipynb
│   ├── image-graph.ipynb
│   ├── image-information.ipynb
│   ├── js-file-information.ipynb
│   ├── json-file-information.ipynb
│   ├── pdf-information.ipynb
│   ├── plain-text-file-information.ipynb
│   ├── powerpoint-information.ipynb
│   ├── spreadsheet-information.ipynb
│   ├── video-information.ipynb
│   ├── web-graph.ipynb
│   ├── web-pages.ipynb
│   └── xml-file-information.ipynb
├── assets
│   ├── jupyter-shell.png
│   ├── juypter.png
│   └── ny-schedule.png
├── aut-issues
│   └── aut_428_testing.ipynb
├── datathon-nyc
│   ├── README.md
│   ├── parquet_pandas_stonewall.ipynb
│   ├── parquet_pandas_stonewall_cc_vm.ipynb
│   ├── parquet_text_analyis_popline.ipynb
│   ├── parquet_text_analyis_popline_cc_vm.ipynb
│   └── stonewall_text_analysis.ipynb
├── geocities
│   ├── geocities_binary_information.ipynb
│   └── geocities_domain_frequency.ipynb
└── word-document-information.ipynb

/.gitignore:
--------------------------------------------------------------------------------
1 | .ipynb_checkpoints/
2 | datathon-nyc/data/
3 | .DS_Store
4 | all-domains/
5 | full-text/
6 | test.gexf
7 | test.graphml
8 | 
9 | # Byte-compiled / optimized / DLL files
10 | __pycache__/
11 | *.py[cod]
12 | *$py.class
13 | 
14 | # C extensions
15 | *.so
16 | 
17 | # Distribution / packaging
18 | .Python
19 | build/
20 | develop-eggs/
21 | dist/
22 | downloads/
23 | eggs/
24 | .eggs/
25 | lib/
26 | lib64/
27 | parts/
28 | sdist/
29 | var/
30 | wheels/
31 | pip-wheel-metadata/
32 | share/python-wheels/
33 | *.egg-info/
34 | .installed.cfg
35 | *.egg
36 | MANIFEST
37 | 
38 | # PyInstaller
39 | # Usually these files are written by a python script from a template
40 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
41 | *.manifest
42 | *.spec
43 | 
44 | # Installer logs
45 | pip-log.txt
46 | pip-delete-this-directory.txt
47 | 
48 | # Unit test / coverage reports
49 | htmlcov/
50 | .tox/
51 | .nox/
52 | .coverage
53 | .coverage.*
54 | .cache
55 | nosetests.xml
56 | coverage.xml
57 | *.cover
58 | *.py,cover
59 | .hypothesis/
60 | .pytest_cache/
61 | cover/
62 | 
63 | # Translations
64 | *.mo
65 | *.pot
66 | 
67 | # Django stuff:
68 | *.log
69 | local_settings.py
70 | db.sqlite3
71 | db.sqlite3-journal
72 | 
73 | # Flask stuff:
74 | instance/
75 | .webassets-cache
76 | 
77 | # Scrapy stuff:
78 | .scrapy
79 | 
80 | # Sphinx documentation
81 | docs/_build/
82 | 
83 | # PyBuilder
84 | .pybuilder/
85 | target/
86 | 
87 | # Jupyter Notebook
88 | .ipynb_checkpoints
89 | 
90 | # IPython
91 | profile_default/
92 | ipython_config.py
93 | 
94 | # pyenv
95 | # For a library or package, you might want to ignore these files since the code is
96 | # intended to run in multiple environments; otherwise, check them in:
97 | # .python-version
98 | 
99 | # pipenv
100 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
101 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
102 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
103 | # install all needed dependencies.
104 | #Pipfile.lock
105 | 
106 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
107 | __pypackages__/
108 | 
109 | # Celery stuff
110 | celerybeat-schedule
111 | celerybeat.pid
112 | 
113 | # SageMath parsed files
114 | *.sage.py
115 | 
116 | # Environments
117 | .env
118 | .venv
119 | env/
120 | venv/
121 | ENV/
122 | env.bak/
123 | venv.bak/
124 | 
125 | # Spyder project settings
126 | .spyderproject
127 | .spyproject
128 | 
129 | # Rope project settings
130 | .ropeproject
131 | 
132 | # mkdocs documentation
133 | /site
134 | 
135 | # mypy
136 | .mypy_cache/
137 | .dmypy.json
138 | dmypy.json
139 | 
140 | # Pyre type checker
141 | .pyre/
142 | 
143 | # pytype static type analyzer
144 | .pytype/
145 | 
146 | # Cython debug symbols
147 | cython_debug/
148 | 
149 | # static files generated from Django application using `collectstatic`
150 | media
151 | static
152 | 

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |                                  Apache License
2 |                            Version 2.0, January 2004
3 |                         http://www.apache.org/licenses/
4 | 
5 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 |    1. Definitions.
8 | 
9 |       "License" shall mean the terms and conditions for use, reproduction,
10 |       and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 |       "Licensor" shall mean the copyright owner or entity authorized by
13 |       the copyright owner that is granting the License.
14 | 
15 |       "Legal Entity" shall mean the union of the acting entity and all
16 |       other entities that control, are controlled by, or are under common
17 |       control with that entity. For the purposes of this definition,
18 |       "control" means (i) the power, direct or indirect, to cause the
19 |       direction or management of such entity, whether by contract or
20 |       otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 |       outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 |       "You" (or "Your") shall mean an individual or Legal Entity
24 |       exercising permissions granted by this License.
25 | 
26 |       "Source" form shall mean the preferred form for making modifications,
27 |       including but not limited to software source code, documentation
28 |       source, and configuration files.
29 | 
30 |       "Object" form shall mean any form resulting from mechanical
31 |       transformation or translation of a Source form, including but
32 |       not limited to compiled object code, generated documentation,
33 |       and conversions to other media types.
34 | 
35 |       "Work" shall mean the work of authorship, whether in Source or
36 |       Object form, made available under the License, as indicated by a
37 |       copyright notice that is included in or attached to the work
38 |       (an example is provided in the Appendix below).
39 | 
40 |       "Derivative Works" shall mean any work, whether in Source or Object
41 |       form, that is based on (or derived from) the Work and for which the
42 |       editorial revisions, annotations, elaborations, or other modifications
43 |       represent, as a whole, an original work of authorship. For the purposes
44 |       of this License, Derivative Works shall not include works that remain
45 |       separable from, or merely link (or bind by name) to the interfaces of,
46 |       the Work and Derivative Works thereof.
47 | 
48 |       "Contribution" shall mean any work of authorship, including
49 |       the original version of the Work and any modifications or additions
50 |       to that Work or Derivative Works thereof, that is intentionally
51 |       submitted to Licensor for inclusion in the Work by the copyright owner
52 |       or by an individual or Legal Entity authorized to submit on behalf of
53 |       the copyright owner. For the purposes of this definition, "submitted"
54 |       means any form of electronic, verbal, or written communication sent
55 |       to the Licensor or its representatives, including but not limited to
56 |       communication on electronic mailing lists, source code control systems,
57 |       and issue tracking systems that are managed by, or on behalf of, the
58 |       Licensor for the purpose of discussing and improving the Work, but
59 |       excluding communication that is conspicuously marked or otherwise
60 |       designated in writing by the copyright owner as "Not a Contribution."
61 | 
62 |       "Contributor" shall mean Licensor and any individual or Legal Entity
63 |       on behalf of whom a Contribution has been received by Licensor and
64 |       subsequently incorporated within the Work.
65 | 
66 |    2. Grant of Copyright License. Subject to the terms and conditions of
67 |       this License, each Contributor hereby grants to You a perpetual,
68 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 |       copyright license to reproduce, prepare Derivative Works of,
70 |       publicly display, publicly perform, sublicense, and distribute the
71 |       Work and such Derivative Works in Source or Object form.
72 | 
73 |    3. Grant of Patent License. Subject to the terms and conditions of
74 |       this License, each Contributor hereby grants to You a perpetual,
75 |       worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 |       (except as stated in this section) patent license to make, have made,
77 |       use, offer to sell, sell, import, and otherwise transfer the Work,
78 |       where such license applies only to those patent claims licensable
79 |       by such Contributor that are necessarily infringed by their
80 |       Contribution(s) alone or by combination of their Contribution(s)
81 |       with the Work to which such Contribution(s) was submitted. If You
82 |       institute patent litigation against any entity (including a
83 |       cross-claim or counterclaim in a lawsuit) alleging that the Work
84 |       or a Contribution incorporated within the Work constitutes direct
85 |       or contributory patent infringement, then any patent licenses
86 |       granted to You under this License for that Work shall terminate
87 |       as of the date such litigation is filed.
88 | 
89 |    4. Redistribution. You may reproduce and distribute copies of the
90 |       Work or Derivative Works thereof in any medium, with or without
91 |       modifications, and in Source or Object form, provided that You
92 |       meet the following conditions:
93 | 
94 |       (a) You must give any other recipients of the Work or
95 |           Derivative Works a copy of this License; and
96 | 
97 |       (b) You must cause any modified files to carry prominent notices
98 |           stating that You changed the files; and
99 | 
100 |       (c) You must retain, in the Source form of any Derivative Works
101 |           that You distribute, all copyright, patent, trademark, and
102 |           attribution notices from the Source form of the Work,
103 |           excluding those notices that do not pertain to any part of
104 |           the Derivative Works; and
105 | 
106 |       (d) If the Work includes a "NOTICE" text file as part of its
107 |           distribution, then any Derivative Works that You distribute must
108 |           include a readable copy of the attribution notices contained
109 |           within such NOTICE file, excluding those notices that do not
110 |           pertain to any part of the Derivative Works, in at least one
111 |           of the following places: within a NOTICE text file distributed
112 |           as part of the Derivative Works; within the Source form or
113 |           documentation, if provided along with the Derivative Works; or,
114 |           within a display generated by the Derivative Works, if and
115 |           wherever such third-party notices normally appear. The contents
116 |           of the NOTICE file are for informational purposes only and
117 |           do not modify the License. You may add Your own attribution
118 |           notices within Derivative Works that You distribute, alongside
119 |           or as an addendum to the NOTICE text from the Work, provided
120 |           that such additional attribution notices cannot be construed
121 |           as modifying the License.
122 | 
123 |       You may add Your own copyright statement to Your modifications and
124 |       may provide additional or different license terms and conditions
125 |       for use, reproduction, or distribution of Your modifications, or
126 |       for any such Derivative Works as a whole, provided Your use,
127 |       reproduction, and distribution of the Work otherwise complies with
128 |       the conditions stated in this License.
129 | 
130 |    5. Submission of Contributions. Unless You explicitly state otherwise,
131 |       any Contribution intentionally submitted for inclusion in the Work
132 |       by You to the Licensor shall be under the terms and conditions of
133 |       this License, without any additional terms or conditions.
134 |       Notwithstanding the above, nothing herein shall supersede or modify
135 |       the terms of any separate license agreement you may have executed
136 |       with Licensor regarding such Contributions.
137 | 
138 |    6. Trademarks. This License does not grant permission to use the trade
139 |       names, trademarks, service marks, or product names of the Licensor,
140 |       except as required for reasonable and customary use in describing the
141 |       origin of the Work and reproducing the content of the NOTICE file.
142 | 
143 |    7. Disclaimer of Warranty. Unless required by applicable law or
144 |       agreed to in writing, Licensor provides the Work (and each
145 |       Contributor provides its Contributions) on an "AS IS" BASIS,
146 |       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 |       implied, including, without limitation, any warranties or conditions
148 |       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 |       PARTICULAR PURPOSE. You are solely responsible for determining the
150 |       appropriateness of using or redistributing the Work and assume any
151 |       risks associated with Your exercise of permissions under this License.
152 | 
153 |    8. Limitation of Liability. In no event and under no legal theory,
154 |       whether in tort (including negligence), contract, or otherwise,
155 |       unless required by applicable law (such as deliberate and grossly
156 |       negligent acts) or agreed to in writing, shall any Contributor be
157 |       liable to You for damages, including any direct, indirect, special,
158 |       incidental, or consequential damages of any character arising as a
159 |       result of this License or out of the use or inability to use the
160 |       Work (including but not limited to damages for loss of goodwill,
161 |       work stoppage, computer failure or malfunction, or any and all
162 |       other commercial damages or losses), even if such Contributor
163 |       has been advised of the possibility of such damages.
164 | 
165 |    9. Accepting Warranty or Additional Liability. While redistributing
166 |       the Work or Derivative Works thereof, You may choose to offer,
167 |       and charge a fee for, acceptance of support, warranty, indemnity,
168 |       or other liability obligations and/or rights consistent with this
169 |       License. However, in accepting such obligations, You may act only
170 |       on Your own behalf and on Your sole responsibility, not on behalf
171 |       of any other Contributor, and only if You agree to indemnify,
172 |       defend, and hold each Contributor harmless for any liability
173 |       incurred by, or claims asserted against, such Contributor by reason
174 |       of your accepting any such warranty or additional liability.
175 | 
176 |    END OF TERMS AND CONDITIONS
177 | 
178 |    APPENDIX: How to apply the Apache License to your work.
179 | 
180 |       To apply the Apache License to your work, attach the following
181 |       boilerplate notice, with the fields enclosed by brackets "[]"
182 |       replaced with your own identifying information. (Don't include
183 |       the brackets!) The text should be enclosed in the appropriate
184 |       comment syntax for the file format. We also recommend that a
185 |       file or class name and description of purpose be included on the
186 |       same "printed page" as the copyright notice for easier
187 |       identification within third-party archives.
188 | 
189 |    Copyright 2019 Archives Unleashed
190 | 
191 |    Licensed under the Apache License, Version 2.0 (the "License");
192 |    you may not use this file except in compliance with the License.
193 |    You may obtain a copy of the License at
194 | 
195 |        http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 |    Unless required by applicable law or agreed to in writing, software
198 |    distributed under the License is distributed on an "AS IS" BASIS,
199 |    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 |    See the License for the specific language governing permissions and
201 |    limitations under the License.
202 | -------------------------------------------------------------------------------- /PySpark Examples/PySpark - Extract Popular Images.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from aut import *" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 2, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "# Web archive collection; images.\n", 19 | "images = WebArchive(sc, sqlContext, \"/home/nruest/Projects/au/sample-data/geocities\").images()" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": 3, 25 | "metadata": {}, 26 | "outputs": [ 27 | { 28 | "name": "stdout", 29 | "output_type": "stream", 30 | "text": [ 31 | "[2022-05-28T20:42:18.472Z - 00000 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 32 | ] 33 | }, 34 | { 35 | "name": "stderr", 36 | "output_type": "stream", 37 | "text": [ 38 | "22/05/28 16:42:19 WARN PDFParser: J2KImageReader not loaded. JPEG2000 files will not be processed.\n", 39 | "See https://pdfbox.apache.org/2.0/dependencies.html#jai-image-io\n", 40 | "for optional dependencies.\n", 41 | "\n", 42 | "22/05/28 16:42:19 WARN TesseractOCRParser: Tesseract OCR is installed and will be automatically applied to image files unless\n", 43 | "you've excluded the TesseractOCRParser from the default parser.\n", 44 | "Tesseract may dramatically slow down content extraction (TIKA-2359).\n", 45 | "As of Tika 1.15 (and prior versions), Tesseract is automatically called.\n", 46 | "In future versions of Tika, users may need to turn the TesseractOCRParser on via TikaConfig.\n", 47 | "22/05/28 16:42:19 WARN SQLite3Parser: org.xerial's sqlite-jdbc is not loaded.\n", 48 | "Please provide the jar on your classpath to parse sqlite files.\n", 49 | "See tika-parsers/pom.xml for the correct version.\n" 50 | ] 51 | }, 52 | { 53 | "name": "stdout", 54 | "output_type": "stream", 55 | "text": [ 56 | "+--------------+--------------------+--------------------+---------+--------------------+--------------+-----+------+--------------------+--------------------+--------------------+\n", 57 | "| crawl_date| url| filename|extension|mime_type_web_server|mime_type_tika|width|height| md5| sha1| bytes|\n", 58 | "+--------------+--------------------+--------------------+---------+--------------------+--------------+-----+------+--------------------+--------------------+--------------------+\n", 59 | "|20091027143512|http://geocities....|bodyboardhowto_08...| gif| image/gif| image/gif| 122| 42|17827882f7bf42860...|054ed6bee447d4165...|R0lGODlhegAqAPcAA...|\n", 60 | "|20091027143512|http://geocities....|products.htm_cmp_...| gif| image/gif| image/gif| 140| 60|899bc6e3309b0fc78...|c12218204f212d37d...|R0lGODlhjAA8AOMAA...|\n", 61 | "|20091027143512|http://geocities....| choc1.jpg| jpg| image/jpeg| image/jpeg| 110| 110|955f6c342ffed6823...|5933e16f8c8444821...|/9j/4AAQSkZJRgABA...|\n", 62 | "|20091027143511|http://geocities....| 0087.jpg| jpg| image/jpeg| image/jpeg| 75| 100|47718718ddfd7d43a...|3af77b7faeaa5a0d3...|/9j/4AAQSkZJRgABA...|\n", 63 | "|20091027143511|http://geocities....| newyears_1.gif| gif| image/gif| image/gif| 200| 274|8ada65828daff258e...|004a7bdadfadc1e28...|R0lGODlhyAASAfcAA...|\n", 64 | 
"|20091027143511|http://geocities....| garage.jpg| jpg| image/jpeg| image/jpeg| 600| 422|626a59f173d5ea2f0...|2ed34b7c41340a141...|/9j/4AAQSkZJRgABA...|\n", 65 | "|20091027143512|http://www.geocit...| leevi02.jpg| jpg| image/jpeg| image/jpeg| 176| 150|86138dc411c8a079b...|0b0ef066f53840938...|/9j/4AAQSkZJRgABA...|\n", 66 | "|20091027143512|http://geocities....| rose_open.gif| gif| image/gif| image/gif| 120| 112|d6acf3534878f1cb9...|ece458c0c5c5351fd...|R0lGODlheABwAPQAA...|\n", 67 | "|20091027143512|http://www.geocit...| enterCLR.gif| gif| image/gif| image/gif| 100| 70|cd80d70bf2517e0f1...|a3829ce2ebadb488a...|R0lGODlhZABGANQAA...|\n", 68 | "|20091027143512|http://geocities....| Film0002_18.jpg| jpg| image/jpeg| image/jpeg| 192| 300|824557238e36624f7...|317cb2ab866247a7a...|/9j/4AAQSkZJRgABA...|\n", 69 | "|20091027143512|http://geocities....|mike.htm_cmp_bloc...| gif| image/gif| image/gif| 600| 60|11ec6724ac8f61b95...|b0151deb6b48df348...|R0lGODlhWAI8AOMAA...|\n", 70 | "|20091027143512|http://geocities....| 0017.jpg| jpg| image/jpeg| image/jpeg| 75| 100|797a0805347d6384c...|8fc1436904aaaebd5...|/9j/4AAQSkZJRgABA...|\n", 71 | "|20091027143512|http://www.geocit...| mary01.jpg| jpg| image/jpeg| image/jpeg| 181| 377|08f510fe75f6f70d8...|663ecdc8835e93005...|/9j/4AAQSkZJRgABA...|\n", 72 | "|20091027143512|http://geocities....| dayl_button.gif| gif| image/gif| image/gif| 42| 30|29838b4f98b9cccef...|9599ee281e4c20fd0...|R0lGODlhKgAeAPcAA...|\n", 73 | "|20091027143512|http://geocities....| MTW48.jpg| jpg| image/jpeg| image/jpeg| 246| 262|ccbc878b6832f15e6...|130179b92a90c98fa...|/9j/4AAQSkZJRgABA...|\n", 74 | "|20091027143512|http://geocities....|up_cmp_blocks110_...| gif| image/gif| image/gif| 140| 60|6377484ef666ab5f5...|d4d4fb8c42403221b...|R0lGODlhjAA8AOMAA...|\n", 75 | "|20091027143512|http://geocities....| red-dk-lg.gif| gif| image/gif| image/gif| 19| 19|a3c296a2371b5221d...|ca2b55197ae856934...|R0lGODlhEwATAIQAA...|\n", 76 | "|20091027143512|http://www.geocit...| jesse.jpg| jpg| image/jpeg| image/jpeg| 183| 150|eca62b02a39127626...|3a61c2277de824f8f...|/9j/4AAQSkZJRgABA...|\n", 77 | "|20091027143513|http://geocities....|skatepagepics_12.gif| gif| image/gif| image/gif| 125| 42|d87e2a1bd5e6d9614...|951679fe427db957b...|R0lGODlhfQAqAPcAA...|\n", 78 | "|20091027143513|http://geocities....| skylight_small.jpg| jpg| image/jpeg| image/jpeg| 100| 62|ea69da4bb38c7a519...|9e8e1a0b9a1aab72a...|/9j/4AAQSkZJRgABA...|\n", 79 | "+--------------+--------------------+--------------------+---------+--------------------+--------------+-----+------+--------------------+--------------------+--------------------+\n", 80 | "only showing top 20 rows\n", 81 | "\n" 82 | ] 83 | }, 84 | { 85 | "name": "stderr", 86 | "output_type": "stream", 87 | "text": [ 88 | "\r", 89 | " \r" 90 | ] 91 | } 92 | ], 93 | "source": [ 94 | "# Show images DataFrame\n", 95 | "images.show()" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 4, 101 | "metadata": {}, 102 | "outputs": [], 103 | "source": [ 104 | "popular_images = ExtractPopularImages(images, 20, 10, 10)" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 5, 110 | "metadata": {}, 111 | "outputs": [ 112 | { 113 | "name": "stdout", 114 | "output_type": "stream", 115 | "text": [ 116 | "[2022-05-28T20:42:22.910Z - 00003 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 
117 | "[2022-05-28T20:42:22.911Z - 00007 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 118 | "[2022-05-28T20:42:22.931Z - 00004 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 119 | "[2022-05-28T20:42:22.911Z - 00006 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 120 | "[2022-05-28T20:42:22.978Z - 00005 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 121 | "[2022-05-28T20:42:22.977Z - 00010 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 122 | "[2022-05-28T20:42:22.948Z - 00001 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 123 | "[2022-05-28T20:42:22.948Z - 00008 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 124 | "[2022-05-28T20:42:22.948Z - 00009 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 125 | "[2022-05-28T20:42:22.990Z - 00002 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 126 | ] 127 | }, 128 | { 129 | "name": "stderr", 130 | "output_type": "stream", 131 | "text": [ 132 | "\r", 133 | "[Stage 1:> (0 + 10) / 10][Stage 2:> (0 + 2) / 10]\r" 134 | ] 135 | }, 136 | { 137 | "name": "stdout", 138 | "output_type": "stream", 139 | "text": [ 140 | "[2022-05-28T20:42:23.978Z - 00012 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 141 | "[2022-05-28T20:42:23.988Z - 00011 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 142 | ] 143 | }, 144 | { 145 | "name": "stderr", 146 | "output_type": "stream", 147 | "text": [ 148 | "[Stage 1:> (0 + 10) / 10][Stage 2:> (0 + 2) / 10]\r" 149 | ] 150 | }, 151 | { 152 | "name": "stdout", 153 | "output_type": "stream", 154 | "text": [ 155 | "[2022-05-28T20:52:49.161Z - 00013 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, 
strategy: BlockWise [dynamic])\n" 156 | ] 157 | }, 158 | { 159 | "name": "stderr", 160 | "output_type": "stream", 161 | "text": [ 162 | "\r", 163 | "[Stage 1:=> (1 + 9) / 10][Stage 2:> (0 + 3) / 10]\r" 164 | ] 165 | }, 166 | { 167 | "name": "stdout", 168 | "output_type": "stream", 169 | "text": [ 170 | "[2022-05-28T20:53:21.039Z - 00014 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 171 | ] 172 | }, 173 | { 174 | "name": "stderr", 175 | "output_type": "stream", 176 | "text": [ 177 | "\r", 178 | "[Stage 1:=> (1 + 9) / 10][Stage 2:=> (1 + 3) / 10]\r" 179 | ] 180 | }, 181 | { 182 | "name": "stdout", 183 | "output_type": "stream", 184 | "text": [ 185 | "[2022-05-28T20:53:24.247Z - 00015 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 186 | ] 187 | }, 188 | { 189 | "name": "stderr", 190 | "output_type": "stream", 191 | "text": [ 192 | "\r", 193 | "[Stage 1:===> (2 + 8) / 10][Stage 2:=> (1 + 4) / 10]\r" 194 | ] 195 | }, 196 | { 197 | "name": "stdout", 198 | "output_type": "stream", 199 | "text": [ 200 | "[2022-05-28T20:53:45.707Z - 00016 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 201 | ] 202 | }, 203 | { 204 | "name": "stderr", 205 | "output_type": "stream", 206 | "text": [ 207 | "[Stage 1:=======> (4 + 6) / 10][Stage 2:=> (1 + 6) / 10]\r" 208 | ] 209 | }, 210 | { 211 | "name": "stdout", 212 | "output_type": "stream", 213 | "text": [ 214 | "[2022-05-28T20:54:08.417Z - 00017 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 215 | "[2022-05-28T20:54:20.033Z - 00018 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 216 | ] 217 | }, 218 | { 219 | "name": "stderr", 220 | "output_type": "stream", 221 | "text": [ 222 | "\r", 223 | "[Stage 1:=========> (5 + 5) / 10][Stage 2:=> (1 + 7) / 10]\r" 224 | ] 225 | }, 226 | { 227 | "name": "stdout", 228 | "output_type": "stream", 229 | "text": [ 230 | "[2022-05-28T20:54:22.604Z - 00019 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 231 | ] 232 | }, 233 | { 234 | "name": "stderr", 235 | "output_type": "stream", 236 | "text": [ 237 | "\r", 238 | "[Stage 1:==========> (6 + 4) / 10][Stage 2:=> (1 + 8) / 10]\r" 239 | ] 240 | }, 241 | { 242 | "name": "stdout", 243 | "output_type": "stream", 244 | "text": [ 245 | "[2022-05-28T20:54:25.446Z - 00020 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 246 | ] 247 | }, 248 | { 249 | "name": "stderr", 250 | "output_type": "stream", 251 | "text": [ 252 | "[Stage 
3:==================================================> (188 + 12) / 200]\r" 253 | ] 254 | }, 255 | { 256 | "name": "stdout", 257 | "output_type": "stream", 258 | "text": [ 259 | "+--------------------+-----+\n", 260 | "| url|count|\n", 261 | "+--------------------+-----+\n", 262 | "|http://geocities....| 755|\n", 263 | "|http://geocities....| 58|\n", 264 | "|http://geocities....| 56|\n", 265 | "|http://geocities....| 51|\n", 266 | "|http://www.geocit...| 43|\n", 267 | "|http://geocities....| 39|\n", 268 | "|http://geocities....| 33|\n", 269 | "|http://geocities....| 31|\n", 270 | "|http://geocities....| 30|\n", 271 | "|http://geocities....| 29|\n", 272 | "|http://i24.photob...| 28|\n", 273 | "|http://geocities....| 26|\n", 274 | "|http://geocities....| 25|\n", 275 | "|http://geocities....| 25|\n", 276 | "|http://geocities....| 24|\n", 277 | "|http://geocities....| 24|\n", 278 | "|http://geocities....| 22|\n", 279 | "|http://geocities....| 22|\n", 280 | "|http://geocities....| 22|\n", 281 | "|http://www.geocit...| 22|\n", 282 | "+--------------------+-----+\n", 283 | "\n" 284 | ] 285 | }, 286 | { 287 | "name": "stderr", 288 | "output_type": "stream", 289 | "text": [ 290 | "\r", 291 | " \r" 292 | ] 293 | } 294 | ], 295 | "source": [ 296 | "popular_images.show()" 297 | ] 298 | } 299 | ], 300 | "metadata": { 301 | "kernelspec": { 302 | "display_name": "Python 3 (ipykernel)", 303 | "language": "python", 304 | "name": "python3" 305 | }, 306 | "language_info": { 307 | "codemirror_mode": { 308 | "name": "ipython", 309 | "version": 3 310 | }, 311 | "file_extension": ".py", 312 | "mimetype": "text/x-python", 313 | "name": "python", 314 | "nbconvert_exporter": "python", 315 | "pygments_lexer": "ipython3", 316 | "version": "3.9.9" 317 | } 318 | }, 319 | "nbformat": 4, 320 | "nbformat_minor": 2 321 | } 322 | -------------------------------------------------------------------------------- /PySpark Examples/PySpark - Find Images Shared Between Domains.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 2, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from aut import *\n", 10 | "from pyspark.sql.functions import asc, countDistinct, first" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 3, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "images = WebArchive(sc, sqlContext, \"/home/nruest/Projects/au/sample-data/geocities\") \\\n", 20 | " .images() \\\n", 21 | " .select(remove_prefix_www(extract_domain(\"url\")).alias(\"domain\"), \"url\", \"md5\")" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": 4, 27 | "metadata": {}, 28 | "outputs": [ 29 | { 30 | "name": "stderr", 31 | "output_type": "stream", 32 | "text": [ 33 | "\r", 34 | "[Stage 0:> (0 + 1) / 1]\r" 35 | ] 36 | }, 37 | { 38 | "name": "stdout", 39 | "output_type": "stream", 40 | "text": [ 41 | "[2022-05-28T00:11:17.610Z - 00000 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 42 | ] 43 | }, 44 | { 45 | "name": "stderr", 46 | "output_type": "stream", 47 | "text": [ 48 | "22/05/27 20:11:18 WARN PDFParser: J2KImageReader not loaded. 
JPEG2000 files will not be processed.\n", 49 | "See https://pdfbox.apache.org/2.0/dependencies.html#jai-image-io\n", 50 | "for optional dependencies.\n", 51 | "\n", 52 | "22/05/27 20:11:18 WARN TesseractOCRParser: Tesseract OCR is installed and will be automatically applied to image files unless\n", 53 | "you've excluded the TesseractOCRParser from the default parser.\n", 54 | "Tesseract may dramatically slow down content extraction (TIKA-2359).\n", 55 | "As of Tika 1.15 (and prior versions), Tesseract is automatically called.\n", 56 | "In future versions of Tika, users may need to turn the TesseractOCRParser on via TikaConfig.\n", 57 | "22/05/27 20:11:18 WARN SQLite3Parser: org.xerial's sqlite-jdbc is not loaded.\n", 58 | "Please provide the jar on your classpath to parse sqlite files.\n", 59 | "See tika-parsers/pom.xml for the correct version.\n" 60 | ] 61 | }, 62 | { 63 | "name": "stdout", 64 | "output_type": "stream", 65 | "text": [ 66 | "+-------------+--------------------+--------------------+\n", 67 | "| domain| url| md5|\n", 68 | "+-------------+--------------------+--------------------+\n", 69 | "|geocities.com|http://geocities....|17827882f7bf42860...|\n", 70 | "|geocities.com|http://geocities....|899bc6e3309b0fc78...|\n", 71 | "|geocities.com|http://geocities....|955f6c342ffed6823...|\n", 72 | "|geocities.com|http://geocities....|47718718ddfd7d43a...|\n", 73 | "|geocities.com|http://geocities....|8ada65828daff258e...|\n", 74 | "+-------------+--------------------+--------------------+\n", 75 | "only showing top 5 rows\n", 76 | "\n" 77 | ] 78 | }, 79 | { 80 | "name": "stderr", 81 | "output_type": "stream", 82 | "text": [ 83 | "\r", 84 | " \r" 85 | ] 86 | } 87 | ], 88 | "source": [ 89 | "images.show(5, True)" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 5, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "links = images.groupBy(\"md5\") \\\n", 99 | " .count() \\\n", 100 | " .where(countDistinct(\"domain\")>=2)" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": 6, 106 | "metadata": {}, 107 | "outputs": [], 108 | "source": [ 109 | "result = images.join(links, \"md5\") \\\n", 110 | " .groupBy(\"domain\", \"md5\") \\\n", 111 | " .agg(first(\"url\").alias(\"image_url\")) \\\n", 112 | " .orderBy(asc(\"md5\"))" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": 7, 118 | "metadata": {}, 119 | "outputs": [ 120 | { 121 | "name": "stdout", 122 | "output_type": "stream", 123 | "text": [ 124 | "[2022-05-28T00:11:22.907Z - 00012 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 125 | "[2022-05-28T00:11:22.907Z - 00011 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 126 | ] 127 | }, 128 | { 129 | "name": "stderr", 130 | "output_type": "stream", 131 | "text": [ 132 | "\r", 133 | "[Stage 1:> (0 + 10) / 10][Stage 3:> (0 + 2) / 10]\r" 134 | ] 135 | }, 136 | { 137 | "name": "stdout", 138 | "output_type": "stream", 139 | "text": [ 140 | "[2022-05-28T00:11:23.366Z - 00001 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 
141 | "[2022-05-28T00:11:23.412Z - 00008 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 142 | "[2022-05-28T00:11:23.412Z - 00003 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 143 | "[2022-05-28T00:11:23.412Z - 00006 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 144 | "[2022-05-28T00:11:23.416Z - 00005 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 145 | "[2022-05-28T00:11:23.417Z - 00002 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 146 | "[2022-05-28T00:11:23.417Z - 00010 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 147 | "[2022-05-28T00:11:23.418Z - 00007 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 148 | "[2022-05-28T00:11:23.420Z - 00009 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 149 | "[2022-05-28T00:11:23.420Z - 00004 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 150 | ] 151 | }, 152 | { 153 | "name": "stderr", 154 | "output_type": "stream", 155 | "text": [ 156 | "[Stage 1:> (0 + 10) / 10][Stage 3:> (0 + 2) / 10]\r" 157 | ] 158 | }, 159 | { 160 | "name": "stdout", 161 | "output_type": "stream", 162 | "text": [ 163 | "[2022-05-28T00:20:43.093Z - 00013 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 164 | ] 165 | }, 166 | { 167 | "name": "stderr", 168 | "output_type": "stream", 169 | "text": [ 170 | "\r", 171 | "[Stage 1:=> (1 + 9) / 10][Stage 3:> (0 + 3) / 10]\r" 172 | ] 173 | }, 174 | { 175 | "name": "stdout", 176 | "output_type": "stream", 177 | "text": [ 178 | "[2022-05-28T00:21:25.142Z - 00014 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 179 | ] 180 | }, 181 | { 182 | "name": "stderr", 183 | "output_type": "stream", 184 | "text": [ 185 | "\r", 186 | "[Stage 1:=> (1 + 9) / 10][Stage 3:=> (1 + 3) / 10]\r" 187 | ] 188 | }, 189 | { 190 | "name": "stdout", 191 | 
"output_type": "stream", 192 | "text": [ 193 | "[2022-05-28T00:21:36.821Z - 00015 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 194 | ] 195 | }, 196 | { 197 | "name": "stderr", 198 | "output_type": "stream", 199 | "text": [ 200 | "[Stage 1:=====> (3 + 7) / 10][Stage 3:=> (1 + 5) / 10]\r" 201 | ] 202 | }, 203 | { 204 | "name": "stdout", 205 | "output_type": "stream", 206 | "text": [ 207 | "[2022-05-28T00:21:40.555Z - 00016 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 208 | "[2022-05-28T00:21:54.640Z - 00017 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 209 | ] 210 | }, 211 | { 212 | "name": "stderr", 213 | "output_type": "stream", 214 | "text": [ 215 | "[Stage 1:=========> (5 + 5) / 10][Stage 3:=> (1 + 7) / 10]\r" 216 | ] 217 | }, 218 | { 219 | "name": "stdout", 220 | "output_type": "stream", 221 | "text": [ 222 | "[2022-05-28T00:22:03.398Z - 00018 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 223 | "[2022-05-28T00:22:05.184Z - 00019 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 224 | ] 225 | }, 226 | { 227 | "name": "stderr", 228 | "output_type": "stream", 229 | "text": [ 230 | "[Stage 1:============> (7 + 3) / 10][Stage 3:=> (1 + 9) / 10]\r" 231 | ] 232 | }, 233 | { 234 | "name": "stdout", 235 | "output_type": "stream", 236 | "text": [ 237 | "[2022-05-28T00:22:09.652Z - 00020 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 238 | ] 239 | }, 240 | { 241 | "name": "stderr", 242 | "output_type": "stream", 243 | "text": [ 244 | "[Stage 4:================================================> (178 + 13) / 200]\r" 245 | ] 246 | }, 247 | { 248 | "name": "stdout", 249 | "output_type": "stream", 250 | "text": [ 251 | "+--------------------------+--------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+\n", 252 | "|domain |md5 |image_url |\n", 253 | "+--------------------------+--------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+\n", 254 | "|stevenspointjournal.com |022ac0cde4560864fc99a3d9e5210f6b|http://cmsimg.stevenspointjournal.com/apps/pbcsi.dll/bilde?Avis=U0&Dato=20091026&Kategori=CWS01&Lopenr=910260803&Ref=PH&NewTbl=1&Itemnr=1&maxW=130&Border=0 |\n", 255 | "|marshfieldnewsherald.com 
|022ac0cde4560864fc99a3d9e5210f6b|http://cmsimg.marshfieldnewsherald.com/apps/pbcsi.dll/bilde?Avis=U0&Dato=20091026&Kategori=CWS01&Lopenr=910260803&Ref=PH&NewTbl=1&Itemnr=1&maxW=130&Border=0 |\n", 256 | "|wisconsinrapidstribune.com|022ac0cde4560864fc99a3d9e5210f6b|http://cmsimg.wisconsinrapidstribune.com/apps/pbcsi.dll/bilde?Avis=U0&Dato=20091026&Kategori=CWS01&Lopenr=910260803&Ref=PH&NewTbl=1&Itemnr=1&maxW=130&Border=0|\n", 257 | "|thenorthwestern.com |06fe40f8d7b9b5fd7e552dac73100044|http://sitelife.thenorthwestern.com/ver1.0/Content/images/store/3/12/63a80d76-f400-4af5-bc63-4b6af4528150.Small.gif |\n", 258 | "|marshfieldnewsherald.com |06fe40f8d7b9b5fd7e552dac73100044|http://sitelife.marshfieldnewsherald.com/ver1.0/Content/images/store/4/5/a47bbc91-a893-4b62-95d0-04b4de0739ea.Small.gif |\n", 259 | "|stevenspointjournal.com |0d2fb10487f4dfad02b14d00832049a3|http://www.stevenspointjournal.com/graphics/viewadvertisers.gif |\n", 260 | "|postcrescent.com |0d2fb10487f4dfad02b14d00832049a3|http://www.postcrescent.com/graphics/viewadvertisers.gif |\n", 261 | "|thenorthwestern.com |0d2fb10487f4dfad02b14d00832049a3|http://www.thenorthwestern.com/graphics/viewadvertisers.gif |\n", 262 | "|fdlreporter.com |0d2fb10487f4dfad02b14d00832049a3|http://www.fdlreporter.com/graphics/viewadvertisers.gif |\n", 263 | "|geocities.com |0d4937515413df19faf9902cf06b6f88|http://geocities.com/Colosseum/Base/7341/usa3.gif |\n", 264 | "+--------------------------+--------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+\n", 265 | "only showing top 10 rows\n", 266 | "\n" 267 | ] 268 | }, 269 | { 270 | "name": "stderr", 271 | "output_type": "stream", 272 | "text": [ 273 | "\r", 274 | "[Stage 4:====================================================> (192 + 8) / 200]\r", 275 | "\r", 276 | " \r" 277 | ] 278 | } 279 | ], 280 | "source": [ 281 | "result.show(10, False)" 282 | ] 283 | } 284 | ], 285 | "metadata": { 286 | "kernelspec": { 287 | "display_name": "Python 3 (ipykernel)", 288 | "language": "python", 289 | "name": "python3" 290 | }, 291 | "language_info": { 292 | "codemirror_mode": { 293 | "name": "ipython", 294 | "version": 3 295 | }, 296 | "file_extension": ".py", 297 | "mimetype": "text/x-python", 298 | "name": "python", 299 | "nbconvert_exporter": "python", 300 | "pygments_lexer": "ipython3", 301 | "version": "3.9.9" 302 | } 303 | }, 304 | "nbformat": 4, 305 | "nbformat_minor": 2 306 | } 307 | -------------------------------------------------------------------------------- /PySpark Examples/PySpark - Finding Hyperlinks within Collection on Pages with Certain Keyword.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from aut import *\n", 10 | "from pyspark.sql.functions import col, explode_outer" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "webpages = WebArchive(sc, sqlContext, \"/home/nruest/Projects/au/sample-data/geocities\") \\\n", 20 | " .all() \\\n", 21 | " .filter(\"crawl_date is not NULL\")\\\n", 22 | " .filter(~(col(\"url\").rlike(\".*robots\\\\.txt$\")) & (col(\"mime_type_web_server\").rlike(\"text/html\") | col(\"mime_type_web_server\").rlike(\"application/xhtml+xml\") | col(\"url\").rlike(\"(?i).*htm$\") | 
col(\"url\").rlike(\"(?i).*html$\")))\\\n", 23 | " .filter(col(\"http_status_code\") == 200)\\\n", 24 | " .select(\"domain\", \"url\", \"crawl_date\", explode_outer(extract_links(\"url\", \"raw_content\")).alias(\"link\")) \\\n", 25 | " .filter(col(\"raw_content\").like(\"%food%\")) \\\n", 26 | " .select(\"url\", \"domain\", \"crawl_date\", col(\"link._1\").alias(\"destination_page\"))" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 3, 32 | "metadata": {}, 33 | "outputs": [ 34 | { 35 | "name": "stdout", 36 | "output_type": "stream", 37 | "text": [ 38 | "[2022-05-29T22:12:01.819Z - 00000 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 39 | ] 40 | }, 41 | { 42 | "name": "stderr", 43 | "output_type": "stream", 44 | "text": [ 45 | "22/05/29 18:12:02 WARN PDFParser: J2KImageReader not loaded. JPEG2000 files will not be processed.\n", 46 | "See https://pdfbox.apache.org/2.0/dependencies.html#jai-image-io\n", 47 | "for optional dependencies.\n", 48 | "\n", 49 | "22/05/29 18:12:02 WARN TesseractOCRParser: Tesseract OCR is installed and will be automatically applied to image files unless\n", 50 | "you've excluded the TesseractOCRParser from the default parser.\n", 51 | "Tesseract may dramatically slow down content extraction (TIKA-2359).\n", 52 | "As of Tika 1.15 (and prior versions), Tesseract is automatically called.\n", 53 | "In future versions of Tika, users may need to turn the TesseractOCRParser on via TikaConfig.\n", 54 | "22/05/29 18:12:02 WARN SQLite3Parser: org.xerial's sqlite-jdbc is not loaded.\n", 55 | "Please provide the jar on your classpath to parse sqlite files.\n", 56 | "See tika-parsers/pom.xml for the correct version.\n" 57 | ] 58 | }, 59 | { 60 | "name": "stdout", 61 | "output_type": "stream", 62 | "text": [ 63 | "+-------------------------------------------------------------------------------------+-------------+--------------+--------------------------------------------+\n", 64 | "|url |domain |crawl_date |destination_page |\n", 65 | "+-------------------------------------------------------------------------------------+-------------+--------------+--------------------------------------------+\n", 66 | "|http://geocities.com/kelsoonbutler/boardDocs/BoardMeetingMinutes_AGM_March12_2002.htm|geocities.com|20091027143512|null |\n", 67 | "|http://geocities.com/krooyaimaha/health5.htm |geocities.com|20091027143546|http://geocities.com/krooyaimaha/health5.htm|\n", 68 | "|http://geocities.com/krooyaimaha/health5.htm |geocities.com|20091027143546|http://geocities.com/krooyaimaha/health5.htm|\n", 69 | "|http://geocities.com/krooyaimaha/health5.htm |geocities.com|20091027143546|http://geocities.com/krooyaimaha/health5.htm|\n", 70 | "|http://geocities.com/krooyaimaha/health5.htm |geocities.com|20091027143546|http://geocities.com/krooyaimaha/health5.htm|\n", 71 | "+-------------------------------------------------------------------------------------+-------------+--------------+--------------------------------------------+\n", 72 | "only showing top 5 rows\n", 73 | "\n" 74 | ] 75 | }, 76 | { 77 | "name": "stderr", 78 | "output_type": "stream", 79 | "text": [ 80 | "\r", 81 | " \r" 82 | ] 83 | } 84 | ], 85 | "source": [ 86 | "webpages.show(5, False)" 87 | ] 88 | } 89 | ], 90 | "metadata": { 91 | "kernelspec": { 92 | "display_name": "Python 3 (ipykernel)", 93 | "language": "python", 94 | "name": 
"python3" 95 | }, 96 | "language_info": { 97 | "codemirror_mode": { 98 | "name": "ipython", 99 | "version": 3 100 | }, 101 | "file_extension": ".py", 102 | "mimetype": "text/x-python", 103 | "name": "python", 104 | "nbconvert_exporter": "python", 105 | "pygments_lexer": "ipython3", 106 | "version": "3.9.9" 107 | } 108 | }, 109 | "nbformat": 4, 110 | "nbformat_minor": 2 111 | } 112 | -------------------------------------------------------------------------------- /PySpark Examples/PySpark - aut standard derivatives.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from aut import *\n", 10 | "from pyspark.sql.functions import col, desc" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "# Web archive collection; web pages.\n", 20 | "webpages = WebArchive(sc, sqlContext, \"/home/nruest/Projects/au/sample-data/geocities\").webpages()" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 3, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "# Web archive collection; web graph.\n", 30 | "webgraph = WebArchive(sc, sqlContext, \"/home/nruest/Projects/au/sample-data/geocities\").webgraph()" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 4, 36 | "metadata": {}, 37 | "outputs": [ 38 | { 39 | "name": "stdout", 40 | "output_type": "stream", 41 | "text": [ 42 | "[2022-05-28T20:17:58.285Z - 00009 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 43 | "[2022-05-28T20:17:58.285Z - 00003 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 44 | "[2022-05-28T20:17:58.285Z - 00005 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 45 | "[2022-05-28T20:17:58.285Z - 00002 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 46 | "[2022-05-28T20:17:58.285Z - 00008 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 47 | "[2022-05-28T20:17:58.285Z - 00006 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 48 | "[2022-05-28T20:17:58.285Z - 00004 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 49 | "[2022-05-28T20:17:58.285Z - 00001 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz 
(Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 50 | "[2022-05-28T20:17:58.285Z - 00000 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 51 | "[2022-05-28T20:17:58.285Z - 00007 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 52 | ] 53 | }, 54 | { 55 | "name": "stderr", 56 | "output_type": "stream", 57 | "text": [ 58 | "22/05/28 16:17:59 WARN PDFParser: J2KImageReader not loaded. JPEG2000 files will not be processed.\n", 59 | "See https://pdfbox.apache.org/2.0/dependencies.html#jai-image-io\n", 60 | "for optional dependencies.\n", 61 | "\n", 62 | "22/05/28 16:17:59 WARN TesseractOCRParser: Tesseract OCR is installed and will be automatically applied to image files unless\n", 63 | "you've excluded the TesseractOCRParser from the default parser.\n", 64 | "Tesseract may dramatically slow down content extraction (TIKA-2359).\n", 65 | "As of Tika 1.15 (and prior versions), Tesseract is automatically called.\n", 66 | "In future versions of Tika, users may need to turn the TesseractOCRParser on via TikaConfig.\n", 67 | "22/05/28 16:17:59 WARN SQLite3Parser: org.xerial's sqlite-jdbc is not loaded.\n", 68 | "Please provide the jar on your classpath to parse sqlite files.\n", 69 | "See tika-parsers/pom.xml for the correct version.\n", 70 | " \r" 71 | ] 72 | } 73 | ], 74 | "source": [ 75 | "# Domain frequency file.\n", 76 | "webpages.groupBy(\"domain\")\\\n", 77 | " .count()\\\n", 78 | " .sort(col(\"count\")\\\n", 79 | " .desc())\\\n", 80 | " .write\\\n", 81 | " .option(\"timestampFormat\", \"yyyy/MM/dd HH:mm:ss ZZ\")\\\n", 82 | " .format(\"csv\")\\\n", 83 | " .option(\"escape\", \"\\\"\")\\\n", 84 | " .option(\"encoding\", \"utf-8\")\\\n", 85 | " .save(\"all-domains\")" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": 5, 91 | "metadata": {}, 92 | "outputs": [ 93 | { 94 | "name": "stdout", 95 | "output_type": "stream", 96 | "text": [ 97 | "[2022-05-28T20:22:47.865Z - 00439 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 98 | "[2022-05-28T20:22:47.868Z - 00432 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 99 | "[2022-05-28T20:22:47.867Z - 00434 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 100 | "[2022-05-28T20:22:47.866Z - 00435 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 101 | "[2022-05-28T20:22:47.869Z - 00433 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 102 | "[2022-05-28T20:22:47.868Z - 
00438 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 103 | "[2022-05-28T20:22:47.877Z - 00440 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 104 | "[2022-05-28T20:22:47.877Z - 00436 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 105 | "[2022-05-28T20:22:47.890Z - 00437 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 106 | ] 107 | }, 108 | { 109 | "name": "stderr", 110 | "output_type": "stream", 111 | "text": [ 112 | "\r", 113 | "[Stage 5:> (0 + 10) / 10]\r" 114 | ] 115 | }, 116 | { 117 | "name": "stdout", 118 | "output_type": "stream", 119 | "text": [ 120 | "[2022-05-28T20:22:49.243Z - 00431 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 121 | ] 122 | }, 123 | { 124 | "name": "stderr", 125 | "output_type": "stream", 126 | "text": [ 127 | " \r" 128 | ] 129 | } 130 | ], 131 | "source": [ 132 | "# Full-text.\n", 133 | "webpages.write\\\n", 134 | " .option(\"timestampFormat\", \"yyyy/MM/dd HH:mm:ss ZZ\")\\\n", 135 | " .format(\"csv\")\\\n", 136 | " .option(\"escape\", \"\\\"\")\\\n", 137 | " .option(\"encoding\", \"utf-8\")\\\n", 138 | " .save(\"full-text\")" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": 6, 144 | "metadata": {}, 145 | "outputs": [], 146 | "source": [ 147 | "# Create DataFrame for GraphML output\n", 148 | "graph = webgraph.groupBy(\"crawl_date\", remove_prefix_www(extract_domain(\"src\")).alias(\"src_domain\"), remove_prefix_www(extract_domain(\"dest\")).alias(\"dest_domain\"))\\\n", 149 | " .count()\\\n", 150 | " .filter((col(\"dest_domain\").isNotNull()) & (col(\"dest_domain\") !=\"\"))\\\n", 151 | " .filter((col(\"src_domain\").isNotNull()) & (col(\"src_domain\") !=\"\"))\\\n", 152 | " .filter(col(\"count\") > 5)\\\n", 153 | " .orderBy(desc(\"count\"))" 154 | ] 155 | }, 156 | { 157 | "cell_type": "code", 158 | "execution_count": 7, 159 | "metadata": {}, 160 | "outputs": [ 161 | { 162 | "name": "stdout", 163 | "output_type": "stream", 164 | "text": [ 165 | "[2022-05-28T20:27:28.329Z - 00449 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 166 | "[2022-05-28T20:27:28.329Z - 00445 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 167 | "[2022-05-28T20:27:28.329Z - 00446 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 168 | 
"[2022-05-28T20:27:28.329Z - 00447 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 169 | "[2022-05-28T20:27:28.329Z - 00441 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 170 | "[2022-05-28T20:27:28.329Z - 00444 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 171 | "[2022-05-28T20:27:28.330Z - 00450 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 172 | "[2022-05-28T20:27:28.329Z - 00448 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 173 | "[2022-05-28T20:27:28.332Z - 00443 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n", 174 | "[2022-05-28T20:27:28.332Z - 00442 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n" 175 | ] 176 | }, 177 | { 178 | "name": "stderr", 179 | "output_type": "stream", 180 | "text": [ 181 | " \r" 182 | ] 183 | } 184 | ], 185 | "source": [ 186 | "# Write the GraphML out to a file.\n", 187 | "WriteGraphML(graph.collect(), \"test.graphml\")" 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": 8, 193 | "metadata": {}, 194 | "outputs": [], 195 | "source": [ 196 | "# Write the GEXF out to a file.\n", 197 | "\n", 198 | "# Not part of auk standard derivative process,\n", 199 | "# but apart of https://github.com/archivesunleashed/aut/pull/466 testing.\n", 200 | "\n", 201 | "WriteGEXF(graph.collect(), \"test.gexf\")" 202 | ] 203 | } 204 | ], 205 | "metadata": { 206 | "kernelspec": { 207 | "display_name": "Python 3 (ipykernel)", 208 | "language": "python", 209 | "name": "python3" 210 | }, 211 | "language_info": { 212 | "codemirror_mode": { 213 | "name": "ipython", 214 | "version": 3 215 | }, 216 | "file_extension": ".py", 217 | "mimetype": "text/x-python", 218 | "name": "python", 219 | "nbconvert_exporter": "python", 220 | "pygments_lexer": "ipython3", 221 | "version": "3.9.9" 222 | } 223 | }, 224 | "nbformat": 4, 225 | "nbformat_minor": 2 226 | } 227 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Archives Unleashed Notebooks 2 | 3 | Contains various examples of notebooks for working with web archives with the Archives Unleashed Toolkit, and derivatives generated by the Archives Unleashed Toolkit. 4 | 5 | ## License 6 | 7 | Licensed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0). 
8 | 9 | ## Acknowledgments 10 | 11 | This work is primarily supported by the [Andrew W. Mellon Foundation](https://mellon.org/). Other financial and in-kind support comes from the [Social Sciences and Humanities Research Council](http://www.sshrc-crsh.gc.ca/), [Compute Canada](https://www.computecanada.ca/), the [Ontario Ministry of Research, Innovation, and Science](https://www.ontario.ca/page/ministry-research-innovation-and-science), [York University Libraries](https://www.library.yorku.ca/web/), [Start Smart Labs](http://www.startsmartlabs.com/), and the [Faculty of Arts](https://uwaterloo.ca/arts/) and [David R. Cheriton School of Computer Science](https://cs.uwaterloo.ca/) at the [University of Waterloo](https://uwaterloo.ca/). 12 | 13 | Any opinions, findings, and conclusions or recommendations expressed are those of the researchers and do not necessarily reflect the views of the sponsors. 14 | -------------------------------------------------------------------------------- /arch/domain-frequency.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "domain-frequency.ipynb", 7 | "provenance": [], 8 | "include_colab_link": true 9 | }, 10 | "kernelspec": { 11 | "name": "python3", 12 | "display_name": "Python 3" 13 | }, 14 | "language_info": { 15 | "name": "python" 16 | } 17 | }, 18 | "cells": [ 19 | { 20 | "cell_type": "markdown", 21 | "metadata": { 22 | "id": "view-in-github", 23 | "colab_type": "text" 24 | }, 25 | "source": [ 26 | "\"Open" 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "source": [ 32 | "# Domain Frequency Dataset Exploration\n", 33 | "\n", 34 | "We're going to take a look at a few examples of how we can explore the Domain Frequency dataset. First we need to enter the URL for our Domain Frequency dataset. You can get this by right clicking the Download icon, and selecting \"Copy Link\"." 35 | ], 36 | "metadata": { 37 | "id": "vAyuRQ2PJIdc" 38 | } 39 | }, 40 | { 41 | "cell_type": "code", 42 | "source": [ 43 | "dataset = 'https://webdata.archive-it.org/ait/files/download/ARCHIVEIT-06689/DomainFrequencyExtraction/domain-frequency.csv.gz?access=FUO54Y4B3J3GAZSDD6ML7DQENUN5BDOX' #@param {type:\"string\"}\n", 44 | "print(dataset)" 45 | ], 46 | "metadata": { 47 | "colab": { 48 | "base_uri": "https://localhost:8080/" 49 | }, 50 | "cellView": "form", 51 | "id": "RfhJiesWVpAf", 52 | "outputId": "277fad1b-e6c8-4d4f-d2cb-11f38f484138" 53 | }, 54 | "execution_count": 1, 55 | "outputs": [ 56 | { 57 | "output_type": "stream", 58 | "name": "stdout", 59 | "text": [ 60 | "https://webdata.archive-it.org/ait/files/download/ARCHIVEIT-06689/DomainFrequencyExtraction/domain-frequency.csv.gz?access=FUO54Y4B3J3GAZSDD6ML7DQENUN5BDOX\n" 61 | ] 62 | } 63 | ] 64 | }, 65 | { 66 | "cell_type": "markdown", 67 | "source": [ 68 | "# Environment\n", 69 | "\n", 70 | "Next, we'll set up our environment so we can load our Domain Frequency dataset into [pandas](https://pandas.pydata.org) and use [Altair](https://altair-viz.github.io/) for plots. Altair is useful for creating plots since they can be easily exported as a PNG or SVG." 
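A minimal sketch of that export step, for reference: it assumes a toy DataFrame and a chart object named `chart`, neither of which appears in this notebook, and PNG output may need an extra package such as `vl-convert-python` or `altair_saver`:

```python
import altair as alt
import pandas as pd

# Toy data, purely to demonstrate the export step.
df = pd.DataFrame({"domain": ["example.com", "example.org"], "count": [10, 5]})

chart = alt.Chart(df).mark_bar().encode(x="domain:O", y="count:Q")

# Chart.save() infers the output format from the file extension.
chart.save("chart.svg")
chart.save("chart.png")  # PNG export may need vl-convert-python or altair_saver installed
```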
71 | ], 72 | "metadata": { 73 | "id": "Z14F2cIWJVW0" 74 | } 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 2, 79 | "metadata": { 80 | "id": "Chh6tt3HHF1s" 81 | }, 82 | "outputs": [], 83 | "source": [ 84 | "import pandas as pd\n", 85 | "import altair as alt" 86 | ] 87 | }, 88 | { 89 | "cell_type": "markdown", 90 | "source": [ 91 | "## Data Table Display\n", 92 | "\n", 93 | "Colab includes an extension that renders pandas DataFrames into interactive displays that can be filtered, sorted, and explored dynamically. This can be very useful for taking a look at what each DataFrame provides!\n", 94 | "\n", 95 | "Data table display for pandas DataFrames can be enabled by running:\n", 96 | "```python\n", 97 | "%load_ext google.colab.data_table\n", 98 | "```\n", 99 | "and disabled by running\n", 100 | "```python\n", 101 | "%unload_ext google.colab.data_table\n", 102 | "```" 103 | ], 104 | "metadata": { 105 | "id": "sH81XCf3I3xY" 106 | } 107 | }, 108 | { 109 | "cell_type": "code", 110 | "source": [ 111 | "%load_ext google.colab.data_table" 112 | ], 113 | "metadata": { 114 | "id": "-qyCnbvBI7n6" 115 | }, 116 | "execution_count": 3, 117 | "outputs": [] 118 | }, 119 | { 120 | "cell_type": "markdown", 121 | "source": [ 122 | "# Loading our ARCH Dataset as a DataFrame\n", 123 | "\n", 124 | "---\n", 125 | "\n", 126 | "\n", 127 | "Next, we'll create a pandas DataFrame from our dataset and show a preview of it using the Data Table Display." 128 | ], 129 | "metadata": { 130 | "id": "6prR7j1zI_D5" 131 | } 132 | }, 133 | { 134 | "cell_type": "code", 135 | "source": [ 136 | "domains = pd.read_csv(dataset, compression='gzip')\n", 137 | "domains" 138 | ], 139 | "metadata": { 140 | "colab": { 141 | "base_uri": "https://localhost:8080/", 142 | "height": 441 143 | }, 144 | "id": "YL0LQaUNHRKx", 145 | "outputId": "498e4153-bb5a-427e-9eba-9a3aa10abdfe" 146 | }, 147 | "execution_count": 4, 148 | "outputs": [ 149 | { 150 | "output_type": "stream", 151 | "name": "stdout", 152 | "text": [ 153 | "Warning: total number of rows (20407) exceeds max_rows (20000). Falling back to pandas display.\n" 154 | ] 155 | }, 156 | { 157 | "output_type": "execute_result", 158 | "data": { 159 | "text/plain": [ 160 | " domain count\n", 161 | "0 sld.cu 11002591\n", 162 | "1 cult.cu 5197445\n", 163 | "2 facebook.com 1985543\n", 164 | "3 icrt.cu 1936127\n", 165 | "4 cubava.cu 967903\n", 166 | "... ... ...\n", 167 | "20402 yardbarker.com 1\n", 168 | "20403 bufetetoro.com 1\n", 169 | "20404 understandingwar.org 1\n", 170 | "20405 headaches.org 1\n", 171 | "20406 nursite.com.ar 1\n", 172 | "\n", 173 | "[20407 rows x 2 columns]" 174 | ], 175 | "text/html": [ 176 | "\n", 177 | "
\n", 178 | "
\n", 179 | "
\n", 180 | "\n", 193 | "\n", 194 | " \n", 195 | " \n", 196 | " \n", 197 | " \n", 198 | " \n", 199 | " \n", 200 | " \n", 201 | " \n", 202 | " \n", 203 | " \n", 204 | " \n", 205 | " \n", 206 | " \n", 207 | " \n", 208 | " \n", 209 | " \n", 210 | " \n", 211 | " \n", 212 | " \n", 213 | " \n", 214 | " \n", 215 | " \n", 216 | " \n", 217 | " \n", 218 | " \n", 219 | " \n", 220 | " \n", 221 | " \n", 222 | " \n", 223 | " \n", 224 | " \n", 225 | " \n", 226 | " \n", 227 | " \n", 228 | " \n", 229 | " \n", 230 | " \n", 231 | " \n", 232 | " \n", 233 | " \n", 234 | " \n", 235 | " \n", 236 | " \n", 237 | " \n", 238 | " \n", 239 | " \n", 240 | " \n", 241 | " \n", 242 | " \n", 243 | " \n", 244 | " \n", 245 | " \n", 246 | " \n", 247 | " \n", 248 | " \n", 249 | " \n", 250 | " \n", 251 | " \n", 252 | " \n", 253 | " \n", 254 | " \n", 255 | " \n", 256 | " \n", 257 | " \n", 258 | "
domaincount
0sld.cu11002591
1cult.cu5197445
2facebook.com1985543
3icrt.cu1936127
4cubava.cu967903
.........
20402yardbarker.com1
20403bufetetoro.com1
20404understandingwar.org1
20405headaches.org1
20406nursite.com.ar1
\n", 259 | "

20407 rows × 2 columns

\n", 260 | "
\n", 261 | " \n", 271 | " \n", 272 | " \n", 309 | "\n", 310 | " \n", 334 | "
\n", 335 | "
\n", 336 | " " 337 | ] 338 | }, 339 | "metadata": {}, 340 | "execution_count": 4 341 | } 342 | ] 343 | }, 344 | { 345 | "cell_type": "markdown", 346 | "source": [ 347 | "# Create our first visualization\n", 348 | "\n", 349 | "What does the distribution of domains look like?\n", 350 | "\n", 351 | "Here we can see which domains occur most frequently within the collection." 352 | ], 353 | "metadata": { 354 | "id": "l7KkFWeYIIAp" 355 | } 356 | }, 357 | { 358 | "cell_type": "code", 359 | "metadata": { 360 | "colab": { 361 | "base_uri": "https://localhost:8080/", 362 | "height": 857 363 | }, 364 | "id": "C_X_YSD4PyQi", 365 | "outputId": "72a830e8-4f94-4dfb-8996-ec6067f5b3a3" 366 | }, 367 | "source": [ 368 | "top_domains = domains.sort_values(\"count\", ascending=False).head(10)\n", 369 | "\n", 370 | "top_domains_bar = (\n", 371 | " alt.Chart(top_domains)\n", 372 | " .mark_bar()\n", 373 | " .encode(\n", 374 | " x=alt.X(\"domain:O\", title=\"Domain\", sort=\"-y\"),\n", 375 | " y=alt.Y(\"count:Q\", title=\"Count, Mean of Count\"),\n", 376 | " )\n", 377 | ")\n", 378 | "\n", 379 | "top_domains_rule = (\n", 380 | " alt.Chart(top_domains).mark_rule(color=\"red\").encode(y=\"mean(count):Q\")\n", 381 | ")\n", 382 | "\n", 383 | "top_domains_text = top_domains_bar.mark_text(align=\"center\", baseline=\"bottom\").encode(\n", 384 | " text=\"count:Q\"\n", 385 | ")\n", 386 | "\n", 387 | "(top_domains_bar + top_domains_rule + top_domains_text).properties(\n", 388 | " width=1400, height=700, title=\"Domains Distribution\"\n", 389 | ")" 390 | ], 391 | "execution_count": 5, 392 | "outputs": [ 393 | { 394 | "output_type": "execute_result", 395 | "data": { 396 | "text/html": [ 397 | "\n", 398 | "
\n", 399 | "" 452 | ], 453 | "text/plain": [ 454 | "alt.LayerChart(...)" 455 | ] 456 | }, 457 | "metadata": {}, 458 | "execution_count": 5 459 | } 460 | ] 461 | }, 462 | { 463 | "cell_type": "markdown", 464 | "source": [ 465 | "## Top Level Domain Analysis\n", 466 | "\n", 467 | "pandas allows you to create new columns in a DataFrame based off of existing data. This comes in handy for a number of use cases with the available data that we have. In this case, let's create a new column, `tld`, which is based off an existing column, 'domain'. This example should provide you with an implementation pattern for expanding on these datasets to do further research and analysis.\n", 468 | "\n", 469 | "A [top-level domain](https://en.wikipedia.org/wiki/Top-level_domain) refers to the highest domain in an address - i.e. `.ca`, `.com`, `.org`, or yes, even `.pizza`.\n", 470 | "\n", 471 | "Things get a bit complicated, however, in some national TLDs. While `qc.ca` (the domain for Quebec) isn't really a top-level domain, it has many of the features of one as people can directly register under it. Below, we'll use the command `suffix` to include this. \n", 472 | "\n", 473 | "> You can learn more about suffixes at https://publicsuffix.org.\n", 474 | "\n", 475 | "We'll take the `domain` column and extract the `tld` from it using [`tldextract`](https://github.com/john-kurkowski/tldextract).\n", 476 | "\n", 477 | "First we'll add the [`tldextract`](https://github.com/john-kurkowski/tldextract) library to the notebook. Then, we'll create the new column." 478 | ], 479 | "metadata": { 480 | "id": "sYUfJ2PRILda" 481 | } 482 | }, 483 | { 484 | "cell_type": "code", 485 | "source": [ 486 | "%%capture\n", 487 | "\n", 488 | "!pip install tldextract" 489 | ], 490 | "metadata": { 491 | "id": "k3FbVW71IQFs" 492 | }, 493 | "execution_count": 6, 494 | "outputs": [] 495 | }, 496 | { 497 | "cell_type": "code", 498 | "source": [ 499 | "import tldextract\n", 500 | "\n", 501 | "domains[\"tld\"] = domains.apply(\n", 502 | " lambda row: tldextract.extract(row.domain).suffix, axis=1\n", 503 | ")\n", 504 | "domains" 505 | ], 506 | "metadata": { 507 | "colab": { 508 | "base_uri": "https://localhost:8080/", 509 | "height": 441 510 | }, 511 | "id": "_wbejg5zISq_", 512 | "outputId": "f32ac550-3475-4ee8-ab82-97175674ae95" 513 | }, 514 | "execution_count": 7, 515 | "outputs": [ 516 | { 517 | "output_type": "stream", 518 | "name": "stdout", 519 | "text": [ 520 | "Warning: total number of rows (20407) exceeds max_rows (20000). Falling back to pandas display.\n" 521 | ] 522 | }, 523 | { 524 | "output_type": "execute_result", 525 | "data": { 526 | "text/plain": [ 527 | " domain count tld\n", 528 | "0 sld.cu 11002591 cu\n", 529 | "1 cult.cu 5197445 cu\n", 530 | "2 facebook.com 1985543 com\n", 531 | "3 icrt.cu 1936127 cu\n", 532 | "4 cubava.cu 967903 cu\n", 533 | "... ... ... ...\n", 534 | "20402 yardbarker.com 1 com\n", 535 | "20403 bufetetoro.com 1 com\n", 536 | "20404 understandingwar.org 1 org\n", 537 | "20405 headaches.org 1 org\n", 538 | "20406 nursite.com.ar 1 com.ar\n", 539 | "\n", 540 | "[20407 rows x 3 columns]" 541 | ], 542 | "text/html": [ 543 | "\n", 544 | "
\n", 545 | "
\n", 546 | "
\n", 547 | "\n", 560 | "\n", 561 | " \n", 562 | " \n", 563 | " \n", 564 | " \n", 565 | " \n", 566 | " \n", 567 | " \n", 568 | " \n", 569 | " \n", 570 | " \n", 571 | " \n", 572 | " \n", 573 | " \n", 574 | " \n", 575 | " \n", 576 | " \n", 577 | " \n", 578 | " \n", 579 | " \n", 580 | " \n", 581 | " \n", 582 | " \n", 583 | " \n", 584 | " \n", 585 | " \n", 586 | " \n", 587 | " \n", 588 | " \n", 589 | " \n", 590 | " \n", 591 | " \n", 592 | " \n", 593 | " \n", 594 | " \n", 595 | " \n", 596 | " \n", 597 | " \n", 598 | " \n", 599 | " \n", 600 | " \n", 601 | " \n", 602 | " \n", 603 | " \n", 604 | " \n", 605 | " \n", 606 | " \n", 607 | " \n", 608 | " \n", 609 | " \n", 610 | " \n", 611 | " \n", 612 | " \n", 613 | " \n", 614 | " \n", 615 | " \n", 616 | " \n", 617 | " \n", 618 | " \n", 619 | " \n", 620 | " \n", 621 | " \n", 622 | " \n", 623 | " \n", 624 | " \n", 625 | " \n", 626 | " \n", 627 | " \n", 628 | " \n", 629 | " \n", 630 | " \n", 631 | " \n", 632 | " \n", 633 | " \n", 634 | " \n", 635 | " \n", 636 | " \n", 637 | "
domaincounttld
0sld.cu11002591cu
1cult.cu5197445cu
2facebook.com1985543com
3icrt.cu1936127cu
4cubava.cu967903cu
............
20402yardbarker.com1com
20403bufetetoro.com1com
20404understandingwar.org1org
20405headaches.org1org
20406nursite.com.ar1com.ar
\n", 638 | "

20407 rows × 3 columns

\n", 639 | "
\n", 640 | " \n", 650 | " \n", 651 | " \n", 688 | "\n", 689 | " \n", 713 | "
\n", 714 | "
\n", 715 | " " 716 | ] 717 | }, 718 | "metadata": {}, 719 | "execution_count": 7 720 | } 721 | ] 722 | }, 723 | { 724 | "cell_type": "code", 725 | "source": [ 726 | "tld_count = domains[\"tld\"].value_counts()\n", 727 | "tld_count" 728 | ], 729 | "metadata": { 730 | "colab": { 731 | "base_uri": "https://localhost:8080/" 732 | }, 733 | "id": "BiT8SNV_IZbX", 734 | "outputId": "33c92b94-59db-484b-860d-e674c2469598" 735 | }, 736 | "execution_count": 8, 737 | "outputs": [ 738 | { 739 | "output_type": "execute_result", 740 | "data": { 741 | "text/plain": [ 742 | "com 9179\n", 743 | "org 3192\n", 744 | "net 783\n", 745 | "es 609\n", 746 | "cu 446\n", 747 | " ... \n", 748 | "mus.br 1\n", 749 | "gov.hk 1\n", 750 | "gov.pl 1\n", 751 | "edu.mk 1\n", 752 | "edu.my 1\n", 753 | "Name: tld, Length: 484, dtype: int64" 754 | ] 755 | }, 756 | "metadata": {}, 757 | "execution_count": 8 758 | } 759 | ] 760 | }, 761 | { 762 | "cell_type": "code", 763 | "source": [ 764 | "tld_count = (\n", 765 | " domains[\"tld\"]\n", 766 | " .value_counts()\n", 767 | " .rename_axis(\"TLD\")\n", 768 | " .reset_index(name=\"Count\")\n", 769 | " .head(10)\n", 770 | ")\n", 771 | "\n", 772 | "tld_bar = (\n", 773 | " alt.Chart(tld_count)\n", 774 | " .mark_bar()\n", 775 | " .encode(x=alt.X(\"TLD:O\", sort=\"-y\"), y=alt.Y(\"Count:Q\"))\n", 776 | ")\n", 777 | "\n", 778 | "tld_rule = alt.Chart(tld_count).mark_rule(color=\"red\").encode(y=\"mean(Count):Q\")\n", 779 | "\n", 780 | "tld_text = tld_bar.mark_text(align=\"center\", baseline=\"bottom\").encode(text=\"Count:Q\")\n", 781 | "\n", 782 | "(tld_bar + tld_rule + tld_text).properties(\n", 783 | " width=1400, height=700, title=\"Top Level Domain Distribution\"\n", 784 | ")" 785 | ], 786 | "metadata": { 787 | "colab": { 788 | "base_uri": "https://localhost:8080/", 789 | "height": 810 790 | }, 791 | "id": "0ZKqaADbIbbr", 792 | "outputId": "930d20ee-d594-4d2e-f538-2c055e8709d4" 793 | }, 794 | "execution_count": 9, 795 | "outputs": [ 796 | { 797 | "output_type": "execute_result", 798 | "data": { 799 | "text/html": [ 800 | "\n", 801 | "
\n", 802 | "" 855 | ], 856 | "text/plain": [ 857 | "alt.LayerChart(...)" 858 | ] 859 | }, 860 | "metadata": {}, 861 | "execution_count": 9 862 | } 863 | ] 864 | } 865 | ] 866 | } -------------------------------------------------------------------------------- /arch/html-file-information.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "provenance": [], 7 | "include_colab_link": true 8 | }, 9 | "kernelspec": { 10 | "name": "python3", 11 | "display_name": "Python 3" 12 | }, 13 | "language_info": { 14 | "name": "python" 15 | } 16 | }, 17 | "cells": [ 18 | { 19 | "cell_type": "markdown", 20 | "metadata": { 21 | "id": "view-in-github", 22 | "colab_type": "text" 23 | }, 24 | "source": [ 25 | "\"Open" 26 | ] 27 | }, 28 | { 29 | "cell_type": "markdown", 30 | "source": [ 31 | "# HTML Information Dataset Exploration\n", 32 | "\n", 33 | "We're going to take a look at a few examples of how we can explore the HTML Information dataset. \n", 34 | "\n", 35 | "The first thing we need to do is enter the URL for our HTML Information dataset in the cell below. You can get this by right clicking the Download icon, and selecting \"Copy Link\"." 36 | ], 37 | "metadata": { 38 | "id": "vAyuRQ2PJIdc" 39 | } 40 | }, 41 | { 42 | "cell_type": "code", 43 | "source": [ 44 | "dataset = 'https://webdata.archive-it.org/ait/files/download/ARCHIVEIT-14462/TextFilesInformationExtraction/html-file-information.csv.gz?access=UCQ7VUUU4NDLKSGIPQD2R2WUGLOQXWPQ' #@param {type:\"string\"}\n", 45 | "print(dataset)" 46 | ], 47 | "metadata": { 48 | "colab": { 49 | "base_uri": "https://localhost:8080/" 50 | }, 51 | "cellView": "form", 52 | "id": "RfhJiesWVpAf", 53 | "outputId": "507325b4-2e56-4aaf-bc7b-55b38fe221c8" 54 | }, 55 | "execution_count": 1, 56 | "outputs": [ 57 | { 58 | "output_type": "stream", 59 | "name": "stdout", 60 | "text": [ 61 | "https://webdata.archive-it.org/ait/files/download/ARCHIVEIT-14462/TextFilesInformationExtraction/html-file-information.csv.gz?access=UCQ7VUUU4NDLKSGIPQD2R2WUGLOQXWPQ\n" 62 | ] 63 | } 64 | ] 65 | }, 66 | { 67 | "cell_type": "markdown", 68 | "source": [ 69 | "## pandas\n", 70 | "\n", 71 | "Next, we'll setup our environment so we can load our HTML Information dataset into [pandas](https://pandas.pydata.org) DataFrames. If you're unfamiliar with DataFrames, but you've worked with spreadsheets before, you should quickly feel comfortable." 72 | ], 73 | "metadata": { 74 | "id": "Z14F2cIWJVW0" 75 | } 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": 2, 80 | "metadata": { 81 | "id": "Chh6tt3HHF1s" 82 | }, 83 | "outputs": [], 84 | "source": [ 85 | "import pandas as pd" 86 | ] 87 | }, 88 | { 89 | "cell_type": "markdown", 90 | "source": [ 91 | "## Data Table Display\n", 92 | "\n", 93 | "Colab includes an extension that renders pandas DataFrames into interactive displays that can be filtered, sorted, and explored dynamically. 
This can be very useful for taking a look at what each DataFrame provides, and doing some initial filtering!\n", 94 | "\n", 95 | "Data table display for pandas DataFrames can be enabled by running:\n", 96 | "```python\n", 97 | "%load_ext google.colab.data_table\n", 98 | "```\n", 99 | "and disabled by running\n", 100 | "```python\n", 101 | "%unload_ext google.colab.data_table\n", 102 | "```" 103 | ], 104 | "metadata": { 105 | "id": "sH81XCf3I3xY" 106 | } 107 | }, 108 | { 109 | "cell_type": "code", 110 | "source": [ 111 | "%load_ext google.colab.data_table" 112 | ], 113 | "metadata": { 114 | "id": "-qyCnbvBI7n6" 115 | }, 116 | "execution_count": 3, 117 | "outputs": [] 118 | }, 119 | { 120 | "cell_type": "markdown", 121 | "source": [ 122 | "## Loading our ARCH Dataset as a DataFrame\n", 123 | "\n", 124 | "---\n", 125 | "\n", 126 | "\n", 127 | "Next, we'll create a pandas DataFrame from our dataset and show a preview of it using the Data Table Display." 128 | ], 129 | "metadata": { 130 | "id": "6prR7j1zI_D5" 131 | } 132 | }, 133 | { 134 | "cell_type": "code", 135 | "source": [ 136 | "html = pd.read_csv(dataset, compression='gzip')\n", 137 | "html" 138 | ], 139 | "metadata": { 140 | "id": "YL0LQaUNHRKx" 141 | }, 142 | "execution_count": 4, 143 | "outputs": [] 144 | }, 145 | { 146 | "cell_type": "markdown", 147 | "metadata": { 148 | "id": "3HPwOCNAvqMe" 149 | }, 150 | "source": [ 151 | "# Data Analysis\n", 152 | "\n", 153 | "Now that we have all of our datasets loaded up, we can begin to work with them!" 154 | ] 155 | }, 156 | { 157 | "cell_type": "markdown", 158 | "metadata": { 159 | "id": "J6Pkg0prv3BE" 160 | }, 161 | "source": [ 162 | "## Counting total files and unique files\n", 163 | "\n", 164 | "Let's take a quick look at how to count items in DataFrames, and use total and unique files as an example to work with.\n", 165 | "\n", 166 | "It's definitely worth checking out the [pandas documentation](https://pandas.pydata.org/docs/index.html). There are a lot of good examples available, along with a robust [API reference](https://pandas.pydata.org/docs/reference/index.html#api)." 167 | ] 168 | }, 169 | { 170 | "cell_type": "markdown", 171 | "metadata": { 172 | "id": "DFX4Gl3wv7bi" 173 | }, 174 | "source": [ 175 | "\n", 176 | "### How many html files are in this collection?\n", 177 | "\n", 178 | "We can take our `html` variable and try a couple of functions to get the same answer.\n", 179 | "\n", 180 | "1. `len(html.index)`\n", 181 | " * Get the length of the DataFrame's index.\n", 182 | "2. `html.shape[0]`\n", 183 | " * Get the shape or dimensionality of the DataFrame, and take the first item in the tuple.\n", 184 | "3. 
`html.count()`\n", 185 | " * Count the number of rows for each column.\n", 186 | "\n" 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "metadata": { 192 | "id": "HTv8Oet3jiTH" 193 | }, 194 | "source": [ 195 | "len(html.index)" 196 | ], 197 | "execution_count": 5, 198 | "outputs": [] 199 | }, 200 | { 201 | "cell_type": "code", 202 | "metadata": { 203 | "id": "6rYEERnTjifk" 204 | }, 205 | "source": [ 206 | "html.shape[0]" 207 | ], 208 | "execution_count": 6, 209 | "outputs": [] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "metadata": { 214 | "id": "bn-1v127aKIG" 215 | }, 216 | "source": [ 217 | "html.count()" 218 | ], 219 | "execution_count": 7, 220 | "outputs": [] 221 | }, 222 | { 223 | "cell_type": "markdown", 224 | "metadata": { 225 | "id": "38veKiPhwKo4" 226 | }, 227 | "source": [ 228 | "### How many unique html files are in the collection?\n", 229 | "\n", 230 | "We can see if an HTML file is unique or not by computing an [MD5 hash](https://en.wikipedia.org/wiki/MD5#MD5_hashes) of it, and comparing them. The exact same html file might have a filename of `example.html` or `foo.html`. If the hash is computed for each, we can see that even with different file names, they are actually the same html file. So, since we have both `md5` and `sha1` hash columns available in our DataFrame, we can just find the unique values, and count them!\n", 231 | "\n", 232 | "\n" 233 | ] 234 | }, 235 | { 236 | "cell_type": "code", 237 | "metadata": { 238 | "id": "WesM3kQowM5B" 239 | }, 240 | "source": [ 241 | "len(html.md5.unique())" 242 | ], 243 | "execution_count": 8, 244 | "outputs": [] 245 | }, 246 | { 247 | "cell_type": "markdown", 248 | "metadata": { 249 | "id": "ZIXkI0-1wWQf" 250 | }, 251 | "source": [ 252 | "### What are the top 10 most occurring html files in the collection?\n", 253 | "\n", 254 | "Here we can take advantage of [`value_counts()`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.value_counts.html) to provide us with a list of MD5 hashes and their respective counts." 255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "metadata": { 260 | "id": "8Ts03OFyjPIM" 261 | }, 262 | "source": [ 263 | "html[\"md5\"].value_counts().head(10)" 264 | ], 265 | "execution_count": 9, 266 | "outputs": [] 267 | }, 268 | { 269 | "cell_type": "markdown", 270 | "metadata": { 271 | "id": "FG7pGZUEwlaI" 272 | }, 273 | "source": [ 274 | "\n", 275 | "### What's the information around all of the occurrences of `d41d8cd98f00b204e9800998ecf8427e`?\n", 276 | "\n", 277 | "What, you mean you don't know what `d41d8cd98f00b204e9800998ecf8427e` means? It's the MD5 hash of an empty file.\n", 278 | "\n", 279 | "Let's find those HTML files in the DataFrame. Here we can see the filenames and URLs used for each of these empty responses.\n" 280 | ] 281 | }, 282 | { 283 | "cell_type": "code", 284 | "metadata": { 285 | "id": "msmmm65lkSIK" 286 | }, 287 | "source": [ 288 | "html.loc[html[\"md5\"] == \"d41d8cd98f00b204e9800998ecf8427e\"]" 289 | ], 290 | "execution_count": null, 291 | "outputs": [] 292 | }, 293 | { 294 | "cell_type": "markdown", 295 | "metadata": { 296 | "id": "GbLLZW2awzCv" 297 | }, 298 | "source": [ 299 | "### What are the top 10 most occurring filenames in the collection?\n", 300 | "\n", 301 | "Note that this is of course different from the MD5 results up above. Here we are focusing _just_ on filename. 
So `a16180790160.html`, for example, might actually refer to different HTML files that happen to have the same name.\n", 302 | "\n", 303 | "Here we can use `value_counts()` again, but this time we'll create a variable for the top filenames so we can use it later.\n", 304 | "\n" 305 | ] 306 | }, 307 | { 308 | "cell_type": "code", 309 | "metadata": { 310 | "id": "pQaw54ACkwdZ" 311 | }, 312 | "source": [ 313 | "top_filenames = html[\"filename\"].value_counts().head(10)\n", 314 | "top_filenames" 315 | ], 316 | "execution_count": null, 317 | "outputs": [] 318 | }, 319 | { 320 | "cell_type": "markdown", 321 | "metadata": { 322 | "id": "Z7F3re20BQRI" 323 | }, 324 | "source": [ 325 | "### Let's create our first graph!\n", 326 | "\n", 327 | "We'll first plot the data with the pandas [plot](https://pandas.pydata.org/docs/reference/api/pandas.Series.plot.html) functionality, and then with [Altair](https://altair-viz.github.io/)." 328 | ] 329 | }, 330 | { 331 | "cell_type": "code", 332 | "metadata": { 333 | "id": "sRvlstfsBWEZ" 334 | }, 335 | "source": [ 336 | "top_filenames_chart = top_filenames.plot.bar(figsize=(25, 10))\n", 337 | "\n", 338 | "top_filenames_chart.set_title(\"Top Filenames\", fontsize=22)\n", 339 | "top_filenames_chart.set_xlabel(\"Filename\", fontsize=20)\n", 340 | "top_filenames_chart.set_ylabel(\"Count\", fontsize=20)" 341 | ], 342 | "execution_count": null, 343 | "outputs": [] 344 | }, 345 | { 346 | "cell_type": "markdown", 347 | "metadata": { 348 | "id": "pQgeOObvgLvK" 349 | }, 350 | "source": [ 351 | "Now let's set up [Altair](https://altair-viz.github.io/), and plot the data. Altair is useful for creating visualizations since they can be easily exported as a PNG or SVG." 352 | ] 353 | }, 354 | { 355 | "cell_type": "code", 356 | "metadata": { 357 | "id": "Q7Z4J6qjWaVM" 358 | }, 359 | "source": [ 360 | "import altair as alt" 361 | ], 362 | "execution_count": null, 363 | "outputs": [] 364 | }, 365 | { 366 | "cell_type": "code", 367 | "metadata": { 368 | "id": "s0xwvILYWkgg" 369 | }, 370 | "source": [ 371 | "top_filenames_altair = (\n", 372 | " html[\"filename\"]\n", 373 | " .value_counts()\n", 374 | " .head(10)\n", 375 | " .rename_axis(\"Filename\")\n", 376 | " .reset_index(name=\"Count\")\n", 377 | ")\n", 378 | "\n", 379 | "filenames_bar = (\n", 380 | " alt.Chart(top_filenames_altair)\n", 381 | " .mark_bar()\n", 382 | " .encode(x=alt.X(\"Filename:O\", sort=\"-y\"), y=alt.Y(\"Count:Q\"))\n", 383 | ")\n", 384 | "\n", 385 | "filenames_rule = (\n", 386 | " alt.Chart(top_filenames_altair).mark_rule(color=\"red\").encode(y=\"mean(Count):Q\")\n", 387 | ")\n", 388 | "\n", 389 | "\n", 390 | "filenames_text = filenames_bar.mark_text(align=\"center\", baseline=\"bottom\").encode(\n", 391 | " text=\"Count:Q\"\n", 392 | ")\n", 393 | "\n", 394 | "(filenames_bar + filenames_rule + filenames_text).properties(\n", 395 | " width=1400, height=700, title=\"Top Filenames\"\n", 396 | ")" 397 | ], 398 | "execution_count": null, 399 | "outputs": [] 400 | }, 401 | { 402 | "cell_type": "markdown", 403 | "metadata": { 404 | "id": "BneaN9cgGoly" 405 | }, 406 | "source": [ 407 | "### How about a file format distribution?\n", 408 | "\n", 409 | "What _kind_ of html files are present? We can discover this by checking their \"media type\", or [MIME type](https://en.wikipedia.org/wiki/Media_type). 
\n", 410 | "\n", 411 | "\n", 412 | "\n", 413 | "\n" 414 | ] 415 | }, 416 | { 417 | "cell_type": "code", 418 | "metadata": { 419 | "id": "RDd-J8D-GwDk" 420 | }, 421 | "source": [ 422 | "html_mime_types = (\n", 423 | " html[\"mime_type_tika\"]\n", 424 | " .value_counts()\n", 425 | " .head(5)\n", 426 | " .rename_axis(\"MIME Type\")\n", 427 | " .reset_index(name=\"Count\")\n", 428 | ")\n", 429 | "\n", 430 | "html_mimes_bar = (\n", 431 | " alt.Chart(html_mime_types)\n", 432 | " .mark_bar()\n", 433 | " .encode(x=alt.X(\"MIME Type:O\", sort=\"-y\"), y=alt.Y(\"Count:Q\"))\n", 434 | ")\n", 435 | "\n", 436 | "html_mime_rule = (\n", 437 | " alt.Chart(html_mime_types).mark_rule(color=\"red\").encode(y=\"mean(Count):Q\")\n", 438 | ")\n", 439 | "\n", 440 | "html_mime_text = html_mimes_bar.mark_text(align=\"center\", baseline=\"bottom\").encode(\n", 441 | " text=\"Count:Q\"\n", 442 | ")\n", 443 | "\n", 444 | "(html_mimes_bar + html_mime_rule + html_mime_text).properties(\n", 445 | " width=1400, height=700, title=\"HTML File Format Distribution\"\n", 446 | ")" 447 | ], 448 | "execution_count": null, 449 | "outputs": [] 450 | }, 451 | { 452 | "cell_type": "markdown", 453 | "metadata": { 454 | "id": "QUJR-jjqNxCL" 455 | }, 456 | "source": [ 457 | "### How do I get the actual html?\n", 458 | "\n", 459 | "...or, how do I get to the actual binary files described by each file format information derivative?\n", 460 | "\n", 461 | "There are a few options!\n", 462 | "\n", 463 | "1. `wget` or `curl` from the live URL, or a replay URL\n", 464 | " * Live web URL\n", 465 | " * `wget` or `curl` the value of the `url` column\n", 466 | " * Replay web URL\n", 467 | " * `wget` or `curl` the value of the `crawl_date` and `url` column using the following pattern:\n", 468 | " * `https://web.archive.org/web/` + `crawl_date` + `/` + `url`\n", 469 | " * https://web.archive.org/web/20120119124734/http://www.archive.org/images/glogo.png\n", 470 | " * `http://wayback.archive-it.org/14462/` + `crawl_date` + `/` + `url`\n", 471 | " * https://wayback.archive-it.org/14462/20210524212740/https://ruebot.net/visualization/elxn42/featured_hu33a17dfb90e2c5ed77f783db14a6e53a_5126291_550x0_resize_q90_box_2.png\n", 472 | "2. Use a scripting language, such as Python\n", 473 | " * Make use of the `url` and `filename` columns (and `crawl_date` if you want to use the replay URL)\n", 474 | " * `import requests`\n", 475 | " * `r = requests.get(url, allow_redirects=True)`\n", 476 | " * `open('filename', 'wb').write(r.content)`\n", 477 | "3. Use the [Archives Unleashed Toolkit](https://aut.docs.archivesunleashed.org/docs/extract-binary) (if you have access to the W/ARC files)." 478 | ] 479 | }, 480 | { 481 | "cell_type": "markdown", 482 | "source": [ 483 | "If you wanted to download the HTML files using the replay URL, below is a method for doing so.\n", 484 | "\n", 485 | "First, you'll want to set up a replay base URL. Here we'll use the Archive-It Wayback instance for the collection." 486 | ], 487 | "metadata": { 488 | "id": "8yoFE2xLAlwk" 489 | } 490 | }, 491 | { 492 | "cell_type": "code", 493 | "source": [ 494 | "wayback_url = 'http://wayback.archive-it.org/14462/'" 495 | ], 496 | "metadata": { 497 | "id": "RxrOHn_2AicZ" 498 | }, 499 | "execution_count": null, 500 | "outputs": [] 501 | }, 502 | { 503 | "cell_type": "markdown", 504 | "source": [ 505 | "Next we'll create a new column using a lambda function. 
If you're familiar with working in spreadsheets, what we're doing here is basically concatenating some column values together and creating a new column." 506 | ], 507 | "metadata": { 508 | "id": "jCTmrEBGA-Yd" 509 | } 510 | }, 511 | { 512 | "cell_type": "code", 513 | "source": [ 514 | "html['replay_url'] = html.apply(lambda row: str(wayback_url + str(row['crawl_date']) + \"/\" + row['url']), axis=1)" 515 | ], 516 | "metadata": { 517 | "id": "p5HoWxkFA6C8" 518 | }, 519 | "execution_count": null, 520 | "outputs": [] 521 | }, 522 | { 523 | "cell_type": "markdown", 524 | "source": [ 525 | "Then we can export that new column out to a file, so we can use it with `wget` to download all the html files!" 526 | ], 527 | "metadata": { 528 | "id": "q87tSYyIBIiZ" 529 | } 530 | }, 531 | { 532 | "cell_type": "code", 533 | "source": [ 534 | "html['replay_url'].head().to_csv('14462_html_urls.txt', index=False, header=False)" 535 | ], 536 | "metadata": { 537 | "id": "wyhpsf9wBN1a" 538 | }, 539 | "execution_count": null, 540 | "outputs": [] 541 | }, 542 | { 543 | "cell_type": "markdown", 544 | "source": [ 545 | "Finally, we can pass the file to `wget` to use as a download list. You can also speed this process up using `xargs` or `parallel`." 546 | ], 547 | "metadata": { 548 | "id": "81a3q7feu5Ji" 549 | } 550 | }, 551 | { 552 | "cell_type": "code", 553 | "source": [ 554 | "!wget --random-wait -i 14462_html_urls.txt" 555 | ], 556 | "metadata": { 557 | "id": "D_p4qGKoBRaZ" 558 | }, 559 | "execution_count": null, 560 | "outputs": [] 561 | } 562 | ] 563 | } -------------------------------------------------------------------------------- /assets/jupyter-shell.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/archivesunleashed/notebooks/8211b24ab6106e4da5f422b30cbc8b7bac83b4b6/assets/jupyter-shell.png -------------------------------------------------------------------------------- /assets/juypter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/archivesunleashed/notebooks/8211b24ab6106e4da5f422b30cbc8b7bac83b4b6/assets/juypter.png -------------------------------------------------------------------------------- /assets/ny-schedule.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/archivesunleashed/notebooks/8211b24ab6106e4da5f422b30cbc8b7bac83b4b6/assets/ny-schedule.png -------------------------------------------------------------------------------- /datathon-nyc/README.md: -------------------------------------------------------------------------------- 1 | # Archives Unleashed NYC Datathon 2 | 3 | Welcome to the Archives Unleashed NY Datathon! Below you will find a variety of resources that will be useful throughout our event. 
4 | 5 | * [Requirements](#requirements) 6 | * [Schedule](#schedule) 7 | * [Team Collaboration Resources](#team-collaboration-resources) 8 | * [Homework Refresher](#homework-refresher) 9 | * [Additional/External Tools for Analysis](#additionalexternal-tools-for-analysis) 10 | * [Analysis Platforms](#analysis-platforms) 11 | * [Inspiration](#inspiration) 12 | * [Virtual Machines](#virtual-machines) 13 | * [Getting Started](#getting-started) 14 | * [Shell into assigned VM](#shell-into-assigned-vm) 15 | * [Start Jupyter Notebook](#start-jupyter-notebook) 16 | * [Start Spark with AUT](#start-spark-with-aut) 17 | * [Datasets](#datasets) 18 | 19 | ## Requirements 20 | 21 | - **Zoom** (login information will be provided via Slack/email). 22 | - **Slack** (datathon-ny-2020): our team will use Slack to communicate with participants. You can also use this platform to create a team channel (free) to collaborate (text + calls) during the datathon. 23 | - [GitHub Account](https://github.com/join) and [Google Account](https://accounts.google.com/signin/v2/identifier?service=g4np&passive=1209600&continue=https%3A%2F%2Fwww.google.com%2Fnonprofits%2Faccount%2Fsignup%3Flocality%3Dus&followup=https%3A%2F%2Fwww.google.com%2Fnonprofits%2Faccount%2Fsignup%3Flocality%3Dus&flowName=GlifWebSignIn&flowEntry=ServiceLogin) to work with our resources and your teams. 24 | 25 | - **Wifi + Space**: since we are all working remotely, find yourself a comfortable working space with a reliable internet connection. 26 | 27 | ## Schedule 28 | 29 | The datathon will be held March 26-27 (EST), online via Zoom. 30 | 31 | The event will start off with introductions and a look at the tools you’ll be using. After a team formation activity, the majority of our time will be dedicated to group work. We will also be doing a few check-ins via Zoom to make sure everyone is doing fine. Final presentations will happen on Friday. 32 | 33 | ![](../assets/ny-schedule.png) 34 | 35 | Unfortunately, because we can’t meet in person we won’t be holding a dinner/social, but feel free to use our hashtag **#hackarchives** in your social media posts. 36 | 37 | ## Team Collaboration Resources 38 | 39 | - **[#NY Datathon (Online) Resources](https://drive.google.com/drive/u/0/folders/1KdqBwpUTheYei_IjPaIXFxVLI5UH1_dL)**: This is a shared folder for participants, housing our introductory Google slides, additional resources, and where teams will save their final presentations. 40 | - **[Team Final Projects Folder](https://drive.google.com/drive/u/0/folders/138xKFQwHrjJEnruZ21lkK5UAxFPU2GLz)**: we have set up a folder where you can directly work on your presentations. We ask that you use Google Slides to help us with quick transitions between groups. Please name your slides using the following convention: AU-NY2020-TeamName. 41 | - **[Quick Guide to Setting up Colab Notebooks](https://youtu.be/JDXQRUp_Tx4)**: If you need a quick review of how to set up your Colab Notebook environment, we have a short tutorial. 42 | - **Project Examples**: Need a bit more inspiration before starting? 
Check out projects from previous datathons: [Toronto](https://archivesunleashed.org/toronto/) | [Vancouver](https://archivesunleashed.org/vancouver/) | [Washington](https://archivesunleashed.org/washington/) 43 | 44 | 45 | ## Homework Refresher 46 | 47 | - **[Command Line/Terminal Tutorial](https://programminghistorian.org/en/lessons/intro-to-bash)**: For those new to the command line, or if you'd like a refresher, check out the Programming Historian tutorial, Introduction to the Bash Command Line, by Ian Milligan and James Baker. 48 | - **[Archives Unleashed Toolkit Walkthrough](https://github.com/archivesunleashed/aut-docs/blob/master/current/toolkit-walkthrough.md)**: While we’ll be doing a lot of work in the notebooks during the datathon, it is good to have a conceptual understanding of working with data. 49 | - **Archives Unleashed Notebooks Reading**: Nick Ruest has been working on some new methods of working with derivatives created through the Archives Unleashed Cloud. To learn more about this development, check out our latest Medium post: [Cloud-hosted web archive data: The winding path to web archive collections as data](https://news.archivesunleashed.org/cloud-hosted-web-archive-data-the-winding-path-to-web-archive-collections-as-data-a2b3428701b7). 50 | - **[Notebooks Repository Walkthrough]()**: we suggest that you run through the two notebooks created for this event to become familiar with how the environment works and the types of analysis you can run. In each link click the “Open in Colab” button at the top of the page to launch and start exploring. 51 | - [parquet_pandas_stonewall.ipynb](https://github.com/archivesunleashed/notebooks/blob/master/parquet_pandas_stonewall.ipynb) 52 | - [parquet_text_analyis_popline.ipynb](https://github.com/archivesunleashed/notebooks/blob/master/parquet_text_analyis_popline.ipynb) 53 | 54 | ## Additional/External Tools for Analysis 55 | 56 | - [AUK Tutorials](https://cloud.archivesunleashed.org/derivatives): provide basics on using a variety of external tools to work with dataset derivatives. 57 | - [An Introduction to Gephi (Beginner)](https://cloud.archivesunleashed.org/derivatives/basic-gephi) 58 | - [Network Graphing Archived Websites With Gephi (Intermediate)](https://cloud.archivesunleashed.org/derivatives/gephi) 59 | - [Grep - Filtering the Full-Text Derivative File](https://cloud.archivesunleashed.org/derivatives/text-filtering) 60 | - [Text Analysis Part One: Beyond the Keyword Search: Using AntConc](https://cloud.archivesunleashed.org/derivatives/text-antconc) 61 | - [Text Analysis Part Two: Sentiment Analysis With the Natural Language Toolkit](https://cloud.archivesunleashed.org/derivatives/text-sentiment) 62 | - [Voyant](https://voyant-tools.org) 63 | 64 | Also don’t forget about Excel (or Google spreadsheets)! 65 | 66 | ## Analysis Platforms 67 | 68 | We have a couple of options for analysis platforms ([datasets are listed below](datathon-nyc#datasets)): 69 | 70 | - If you'd like to keep things relatively easy, and browser-based, you're welcome to use [Google Colaboratory](https://colab.research.google.com). 
We have a couple of options, in addition to the two mentioned above, available in the repo ([parquet_pandas_example.ipynb](https://github.com/archivesunleashed/notebooks/blob/master/parquet_pandas_example.ipynb), [parquet_pyspark_example.ipynb](https://github.com/archivesunleashed/notebooks/blob/master/parquet_pyspark_example.ipynb), and [parquet_text_analyis.ipynb](https://github.com/archivesunleashed/notebooks/blob/master/parquet_text_analyis.ipynb)) that can be used as starting points. 71 | - If you're comfortable with the command line, you're welcome to use one of the virtual machines provided by [Compute Canada](https://www.computecanada.ca/home/). 72 | 73 | The virtual machines and the machine backing a given Google Colab notebook are roughly similar resource-wise. If you'd like to use a Compute Canada virtual machine, you'll have a bit more storage space, and you'll have much more control. You're welcome to install whatever you need on the machines with `apt` to complete your project. 74 | 75 | ## Inspiration 76 | 77 | If you're looking for inspiration, check out the following notebook resources: 78 | 79 | - [GLAM Workbench](https://glam-workbench.github.io/) 80 | - [Awesome Jupyter GLAM](https://github.com/LibraryCarpentry/awesome-jupyter-glam) 81 | - [Getting started with ODate](https://o-date.github.io/support/notebooks-toc/) 82 | - [Jupyter notebooks for digital humanities](https://github.com/quinnanya/dh-jupyter) 83 | - [Spark NLP Workshop](https://github.com/JohnSnowLabs/spark-nlp-workshop) 84 | - [UW Interactive Data Lab, Data Visualization Curriculum](https://github.com/uwdata/visualization-curriculum) 85 | - [Sentiment Analysis with Pyspark](https://github.com/tthustla/setiment_analysis_pyspark/blob/master/Sentiment%20Analysis%20with%20PySpark.ipynb) 86 | - [Analysis of Car Accidents in Canada using PySpark](https://github.com/SinghGursimran/Analysis-of-Car-Accidents-in-Canada-using-PySpark/) 87 | - [Save Page Now Analysis](https://github.com/edsu/spn) 88 | 89 | If you'd like to add your notebook to the repo at the end of the datathon, we'd love to have it! 90 | 91 | ## Virtual Machines 92 | 93 | **c8-30gb-430gb** 94 | 95 | - 8 cores 96 | - 30G RAM 97 | - 11G `/` 98 | - 398G `/mnt` 99 | - Python 3.7.3 ([Anaconda](https://www.anaconda.com/distribution/)) 100 | - findspark, wordcloud, spacy, pyarrow, jupyter, tldextract, jupyter_contrib_nbextensions, pyspark 101 | - feel free to `conda install` or `pip install` whatever else you need 102 | - OpenJDK 8 103 | - Spark 2.4.5 104 | - `SPARK_HOME=/home/ubuntu/spark` 105 | - jq 106 | 107 | Four machines available: 108 | 109 | - 206.167.181.146 (datathon1) 110 | - 206.167.182.14 (datathon2) 111 | - 206.167.181.104 (datathon3) 112 | - 206.167.181.105 (datathon4) 113 | 114 | **c8-45gb-430gb** 115 | 116 | - 8 cores 117 | - 45G RAM 118 | - 11G `/` 119 | - 398G `/mnt` 120 | - Python 3.7.3 ([Anaconda](https://www.anaconda.com/distribution/)) 121 | - findspark, wordcloud, spacy, pyarrow, jupyter, tldextract, jupyter_contrib_nbextensions, pyspark 122 | - feel free to `conda install` or `pip install` whatever else you need 123 | - OpenJDK 8 124 | - Spark 2.4.5 125 | - `SPARK_HOME=/home/ubuntu/spark` 126 | - jq 127 | 128 | One machine available: 129 | 130 | - 206.167.181.253 (datathon5) 131 | 132 | ## Getting Started 133 | 134 | Please note, the datathon hosts (Ian, Jimmy, Nick, and Sam) all use macOS or Linux variants. If you are on a Windows machine, things might be a little bit more difficult. 
If you have the [Windows Subsystem for Linux](https://www.howtogeek.com/249966/how-to-install-and-use-the-linux-bash-shell-on-windows-10/) installed, you should be in a really good place. 135 | 136 | You may want to create a separate folder on your desktop or home directory to keep all of your datathon work in; this will also allow you to point your terminal to one directory (folder). 137 | 138 | ### Shell into assigned VM 139 | 140 | Ian or Nick will provide you with a key to access a virtual machine via ssh. You'll need to download or copy that key to your own machine, and apply the appropriate permissions to it. The permissions on the key should be `600`. You can do this with the following command on your own laptop before shelling in: 141 | 142 | ```bash 143 | chmod 600 /path/to/archives-hackathon.key 144 | ``` 145 | 146 | Once you have the permissions set on the key, you can shell into your assigned datathon virtual machine with the provided key and IP address: 147 | 148 | Example: 149 | 150 | ```bash 151 | ssh -L 8888:localhost:8888 -i ~/.ssh/archives-hackathon.key ubuntu@206.167.181.253 152 | ``` 153 | 154 | ### Start Jupyter Notebook 155 | 156 | ```bash 157 | cd /mnt/notebooks/datathon-nyc 158 | jupyter notebook --no-browser 159 | ``` 160 | 161 | Click on the localhost link to open up Jupyter. 162 | 163 | ![](../assets/jupyter-shell.png) 164 | 165 | Select one of the two example notebooks. 166 | 167 | ![](../assets/juypter.png) 168 | 169 | 170 | ### Start Spark with AUT 171 | 172 | If you'd like to use Apache Spark and the Archives Unleashed Toolkit to analyze WARC/ARCs, you can get Spark started with the toolkit with the following command: 173 | 174 | ```bash 175 | ~/spark/bin/spark-shell --packages "io.archivesunleashed:aut:0.50.0" 176 | ``` 177 | 178 | Documentation for the 0.50.0 release is available [here](https://github.com/archivesunleashed/aut-docs/tree/master/aut-0.50.0), and if you need a refresher on the datathon homework, it is available [here](https://github.com/archivesunleashed/aut-docs/blob/master/aut-0.50.0/toolkit-walkthrough.md). 179 | 180 | ## Datasets 181 | 182 | Scholarly derivatives created on cloud.archivesunleashed.org, and Parquet files, should be downloaded to `/mnt/data`. If any team would like some WARC/ARC files from a collection, please work with Nick Ruest as soon as possible (`ruebot` in Slack). 
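As a rough sketch of that download step in Python: the URL and filename here are placeholders, not a real dataset, so copy an actual file link from one of the Zenodo records listed below. A plain `wget` or `curl` from the shell works just as well.

```python
# Minimal sketch: fetch a derivative file onto a VM with Python.
# `requests` ships with the Anaconda install on these machines.
# The URL and filename below are placeholders; swap in a real file
# link from one of the Zenodo records listed in this section.
import requests

url = "https://zenodo.org/record/0000000/files/example-derivatives.zip"  # placeholder
destination = "/mnt/data/example-derivatives.zip"  # placeholder

response = requests.get(url, allow_redirects=True, timeout=300)
response.raise_for_status()

with open(destination, "wb") as f:
    f.write(response.content)
```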
183 | 184 | **Ivy Plus Libraries Confederation** 185 | 186 | - [National Statistical Offices and Central Banks Web Archive](https://zenodo.org/record/3633683) 187 | - [Contemporary Composers Web Archive (CCWA)](https://zenodo.org/record/3692559) 188 | - [#MeToo and the Women's Rights Movement in China Web Archive](https://zenodo.org/record/3633681) 189 | - [Geologic Field Trip Guidebooks Web Archive](https://zenodo.org/record/3666295) 190 | - [Literary Authors from Europe and Eurasia Web Archive](https://zenodo.org/record/3632728) 191 | - [Web Archive of Independent News Sites on Turkish Affairs](https://zenodo.org/record/3633234) 192 | - [State Elections Web Archive](https://zenodo.org/record/3635634) 193 | - [Brazilian Presidential Transition (2018) Web Archive](https://zenodo.org/record/3659692) 194 | - [Collaborative Architecture, Urbanism, and Sustainability Web Archive (CAUSEWAY)](https://zenodo.org/record/3674173) 195 | - [Global Webcomics Web Archive](https://zenodo.org/record/3633737) 196 | - [Queer Japan Web Archive](https://zenodo.org/record/3633284) 197 | - [Extreme Right Movements in Europe](https://zenodo.org/record/3633161) 198 | - [Latin American and Caribbean Contemporary Art Web Archive](https://zenodo.org/record/3633118) 199 | - [Popline and K4Health Web Archive](https://zenodo.org/record/3633022) 200 | - [Eastern Europe and Former Soviet Union Web Archive](https://zenodo.org/record/3633031) 201 | - [Independent Documentary Filmmakers from China, Hong Kong, and Taiwan Web Archive](https://zenodo.org/record/3632912) 202 | 203 | **Columbia University Libraries** 204 | 205 | - [General](https://zenodo.org/record/3633290) 206 | - [Resistance](https://zenodo.org/record/3660457) 207 | - [Stonewall 50 Commemoration](https://zenodo.org/record/3631347) 208 | - [Freely Accessible eJournals](https://zenodo.org/record/3633671) 209 | - [Avery Library Historic Preservation and Urban Planning](https://doi.org/10.5683/SP2/Z68EVJ) 210 | - [Rare Book and Manuscript Library](https://zenodo.org/record/3701593) 211 | - [Burke Library New York City Religions](https://zenodo.org/record/3701455) 212 | 213 | ## Sponsors + Special Thanks 214 | 215 | This event is possible thanks to generous support from: 216 | 217 | [Andrew W. Mellon Foundation](https://mellon.org/), [Columbia University Libraries](https://library.columbia.edu), [Ivy Plus Libraries Confederation](https://library.columbia.edu/collections/web-archives/Ivy_Plus_Libraries.html), [Faculty of Arts](https://uwaterloo.ca/arts/) and [David R. Cheriton School of Computer Science](https://cs.uwaterloo.ca/) at the [University of Waterloo](https://uwaterloo.ca/), [York University Libraries](https://www.library.yorku.ca/web/), [Compute Canada](https://www.computecanada.ca/), and [Start Smart Labs](http://www.startsmartlabs.com/). 218 | 219 | We'd also like to say a special thanks to Columbia University and the Ivy Plus Libraries Confederation for providing access to their collections! 
220 | -------------------------------------------------------------------------------- /geocities/geocities_domain_frequency.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "geocities-domain-frequency.ipynb", 7 | "provenance": [], 8 | "authorship_tag": "ABX9TyM2I/5HIhKxgXC4r24AYu/i", 9 | "include_colab_link": true 10 | }, 11 | "kernelspec": { 12 | "name": "python3", 13 | "display_name": "Python 3" 14 | }, 15 | "language_info": { 16 | "name": "python" 17 | } 18 | }, 19 | "cells": [ 20 | { 21 | "cell_type": "markdown", 22 | "metadata": { 23 | "id": "view-in-github", 24 | "colab_type": "text" 25 | }, 26 | "source": [ 27 | "\"Open" 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "source": [ 33 | "# Working with Geocities Derivatives | Domain Frequency\n", 34 | "\n", 35 | "In this notebook we'll download some data from the [GeoCities Web Archive Collection Derivatives](https://archive.org/details/geocities-webarchive-collection-derivatives) to demonstrate a few examples of further exploration of web archive data." 36 | ], 37 | "metadata": { 38 | "id": "ZMfzPF61MeaJ" 39 | } 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "source": [ 44 | "# Datasets\n", 45 | "\n", 46 | "First, we will need to download some derivative data from [GeoCities Web Archive Collection Derivatives](https://archive.org/details/geocities-webarchive-collection-derivatives)." 47 | ], 48 | "metadata": { 49 | "id": "SRt0p0ViMkSh" 50 | } 51 | }, 52 | { 53 | "cell_type": "code", 54 | "source": [ 55 | "%%capture\n", 56 | "\n", 57 | "!mkdir data\n", 58 | "\n", 59 | "!wget \"https://archive.org/download/geocities-webarchive-collection-derivatives/geocities-domain-frequency.csv.gz\" -P data" 60 | ], 61 | "metadata": { 62 | "id": "WO_U-V1sMu6N" 63 | }, 64 | "execution_count": 1, 65 | "outputs": [] 66 | }, 67 | { 68 | "cell_type": "markdown", 69 | "source": [ 70 | "Unzip the data." 71 | ], 72 | "metadata": { 73 | "id": "bGKddBwyNBvJ" 74 | } 75 | }, 76 | { 77 | "cell_type": "code", 78 | "source": [ 79 | "!gunzip data/*" 80 | ], 81 | "metadata": { 82 | "colab": { 83 | "base_uri": "https://localhost:8080/" 84 | }, 85 | "id": "ycdyNiE8NC81", 86 | "outputId": "034e9302-44d7-4d0c-9d2a-c8947b884352" 87 | }, 88 | "execution_count": 23, 89 | "outputs": [ 90 | { 91 | "output_type": "stream", 92 | "name": "stdout", 93 | "text": [ 94 | "gzip: data/geocities-domain-frequency.csv: unknown suffix -- ignored\n" 95 | ] 96 | } 97 | ] 98 | }, 99 | { 100 | "cell_type": "markdown", 101 | "source": [ 102 | "Let's check our `data` directory, and make sure they've downloaded." 
103 | ], 104 | "metadata": { 105 | "id": "YqJrkzNFNFnE" 106 | } 107 | }, 108 | { 109 | "cell_type": "code", 110 | "source": [ 111 | "!ls -1 data" 112 | ], 113 | "metadata": { 114 | "colab": { 115 | "base_uri": "https://localhost:8080/" 116 | }, 117 | "id": "fDEOuQHTNNyr", 118 | "outputId": "e0ad699b-2115-413e-cbc1-4f7b27a2f188" 119 | }, 120 | "execution_count": 24, 121 | "outputs": [ 122 | { 123 | "output_type": "stream", 124 | "name": "stdout", 125 | "text": [ 126 | "geocities-domain-frequency.csv\n" 127 | ] 128 | } 129 | ] 130 | }, 131 | { 132 | "cell_type": "markdown", 133 | "source": [ 134 | "# Environment\n", 135 | "\n", 136 | "Next, we'll set up our environment so we can load our derivatives into [pandas](https://pandas.pydata.org), build charts with [Altair](https://altair-viz.github.io/), and use the [Data Table extension for Colab](https://colab.research.google.com/notebooks/data_table.ipynb)." 137 | ], 138 | "metadata": { 139 | "id": "2x2RuLJnOWT0" 140 | } 141 | }, 142 | { 143 | "cell_type": "code", 144 | "source": [ 145 | "import pandas as pd\n", 146 | "import altair as alt" 147 | ], 148 | "metadata": { 149 | "id": "9_Tz1e1FOZ_E" 150 | }, 151 | "execution_count": 28, 152 | "outputs": [] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "source": [ 157 | "%load_ext google.colab.data_table" 158 | ], 159 | "metadata": { 160 | "id": "1q_8190bOgNn" 161 | }, 162 | "execution_count": 19, 163 | "outputs": [] 164 | }, 165 | { 166 | "cell_type": "markdown", 167 | "source": [ 168 | "## Let's take a look at the domain frequency derivative." 169 | ], 170 | "metadata": { 171 | "id": "H2nGI5pYOjTP" 172 | } 173 | }, 174 | { 175 | "cell_type": "code", 176 | "source": [ 177 | "domain_frequency = pd.read_csv(\"data/geocities-domain-frequency.csv\")\n", 178 | "domain_frequency" 179 | ], 180 | "metadata": { 181 | "colab": { 182 | "base_uri": "https://localhost:8080/", 183 | "height": 441 184 | }, 185 | "id": "8jWQvOOpOoYh", 186 | "outputId": "ad264a43-1ef5-483b-a7ad-473ad5493153" 187 | }, 188 | "execution_count": 25, 189 | "outputs": [ 190 | { 191 | "output_type": "stream", 192 | "name": "stdout", 193 | "text": [ 194 | "Warning: total number of rows (147923) exceeds max_rows (20000). Falling back to pandas display.\n" 195 | ] 196 | }, 197 | { 198 | "output_type": "execute_result", 199 | "data": { 200 | "text/plain": [ 201 | " domain count\n", 202 | "0 geocities.com 57922449\n", 203 | "1 yahoo.com 1110567\n", 204 | "2 amazon.com 87675\n", 205 | "3 myspace.com 67706\n", 206 | "4 bravenet.com 62904\n", 207 | "... ... ...\n", 208 | "147918 manosguardanapo.blogspot.com 1\n", 209 | "147919 bragaplasticos.com.ar 1\n", 210 | "147920 abed.org.br 1\n", 211 | "147921 mailearners.net 1\n", 212 | "147922 greensfirst.com 1\n", 213 | "\n", 214 | "[147923 rows x 2 columns]" 215 | ], 216 | "text/html": [ 217 | "\n", 218 |
\n", 219 | "
\n", 220 | "
\n", 221 | "\n", 234 | "\n", 235 | " \n", 236 | " \n", 237 | " \n", 238 | " \n", 239 | " \n", 240 | " \n", 241 | " \n", 242 | " \n", 243 | " \n", 244 | " \n", 245 | " \n", 246 | " \n", 247 | " \n", 248 | " \n", 249 | " \n", 250 | " \n", 251 | " \n", 252 | " \n", 253 | " \n", 254 | " \n", 255 | " \n", 256 | " \n", 257 | " \n", 258 | " \n", 259 | " \n", 260 | " \n", 261 | " \n", 262 | " \n", 263 | " \n", 264 | " \n", 265 | " \n", 266 | " \n", 267 | " \n", 268 | " \n", 269 | " \n", 270 | " \n", 271 | " \n", 272 | " \n", 273 | " \n", 274 | " \n", 275 | " \n", 276 | " \n", 277 | " \n", 278 | " \n", 279 | " \n", 280 | " \n", 281 | " \n", 282 | " \n", 283 | " \n", 284 | " \n", 285 | " \n", 286 | " \n", 287 | " \n", 288 | " \n", 289 | " \n", 290 | " \n", 291 | " \n", 292 | " \n", 293 | " \n", 294 | " \n", 295 | " \n", 296 | " \n", 297 | " \n", 298 | " \n", 299 | "
domaincount
0geocities.com57922449
1yahoo.com1110567
2amazon.com87675
3myspace.com67706
4bravenet.com62904
.........
147918manosguardanapo.blogspot.com1
147919bragaplasticos.com.ar1
147920abed.org.br1
147921mailearners.net1
147922greensfirst.com1
\n", 300 | "

147923 rows × 2 columns

\n", 301 | "
\n", 302 | " \n", 312 | " \n", 313 | " \n", 350 | "\n", 351 | " \n", 375 | "
\n", 376 | "
\n", 377 | " " 378 | ] 379 | }, 380 | "metadata": {}, 381 | "execution_count": 25 382 | } 383 | ] 384 | }, 385 | { 386 | "cell_type": "markdown", 387 | "source": [ 388 | "What does the distribution of domains look like?\n", 389 | "\n", 390 | "Here we can see which domains are the most frequent within the collection." 391 | ], 392 | "metadata": { 393 | "id": "TCGVtS9NPBOP" 394 | } 395 | }, 396 | { 397 | "cell_type": "code", 398 | "source": [ 399 | "top_domains = domain_frequency.sort_values(\"count\", ascending=False).head(10)\n", 400 | "\n", 401 | "top_domains_bar = (\n", 402 | " alt.Chart(top_domains)\n", 403 | " .mark_bar()\n", 404 | " .encode(\n", 405 | " x=alt.X(\"domain:O\", title=\"Domain\", sort=\"-y\"),\n", 406 | " y=alt.Y(\"count:Q\", title=\"Count, Mean of Count\"),\n", 407 | " )\n", 408 | ")\n", 409 | "\n", 410 | "top_domains_rule = (\n", 411 | " alt.Chart(top_domains).mark_rule(color=\"red\").encode(y=\"mean(count):Q\")\n", 412 | ")\n", 413 | "\n", 414 | "top_domains_text = top_domains_bar.mark_text(align=\"center\", baseline=\"bottom\").encode(\n", 415 | " text=\"count:Q\"\n", 416 | ")\n", 417 | "\n", 418 | "(top_domains_bar + top_domains_rule + top_domains_text).properties(\n", 419 | " width=1400, height=700, title=\"Domains Distribution\"\n", 420 | ")" 421 | ], 422 | "metadata": { 423 | "colab": { 424 | "base_uri": "https://localhost:8080/", 425 | "height": 900 426 | }, 427 | "id": "7ZD7BOqCPCtR", 428 | "outputId": "ac7c9f19-2333-4af1-f42e-eec65dcf02fe" 429 | }, 430 | "execution_count": 29, 431 | "outputs": [ 432 | { 433 | "output_type": "execute_result", 434 | "data": { 435 | "text/html": [ 436 | "\n", 437 | "
\n", 438 | "" 491 | ], 492 | "text/plain": [ 493 | "alt.LayerChart(...)" 494 | ] 495 | }, 496 | "metadata": {}, 497 | "execution_count": 29 498 | } 499 | ] 500 | }, 501 | { 502 | "cell_type": "markdown", 503 | "source": [ 504 | "### Top Level Domain Analysis\n", 505 | "\n", 506 | "pandas allows you to create new columns in a DataFrame based off of existing data. This comes in handy for a number of use cases with the available data that we have. In this case, let's create a new column, `tld`, which is based off an existing column, 'domain'. This example should provide you with an implementation pattern for expanding on these datasets to do further research and analysis.\n", 507 | "\n", 508 | "A [top-level domain](https://en.wikipedia.org/wiki/Top-level_domain) refers to the highest domain in an address - i.e. `.ca`, `.com`, `.org`, or yes, even `.pizza`.\n", 509 | "\n", 510 | "Things get a bit complicated, however, in some national TLDs. While `qc.ca` (the domain for Quebec) isn't really a top-level domain, it has many of the features of one as people can directly register under it. Below, we'll use the command `suffix` to include this. \n", 511 | "\n", 512 | "> You can learn more about suffixes at https://publicsuffix.org.\n", 513 | "\n", 514 | "We'll take the `domain` column and extract the `tld` from it with [`tldextract`](https://github.com/john-kurkowski/tldextract).\n", 515 | "\n", 516 | "First we'll add the [`tldextract`](https://github.com/john-kurkowski/tldextract) library to the notebook. Then, we'll create the new column." 517 | ], 518 | "metadata": { 519 | "id": "q9bX2kc_Psh0" 520 | } 521 | }, 522 | { 523 | "cell_type": "code", 524 | "source": [ 525 | "%%capture\n", 526 | "\n", 527 | "!pip install tldextract" 528 | ], 529 | "metadata": { 530 | "id": "FDRjlCfUPtmV" 531 | }, 532 | "execution_count": 30, 533 | "outputs": [] 534 | }, 535 | { 536 | "cell_type": "code", 537 | "source": [ 538 | "import tldextract\n", 539 | "\n", 540 | "domain_frequency[\"tld\"] = domain_frequency.apply(\n", 541 | " lambda row: tldextract.extract(row.domain).suffix, axis=1\n", 542 | ")\n", 543 | "domain_frequency" 544 | ], 545 | "metadata": { 546 | "colab": { 547 | "base_uri": "https://localhost:8080/", 548 | "height": 441 549 | }, 550 | "id": "TxDxk67_PwnX", 551 | "outputId": "96753d2b-62dc-4759-c9d5-7d97f9fb25e4" 552 | }, 553 | "execution_count": 31, 554 | "outputs": [ 555 | { 556 | "output_type": "stream", 557 | "name": "stdout", 558 | "text": [ 559 | "Warning: total number of rows (147923) exceeds max_rows (20000). Falling back to pandas display.\n" 560 | ] 561 | }, 562 | { 563 | "output_type": "execute_result", 564 | "data": { 565 | "text/plain": [ 566 | " domain count tld\n", 567 | "0 geocities.com 57922449 com\n", 568 | "1 yahoo.com 1110567 com\n", 569 | "2 amazon.com 87675 com\n", 570 | "3 myspace.com 67706 com\n", 571 | "4 bravenet.com 62904 com\n", 572 | "... ... ... ...\n", 573 | "147918 manosguardanapo.blogspot.com 1 com\n", 574 | "147919 bragaplasticos.com.ar 1 com.ar\n", 575 | "147920 abed.org.br 1 org.br\n", 576 | "147921 mailearners.net 1 net\n", 577 | "147922 greensfirst.com 1 com\n", 578 | "\n", 579 | "[147923 rows x 3 columns]" 580 | ], 581 | "text/html": [ 582 | "\n", 583 | "
\n", 584 | "
\n", 585 | "
\n", 586 | "\n", 599 | "\n", 600 | " \n", 601 | " \n", 602 | " \n", 603 | " \n", 604 | " \n", 605 | " \n", 606 | " \n", 607 | " \n", 608 | " \n", 609 | " \n", 610 | " \n", 611 | " \n", 612 | " \n", 613 | " \n", 614 | " \n", 615 | " \n", 616 | " \n", 617 | " \n", 618 | " \n", 619 | " \n", 620 | " \n", 621 | " \n", 622 | " \n", 623 | " \n", 624 | " \n", 625 | " \n", 626 | " \n", 627 | " \n", 628 | " \n", 629 | " \n", 630 | " \n", 631 | " \n", 632 | " \n", 633 | " \n", 634 | " \n", 635 | " \n", 636 | " \n", 637 | " \n", 638 | " \n", 639 | " \n", 640 | " \n", 641 | " \n", 642 | " \n", 643 | " \n", 644 | " \n", 645 | " \n", 646 | " \n", 647 | " \n", 648 | " \n", 649 | " \n", 650 | " \n", 651 | " \n", 652 | " \n", 653 | " \n", 654 | " \n", 655 | " \n", 656 | " \n", 657 | " \n", 658 | " \n", 659 | " \n", 660 | " \n", 661 | " \n", 662 | " \n", 663 | " \n", 664 | " \n", 665 | " \n", 666 | " \n", 667 | " \n", 668 | " \n", 669 | " \n", 670 | " \n", 671 | " \n", 672 | " \n", 673 | " \n", 674 | " \n", 675 | " \n", 676 | "
domaincounttld
0geocities.com57922449com
1yahoo.com1110567com
2amazon.com87675com
3myspace.com67706com
4bravenet.com62904com
............
147918manosguardanapo.blogspot.com1com
147919bragaplasticos.com.ar1com.ar
147920abed.org.br1org.br
147921mailearners.net1net
147922greensfirst.com1com
\n", 677 | "

147923 rows × 3 columns

\n", 678 | "
\n", 679 | " \n", 689 | " \n", 690 | " \n", 727 | "\n", 728 | " \n", 752 | "
\n", 753 | "
\n", 754 | " " 755 | ] 756 | }, 757 | "metadata": {}, 758 | "execution_count": 31 759 | } 760 | ] 761 | }, 762 | { 763 | "cell_type": "code", 764 | "source": [ 765 | "tld_count = domain_frequency[\"tld\"].value_counts()\n", 766 | "tld_count" 767 | ], 768 | "metadata": { 769 | "colab": { 770 | "base_uri": "https://localhost:8080/" 771 | }, 772 | "id": "K6z23OV_PzEH", 773 | "outputId": "04b25575-24f1-42c3-d580-d4446b1a2c24" 774 | }, 775 | "execution_count": 32, 776 | "outputs": [ 777 | { 778 | "output_type": "execute_result", 779 | "data": { 780 | "text/plain": [ 781 | "com 87723\n", 782 | "net 11855\n", 783 | "org 11018\n", 784 | "de 4450\n", 785 | "tk 3508\n", 786 | " ... \n", 787 | "sc.gov.br 1\n", 788 | "gen.nz 1\n", 789 | "kg 1\n", 790 | "tn.us 1\n", 791 | "wi.us 1\n", 792 | "Name: tld, Length: 741, dtype: int64" 793 | ] 794 | }, 795 | "metadata": {}, 796 | "execution_count": 32 797 | } 798 | ] 799 | }, 800 | { 801 | "cell_type": "code", 802 | "source": [ 803 | "tld_count = (\n", 804 | " domain_frequency[\"tld\"]\n", 805 | " .value_counts()\n", 806 | " .rename_axis(\"TLD\")\n", 807 | " .reset_index(name=\"Count\")\n", 808 | " .head(10)\n", 809 | ")\n", 810 | "\n", 811 | "tld_bar = (\n", 812 | " alt.Chart(tld_count)\n", 813 | " .mark_bar()\n", 814 | " .encode(x=alt.X(\"TLD:O\", sort=\"-y\"), y=alt.Y(\"Count:Q\"))\n", 815 | ")\n", 816 | "\n", 817 | "tld_rule = alt.Chart(tld_count).mark_rule(color=\"red\").encode(y=\"mean(Count):Q\")\n", 818 | "\n", 819 | "tld_text = tld_bar.mark_text(align=\"center\", baseline=\"bottom\").encode(text=\"Count:Q\")\n", 820 | "\n", 821 | "(tld_bar + tld_rule + tld_text).properties(\n", 822 | " width=1400, height=700, title=\"Top Level Domain Distribution\"\n", 823 | ")" 824 | ], 825 | "metadata": { 826 | "colab": { 827 | "base_uri": "https://localhost:8080/", 828 | "height": 830 829 | }, 830 | "id": "mwusvgV0P6Co", 831 | "outputId": "3348cb71-e01b-4c82-f380-a6e558d9a7b7" 832 | }, 833 | "execution_count": 33, 834 | "outputs": [ 835 | { 836 | "output_type": "execute_result", 837 | "data": { 838 | "text/html": [ 839 | "\n", 840 | "
\n", 841 | "" 894 | ], 895 | "text/plain": [ 896 | "alt.LayerChart(...)" 897 | ] 898 | }, 899 | "metadata": {}, 900 | "execution_count": 33 901 | } 902 | ] 903 | } 904 | ] 905 | } --------------------------------------------------------------------------------