├── assets
│   ├── juypter.png
│   ├── ny-schedule.png
│   └── jupyter-shell.png
├── README.md
├── .gitignore
├── PySpark Examples
│   ├── PySpark - Finding Hyperlinks within Collection on Pages with Certain Keyword.ipynb
│   ├── PySpark - aut standard derivatives.ipynb
│   ├── PySpark - Find Images Shared Between Domains.ipynb
│   └── PySpark - Extract Popular Images.ipynb
├── LICENSE
├── datathon-nyc
│   └── README.md
├── arch
│   ├── html-file-information.ipynb
│   └── domain-frequency.ipynb
└── geocities
    └── geocities_domain_frequency.ipynb
/assets/juypter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/archivesunleashed/notebooks/HEAD/assets/juypter.png
--------------------------------------------------------------------------------
/assets/ny-schedule.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/archivesunleashed/notebooks/HEAD/assets/ny-schedule.png
--------------------------------------------------------------------------------
/assets/jupyter-shell.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/archivesunleashed/notebooks/HEAD/assets/jupyter-shell.png
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Archives Unleashed Notebooks
2 |
 3 | Example notebooks for working with web archives using the Archives Unleashed Toolkit, and with derivatives generated by the Toolkit.
4 |
5 | ## License
6 |
7 | Licensed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0).
8 |
9 | ## Acknowledgments
10 |
11 | This work is primarily supported by the [Andrew W. Mellon Foundation](https://mellon.org/). Other financial and in-kind support comes from the [Social Sciences and Humanities Research Council](http://www.sshrc-crsh.gc.ca/), [Compute Canada](https://www.computecanada.ca/), the [Ontario Ministry of Research, Innovation, and Science](https://www.ontario.ca/page/ministry-research-innovation-and-science), [York University Libraries](https://www.library.yorku.ca/web/), [Start Smart Labs](http://www.startsmartlabs.com/), and the [Faculty of Arts](https://uwaterloo.ca/arts/) and [David R. Cheriton School of Computer Science](https://cs.uwaterloo.ca/) at the [University of Waterloo](https://uwaterloo.ca/).
12 |
13 | Any opinions, findings, and conclusions or recommendations expressed are those of the researchers and do not necessarily reflect the views of the sponsors.
14 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .ipynb_checkpoints/
2 | datathon-nyc/data/
3 | .DS_Store
4 | all-domains/
5 | full-text/
6 | test.gexf
7 | test.graphml
8 |
9 | # Byte-compiled / optimized / DLL files
10 | __pycache__/
11 | *.py[cod]
12 | *$py.class
13 |
14 | # C extensions
15 | *.so
16 |
17 | # Distribution / packaging
18 | .Python
19 | build/
20 | develop-eggs/
21 | dist/
22 | downloads/
23 | eggs/
24 | .eggs/
25 | lib/
26 | lib64/
27 | parts/
28 | sdist/
29 | var/
30 | wheels/
31 | pip-wheel-metadata/
32 | share/python-wheels/
33 | *.egg-info/
34 | .installed.cfg
35 | *.egg
36 | MANIFEST
37 |
38 | # PyInstaller
39 | # Usually these files are written by a python script from a template
40 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
41 | *.manifest
42 | *.spec
43 |
44 | # Installer logs
45 | pip-log.txt
46 | pip-delete-this-directory.txt
47 |
48 | # Unit test / coverage reports
49 | htmlcov/
50 | .tox/
51 | .nox/
52 | .coverage
53 | .coverage.*
54 | .cache
55 | nosetests.xml
56 | coverage.xml
57 | *.cover
58 | *.py,cover
59 | .hypothesis/
60 | .pytest_cache/
61 | cover/
62 |
63 | # Translations
64 | *.mo
65 | *.pot
66 |
67 | # Django stuff:
68 | *.log
69 | local_settings.py
70 | db.sqlite3
71 | db.sqlite3-journal
72 |
73 | # Flask stuff:
74 | instance/
75 | .webassets-cache
76 |
77 | # Scrapy stuff:
78 | .scrapy
79 |
80 | # Sphinx documentation
81 | docs/_build/
82 |
83 | # PyBuilder
84 | .pybuilder/
85 | target/
86 |
87 | # Jupyter Notebook
88 | .ipynb_checkpoints
89 |
90 | # IPython
91 | profile_default/
92 | ipython_config.py
93 |
94 | # pyenv
95 | # For a library or package, you might want to ignore these files since the code is
96 | # intended to run in multiple environments; otherwise, check them in:
97 | # .python-version
98 |
99 | # pipenv
100 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
101 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
102 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
103 | # install all needed dependencies.
104 | #Pipfile.lock
105 |
106 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
107 | __pypackages__/
108 |
109 | # Celery stuff
110 | celerybeat-schedule
111 | celerybeat.pid
112 |
113 | # SageMath parsed files
114 | *.sage.py
115 |
116 | # Environments
117 | .env
118 | .venv
119 | env/
120 | venv/
121 | ENV/
122 | env.bak/
123 | venv.bak/
124 |
125 | # Spyder project settings
126 | .spyderproject
127 | .spyproject
128 |
129 | # Rope project settings
130 | .ropeproject
131 |
132 | # mkdocs documentation
133 | /site
134 |
135 | # mypy
136 | .mypy_cache/
137 | .dmypy.json
138 | dmypy.json
139 |
140 | # Pyre type checker
141 | .pyre/
142 |
143 | # pytype static type analyzer
144 | .pytype/
145 |
146 | # Cython debug symbols
147 | cython_debug/
148 |
149 | # static files generated from Django application using `collectstatic`
150 | media
151 | static
152 |
--------------------------------------------------------------------------------
/PySpark Examples/PySpark - Finding Hyperlinks within Collection on Pages with Certain Keyword.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from aut import *\n",
10 | "from pyspark.sql.functions import col, explode_outer"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "webpages = WebArchive(sc, sqlContext, \"/home/nruest/Projects/au/sample-data/geocities\") \\\n",
20 | " .all() \\\n",
21 | " .filter(\"crawl_date is not NULL\")\\\n",
 22 |     "    .filter(~(col(\"url\").rlike(\".*robots\\\\.txt$\")) & (col(\"mime_type_web_server\").rlike(\"text/html\") | col(\"mime_type_web_server\").rlike(\"application/xhtml\\\\+xml\") | col(\"url\").rlike(\"(?i).*htm$\") | col(\"url\").rlike(\"(?i).*html$\")))\\\n",
23 | " .filter(col(\"http_status_code\") == 200)\\\n",
24 | " .select(\"domain\", \"url\", \"crawl_date\", explode_outer(extract_links(\"url\", \"raw_content\")).alias(\"link\")) \\\n",
25 | " .filter(col(\"raw_content\").like(\"%food%\")) \\\n",
26 | " .select(\"url\", \"domain\", \"crawl_date\", col(\"link._1\").alias(\"destination_page\"))"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": 3,
32 | "metadata": {},
33 | "outputs": [
34 | {
35 | "name": "stdout",
36 | "output_type": "stream",
37 | "text": [
38 | "[2022-05-29T22:12:01.819Z - 00000 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
39 | ]
40 | },
41 | {
42 | "name": "stderr",
43 | "output_type": "stream",
44 | "text": [
45 | "22/05/29 18:12:02 WARN PDFParser: J2KImageReader not loaded. JPEG2000 files will not be processed.\n",
46 | "See https://pdfbox.apache.org/2.0/dependencies.html#jai-image-io\n",
47 | "for optional dependencies.\n",
48 | "\n",
49 | "22/05/29 18:12:02 WARN TesseractOCRParser: Tesseract OCR is installed and will be automatically applied to image files unless\n",
50 | "you've excluded the TesseractOCRParser from the default parser.\n",
51 | "Tesseract may dramatically slow down content extraction (TIKA-2359).\n",
52 | "As of Tika 1.15 (and prior versions), Tesseract is automatically called.\n",
53 | "In future versions of Tika, users may need to turn the TesseractOCRParser on via TikaConfig.\n",
54 | "22/05/29 18:12:02 WARN SQLite3Parser: org.xerial's sqlite-jdbc is not loaded.\n",
55 | "Please provide the jar on your classpath to parse sqlite files.\n",
56 | "See tika-parsers/pom.xml for the correct version.\n"
57 | ]
58 | },
59 | {
60 | "name": "stdout",
61 | "output_type": "stream",
62 | "text": [
63 | "+-------------------------------------------------------------------------------------+-------------+--------------+--------------------------------------------+\n",
64 | "|url |domain |crawl_date |destination_page |\n",
65 | "+-------------------------------------------------------------------------------------+-------------+--------------+--------------------------------------------+\n",
66 | "|http://geocities.com/kelsoonbutler/boardDocs/BoardMeetingMinutes_AGM_March12_2002.htm|geocities.com|20091027143512|null |\n",
67 | "|http://geocities.com/krooyaimaha/health5.htm |geocities.com|20091027143546|http://geocities.com/krooyaimaha/health5.htm|\n",
68 | "|http://geocities.com/krooyaimaha/health5.htm |geocities.com|20091027143546|http://geocities.com/krooyaimaha/health5.htm|\n",
69 | "|http://geocities.com/krooyaimaha/health5.htm |geocities.com|20091027143546|http://geocities.com/krooyaimaha/health5.htm|\n",
70 | "|http://geocities.com/krooyaimaha/health5.htm |geocities.com|20091027143546|http://geocities.com/krooyaimaha/health5.htm|\n",
71 | "+-------------------------------------------------------------------------------------+-------------+--------------+--------------------------------------------+\n",
72 | "only showing top 5 rows\n",
73 | "\n"
74 | ]
75 | },
76 | {
77 | "name": "stderr",
78 | "output_type": "stream",
79 | "text": [
80 | "\r",
81 | " \r"
82 | ]
83 | }
84 | ],
85 | "source": [
86 | "webpages.show(5, False)"
87 | ]
88 | }
89 | ],
90 | "metadata": {
91 | "kernelspec": {
92 | "display_name": "Python 3 (ipykernel)",
93 | "language": "python",
94 | "name": "python3"
95 | },
96 | "language_info": {
97 | "codemirror_mode": {
98 | "name": "ipython",
99 | "version": 3
100 | },
101 | "file_extension": ".py",
102 | "mimetype": "text/x-python",
103 | "name": "python",
104 | "nbconvert_exporter": "python",
105 | "pygments_lexer": "ipython3",
106 | "version": "3.9.9"
107 | }
108 | },
109 | "nbformat": 4,
110 | "nbformat_minor": 2
111 | }
112 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2019 Archives Unleashed
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/PySpark Examples/PySpark - aut standard derivatives.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from aut import *\n",
10 | "from pyspark.sql.functions import col, desc"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 2,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "# Web archive collection; web pages.\n",
20 | "webpages = WebArchive(sc, sqlContext, \"/home/nruest/Projects/au/sample-data/geocities\").webpages()"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 3,
26 | "metadata": {},
27 | "outputs": [],
28 | "source": [
29 | "# Web archive collection; web graph.\n",
30 | "webgraph = WebArchive(sc, sqlContext, \"/home/nruest/Projects/au/sample-data/geocities\").webgraph()"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 4,
36 | "metadata": {},
37 | "outputs": [
38 | {
39 | "name": "stdout",
40 | "output_type": "stream",
41 | "text": [
42 | "[2022-05-28T20:17:58.285Z - 00009 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
43 | "[2022-05-28T20:17:58.285Z - 00003 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
44 | "[2022-05-28T20:17:58.285Z - 00005 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
45 | "[2022-05-28T20:17:58.285Z - 00002 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
46 | "[2022-05-28T20:17:58.285Z - 00008 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
47 | "[2022-05-28T20:17:58.285Z - 00006 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
48 | "[2022-05-28T20:17:58.285Z - 00004 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
49 | "[2022-05-28T20:17:58.285Z - 00001 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
50 | "[2022-05-28T20:17:58.285Z - 00000 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
51 | "[2022-05-28T20:17:58.285Z - 00007 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
52 | ]
53 | },
54 | {
55 | "name": "stderr",
56 | "output_type": "stream",
57 | "text": [
58 | "22/05/28 16:17:59 WARN PDFParser: J2KImageReader not loaded. JPEG2000 files will not be processed.\n",
59 | "See https://pdfbox.apache.org/2.0/dependencies.html#jai-image-io\n",
60 | "for optional dependencies.\n",
61 | "\n",
62 | "22/05/28 16:17:59 WARN TesseractOCRParser: Tesseract OCR is installed and will be automatically applied to image files unless\n",
63 | "you've excluded the TesseractOCRParser from the default parser.\n",
64 | "Tesseract may dramatically slow down content extraction (TIKA-2359).\n",
65 | "As of Tika 1.15 (and prior versions), Tesseract is automatically called.\n",
66 | "In future versions of Tika, users may need to turn the TesseractOCRParser on via TikaConfig.\n",
67 | "22/05/28 16:17:59 WARN SQLite3Parser: org.xerial's sqlite-jdbc is not loaded.\n",
68 | "Please provide the jar on your classpath to parse sqlite files.\n",
69 | "See tika-parsers/pom.xml for the correct version.\n",
70 | " \r"
71 | ]
72 | }
73 | ],
74 | "source": [
75 | "# Domain frequency file.\n",
76 | "webpages.groupBy(\"domain\")\\\n",
77 | " .count()\\\n",
78 | " .sort(col(\"count\")\\\n",
79 | " .desc())\\\n",
80 | " .write\\\n",
81 | " .option(\"timestampFormat\", \"yyyy/MM/dd HH:mm:ss ZZ\")\\\n",
82 | " .format(\"csv\")\\\n",
83 | " .option(\"escape\", \"\\\"\")\\\n",
84 | " .option(\"encoding\", \"utf-8\")\\\n",
85 | " .save(\"all-domains\")"
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": 5,
91 | "metadata": {},
92 | "outputs": [
93 | {
94 | "name": "stdout",
95 | "output_type": "stream",
96 | "text": [
97 | "[2022-05-28T20:22:47.865Z - 00439 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
98 | "[2022-05-28T20:22:47.868Z - 00432 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
99 | "[2022-05-28T20:22:47.867Z - 00434 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
100 | "[2022-05-28T20:22:47.866Z - 00435 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
101 | "[2022-05-28T20:22:47.869Z - 00433 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
102 | "[2022-05-28T20:22:47.868Z - 00438 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
103 | "[2022-05-28T20:22:47.877Z - 00440 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
104 | "[2022-05-28T20:22:47.877Z - 00436 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
105 | "[2022-05-28T20:22:47.890Z - 00437 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
106 | ]
107 | },
108 | {
109 | "name": "stderr",
110 | "output_type": "stream",
111 | "text": [
112 | "\r",
113 | "[Stage 5:> (0 + 10) / 10]\r"
114 | ]
115 | },
116 | {
117 | "name": "stdout",
118 | "output_type": "stream",
119 | "text": [
120 | "[2022-05-28T20:22:49.243Z - 00431 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
121 | ]
122 | },
123 | {
124 | "name": "stderr",
125 | "output_type": "stream",
126 | "text": [
127 | " \r"
128 | ]
129 | }
130 | ],
131 | "source": [
132 | "# Full-text.\n",
133 | "webpages.write\\\n",
134 | " .option(\"timestampFormat\", \"yyyy/MM/dd HH:mm:ss ZZ\")\\\n",
135 | " .format(\"csv\")\\\n",
136 | " .option(\"escape\", \"\\\"\")\\\n",
137 | " .option(\"encoding\", \"utf-8\")\\\n",
138 | " .save(\"full-text\")"
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": 6,
144 | "metadata": {},
145 | "outputs": [],
146 | "source": [
147 | "# Create DataFrame for GraphML output\n",
148 | "graph = webgraph.groupBy(\"crawl_date\", remove_prefix_www(extract_domain(\"src\")).alias(\"src_domain\"), remove_prefix_www(extract_domain(\"dest\")).alias(\"dest_domain\"))\\\n",
149 | " .count()\\\n",
150 | " .filter((col(\"dest_domain\").isNotNull()) & (col(\"dest_domain\") !=\"\"))\\\n",
151 | " .filter((col(\"src_domain\").isNotNull()) & (col(\"src_domain\") !=\"\"))\\\n",
152 | " .filter(col(\"count\") > 5)\\\n",
153 | " .orderBy(desc(\"count\"))"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": 7,
159 | "metadata": {},
160 | "outputs": [
161 | {
162 | "name": "stdout",
163 | "output_type": "stream",
164 | "text": [
165 | "[2022-05-28T20:27:28.329Z - 00449 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
166 | "[2022-05-28T20:27:28.329Z - 00445 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
167 | "[2022-05-28T20:27:28.329Z - 00446 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
168 | "[2022-05-28T20:27:28.329Z - 00447 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
169 | "[2022-05-28T20:27:28.329Z - 00441 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
170 | "[2022-05-28T20:27:28.329Z - 00444 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
171 | "[2022-05-28T20:27:28.330Z - 00450 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
172 | "[2022-05-28T20:27:28.329Z - 00448 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
173 | "[2022-05-28T20:27:28.332Z - 00443 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
174 | "[2022-05-28T20:27:28.332Z - 00442 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
175 | ]
176 | },
177 | {
178 | "name": "stderr",
179 | "output_type": "stream",
180 | "text": [
181 | " \r"
182 | ]
183 | }
184 | ],
185 | "source": [
186 | "# Write the GraphML out to a file.\n",
187 | "WriteGraphML(graph.collect(), \"test.graphml\")"
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": 8,
193 | "metadata": {},
194 | "outputs": [],
195 | "source": [
196 | "# Write the GEXF out to a file.\n",
197 | "\n",
198 |     "# Not part of the auk standard derivative process,\n",
199 |     "# but part of https://github.com/archivesunleashed/aut/pull/466 testing.\n",
200 | "\n",
201 | "WriteGEXF(graph.collect(), \"test.gexf\")"
202 | ]
203 | }
204 | ],
205 | "metadata": {
206 | "kernelspec": {
207 | "display_name": "Python 3 (ipykernel)",
208 | "language": "python",
209 | "name": "python3"
210 | },
211 | "language_info": {
212 | "codemirror_mode": {
213 | "name": "ipython",
214 | "version": 3
215 | },
216 | "file_extension": ".py",
217 | "mimetype": "text/x-python",
218 | "name": "python",
219 | "nbconvert_exporter": "python",
220 | "pygments_lexer": "ipython3",
221 | "version": "3.9.9"
222 | }
223 | },
224 | "nbformat": 4,
225 | "nbformat_minor": 2
226 | }
227 |
--------------------------------------------------------------------------------
/datathon-nyc/README.md:
--------------------------------------------------------------------------------
1 | # Archives Unleashed NYC Datathon
2 |
3 | Welcome to the Archives Unleashed NY Datathon! Below you will find a variety of resources that will be useful throughout our event.
4 |
5 | * [Requirements](#requirements)
6 | * [Schedule](#schedule)
7 | * [Team Collaboration Resources](#team-collaboration-resources)
8 | * [Homework Refresher](#homework-refresher)
9 | * [Additional/External Tools for Analysis](#additionalexternal-tools-for-analysis)
10 | * [Analysis Platforms](#analysis-platforms)
11 | * [Inspiration](#inspiration)
12 | * [Virtual Machines](#virtual-machines)
13 | * [Getting Started](#getting-started)
14 | * [Shell into assigned VM](#shell-into-assigned-vm)
15 | * [Start Jupyter Notebook](#start-jupyter-notebook)
16 | * [Start Spark with AUT](#start-spark-with-aut)
17 | * [Datasets](#datasets)
18 |
19 | ## Requirements
20 |
21 | - **Zoom** (login information will be provided via Slack/email).
22 | - **Slack** (datathon-ny-2020): our team will use Slack to communicate with participants. You can also use this platform to create a team channel (free) to collaborate (text + calls) during the datathon.
23 | - [GitHub Account](https://github.com/join) and [Google Account](https://accounts.google.com/signin/v2/identifier?service=g4np&passive=1209600&continue=https%3A%2F%2Fwww.google.com%2Fnonprofits%2Faccount%2Fsignup%3Flocality%3Dus&followup=https%3A%2F%2Fwww.google.com%2Fnonprofits%2Faccount%2Fsignup%3Flocality%3Dus&flowName=GlifWebSignIn&flowEntry=ServiceLogin) to work with our resources and your teams.
24 |
25 | - **Wifi + Space**: since we are all working remotely, find yourself a comfortable working space with a reliable internet connection.
26 |
27 | ## Schedule
28 |
29 | The datathon will be held March 26-27th (EST), online via Zoom.
30 |
31 | The event will start off with introductions and a look at the tools you’ll be using. After a team-formation activity, the majority of our time will be dedicated to group work. We will also do a few check-ins via Zoom to make sure everyone is doing well. Final presentations will happen on Friday.
32 |
33 | 
34 |
35 | Unfortunately, because we can’t meet in person, we won’t be holding a dinner/social, but feel free to use our hashtag **#hackarchives** in your social media posts.
36 |
37 | ## Team Collaboration Resources
38 |
39 | - **[#NY Datathon (Online) Resources](https://drive.google.com/drive/u/0/folders/1KdqBwpUTheYei_IjPaIXFxVLI5UH1_dL)**: This is a shared folder for participants, housing our introductory Google slides, additional resources, and where teams will save their final presentations.
40 | - **[Team Final Projects Folder](https://drive.google.com/drive/u/0/folders/138xKFQwHrjJEnruZ21lkK5UAxFPU2GLz)**: We have set up a folder where you can directly work on your presentations. We ask that you use Google Slides to help us with quick transitions between groups. Please name your slides using the following convention: AU-NY2020-TeamName.
41 | - **[Quick Guide to Setting up Colab Notebooks](https://youtu.be/JDXQRUp_Tx4)**: If you need a quick review of how to set up your Colab Notebook environment, we have a short tutorial.
42 | - **Project Examples**: Need a bit more inspiration before starting? Check out projects from previous datathons: [Toronto](https://archivesunleashed.org/toronto/) | [Vancouver](https://archivesunleashed.org/vancouver/) | [Washington](https://archivesunleashed.org/washington/)
43 |
44 |
45 | ## Homework Refresher
46 |
47 | - **[Command Line/Terminal Tutorial](https://programminghistorian.org/en/lessons/intro-to-bash)**: For those new to the command line, or if you'd like a refresher, check out the Programming Historian tutorial, Introduction to the Bash Command Line, by Ian Milligan and James Baker.
48 | - **[Archives Unleashed Toolkit Walkthrough](https://github.com/archivesunleashed/aut-docs/blob/master/current/toolkit-walkthrough.md)**: While we’ll be doing a lot of work in the notebooks during the datathon, it is good to have a conceptual understanding around working with data.
49 | - **Archives Unleashed Notebooks Reading**: Nick Ruest has been working on some new methods of working with derivatives created through the Archives Unleashed Cloud. To learn more about this development, check out our latest Medium post: [Cloud-hosted web archive data: The winding path to web archive collections as data](https://news.archivesunleashed.org/cloud-hosted-web-archive-data-the-winding-path-to-web-archive-collections-as-data-a2b3428701b7).
50 | - **[Notebooks Repository Walkthrough]()**: We suggest that you run through the two notebooks created for this event to become familiar with how the environment works and the types of analysis you can run. In each, click the “Open in Colab” button at the top of the page to launch and start exploring.
51 | - [parquet_pandas_stonewall.ipynb](https://github.com/archivesunleashed/notebooks/blob/master/parquet_pandas_stonewall.ipynb)
52 | - [parquet_text_analyis_popline.ipynb](https://github.com/archivesunleashed/notebooks/blob/master/parquet_text_analyis_popline.ipynb)
53 |
54 | ## Additional/External Tools for Analysis
55 |
56 | - [AUK Tutorials](https://cloud.archivesunleashed.org/derivatives): provide the basics of using a variety of external tools to work with dataset derivatives.
57 | - [An Introduction to Gephi (Beginner)](https://cloud.archivesunleashed.org/derivatives/basic-gephi)
58 | - [Network Graphing Archived Websites With Gephi (Intermediate)](https://cloud.archivesunleashed.org/derivatives/gephi)
59 | - [Grep - Filtering the Full-Text Derivative File](https://cloud.archivesunleashed.org/derivatives/text-filtering)
60 | - [Text Analysis Part One: Beyond the Keyword Search: Using AntConc](https://cloud.archivesunleashed.org/derivatives/text-antconc)
61 | - [Text Analysis Part Two: Sentiment Analysis With the Natural Language Toolkit](https://cloud.archivesunleashed.org/derivatives/text-sentiment)
62 | - [Voyant](https://voyant-tools.org)
63 |
64 | Also don’t forget about Excel (or Google spreadsheets)!
65 |
66 | ## Analysis Platforms
67 |
68 | We have a couple of options for analysis platforms ([datasets are listed below](#datasets)):
69 |
70 | - If you'd like to keep things relatively easy and browser-based, you're welcome to use [Google Colaboratory](https://colab.research.google.com). We have a couple of options, in addition to the two mentioned above, available in the repo ([parquet_pandas_example.ipynb](https://github.com/archivesunleashed/notebooks/blob/master/parquet_pandas_example.ipynb), [parquet_pyspark_example.ipynb](https://github.com/archivesunleashed/notebooks/blob/master/parquet_pyspark_example.ipynb), and [parquet_text_analyis.ipynb](https://github.com/archivesunleashed/notebooks/blob/master/parquet_text_analyis.ipynb)) that can be used as starting points.
71 | - If you're comfortable with the command line, you're welcome to use one of the virtual machines provided by [Compute Canada](https://www.computecanada.ca/home/).
72 |
73 | The virtual machines and the machine backing a given Google Colab notebook are roughly similar in resources. If you'd like to use a Compute Canada virtual machine, you'll have a bit more storage space and much more control. You're welcome to install whatever else you need on the machines with `apt` to complete your project.
74 |
75 | ## Inspiration
76 |
77 | If you're looking for inspiration, check out the following notebook resources:
78 |
79 | - [GLAM Workbench](https://glam-workbench.github.io/)
80 | - [Awesome Jupyter GLAM](https://github.com/LibraryCarpentry/awesome-jupyter-glam)
81 | - [Getting started with ODate](https://o-date.github.io/support/notebooks-toc/)
82 | - [Jupyter notebooks for digital humanities](https://github.com/quinnanya/dh-jupyter)
83 | - [Spark NLP Workshop](https://github.com/JohnSnowLabs/spark-nlp-workshop)
84 | - [UW Interactive Data Lab, Data Visualization Curriculum](https://github.com/uwdata/visualization-curriculum)
85 | - [Sentiment Analysis with Pyspark](https://github.com/tthustla/setiment_analysis_pyspark/blob/master/Sentiment%20Analysis%20with%20PySpark.ipynb)
86 | - [Analysis of Car Accidents in Canada using PySpark](https://github.com/SinghGursimran/Analysis-of-Car-Accidents-in-Canada-using-PySpark/)
87 | - [Save Page Now Analysis](https://github.com/edsu/spn)
88 |
89 | If you'd like to add your notebook to the repo at the end of the datathon, we'd love to have it!
90 |
91 | ## Virtual Machines
92 |
93 | **c8-30gb-430gb**
94 |
95 | - 8 cores
96 | - 30G RAM
97 | - 11G `/`
98 | - 398G `/mnt`
99 | - Python 3.7.3 ([Anaconda](https://www.anaconda.com/distribution/))
100 | - findspark, wordcloud, spacy, pyarrow, jupyter, tldextract, jupyter_contrib_nbextensions, pyspark
101 | - feel free to `conda install` or `pip install` whatever else you need
102 | - OpenJDK 8
103 | - Spark 2.4.5
104 | - `SPARK_HOME=/home/ubuntu/spark`
105 | - jq
106 |
107 | Four machines available:
108 |
109 | - 206.167.181.146 (datathon1)
110 | - 206.167.182.14 (datathon2)
111 | - 206.167.181.104 (datathon3)
112 | - 206.167.181.105 (datathon4)
113 |
114 | **c8-45gb-430gb**
115 |
116 | - 8 Cores
117 | - 45G RAM
118 | - 11G `/`
119 | - 398G `/mnt`
120 | - Python 3.7.3 ([Anaconda](https://www.anaconda.com/distribution/))
121 | - findspark, wordcloud, spacy, pyarrow, jupyter, tldextract, jupyter_contrib_nbextensions, pyspark
122 | - feel free to `conda install` or `pip install` whatever else you need
123 | - OpenJDK 8
124 | - Spark 2.4.5
125 | - `SPARK_HOME=/home/ubuntu/spark`
126 | - jq
127 |
128 | One machine available:
129 |
130 | - 206.167.181.253 (datathon5)
131 |
132 | ## Getting Started
133 |
134 | Please note, the datathon hosts (Ian, Jimmy, Nick, and Sam) all use macOS or Linux variants. If you are on a Windows machine, things might be a little bit more difficult. If you have the [Windows Subsystem for Linux](https://www.howtogeek.com/249966/how-to-install-and-use-the-linux-bash-shell-on-windows-10/) installed, you should be in a really good place.
135 |
136 | You may want to create a separate folder on your desktop or in your home directory to keep all of your datathon work in; this will also let you point your terminal at a single directory (folder).
137 |
138 | ### Shell into assigned VM
139 |
140 | Ian or Nick will provide you with a key to access a virtual machine via ssh. You'll need to download or copy that key to your own machine and apply the appropriate permissions to it. The permissions on the key should be `600`. You can set this with the following command on your own laptop before shelling in:
141 |
142 | ```bash
143 | chmod 600 /path/to/archives-hackathon.key
144 | ```
145 |
146 | Once you have the permissions set on the key, you can shell into your assigned datathon virtual machine with the provided key and IP address:
147 |
148 | Example:
149 |
150 | ```bash
151 | ssh -L 8888:localhost:8888 -i ~/.ssh/archives-hackathon.key ubuntu@206.167.181.253
152 | ```
153 |
154 | ### Start Jupyter Notebook
155 |
156 | ```bash
157 | cd /mnt/notebooks/datathon-nyc
158 | jupyter notebook --no-browser
159 | ```
160 |
161 | Click on the localhost link to open up Jupyter.
162 |
163 | 
164 |
165 | Select one of the two example notebooks.
166 |
167 | 
168 |
169 |
170 | ### Start Spark with AUT
171 |
172 | If you'd like to use Apache Spark and the Archives Unleashed Toolkit to analyze WARC/ARC files, you can start Spark with the toolkit using the following command:
173 |
174 | ```bash
175 | ~/spark/bin/spark-shell --packages "io.archivesunleashed:aut:0.50.0"
176 | ```
177 |
178 | Documentation for the 0.50.0 release is available [here](https://github.com/archivesunleashed/aut-docs/tree/master/aut-0.50.0), and if you need a refresher on the datathon homework, it is available [here](https://github.com/archivesunleashed/aut-docs/blob/master/aut-0.50.0/toolkit-walkthrough.md).
179 |
180 | ## Datasets
181 |
182 | Scholarly derivatives created on cloud.archivesunleashed.org and Parquet files should be downloaded to `/mnt/data`. If any team would like some WARC/ARC files from a collection, please work with Nick Ruest as soon as possible (`ruebot` in Slack).
183 |
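Once you have a Parquet derivative downloaded to `/mnt/data`, a minimal sketch of loading it with pandas and pyarrow (both preinstalled on the VMs) looks like the following; the file name and columns are placeholders for whichever derivative you grab:

```python
import pandas as pd

# Load a downloaded Parquet derivative into a DataFrame;
# pandas uses the preinstalled pyarrow as its Parquet engine.
# The path is a placeholder; point it at your actual download.
df = pd.read_parquet("/mnt/data/example-derivative.parquet")

# Inspect the schema and the first few rows before analyzing.
print(df.dtypes)
print(df.head())
```
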
184 | **Ivy Plus Libraries Confederation**
185 |
186 | - [National Statistical Offices and Central Banks Web Archive](https://zenodo.org/record/3633683)
187 | - [Contemporary Composers Web Archive (CCWA)](https://zenodo.org/record/3692559)
188 | - [#MeToo and the Women's Rights Movement in China Web Archive](https://zenodo.org/record/3633681)
189 | - [Geologic Field Trip Guidebooks Web Archive](https://zenodo.org/record/3666295)
190 | - [Literary Authors from Europe and Eurasia Web Archive](https://zenodo.org/record/3632728)
191 | - [Web Archive of Independent News Sites on Turkish Affairs](https://zenodo.org/record/3633234)
192 | - [State Elections Web Archive](https://zenodo.org/record/3635634)
193 | - [Brazilian Presidential Transition (2018) Web Archive](https://zenodo.org/record/3659692)
194 | - [Collaborative Architecture, Urbanism, and Sustainability Web Archive (CAUSEWAY)](https://zenodo.org/record/3674173)
195 | - [Global Webcomics Web Archive](https://zenodo.org/record/3633737)
196 | - [Queer Japan Web Archive](https://zenodo.org/record/3633284)
197 | - [Extreme Right Movements in Europe](https://zenodo.org/record/3633161)
198 | - [Latin American and Caribbean Contemporary Art Web Archive](https://zenodo.org/record/3633118)
199 | - [Popline and K4Health Web Archive](https://zenodo.org/record/3633022)
200 | - [Eastern Europe and Former Soviet Union Web Archive](https://zenodo.org/record/3633031)
201 | - [Independent Documentary Filmmakers from China, Hong Kong, and Taiwan Web Archive](https://zenodo.org/record/3632912)
202 |
203 | **Columbia University Libraries**
204 |
205 | - [General](https://zenodo.org/record/3633290)
206 | - [Resistance](https://zenodo.org/record/3660457)
207 | - [Stonewall 50 Commemoration](https://zenodo.org/record/3631347)
208 | - [Freely Accessible eJournals](https://zenodo.org/record/3633671)
209 | - [Avery Library Historic Preservation and Urban Planning](https://doi.org/10.5683/SP2/Z68EVJ)
210 | - [Rare Book and Manuscript Library](https://zenodo.org/record/3701593)
211 | - [Burke Library New York City Religions](https://zenodo.org/record/3701455)
212 |
213 | ## Sponsors + Special Thanks
214 |
215 | This event is possible thanks to the generous support from:
216 |
217 | [Andrew W. Mellon Foundation](https://mellon.org/), [Columbia University Libraries](https://library.columbia.edu), [Ivy Plus Libraries Confederation](https://library.columbia.edu/collections/web-archives/Ivy_Plus_Libraries.html), [Faculty of Arts](https://uwaterloo.ca/arts/) and [David R. Cheriton School of Computer Science](https://cs.uwaterloo.ca/) at the [University of Waterloo](https://uwaterloo.ca/), [York University Libraries](https://www.library.yorku.ca/web/), [Compute Canada](https://www.computecanada.ca/), and [Start Smart Labs](http://www.startsmartlabs.com/).
218 |
219 | We'd also like to say a special thanks to Columbia University and the Ivy Plus Libraries Confederation for providing access to their collections!
220 |
--------------------------------------------------------------------------------
/PySpark Examples/PySpark - Find Images Shared Between Domains.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from aut import *\n",
10 |     "from pyspark.sql.functions import asc, col, countDistinct, first"
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 3,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "images = WebArchive(sc, sqlContext, \"/home/nruest/Projects/au/sample-data/geocities\") \\\n",
20 | " .images() \\\n",
21 | " .select(remove_prefix_www(extract_domain(\"url\")).alias(\"domain\"), \"url\", \"md5\")"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": 4,
27 | "metadata": {},
28 | "outputs": [
29 | {
30 | "name": "stderr",
31 | "output_type": "stream",
32 | "text": [
33 | "\r",
34 | "[Stage 0:> (0 + 1) / 1]\r"
35 | ]
36 | },
37 | {
38 | "name": "stdout",
39 | "output_type": "stream",
40 | "text": [
41 | "[2022-05-28T00:11:17.610Z - 00000 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
42 | ]
43 | },
44 | {
45 | "name": "stderr",
46 | "output_type": "stream",
47 | "text": [
48 | "22/05/27 20:11:18 WARN PDFParser: J2KImageReader not loaded. JPEG2000 files will not be processed.\n",
49 | "See https://pdfbox.apache.org/2.0/dependencies.html#jai-image-io\n",
50 | "for optional dependencies.\n",
51 | "\n",
52 | "22/05/27 20:11:18 WARN TesseractOCRParser: Tesseract OCR is installed and will be automatically applied to image files unless\n",
53 | "you've excluded the TesseractOCRParser from the default parser.\n",
54 | "Tesseract may dramatically slow down content extraction (TIKA-2359).\n",
55 | "As of Tika 1.15 (and prior versions), Tesseract is automatically called.\n",
56 | "In future versions of Tika, users may need to turn the TesseractOCRParser on via TikaConfig.\n",
57 | "22/05/27 20:11:18 WARN SQLite3Parser: org.xerial's sqlite-jdbc is not loaded.\n",
58 | "Please provide the jar on your classpath to parse sqlite files.\n",
59 | "See tika-parsers/pom.xml for the correct version.\n"
60 | ]
61 | },
62 | {
63 | "name": "stdout",
64 | "output_type": "stream",
65 | "text": [
66 | "+-------------+--------------------+--------------------+\n",
67 | "| domain| url| md5|\n",
68 | "+-------------+--------------------+--------------------+\n",
69 | "|geocities.com|http://geocities....|17827882f7bf42860...|\n",
70 | "|geocities.com|http://geocities....|899bc6e3309b0fc78...|\n",
71 | "|geocities.com|http://geocities....|955f6c342ffed6823...|\n",
72 | "|geocities.com|http://geocities....|47718718ddfd7d43a...|\n",
73 | "|geocities.com|http://geocities....|8ada65828daff258e...|\n",
74 | "+-------------+--------------------+--------------------+\n",
75 | "only showing top 5 rows\n",
76 | "\n"
77 | ]
78 | },
79 | {
80 | "name": "stderr",
81 | "output_type": "stream",
82 | "text": [
83 | "\r",
84 | " \r"
85 | ]
86 | }
87 | ],
88 | "source": [
89 | "images.show(5, True)"
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": 5,
95 | "metadata": {},
96 | "outputs": [],
97 | "source": [
 98 |     "links = images.groupBy(\"md5\") \\\n",
 99 |     "    .agg(countDistinct(\"domain\").alias(\"domain_count\")) \\\n",
100 |     "    .where(col(\"domain_count\") >= 2)"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 6,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "result = images.join(links, \"md5\") \\\n",
110 | " .groupBy(\"domain\", \"md5\") \\\n",
111 | " .agg(first(\"url\").alias(\"image_url\")) \\\n",
112 | " .orderBy(asc(\"md5\"))"
113 | ]
114 | },
115 | {
116 | "cell_type": "code",
117 | "execution_count": 7,
118 | "metadata": {},
119 | "outputs": [
120 | {
121 | "name": "stdout",
122 | "output_type": "stream",
123 | "text": [
124 | "[2022-05-28T00:11:22.907Z - 00012 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
125 | "[2022-05-28T00:11:22.907Z - 00011 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
126 | ]
127 | },
128 | {
129 | "name": "stderr",
130 | "output_type": "stream",
131 | "text": [
132 | "\r",
133 | "[Stage 1:> (0 + 10) / 10][Stage 3:> (0 + 2) / 10]\r"
134 | ]
135 | },
136 | {
137 | "name": "stdout",
138 | "output_type": "stream",
139 | "text": [
140 | "[2022-05-28T00:11:23.366Z - 00001 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
141 | "[2022-05-28T00:11:23.412Z - 00008 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
142 | "[2022-05-28T00:11:23.412Z - 00003 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
143 | "[2022-05-28T00:11:23.412Z - 00006 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
144 | "[2022-05-28T00:11:23.416Z - 00005 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
145 | "[2022-05-28T00:11:23.417Z - 00002 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
146 | "[2022-05-28T00:11:23.417Z - 00010 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
147 | "[2022-05-28T00:11:23.418Z - 00007 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
148 | "[2022-05-28T00:11:23.420Z - 00009 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
149 | "[2022-05-28T00:11:23.420Z - 00004 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
150 | ]
151 | },
152 | {
153 | "name": "stderr",
154 | "output_type": "stream",
155 | "text": [
156 | "[Stage 1:> (0 + 10) / 10][Stage 3:> (0 + 2) / 10]\r"
157 | ]
158 | },
159 | {
160 | "name": "stdout",
161 | "output_type": "stream",
162 | "text": [
163 | "[2022-05-28T00:20:43.093Z - 00013 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
164 | ]
165 | },
166 | {
167 | "name": "stderr",
168 | "output_type": "stream",
169 | "text": [
170 | "\r",
171 | "[Stage 1:=> (1 + 9) / 10][Stage 3:> (0 + 3) / 10]\r"
172 | ]
173 | },
174 | {
175 | "name": "stdout",
176 | "output_type": "stream",
177 | "text": [
178 | "[2022-05-28T00:21:25.142Z - 00014 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
179 | ]
180 | },
181 | {
182 | "name": "stderr",
183 | "output_type": "stream",
184 | "text": [
185 | "\r",
186 | "[Stage 1:=> (1 + 9) / 10][Stage 3:=> (1 + 3) / 10]\r"
187 | ]
188 | },
189 | {
190 | "name": "stdout",
191 | "output_type": "stream",
192 | "text": [
193 | "[2022-05-28T00:21:36.821Z - 00015 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
194 | ]
195 | },
196 | {
197 | "name": "stderr",
198 | "output_type": "stream",
199 | "text": [
200 | "[Stage 1:=====> (3 + 7) / 10][Stage 3:=> (1 + 5) / 10]\r"
201 | ]
202 | },
203 | {
204 | "name": "stdout",
205 | "output_type": "stream",
206 | "text": [
207 | "[2022-05-28T00:21:40.555Z - 00016 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
208 | "[2022-05-28T00:21:54.640Z - 00017 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
209 | ]
210 | },
211 | {
212 | "name": "stderr",
213 | "output_type": "stream",
214 | "text": [
215 | "[Stage 1:=========> (5 + 5) / 10][Stage 3:=> (1 + 7) / 10]\r"
216 | ]
217 | },
218 | {
219 | "name": "stdout",
220 | "output_type": "stream",
221 | "text": [
222 | "[2022-05-28T00:22:03.398Z - 00018 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
223 | "[2022-05-28T00:22:05.184Z - 00019 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
224 | ]
225 | },
226 | {
227 | "name": "stderr",
228 | "output_type": "stream",
229 | "text": [
230 | "[Stage 1:============> (7 + 3) / 10][Stage 3:=> (1 + 9) / 10]\r"
231 | ]
232 | },
233 | {
234 | "name": "stdout",
235 | "output_type": "stream",
236 | "text": [
237 | "[2022-05-28T00:22:09.652Z - 00020 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
238 | ]
239 | },
240 | {
241 | "name": "stderr",
242 | "output_type": "stream",
243 | "text": [
244 | "[Stage 4:================================================> (178 + 13) / 200]\r"
245 | ]
246 | },
247 | {
248 | "name": "stdout",
249 | "output_type": "stream",
250 | "text": [
251 | "+--------------------------+--------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+\n",
252 | "|domain |md5 |image_url |\n",
253 | "+--------------------------+--------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+\n",
254 | "|stevenspointjournal.com |022ac0cde4560864fc99a3d9e5210f6b|http://cmsimg.stevenspointjournal.com/apps/pbcsi.dll/bilde?Avis=U0&Dato=20091026&Kategori=CWS01&Lopenr=910260803&Ref=PH&NewTbl=1&Itemnr=1&maxW=130&Border=0 |\n",
255 | "|marshfieldnewsherald.com |022ac0cde4560864fc99a3d9e5210f6b|http://cmsimg.marshfieldnewsherald.com/apps/pbcsi.dll/bilde?Avis=U0&Dato=20091026&Kategori=CWS01&Lopenr=910260803&Ref=PH&NewTbl=1&Itemnr=1&maxW=130&Border=0 |\n",
256 | "|wisconsinrapidstribune.com|022ac0cde4560864fc99a3d9e5210f6b|http://cmsimg.wisconsinrapidstribune.com/apps/pbcsi.dll/bilde?Avis=U0&Dato=20091026&Kategori=CWS01&Lopenr=910260803&Ref=PH&NewTbl=1&Itemnr=1&maxW=130&Border=0|\n",
257 | "|thenorthwestern.com |06fe40f8d7b9b5fd7e552dac73100044|http://sitelife.thenorthwestern.com/ver1.0/Content/images/store/3/12/63a80d76-f400-4af5-bc63-4b6af4528150.Small.gif |\n",
258 | "|marshfieldnewsherald.com |06fe40f8d7b9b5fd7e552dac73100044|http://sitelife.marshfieldnewsherald.com/ver1.0/Content/images/store/4/5/a47bbc91-a893-4b62-95d0-04b4de0739ea.Small.gif |\n",
259 | "|stevenspointjournal.com |0d2fb10487f4dfad02b14d00832049a3|http://www.stevenspointjournal.com/graphics/viewadvertisers.gif |\n",
260 | "|postcrescent.com |0d2fb10487f4dfad02b14d00832049a3|http://www.postcrescent.com/graphics/viewadvertisers.gif |\n",
261 | "|thenorthwestern.com |0d2fb10487f4dfad02b14d00832049a3|http://www.thenorthwestern.com/graphics/viewadvertisers.gif |\n",
262 | "|fdlreporter.com |0d2fb10487f4dfad02b14d00832049a3|http://www.fdlreporter.com/graphics/viewadvertisers.gif |\n",
263 | "|geocities.com |0d4937515413df19faf9902cf06b6f88|http://geocities.com/Colosseum/Base/7341/usa3.gif |\n",
264 | "+--------------------------+--------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------+\n",
265 | "only showing top 10 rows\n",
266 | "\n"
267 | ]
268 | },
269 | {
270 | "name": "stderr",
271 | "output_type": "stream",
272 | "text": [
273 | "\r",
274 | "[Stage 4:====================================================> (192 + 8) / 200]\r",
275 | "\r",
276 | " \r"
277 | ]
278 | }
279 | ],
280 | "source": [
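281 | "# Show ten rows of images shared across domains, without truncating the URLs.\n",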
281 | "result.show(10, False)"
282 | ]
283 | }
284 | ],
285 | "metadata": {
286 | "kernelspec": {
287 | "display_name": "Python 3 (ipykernel)",
288 | "language": "python",
289 | "name": "python3"
290 | },
291 | "language_info": {
292 | "codemirror_mode": {
293 | "name": "ipython",
294 | "version": 3
295 | },
296 | "file_extension": ".py",
297 | "mimetype": "text/x-python",
298 | "name": "python",
299 | "nbconvert_exporter": "python",
300 | "pygments_lexer": "ipython3",
301 | "version": "3.9.9"
302 | }
303 | },
304 | "nbformat": 4,
305 | "nbformat_minor": 2
306 | }
307 |
--------------------------------------------------------------------------------
/PySpark Examples/PySpark - Extract Popular Images.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from aut import *"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "# Web archive collection; images.\n",
19 | "images = WebArchive(sc, sqlContext, \"/home/nruest/Projects/au/sample-data/geocities\").images()"
20 | ]
21 | },
22 | {
23 | "cell_type": "code",
24 | "execution_count": 3,
25 | "metadata": {},
26 | "outputs": [
27 | {
28 | "name": "stdout",
29 | "output_type": "stream",
30 | "text": [
31 | "[2022-05-28T20:42:18.472Z - 00000 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
32 | ]
33 | },
34 | {
35 | "name": "stderr",
36 | "output_type": "stream",
37 | "text": [
38 | "22/05/28 16:42:19 WARN PDFParser: J2KImageReader not loaded. JPEG2000 files will not be processed.\n",
39 | "See https://pdfbox.apache.org/2.0/dependencies.html#jai-image-io\n",
40 | "for optional dependencies.\n",
41 | "\n",
42 | "22/05/28 16:42:19 WARN TesseractOCRParser: Tesseract OCR is installed and will be automatically applied to image files unless\n",
43 | "you've excluded the TesseractOCRParser from the default parser.\n",
44 | "Tesseract may dramatically slow down content extraction (TIKA-2359).\n",
45 | "As of Tika 1.15 (and prior versions), Tesseract is automatically called.\n",
46 | "In future versions of Tika, users may need to turn the TesseractOCRParser on via TikaConfig.\n",
47 | "22/05/28 16:42:19 WARN SQLite3Parser: org.xerial's sqlite-jdbc is not loaded.\n",
48 | "Please provide the jar on your classpath to parse sqlite files.\n",
49 | "See tika-parsers/pom.xml for the correct version.\n"
50 | ]
51 | },
52 | {
53 | "name": "stdout",
54 | "output_type": "stream",
55 | "text": [
56 | "+--------------+--------------------+--------------------+---------+--------------------+--------------+-----+------+--------------------+--------------------+--------------------+\n",
57 | "| crawl_date| url| filename|extension|mime_type_web_server|mime_type_tika|width|height| md5| sha1| bytes|\n",
58 | "+--------------+--------------------+--------------------+---------+--------------------+--------------+-----+------+--------------------+--------------------+--------------------+\n",
59 | "|20091027143512|http://geocities....|bodyboardhowto_08...| gif| image/gif| image/gif| 122| 42|17827882f7bf42860...|054ed6bee447d4165...|R0lGODlhegAqAPcAA...|\n",
60 | "|20091027143512|http://geocities....|products.htm_cmp_...| gif| image/gif| image/gif| 140| 60|899bc6e3309b0fc78...|c12218204f212d37d...|R0lGODlhjAA8AOMAA...|\n",
61 | "|20091027143512|http://geocities....| choc1.jpg| jpg| image/jpeg| image/jpeg| 110| 110|955f6c342ffed6823...|5933e16f8c8444821...|/9j/4AAQSkZJRgABA...|\n",
62 | "|20091027143511|http://geocities....| 0087.jpg| jpg| image/jpeg| image/jpeg| 75| 100|47718718ddfd7d43a...|3af77b7faeaa5a0d3...|/9j/4AAQSkZJRgABA...|\n",
63 | "|20091027143511|http://geocities....| newyears_1.gif| gif| image/gif| image/gif| 200| 274|8ada65828daff258e...|004a7bdadfadc1e28...|R0lGODlhyAASAfcAA...|\n",
64 | "|20091027143511|http://geocities....| garage.jpg| jpg| image/jpeg| image/jpeg| 600| 422|626a59f173d5ea2f0...|2ed34b7c41340a141...|/9j/4AAQSkZJRgABA...|\n",
65 | "|20091027143512|http://www.geocit...| leevi02.jpg| jpg| image/jpeg| image/jpeg| 176| 150|86138dc411c8a079b...|0b0ef066f53840938...|/9j/4AAQSkZJRgABA...|\n",
66 | "|20091027143512|http://geocities....| rose_open.gif| gif| image/gif| image/gif| 120| 112|d6acf3534878f1cb9...|ece458c0c5c5351fd...|R0lGODlheABwAPQAA...|\n",
67 | "|20091027143512|http://www.geocit...| enterCLR.gif| gif| image/gif| image/gif| 100| 70|cd80d70bf2517e0f1...|a3829ce2ebadb488a...|R0lGODlhZABGANQAA...|\n",
68 | "|20091027143512|http://geocities....| Film0002_18.jpg| jpg| image/jpeg| image/jpeg| 192| 300|824557238e36624f7...|317cb2ab866247a7a...|/9j/4AAQSkZJRgABA...|\n",
69 | "|20091027143512|http://geocities....|mike.htm_cmp_bloc...| gif| image/gif| image/gif| 600| 60|11ec6724ac8f61b95...|b0151deb6b48df348...|R0lGODlhWAI8AOMAA...|\n",
70 | "|20091027143512|http://geocities....| 0017.jpg| jpg| image/jpeg| image/jpeg| 75| 100|797a0805347d6384c...|8fc1436904aaaebd5...|/9j/4AAQSkZJRgABA...|\n",
71 | "|20091027143512|http://www.geocit...| mary01.jpg| jpg| image/jpeg| image/jpeg| 181| 377|08f510fe75f6f70d8...|663ecdc8835e93005...|/9j/4AAQSkZJRgABA...|\n",
72 | "|20091027143512|http://geocities....| dayl_button.gif| gif| image/gif| image/gif| 42| 30|29838b4f98b9cccef...|9599ee281e4c20fd0...|R0lGODlhKgAeAPcAA...|\n",
73 | "|20091027143512|http://geocities....| MTW48.jpg| jpg| image/jpeg| image/jpeg| 246| 262|ccbc878b6832f15e6...|130179b92a90c98fa...|/9j/4AAQSkZJRgABA...|\n",
74 | "|20091027143512|http://geocities....|up_cmp_blocks110_...| gif| image/gif| image/gif| 140| 60|6377484ef666ab5f5...|d4d4fb8c42403221b...|R0lGODlhjAA8AOMAA...|\n",
75 | "|20091027143512|http://geocities....| red-dk-lg.gif| gif| image/gif| image/gif| 19| 19|a3c296a2371b5221d...|ca2b55197ae856934...|R0lGODlhEwATAIQAA...|\n",
76 | "|20091027143512|http://www.geocit...| jesse.jpg| jpg| image/jpeg| image/jpeg| 183| 150|eca62b02a39127626...|3a61c2277de824f8f...|/9j/4AAQSkZJRgABA...|\n",
77 | "|20091027143513|http://geocities....|skatepagepics_12.gif| gif| image/gif| image/gif| 125| 42|d87e2a1bd5e6d9614...|951679fe427db957b...|R0lGODlhfQAqAPcAA...|\n",
78 | "|20091027143513|http://geocities....| skylight_small.jpg| jpg| image/jpeg| image/jpeg| 100| 62|ea69da4bb38c7a519...|9e8e1a0b9a1aab72a...|/9j/4AAQSkZJRgABA...|\n",
79 | "+--------------+--------------------+--------------------+---------+--------------------+--------------+-----+------+--------------------+--------------------+--------------------+\n",
80 | "only showing top 20 rows\n",
81 | "\n"
82 | ]
83 | },
84 | {
85 | "name": "stderr",
86 | "output_type": "stream",
87 | "text": [
88 | "\r",
89 | " \r"
90 | ]
91 | }
92 | ],
93 | "source": [
94 | "# Show images DataFrame\n",
95 | "images.show()"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": 4,
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
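104 | "# Top 20 most frequently occurring images, with a minimum width and height of 10 pixels.\n",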
104 | "popular_images = ExtractPopularImages(images, 20, 10, 10)"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": 5,
110 | "metadata": {},
111 | "outputs": [
112 | {
113 | "name": "stdout",
114 | "output_type": "stream",
115 | "text": [
116 | "[2022-05-28T20:42:22.910Z - 00003 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
117 | "[2022-05-28T20:42:22.911Z - 00007 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
118 | "[2022-05-28T20:42:22.931Z - 00004 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
119 | "[2022-05-28T20:42:22.911Z - 00006 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
120 | "[2022-05-28T20:42:22.978Z - 00005 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
121 | "[2022-05-28T20:42:22.977Z - 00010 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
122 | "[2022-05-28T20:42:22.948Z - 00001 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
123 | "[2022-05-28T20:42:22.948Z - 00008 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
124 | "[2022-05-28T20:42:22.948Z - 00009 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
125 | "[2022-05-28T20:42:22.990Z - 00002 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
126 | ]
127 | },
128 | {
129 | "name": "stderr",
130 | "output_type": "stream",
131 | "text": [
132 | "\r",
133 | "[Stage 1:> (0 + 10) / 10][Stage 2:> (0 + 2) / 10]\r"
134 | ]
135 | },
136 | {
137 | "name": "stdout",
138 | "output_type": "stream",
139 | "text": [
140 | "[2022-05-28T20:42:23.978Z - 00012 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143243-00104-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
141 | "[2022-05-28T20:42:23.988Z - 00011 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143512-00103-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
142 | ]
143 | },
144 | {
145 | "name": "stderr",
146 | "output_type": "stream",
147 | "text": [
148 | "[Stage 1:> (0 + 10) / 10][Stage 2:> (0 + 2) / 10]\r"
149 | ]
150 | },
151 | {
152 | "name": "stdout",
153 | "output_type": "stream",
154 | "text": [
155 | "[2022-05-28T20:52:49.161Z - 00013 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143856-00108-ia400107.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
156 | ]
157 | },
158 | {
159 | "name": "stderr",
160 | "output_type": "stream",
161 | "text": [
162 | "\r",
163 | "[Stage 1:=> (1 + 9) / 10][Stage 2:> (0 + 3) / 10]\r"
164 | ]
165 | },
166 | {
167 | "name": "stdout",
168 | "output_type": "stream",
169 | "text": [
170 | "[2022-05-28T20:53:21.039Z - 00014 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143351-00117-ia400103.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
171 | ]
172 | },
173 | {
174 | "name": "stderr",
175 | "output_type": "stream",
176 | "text": [
177 | "\r",
178 | "[Stage 1:=> (1 + 9) / 10][Stage 2:=> (1 + 3) / 10]\r"
179 | ]
180 | },
181 | {
182 | "name": "stdout",
183 | "output_type": "stream",
184 | "text": [
185 | "[2022-05-28T20:53:24.247Z - 00015 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142649-00105-ia400111.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
186 | ]
187 | },
188 | {
189 | "name": "stderr",
190 | "output_type": "stream",
191 | "text": [
192 | "\r",
193 | "[Stage 1:===> (2 + 8) / 10][Stage 2:=> (1 + 4) / 10]\r"
194 | ]
195 | },
196 | {
197 | "name": "stdout",
198 | "output_type": "stream",
199 | "text": [
200 | "[2022-05-28T20:53:45.707Z - 00016 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143340-00105-ia400105.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
201 | ]
202 | },
203 | {
204 | "name": "stderr",
205 | "output_type": "stream",
206 | "text": [
207 | "[Stage 1:=======> (4 + 6) / 10][Stage 2:=> (1 + 6) / 10]\r"
208 | ]
209 | },
210 | {
211 | "name": "stdout",
212 | "output_type": "stream",
213 | "text": [
214 | "[2022-05-28T20:54:08.417Z - 00017 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143841-00136-ia400104.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n",
215 | "[2022-05-28T20:54:20.033Z - 00018 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143451-00102-ia400108.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
216 | ]
217 | },
218 | {
219 | "name": "stderr",
220 | "output_type": "stream",
221 | "text": [
222 | "\r",
223 | "[Stage 1:=========> (5 + 5) / 10][Stage 2:=> (1 + 7) / 10]\r"
224 | ]
225 | },
226 | {
227 | "name": "stdout",
228 | "output_type": "stream",
229 | "text": [
230 | "[2022-05-28T20:54:22.604Z - 00019 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027142731-00177-ia400130.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
231 | ]
232 | },
233 | {
234 | "name": "stderr",
235 | "output_type": "stream",
236 | "text": [
237 | "\r",
238 | "[Stage 1:==========> (6 + 4) / 10][Stage 2:=> (1 + 8) / 10]\r"
239 | ]
240 | },
241 | {
242 | "name": "stdout",
243 | "output_type": "stream",
244 | "text": [
245 | "[2022-05-28T20:54:25.446Z - 00020 - HdfsIO] Opening file file:/home/nruest/Projects/au/sample-data/geocities/GEOCITIES-20091027143300-00114-ia400112.us.archive.org.warc.gz (Offset: 0, length: 0, decompress: false, strategy: BlockWise [dynamic])\n"
246 | ]
247 | },
248 | {
249 | "name": "stderr",
250 | "output_type": "stream",
251 | "text": [
252 | "[Stage 3:==================================================> (188 + 12) / 200]\r"
253 | ]
254 | },
255 | {
256 | "name": "stdout",
257 | "output_type": "stream",
258 | "text": [
259 | "+--------------------+-----+\n",
260 | "| url|count|\n",
261 | "+--------------------+-----+\n",
262 | "|http://geocities....| 755|\n",
263 | "|http://geocities....| 58|\n",
264 | "|http://geocities....| 56|\n",
265 | "|http://geocities....| 51|\n",
266 | "|http://www.geocit...| 43|\n",
267 | "|http://geocities....| 39|\n",
268 | "|http://geocities....| 33|\n",
269 | "|http://geocities....| 31|\n",
270 | "|http://geocities....| 30|\n",
271 | "|http://geocities....| 29|\n",
272 | "|http://i24.photob...| 28|\n",
273 | "|http://geocities....| 26|\n",
274 | "|http://geocities....| 25|\n",
275 | "|http://geocities....| 25|\n",
276 | "|http://geocities....| 24|\n",
277 | "|http://geocities....| 24|\n",
278 | "|http://geocities....| 22|\n",
279 | "|http://geocities....| 22|\n",
280 | "|http://geocities....| 22|\n",
281 | "|http://www.geocit...| 22|\n",
282 | "+--------------------+-----+\n",
283 | "\n"
284 | ]
285 | },
286 | {
287 | "name": "stderr",
288 | "output_type": "stream",
289 | "text": [
290 | "\r",
291 | " \r"
292 | ]
293 | }
294 | ],
295 | "source": [
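296 | "# Show the most frequent image URLs and their occurrence counts.\n",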
296 | "popular_images.show()"
297 | ]
298 | }
299 | ],
300 | "metadata": {
301 | "kernelspec": {
302 | "display_name": "Python 3 (ipykernel)",
303 | "language": "python",
304 | "name": "python3"
305 | },
306 | "language_info": {
307 | "codemirror_mode": {
308 | "name": "ipython",
309 | "version": 3
310 | },
311 | "file_extension": ".py",
312 | "mimetype": "text/x-python",
313 | "name": "python",
314 | "nbconvert_exporter": "python",
315 | "pygments_lexer": "ipython3",
316 | "version": "3.9.9"
317 | }
318 | },
319 | "nbformat": 4,
320 | "nbformat_minor": 2
321 | }
322 |
--------------------------------------------------------------------------------
/arch/html-file-information.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "provenance": [],
7 | "include_colab_link": true
8 | },
9 | "kernelspec": {
10 | "name": "python3",
11 | "display_name": "Python 3"
12 | },
13 | "language_info": {
14 | "name": "python"
15 | }
16 | },
17 | "cells": [
18 | {
19 | "cell_type": "markdown",
20 | "metadata": {
21 | "id": "view-in-github",
22 | "colab_type": "text"
23 | },
24 | "source": [
25 | ""
26 | ]
27 | },
28 | {
29 | "cell_type": "markdown",
30 | "source": [
31 | "# HTML Information Dataset Exploration\n",
32 | "\n",
33 | "We're going to take a look at a few examples of how we can explore the HTML Information dataset. \n",
34 | "\n",
35 | "The first thing we need to do is enter the URL for our HTML Information dataset in the cell below. You can get this by right clicking the Download icon, and selecting \"Copy Link\"."
36 | ],
37 | "metadata": {
38 | "id": "vAyuRQ2PJIdc"
39 | }
40 | },
41 | {
42 | "cell_type": "code",
43 | "source": [
44 | "dataset = 'https://webdata.archive-it.org/ait/files/download/ARCHIVEIT-14462/TextFilesInformationExtraction/html-file-information.csv.gz?access=UCQ7VUUU4NDLKSGIPQD2R2WUGLOQXWPQ' #@param {type:\"string\"}\n",
45 | "print(dataset)"
46 | ],
47 | "metadata": {
48 | "colab": {
49 | "base_uri": "https://localhost:8080/"
50 | },
51 | "cellView": "form",
52 | "id": "RfhJiesWVpAf",
53 | "outputId": "507325b4-2e56-4aaf-bc7b-55b38fe221c8"
54 | },
55 | "execution_count": 1,
56 | "outputs": [
57 | {
58 | "output_type": "stream",
59 | "name": "stdout",
60 | "text": [
61 | "https://webdata.archive-it.org/ait/files/download/ARCHIVEIT-14462/TextFilesInformationExtraction/html-file-information.csv.gz?access=UCQ7VUUU4NDLKSGIPQD2R2WUGLOQXWPQ\n"
62 | ]
63 | }
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "source": [
69 | "## pandas\n",
70 | "\n",
71 | "Next, we'll setup our environment so we can load our HTML Information dataset into [pandas](https://pandas.pydata.org) DataFrames. If you're unfamiliar with DataFrames, but you've worked with spreadsheets before, you should quickly feel comfortable."
72 | ],
73 | "metadata": {
74 | "id": "Z14F2cIWJVW0"
75 | }
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": 2,
80 | "metadata": {
81 | "id": "Chh6tt3HHF1s"
82 | },
83 | "outputs": [],
84 | "source": [
85 | "import pandas as pd"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "source": [
91 | "## Data Table Display\n",
92 | "\n",
93 | "Colab includes an extension that renders pandas DataFrames into interactive displays that can be filtered, sorted, and explored dynamically. This can be very useful for taking a look at what each DataFrame provides, and doing some intital filtering!\n",
94 | "\n",
95 | "Data table display for pandas DataFrames can be enabled by running:\n",
96 | "```python\n",
97 | "%load_ext google.colab.data_table\n",
98 | "```\n",
99 | "and disabled by running\n",
100 | "```python\n",
101 | "%unload_ext google.colab.data_table\n",
102 | "```"
103 | ],
104 | "metadata": {
105 | "id": "sH81XCf3I3xY"
106 | }
107 | },
108 | {
109 | "cell_type": "code",
110 | "source": [
111 | "%load_ext google.colab.data_table"
112 | ],
113 | "metadata": {
114 | "id": "-qyCnbvBI7n6"
115 | },
116 | "execution_count": 3,
117 | "outputs": []
118 | },
119 | {
120 | "cell_type": "markdown",
121 | "source": [
122 | "## Loading our ARCH Dataset as a DataFrame\n",
123 | "\n",
124 | "---\n",
125 | "\n",
126 | "\n",
127 | "Next, we'll create pandas DataFrame from our dataset and show a preview of it using the Data Table Display."
128 | ],
129 | "metadata": {
130 | "id": "6prR7j1zI_D5"
131 | }
132 | },
133 | {
134 | "cell_type": "code",
135 | "source": [
136 | "html = pd.read_csv(dataset, compression='gzip')\n",
137 | "html"
138 | ],
139 | "metadata": {
140 | "id": "YL0LQaUNHRKx"
141 | },
142 | "execution_count": 4,
143 | "outputs": []
144 | },
145 | {
146 | "cell_type": "markdown",
147 | "metadata": {
148 | "id": "3HPwOCNAvqMe"
149 | },
150 | "source": [
151 | "# Data Analysis\n",
152 | "\n",
153 | "Now that we have all of our datasets loaded up, we can begin to work with them!"
154 | ]
155 | },
156 | {
157 | "cell_type": "markdown",
158 | "metadata": {
159 | "id": "J6Pkg0prv3BE"
160 | },
161 | "source": [
162 | "## Counting total files, and unique files\n",
163 | "\n",
164 | "Let's take a quick look at how to count items in DataFrames, and use total and unique files as an example to work with.\n",
165 | "\n",
166 | "It's definitely work checking out the [pandas documentation](https://pandas.pydata.org/docs/index.html). There are a lot of good examples available, along with a robust [API reference](https://pandas.pydata.org/docs/reference/index.html#api)."
167 | ]
168 | },
169 | {
170 | "cell_type": "markdown",
171 | "metadata": {
172 | "id": "DFX4Gl3wv7bi"
173 | },
174 | "source": [
175 | "\n",
176 | "### How many html files are in this collection?\n",
177 | "\n",
178 | "We can take our `html` variable and try a couple of functions to get the same answer.\n",
179 | "\n",
180 | "1. `len(html.index)`\n",
181 | " * Get the length of the DataFrame's index.\n",
182 | "2. `html.shape[0]`\n",
183 | " * Get the shape or dimensionality of the DataFrame, and take the first item in the tuple.\n",
184 | "3. `html.count()`\n",
185 | " * Count the number of rows for each column.\n",
186 | "\n"
187 | ]
188 | },
189 | {
190 | "cell_type": "code",
191 | "metadata": {
192 | "id": "HTv8Oet3jiTH"
193 | },
194 | "source": [
195 | "len(html.index)"
196 | ],
197 | "execution_count": 5,
198 | "outputs": []
199 | },
200 | {
201 | "cell_type": "code",
202 | "metadata": {
203 | "id": "6rYEERnTjifk"
204 | },
205 | "source": [
206 | "html.shape[0]"
207 | ],
208 | "execution_count": 6,
209 | "outputs": []
210 | },
211 | {
212 | "cell_type": "code",
213 | "metadata": {
214 | "id": "bn-1v127aKIG"
215 | },
216 | "source": [
217 | "html.count()"
218 | ],
219 | "execution_count": 7,
220 | "outputs": []
221 | },
222 | {
223 | "cell_type": "markdown",
224 | "metadata": {
225 | "id": "38veKiPhwKo4"
226 | },
227 | "source": [
228 | "### How many unique html files are in the collection?\n",
229 | "\n",
230 | " We can see if an HTML file is unique or not by computing an [MD5 hash](https://en.wikipedia.org/wiki/MD5#MD5_hashes) of it, and comparing them. The exact same html file might have a filename of `example.html` or `foo.html`. If the hash is computed for each, we can see that even with different file names, they are actually the same html file. So, since we have both a `MD5` and `SHA1` hash column available in our DataFrame, we can just find the unique values, and count them!\n",
231 | "\n",
232 | "\n"
233 | ]
234 | },
235 | {
236 | "cell_type": "code",
237 | "metadata": {
238 | "id": "WesM3kQowM5B"
239 | },
240 | "source": [
241 | "len(html.md5.unique())"
242 | ],
243 | "execution_count": 8,
244 | "outputs": []
245 | },
246 | {
247 | "cell_type": "markdown",
248 | "metadata": {
249 | "id": "ZIXkI0-1wWQf"
250 | },
251 | "source": [
252 | "### What are the top 10 most occurring html files in the collection?\n",
253 | "\n",
254 | "Here we can take advantage of [`value_counts()`](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.value_counts.html) to provide us with a list of MD5 hashes and their respective counts."
255 | ]
256 | },
257 | {
258 | "cell_type": "code",
259 | "metadata": {
260 | "id": "8Ts03OFyjPIM"
261 | },
262 | "source": [
263 | "html[\"md5\"].value_counts().head(10)"
264 | ],
265 | "execution_count": 9,
266 | "outputs": []
267 | },
268 | {
269 | "cell_type": "markdown",
270 | "metadata": {
271 | "id": "FG7pGZUEwlaI"
272 | },
273 | "source": [
274 | "\n",
275 | "### What's the information around all of the occurances of `d41d8cd98f00b204e9800998ecf8427e`?\n",
276 | "\n",
277 | "What, you mean you don't know what `d41d8cd98f00b204e9800998ecf8427e` means? \n",
278 | "\n",
279 | "Let's find those HTML files in the DataFrame. We can here see some of the filenames used, its dimensions, and its URL.\n"
280 | ]
281 | },
282 | {
283 | "cell_type": "code",
284 | "metadata": {
285 | "id": "msmmm65lkSIK"
286 | },
287 | "source": [
288 | "html.loc[html[\"md5\"] == \"d41d8cd98f00b204e9800998ecf8427e\"]"
289 | ],
290 | "execution_count": null,
291 | "outputs": []
292 | },
293 | {
294 | "cell_type": "markdown",
295 | "metadata": {
296 | "id": "GbLLZW2awzCv"
297 | },
298 | "source": [
299 | "### What are the top 10 most occuring filenames in the collection?\n",
300 | "\n",
301 | "Note that this is of course different than the MD5 results up above. Here we are focusing _just_ on filename. So `a16180790160.html ` for example, might actually be referring to different HTML files who happen to have the same name.\n",
302 | "\n",
303 | "Here we can use `value_counts()` again, but this time we'll create a variable for the top filenames so we can use it later.\n",
304 | "\n"
305 | ]
306 | },
307 | {
308 | "cell_type": "code",
309 | "metadata": {
310 | "id": "pQaw54ACkwdZ"
311 | },
312 | "source": [
313 | "top_filenames = html[\"filename\"].value_counts().head(10)\n",
314 | "top_filenames"
315 | ],
316 | "execution_count": null,
317 | "outputs": []
318 | },
319 | {
320 | "cell_type": "markdown",
321 | "metadata": {
322 | "id": "Z7F3re20BQRI"
323 | },
324 | "source": [
325 | "### Let's create our first graph!\n",
326 | "\n",
327 | "We'll first plot the data with the pandas [plot](https://pandas.pydata.org/docs/reference/api/pandas.Series.plot.html) functionality, and then with [Altair](https://altair-viz.github.io/)."
328 | ]
329 | },
330 | {
331 | "cell_type": "code",
332 | "metadata": {
333 | "id": "sRvlstfsBWEZ"
334 | },
335 | "source": [
336 | "top_filenames_chart = top_filenames.plot.bar(figsize=(25, 10))\n",
337 | "\n",
338 | "top_filenames_chart.set_title(\"Top Filenames\", fontsize=22)\n",
339 | "top_filenames_chart.set_xlabel(\"Filename\", fontsize=20)\n",
340 | "top_filenames_chart.set_ylabel(\"Count\", fontsize=20)"
341 | ],
342 | "execution_count": null,
343 | "outputs": []
344 | },
345 | {
346 | "cell_type": "markdown",
347 | "metadata": {
348 | "id": "pQgeOObvgLvK"
349 | },
350 | "source": [
351 | "Now let's setup [Altair](https://altair-viz.github.io/), and plot the data. Altair is useful for creating vizualizations since they can be easily exported as a PNG or SVG."
352 | ]
353 | },
354 | {
355 | "cell_type": "code",
356 | "metadata": {
357 | "id": "Q7Z4J6qjWaVM"
358 | },
359 | "source": [
360 | "import altair as alt"
361 | ],
362 | "execution_count": null,
363 | "outputs": []
364 | },
365 | {
366 | "cell_type": "code",
367 | "metadata": {
368 | "id": "s0xwvILYWkgg"
369 | },
370 | "source": [
371 | "top_filenames_altair = (\n",
372 | " html[\"filename\"]\n",
373 | " .value_counts()\n",
374 | " .head(10)\n",
375 | " .rename_axis(\"Filename\")\n",
376 | " .reset_index(name=\"Count\")\n",
377 | ")\n",
378 | "\n",
379 | "filenames_bar = (\n",
380 | " alt.Chart(top_filenames_altair)\n",
381 | " .mark_bar()\n",
382 | " .encode(x=alt.X(\"Filename:O\", sort=\"-y\"), y=alt.Y(\"Count:Q\"))\n",
383 | ")\n",
384 | "\n",
385 | "filenames_rule = (\n",
386 | " alt.Chart(top_filenames_altair).mark_rule(color=\"red\").encode(y=\"mean(Count):Q\")\n",
387 | ")\n",
388 | "\n",
389 | "\n",
390 | "filenames_text = filenames_bar.mark_text(align=\"center\", baseline=\"bottom\").encode(\n",
391 | " text=\"Count:Q\"\n",
392 | ")\n",
393 | "\n",
394 | "(filenames_bar + filenames_rule + filenames_text).properties(\n",
395 | " width=1400, height=700, title=\"Top Filenames\"\n",
396 | ")"
397 | ],
398 | "execution_count": null,
399 | "outputs": []
400 | },
401 | {
402 | "cell_type": "markdown",
403 | "metadata": {
404 | "id": "BneaN9cgGoly"
405 | },
406 | "source": [
407 | "### How about a file format distribution?\n",
408 | "\n",
409 | "What _kind_ of html files are present? We can discover this by checking their \"media type\", or [MIME type](https://en.wikipedia.org/wiki/Media_type). \n",
410 | "\n",
411 | "\n",
412 | "\n",
413 | "\n"
414 | ]
415 | },
416 | {
417 | "cell_type": "code",
418 | "metadata": {
419 | "id": "RDd-J8D-GwDk"
420 | },
421 | "source": [
422 | "html_mime_types = (\n",
423 | " html[\"mime_type_tika\"]\n",
424 | " .value_counts()\n",
425 | " .head(5)\n",
426 | " .rename_axis(\"MIME Type\")\n",
427 | " .reset_index(name=\"Count\")\n",
428 | ")\n",
429 | "\n",
430 | "html_mimes_bar = (\n",
431 | " alt.Chart(html_mime_types)\n",
432 | " .mark_bar()\n",
433 | " .encode(x=alt.X(\"MIME Type:O\", sort=\"-y\"), y=alt.Y(\"Count:Q\"))\n",
434 | ")\n",
435 | "\n",
436 | "html_mime_rule = (\n",
437 | " alt.Chart(html_mime_types).mark_rule(color=\"red\").encode(y=\"mean(Count):Q\")\n",
438 | ")\n",
439 | "\n",
440 | "html_mime_text = html_mimes_bar.mark_text(align=\"center\", baseline=\"bottom\").encode(\n",
441 | " text=\"Count:Q\"\n",
442 | ")\n",
443 | "\n",
444 | "(html_mimes_bar + html_mime_rule + html_mime_text).properties(\n",
445 | " width=1400, height=700, title=\"HTML File Format Distribution\"\n",
446 | ")"
447 | ],
448 | "execution_count": null,
449 | "outputs": []
450 | },
451 | {
452 | "cell_type": "markdown",
453 | "metadata": {
454 | "id": "QUJR-jjqNxCL"
455 | },
456 | "source": [
457 | "### How do I get the actual html?\n",
458 | "\n",
459 | "...or, how do I get to the actual binary files described by each file format information derivative?\n",
460 | "\n",
461 | "There are a few options!\n",
462 | "\n",
463 | "1. `wget` or `curl` from the live URL, or a replay URL\n",
464 | " * Live web URL\n",
465 | " * `wget` or `curl` the value of the `url` column\n",
466 | " * Replay web URL\n",
467 | " * `wget` or `curl` the value of the `crawl_date` and `url` column using the following pattern:\n",
468 | " * `https://web.archive.org/web/` + `crawl_date` + `/` + `url`\n",
469 | " * https://web.archive.org/web/20120119124734/http://www.archive.org/images/glogo.png\n",
470 | " * `http://wayback.archive-it.org/14462/` + `crawl_date` + `/` + `url`\n",
471 | " * https://wayback.archive-it.org/14462/20210524212740/https://ruebot.net/visualization/elxn42/featured_hu33a17dfb90e2c5ed77f783db14a6e53a_5126291_550x0_resize_q90_box_2.png\n",
472 | "2. Use a scripting language, such as Python\n",
473 | " * Make use of the `url` and `filename` columns (and `crawl_date` if you want to use the replay URL)\n",
474 | " * `import requests`\n",
475 | " * `requests.get(url, allow_redirects=True)`\n",
476 | " * `open('filename', 'wb').write(r.content)`\n",
477 | "3. Use the [Archives Unleashed Toolkit](https://aut.docs.archivesunleashed.org/docs/extract-binary) (if you have access to the W/ARC files)."
478 | ]
479 | },
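480 | {
481 | "cell_type": "markdown",
482 | "metadata": {},
483 | "source": [
484 | "For option 2, here is a minimal sketch (assuming the live URLs are still reachable; error handling omitted):\n",
485 | "```python\n",
486 | "import requests\n",
487 | "\n",
488 | "row = html.iloc[0]  # any row of the DataFrame\n",
489 | "r = requests.get(row['url'], allow_redirects=True)\n",
490 | "open(row['filename'], 'wb').write(r.content)\n",
491 | "```"
492 | ]
493 | },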
480 | {
481 | "cell_type": "markdown",
482 | "source": [
483 | "If you wanted to download the HTML files using the replay URL, below is a method for doing so.\n",
484 | "\n",
485 | "First, you'll want to setup a replay url base url. Here we'll use the Archive-It Wayback instance for the collection."
486 | ],
487 | "metadata": {
488 | "id": "8yoFE2xLAlwk"
489 | }
490 | },
491 | {
492 | "cell_type": "code",
493 | "source": [
494 | "wayback_url = 'http://wayback.archive-it.org/14462/'"
495 | ],
496 | "metadata": {
497 | "id": "RxrOHn_2AicZ"
498 | },
499 | "execution_count": null,
500 | "outputs": []
501 | },
502 | {
503 | "cell_type": "markdown",
504 | "source": [
505 | "Next we'll create a new column using a lambda function. If you're familiar working with spreadsheets, what we're doing here is basically concatenating some column values together and creating a new column."
506 | ],
507 | "metadata": {
508 | "id": "jCTmrEBGA-Yd"
509 | }
510 | },
511 | {
512 | "cell_type": "code",
513 | "source": [
514 | "html['replay_url'] = html.apply(lambda row: str(wayback_url + str(row['crawl_date']) + \"/\" + row['url']), axis=1)"
515 | ],
516 | "metadata": {
517 | "id": "p5HoWxkFA6C8"
518 | },
519 | "execution_count": null,
520 | "outputs": []
521 | },
522 | {
523 | "cell_type": "markdown",
524 | "source": [
525 | "Then we can export that new column we created out to a file, so we can use it with `wget` to download all the html files!"
526 | ],
527 | "metadata": {
528 | "id": "q87tSYyIBIiZ"
529 | }
530 | },
531 | {
532 | "cell_type": "code",
533 | "source": [
534 | "html['replay_url'].head().to_csv('14462_html_urls.txt', index=False, header=False)"
535 | ],
536 | "metadata": {
537 | "id": "wyhpsf9wBN1a"
538 | },
539 | "execution_count": null,
540 | "outputs": []
541 | },
542 | {
543 | "cell_type": "markdown",
544 | "source": [
545 | "Finally, we can pass the file to `wget` to use as a download list. You can also speed this process up using `xargs` or `parallel`."
546 | ],
547 | "metadata": {
548 | "id": "81a3q7feu5Ji"
549 | }
550 | },
551 | {
552 | "cell_type": "code",
553 | "source": [
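554 | "# Alternative: parallelize with GNU xargs (assumed available), four downloads at a time.\n",
555 | "# !xargs --arg-file=14462_html_urls.txt -n 1 -P 4 wget --random-wait\n",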
554 | "!wget --random-wait -i 14462_html_urls.txt"
555 | ],
556 | "metadata": {
557 | "id": "D_p4qGKoBRaZ"
558 | },
559 | "execution_count": null,
560 | "outputs": []
561 | }
562 | ]
563 | }
--------------------------------------------------------------------------------
/arch/domain-frequency.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "name": "domain-frequency.ipynb",
7 | "provenance": [],
8 | "include_colab_link": true
9 | },
10 | "kernelspec": {
11 | "name": "python3",
12 | "display_name": "Python 3"
13 | },
14 | "language_info": {
15 | "name": "python"
16 | }
17 | },
18 | "cells": [
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {
22 | "id": "view-in-github",
23 | "colab_type": "text"
24 | },
25 | "source": [
26 | "
"
27 | ]
28 | },
29 | {
30 | "cell_type": "markdown",
31 | "source": [
32 | "# Domain Frequency Dataset Exploration\n",
33 | "\n",
34 | "We're going to take a look at a few examples of how we can explore the Domain Frequency dataset. First we need to enter the URL for our Domain Frequency dataset. You can get this by right clicking the Download icon, and selecting \"Copy Link\"."
35 | ],
36 | "metadata": {
37 | "id": "vAyuRQ2PJIdc"
38 | }
39 | },
40 | {
41 | "cell_type": "code",
42 | "source": [
43 | "dataset = 'https://webdata.archive-it.org/ait/files/download/ARCHIVEIT-06689/DomainFrequencyExtraction/domain-frequency.csv.gz?access=FUO54Y4B3J3GAZSDD6ML7DQENUN5BDOX' #@param {type:\"string\"}\n",
44 | "print(dataset)"
45 | ],
46 | "metadata": {
47 | "colab": {
48 | "base_uri": "https://localhost:8080/"
49 | },
50 | "cellView": "form",
51 | "id": "RfhJiesWVpAf",
52 | "outputId": "277fad1b-e6c8-4d4f-d2cb-11f38f484138"
53 | },
54 | "execution_count": 1,
55 | "outputs": [
56 | {
57 | "output_type": "stream",
58 | "name": "stdout",
59 | "text": [
60 | "https://webdata.archive-it.org/ait/files/download/ARCHIVEIT-06689/DomainFrequencyExtraction/domain-frequency.csv.gz?access=FUO54Y4B3J3GAZSDD6ML7DQENUN5BDOX\n"
61 | ]
62 | }
63 | ]
64 | },
65 | {
66 | "cell_type": "markdown",
67 | "source": [
68 | "# Environment\n",
69 | "\n",
70 | "Next, we'll set up our environment so we can load our Domain Frequency dataset into [pandas](https://pandas.pydata.org) and use [Altair](https://altair-viz.github.io/) for plots. Altair is useful for creating plots since they can be easily exported as a PNG or SVG."
71 | ],
72 | "metadata": {
73 | "id": "Z14F2cIWJVW0"
74 | }
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": 2,
79 | "metadata": {
80 | "id": "Chh6tt3HHF1s"
81 | },
82 | "outputs": [],
83 | "source": [
84 | "import pandas as pd\n",
85 | "import altair as alt"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "source": [
91 | "## Data Table Display\n",
92 | "\n",
93 | "Colab includes an extension that renders pandas DataFrames into interactive displays that can be filtered, sorted, and explored dynamically. This can be very useful for taking a look at what each DataFrame provides!\n",
94 | "\n",
95 | "Data table display for pandas DataFrames can be enabled by running:\n",
96 | "```python\n",
97 | "%load_ext google.colab.data_table\n",
98 | "```\n",
99 | "and disabled by running\n",
100 | "```python\n",
101 | "%unload_ext google.colab.data_table\n",
102 | "```"
103 | ],
104 | "metadata": {
105 | "id": "sH81XCf3I3xY"
106 | }
107 | },
108 | {
109 | "cell_type": "code",
110 | "source": [
111 | "%load_ext google.colab.data_table"
112 | ],
113 | "metadata": {
114 | "id": "-qyCnbvBI7n6"
115 | },
116 | "execution_count": 3,
117 | "outputs": []
118 | },
119 | {
120 | "cell_type": "markdown",
121 | "source": [
122 | "# Loading our ARCH Dataset as a DataFrame\n",
123 | "\n",
124 | "---\n",
125 | "\n",
126 | "\n",
127 | "Next, we'll create a pandas DataFrame from our dataset and show a preview of it using the Data Table Display."
128 | ],
129 | "metadata": {
130 | "id": "6prR7j1zI_D5"
131 | }
132 | },
133 | {
134 | "cell_type": "code",
135 | "source": [
136 | "domains = pd.read_csv(dataset, compression='gzip')\n",
137 | "domains"
138 | ],
139 | "metadata": {
140 | "colab": {
141 | "base_uri": "https://localhost:8080/",
142 | "height": 441
143 | },
144 | "id": "YL0LQaUNHRKx",
145 | "outputId": "498e4153-bb5a-427e-9eba-9a3aa10abdfe"
146 | },
147 | "execution_count": 4,
148 | "outputs": [
149 | {
150 | "output_type": "stream",
151 | "name": "stdout",
152 | "text": [
153 | "Warning: total number of rows (20407) exceeds max_rows (20000). Falling back to pandas display.\n"
154 | ]
155 | },
156 | {
157 | "output_type": "execute_result",
158 | "data": {
159 | "text/plain": [
160 | " domain count\n",
161 | "0 sld.cu 11002591\n",
162 | "1 cult.cu 5197445\n",
163 | "2 facebook.com 1985543\n",
164 | "3 icrt.cu 1936127\n",
165 | "4 cubava.cu 967903\n",
166 | "... ... ...\n",
167 | "20402 yardbarker.com 1\n",
168 | "20403 bufetetoro.com 1\n",
169 | "20404 understandingwar.org 1\n",
170 | "20405 headaches.org 1\n",
171 | "20406 nursite.com.ar 1\n",
172 | "\n",
173 | "[20407 rows x 2 columns]"
174 | ],
175 | "text/html": [
176 | "\n",
[The rest of this "text/html" output, and everything through the matching output in /geocities/geocities_domain_frequency.ipynb, is garbled in this dump; the file boundary between the two notebooks was lost. The recoverable content is four rendered DataFrame previews duplicating the text/plain tables: the domain/count preview above (20407 rows × 2 columns); the same preview with an added `tld` column holding public suffixes such as `cu`, `com`, and `com.ar` (20407 rows × 3 columns); and the corresponding geocities previews, led by geocities.com (57922449), yahoo.com (1110567), and amazon.com (87675) (147923 rows × 2 and × 3 columns).]