├── .circleci └── config.yml ├── .github └── workflows │ ├── codeql-analysis.yml │ └── test.yml ├── .gitignore ├── LICENSE.txt ├── README.md ├── current_requirements.txt ├── pyproject.toml ├── python-examples ├── 4sq-example.py ├── __init__.py ├── all.py ├── amazon_ec2_boto-example.py ├── arabic_dict-example.py ├── argparse-example.py ├── assets │ ├── abba.png │ ├── archive_name.tar.gz │ ├── cnn.txt │ ├── dictionary-list.html │ ├── discordia.json │ ├── discordia.pkl │ ├── index.html │ ├── iptc-example.jpg~ │ ├── pastedumpexample.json │ ├── test.xls │ ├── testimage.jpg~ │ └── testtweets.txt ├── audio_waveform-example.py ├── base64-example.py ├── bitcoin-example-1.py ├── browser-example.py ├── bs4_email_regex-example.py ├── chrome-headless-example.py ├── circles-example.py ├── cvlib_example.py ├── djvu-pdf-example.py ├── exif_reader-example.py ├── flask-example.py ├── fuzzywuzzy-example.py ├── geoname-example.py ├── get_geo-example.py ├── hashing_example.py ├── html2plaintext-example.py ├── http.html ├── httpserver-example.py ├── hug-postgresql-example.py ├── hug_api_example.py ├── hug_post_server-example.py ├── imaplib-example.py ├── instagram_geo-example.py ├── iptcinfo3-example.py ├── json-example.py ├── list.txt ├── main.py ├── mechanize-example.py ├── merge-pdfs-example.py ├── multi-categorization-tweets-example.py ├── ngrams-example.py ├── opencv_facial_recognition-example.py ├── parse_divs-example.py ├── pdf2random_text-example.py ├── pdfquery-example.py ├── pickle_load-example.py ├── pinboard-example.py ├── polyglot-example.py ├── pyzillow-example.py ├── quandl-example.py ├── read-spreadsheet-example.py ├── read_wav_display_audio-example.py ├── request_post_go_with_hug_post-example.py ├── requests-example.py ├── rethinkdb-example.py ├── ryu-example.py ├── scapy_arp-example.py ├── scatterplot-example.py ├── scrape_twitter-example.py ├── sentiment_analysis_nltk-example.py ├── server-example.py ├── setup.py ├── shodan-example.py ├── smash-site-example.py ├── speech-example.py ├── spider.py ├── spotify-example.py ├── stem_tor-example.py ├── test_tor-example.py ├── textract-example.py ├── tika-example-too.py ├── tika-example.py ├── tika-get-text-example.py ├── tkinter-example.py ├── tor-example.py ├── triplot-example.py ├── tuple-example.py ├── urllib3_proxymanager-example.py ├── useragents.py └── websockify-example.py ├── requirements.txt ├── setup.cfg └── uv.lock /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | build: 4 | working_directory: ~/python-examples 5 | docker: 6 | - image: circleci/python:3.9.7 # every job must define an image for the docker executor and subsequent jobs may define a different image. 
7 | environment: 8 | PIPENV_VENV_IN_PROJECT: true 9 | steps: 10 | - checkout # checkout source code to working directory 11 | - run: 12 | command: | # use pipenv to install dependencies 13 | sudo apt-get install python3-numpy libicu-dev 14 | sudo pip install pipenv 15 | mkdir reports 16 | pipenv install 17 | pipenv run py.test --junitxml=reports/pytest/pytest-report.xml 18 | - save_cache: 19 | key: deps9-{{ .Branch }}-{{ checksum "Pipfile.lock" }} 20 | paths: 21 | - ".venv" 22 | - "/usr/local/bin" 23 | - "/usr/local/lib/python3.9/site-packages" 24 | - store_test_results: 25 | path: reports 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | name: "CodeQL" 2 | 3 | on: 4 | push: 5 | branches: [ "master" ] 6 | pull_request: 7 | branches: [ "master" ] 8 | schedule: 9 | - cron: '38 22 * * 5' 10 | 11 | jobs: 12 | analyze: 13 | name: Analyze 14 | runs-on: ubuntu-latest 15 | permissions: 16 | actions: read 17 | contents: read 18 | security-events: write 19 | 20 | strategy: 21 | fail-fast: false 22 | matrix: 23 | language: [ 'python' ] 24 | 25 | steps: 26 | - name: Checkout repository 27 | uses: actions/checkout@v4 28 | 29 | - name: Initialize CodeQL 30 | uses: github/codeql-action/init@v3 31 | with: 32 | languages: ${{ matrix.language }} 33 | 34 | - name: Autobuild 35 | uses: github/codeql-action/autobuild@v3 36 | 37 | - name: Perform CodeQL Analysis 38 | uses: github/codeql-action/analyze@v3 39 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | branches: [ main, master ] 6 | pull_request: 7 | branches: [ main, master ] 8 | 9 | jobs: 10 | test: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | python-version: ["3.12", "3.11"] 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - name: Install uv 20 | uses: astral-sh/setup-uv@v3 21 | with: 22 | version: "latest" 23 | 24 | - name: Set up Python ${{ matrix.python-version }} 25 | uses: actions/setup-python@v5 26 | with: 27 | python-version: ${{ matrix.python-version }} 28 | 29 | - name: Install dependencies 30 | run: uv sync --dev 31 | 32 | - name: Run tests 33 | run: uv run pytest 34 | 35 | - name: Security audit 36 | run: uv run pip-audit --desc 37 | 38 | - name: Lint with flake8 39 | run: | 40 | uv run flake8 --count --select=E9,F63,F7,F82 --show-source --statistics 41 | uv run flake8 --count --exit-zero --max-complexity=10 --max-line-length=100 --statistics 42 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | .DS_Store 3 | .cache/ 4 | .idea 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # paths created 13 | flaskme/ 14 | 15 | # Distribution / packaging 16 | .Python 17 | env/ 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *,cover 52 | .hypothesis/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # IPython Notebook 76 | .ipynb_checkpoints 77 | 78 | # pyenv 79 | .python-version 80 | 81 | # celery beat schedule file 82 | celerybeat-schedule 83 | 84 | # dotenv 85 | .env 86 | 87 | # virtualenv 88 | venv/ 89 | ENV/ 90 | 91 | # Spyder project settings 92 | .spyderproject 93 | 94 | # Rope project settings 95 | .ropeproject 96 | configs.py 97 | .cache* 98 | 99 | # stupid vscode bs 100 | .vscode 101 | .vscode/* 102 | 103 | # any extraneous output bs 104 | *.pdf 105 | *.djvu 106 | *.wav 107 | *.doc 108 | *.jpg 109 | *.xml 110 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 James Campbell 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # python-examples 2 | 3 | [![CI](https://github.com/james-see/python-examples/workflows/Tests/badge.svg)](https://github.com/james-see/python-examples/actions) 4 | [![CodeQL](https://github.com/james-see/python-examples/workflows/CodeQL/badge.svg)](https://github.com/james-see/python-examples/actions) 5 | 6 | This is a collection of python examples I created for some key libraries in Python that I use all the time. 7 | 8 | It is a way for me to remember and hopefully get others started. 9 | 10 | Start your Python journey in Python 3. Onward and upward. 11 | 12 | ## 🚀 Quick Start 13 | 14 | This project uses [uv](https://github.com/astral-sh/uv) for modern Python package management. 
15 | 16 | ### Prerequisites 17 | 18 | - Python 3.12 or higher 19 | - uv package manager (install via `curl -LsSf https://astral.sh/uv/install.sh | sh`) 20 | 21 | ### Installation 22 | 23 | ```bash 24 | # Clone the repository 25 | git clone https://github.com/james-see/python-examples.git 26 | cd python-examples 27 | 28 | # Install dependencies 29 | uv sync --dev 30 | 31 | # Run tests 32 | uv run pytest 33 | 34 | # Run linting 35 | uv run flake8 36 | ``` 37 | 38 | ### Development 39 | 40 | ```bash 41 | # Add new dependencies 42 | uv add package-name 43 | 44 | # Run a specific example 45 | uv run python example-name.py 46 | 47 | # Format code 48 | uv run black . 49 | uv run isort . 50 | ``` 51 | 52 | ## 🛠️ Project Structure 53 | 54 | - **python-examples/**: Main package containing all example scripts 55 | - **assets/**: Sample data files for examples 56 | - **tests/**: Test files 57 | - **.github/workflows/**: GitHub Actions for CI/CD 58 | 59 | **urllib** (built-in to python3) 60 | 61 | 1. [access foursquare API](#foursquare-api-example) 62 | 63 | ## *By python 3.x package:* 64 | 65 | **http.server** (built-in to python3) 66 | 67 | 1. [http example](#http-example) 68 | 69 | **pdfquery** (install by `uv add pdfquery`) 70 | 71 | 1. [pdfquery example](#pdfquery-example) 72 | 73 | **PyPDF2** (install by `uv add PyPDF2`) 74 | 75 | 1. [pdf merge example](#pdf-merge-example) 76 | 77 | **argparse** (built-in to python3) 78 | 79 | 1. [argparse example](#argparse-example) 80 | 81 | **bs4** (install via `uv add beautifulsoup4`) 82 | 83 | 1. [html to text parser](#html-to-text-example) 84 | 2. [email parser](#email-parser-example) 85 | 86 | **fuzzywuzzy** (install by `uv add fuzzywuzzy`) 87 | 88 | 1. [fuzzywuzzy](#fuzzywuzzy-example) 89 | 90 | **rethinkdb** (install by `uv add rethinkdb`) 91 | 92 | 1. [rethinkdb example](#rethinkdb-example) 93 | 94 | **quandl** (install by `uv add quandl`) 95 | 96 | 1. [quandl api access example](#quandl-example) 97 | 98 | **hug** (install by `uv add hug`) 99 | 100 | 1. [hug api access example](#hug-example) 101 | 102 | **base64** (package is built-in) 103 | 104 | 1. [base64 encode & decode example](#base64-example) 105 | 106 | **http.server** (package is built-in) 107 | 108 | 1. [web server example](#server-example) 109 | 110 | **hashlib** (package is built-in) 111 | 112 | 1. [sha 256 hash example](#sha-example) 113 | 114 | **nltk** (install via `uv add nltk`) 115 | 116 | 1. [sentiment analysis example](#sentiment-example) 117 | 118 | **exifread** (install via `uv add exifread`) 119 | 120 | 1. [read exif example](#exifread-example) 121 | 122 | **json** (built-in to python3) 123 | 124 | 1. [json to python object example](#json-to-python-object-example) 125 | 126 | **urllib3** (install via `uv add urllib3`) 127 | 128 | 1. [google mask search example](#google-mask-example) 129 | 2. [urllib3 proxymanager example](#proxymanager-example) 130 | 131 | **blockchain** (install via `uv add blockchain`) 132 | 133 | 1. [wallet query example](#bitcoin-wallet-example) 134 | 135 | **PySocks** ([package download link](https://github.com/Anorov/PySocks)) 136 | 137 | 1. [connect to tor and print .onion site](#tor-connect-example) 138 | 139 | **shodan** (install via `uv add shodan`) 140 | 141 | 1. [shodan count example](#shodan-count-example) 142 | 2. [google lat/long and shodan enrichment geo search example](#google-geo-and-shodan-example) 143 | 144 | **websockify** (install via `uv add websockify`) 145 | 146 | 1. 
[websockify example](#websockify-example) 147 | 148 | **scrapy** ([package download link](http://scrapy.org/download/)) 149 | 150 | 1. [crawl all internal links for a domain](#scrapy-spider-example) 151 | 152 | **iptcinfo3** (install via `uv add iptcinfo3`) 153 | 154 | 1. [iptcinfo3 example](#iptcinfo3-example) 155 | 156 | **imaplib** (built-in to python3) 157 | 158 | 1. [imaplib example](#imaplib-example) 159 | 160 | ## http.server Example 161 | 162 | This example runs a web server at http://127.0.0.1:8000. Go to http://127.0.0.1:8000/web.html to verify it is working. 163 | 164 | #### Run the example 165 | 166 | ```bash 167 | uv run python httpserver-example.py 168 | ``` 169 | 170 | This will output that it is running on port 8000. 171 | 172 | ## pdfquery Example 173 | 174 | This example takes the name of the pdf you want to get text from as its first argument and prints the text found in the pdf to the screen. 175 | 176 | ### Run the example 177 | 178 | ```bash 179 | uv run python pdfquery-example.py mypdf.pdf 180 | ``` 181 | 182 | This will output the text of the pdf to stdout if it contains any. 183 | 184 | ## PDF Merge Example 185 | 186 | This example reads in a list of pdfs. You can specify the prefix of the list of pdfs using the `-p` argument; the default is read\_. So for example, read_001.pdf and read_002.pdf would automatically get merged into a single pdf called merged.pdf. You can also set the output name via the `-o` argument. 187 | 188 | ### Run the example 189 | 190 | To get the help file: 191 | 192 | ```bash 193 | uv run python merge-pdfs-example.py -h 194 | ``` 195 | 196 | To run it on a list of pdfs with prefix `test` and output `final.pdf`: 197 | 198 | ```bash 199 | uv run python merge-pdfs-example.py -p test -o final.pdf 200 | ``` 201 | 202 | ## Pattern Twitter Search Example 203 | 204 | The first example I created is pattern-example-twitter.py. Pattern is a great library that is installed via pip and can query Google, Twitter, etc. out of the box. 205 | 206 | This twitter example connects to twitter and searches either a random string or terms you set via the terminal with `-s 'search terms'`. 207 | 208 | Terminal Example 209 | 210 | ```python3 211 | python3 pattern-example-twitter.py -s 'Hello World' 212 | ``` 213 | 214 | ## Tor Connect Example 215 | 216 | Tor (The Onion Router) has a particular socks port and connection setup that needs to be configured to connect in Python. This example shows you how. You must already have [Tor](http://torproject.org/download) installed. 217 | 218 | *Note:* You need to install the Socksipy package for this to work, which has an actively maintained fork in [PySocks](https://github.com/Anorov/PySocks). It is easy if you already have pip (and if you don't have pip, you should): `pip install PySocks` 219 | 220 | Then make sure your code (like the example) has `import socks`. 221 | 222 | ### Run the tor connect example 223 | 224 | Just run it from the terminal window: 225 | 226 | ```python3 227 | python tor-example.py 228 | ``` 229 | 230 | This will return the DuckDuckGo .onion html as proof that it is working. 231 | 232 | ## Google Search Example 233 | 234 | The Google search portion of the pattern library was very useful. This example shows you that you can compare the popularity of phrases or sets of terms together using percentages and the sort() command. It selects 10 random words to search on from the included dictionary list in the assets folder. This doesn't work anymore. Thanks for nothing Google. 
235 | 236 | ### Run the example 237 | 238 | ```python3 239 | python3 pattern-example-google.py -c 'sexy' 240 | ``` 241 | 242 | Returns: 243 | 244 | ```python3 245 | 89.13% "sexy seemed" 246 | 2.17% "sexy impassive" 247 | 1.09% "sexy spiegels" 248 | 1.09% "sexy slumping" 249 | 1.09% "sexy quietuses" 250 | 1.09% "sexy noncooperation" 251 | 1.09% "sexy miriness" 252 | 1.09% "sexy incompliancy" 253 | 1.09% "sexy evaporators" 254 | 1.09% "sexy cudgeler" 255 | ``` 256 | 257 | ## hug example 258 | 259 | hug is a great, easy-to-use API library for routing requests in your web app. 260 | 261 | ### Run the example 262 | 263 | ```python3 264 | python3 hug_api_example.py 265 | ``` 266 | 267 | This will start a hug listener process on 127.0.0.1:8000. 268 | 269 | Then you can go to http://localhost:8000/happy_birthday?name=Hug&age=1 and see the output. 270 | 271 | ## base64 Example 272 | 273 | Converting data to base64 provides a nice obfuscation layer for data transport. 274 | 275 | ### Run the base64 example 276 | 277 | ```python3 278 | python3 base64-example.py 279 | ``` 280 | 281 | This will output an HTML string encoded as base64. 282 | 283 | ## Html to Text Example 284 | 285 | Beautiful Soup is a great library to parse and select html or iterate through the DOM. 286 | For this example to work you need to install Beautiful Soup: 287 | 288 | ```bash 289 | uv add beautifulsoup4 290 | ``` 291 | 292 | ### Run the Example 293 | 294 | ```python3 295 | python3 html2plaintext-example.py 296 | ``` 297 | 298 | #### Returns 299 | 300 | ```python3 301 | [*-*]Before html with text: 302 | ------------------ 303 | 304 | 305 | THIS IS AN EXAMPLE by @jamescampbell 306 | 307 | 308 | 309 | 

Hello World
310 | I hope you enjoy this example.
311 | ------------------ 312 | 313 | 314 | 315 | [*-*]After cleanMe() function: 316 | ------------------- 317 | THIS IS AN EXAMPLE by @jamescampbell 318 | Hello World 319 | I hope you enjoy this example. 320 | ------------------- 321 | ``` 322 | 323 | ## FuzzyWuzzy Example 324 | 325 | This example searches for 'cowboy' and returns Dallas Cowboys as the closest match from the list available. 326 | 327 | ### Run the Example 328 | 329 | ```python3 330 | python3 fuzzywuzzy-example.py 331 | ``` 332 | 333 | #### Returns 334 | 335 | ```python3 336 | Dallas Cowboys, 90 337 | ``` 338 | 339 | ## Google Mask Example 340 | 341 | This example used to do three things, 1. sets your search term, 2 . set your number of mask search terms, and 3. selects a random user agent for each search query. Google killed their API for this, so byebye. 342 | 343 | ### Run the Example 344 | 345 | ```python3 346 | doesn't work anymore because google killed their API 347 | ``` 348 | 349 | ## Server Example 350 | 351 | This example starts an http server on localhost:10010 and returns data when you visit the page 352 | 353 | ### Run the Example 354 | 355 | ```python3 356 | python3 server-example.py 357 | ``` 358 | 359 | ## Scrapy Spider Example 360 | 361 | This example gets the list of all internal links for any domain by following all internal homepage links and their links. 362 | 363 | ### Run the Example 364 | 365 | ```python3 366 | python3 spider.py -u jamescampbell.us 367 | ``` 368 | 369 | ## Bitcoin Wallet Example 370 | 371 | This example queries the blockchain.info API for an example wallet address and returns the ip address and dates for the transactions as well as the final wallet balance. 372 | 373 | ### Run the Example 374 | 375 | ```python3 376 | python3 bitcoin-example-1.py 377 | ``` 378 | 379 | ## Exifread Example 380 | 381 | This example gets the exif data from an image file 382 | 383 | ### Run the Example 384 | 385 | ```python3 386 | python3 exif-reader.py assets/cat.jpg 387 | ``` 388 | 389 | #### Output 390 | 391 | ```python3 392 | Total tags found: 66 393 | Key: Interoperability InteroperabilityVersion, value [48, 49, 48, 48] 394 | Key: EXIF InteroperabilityOffset, value 36724 395 | Key: Image Software, value SLT-A57 v1.02 396 | Key: EXIF UserComment, value [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] 397 | Key: Image Orientation, value Horizontal (normal) 398 | Key: Thumbnail JPEGInterchangeFormat, value 37012 399 | Key: Interoperability InteroperabilityIndex, value R98 400 | Key: Image ResolutionUnit, value Pixels/Inch 401 | Key: EXIF ExifImageWidth, value 4912 402 | Key: EXIF ComponentsConfiguration, value YCbCr 403 | Key: EXIF FNumber, value 28/5 404 | Key: Thumbnail Software, value SLT-A57 v1.02 405 | Key: EXIF DateTimeDigitized, value 2013:04:07 14:13:38 406 | Key: EXIF ExposureProgram, value Aperture Priority 407 | Key: EXIF DateTimeOriginal, value 2013:04:07 14:13:38 408 | Key: EXIF Sharpness, value Normal 409 | Key: EXIF RecommendedExposureIndex, value 3200 410 | Key: EXIF MakerNote, value [83, 79, 78, 89, 32, 68, 83, 67, 32, 0, 0, 0, 78, 0, 3, 16, 4, 0, 16, 0, ... 
] 411 | Key: EXIF CustomRendered, value Normal 412 | Key: EXIF Saturation, value Normal 413 | Key: EXIF ExposureTime, value 1/80 414 | Key: Image Make, value SONY 415 | Key: EXIF ExifImageLength, value 3264 416 | Key: EXIF DigitalZoomRatio, value 1 417 | Key: Image Model, value SLT-A57 418 | Key: EXIF Contrast, value Normal 419 | Key: EXIF SensitivityType, value Recommended Exposure Index 420 | Key: Thumbnail Orientation, value Horizontal (normal) 421 | Key: Thumbnail YResolution, value 72 422 | Key: Thumbnail Model, value SLT-A57 423 | Key: Image PrintIM, value [80, 114, 105, 110, 116, 73, 77, 0, 48, 51, 48, 48, 0, 0, 3, 0, 2, 0, 1, 0, ... ] 424 | Key: Thumbnail Make, value SONY 425 | Key: EXIF CompressedBitsPerPixel, value 2 426 | Key: EXIF MeteringMode, value Pattern 427 | Key: EXIF MaxApertureValue, value 49/32 428 | Key: Image YCbCrPositioning, value Co-sited 429 | Key: EXIF BrightnessValue, value 303/320 430 | Key: EXIF FlashPixVersion, value 0100 431 | Key: EXIF WhiteBalance, value Auto 432 | Key: EXIF LensModel, value 50mm F1.7 433 | Key: Thumbnail YCbCrPositioning, value Co-sited 434 | Key: Image DateTime, value 2013:04:07 14:13:38 435 | Key: EXIF ExifVersion, value 0230 436 | Key: Thumbnail ImageDescription, value 437 | Key: Image ExifOffset, value 360 438 | Key: Thumbnail JPEGInterchangeFormatLength, value 7654 439 | Key: EXIF ExposureMode, value Auto Bracket 440 | Key: EXIF SceneType, value Directly Photographed 441 | Key: EXIF LensSpecification, value [50, 50, 17/10, 17/10] 442 | Key: Image XResolution, value 350 443 | Key: EXIF ExposureBiasValue, value 0 444 | Key: EXIF ColorSpace, value sRGB 445 | Key: EXIF ISOSpeedRatings, value 3200 446 | Key: EXIF SceneCaptureType, value Standard 447 | Key: EXIF FocalLengthIn35mmFilm, value 75 448 | Key: Image YResolution, value 350 449 | Key: Thumbnail DateTime, value 2013:04:07 14:13:38 450 | Key: EXIF FocalLength, value 50 451 | Key: Thumbnail Compression, value JPEG (old-style) 452 | Key: EXIF FileSource, value Digital Camera 453 | Key: EXIF Flash, value Flash did not fire, compulsory flash mode 454 | Key: Image ImageDescription, value 455 | Key: Thumbnail XResolution, value 72 456 | Key: Thumbnail ResolutionUnit, value Pixels/Inch 457 | Key: EXIF LightSource, value Unknown 458 | ``` 459 | 460 | ## Sentiment Example 461 | 462 | This example takes a test list of tweets and returns positive or negative. It works in Python 3. 463 | 464 | ### Run the Example 465 | 466 | ```python3 467 | python3 sentiment-analysis-nltk-example.py testtweets.txt 468 | ``` 469 | 470 | ### Output 471 | 472 | ```python3 473 | negative 474 | positive 475 | negative 476 | positive 477 | negative 478 | Positive count: 2 479 | Negative count: 3 480 | ``` 481 | 482 | ## hashlib example 483 | 484 | The hashlib package generates hashes from strings. This example uses the sha256 hash algorithm. 485 | 486 | ### Run the Example 487 | 488 | ```python3 489 | python3 hashlib_example.py 490 | ``` 491 | 492 | ## Proxymanager Example 493 | 494 | This example uses urllib3 in Python 3 to connect through a privoxy connection and return status, headers, and content. 
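If you just want the shape of the call, here is a minimal sketch of the same idea, assuming a local Privoxy (or any HTTP proxy) listening on 127.0.0.1:8118 — the proxy URL is an assumption, not what the script hard-codes:

```python3
# Hedged sketch, not the repository script; adjust the proxy URL to your setup.
import urllib3

proxy = urllib3.ProxyManager("http://127.0.0.1:8118/")  # assumed local privoxy port
response = proxy.request("GET", "http://duckduckgo.com/")
print(response.status)                # e.g. 200
print(response.headers)               # HTTPHeaderDict of response headers
print(response.data.decode("utf-8"))  # page content
```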
495 | 496 | ### Run the Example 497 | 498 | ```python3 499 | python3 urllib3-proxymanager-example.py 500 | ``` 501 | 502 | #### Output 503 | 504 | ```python3 505 | 200 506 | HTTPHeaderDict({'Content-Length': '5255', 'Proxy-Connection': 'keep-alive', 'ETag': '"564e8118-1487"', 'Server': 'nginx', 'Cache-Control': 'no-cache', 'Expires': 'Fri, 20 Nov 2015 02:15:59 GMT', 'Accept-Ranges': 'bytes', 'Content-Type': 'text/html; charset=UTF-8', 'Connection': 'keep-alive', 'Date': 'Fri, 20 Nov 2015 02:16:00 GMT'}) 507 | 508 | 509 | 510 | 511 | 512 | 513 | 514 | 515 | 516 | 517 | 518 | 519 | 520 | 521 | 522 | 523 | 524 | 525 | 526 | 527 | 528 | 529 | 530 | 531 | 532 | 533 | 534 | 535 | 536 | 537 | 538 | 539 | 540 | 541 | 542 | 543 | DuckDuckGo 544 | 545 | 546 | 547 | 548 | 549 | 550 | 551 | 552 | 553 | 557 | 558 | 559 | 560 | 561 | 564 | 565 | 566 | 567 |
636 | 637 | 638 | 639 | This is a link: 640 | 641 | About DuckDuckGo 642 | Duck it! 643 | 644 | This is a link: 645 | Help Spread DuckDuckGo! 646 | This is a link: 647 | Take a Tour 648 | ``` 649 | 650 | ## Quandl Example 651 | 652 | This example gets the stocks from AAPL into a dataframe and prints it. 653 | 654 | ### Run the Example 655 | 656 | ```python3 657 | python3 quandl-example.py 658 | ``` 659 | 660 | #### Output 661 | 662 | ```python3 663 | first date: 2001-12-31 664 | Total days of stock data available: 4 665 | [Finished in 1.6s] 666 | ``` 667 | 668 | ## Json to Python Object Example 669 | 670 | This example takes a json object and converts it to python and iterates through the values. It works for Python 3 or Python 2.7 671 | 672 | ### Run the Example 673 | 674 | ```python3 675 | python3 json-example.py 676 | ``` 677 | 678 | ## Foursquare API Example 679 | 680 | This example connects to Foursquare and asks for a city, country input and venue name and returns back the JSON and the Latitude and Longitude as well as the link to display the Qwant Map zoomed into that location 681 | 682 | ### Run the Example 683 | 684 | ```python3 685 | python 4sq-example.py 686 | ``` 687 | 688 | #### Output 689 | 690 | ```python3 691 | 692 | What city do you want to search in? (no spaces, include country): London,UK 693 | What is the name of the venue to search?: Millenium Hotel 694 | { 695 | u'geocode': { 696 | u'parents': [ 697 | 698 | ], 699 | u'what': u'', 700 | u'where': u'londonuk', 701 | u'feature': { 702 | u'highlightedName': u'London, 703 | GreaterLondon, 704 | UK', 705 | u'displayName': u'London, 706 | GreaterLondon, 707 | UnitedKingdom', 708 | u'name': u'London', 709 | u'longId': u'72057594040571679', 710 | u'cc': u'GB', 711 | u'id': u'geonameid: 2643743', 712 | u'geometry': { 713 | u'center': { 714 | u'lat': 51.50853, 715 | u'lng': -0.12574 716 | }, 717 | u'bounds': { 718 | u'sw': { 719 | u'lat': 51.28467404417054, 720 | u'lng': -0.5085579279369435 721 | }, 722 | u'ne': { 723 | u'lat': 51.691643999655895, 724 | u'lng': 0.33418999705203406 725 | } 726 | } 727 | }, 728 | u'matchedName': u'London, 729 | GreaterLondon, 730 | UK', 731 | u'woeType': 7, 732 | u'slug': u'london' 733 | } 734 | }, 735 | u'venues': [ 736 | { 737 | u'verified': True, 738 | u'name': u'MillenniumHotelLondonMayfair', 739 | u'referralId': u'v-1434850451', 740 | u'url': u'http: //www.millenniumhotels.co.uk', 741 | u'storeId': u'', 742 | u'hereNow': { 743 | u'count': 0, 744 | u'groups': [ 745 | 746 | ], 747 | u'summary': u'Nobodyhere' 748 | }, 749 | u'specials': { 750 | u'count': 0, 751 | u'items': [ 752 | 753 | ] 754 | }, 755 | u'contact': { 756 | u'facebookName': u'Millennium&CopthorneHotelsEurope', 757 | u'twitter': u'millenniumeu', 758 | u'phone': u'+442076299400', 759 | u'facebook': u'456685494411593', 760 | u'formattedPhone': u'+442076299400', 761 | u'facebookUsername': u'MillenniumEU' 762 | }, 763 | u'location': { 764 | u'city': u'Mayfair', 765 | u'cc': u'GB', 766 | u'country': u'UnitedKingdom', 767 | u'postalCode': u'W1K2HP', 768 | u'state': u'GreaterLondon', 769 | u'formattedAddress': [ 770 | u'44GrosvenorSquare', 771 | u'Mayfair', 772 | u'GreaterLondon', 773 | u'W1K2HP', 774 | u'UnitedKingdom' 775 | ], 776 | u'address': u'44GrosvenorSquare', 777 | u'lat': 51.51086806955976, 778 | u'lng': -0.1512632169763817 779 | }, 780 | u'stats': { 781 | u'tipCount': 31, 782 | u'checkinsCount': 3586, 783 | u'usersCount': 1559 784 | }, 785 | u'id': u'4ac518b5f964a52090a020e3', 786 | u'categories': [ 787 | { 788 | 
u'pluralName': u'Hotels', 789 | u'primary': True, 790 | u'name': u'Hotel', 791 | u'shortName': u'Hotel', 792 | u'id': u'4bf58dd8d48988d1fa931735', 793 | u'icon': { 794 | u'prefix': u'https: //ss3.4sqi.net/img/categories_v2/travel/hotel_', 795 | u'suffix': u'.png' 796 | } 797 | } 798 | ] 799 | } 800 | ] 801 | } 802 | 803 | Lat/Long: 51.5108680696, -0.151263216976 804 | ``` 805 | 806 | ## argparse Example 807 | 808 | This example sets some basic args. 809 | 810 | ### Run the Example 811 | 812 | ```python3 813 | python3 argparse.py -h 814 | ``` 815 | 816 | Returns: 817 | 818 | ```python3 819 | usage: argparse example [-h] [-a] [-v] [--verbose] 820 | 821 | Example on how to use argparse 822 | 823 | positional arguments: 824 | +a Turn A on 825 | 826 | optional arguments: 827 | -h, --help show this help message and exit 828 | -a Turn A off 829 | -v, --version show program's version number and exit 830 | --verbose verbose flag 831 | ``` 832 | 833 | ## Shodan Count Example 834 | 835 | This example connects to shodan api via your configs.py file with proper api key variable set and then queries for nginx in Glasgow, GB. 836 | 837 | ### Run the Example 838 | 839 | ```python3 840 | python3 shodan-example.py 841 | ``` 842 | 843 | #### Returns 844 | 845 | ```python3 846 | Results found: 246 847 | [Finished in 0.6s] 848 | ``` 849 | 850 | ## Google GEO and Shodan Example 851 | 852 | This example takes an address, gets the lat/long, and searches in shodan for matches near that location. 853 | 854 | ### Run the Example 855 | 856 | ```python3 857 | python3 get-geo-example.py 858 | ``` 859 | 860 | #### Returns 861 | 862 | ```python3 863 | geo:58.98691099999999,-2.960873,3 864 | Results found: 572 865 | [Finished in 0.7s] 866 | ``` 867 | 868 | ## RethinkDB Example 869 | 870 | This example takes pastebin archive daily json data into a test table in rethinkdb and pulls out values from it. 871 | 872 | ### Run the rethinkdb Example 873 | 874 | ```python3 875 | python3 rethink-example.py 876 | ``` 877 | 878 | ### Returns rethink stuff 879 | 880 | ## Websockify Example 881 | 882 | This example uses websockify. 883 | 884 | ```python3 885 | python3 websockify-example.py :8015 :80 886 | ``` 887 | 888 | ### Returns 889 | 890 | ??? 891 | 892 | ## Imaplib Example 893 | 894 | This example uses imaplib. 
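At its core it logs into an IMAP server and polls for unread messages. A minimal sketch of that flow (the Gmail host and credentials below are placeholders — the real script takes the account and password as command-line arguments):

```python3
# Hedged sketch only; imap.gmail.com and the credentials are placeholders.
import imaplib

mail = imaplib.IMAP4_SSL("imap.gmail.com")
mail.login("you@example.com", "your-app-password")
mail.select("inbox")
status, data = mail.search(None, "UNSEEN")          # ids of unread messages
for num in data[0].split():
    status, msg_data = mail.fetch(num, "(RFC822)")  # raw RFC 822 message
    print(msg_data[0][1][:200])                     # first 200 bytes of the message
mail.logout()
```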
895 | 896 | ### Run the imaplib example 897 | 898 | ```python3 899 | python3 imaplib-example.py [your gmail] [your password] 900 | ``` 901 | 902 | ### Returns 903 | 904 | Latest unread mail from account and continues to run until you `CTRL+C` 905 | 906 | *More coming soon!* 907 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "python-examples" 3 | version = "0.1.0" 4 | description = "examples of common python libs" 5 | authors = [ 6 | {name = "James Campbell"}, 7 | ] 8 | readme = "README.md" 9 | requires-python = ">=3.12" 10 | dependencies = [ 11 | "beautifulsoup4>=4.12.3", 12 | "pysocks>=1.7.1", 13 | "rethinkdb>=2.4.10", 14 | "quandl>=3.7.0", 15 | "nltk>=3.9.1", 16 | "exifread>=3.0.0", 17 | "blockchain>=1.4.4", 18 | "websockify>=0.11.0", 19 | "shodan>=1.31.0", 20 | "urllib3>=2.2.1", 21 | "fuzzywuzzy>=0.18.0", 22 | "scrapy>=2.12.0", 23 | "pytest>=8.3.0", 24 | # Security note: pip 25.2 has known tarfile vulnerability (GHSA-4xh5-x5gv-qwph) 25 | # scrapy 2.13.3 has old DoS vulnerability (PYSEC-2017-83) - consider if needed 26 | "termcolor>=2.4.0", 27 | "pycld2>=0.41", 28 | "polyglot>=16.7.4", 29 | "tika>=2.6.0", 30 | "pyzillow>=0.7.0", 31 | "geotext>=0.4.0", 32 | "tabulate>=0.9.0", 33 | "tqdm>=4.66.4", 34 | "redis>=5.0.4", 35 | "psycopg2-binary>=2.9.9", 36 | "pypdf2>=3.0.1", 37 | "pinboard>=2.1.9", 38 | "webdriver-manager>=4.0.2", 39 | "scapy>=2.5.0", 40 | "matplotlib>=3.9.0", 41 | "iptcinfo3>=2.1.4", 42 | "requests>=2.31.0", 43 | "lxml>=5.2.0", 44 | "pillow>=10.3.0", 45 | "numpy>=1.26.0", 46 | ] 47 | 48 | [tool.uv] 49 | dev-dependencies = [ 50 | "pytest>=8.3.0", 51 | "black>=24.0.0", 52 | "flake8>=7.0.0", 53 | "isort>=5.13.0", 54 | "pip-audit>=2.9.0", 55 | ] 56 | 57 | [build-system] 58 | requires = ["setuptools>=68.0", "wheel"] 59 | build-backend = "setuptools.build_meta" 60 | 61 | [tool.setuptools] 62 | include-package-data = true 63 | 64 | [tool.setuptools.packages.find] 65 | exclude = ["tests*", "*.tests*"] 66 | 67 | [tool.setuptools.package-data] 68 | "python_examples" = ["*"] 69 | -------------------------------------------------------------------------------- /python-examples/4sq-example.py: -------------------------------------------------------------------------------- 1 | """Python example to connect and retrieve values from foursquare API.""" 2 | import json # the API call returns JSON formatted data 3 | import datetime as dt # need date in v= or the api call doesn't work 4 | from urllib.parse import quote 5 | from urllib.request import urlopen 6 | 7 | # globals 8 | limiter = "1" # make sure to keep as string due to concat issues otherwise 9 | cityer = input("What city do you want to search in? 
(no spaces, include country): ") 10 | queryer = quote(input("What is the name of the venue to search?: ")) 11 | clientider = "5HIGYBY4D24FXGCYTMYBUBGLYQSLORV03CRUS4E53F3GZ1VS" 12 | clientsecreter = "B11MC3TFDEY10XQQTUDQGGTKDGCCJBOHD4RPY5VYEW12ZNIN" 13 | dater = dt.datetime.today().strftime("%Y%m%d") # the v needs YYYYMMDD format 14 | # the actual api call url 15 | foursquareapivenuesearch = ( 16 | "https://api.foursquare.com/v2/venues/search?limit=" 17 | + limiter 18 | + "&near=" 19 | + cityer 20 | + "&query=" 21 | + queryer 22 | + "&client_id=" 23 | + clientider 24 | + "&client_secret=" 25 | + clientsecreter 26 | + "&v=" 27 | + dater 28 | ) 29 | request = urlopen(foursquareapivenuesearch) # open the url 30 | dataconvert = json.loads(request.read()) # read the data returned from url 31 | print(dataconvert["response"]) # print list of return json 32 | lat = str( 33 | dataconvert["response"]["venues"][0]["location"]["lat"] 34 | ) # foursquare response latitude 35 | lng = str( 36 | dataconvert["response"]["venues"][0]["location"]["lng"] 37 | ) # foursquare response longitude 38 | print(f"\nLat/Long: {lat}, {lng}") # print lat long only 39 | quantmapstring = f"https://www.qwant.com/maps/#map=20.08/{lat}/{lng}" 40 | print(f"Here is the quant map page for that location:\n {quantmapstring}") 41 | print("Right click on link and open in browser to view the location.\n") 42 | -------------------------------------------------------------------------------- /python-examples/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/james-see/python-examples/24ef8e164738827e25a049dfebcd9e2e56d8dc1d/python-examples/__init__.py -------------------------------------------------------------------------------- /python-examples/all.py: -------------------------------------------------------------------------------- 1 | """Way to check all examples.""" 2 | import os 3 | from os import listdir 4 | from os.path import isfile, join 5 | 6 | cwd = os.getcwd() 7 | print(cwd) 8 | onlyfiles = [f for f in listdir(cwd) if isfile(join(cwd, f))] 9 | for i in onlyfiles: 10 | if i.endswith(".py"): 11 | print(i.split("-")[0]) 12 | -------------------------------------------------------------------------------- /python-examples/amazon_ec2_boto-example.py: -------------------------------------------------------------------------------- 1 | # Author: James Campbell 2 | # What: Amazon EC2 example 3 | # Requirements: pip3 install boto3 awscli and run aws configure 4 | try: 5 | import boto3 6 | except Exception: 7 | exit('do "pip3 install boto" and "pip3 install awscli" first') 8 | 9 | ec2 = boto3.client("ec2") 10 | response = ec2.describe_instances() 11 | print(response) 12 | -------------------------------------------------------------------------------- /python-examples/arabic_dict-example.py: -------------------------------------------------------------------------------- 1 | """Python arabic example.""" 2 | # Author: James Campbell 3 | # Date: 2015-05-25 4 | # Last Updated: 3 July 2019 5 | exampledict = {"ا": "ALIF", "ع": "AYN"} 6 | keys = exampledict.keys() 7 | values = exampledict.values() 8 | print(keys) 9 | print(values) 10 | -------------------------------------------------------------------------------- /python-examples/argparse-example.py: -------------------------------------------------------------------------------- 1 | """Example of how to use argparse lib properly.""" 2 | # Author: James Campbell 3 | # Date: 2016-06-09 4 | # Date Updated: 3 July 
2019 5 | # What: Example of how to use argparse moduel properly (Python3) 6 | import argparse 7 | 8 | parser = argparse.ArgumentParser( 9 | description="Example on how to use argparse", prog="argparse example" 10 | ) 11 | parser.add_argument("-a", action="store_false", default=None, help="Turn A off") 12 | parser.add_argument("+a", action="store_true", default=None, help="Turn A on") 13 | parser.add_argument("-v", "--version", action="version", version="VERSION 2.0") 14 | parser.add_argument("--verbose", action="store_true", help="verbose flag") 15 | args = parser.parse_args() 16 | if args.verbose: 17 | print("~ Very Verbose!") 18 | else: 19 | print("~ Not verbose") 20 | -------------------------------------------------------------------------------- /python-examples/assets/abba.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/james-see/python-examples/24ef8e164738827e25a049dfebcd9e2e56d8dc1d/python-examples/assets/abba.png -------------------------------------------------------------------------------- /python-examples/assets/archive_name.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/james-see/python-examples/24ef8e164738827e25a049dfebcd9e2e56d8dc1d/python-examples/assets/archive_name.tar.gz -------------------------------------------------------------------------------- /python-examples/assets/cnn.txt: -------------------------------------------------------------------------------- 1 | //store.cnn.com 2 | http://www.cnn.com/privacy 3 | http://www.cnn.com/specials/space-science 4 | http://www.cnn.com/more 5 | http://www.cnn.com/specials/opinion/opinion-social-issues 6 | http://www.turner.com 7 | http://www.cnn.com/email/subscription 8 | http://www.cnn.com/specials/politics/congress-capitol-hill 9 | http://www.cnn.com/ 10 | http://www.cnn.com/about 11 | http://www.cnn.com/specials/sport/winter-olympics-2018 12 | http://www.cnn.com/europe 13 | http://www.cnn.com/specials/tech/gadget 14 | http://www.cnn.com/asia 15 | http://bleacherreport.com/college-football 16 | //tours.cnn.com 17 | http://www.cnn.com/travel/food-and-drink 18 | http://www.cnn.com/travel 19 | http://www.cnn.com/terms 20 | http://www.cnn.com/china 21 | http://www.cnn.com/specials/profiles 22 | https://www.facebook.com/cnn 23 | http://www.cnn.com/specials/cnn-underscored/wellness/ 24 | http://www.cnn.com/newsletters 25 | http://www.cnn.com/world 26 | http://www.cnn.com/specials/tech/innovative-cities 27 | http://www.cnn.com/accessibility 28 | http://www.cnn.com/specials/politics/fact-check-politics 29 | http://www.cnn.com/videos 30 | https://money.cnn.com/data/markets/ 31 | http://www.cnn.com/travel/play 32 | http://www.cnn.com/specials/politics/supreme-court-nine 33 | http://www.cnn.com/specials/digital-studios 34 | http://www.cnn.com/specials/tech/mission-ahead 35 | http://www.cnn.com/specials/health/food-diet 36 | http://www.cnn.com/2017/03/04/vr/how-to-watch-vr 37 | http://www.cnn.com/specials/politics/president-donald-trump-45 38 | http://www.cnn.com/business/media 39 | http://www.cnn.com/style/architecture 40 | http://www.cnn.com/cnn-underscored/ 41 | http://www.cnn.com/africa 42 | http://www.cnn.com/style/fashion 43 | http://www.cnn.com/style/arts 44 | http://www.cnn.com/specials/tech/upstarts 45 | http://www.cnn.com/travel/stay 46 | http://www.cnn.com/australia 47 | http://bleacherreport.com/nfl 48 | http://www.cnn.com/travel/destinations 49 | http://www.cnn.com/americas 
50 | http://www.cnn.com/politics 51 | http://www.cnn.com/business/videos 52 | http://www.cnn.com/specials/tech/innovate 53 | http://bleacherreport.com/nba 54 | http://www.cnn.com/specials/vr/vr-archives 55 | http://www.cnn.com/specials/health/parenting 56 | http://www.cnn.com/specials/more/cnn-leadership 57 | http://www.cnn.com/middle-east 58 | http://cnnnewsource.com 59 | http://www.cnn.com/specials/cnn-underscored/lifestyle/ 60 | http://www.cnn.com/specials/photos 61 | https://www.turnerjobs.com/search-jobs?orgIds=1174&ac=19299 62 | http://www.cnn.com# 63 | http://www.cnn.com/specials/tv/all-shows 64 | http://www.cnn.com/opinions 65 | //collection.cnn.com 66 | http://bleacherreport.com 67 | http://www.cnn.com/specials/tech/business-evolved 68 | http://www.cnn.com/health 69 | http://www.cnn.com/specials/videos/hln 70 | http://www.cnn.com/transcripts 71 | //coupons.cnn.com 72 | http://www.cnn.com/entertainment/tv-shows 73 | http://www.cnn.com/us 74 | http://www.cnn.com/specials/opinion/opinion-politics 75 | http://www.cnn.com/vr 76 | http://bleacherreport.com/world-football 77 | http://www.cnn.com/specials/us/crime-and-justice 78 | http://www.cnn.com/specials/health/live-longer 79 | http://www.cnn.com/india 80 | http://www.cnn.com/uk 81 | http://www.cnn.com/specials/cnn-underscored/gadgets/ 82 | http://www.cnn.com/specials/cnn-investigations 83 | http://www.cnn.com/business 84 | http://www.cnn.com/specials/health/wellness 85 | http://www.cnn.com/style/autos 86 | http://www.cnn.com/specials/tech/work-transformed 87 | http://www.cnn.com/business/tech 88 | http://www.cnn.com/specials/us/energy-and-environment 89 | http://cnn.it/go2 90 | https://instagram.com/cnn 91 | http://www.cnn.com/specials/health/fitness-excercise 92 | http://www.cnn.com/entertainment/movies 93 | http://www.cnn.com/business/success 94 | http://www.cnn.com/style/design 95 | http://www.cnn.com/travel/videos 96 | http://www.cnn.com/style/luxury 97 | //store.cnn.com/?utm_source=cnn.com&utm_medium=referral&utm_campaign=navbar 98 | http://www.cnn.com/style/videos 99 | http://www.cnn.com/style 100 | http://www.cnn.com/entertainment 101 | http://www.cnn.com/specials/cnn-underscored/explore/ 102 | http://www.cnn.com/specials/us/extreme-weather 103 | http://www.cnn.com/entertainment/culture 104 | http://www.cnn.com/specials/politics/2020-election-coverage 105 | http://www.cnn.com//cnn.it/go2 106 | http://www.cnn.com/tv/schedule/cnn 107 | http://bleacherreport.com/mlb 108 | http://www.cnn.com/entertainment/celebrities 109 | http://www.cnn.com/business/perspectives 110 | http://www.cnn.com/specials/videos/digital-shorts 111 | http://www.cnn.com/specials/cnn-longform 112 | https://twitter.com/cnn 113 | -------------------------------------------------------------------------------- /python-examples/assets/discordia.pkl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/james-see/python-examples/24ef8e164738827e25a049dfebcd9e2e56d8dc1d/python-examples/assets/discordia.pkl -------------------------------------------------------------------------------- /python-examples/assets/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | Test Page 4 | 5 | 6 |

Level 1 header
7 | Subheading
8 | Normal text here
9 | 10 | 11 | -------------------------------------------------------------------------------- /python-examples/assets/iptc-example.jpg~: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/james-see/python-examples/24ef8e164738827e25a049dfebcd9e2e56d8dc1d/python-examples/assets/iptc-example.jpg~ -------------------------------------------------------------------------------- /python-examples/assets/test.xls: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/james-see/python-examples/24ef8e164738827e25a049dfebcd9e2e56d8dc1d/python-examples/assets/test.xls -------------------------------------------------------------------------------- /python-examples/assets/testimage.jpg~: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/james-see/python-examples/24ef8e164738827e25a049dfebcd9e2e56d8dc1d/python-examples/assets/testimage.jpg~ -------------------------------------------------------------------------------- /python-examples/assets/testtweets.txt: -------------------------------------------------------------------------------- 1 | this is not good. 2 | i like all of this. 3 | this is a good test. 4 | having a good day 5 | raining outside, but I have to go out 6 | -------------------------------------------------------------------------------- /python-examples/audio_waveform-example.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | import wave 4 | # to install pyaudio on osx: brew install portaudio then pip 5 | # install --allow-external pyaudio --allow-unverified pyaudio pyaudio 6 | import pyaudio 7 | import speech_recognition as sr # pip install speechrecognition 8 | 9 | r = sr.Recognizer() 10 | with sr.Microphone() as source: # use the default microphone as the audio source 11 | audio = r.listen(source) 12 | 13 | CHUNK = 1024 14 | FORMAT = pyaudio.paInt16 # paInt8 15 | CHANNELS = 1 16 | RATE = 44100 # sample rate 17 | RECORD_SECONDS = 5 18 | WAVE_OUTPUT_FILENAME = "output.wav" 19 | 20 | p = pyaudio.PyAudio() 21 | 22 | stream = p.open( 23 | format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK 24 | ) # buffer 25 | 26 | print("* recording") 27 | 28 | frames = [] 29 | 30 | for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)): 31 | data = stream.read(CHUNK) 32 | frames.append(data) # 2 bytes(16 bits) per channel 33 | wf = wave.open("temp.wav", "wb") 34 | wf.setnchannels(CHANNELS) 35 | wf.setsampwidth(p.get_sample_size(FORMAT)) 36 | wf.setframerate(RATE) 37 | wf.writeframes(b"".join(frames)) 38 | wf.close() 39 | print("* done recording") 40 | stream.stop_stream() 41 | stream.close() 42 | p.terminate() 43 | spf = wave.open("temp.wav", "r") 44 | signal = spf.readframes(-1) 45 | signal = np.fromstring(signal, "Int16") 46 | plt.figure(1) 47 | plt.title("Signal Wave...") 48 | plt.plot(signal) 49 | plt.show() 50 | exit() 51 | -------------------------------------------------------------------------------- /python-examples/bitcoin-example-1.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # Author: James Campbell 3 | # Date: 2015-11-22 4 | # Date Updated: 2019-06-19 5 | # What: Accesses the blockchain module and queries some data as example 6 | import sys 7 | from sys import exit 8 | import datetime 9 | from blockchain import 
blockexplorer 10 | 11 | # example address test 12 | address = blockexplorer.get_address("1SDude3hVWoAT2sFxy3JkH2VrcUXPM4PA") 13 | if len(sys.argv) > 1: # you can pass a bitcoin address in from terminal 14 | print(sys.argv[1]) 15 | address = blockexplorer.get_address(sys.argv[1]) 16 | print(address) 17 | # final balance 18 | print(f"\nFinal balance of wallet: {address.final_balance}") # add decimal after first 19 | transactions = address.transactions 20 | print(f"\nList of {len(transactions)} transactions: \n -----------------------------") 21 | for trans in transactions: 22 | print( 23 | f"Ip address of relayed transaction: {trans.relayed_by}" 24 | ) # print ip address of the relayed transaction 25 | print(f"Hash of the transaction: {trans.hash}") # hash of the transaction 26 | print(f"Time of the transaction: {trans.time}") # time of the transaction 27 | timestamp = trans.time 28 | fixedtime = datetime.datetime.fromtimestamp(timestamp) 29 | fixer = fixedtime.strftime("%Y-%m-%d %H:%M:%S") 30 | print(f"Converted time: {fixedtime.strftime('%Y-%m-%d %H:%M:%S')}\n") 31 | # inventory only works for transactions up to 1 hour old 32 | # inv = blockexplorere.get_inventory_data(trans.hash) 33 | # print('Time: %s Initial IP: %s' % (fixer, inv.initial_ip)) 34 | exit() 35 | -------------------------------------------------------------------------------- /python-examples/browser-example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | """ 3 | Date Updated: 2019-06-19 4 | Author: James Campbell 5 | What: Loads CNN.com using mechanize and saves all the links to assets/cnn.txt 6 | """ 7 | import mechanize 8 | 9 | base_url = "http://www.cnn.com" 10 | title = "cnn" 11 | 12 | 13 | def crawl(site): 14 | seed_url = site 15 | br = mechanize.Browser() 16 | 17 | br.set_handle_robots(False) 18 | br.set_handle_equiv(False) 19 | 20 | br.open(seed_url) 21 | 22 | link_bank = [] 23 | 24 | for link in br.links(): 25 | if link.url[0:4] == "http": 26 | link_bank.append(link.url) 27 | if link.url[0] == "/": 28 | url = link.url 29 | if url.find(".com") == -1: 30 | if url.find(".org") == -1: 31 | link_bank.append(base_url + link.url) 32 | else: 33 | link_bank.append(link.url) 34 | else: 35 | link_bank.append(link.url) 36 | 37 | if link.url[0] == "#": 38 | link_bank.append(base_url + link.url) 39 | 40 | link_bank = list(set(link_bank)) 41 | my_file = open(f"./assets/{title}.txt", "w") 42 | for link in link_bank: 43 | my_file.write(link + "\n") 44 | my_file.close() 45 | return link_bank 46 | 47 | 48 | crawl(base_url) 49 | raise SystemExit(0) 50 | -------------------------------------------------------------------------------- /python-examples/bs4_email_regex-example.py: -------------------------------------------------------------------------------- 1 | """ 2 | What: bs4 get email from beautiful soup object 3 | Author: James Campbell 4 | Date: 2015-10-09 5 | Updated Date: 3 July 2019 6 | """ 7 | from bs4 import BeautifulSoup 8 | import re 9 | import sys 10 | 11 | 12 | def get_emails(soupcontent): 13 | """ 14 | soupcontent: expected to be a bs4 object 15 | Description: This functions gets emails from bs4 object and returns a list 16 | """ 17 | emaillist = [] 18 | soupere = soupcontent.find_all( 19 | text=re.compile( 20 | r"\S[a-zA-Z0-9._%+-].*?[a-zA-Z0-9_%+-]+@[a-zA-Z0-9.-]+?\.[a-zA-Z]{2,4}?\b" 21 | ) 22 | ) 23 | for soupe in soupere: 24 | soupe = soupe.strip() 25 | if soupe == "": 26 | continue 27 | emaillist.append(soupe) 28 | return emaillist 29 | 30 | 31 | 
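# Demo below (module level): build a soup from a small HTML snippet, then
# print every text node that get_emails() flags as containing an email address.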
htmlcontent = """ 32 |

This is an email: james@jamescampbell.us and this is not: james.

""" 33 | soupobject = BeautifulSoup(htmlcontent, "html.parser") 34 | finallist = get_emails(soupobject) 35 | 36 | # prove that it worked 37 | for email in finallist: 38 | print(email) 39 | 40 | sys.exit() 41 | -------------------------------------------------------------------------------- /python-examples/chrome-headless-example.py: -------------------------------------------------------------------------------- 1 | from selenium import webdriver 2 | from webdriver_manager.chrome import ChromeDriverManager 3 | instauser = "default" 4 | 5 | 6 | def getuser(): 7 | """Get instagram username.""" 8 | instauser = input('What instagram user to get information about? ["q" for quit]: ') 9 | return instauser 10 | 11 | 12 | def getcontent(instauser): 13 | """Headless chrome gets the content we need.""" 14 | options = webdriver.ChromeOptions() 15 | options.add_argument('--headless') 16 | options.add_argument('--no-sandbox') 17 | driver = webdriver.Chrome(ChromeDriverManager().install(), options=options) 18 | driver.get('https://www.instagram.com/{}/'.format(instauser)) 19 | print(driver.title) 20 | userdata = driver.execute_script("return _sharedData.entry_data.ProfilePage[0].graphql.user") 21 | # print all of the data 22 | print(f"User ID: {userdata['id']}\n\ 23 | Biography: {userdata['biography']}\n\ 24 | Friends: {userdata['edge_followed_by']['count']}\n\ 25 | Following: {userdata['edge_follow']['count']}") 26 | 27 | 28 | def main(): 29 | """Run the program.""" 30 | instauser = getuser() 31 | if instauser != 'q': 32 | getcontent(instauser) 33 | else: 34 | print('Thanks for playing.') 35 | 36 | 37 | if __name__ == "__main__": 38 | main() 39 | -------------------------------------------------------------------------------- /python-examples/circles-example.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | x = [1, 2, 3, 4, 5] 3 | y = [10, 20, 30, 40, 50] 4 | r = [100, 80, 60, 40, 20] # in points, not data units 5 | fig, ax = plt.subplots(1, 1) 6 | ax.scatter(x, y, s=r) 7 | fig.show() 8 | -------------------------------------------------------------------------------- /python-examples/cvlib_example.py: -------------------------------------------------------------------------------- 1 | """Example using cvlib.""" 2 | import cvlib as cv 3 | from cvlib.object_detection import draw_bbox 4 | import cv2 5 | 6 | img = image = cv2.imread("assets/sv.jpg") 7 | bbox, label, conf = cv.detect_common_objects(img, model="largess") 8 | print(label) 9 | 10 | output_image = draw_bbox(img, bbox, label, conf) 11 | cv2.imwrite("cvlib-example-out.jpg", output_image) 12 | -------------------------------------------------------------------------------- /python-examples/djvu-pdf-example.py: -------------------------------------------------------------------------------- 1 | """djvu to pdf script example.""" 2 | # Date Updated: 1 July 2019 3 | # pre-req osx: djvu2pdf (brew install djvu2pdf with homebrew installed) 4 | # pre-req Ubuntu / Debian: sudo apt-get install djvulibre-bin ghostscript 5 | import fnmatch 6 | import os 7 | import subprocess 8 | # global variables (change to suit your needs) 9 | inputfolderpath = '~' # set to import folder path 10 | outputpath = '~' # set to output folder (must exist) 11 | operationtype = input('Input from folder (1) or single file (2)?: ') 12 | 13 | 14 | def find_files(directory, pattern): 15 | """Find specific files in a directory and sub directories.""" 16 | for _, _, files in os.walk(directory): 17 | for basename 
in files: 18 | if fnmatch.fnmatch(basename, pattern): 19 | filename = basename 20 | yield filename 21 | 22 | 23 | if operationtype == '1': 24 | i = 0 25 | print(f"Input dir & sub directory underneath set as {inputfolderpath}") 26 | for filename in find_files(inputfolderpath, '*.djvu'): 27 | print(f"[*] Processing DJVU to PDF for {filename}...") 28 | i = i + 1 29 | inputfull = inputfolderpath+filename 30 | outputfilename = filename[:-4]+i+'pdf' # make filename unique 31 | outputfilepath = outputpath 32 | p = subprocess.Popen(["djvu2pdf", inputfull], stdout=subprocess.PIPE) 33 | output, err = p.communicate() 34 | subprocess.call(["mv", outputfilename, outputfilepath]) 35 | print('[-] Processing finished for %s' % filename) 36 | print(f"[--] processed {i} file(s) [--]") 37 | exit('\n\"Sanity is madness put to good uses.\" - George Santayana\n') 38 | 39 | elif operationtype == '2': 40 | filename = input('What filename to process? (leave blank for example): ') 41 | if 'djvu' in filename: 42 | print('Processing DJVU to PDF...') 43 | p = subprocess.Popen(["djvu2pdf", filename], stdout=subprocess.PIPE) 44 | output, err = p.communicate() 45 | print('Processing finished') 46 | exit('Completed sucessfully') 47 | else: 48 | print('No djvu file to process, running sample') 49 | print('Processing DJVU to PDF...') 50 | p = subprocess.Popen(["djvu2pdf", "assets/example.djvu"], 51 | stdout=subprocess.PIPE) 52 | output, err = p.communicate() 53 | print('Processing finished') 54 | exit('Completed sucessfully') 55 | 56 | 57 | elif operationtype == '': 58 | exit('You hit enter without inputing anything, nice work, exiting.') 59 | -------------------------------------------------------------------------------- /python-examples/exif_reader-example.py: -------------------------------------------------------------------------------- 1 | """Show how to get exif data and iptc data from various libraries.""" 2 | import sys 3 | 4 | from PIL import Image 5 | 6 | try: 7 | import exifread 8 | except ModuleNotFoundError as e: 9 | print(e, "pip3 install this!") 10 | exit(1) 11 | try: 12 | import iptcinfo3 13 | except ModuleNotFoundError as e: 14 | print(e, "pip3 install this!") 15 | exit(1) 16 | 17 | # Open image file for reading (binary mode) 18 | path_name = 'assets/cat.jpg' 19 | f = sys.argv[1] # check to see if image in command line 20 | f = open(f, 'rb') 21 | 22 | # Return Exif tags 23 | tags = exifread.process_file(f) 24 | totaltags = len(tags) 25 | print('-------EXIF DATA FOUND-------') 26 | print(f"Total EXIF tags found: {totaltags}") 27 | for tag in tags.keys(): 28 | print("Key: %s, value %s" % (tag, tags[tag])) 29 | print('-----------------END EXIF DATA-------') 30 | 31 | im = Image.open(sys.argv[1]) 32 | try: 33 | info = iptcinfo3.IPTCInfo(sys.argv[1]) 34 | print('-------IPTC DATA FOUND-------') 35 | for k, v in info._data.items(): 36 | print(k, v) 37 | info['city'] = '#magistræde #🇩🇰' 38 | info.save() 39 | except Exception as e: 40 | if str(e) != "No IPTC data found.": 41 | raise 42 | -------------------------------------------------------------------------------- /python-examples/flask-example.py: -------------------------------------------------------------------------------- 1 | """Flask example.""" 2 | import os 3 | from http import HTTPStatus 4 | 5 | from flask import Flask, request 6 | from flask import jsonify 7 | 8 | from werkzeug.utils import secure_filename 9 | """ 10 | Author: James Campbell 11 | Date: Mon May 23 16:26:36 2016 12 | Date Updated: 2 July 2019 13 | What is this code: An example Flask 
connection 14 | Why?: For me to remember later 15 | """ 16 | 17 | app = Flask(__name__) 18 | ALLOWED_EXTENSIONS = ["zip", "gz", "bz2"] 19 | 20 | 21 | def allowed_filename(filename: str) -> bool: 22 | """Define allowed file extensions.""" 23 | return "." in filename and filename.rsplit(".", 1)[1] in ALLOWED_EXTENSIONS 24 | 25 | 26 | @app.route("/") 27 | def hello_world(): 28 | """Hello world example.""" 29 | return """\ 30 | Flask test\ 31 | Hello, simply run\ 32 |
curl -X POST localhost:6969/upload\ 33 |             -F file=@"assets/archive_name.tar.gz" -i to test from\ 34 | same folder you executed python3\ 35 |                     flask-example.py
36 | """ 37 | 38 | 39 | @app.route("/upload", methods=["POST"]) 40 | def upload_csv() -> str: 41 | """Upload CSV example.""" 42 | submitted_file = request.files["file"] 43 | if submitted_file and allowed_filename(submitted_file.filename): 44 | filename = secure_filename(submitted_file.filename) 45 | directory = os.path.join(app.config["UPLOAD_FOLDER"]) 46 | if not os.path.exists(directory): 47 | os.mkdir(directory) 48 | basedir = os.path.abspath(os.path.dirname(__file__)) 49 | submitted_file.save( 50 | os.path.join(basedir, app.config["UPLOAD_FOLDER"], filename) 51 | ) 52 | out = { 53 | "status": HTTPStatus.OK, 54 | "filename": filename, 55 | "message": f"{filename} saved successful.", 56 | } 57 | return jsonify(out) 58 | 59 | 60 | if __name__ == "__main__": 61 | app.config["UPLOAD_FOLDER"] = "flaskme/" 62 | app.run(port=6969, debug=True) 63 | 64 | # curl -X POST localhost:6969/upload -F file=@"assets/archive_name.tar.gz" -i 65 | -------------------------------------------------------------------------------- /python-examples/fuzzywuzzy-example.py: -------------------------------------------------------------------------------- 1 | """Example to use fuzzywuzzy which does a fuzzy match fast.""" 2 | # Author: James Campbell 3 | # Date: August 11th 2016 4 | # Date Updated: 2 July 2019 5 | 6 | from fuzzywuzzy import process 7 | 8 | choices = ["Atlanta Falcons", "New York Jets", "Dallas Cowboys"] 9 | hello = process.extractOne("cowboys", choices) 10 | print(hello) # should print Dallas Cowboys 11 | -------------------------------------------------------------------------------- /python-examples/geoname-example.py: -------------------------------------------------------------------------------- 1 | """Shows how to use geotext.""" 2 | from geotext import GeoText 3 | 4 | places = GeoText("London is a great city") 5 | places.cities 6 | # "London" 7 | 8 | # filter by country code 9 | result = GeoText('I loved Rio de Janeiro and Havana', 'BR').cities 10 | print(result) 11 | # 'Rio de Janeiro' 12 | 13 | print(GeoText('New York, Texas, and also China').country_mentions) 14 | # OrderedDict([(u'US', 2), (u'CN', 1)]) 15 | -------------------------------------------------------------------------------- /python-examples/get_geo-example.py: -------------------------------------------------------------------------------- 1 | """Google maps to shodan .""" 2 | print("google api killed this example.") 3 | -------------------------------------------------------------------------------- /python-examples/html2plaintext-example.py: -------------------------------------------------------------------------------- 1 | """Example on how to get plaintext from html using python's beautiful soup.""" 2 | # Author: James Campbell 3 | # Date: 2015 05 19 4 | # Date Updated: 2 July 2019 5 | from bs4 import BeautifulSoup 6 | 7 | 8 | def cleanMe(html): 9 | """Clean html into text only for-real.""" 10 | soup = BeautifulSoup(html, "lxml") # create a new bs4 object from html 11 | for script in soup(["script", "style"]): # remove all javascript & css 12 | script.extract() 13 | # get text 14 | text = soup.get_text() 15 | # break into lines and remove leading and trailing space on each 16 | lines = (line.strip() for line in text.splitlines()) 17 | # break multi-headlines into a line each 18 | chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) 19 | # drop blank lines 20 | text = '\n'.join(chunk for chunk in chunks if chunk) 21 | return text 22 | 23 | 24 | testhtml = """ 25 | \n\nTHIS IS AN EXAMPLE by 
@jamescampbell 26 | \n\n 27 | \n\n Hello World \n I hope you enjoy this example. 28 |
29 | """ 30 | 31 | print('\n\n[*-*]Before html with text:\n------------------') 32 | print(testhtml) 33 | print('------------------\n\n\n\n[*-*]After cleanMe() function:\n------------') 34 | print(cleanMe(testhtml)) 35 | print('-------------------\n\n') 36 | -------------------------------------------------------------------------------- /python-examples/http.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 |
6 | hello world 7 |
8 | 9 | 10 | -------------------------------------------------------------------------------- /python-examples/httpserver-example.py: -------------------------------------------------------------------------------- 1 | """Example using web server in python.""" 2 | 3 | import http.server 4 | import socketserver 5 | 6 | PORT = 8000 7 | 8 | Handler = http.server.SimpleHTTPRequestHandler 9 | Handler.extensions_map.update({ 10 | '.webapp': 'application/x-web-app-manifest+json', 11 | }) 12 | 13 | httpd = socketserver.TCPServer(("", PORT), Handler) 14 | 15 | print("Serving at port: {}".format(PORT)) 16 | httpd.serve_forever() 17 | -------------------------------------------------------------------------------- /python-examples/hug-postgresql-example.py: -------------------------------------------------------------------------------- 1 | """Example connect and run write to postgresql.""" 2 | import hug 3 | import psycopg2 4 | """ 5 | # don't forget to create postgres sql t database first: 6 | # $ createdb t 7 | # then connect to psql 8 | # $ psql -h localhost -d test 2 ↵ 9 | # psql (9.5.3) 10 | # Type "help" for help. 11 | # then create test user 12 | # t=# create user t with password 'test'; 13 | # CREATE ROLE 14 | # t=# grant all privileges on database test to test; 15 | # GRANT 16 | # t=# 17 | """ 18 | 19 | 20 | @hug.get('/test') 21 | def test_connect(): 22 | """Test connection to db.""" 23 | psycopg2.connect("dbname='t' user='t' host='localhost' password='test'") 24 | return ('connected successfully to db! ready for queries.') 25 | 26 | 27 | @hug.get('/checktable') 28 | def test_write(user='t', table='testtable'): 29 | """Test write to DB.""" 30 | conn = psycopg2.connect("dbname='t' user='t' host='localhost' password='test'") 31 | print('connected successfully to db! ready for queries.') 32 | cur = conn.cursor() 33 | cur.execute("select exists(select relname from pg_class where relname='" + table + "')") 34 | exists = cur.fetchone()[0] 35 | print(exists) 36 | cur.close() 37 | if exists: 38 | return 'THIS TABLE EXISTS' 39 | else: 40 | return 'This Table does not exist' 41 | -------------------------------------------------------------------------------- /python-examples/hug_api_example.py: -------------------------------------------------------------------------------- 1 | """A basic (single function) API written using Hug.""" 2 | import hug 3 | 4 | import redis 5 | """ 6 | Make sure you have redis installed via pip and redis-cli can connect 7 | example add data first: http://127.0.0.1:8000/redis_add?ape=123456&rname=phrase 8 | example call http://127.0.0.1:8000/redis_call?ape=123456&rname=phrase 9 | """ 10 | 11 | r = redis.StrictRedis(host='127.0.0.1', port=6379) 12 | 13 | 14 | @hug.get('/happy_birthday') 15 | def happy_birthday(name, age: hug.types.number = 1): 16 | """Says happy birthday to a user.""" 17 | return "Happy {age} Birthday {name}!".format(**locals()) 18 | 19 | 20 | @hug.get('/redis_call') 21 | def a_redis_call(rname, ape=1): 22 | """Simple redis call.""" 23 | if ape == 1: 24 | return "no valid api key specified, nice try though".format(**locals()) 25 | if r.sismember('ape', ape) != 1: 26 | return "no valid api key specified, nice try though".format(**locals()) 27 | else: 28 | coolness = r.get(rname).decode('utf8') 29 | r.decr(ape) 30 | numleft = r.get(str(ape)) 31 | print(numleft) 32 | return "Authenticated w {ape}. You have {numleft} queries left. 
This\ 33 | is the {rname} value you requested: {coolness}".format(**locals()) 34 | 35 | 36 | @hug.get('/redis_add') 37 | def add_redis(rname, ape=1): 38 | """Add to redis int.""" 39 | r.sadd('ape', int(ape)) 40 | r.set(ape, 1000) 41 | r.set(rname, 'a nice value here') 42 | return "added successfully".format(**locals()) 43 | -------------------------------------------------------------------------------- /python-examples/hug_post_server-example.py: -------------------------------------------------------------------------------- 1 | """Hug post server example.""" 2 | import datetime 3 | 4 | import hug 5 | 6 | 7 | @hug.post('/test') 8 | def post_data(body): 9 | """Post data from hug endpoint.""" 10 | now = str(datetime.datetime.now())[:19] 11 | print("GOT {}: {}".format(type(body), repr(body))) 12 | with open('collector.txt', 'a+', encoding='utf8') as f: 13 | f.write('{} <<< {}\n'.format(now, body)) 14 | -------------------------------------------------------------------------------- /python-examples/imaplib-example.py: -------------------------------------------------------------------------------- 1 | """ 2 | What: Connect and get unread mail from imap example 3 | Author: James Campbell 4 | Date: 5 August 2019 5 | """ 6 | import time 7 | import imaplib 8 | import email 9 | import sys 10 | import os 11 | import html2text 12 | # ------------------------------------------------- 13 | # 14 | # Utility to read email from Gmail Using Python 15 | # 16 | # ------------------------------------------------ 17 | 18 | FROM_EMAIL = "scfith@gmail.com" 19 | FROM_PWD = sys.argv[1] 20 | SMTP_SERVER = "imap.gmail.com" 21 | SMTP_PORT = 993 22 | detach_dir = '~/Downloads' 23 | 24 | 25 | def get_body(email_message): 26 | """Get body of email message.""" 27 | for payload in email_message.get_payload(): 28 | break 29 | return payload.get_payload() 30 | 31 | 32 | def two_way_email(server, uname, pwd): 33 | """Fetch and read latest unseen messages.""" 34 | username = uname 35 | password = pwd 36 | readonly = True 37 | mail = imaplib.IMAP4_SSL(server) 38 | mail.login(username, password) 39 | mail.select("inbox", readonly) 40 | try: 41 | result, data = mail.uid('search', None, '(UNSEEN)') 42 | inbox_item_list = data[0].split() 43 | most_recent = inbox_item_list[-1] 44 | result2, email_data = mail.uid('fetch', most_recent, '(RFC822)') 45 | raw_email = email_data[0][1].decode("UTF-8") 46 | email_message = email.message_from_string(raw_email) 47 | 48 | for part in email_message.walk(): 49 | if part.get_content_maintype() == 'multipart': 50 | continue 51 | if part.get('Content-Disposition') is None: 52 | continue 53 | 54 | filename = part.get_filename() 55 | att_path = os.path.join(detach_dir, filename) 56 | 57 | if not os.path.isfile(att_path): 58 | fp = open(att_path, 'wb') 59 | fp.write(part.get_payload(decode=True)) 60 | fp.close() 61 | print('Downloaded file:', filename) 62 | if email_message.is_multipart(): 63 | for payload in email_message.get_payload(): 64 | print('To:\t\t', email_message['To']) 65 | print('From:\t', email_message['From']) 66 | print('Subject:', email_message['Subject']) 67 | print('Date:\t',email_message['Date']) 68 | for part in email_message.walk(): 69 | if (part.get_content_type() == 'text/plain') and (part.get('Content-Disposition') is None): 70 | print('Body:\t',part.get_payload()) 71 | break 72 | else: 73 | print('To:\t\t', email_message['To']) 74 | print('From:\t', email_message['From']) 75 | print('Subject:', email_message['Subject']) 76 | print('Date:\t', email_message['Date']) 77 | 
print('Thread-Index:\t', email_message['Thread-Index']) 78 | text = f"{email_message.get_payload(decode=True)}" 79 | html = text.replace("b'", "") 80 | h = html2text.HTML2Text() 81 | h.ignore_links = True 82 | output = (h.handle(f'''{html}''').replace("\\r\\n", "")) 83 | output = output.replace("'", "") 84 | print(output) 85 | 86 | except IndexError: 87 | print("No new email") 88 | 89 | 90 | while True: 91 | two_way_email("imap.gmail.com", sys.argv[1], sys.argv[2]) 92 | time.sleep(10) -------------------------------------------------------------------------------- /python-examples/instagram_geo-example.py: -------------------------------------------------------------------------------- 1 | """Python example to connect and retrieve values from foursquare API.""" 2 | # the example gets instagram users associated with that location's lat/long 3 | # Facebook killed Instagram's API so the second part does not work at all 4 | # Date Updated: 1 July 2019 5 | import datetime as dt # need date in v= as YYYYMMDD 6 | import json # the API call returns JSON formatted data 7 | import urllib # needed to do urlencode 8 | import urllib.parse 9 | import urllib.request 10 | # globals Foursquare API 11 | limiter = '1' # make sure to keep as string due to concat issues otherwise 12 | cityer = input('What city to search? (no spaces, include country): ') 13 | queryer = urllib.parse.quote(input('What is the name of venue to search?: ')) 14 | clientider = '5HIGYBY4D24FXGCYTMYBUBGLYQSLORV03CRUS4E53F3GZ1VS' 15 | clientsecreter = 'B11MC3TFDEY10XQQTUDQGGTKDGCCJBOHD4RPY5VYEW12ZNIN' 16 | dater = dt.datetime.today().strftime("%Y%m%d") # the v needs YYYYMMDD format 17 | # the actual api call url 18 | foursquareapivenuesearch = f"https://api.foursquare.com/v2/venues/search?limit={limiter}&near={cityer}&query={queryer}&client_id={clientider}&client_secret={clientsecreter}&v={dater}" 19 | request = urllib.request.urlopen(foursquareapivenuesearch) # open the url 20 | dataconvert = json.loads(request.read()) # read the data returned from url 21 | lat = str(dataconvert['response']['venues'][0]['location']['lat']) 22 | lng = str(dataconvert['response']['venues'][0]['location']['lng']) 23 | print(f'\nLat/Long for {urllib.parse.unquote(queryer)}: {lat}, {lng} \n') 24 | 25 | # now take the lat/long and plug it into instagram's api for big win 26 | # note: would probably turn this into a function to make it noice and modular 27 | 28 | # INSTAGRAM API KILLED BY FACEBOOK - FUCK YOU FACEBOOK! 29 | print("This example was killed by Facebook killing API's. 
Complain to Facebook.") 30 | # globals Instagram API 31 | # instaclientid = '35b999a6d51344cc98ebb061da538999' 32 | # instaaccess_token='290277.35b999a.e2423222efa04c058b0e9b95cbf77c07' 33 | # -------------------------------------------------------------------------------- /python-examples/iptcinfo3-example.py: -------------------------------------------------------------------------------- 1 | """ 2 | What: IPTCINFO3 example 3 | Author: James Campbell 4 | Date: 5 July 2019 5 | """ 6 | import iptcinfo3 7 | 8 | try: 9 | info = iptcinfo3.IPTCInfo('assets/guy881.jpg', inp_charset="cp1250", 10 | out_charset='cp1250', force=True) 11 | print('-------IPTC DATA FOUND-------') 12 | print(info.packedIIMData()) 13 | for k, v in info._data.items(): 14 | print(f"KEY: {k} VALUE: {str(v)}") 15 | # info['city'] = '#magistræde #🇩🇰' 16 | # info.save() 17 | except Exception as e: 18 | if str(e) != "No IPTC data found.": 19 | raise 20 | -------------------------------------------------------------------------------- /python-examples/json-example.py: -------------------------------------------------------------------------------- 1 | """Example on how to use json.""" 2 | # Date Updated: 1 July 2019 3 | import json 4 | jsontestdata = '{"a":"red","b":"orange","c":"blue"}' 5 | loadedjson = json.loads(jsontestdata) 6 | 7 | 8 | def itera(): 9 | """Quick method iterator.""" 10 | print('iterator method') 11 | for _, value in loadedjson.items(): # use .iteritems() if python 2.7 12 | print(value) 13 | 14 | # to show that this works as well instead of using the iterator above 15 | 16 | 17 | def iteratoo(): 18 | """Old school method.""" 19 | print('old school method') 20 | for value in loadedjson: 21 | print(loadedjson[value]) 22 | 23 | 24 | itera() 25 | iteratoo() 26 | -------------------------------------------------------------------------------- /python-examples/list.txt: -------------------------------------------------------------------------------- 1 | jamescampbell.us 2 | btrnt.com 3 | 2fa.party 4 | bigelow.com 5 | cnn.com 6 | -------------------------------------------------------------------------------- /python-examples/main.py: -------------------------------------------------------------------------------- 1 | print("you made it") 2 | -------------------------------------------------------------------------------- /python-examples/mechanize-example.py: -------------------------------------------------------------------------------- 1 | """Mechanize to loop through links on a domain.""" 2 | # Date Updated: 1 July 2019 3 | # Author: James Campbell 4 | import mechanize 5 | 6 | br = mechanize.Browser() 7 | br.addheaders = [("User-agent", "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.1) Gecko/2008071615 Fedora/3.0.1-1.fc9 Firefox/3.0.1")] 8 | br.open("http://www.jamescampbell.us") 9 | linklists = br.links() 10 | print(f"Total links found: {str(len(list(linklists)))} ") 11 | print(list(linklists)) 12 | for link in br.links(): 13 | print(link.text, link.url) 14 | -------------------------------------------------------------------------------- /python-examples/merge-pdfs-example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # Author: James Campbell 3 | # Date: 2017-03-07 4 | # Date Updated: 2019-06-19 5 | # What: combine a list of pdfs into one pdf from assets/testpdf1.pdf & testpdf2.pdf 6 | from PyPDF2 import PdfFileMerger, PdfFileReader 7 | 8 | filenames = ["assets/testpdf1.pdf", "assets/testpdf2.pdf"] 9 | merger = PdfFileMerger() 10 | for 
filename in filenames: 11 | merger.append(PdfFileReader(open(filename, "rb"))) 12 | merger.write("./assets/pypdf2_example_output.pdf") 13 | -------------------------------------------------------------------------------- /python-examples/multi-categorization-tweets-example.py: -------------------------------------------------------------------------------- 1 | # author: James Campbell 2 | # what: example three+ categorization of tweets using nltk 3 | # date created: November 23 2015 4 | import nltk 5 | import sys 6 | from sys import exit 7 | 8 | pos_tweets = [('I love this car', 'positive'), 9 | ('This view is amazing', 'positive'), 10 | ('I feel great this morning', 'positive'), 11 | ('I am so excited about the concert', 'positive'), 12 | ('He is my best friend', 'positive'), 13 | ('Going well', 'positive'), 14 | ('Thank you', 'positive'), 15 | ('Hope you are doing well', 'positive'), 16 | ('I am very happy', 'positive'), 17 | ('Good for you', 'positive'), 18 | ('all good. I know about it and I accept it.', 'positive'), 19 | ('This is really good!', 'positive'), 20 | ('Tomorrow is going to be fun.', 'positive'), 21 | ('Smiling all around.', 'positive'), 22 | ('These are great apples today.', 'positive'), 23 | ('How about them apples? Thomas is a happy boy.', 'positive'), 24 | ('Thomas is very zen. He is well-mannered.', 'positive'), 25 | ('happy and good lots of light!', 'positive'), 26 | ('I like this new iphone very much', 'positive')] 27 | 28 | neg_tweets = [('I do not like this car', 'negative'), 29 | ('This view is horrible', 'negative'), 30 | ('I feel tired this morning', 'negative'), 31 | ('I am not looking forward to the concert', 'negative'), 32 | ('He is my enemy', 'negative'), 33 | ('I am a bad boy', 'negative'), 34 | ('This is not good', 'negative'), 35 | ('I am bothered by this', 'negative'), 36 | ('I am not connected with this', 'negative'), 37 | ('Sadistic creep you ass. Die.', 'negative'), 38 | ('All sorts of crazy and scary as hell.', 'negative'), 39 | ('Not his emails, no.', 'negative'), 40 | ('His father is dead. Returned obviously.', 'negative'), 41 | ('He has a bomb.', 'negative'), 42 | ('Too fast to be on foot. We cannot catch them.', 'negative'), 43 | ('Feeling so stupid stoopid stupid!', 'negative'), 44 | (':-(( :-(', 'negative'), 45 | ('This is the worst way imaginable, all of this traffic', 'negative')] 46 | 47 | rain_tweets = [('this rain is craze today', 'rain'), 48 | ('Nov 23 17:30 Temperature 3C no or few clouds Wind SW 6 km/h Humidity 70% France', 'rain'), 49 | ('missin climbing mountains in the rain', 'rain'), 50 | ('There are days in live broadcasting Torrential rain in Paris ', 'rain'), 51 | ('Heavy Rain today in!', 'rain'), 52 | ('Woman in the boulangerie started complaining about the rain. I said, "its better than terrorists". Need to finesse my jovial patter', 'rain'), 53 | ('Light to moderate rain over NCR', 'rain'), 54 | ('After a cold night last night, tonight will be milder and mainly frost-free, with this band of rain. Jo', 'rain'), 55 | ('But I love the rain. And it rains frequently these days~ So it makes me feel rather good', 'rain'), 56 | ('With 1000 mm rain already and more rain forecasted 4 Chennai, Nov 2015 will overtake Oct 2005 and Nov 1918 to become the Wettest Month EVER!', 'rain'), 57 | ('It is raining today. Wet!', 'rain'), 58 | ('Lots of rain today. 
Raining!', 'rain'), 59 | ('Why is it raining?', 'rain'), 60 | ('So much rain!', 'rain'), 61 | ('it always rains this time of year', 'rain'), 62 | ('raining', 'rain'), 63 | ('raining outside today, rained yesterday too', 'rain'), 64 | ('rainy weather today! jeez', 'rain'), 65 | ('Rain has finally extinguished a #wildfire in Olympic National Park that had been burning since May', 'rain'), 66 | ('The rain had us indoors for Thursdays celebration', 'rain'), 67 | ('Rain (hourly) 0.0 mm, Pressure: 1012 hPa, falling slowly', 'rain'), 68 | ('That aspiration yours outfit make ends meet spite of the rainy weather this midsummer?: Edb', 'rain'), 69 | ('Glasgow\'s bright lights of Gordon st tonight #rain #Glasgow', 'rain'), 70 | ('Why is it raining? Because it always rains this time of year', 'rain'), 71 | ('The forecast for this week\'s weather includes lots of rain!', 'rain'), 72 | ('Morning Has Broken: Morning has BrokenAs I sit in my warm car in between rain squalls I am looking out', 'rain'), 73 | ('Wind 2.0 mph SW. Barometer 1021.10 mb, Falling. Temperature 5.5 °C. Rain today 0.2 mm. Humidity 78%', 'rain')] 74 | 75 | tweets = [] 76 | for (words, sentiment) in pos_tweets + neg_tweets + rain_tweets: 77 | words_filtered = [e.lower() for e in words.split() if len(e) >= 2] 78 | tweets.append((words_filtered, sentiment)) 79 | 80 | 81 | def get_words_in_tweets(tweets): 82 | all_words = [] 83 | for (words, sentiment) in tweets: 84 | all_words.extend(words) 85 | return all_words 86 | 87 | 88 | def get_word_features(wordlist): 89 | wordlist = nltk.FreqDist(wordlist) 90 | word_features = wordlist.keys() 91 | return word_features 92 | 93 | 94 | def extract_features(document): 95 | document_words = set(document) 96 | features = {} 97 | for word in word_features: 98 | features['contains(%s)' % word] = (word in document_words) 99 | return features 100 | 101 | 102 | word_features = get_word_features(get_words_in_tweets(tweets)) 103 | 104 | training_set = nltk.classify.apply_features(extract_features, tweets) 105 | classifier = nltk.NaiveBayesClassifier.train(training_set) 106 | 107 | runtweets = [] # setup to import a list of tweets here if you wish into a python list 108 | if len(sys.argv) > 1: # if param passed 4 name of text file w/ list of tweets 109 | tweetfile = sys.argv[1] 110 | with open(tweetfile, "r") as ins: 111 | for line in ins: 112 | runtweets.append(line) 113 | runtweets.append('I am a bad boy') # should be negative 114 | runtweets.append('rain today') # should be rain 115 | runtweets.append('so stupid') # should be negative 116 | runtweets.append('it is raining outside') # should be rain 117 | runtweets.append('I love it') # should be positive 118 | runtweets.append('so good') # should be positive 119 | poscount = 0 120 | negcount = 0 121 | raincount = 0 122 | for tweett in runtweets: 123 | valued = classifier.classify(extract_features(tweett.split())) 124 | print(valued) 125 | if valued == 'negative': 126 | negcount = negcount + 1 127 | if valued == 'positive': 128 | poscount = poscount + 1 129 | if valued == 'rain': 130 | raincount = raincount + 1 131 | print('Positive count: %s \nNegative count: %s \nRain count: %s' % (poscount, negcount, raincount)) 132 | exit() 133 | -------------------------------------------------------------------------------- /python-examples/ngrams-example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # Author: James Campbell 3 | # Date: November 11 2016 4 | # What: Example using nltk tokenize and ngrams 
5 | from nltk.tokenize import word_tokenize 6 | from nltk.util import ngrams 7 | import re 8 | import string 9 | 10 | 11 | def get_ngrams(text, n): 12 | ngramnums = word_tokenize(text) 13 | ll = [x for x in ngramnums if not re.fullmatch('[' + string.punctuation + ']+', x)] 14 | ll = ngrams(ll, n) 15 | return [' '.join(grams) for grams in ll] 16 | 17 | 18 | ngramer = get_ngrams("This is a sentence to parse out ngrams for it.", 4) 19 | for gram in ngramer: 20 | print(gram) 21 | -------------------------------------------------------------------------------- /python-examples/opencv_facial_recognition-example.py: -------------------------------------------------------------------------------- 1 | # Author: James Cmapbell 2 | # Date: November 18 2015 3 | # Updated: 3 July 2019 4 | # What: Example Facial recognition 5 | # Example: run python3 opencv_facial_recognition-example.py 6 | # assets/abba.png assets/haarcascade_frontalface_default.xml 7 | # nomenclature is python [script] [image to recognize] [classifier to use] 8 | import cv2 9 | import sys 10 | 11 | # Get user supplied values 12 | imagePath = sys.argv[1] 13 | cascPath = sys.argv[2] 14 | 15 | # Create the haar cascade 16 | faceCascade = cv2.CascadeClassifier(cascPath) 17 | 18 | # Read the image 19 | image = cv2.imread(imagePath) 20 | gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 21 | 22 | # Detect faces in the image 23 | faces = faceCascade.detectMultiScale( 24 | gray, 25 | scaleFactor=1.1, 26 | minNeighbors=5, 27 | minSize=(30, 30), 28 | flags=cv2.CASCADE_SCALE_IMAGE, 29 | ) 30 | 31 | print("Found {0} faces!".format(len(faces))) 32 | 33 | # Draw a rectangle around the faces 34 | for (x, y, w, h) in faces: 35 | cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2) 36 | 37 | cv2.imshow("Faces found", image) 38 | cv2.waitKey(0) 39 | -------------------------------------------------------------------------------- /python-examples/parse_divs-example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # Author: James Campbell 3 | # What: Parse divs from transactions.html file 4 | # Date: 06-07-2017 5 | import csv 6 | from bs4 import BeautifulSoup, SoupStrainer 7 | 8 | with open('/Users/jc/Downloads/transactions.html') as f: 9 | #transaction_strainer = SoupStrainer('div', {'class': 'transaction'}) 10 | #soup = BeautifulSoup(f, 'html.parser',parse_only=transaction_strainer) 11 | soup = BeautifulSoup(f,'html.parser') 12 | listOfTransactions = soup.findAll('div',{'class':'transaction'}) 13 | print(listOfTransactions[5]) 14 | with open('/Users/jc/Downloads/transactions.csv', 'w', newline='') as tcsv: 15 | tscvwritten = csv.writer(tcsv, delimiter=',') 16 | for item in listOfTransactions: 17 | descriptionOfTransaction = item.find('div',{'class':'description'}).text 18 | amountOfTransaction = item.find('span',{'class':'tx-amount'}).text 19 | dateOfTransaction = item.find('div',{'class':'timestamp'}).text 20 | tscvwritten.writerow([dateOfTransaction,amountOfTransaction,descriptionOfTransaction]) 21 | #print(listOfTransactions[0].find('span',{'class':'tx-amount'}).text) 22 | #print(len(maindivs)) 23 | #print(len(amounts)) 24 | print("processed {} transactions".format(len(listOfTransactions))) 25 | 26 | #print(amounts[1]) 27 | exit('working so far') -------------------------------------------------------------------------------- /python-examples/pdf2random_text-example.py: -------------------------------------------------------------------------------- 1 | 2 | from PyPDF2 import 
PdfFileWriter, PdfFileReader 3 | import sys 4 | # globals 5 | if len(sys.argv) < 2: 6 | pdffile = 'assets/fw9.pdf' 7 | else: 8 | pdffile = sys.argv[1] 9 | 10 | 11 | output = PdfFileWriter() 12 | input1 = PdfFileReader(open(pdffile, "rb")) 13 | 14 | # print how many pages input1 has: 15 | print("pdf has %d pages." % input1.getNumPages()) 16 | print(input1.getDocumentInfo()) 17 | print(input1.getXmpMetadata()) 18 | if input1.isEncrypted: 19 | print('encrypted') 20 | else: 21 | print('not encrypted') 22 | i = 4 23 | content = "" 24 | while i < 6: 25 | texter = input1.getPage(i).extractText() 26 | # print(texter) 27 | i = i + 1 28 | content += texter + "\n" 29 | content = " ".join(content.replace("\xa0", " ").strip().split()) 30 | # add page 1 from input1 to output document, unchanged 31 | output.addPage(input1.getPage(0)) 32 | 33 | # add page 2 from input1, but rotated clockwise 90 degrees 34 | output.addPage(input1.getPage(1).rotateClockwise(90)) 35 | 36 | # add page 3 from input1, rotated the other way: 37 | output.addPage(input1.getPage(2).rotateCounterClockwise(90)) 38 | # alt: output.addPage(input1.getPage(2).rotateClockwise(270)) 39 | 40 | # add page 4 from input1, but first add a watermark from another PDF: 41 | page4 = input1.getPage(3) 42 | watermark = PdfFileReader(open(pdffile, "rb")) 43 | page4.mergePage(watermark.getPage(0)) 44 | output.addPage(page4) 45 | 46 | 47 | # add page 5 from input1, but crop it to half size: 48 | page5 = input1.getPage(4) 49 | page5.mediaBox.upperRight = ( 50 | page5.mediaBox.getUpperRight_x() / 2, 51 | page5.mediaBox.getUpperRight_y() / 2 52 | ) 53 | output.addPage(page5) 54 | 55 | # add some Javascript to launch the print window on opening this PDF. 56 | # the password dialog may prevent the print dialog from being shown, 57 | # comment the the encription lines, if that's the case, to try this out 58 | output.addJS("this.print({bUI:true,bSilent:false,bShrinkToFit:true});") 59 | 60 | # finally, write "output" to document-output.pdf 61 | outputStream = open("PyPDF2-output.pdf", "wb") 62 | output.write(outputStream) 63 | -------------------------------------------------------------------------------- /python-examples/pdfquery-example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # Author: James Campbell 3 | # Date: 03-15-2017 4 | # Last Modified 06-03-2019 5 | # What: take a pdf and get text from it using pdfquery 6 | import sys 7 | import pdfquery 8 | 9 | # globals 10 | if sys.argv[1] == None: 11 | pdffile = "book1.pdf" 12 | else: 13 | pdffile = sys.argv[1] 14 | 15 | pdf = pdfquery.PDFQuery(pdffile) 16 | pdf.load() 17 | text = pdf.pq.text() 18 | if text != "": 19 | print(text) 20 | else: 21 | exit("need to ocr pdf first") 22 | -------------------------------------------------------------------------------- /python-examples/pickle_load-example.py: -------------------------------------------------------------------------------- 1 | # pickle load example 2 | import pickle 3 | import random 4 | 5 | with open('assets/discordia.pkl', 'rb') as f: 6 | discordia = pickle.load(f) 7 | 8 | 9 | def getran(tex): 10 | texter = random.choice(tex) 11 | if len(texter) < 140 and len(texter) > 0: 12 | return texter 13 | else: 14 | globular = getran(tex) 15 | return globular 16 | 17 | 18 | def to140(data): 19 | loser = [] 20 | for listitem in data: 21 | if len(listitem) < 140 and len(listitem) > 0: 22 | loser.append(listitem) 23 | return loser 24 | 25 | 26 | print(getran(discordia)) 27 | exit('there ya go') 28 
| -------------------------------------------------------------------------------- /python-examples/pinboard-example.py: -------------------------------------------------------------------------------- 1 | """Example on how to use pinboard api using your api key and tag.""" 2 | # Note, get your api token at https://pinboard.in/settings/password 3 | # Set the token in a configs.py file that you set pinapi = "your token" 4 | import pinboard 5 | from configs import pinapi 6 | import datetime 7 | 8 | one_day_ago = datetime.datetime.now() - datetime.timedelta(days=1) 9 | pb = pinboard.Pinboard(pinapi) # set to your api here username:api 10 | sec = pb.posts.all(tag=["wrk"], results=10, fromdt=one_day_ago) 11 | for key in sec: 12 | print(key.description, key.tags, key.url, key.extended) 13 | -------------------------------------------------------------------------------- /python-examples/polyglot-example.py: -------------------------------------------------------------------------------- 1 | # requires pyicu and pycld2 and pre-req brew install icu4c 2 | from polyglot.detect import Detector 3 | 4 | arabic_text = u""" 5 | أفاد مصدر امني في قيادة عمليات صلاح الدين في العراق بأن " القوات الامنية تتوقف لليوم 6 | الثالث على التوالي عن التقدم الى داخل مدينة تكريت بسبب 7 | انتشار قناصي التنظيم الذي يطلق على نفسه اسم "الدولة الاسلامية" والعبوات الناسفة 8 | والمنازل المفخخة والانتحاريين، فضلا عن ان القوات الامنية تنتظر وصول تعزيزات اضافية ". 9 | """ 10 | 11 | detector = Detector(arabic_text) 12 | print(detector.language) 13 | -------------------------------------------------------------------------------- /python-examples/pyzillow-example.py: -------------------------------------------------------------------------------- 1 | """Example using pyzillow, requires free zillow api key.""" 2 | from pyzillow.pyzillow import ZillowWrapper, GetDeepSearchResults, GetUpdatedPropertyDetails 3 | import argparse 4 | from pprint import pprint 5 | from configs import zillowapi # set your zillowapi='yourapikey' in configs.py file 6 | """You need a zillow api key: https://www.zillow.com/howto/api/APIOverview.htm""" 7 | # arguments 8 | parser = argparse.ArgumentParser(description='zillow data example') 9 | parser.add_argument('-a', '--address', dest='address', help='address to search', 10 | default='1943 N Upland St, Arlington, VA, 22207', required=False) 11 | parser.add_argument('-v', '--verbose', dest='verbose', 12 | help='print more stuff', action='store_true') 13 | parser.add_argument('-z', '--zipcode', dest='zipcode', 14 | help='zipcode', default=22207) 15 | parser.add_argument('--apikey', help='zillow api key', required=False, default=zillowapi) 16 | args = parser.parse_args() 17 | 18 | address = args.address 19 | zipcode = args.zipcode 20 | 21 | 22 | def get_wrapper(): 23 | """Set the API key properly.""" 24 | zillow_data = ZillowWrapper(args.apikey) 25 | return zillow_data 26 | 27 | 28 | def search_address(zillow_data): 29 | """Get results from address input and zipcode input.""" 30 | deep_search_response = zillow_data.get_deep_search_results( 31 | address, zipcode) 32 | result = GetDeepSearchResults(deep_search_response) 33 | if args.verbose: 34 | print(result) 35 | return result 36 | 37 | 38 | def get_details(zillow_id): 39 | """Get updated detailed property data.""" 40 | zillow_data = get_wrapper() 41 | updated_property_details_response = zillow_data.get_updated_property_details( 42 | zillow_id) 43 | result = GetUpdatedPropertyDetails(updated_property_details_response) 44 | if args.verbose: 45 | print(result) 46 
| return result 47 | 48 | 49 | def main(): 50 | """Run the API calls to show the example data.""" 51 | zillow_data = get_wrapper() 52 | result = search_address(zillow_data) 53 | if args.verbose: 54 | pprint(vars(result)) 55 | all_details = get_details(result.zillow_id) 56 | if args.verbose: 57 | pprint(vars(all_details)) 58 | 59 | 60 | if __name__ == "__main__": 61 | main() 62 | -------------------------------------------------------------------------------- /python-examples/quandl-example.py: -------------------------------------------------------------------------------- 1 | # Author: James Campbell 2 | # Date Created: May 21st 2016 3 | # Date Updated: 2 July 2019 4 | # What: Example to get stock prices 5 | from sys import exit 6 | try: 7 | import quandl 8 | except Exception: 9 | exit('quandl module required, run pip or pip3 install quandl --update') 10 | try: 11 | from configs import myqkey 12 | except Exception: 13 | print('no configs file set, create a file called configs.py and add var myqkey = "whatever"') 14 | myqkey = 'yoursecretkeyfromquandl.com' 15 | # set API key 16 | quandl.ApiConfig.api_key = myqkey # get free key at quandl.com 17 | 18 | dataset_data = quandl.Dataset('WIKI/AAPL').data(params={'start_date': '2001-01-01', 19 | 'end_date': '2010-01-01', 20 | 'collapse': 'annual', 21 | 'transformation': 'rdiff', 'rows': 4}) 22 | print('first date: {}'.format(dataset_data[0].date)) 23 | print('Total days of stock data available: {}'.format(len(dataset_data))) 24 | print('The data includes the following columns: {}'.format(dataset_data.column_names)) 25 | -------------------------------------------------------------------------------- /python-examples/read-spreadsheet-example.py: -------------------------------------------------------------------------------- 1 | # read values from a spreadsheet 2 | import os 3 | import fnmatch 4 | import xlrd 5 | 6 | 7 | def find_files(directory, pattern): 8 | for root, dirs, files in os.walk(directory): 9 | for basename in files: 10 | if fnmatch.fnmatch(basename, pattern): 11 | filename = basename 12 | yield filename 13 | 14 | 15 | filename = 'assets/test.xls' 16 | header = 1 17 | outputfilename = '' 18 | pathoffile = '' 19 | 20 | workbook = xlrd.open_workbook(filename) 21 | worksheet = workbook.sheet_by_name('Sheet1') 22 | num_rows = worksheet.nrows - 1 23 | num_cells = worksheet.ncols - 1 24 | if header == 1: 25 | curr_row = 0 26 | else: 27 | curr_row = -1 28 | curr_cell = -1 29 | while curr_row < num_rows: 30 | curr_row += 1 31 | row = worksheet.row(curr_row) 32 | print('Row:', curr_row) 33 | outputfilename = worksheet.cell_value(curr_row, 0) 34 | pathoffile = worksheet.cell_value(curr_row, 1) 35 | print('Filename: %s\nPath: %s' % (outputfilename, pathoffile)) 36 | -------------------------------------------------------------------------------- /python-examples/read_wav_display_audio-example.py: -------------------------------------------------------------------------------- 1 | # read and display wav audio file example 2 | from scipy.io.wavfile import read 3 | import matplotlib.pyplot as plt 4 | 5 | # globals 6 | fullfilepath = '/Users/mbpjc/projects/digiclean2/noise-files/naylor_noisy.wav' 7 | resultpath = '/Users/mbpjc/projects/digiclean2/output/' 8 | # read audio samples 9 | input_data = read("/Users.wav") 10 | audio = input_data[1] 11 | 12 | plt.plot(audio) 13 | 14 | # label the axes 15 | plt.ylabel("Amplitude") 16 | plt.xlabel("Time") 17 | # set the title 18 | plt.title("Sample Wav") 19 | # display the plot 20 | plt.show() 21 | 
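# A minimal companion sketch (not part of the original file above): the sample rate that
# scipy.io.wavfile.read returns can be used to label the x-axis in seconds instead of sample
# numbers. The path 'assets/example.wav' is a placeholder, not a file shipped with this repo.
import numpy as np
import matplotlib.pyplot as plt
from scipy.io.wavfile import read

rate, audio = read("assets/example.wav")   # rate in samples per second, audio as a numpy array
seconds = np.arange(len(audio)) / rate     # one timestamp per sample
plt.plot(seconds, audio)
plt.xlabel("Time (s)")
plt.ylabel("Amplitude")
plt.title("Sample Wav (time in seconds)")
plt.show()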
-------------------------------------------------------------------------------- /python-examples/request_post_go_with_hug_post-example.py: -------------------------------------------------------------------------------- 1 | 2 | import requests 3 | 4 | foreigntext = 'станция' 5 | url = 'http://127.0.0.1:8000/test' 6 | sendtext = {'hello': foreigntext} 7 | print(sendtext) 8 | r = requests.post(url, json=sendtext) 9 | -------------------------------------------------------------------------------- /python-examples/requests-example.py: -------------------------------------------------------------------------------- 1 | # Author: James Campbell 2 | # What: requests example that checks domain for RSS feed 3 | import requests 4 | from bs4 import BeautifulSoup 5 | 6 | 7 | def get_rss_feed(website_url): 8 | """Get RSS feed.""" 9 | if website_url is None: 10 | print("URL should not be null") 11 | else: 12 | source_code = requests.get(website_url) 13 | plain_text = source_code.text 14 | soup = BeautifulSoup(plain_text, "lxml") 15 | for link in soup.find_all("link", {"type": "application/rss+xml"}): 16 | href = link.get('href') 17 | print("RSS feed for " + website_url + "is --> " + str(href)) 18 | 19 | 20 | get_rss_feed("https://0x41.no/") 21 | -------------------------------------------------------------------------------- /python-examples/rethinkdb-example.py: -------------------------------------------------------------------------------- 1 | """ 2 | Author: James Campbell 3 | Date: July 3rd 2016 4 | Date Updated: 3 July 2019 5 | What: RethinkDB python example create and put data into it from pastebin site. 6 | Documentation on RethinkDB: https://rethinkdb.com/api/python/ 7 | Pre: Must brew install rethinkdb and start it up first 8 | """ 9 | import json 10 | import rethinkdb as rdb 11 | r = rdb.RethinkDB() 12 | 13 | # for url implementation instead of json file (comment out example file open below -jc 14 | # url = 'http://psbdmp.com/api/dump/daily' 15 | # data = urllib.request.urlopen(url) 16 | # d = json.loads(data.read().decode('utf-8-sig')) 17 | 18 | """ 19 | json keys: id, data, datahash, tags, addedtime, viewed, deleted, unixtime, banned, leakedemails, 20 | info, total, dumped, formated, removed, textdata, spID 21 | """ 22 | # open example file 23 | with open('assets/pastedumpexample.json', 'rU', encoding='utf-8') as json_data: 24 | d = json.load(json_data) 25 | 26 | i = 0 27 | 28 | # list of tables to check if they exist 29 | tables = ['hellopaste'] # you can check multiple tables here by adding more in list 30 | conn = r.connect('127.0.0.1', 28015).repl() 31 | for table in tables: 32 | if not r.db('test').table_list().contains(table).run(conn): 33 | r.db('test').table_create(table).run(conn) 34 | 35 | for datarow in d['data']: 36 | try: 37 | ider = datarow['id'] 38 | except Exception: 39 | ider = '' 40 | try: 41 | dataer = datarow['data'] 42 | except Exception: 43 | dataer = '' 44 | try: 45 | datahasher = datarow['datahash'] 46 | except Exception: 47 | datahasher = '' 48 | try: 49 | tagser = datarow['tags'] 50 | except Exception: 51 | tagser = '' 52 | try: 53 | addedtimeer = datarow['addedtime'] 54 | except Exception: 55 | addedtimeer = '' 56 | try: 57 | vieweder = datarow['viewed'] 58 | except Exception: 59 | vieweder = '' 60 | try: 61 | deleteder = datarow['deleted'] 62 | except Exception: 63 | deleteder = '' 64 | try: 65 | unixtimeer = datarow['unixtime'] 66 | except Exception: 67 | unixtimeer = '' 68 | try: 69 | banneder = datarow['banned'] 70 | except Exception: 71 | banneder = '' 72 | 
try: 73 | leakedemailser = datarow['leakedemails'] 74 | except Exception: 75 | leakedemailser = '' 76 | try: 77 | infoer = datarow['info'] 78 | except Exception: 79 | infoer = '' 80 | try: 81 | totaler = datarow['total'] 82 | except Exception: 83 | totaler = '' 84 | try: 85 | dumpeder = datarow['dumped'] 86 | except Exception: 87 | dumpeder = '' 88 | try: 89 | formateder = datarow['formated'] 90 | except Exception: 91 | formateder = '' 92 | try: 93 | removeder = datarow['removed'] 94 | except Exception: 95 | removeder = '' 96 | try: 97 | textdataer = datarow['textdata'] 98 | except Exception: 99 | textdataer = '' 100 | try: 101 | spider = datarow['spID'] 102 | except Exception: 103 | spider = '' 104 | r.table('hellopaste').insert({'id': ider, 105 | 'data': dataer, 106 | 'datahash': datahasher, 107 | 'tags': tagser, 108 | 'addedtime': addedtimeer, 109 | 'viewed': vieweder, 110 | 'deleted': deleteder, 111 | 'unixtime': unixtimeer, 112 | 'banned': banneder, 113 | 'leakedemails': leakedemailser, 114 | 'info': infoer, 115 | 'total': totaler, 116 | 'dumped': dumpeder, 117 | 'formated': formateder, 118 | 'removed': removeder, 119 | 'textdata': textdataer, 120 | 'spID': datarow['spID']}).run(conn) 121 | # test using pluck 122 | # tv_shows = r.table('hellopaste').pluck('id').run(conn) 123 | # print(tv_shows) 124 | 125 | # count total rows in table 126 | totalrows = r.table('hellopaste').count().run() 127 | print('total paste entries: {}'.format(totalrows)) 128 | 129 | # return back any that include emails 130 | totalwithemails = r.table('hellopaste').count( 131 | lambda hellopaste: hellopaste['textdata'] 132 | .match("[A-Z0-9._%-]+@[A-Z0-9._%-]+\.[A-Z]{2,4}")).run() 133 | print('total with emails in text of paste: {}'.format(totalwithemails)) 134 | # print each id and datetime stamp 135 | for doc in r.table('hellopaste').run(): 136 | print(doc['id'], doc['addedtime']) 137 | conn.close() 138 | -------------------------------------------------------------------------------- /python-examples/ryu-example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # Example code for using the ryu module to do SDN (Software Defined Networking) 3 | # Author: James Campbell 4 | # Date Updated: 12-12-2015 5 | 6 | # from http://ryu.readthedocs.org/en/latest/writing_ryu_app.html 7 | from ryu.base import app_manager 8 | from ryu.controller import ofp_event 9 | from ryu.controller.handler import MAIN_DISPATCHER 10 | from ryu.controller.handler import set_ev_cls 11 | 12 | class L2Switch(app_manager.RyuApp): 13 | def __init__(self, *args, **kwargs): 14 | super(L2Switch, self).__init__(*args, **kwargs) 15 | 16 | @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) 17 | def packet_in_handler(self, ev): 18 | msg = ev.msg 19 | dp = msg.datapath 20 | ofp = dp.ofproto 21 | ofp_parser = dp.ofproto_parser 22 | 23 | actions = [ofp_parser.OFPActionOutput(ofp.OFPP_FLOOD)] 24 | out = ofp_parser.OFPPacketOut( 25 | datapath=dp, buffer_id=msg.buffer_id, in_port=msg.in_port, 26 | actions=actions) 27 | dp.send_msg(out) 28 | -------------------------------------------------------------------------------- /python-examples/scapy_arp-example.py: -------------------------------------------------------------------------------- 1 | """Scapy sniff ARP packets, must run as sudo!""" 2 | from scapy.all import sniff 3 | pkts = sniff(filter="arp", count=10) 4 | print(pkts.summary()) 5 | -------------------------------------------------------------------------------- 
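# A hedged companion sketch for the scapy ARP example above (not an original repo file): instead of
# collecting ten packets and printing a summary afterwards, a prn callback reports each ARP packet
# as it is sniffed. Like the original, it must be run with root privileges.
from scapy.all import ARP, sniff


def show_arp(pkt):
    """Print source/destination details for a sniffed ARP packet."""
    if ARP in pkt:
        print(f"{pkt[ARP].psrc} ({pkt[ARP].hwsrc}) -> {pkt[ARP].pdst}")


sniff(filter="arp", prn=show_arp, store=False, count=10)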
/python-examples/scatterplot-example.py: -------------------------------------------------------------------------------- 1 | """Nice random scatter bubble numpy and matplotlib example.""" 2 | # author: James Campbell 3 | # Date Created: 2015 06 02 4 | 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | 8 | N = 50 9 | x = np.random.rand(N) 10 | y = np.random.rand(N) 11 | colors = np.random.rand(N) 12 | area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radiuses 13 | 14 | plt.scatter(x, y, s=area, c=colors, alpha=0.5) 15 | plt.show() -------------------------------------------------------------------------------- /python-examples/scrape_twitter-example.py: -------------------------------------------------------------------------------- 1 | """scrape twitter example going to web page for twitter profile.""" 2 | # author: James Campbell 3 | # date: 2015 06 02 4 | # Date Updated: 3 July 2019 5 | import urllib.request 6 | import urllib.parse 7 | import random 8 | from bs4 import BeautifulSoup 9 | 10 | useragents = ['Mozilla/5.0', 'Bandicout Broadway 2.4', 'Carls Crawler Critter 1.0', 11 | 'Dirty Dungeon Diksearch 69', 'Internet Explorer but better'] 12 | 13 | 14 | def singlerando(listofterms): 15 | randomed = random.choice(listofterms) 16 | return randomed 17 | 18 | 19 | def parseT(twitterpage): 20 | soup = BeautifulSoup(twitterpage, "lxml") # create a new bs4 object from the html data loaded 21 | for script in soup(["script", "style"]): # remove all javascript and stylesheet code 22 | script.extract() 23 | # get text 24 | tester = soup.find_all("p", class_="tweet-text") 25 | print(tester[1].text) 26 | exit() 27 | 28 | 29 | def searchT(searchfor): 30 | randomuseragent = singlerando(useragents) # select a random user agent from list 31 | headers = {'User-Agent': randomuseragent} # get random header from above 32 | url = 'https://twitter.com/%s' % searchfor # GOOGLE ajax API string 33 | search_response_pre = urllib.request.Request( 34 | url, None, headers) # key to get the random headers to work 35 | search_response = urllib.request.urlopen(search_response_pre) 36 | search_results = search_response.read().decode("utf8") 37 | # print(search_results) 38 | parseT(search_results) 39 | 40 | 41 | diction = [] 42 | subset = [] 43 | twitteruser = input('Enter twitter user: ') 44 | searchT(twitteruser) 45 | -------------------------------------------------------------------------------- /python-examples/sentiment_analysis_nltk-example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # PYTHON EXAMPLE TO DO SENTIMENT ANALYSIS ON TWEETS 3 | # Author: James Campbell 4 | # Date: 2015-07-01 5 | # Updated: 2015-11-16 6 | # USE FOR PYTHON 3 only 7 | import nltk 8 | import sys 9 | import pickle 10 | 11 | pos_tweets = [('I love this car', 'positive'), 12 | ('This view is amazing', 'positive'), 13 | ('I feel great this morning', 'positive'), 14 | ('I am so excited about the concert', 'positive'), 15 | ('He is my best friend', 'positive'), 16 | ('Going well', 'positive'), 17 | ('Thank you', 'positive'), 18 | ('Hope you are doing well', 'positive'), 19 | ('I am very happy', 'positive'), 20 | ('Good for you', 'positive'), 21 | ('It is all good. I know about it and I accept it.', 'positive'), 22 | ('This is really good!', 'positive'), 23 | ('Tomorrow is going to be fun.', 'positive'), 24 | ('Smiling all around.', 'positive'), 25 | ('These are great apples today.', 'positive'), 26 | ('How about them apples? 
Thomas is a happy boy.', 'positive'), 27 | ('Thomas is very zen. He is well-mannered.', 'positive')] 28 | 29 | neg_tweets = [('I do not like this car', 'negative'), 30 | ('This view is horrible', 'negative'), 31 | ('I feel tired this morning', 'negative'), 32 | ('I am not looking forward to the concert', 'negative'), 33 | ('He is my enemy', 'negative'), 34 | ('I am a bad boy', 'negative'), 35 | ('This is not good', 'negative'), 36 | ('I am bothered by this', 'negative'), 37 | ('I am not connected with this', 'negative'), 38 | ('Sadistic creep you ass. Die.', 'negative'), 39 | ('All sorts of crazy and scary as hell.', 'negative'), 40 | ('Not his emails, no.', 'negative'), 41 | ('His father is dead. Returned obviously.', 'negative'), 42 | ('He has a bomb.', 'negative'), 43 | ('Too fast to be on foot. We cannot catch them.', 'negative')] 44 | 45 | tweets = [] 46 | for (words, sentiment) in pos_tweets + neg_tweets: 47 | words_filtered = [e.lower() for e in words.split() if len(e) >= 3] 48 | tweets.append((words_filtered, sentiment)) 49 | 50 | 51 | def get_words_in_tweets(tweets): 52 | all_words = [] 53 | for (words, sentiment) in tweets: 54 | all_words.extend(words) 55 | return all_words 56 | 57 | 58 | def get_word_features(wordlist): 59 | wordlist = nltk.FreqDist(wordlist) 60 | word_features = wordlist.keys() 61 | return word_features 62 | 63 | 64 | def extract_features(document): 65 | document_words = set(document) 66 | features = {} 67 | for word in word_features: 68 | features['contains(%s)' % word] = (word in document_words) 69 | return features 70 | 71 | 72 | word_features = get_word_features(get_words_in_tweets(tweets)) 73 | 74 | training_set = nltk.classify.apply_features(extract_features, tweets) 75 | classifier = nltk.NaiveBayesClassifier.train(training_set) 76 | 77 | # optional to save your classifier so you can load it 78 | # elsewhere without having to rebuild training set every time 79 | save_classifier = open("assets/tweetposneg.pickle", "wb") 80 | pickle.dump(classifier, save_classifier) 81 | save_classifier.close() 82 | 83 | # optional load from classifier that was saved previously 84 | # classifier_f = open("naivebayes.pickle", "rb") 85 | # classifier = pickle.load(classifier_f) 86 | # classifier_f.close() 87 | 88 | runtweets = [] # setup to import a list of tweets here if you wish into a python list 89 | if len(sys.argv) > 1: # if param passed 4 name of text file w/ list of tweets 90 | tweetfile = sys.argv[1] 91 | with open(tweetfile, "r") as ins: 92 | for line in ins: 93 | runtweets.append(line) 94 | runtweets.append('I am a bad boy') # test tweet incase 95 | poscount = 0 96 | negcount = 0 97 | for tweett in runtweets: 98 | valued = classifier.classify(extract_features(tweett.split())) 99 | print(valued) 100 | if valued == 'negative': 101 | negcount = negcount + 1 102 | else: 103 | poscount = poscount + 1 104 | print('Positive count: %s \nNegative count: %s' % (poscount, negcount)) 105 | -------------------------------------------------------------------------------- /python-examples/server-example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # Author: James Campbell 3 | # Date: May 23rd 2016 4 | # Date Updated: June 8th 2016 5 | # What: Starts a http server as an example (works in Python 3) 6 | import time 7 | from http.server import BaseHTTPRequestHandler, HTTPServer 8 | 9 | # default unless set at prompt 10 | hostPort = 10010 11 | 12 | customPort = input("Default port 10010, hit enter or type custom one 
now: ") 13 | if customPort != '': 14 | hostPort = int(customPort) 15 | hostName = "localhost" 16 | 17 | 18 | class MyServer(BaseHTTPRequestHandler): 19 | def do_GET(self): 20 | self.send_response(200) 21 | self.send_header("Content-type", "text/html") 22 | self.end_headers() 23 | self.wfile.write( 24 | bytes("MY PYTHON WEB SERVER PAGE!", "utf-8")) 25 | self.wfile.write( 26 | bytes("

This is a test.", "utf-8")) 27 | # you can use if else to check path and do custom things based on path accessed 28 | if self.path == '/win': 29 | self.wfile.write(bytes("YOU WIN! @ path %s" % self.path, "utf-8")) 30 | else: 31 | self.wfile.write(bytes("You accessed path: %s
" % self.path, "utf-8")) 32 | self.wfile.write(bytes("", "utf-8")) 33 | 34 | 35 | myServer = HTTPServer((hostName, hostPort), MyServer) 36 | print(time.asctime(), "Server Starts - %s:%s" % (hostName, hostPort)) 37 | 38 | # continue to serve until a keypress in terminal 39 | try: 40 | myServer.serve_forever() 41 | except KeyboardInterrupt: 42 | pass 43 | 44 | myServer.server_close() 45 | print(time.asctime(), "Server Stops - %s:%s" % (hostName, hostPort)) 46 | # python3 -m http.server 10010 --bind 127.0.0.1 47 | -------------------------------------------------------------------------------- /python-examples/setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is a setup.py script generated by py2applet 3 | 4 | Usage: 5 | python setup.py py2app 6 | """ 7 | 8 | from setuptools import setup 9 | 10 | APP = ['tkinter-example.py'] 11 | DATA_FILES = [] 12 | OPTIONS = {'argv_emulation': True} 13 | 14 | setup( 15 | app=APP, 16 | data_files=DATA_FILES, 17 | options={'py2app': OPTIONS}, 18 | setup_requires=['py2app'], 19 | ) 20 | -------------------------------------------------------------------------------- /python-examples/shodan-example.py: -------------------------------------------------------------------------------- 1 | """Example on using shodan api, requires shodan api key.""" 2 | # Author: James Campbell 3 | # Date: June 23rd 2016 4 | # Date Updated: 3 July 2019 5 | # What: Shodan example 6 | from configs import globalshodankey # have a configs.py file with shodan api key 7 | import shodan 8 | 9 | shodan_api_key = globalshodankey # set in configs.py 10 | try: 11 | api = shodan.Shodan(shodan_api_key) 12 | except Exception: 13 | exit('make sure you have the shodan key setup in configs.py as globalshodankey = "yourkey"') 14 | try: 15 | results = api.count('country:GB city:Glasgow Nginx') 16 | if int(results['total']) == 0: 17 | print('NONE FOUND! 
TRY AGAIN')
18 |     else:
19 |         print(f"Results found for NGINX in Glasgow: {results['total']}")
20 | except Exception:
21 |     exit('failed')
22 | 


--------------------------------------------------------------------------------
/python-examples/smash-site-example.py:
--------------------------------------------------------------------------------
 1 | import urllib
 2 | import urllib.request
 3 | import sys
 4 | from concurrent.futures import ThreadPoolExecutor
 5 | 
 6 | # globals
 7 | if len(sys.argv) < 2:
 8 |     link = 'https://jamescampbell.us'
 9 | else:
10 |     link = sys.argv[1]
11 | 
12 | headers = {}
13 | headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686)"
14 | p = 1
15 | 
16 | 
17 | def getyou(linked=link):
18 |     global p
19 |     f = urllib.request.Request(linked, headers=headers)
20 |     urllib.request.urlopen(f)
21 |     p = p + 1
22 |     print('IM P %d' % (p,))
23 | 
24 | 
25 | executor = ThreadPoolExecutor(max_workers=100)
26 | futures = []
27 | i = 1
28 | while i < 500:
29 |     a = executor.submit(getyou, link)
30 |     futures.append(a)
31 |     i = i + 1
32 | 


--------------------------------------------------------------------------------
/python-examples/speech-example.py:
--------------------------------------------------------------------------------
 1 | # speech recognition example
 2 | # author: James Campbell
 3 | # date: 2015-05-26
 4 | # to install pyaudio on osx: brew install portaudio then
 5 | # pip install --allow-external pyaudio --allow-unverified pyaudio pyaudio
 6 | import speech_recognition as sr  # pip install speechrecognition
 7 | from termcolor import colored
 8 | r = sr.Recognizer()
 9 | with sr.Microphone() as source:  # use the default microphone as the audio source
10 |     # listen for the first phrase and extract it into audio data
11 |     audio = r.listen(source)
12 | 
13 | try:
14 |     # recognize speech using Google Speech Recognition (call it once and reuse the result)
15 |     said = r.recognize_google(audio)
16 |     print("You said " + colored(said, 'yellow'))
17 |     if said == 'exit':
18 |         exit('goodbye')
19 | except sr.UnknownValueError:  # speech is unintelligible
20 |     print("Could not understand audio")
21 | 


--------------------------------------------------------------------------------
/python-examples/spider.py:
--------------------------------------------------------------------------------
 1 | from __future__ import absolute_import
 2 | # getting all links example crawling jamescampbell.us
 3 | # author: James Campbell
 4 | # Date Created: 2015 05 22
 5 | # Date Updated: 2 July 2019
 6 | import argparse
 7 | from scrapy.spiders import CrawlSpider, Rule
 8 | from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
 9 | from scrapy.item import Item, Field
10 | 
11 | # terminal arguments parser globals - do not change
12 | parser = argparse.ArgumentParser()
13 | parser.add_argument('-u', action='store', dest='url',
14 |                     help='Domain to crawl')
15 | parser.add_argument('-c', action='store_const', dest='constant_value',
16 |                     const='value-to-store',
17 |                     help='Store a constant value')
18 | parser.add_argument('--version', action='version', version='%(prog)s 1.0')
19 | results = parser.parse_args()
20 | 
21 | # setup the default domain to crawl
22 | domainer = 'jamescampbell.us'  # default domain if none is set on the command line
23 | if results.url is not None:  # if a domain is set then change from the default to that
24 |     domainer = results.url  # set from argparse above in globals section
25 | 
26 | 
27 | DOMAIN = domainer
28 | URL = 'https://%s' % DOMAIN
29 | 
30 | 
31 | class MyItem(Item):
32 |     url = Field()
33 | 
34 | 
35 | class someSpider(CrawlSpider):
36 |     name = 'crawltest'
37 |     allowed_domains = [DOMAIN]
38 |     start_urls = [URL]
39 |     rules = (Rule(LxmlLinkExtractor(allow=()), callback='parse_obj', follow=True),)
40 | 
41 |     def parse_obj(self, response):
42 |         item = MyItem()
43 |         item['url'] = []
44 |         for link in LxmlLinkExtractor(allow=(), deny=self.allowed_domains).extract_links(response):
45 |             item['url'].append(link.url)
46 |             print(link.url)
47 |         return item
48 | 
49 | 
50 | someSpider()
51 | 


--------------------------------------------------------------------------------
/python-examples/spotify-example.py:
--------------------------------------------------------------------------------
 1 | # Author: James Campbell
 2 | # What: uses spotipy to get example data
 3 | import sys
 4 | import spotipy
 5 | import spotipy.util as util
 6 | 
 7 | scope = 'user-library-read'
 8 | 
 9 | if len(sys.argv) > 1:
10 |     username = sys.argv[1]
11 | else:
12 |     print("Usage: %s username" % (sys.argv[0],))
13 |     sys.exit()
14 | 
15 | token = util.prompt_for_user_token(username, scope)
16 | 
17 | if token:
18 |     sp = spotipy.Spotify(auth=token)
19 |     results = sp.current_user_saved_tracks()
20 |     for item in results['items']:
21 |         track = item['track']
22 |         print(track['name'] + ' - ' + track['artists'][0]['name'])
23 | else:
24 |     print("Can't get token for", username)
25 | 


--------------------------------------------------------------------------------
/python-examples/stem_tor-example.py:
--------------------------------------------------------------------------------
 1 | import os
 2 | import shutil
 3 | # configs.py file you need to create
 4 | from configs import stempass
 5 | from stem.control import Controller
 6 | from flask import Flask
 7 | 
 8 | app = Flask(__name__)
 9 | 
10 | 
11 | @app.route('/')
12 | def index():
13 |     global result
14 |     hoster = result.hostname
15 |     return "Hi Grandma! {}".format(hoster)
16 | 
17 | 
18 | 
19 | print(' * Connecting to tor')
20 | 
21 | with Controller.from_port() as controller:
22 |     controller.authenticate(password=stempass)
23 | 
24 |     # All hidden services have a directory on disk. Let's put ours in tor's data
25 |     # directory.
26 | 
27 |     hidden_service_dir = os.path.join(controller.get_conf('DataDirectory', '/tmp'), 'hello_world')
28 | 
29 |     # Create a hidden service where visitors of port 80 get redirected to local
30 |     # port 5000 (this is where Flask runs by default).
31 | 
32 |     print(" * Creating our hidden service in %s" % hidden_service_dir)
33 |     result = controller.create_hidden_service(hidden_service_dir, 80, target_port=5000)
34 | 
35 |     # The hostname is only available when we can read the hidden service
36 |     # directory. This requires us to be running with the same user as tor.
37 | 
38 |     if result.hostname:
39 |         print(" * Our service is available at %s, press ctrl+c to quit" % result.hostname)
40 |     else:
41 |         print(" * Unable to read the hidden service directory")
42 | 
43 |     try:
44 |         app.run()
45 |     finally:
46 |         # Shut down the hidden service and clean it off disk. Note that you *don't*
47 |         # want to delete the hidden service directory if you'd like to have this
48 |         # same *.onion address in the future.
49 | 
50 |         print(" * Shutting down our hidden service")
51 |         controller.remove_hidden_service(hidden_service_dir)
52 |         shutil.rmtree(hidden_service_dir)
53 | 


--------------------------------------------------------------------------------
/python-examples/test_tor-example.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/python3
 2 | import socks
 3 | import socket
 4 | 
 5 | 
 6 | def set_socks_default():
 7 |     # TOR SETUP GLOBAL Vars
 8 |     SOCKS_PORT = 9050  # TOR proxy port that is default from torrc, change to whatever torrc
 9 | 
10 |     socks.set_default_proxy(socks.SOCKS5, "127.0.0.1", SOCKS_PORT)
11 |     socket.socket = socks.socksocket
12 | 
13 |     # Perform DNS resolution through the socket
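    # (otherwise hostname lookups would use the system resolver and leak DNS
    # queries outside of the Tor SOCKS connection)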
14 |     def getaddrinfo(*args):
15 |         return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
16 |     socket.getaddrinfo = getaddrinfo
17 |     return "success"
18 | 
19 | 
20 | def test_socks():
21 |     assert set_socks_default() == "success"
22 | 


--------------------------------------------------------------------------------
/python-examples/textract-example.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/python3
 2 | # Author: James Campbell
 3 | # Date: June 3rd 2019
 4 | # What: get text from a PDF
 5 | import sys
 6 | import textract
 7 | 
 8 | text = textract.process(sys.argv[1])
 9 | print(text.decode('utf-8'))  # textract.process returns bytes
10 | 


--------------------------------------------------------------------------------
/python-examples/tika-example-too.py:
--------------------------------------------------------------------------------
 1 | import requests
 2 | import json
 3 | from pprint import pprint
 4 | 
 5 | import os
 6 | 
 7 | # globals
 8 | 
 9 | filelist = os.listdir('assets')  # bare file names inside the assets directory
10 | 
11 | for filepath in filelist:
12 |     payload = open(os.path.join('assets', filepath), 'rb').read()  # listdir returns bare names
13 |     filename = os.path.basename(filepath)
14 |     print(filename)
15 |     r = requests.put('http://localhost:9998/rmeta', data=payload)
16 |     print(r.text)
17 |     rdict = json.loads(r.text)[0]
18 |     pprint(rdict.get('meta:page-count', 'no page count'))  # not every asset has pages
19 | exit()
20 | 


--------------------------------------------------------------------------------
/python-examples/tika-example.py:
--------------------------------------------------------------------------------
1 | """Tika example get metadata."""
2 | from tika import parser
3 | parsed = parser.from_file("temp.wav")
4 | print(parsed["metadata"])
5 | print(parsed["content"])
6 | 


--------------------------------------------------------------------------------
/python-examples/tika-get-text-example.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/python3
 2 | # Author: James Campbell
 3 | # Date: June 3rd 2019
 4 | # What: get text from a PDF
 5 | from tika import parser
 6 | import sys
 7 | 
 8 | text = parser.from_file(sys.argv[1])
 9 | print(text["content"])  # from_file returns a dict with 'metadata' and 'content'
10 | 


--------------------------------------------------------------------------------
/python-examples/tkinter-example.py:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/python3
 2 | # Author: James Campbell
 3 | # What: Example GUI with Tkinter
 4 | 
 5 | import sys
 6 | if sys.version_info < (3, 0):
 7 |     # Python 2
 8 |     import Tkinter as tk
 9 | else:
10 |     # Python 3
11 |     import tkinter as tk
12 | root = tk.Tk()
13 | root.title("Sandwich")
14 | tk.Button(root, text="Make me a Sandwich").pack()
15 | tk.mainloop()
16 | 


--------------------------------------------------------------------------------
/python-examples/tor-example.py:
--------------------------------------------------------------------------------
 1 | # tor connect example code
 2 | # author: James Campbell
 3 | # date: 2015 05 17
 4 | # date updated: 2016 09 18 confirmed working (make sure to not use privoxy settings or will break)
 5 | 
 6 | import urllib
 7 | import urllib.request
 8 | import socks
 9 | import socket
10 | import argparse
11 | 
12 | 
13 | # terminal arguments parser globals - do not change
14 | parser = argparse.ArgumentParser()
15 | parser.add_argument('-o', action='store', dest='onion',
16 |                     help='put in onion site to load (with http & quotes)')
17 | results = parser.parse_args()
18 | 
19 | # Global Vars
20 | # set the default onion site to visit to test, in this case DuckDuckGo
21 | onionsite = 'http://3g2upl4pq6kufc4m.onion'
22 | if results.onion is not None:  # if search terms set in terminal then change from default to that
23 |     onionsite = results.onion  # set from argparse above in globals section
24 | 
25 | # TOR SETUP GLOBAL Vars
26 | SOCKS_PORT = 9050  # TOR proxy port that is default from torrc, change to whatever torrc
27 | 
28 | socks.set_default_proxy(socks.SOCKS5, "127.0.0.1", SOCKS_PORT)
29 | socket.socket = socks.socksocket
30 | 
31 | # Perform DNS resolution through the socket
32 | 
33 | 
34 | def getaddrinfo(*args):
35 |     return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
36 | 
37 | 
38 | socket.getaddrinfo = getaddrinfo
39 | 
40 | # test connect to DuckDuckGo .onion site
41 | headers = {'User-Agent': 'JAMES CAMPBELL jamescampbell.us SEARCH BOT! I FOUND YOU!!!!'}
42 | req = urllib.request.Request(onionsite, None, headers)
43 | try:
44 |     response = urllib.request.urlopen(req)  # new python 3 code -jc
45 |     sitehtml = response.read()
46 |     print(sitehtml)
47 |     status = 'loaded successfully'
48 | except urllib.error.URLError as e:
49 |     print('request failed:', e)
50 |     status = 'failed reading'
51 | print(status)
52 | 


--------------------------------------------------------------------------------
/python-examples/triplot-example.py:
--------------------------------------------------------------------------------
 1 | """
 2 | Creating and plotting unstructured triangular grids.
 3 | """
 4 | import matplotlib.pyplot as plt
 5 | import matplotlib.tri as tri
 6 | import numpy as np
 7 | import math
 8 | 
 9 | # Creating a Triangulation without specifying the triangles results in the
10 | # Delaunay triangulation of the points.
11 | 
12 | # First create the x and y coordinates of the points.
13 | n_angles = 36
14 | n_radii = 8
15 | min_radius = 0.25
16 | radii = np.linspace(min_radius, 0.95, n_radii)
17 | 
18 | angles = np.linspace(0, 2*math.pi, n_angles, endpoint=False)
19 | angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
20 | angles[:, 1::2] += math.pi/n_angles
21 | 
22 | x = (radii*np.cos(angles)).flatten()
23 | y = (radii*np.sin(angles)).flatten()
24 | 
25 | # Create the Triangulation; no triangles so Delaunay triangulation created.
26 | triang = tri.Triangulation(x, y)
27 | 
28 | # Mask off unwanted triangles.
29 | xmid = x[triang.triangles].mean(axis=1)
30 | ymid = y[triang.triangles].mean(axis=1)
31 | mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
32 | triang.set_mask(mask)
33 | 
34 | # Plot the triangulation.
35 | plt.figure()
36 | plt.gca().set_aspect('equal')
37 | plt.triplot(triang, 'bo-')
38 | plt.title('triplot of Delaunay triangulation')
39 | 
40 | 
41 | # You can specify your own triangulation rather than perform a Delaunay
42 | # triangulation of the points, where each triangle is given by the indices of
43 | # the three points that make up the triangle, ordered in either a clockwise or
44 | # anticlockwise manner.
45 | 
46 | xy = np.asarray([
47 |     [-0.101, 0.872], [-0.080, 0.883], [-0.069, 0.888], [-0.054, 0.890],
48 |     [-0.045, 0.897], [-0.057, 0.895], [-0.073, 0.900], [-0.087, 0.898],
49 |     [-0.090, 0.904], [-0.069, 0.907], [-0.069, 0.921], [-0.080, 0.919],
50 |     [-0.073, 0.928], [-0.052, 0.930], [-0.048, 0.942], [-0.062, 0.949],
51 |     [-0.054, 0.958], [-0.069, 0.954], [-0.087, 0.952], [-0.087, 0.959],
52 |     [-0.080, 0.966], [-0.085, 0.973], [-0.087, 0.965], [-0.097, 0.965],
53 |     [-0.097, 0.975], [-0.092, 0.984], [-0.101, 0.980], [-0.108, 0.980],
54 |     [-0.104, 0.987], [-0.102, 0.993], [-0.115, 1.001], [-0.099, 0.996],
55 |     [-0.101, 1.007], [-0.090, 1.010], [-0.087, 1.021], [-0.069, 1.021],
56 |     [-0.052, 1.022], [-0.052, 1.017], [-0.069, 1.010], [-0.064, 1.005],
57 |     [-0.048, 1.005], [-0.031, 1.005], [-0.031, 0.996], [-0.040, 0.987],
58 |     [-0.045, 0.980], [-0.052, 0.975], [-0.040, 0.973], [-0.026, 0.968],
59 |     [-0.020, 0.954], [-0.006, 0.947], [0.003, 0.935], [0.006, 0.926],
60 |     [0.005, 0.921], [0.022, 0.923], [0.033, 0.912], [0.029, 0.905],
61 |     [0.017, 0.900], [0.012, 0.895], [0.027, 0.893], [0.019, 0.886],
62 |     [0.001, 0.883], [-0.012, 0.884], [-0.029, 0.883], [-0.038, 0.879],
63 |     [-0.057, 0.881], [-0.062, 0.876], [-0.078, 0.876], [-0.087, 0.872],
64 |     [-0.030, 0.907], [-0.007, 0.905], [-0.057, 0.916], [-0.025, 0.933],
65 |     [-0.077, 0.990], [-0.059, 0.993]])
66 | x = xy[:, 0]*180/3.14159
67 | y = xy[:, 1]*180/3.14159
68 | 
69 | triangles = np.asarray([
70 |     [67, 66,  1], [65,  2, 66], [1, 66,  2], [64,  2, 65], [63,  3, 64],
71 |     [60, 59, 57], [2, 64,  3], [3, 63,  4], [0, 67,  1], [62,  4, 63],
72 |     [57, 59, 56], [59, 58, 56], [61, 60, 69], [57, 69, 60], [4, 62, 68],
73 |     [6,  5,  9], [61, 68, 62], [69, 68, 61], [9,  5, 70], [6,  8,  7],
74 |     [4, 70,  5], [8,  6,  9], [56, 69, 57], [69, 56, 52], [70, 10,  9],
75 |     [54, 53, 55], [56, 55, 53], [68, 70,  4], [52, 56, 53], [11, 10, 12],
76 |     [69, 71, 68], [68, 13, 70], [10, 70, 13], [51, 50, 52], [13, 68, 71],
77 |     [52, 71, 69], [12, 10, 13], [71, 52, 50], [71, 14, 13], [50, 49, 71],
78 |     [49, 48, 71], [14, 16, 15], [14, 71, 48], [17, 19, 18], [17, 20, 19],
79 |     [48, 16, 14], [48, 47, 16], [47, 46, 16], [16, 46, 45], [23, 22, 24],
80 |     [21, 24, 22], [17, 16, 45], [20, 17, 45], [21, 25, 24], [27, 26, 28],
81 |     [20, 72, 21], [25, 21, 72], [45, 72, 20], [25, 28, 26], [44, 73, 45],
82 |     [72, 45, 73], [28, 25, 29], [29, 25, 31], [43, 73, 44], [73, 43, 40],
83 |     [72, 73, 39], [72, 31, 25], [42, 40, 43], [31, 30, 29], [39, 73, 40],
84 |     [42, 41, 40], [72, 33, 31], [32, 31, 33], [39, 38, 72], [33, 72, 38],
85 |     [33, 38, 34], [37, 35, 38], [34, 38, 35], [35, 37, 36]])
86 | 
87 | # Rather than create a Triangulation object, can simply pass x, y and triangles
88 | # arrays to triplot directly.  It would be better to use a Triangulation object
89 | # if the same triangulation was to be used more than once to save duplicated
90 | # calculations.
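# For example, a minimal sketch of that reuse (the name tri_obj is illustrative
# only and not part of the original script):
#     tri_obj = tri.Triangulation(x, y, triangles)
#     plt.triplot(tri_obj, 'go-')      # first plot
#     plt.tricontourf(tri_obj, x * y)  # later calls reuse the same triangulation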
91 | plt.figure()
92 | plt.gca().set_aspect('equal')
93 | plt.triplot(x, y, triangles, 'go-')
94 | plt.title('triplot of user-specified triangulation')
95 | plt.xlabel('Longitude (degrees)')
96 | plt.ylabel('Latitude (degrees)')
97 | 
98 | plt.show()
99 | 


--------------------------------------------------------------------------------
/python-examples/tuple-example.py:
--------------------------------------------------------------------------------
 1 | # tuple sort example
 2 | # author: James Campbell
 3 | # date: 2015-05-28
 4 | # Date Updated: 2 July 2019
 5 | valued = []
 6 | lettered = []
 7 | plusone = []
 8 | listed = [(('d', 0), ('g', 0)), (('d', 0), ('d', 1)), (('i', 0), ('g', 0))]
 9 | for (x, y) in listed:
10 |     for subx, suby in x, y:
11 |         valued.append(int(suby))
12 |         lettered.append(subx)
13 | for value in valued:
14 |     value = value + 1
15 |     plusone.append(int(value))
16 | # print plusone
17 | coolness = zip(lettered, plusone)
18 | print(list(coolness))  # zip returns a lazy iterator in Python 3
19 | 
20 | print(list(map(list, zip(lettered, plusone))))  # materialise map() as well
21 | 


--------------------------------------------------------------------------------
/python-examples/urllib3_proxymanager-example.py:
--------------------------------------------------------------------------------
 1 | """Using urllib3 ProxyManager and tor example."""
 2 | # author: James Campbell
 3 | # date: 2015 11 19
 4 | # Date Updated: 2 July 2019
 5 | import urllib3  # use with python 3 only
 6 | import argparse
 7 | from bs4 import BeautifulSoup
 8 | 
 9 | # terminal arguments parser globals - do not change
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument('-o', action='store', dest='onion',
12 |                     help='put in onion site to load (with http & quotes)')
13 | results = parser.parse_args()
14 | 
15 | # Global Vars
16 | # set the default onion site to visit to test, in this case DuckDuckGo
17 | onionsite = 'http://3g2upl4pq6kufc4m.onion'
18 | if results.onion is not None:  # if search terms set in terminal then change from default to that
19 |     onionsite = results.onion  # set from argparse above in globals section
20 | 
21 | # TOR SETUP GLOBAL Vars
22 | # TOR proxy port that is default from torrc, change to whatever torrc is configured to
23 | SOCKS_PORT = 9050
24 | 
25 | 
26 | header = {'User-Agent': 'JAMES CAMPBELL jamescampbell.us SEARCH BOT! I FOUND YOU!!!!'}
27 | # using this with privoxy and forwarding to tor
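# (privoxy listens on 8118 by default; port 8119 here assumes a locally
# customised privoxy configuration)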
28 | proxy = urllib3.ProxyManager('http://127.0.0.1:8119/')
29 | r1 = proxy.request('GET', onionsite, headers=header)
30 | print(r1.status)  # status code
31 | print(r1.headers)  # header data
32 | print(r1.data.decode('utf8'))  # html raw output
33 | souper = BeautifulSoup(r1.data, "html.parser")
34 | soupera = souper.find_all('a')  # get all a href's
35 | for eachone in soupera:
36 |     print('This is a link: \n', eachone.text)
37 | exit()
38 | # test connect to DuckDuckGo .onion site
39 | 


--------------------------------------------------------------------------------
/python-examples/websockify-example.py:
--------------------------------------------------------------------------------
  1 | """Example using websockify."""
  2 | from websockify import auth_plugins as auth
  3 | from websockify import websocket
  4 | import select
  5 | import signal
  6 | import socket
  7 | import optparse
  8 | import time
  9 | import os
 10 | import sys
 11 | import subprocess
 12 | import logging
 13 | import errno
 14 | 
 15 | from socketserver import ForkingMixIn
 16 | from http.server import HTTPServer
 17 | from urllib.parse import parse_qs, urlparse
 18 | '''
 19 | A WebSocket to TCP socket proxy with support for "wss://" encryption.
 20 | Copyright 2011 Joel Martin
 21 | Licensed under LGPL version 3 (see docs/LICENSE.LGPL-3)
 22 | 
 23 | You can make a cert/key with openssl using:
 24 | openssl req -new -x509 -days 365 -nodes -out self.pem -keyout self.pem
 25 | as taken from http://docs.python.org/dev/library/ssl.html#certificates
 26 | 
 27 | '''
 28 | 
 29 | 
 30 | class ProxyRequestHandler(websocket.WebSocketRequestHandler):
 31 | 
 32 |     traffic_legend = """
 33 | Traffic Legend:
 34 |     }  - Client receive
 35 |     }. - Client receive partial
 36 |     {  - Target receive
 37 | 
 38 |     >  - Target send
 39 |     >. - Target send partial
 40 |     <  - Client send
 41 |     <. - Client send partial
 42 | """
 43 | 
 44 |     def send_auth_error(self, ex):
 45 |         self.send_response(ex.code, ex.msg)
 46 |         self.send_header('Content-Type', 'text/html')
 47 |         for name, val in ex.headers.items():
 48 |             self.send_header(name, val)
 49 | 
 50 |         self.end_headers()
 51 | 
 52 |     def validate_connection(self):
 53 |         if self.server.token_plugin:
 54 |             host, port = self.get_target(self.server.token_plugin, self.path)
 55 |             if host == 'unix_socket':
 56 |                 self.server.unix_target = port
 57 | 
 58 |             else:
 59 |                 self.server.target_host = host
 60 |                 self.server.target_port = port
 61 | 
 62 |         if self.server.auth_plugin:
 63 |             try:
 64 |                 self.server.auth_plugin.authenticate(
 65 |                     headers=self.headers, target_host=self.server.target_host,
 66 |                     target_port=self.server.target_port)
 67 |             except auth.AuthenticationError:
 68 |                 ex = sys.exc_info()[1]
 69 |                 self.send_auth_error(ex)
 70 |                 raise
 71 | 
 72 |     def new_websocket_client(self):
 73 |         """
 74 |         Called after a new WebSocket connection has been established.
 75 |         """
 76 |         # Checking for a token is done in validate_connection()
 77 | 
 78 |         # Connect to the target
 79 |         if self.server.wrap_cmd:
 80 |             msg = "connecting to command: '%s' (port %s)" % (
 81 |                 " ".join(self.server.wrap_cmd), self.server.target_port)
 82 |         elif self.server.unix_target:
 83 |             msg = "connecting to unix socket: %s" % self.server.unix_target
 84 |         else:
 85 |             msg = "connecting to: %s:%s" % (
 86 |                 self.server.target_host, self.server.target_port)
 87 | 
 88 |         if self.server.ssl_target:
 89 |             msg += " (using SSL)"
 90 |         self.log_message(msg)
 91 | 
 92 |         tsock = websocket.WebSocketServer.socket(self.server.target_host,
 93 |                                                  self.server.target_port,
 94 |                                                  connect=True,
 95 |                                                  use_ssl=self.server.ssl_target,
 96 |                                                  unix_socket=self.server.unix_target)
 97 | 
 98 |         self.request.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
 99 |         if not self.server.wrap_cmd and not self.server.unix_target:
100 |             tsock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
101 | 
102 |         self.print_traffic(self.traffic_legend)
103 | 
104 |         # Start proxying
105 |         try:
106 |             self.do_proxy(tsock)
107 |         except Exception:
108 |             if tsock:
109 |                 tsock.shutdown(socket.SHUT_RDWR)
110 |                 tsock.close()
111 |                 if self.verbose:
112 |                     self.log_message("%s:%s: Closed target",
113 |                                      self.server.target_host,
114 |                                      self.server.target_port)
115 |             raise
116 | 
117 |     def get_target(self, target_plugin, path):
118 |         """
119 |         Parses the path, extracts a token, and looks up a target
120 |         for that token using the token plugin. Sets
121 |         target_host and target_port if successful
122 |         """
123 |         # The files in targets contain the lines
124 |         # in the form of token: host:port
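        # e.g. a line such as "desktop1: 192.168.1.50:5900" (hypothetical values,
        # shown only to illustrate the format)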
125 | 
126 |         # Extract the token parameter from url
127 |         args = parse_qs(urlparse(path)[4])  # 4 is the query from url
128 | 
129 |         if 'token' not in args or not len(args['token']):
130 |             raise self.server.EClose("Token not present")
131 | 
132 |         token = args['token'][0].rstrip('\n')
133 | 
134 |         result_pair = target_plugin.lookup(token)
135 | 
136 |         if result_pair is not None:
137 |             return result_pair
138 |         else:
139 |             raise self.server.EClose("Token '%s' not found" % token)
140 | 
141 |     def do_proxy(self, target):
142 |         """
143 |         Proxy client WebSocket to normal target socket.
144 |         """
145 |         cqueue = []
146 |         c_pend = 0
147 |         tqueue = []
148 |         rlist = [self.request, target]
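        # cqueue: data from the target waiting to be framed and sent to the client;
        # tqueue: decoded client data waiting to be written to the target;
        # c_pend: frames still partially pending on the client socket;
        # rlist: the two sockets polled for readability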
149 | 
150 |         if self.server.heartbeat:
151 |             now = time.time()
152 |             self.heartbeat = now + self.server.heartbeat
153 |         else:
154 |             self.heartbeat = None
155 | 
156 |         while True:
157 |             wlist = []
158 | 
159 |             if self.heartbeat is not None:
160 |                 now = time.time()
161 |                 if now > self.heartbeat:
162 |                     self.heartbeat = now + self.server.heartbeat
163 |                     self.send_ping()
164 | 
165 |             if tqueue:
166 |                 wlist.append(target)
167 |             if cqueue or c_pend:
168 |                 wlist.append(self.request)
169 |             try:
170 |                 ins, outs, excepts = select.select(rlist, wlist, [], 1)
171 |             except (select.error, OSError):
172 |                 exc = sys.exc_info()[1]
173 |                 if hasattr(exc, 'errno'):
174 |                     err = exc.errno
175 |                 else:
176 |                     err = exc[0]
177 | 
178 |                 if err != errno.EINTR:
179 |                     raise
180 |                 else:
181 |                     continue
182 | 
183 |             if excepts:
184 |                 raise Exception("Socket exception")
185 | 
186 |             if self.request in outs:
187 |                 # Send queued target data to the client
188 |                 c_pend = self.send_frames(cqueue)
189 | 
190 |                 cqueue = []
191 | 
192 |             if self.request in ins:
193 |                 # Receive client data, decode it, and queue for target
194 |                 bufs, closed = self.recv_frames()
195 |                 tqueue.extend(bufs)
196 | 
197 |                 if closed:
198 |                     # TODO: What about blocking on client socket?
199 |                     if self.verbose:
200 |                         self.log_message("%s:%s: Client closed connection",
201 |                                          self.server.target_host, self.server.target_port)
202 |                     raise self.CClose(closed['code'], closed['reason'])
203 | 
204 |             if target in outs:
205 |                 # Send queued client data to the target
206 |                 dat = tqueue.pop(0)
207 |                 sent = target.send(dat)
208 |                 if sent == len(dat):
209 |                     self.print_traffic(">")
210 |                 else:
211 |                     # requeue the remaining data
212 |                     tqueue.insert(0, dat[sent:])
213 |                     self.print_traffic(".>")
214 | 
215 |             if target in ins:
216 |                 # Receive target data, encode it and queue for client
217 |                 buf = target.recv(self.buffer_size)
218 |                 if len(buf) == 0:
219 |                     if self.verbose:
220 |                         self.log_message("%s:%s: Target closed connection",
221 |                                          self.server.target_host, self.server.target_port)
222 |                     raise self.CClose(1000, "Target closed")
223 | 
224 |                 cqueue.append(buf)
225 |                 self.print_traffic("{")
226 | 
227 | 
228 | class WebSocketProxy(websocket.WebSocketServer):
229 |     """
230 |     Proxy traffic to and from a WebSockets client to a normal TCP
231 |     socket server target. All traffic to/from the client is base64
232 |     encoded/decoded to allow binary data to be sent/received to/from
233 |     the target.
234 |     """
235 | 
236 |     buffer_size = 65536
237 | 
238 |     def __init__(self, RequestHandlerClass=ProxyRequestHandler, *args, **kwargs):
239 |         # Save off proxy specific options
240 |         self.target_host = kwargs.pop('target_host', None)
241 |         self.target_port = kwargs.pop('target_port', None)
242 |         self.wrap_cmd = kwargs.pop('wrap_cmd', None)
243 |         self.wrap_mode = kwargs.pop('wrap_mode', None)
244 |         self.unix_target = kwargs.pop('unix_target', None)
245 |         self.ssl_target = kwargs.pop('ssl_target', None)
246 |         self.heartbeat = kwargs.pop('heartbeat', None)
247 | 
248 |         self.token_plugin = kwargs.pop('token_plugin', None)
249 |         self.auth_plugin = kwargs.pop('auth_plugin', None)
250 | 
251 |         # Last 3 timestamps command was run
252 |         self.wrap_times = [0, 0, 0]
253 | 
254 |         if self.wrap_cmd:
255 |             wsdir = os.path.dirname(sys.argv[0])
256 |             rebinder_path = [os.path.join(wsdir, "..", "lib"),
257 |                              os.path.join(wsdir, "..", "lib", "websockify"),
258 |                              wsdir]
259 |             self.rebinder = None
260 | 
261 |             for rdir in rebinder_path:
262 |                 rpath = os.path.join(rdir, "rebind.so")
263 |                 if os.path.exists(rpath):
264 |                     self.rebinder = rpath
265 |                     break
266 | 
267 |             if not self.rebinder:
268 |                 raise Exception(
269 |                     "rebind.so not found, perhaps you need to run make")
270 |             self.rebinder = os.path.abspath(self.rebinder)
271 | 
272 |             self.target_host = "127.0.0.1"  # Loopback
273 |             # Find a free high port
274 |             sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
275 |             sock.bind(('', 0))
276 |             self.target_port = sock.getsockname()[1]
277 |             sock.close()
278 | 
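            # rebind.so is LD_PRELOADed into the wrapped command so that its bind()
            # on the original listen port is transparently moved to the free port
            # chosen above, leaving the original port for the proxy to listen on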
279 |             os.environ.update({
280 |                 "LD_PRELOAD": self.rebinder,
281 |                 "REBIND_OLD_PORT": str(kwargs['listen_port']),
282 |                 "REBIND_NEW_PORT": str(self.target_port)})
283 | 
284 |         websocket.WebSocketServer.__init__(
285 |             self, RequestHandlerClass, *args, **kwargs)
286 | 
287 |     def run_wrap_cmd(self):
288 |         self.msg("Starting '%s'", " ".join(self.wrap_cmd))
289 |         self.wrap_times.append(time.time())
290 |         self.wrap_times.pop(0)
291 |         self.cmd = subprocess.Popen(
292 |             self.wrap_cmd, env=os.environ, preexec_fn=_subprocess_setup)
293 |         self.spawn_message = True
294 | 
295 |     def started(self):
296 |         """
297 |         Called after Websockets server startup (i.e. after daemonize)
298 |         """
299 |         # Need to call wrapped command after daemonization so we can
300 |         # know when the wrapped command exits
301 |         if self.wrap_cmd:
302 |             dst_string = "'%s' (port %s)" % (
303 |                 " ".join(self.wrap_cmd), self.target_port)
304 |         elif self.unix_target:
305 |             dst_string = self.unix_target
306 |         else:
307 |             dst_string = "%s:%s" % (self.target_host, self.target_port)
308 | 
309 |         if self.token_plugin:
310 |             msg = "  - proxying from %s:%s to targets generated by %s" % (
311 |                 self.listen_host, self.listen_port, type(self.token_plugin).__name__)
312 |         else:
313 |             msg = "  - proxying from %s:%s to %s" % (
314 |                 self.listen_host, self.listen_port, dst_string)
315 | 
316 |         if self.ssl_target:
317 |             msg += " (using SSL)"
318 | 
319 |         self.msg("%s", msg)
320 | 
321 |         if self.wrap_cmd:
322 |             self.run_wrap_cmd()
323 | 
324 |     def poll(self):
325 |         # If we are wrapping a command, check it's status
326 | 
327 |         if self.wrap_cmd and self.cmd:
328 |             ret = self.cmd.poll()
329 |             if ret is not None:
330 |                 self.vmsg(
331 |                     "Wrapped command exited (or daemon). Returned %s" % ret)
332 |                 self.cmd = None
333 | 
334 |         if self.wrap_cmd and self.cmd is None:
335 |             # Response to wrapped command being gone
336 |             if self.wrap_mode == "ignore":
337 |                 pass
338 |             elif self.wrap_mode == "exit":
339 |                 sys.exit(ret)
340 |             elif self.wrap_mode == "respawn":
341 |                 now = time.time()
342 |                 avg = sum(self.wrap_times)/len(self.wrap_times)
343 |                 if (now - avg) < 10:
344 |                     # 3 times in the last 10 seconds
345 |                     if self.spawn_message:
346 |                         self.warn("Command respawning too fast")
347 |                         self.spawn_message = False
348 |                 else:
349 |                     self.run_wrap_cmd()
350 | 
351 | 
352 | def _subprocess_setup():
353 |     # Python installs a SIGPIPE handler by default. This is usually not what
354 |     # non-Python subprocesses expect.
355 |     signal.signal(signal.SIGPIPE, signal.SIG_DFL)
356 | 
357 | 
358 | def logger_init():
359 |     logger = logging.getLogger(WebSocketProxy.log_prefix)
360 |     logger.propagate = False
361 |     logger.setLevel(logging.INFO)
362 |     h = logging.StreamHandler()
363 |     h.setLevel(logging.DEBUG)
364 |     h.setFormatter(logging.Formatter("%(message)s"))
365 |     logger.addHandler(h)
366 | 
367 | 
368 | def websockify_init():
369 |     logger_init()
370 | 
371 |     usage = "\n    %prog [options]"
372 |     usage += " [source_addr:]source_port [target_addr:target_port]"
373 |     usage += "\n    %prog [options]"
374 |     usage += " [source_addr:]source_port -- WRAP_COMMAND_LINE"
375 |     parser = optparse.OptionParser(usage=usage)
376 |     parser.add_option("--verbose", "-v", action="store_true",
377 |                       help="verbose messages")
378 |     parser.add_option("--traffic", action="store_true",
379 |                       help="per frame traffic")
380 |     parser.add_option("--record",
381 |                       help="record sessions to FILE.[session_number]", metavar="FILE")
382 |     parser.add_option("--daemon", "-D",
383 |                       dest="daemon", action="store_true",
384 |                       help="become a daemon (background process)")
385 |     parser.add_option("--run-once", action="store_true",
386 |                       help="handle a single WebSocket connection and exit")
387 |     parser.add_option("--timeout", type=int, default=0,
388 |                       help="after TIMEOUT seconds exit when not connected")
389 |     parser.add_option("--idle-timeout", type=int, default=0,
390 |                       help="server exits after TIMEOUT seconds if there are no "
391 |                       "active connections")
392 |     parser.add_option("--cert", default="self.pem",
393 |                       help="SSL certificate file")
394 |     parser.add_option("--key", default=None,
395 |                       help="SSL key file (if separate from cert)")
396 |     parser.add_option("--ssl-only", action="store_true",
397 |                       help="disallow non-encrypted client connections")
398 |     parser.add_option("--ssl-target", action="store_true",
399 |                       help="connect to SSL target as SSL client")
400 |     parser.add_option("--unix-target",
401 |                       help="connect to unix socket target", metavar="FILE")
402 |     parser.add_option("--web", default=None, metavar="DIR",
403 |                       help="run webserver on same port. Serve files from DIR.")
404 |     parser.add_option("--wrap-mode", default="exit", metavar="MODE",
405 |                       choices=["exit", "ignore", "respawn"],
406 |                       help="action to take when the wrapped program exits "
407 |                       "or daemonizes: exit (default), ignore, respawn")
408 |     parser.add_option("--prefer-ipv6", "-6",
409 |                       action="store_true", dest="source_is_ipv6",
410 |                       help="prefer IPv6 when resolving source_addr")
411 |     parser.add_option("--libserver", action="store_true",
412 |                       help="use Python library SocketServer engine")
413 |     parser.add_option("--target-config", metavar="FILE",
414 |                       dest="target_cfg",
415 |                       help="Configuration file containing valid targets "
416 |                       "in the form 'token: host:port' or, alternatively, a "
417 |                       "directory containing configuration files of this form "
418 |                       "(DEPRECATED: use `--token-plugin TokenFile --token-source "
419 |                       " path/to/token/file` instead)")
420 |     parser.add_option("--token-plugin", default=None, metavar="PLUGIN",
421 |                       help="use the given Python class to process tokens "
422 |                            "into host:port pairs")
423 |     parser.add_option("--token-source", default=None, metavar="ARG",
424 |                       help="an argument to be passed to the token plugin "
425 |                            "on instantiation")
426 |     parser.add_option("--auth-plugin", default=None, metavar="PLUGIN",
427 |                       help="use the given Python class to determine if "
428 |                            "a connection is allowed")
429 |     parser.add_option("--auth-source", default=None, metavar="ARG",
430 |                       help="an argument to be passed to the auth plugin "
431 |                            "on instantiation")
432 |     parser.add_option("--auto-pong", action="store_true",
433 |                       help="Automatically respond to ping frames with a pong")
434 |     parser.add_option("--heartbeat", type=int, default=0,
435 |                       help="send a ping to the client every HEARTBEAT seconds")
436 |     parser.add_option("--log-file", metavar="FILE",
437 |                       dest="log_file",
438 |                       help="File where logs will be saved")
439 | 
440 |     (opts, args) = parser.parse_args()
441 | 
442 |     if opts.log_file:
443 |         opts.log_file = os.path.abspath(opts.log_file)
444 |         handler = logging.FileHandler(opts.log_file)
445 |         handler.setLevel(logging.DEBUG)
446 |         handler.setFormatter(logging.Formatter("%(message)s"))
447 |         logging.getLogger(WebSocketProxy.log_prefix).addHandler(handler)
448 | 
449 |     del opts.log_file
450 | 
451 |     if opts.verbose:
452 |         logging.getLogger(WebSocketProxy.log_prefix).setLevel(logging.DEBUG)
453 | 
454 |     if opts.token_source and not opts.token_plugin:
455 |         parser.error("You must use --token-plugin to use --token-source")
456 | 
457 |     if opts.auth_source and not opts.auth_plugin:
458 |         parser.error("You must use --auth-plugin to use --auth-source")
459 | 
460 |     # Transform to absolute path as daemon may chdir
461 |     if opts.target_cfg:
462 |         opts.target_cfg = os.path.abspath(opts.target_cfg)
463 | 
464 |     if opts.target_cfg:
465 |         opts.token_plugin = 'TokenFile'
466 |         opts.token_source = opts.target_cfg
467 | 
468 |     del opts.target_cfg
469 | 
470 |     # Sanity checks
471 |     if len(args) < 2 and not (opts.token_plugin or opts.unix_target):
472 |         parser.error("Too few arguments")
473 |     if sys.argv.count('--'):
474 |         opts.wrap_cmd = args[1:]
475 |     else:
476 |         opts.wrap_cmd = None
477 |         if len(args) > 2:
478 |             parser.error("Too many arguments")
479 | 
480 |     if not websocket.ssl and opts.ssl_target:
481 |         parser.error("SSL target requested and Python SSL module not loaded.")
482 | 
483 |     if opts.ssl_only and not os.path.exists(opts.cert):
484 |         parser.error("SSL only and %s not found" % opts.cert)
485 | 
486 |     # Parse host:port and convert ports to numbers
487 |     if args[0].count(':') > 0:
488 |         opts.listen_host, opts.listen_port = args[0].rsplit(':', 1)
489 |         opts.listen_host = opts.listen_host.strip('[]')
490 |     else:
491 |         opts.listen_host, opts.listen_port = '', args[0]
492 | 
493 |     try:
494 |         opts.listen_port = int(opts.listen_port)
495 |     except Exception:
496 |         parser.error("Error parsing listen port")
497 | 
498 |     if opts.wrap_cmd or opts.unix_target or opts.token_plugin:
499 |         opts.target_host = None
500 |         opts.target_port = None
501 |     else:
502 |         if args[1].count(':') > 0:
503 |             opts.target_host, opts.target_port = args[1].rsplit(':', 1)
504 |             opts.target_host = opts.target_host.strip('[]')
505 |         else:
506 |             parser.error("Error parsing target")
507 |         try:
508 |             opts.target_port = int(opts.target_port)
509 |         except Exception:
510 |             parser.error("Error parsing target port")
511 | 
512 |     if opts.token_plugin is not None:
513 |         if '.' not in opts.token_plugin:
514 |             opts.token_plugin = (
515 |                 'websockify.token_plugins.%s' % opts.token_plugin)
516 | 
517 |         token_plugin_module, token_plugin_cls = opts.token_plugin.rsplit(
518 |             '.', 1)
519 | 
520 |         __import__(token_plugin_module)
521 |         token_plugin_cls = getattr(
522 |             sys.modules[token_plugin_module], token_plugin_cls)
523 | 
524 |         opts.token_plugin = token_plugin_cls(opts.token_source)
525 | 
526 |     del opts.token_source
527 | 
528 |     if opts.auth_plugin is not None:
529 |         if '.' not in opts.auth_plugin:
530 |             opts.auth_plugin = 'websockify.auth_plugins.%s' % opts.auth_plugin
531 | 
532 |         auth_plugin_module, auth_plugin_cls = opts.auth_plugin.rsplit('.', 1)
533 | 
534 |         __import__(auth_plugin_module)
535 |         auth_plugin_cls = getattr(
536 |             sys.modules[auth_plugin_module], auth_plugin_cls)
537 | 
538 |         opts.auth_plugin = auth_plugin_cls(opts.auth_source)
539 | 
540 |     del opts.auth_source
541 | 
542 |     # Create and start the WebSockets proxy
543 |     libserver = opts.libserver
544 |     del opts.libserver
545 |     if libserver:
546 |         # Use standard Python SocketServer framework
547 |         server = LibProxyServer(**opts.__dict__)
548 |         server.serve_forever()
549 |     else:
550 |         # Use internal service framework
551 |         server = WebSocketProxy(**opts.__dict__)
552 |         server.start_server()
553 | 
554 | 
555 | class LibProxyServer(ForkingMixIn, HTTPServer):
556 |     """
557 |     Just like WebSocketProxy, but uses standard Python SocketServer
558 |     framework.
559 |     """
560 | 
561 |     def __init__(self, RequestHandlerClass=ProxyRequestHandler, **kwargs):
562 |         # Save off proxy specific options
563 |         self.target_host = kwargs.pop('target_host', None)
564 |         self.target_port = kwargs.pop('target_port', None)
565 |         self.wrap_cmd = kwargs.pop('wrap_cmd', None)
566 |         self.wrap_mode = kwargs.pop('wrap_mode', None)
567 |         self.unix_target = kwargs.pop('unix_target', None)
568 |         self.ssl_target = kwargs.pop('ssl_target', None)
569 |         self.token_plugin = kwargs.pop('token_plugin', None)
570 |         self.auth_plugin = kwargs.pop('auth_plugin', None)
571 |         self.heartbeat = kwargs.pop('heartbeat', None)
572 | 
573 |         self.token_plugin = None
574 |         self.auth_plugin = None
575 |         self.daemon = False
576 | 
577 |         # Server configuration
578 |         listen_host = kwargs.pop('listen_host', '')
579 |         listen_port = kwargs.pop('listen_port', None)
580 |         web = kwargs.pop('web', '')
581 | 
582 |         # Configuration affecting base request handler
583 |         self.only_upgrade = not web
584 |         self.verbose = kwargs.pop('verbose', False)
585 |         record = kwargs.pop('record', '')
586 |         if record:
587 |             self.record = os.path.abspath(record)
588 |         self.run_once = kwargs.pop('run_once', False)
589 |         self.handler_id = 0
590 | 
591 |         for arg in kwargs.keys():
592 |             print("warning: option %s ignored when using --libserver" % arg)
593 | 
594 |         if web:
595 |             os.chdir(web)
596 | 
597 |         HTTPServer.__init__(self, (listen_host, listen_port),
598 |                             RequestHandlerClass)
599 | 
600 |     def process_request(self, request, client_address):
601 |         """Override process_request to implement a counter"""
602 |         self.handler_id += 1
603 |         ForkingMixIn.process_request(self, request, client_address)
604 | 
605 | 
606 | if __name__ == '__main__':
607 |     websockify_init()
608 | 


--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
 1 | beautifulsoup4>=4.12.3
 2 | pysocks>=1.7.1
 3 | rethinkdb>=2.4.10
 4 | quandl>=3.7.0
 5 | nltk>=3.9.1
 6 | exifread>=3.0.0
 7 | blockchain>=1.4.4
 8 | websockify>=0.11.0
 9 | shodan>=1.31.0
10 | urllib3>=2.2.1
11 | fuzzywuzzy>=0.18.0
12 | scrapy>=2.12.0
13 | pytest>=8.3.0
14 | termcolor>=2.4.0
15 | pycld2>=0.41
16 | polyglot>=16.7.4
17 | tika>=2.6.0
18 | pyzillow>=0.7.0
19 | geotext>=0.4.0
20 | tabulate>=0.9.0
21 | tqdm>=4.66.4
22 | redis>=5.0.4
23 | psycopg2-binary>=2.9.9
24 | pypdf2>=3.0.1
25 | pinboard>=2.1.9
26 | webdriver-manager>=4.0.2
27 | scapy>=2.5.0
28 | matplotlib>=3.9.0
29 | iptcinfo3>=2.1.4
30 | requests>=2.31.0
31 | lxml>=5.2.0
32 | pillow>=10.3.0
33 | numpy>=1.26.0
34 | 


--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 100


--------------------------------------------------------------------------------