├── .editorconfig ├── .github └── workflows │ ├── generated-pr.yml │ └── stale.yml ├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── LICENSE ├── README.md ├── RELEASE.md ├── docs ├── Makefile ├── conf.py ├── http_client_ref.md ├── index.md ├── internal_ref.md ├── publish.py └── py-cid.inv ├── ipfshttpclient ├── __init__.py ├── client │ ├── __init__.py │ ├── base.py │ ├── bitswap.py │ ├── block.py │ ├── bootstrap.py │ ├── config.py │ ├── dag.py │ ├── dht.py │ ├── files.py │ ├── key.py │ ├── miscellaneous.py │ ├── name.py │ ├── object.py │ ├── pin.py │ ├── repo.py │ ├── swarm.py │ └── unstable.py ├── encoding.py ├── exceptions.py ├── filescanner.py ├── http.py ├── http_common.py ├── http_httpx.py ├── http_requests.py ├── multipart.py ├── requests_wrapper.py ├── utils.py └── version.py ├── pyproject.toml ├── test ├── combine-coverage.py ├── functional │ ├── .gitattributes │ ├── conftest.py │ ├── fake_dir │ │ ├── fsdfgh │ │ ├── popoiopiu │ │ ├── test2 │ │ │ ├── fssdf │ │ │ ├── high │ │ │ │ └── five │ │ │ │ │ └── dummy │ │ │ └── llllg │ │ └── test3 │ │ │ └── ppppoooooooooo │ ├── fake_dir_almost_empty │ │ └── .gitignore │ ├── fake_json │ │ ├── data.car │ │ ├── links.json │ │ └── no_links.json │ ├── test_bitswap.py │ ├── test_block.py │ ├── test_dag.py │ ├── test_files.py │ ├── test_key.py │ ├── test_miscellaneous.py │ ├── test_name.py │ ├── test_object.py │ ├── test_other.py │ ├── test_pin.py │ ├── test_repo.py │ └── test_unstable.py ├── run-tests.py └── unit │ ├── test_client.py │ ├── test_encoding.py │ ├── test_exceptions.py │ ├── test_filescanner.py │ ├── test_http.py │ ├── test_http_httpx.py │ ├── test_http_requests.py │ ├── test_multipart.py │ └── test_utils.py ├── tools ├── pre-commit ├── release │ └── requirements.txt └── verify │ ├── Dockerfile │ ├── entrypoint.sh │ └── validate.sh ├── tox.ini └── verify.sh /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [**] 4 | end_of_line = lf 5 | insert_final_newline = false 6 | 7 | charset = utf-8 8 | indent_style = tab 9 | indent_brace_style = 1TBS 10 | -------------------------------------------------------------------------------- /.github/workflows/generated-pr.yml: -------------------------------------------------------------------------------- 1 | name: Close Generated PRs 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-generated-pr.yml@v1 15 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | name: Close Stale Issues 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | workflow_dispatch: 7 | 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | 12 | jobs: 13 | stale: 14 | uses: ipdxco/unified-github-workflows/.github/workflows/reusable-stale-issue.yml@v1 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Temporary files used by some editors 2 | *.swp 3 | *~ 4 | 5 | # Temporary files created during Python file loading 6 | *.pyc 7 | **/__pycache__/ 8 | 9 | # Project storage of some editors 10 | /.idea 11 | /.project 12 | /.settings 13 | /.vscode 14 | /venv 15 | 16 | # Stuff that never was meant to 
be public 17 | /+junk 18 | 19 | # Build artefacts 20 | /coverage/ 21 | /build/ 22 | /dist/ 23 | 24 | # Documentation build artefacts 25 | docs/build/ 26 | 27 | # Testing artefacts 28 | go-ipfs/ 29 | .coverage 30 | .pytest_cache/ 31 | .tox/ 32 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # Config file for automatic testing at travis-ci.com 2 | 3 | language: python 4 | dist: bionic 5 | 6 | matrix: 7 | include: 8 | - python: "3.6" 9 | - python: "3.7" 10 | - python: "3.8" 11 | - python: "3.9" 12 | - python: "pypy3" # Python 3.6.12-7.3.3 as of April 2021 13 | - python: "3.9" 14 | env: IPFS_VERSION=compat 15 | - python: "3.9" 16 | env: TOXENV=py3-httpx 17 | - python: "3.9" 18 | env: TOXENV=styleck 19 | before_install: ":" 20 | - python: "3.9" 21 | env: TOXENV=typeck 22 | before_install: ":" 23 | 24 | # Testing on macOS/Darwin tends to be much slower so only test the bare minimum 25 | # 26 | # When changing any version here also update the relevant checksum below with 27 | # the values found on the https://python.org/ website. 28 | - os: osx 29 | language: shell 30 | env: PYTHON_VERSION=3.6.8-macosx10.9 31 | - os: osx 32 | language: shell 33 | env: PYTHON_VERSION=3.9.0-macosx10.9 34 | 35 | # Minimum testing version for Windows is 3.7 due to `-X utf8` flag support 36 | # being a life-saver for unbreaking the stupid INI parser used by all the 37 | # Python testing tools 38 | # 39 | # Python version numbers must exactly match a version in chocolatey as that 40 | # tool does not support version ranges at this time. 41 | - os: windows 42 | language: shell 43 | env: PYTHON_VERSION=3.7.6.20200110 44 | - os: windows 45 | language: shell 46 | env: PYTHON_VERSION=latest 47 | 48 | # Ensure go-IPFS is available for testing 49 | before_install: 50 | - |- 51 | ### ==== MODIFY THIS WHEN CHANGING TARGET OR MINIMUM IPFS VERSION ==== ### 52 | case "${IPFS_VERSION:-latest}" in 53 | latest) # Currently targeted version 54 | VERSION=0.8.0 55 | SHA512_LINUX=64d5464e5b8636c4e4d76a285350de23e77b03199037fc79e4a6ed65569788586af6993b7faa0a826a2b5ffca3795e67c0c10386f98d1be1842d9c284d3fcf07 56 | SHA512_DARWIN=38d2196c7bfde43661c323c862928eb183f75d9879550f2c0eafb2b9db0bdf1b577d3bb5a201b2287c4e753628affbbbb90d37d0f9d197d00256ebefb2ff7203 57 | SHA512_WINDOWS=f9e260ea039c4d263fd5ef9d6d9829b98f88d524a206985169a89549fdb46b329d2fee2ac3196885726781dae247dd1fed00a9dba3d4ddd86a6423f2374b2276 58 | ;; 59 | compat) # Earliest supported version 60 | VERSION=0.5.0 61 | SHA512_LINUX=583ea6920226cf47cc3a2856a1f87273df9a5150b9f7e765280eec4b2d438f7e0a8b60a7283a3567b86165085b9b8b49ee867dffa83234c8cc46055d7ab98d90 62 | ;; 63 | esac 64 | ### ------------------------------ END ------------------------------- ### 65 | 66 | set -u 67 | 68 | # Derive usable parameters from the above constants 69 | case "${TRAVIS_OS_NAME}" in 70 | linux) 71 | export IPFS_DL_PATH="go-ipfs/v${VERSION}/go-ipfs_v${VERSION}_linux-amd64.tar.gz" 72 | export IPFS_DL_SHA512="${SHA512_LINUX}" 73 | ;; 74 | osx) 75 | export IPFS_DL_PATH="go-ipfs/v${VERSION}/go-ipfs_v${VERSION}_darwin-amd64.tar.gz" 76 | export IPFS_DL_SHA512="${SHA512_DARWIN}" 77 | 78 | # Make the `sha512sum` command available under that name 79 | export PATH="$(echo /usr/local/Cellar/coreutils/*/libexec/gnubin):${PATH}" 80 | ;; 81 | windows) 82 | export IPFS_DL_PATH="go-ipfs/v${VERSION}/go-ipfs_v${VERSION}_windows-amd64.zip" 83 | export IPFS_DL_SHA512="${SHA512_WINDOWS}" 84 | ;; 
85 | esac 86 | export IPFS_DL_BASENAME="${IPFS_DL_PATH##*/}" 87 | 88 | set +u 89 | 90 | # Download the daemon application 91 | - wget "https://dist.ipfs.io/${IPFS_DL_PATH}" 2>&1 92 | # Verify its checksum 93 | - echo "${IPFS_DL_SHA512}  ${IPFS_DL_BASENAME}" | sha512sum -c 94 | # Extract verified archive 95 | - |- 96 | if [[ "${IPFS_DL_BASENAME}" =~ .*\.zip ]]; 97 | then 98 | unzip "${IPFS_DL_BASENAME}" 99 | else 100 | tar -xvf "${IPFS_DL_BASENAME}" 101 | fi 102 | # Add IPFS daemon to search path 103 | - export PATH="${PWD}/go-ipfs:${PATH}" 104 | 105 | install: 106 | # Install a suitable Python version for testing on Darwin and Windows; 107 | # and fix up the environment in whichever way is required 108 | - |- 109 | export PYTHON_VERSION="${PYTHON_VERSION:-${TRAVIS_PYTHON_VERSION}}" 110 | 111 | ### ====== MODIFY THIS WHEN CHANGING MACOS PYTHON TEST VERSIONS ====== ### 112 | case "${PYTHON_VERSION}" in 113 | 3.6.8-macosx10.9) MD5_MACOS="786c4d9183c754f58751d52f509bc971" ;; 114 | 3.9.0-macosx10.9) MD5_MACOS="16ca86fa3467e75bade26b8a9703c27f" ;; 115 | esac 116 | ### ------------------------------ END ------------------------------- ### 117 | 118 | set -eu 119 | if [[ "${TRAVIS_OS_NAME}" = "osx" ]]; 120 | then 121 | # Download and install official Python macOS installation package 122 | wget "https://www.python.org/ftp/python/${PYTHON_VERSION%%-*}/python-${PYTHON_VERSION}.pkg" -O /tmp/python.pkg 123 | echo "${MD5_MACOS}  /tmp/python.pkg" | md5sum -c 124 | sudo installer -pkg /tmp/python.pkg -target / 125 | elif [[ "${TRAVIS_OS_NAME}" = "windows" ]]; 126 | then 127 | # Install Windows Python from chocolatey 128 | VERSION_FLAG="" # Use latest version 129 | if [[ "${PYTHON_VERSION:-latest}" != latest ]]; 130 | then # Use specific version 131 | VERSION_FLAG="--version=${PYTHON_VERSION}" 132 | fi 133 | choco install python ${VERSION_FLAG} 134 | 135 | # Fix up Windows line endings incorrectly applied to test files 136 | find test/functional/fake_dir -type f -exec dos2unix \{\} \+ 137 | 138 | # Export sanely named python3 shell command 139 | python3() { 140 | py -3 -X utf8 "$@" 141 | } 142 | export -f python3 143 | fi 144 | set +eu 145 | 146 | # Install the test runner 147 | - python3 -m pip install tox 148 | 149 | # Fix up the tox environment name for PyPy 150 | - |- 151 | if [[ -z "${TOXENV+set}" && "${PYTHON_VERSION}" =~ pypy.* ]]; 152 | then 153 | export TOXENV=pypy3 154 | fi 155 | 156 | script: python3 -m tox -e "${TOXENV:-py3}" 157 | 158 | cache: 159 | pip: true 160 | directories: 161 | - $HOME/AppData/Local/Temp/chocolatey 162 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | py-ipfs-http-client 0.X.X (XX.XX.20XX) 2 | -------------------------------------- 3 | 4 | * (None yet) 5 | 6 | 7 | py-ipfs-http-client 0.8.0a2 (18.05.2021) 8 | ---------------------------------------- 9 | 10 | * Fix presentation of exception tree in documentation 11 | * Support publishing documentation to a non-local IPFS server 12 | 13 | 14 | py-ipfs-http-client 0.8.0a1 (18.05.2021) 15 | ---------------------------------------- 16 | 17 | * py-ipfs-http-client will now only warn when detecting an unsupported daemon version 18 | * Rationale: During the 0.4.x series the library required many changes to stay compatible 19 | with the respective daemon version, but since 0.5.0 this appears to no longer be the case 20 | * Compatibility bumped to go-IPFS 0.8.x (by Jan Rydzewski and other community members bugging me)
21 | 22 | **Breaking changes in this release**: 23 | 24 | * Dropped support for the (previously deprecated) `return_result` parameter of `.repo.gc(…)` 25 | * Dropped support for the previously deprecated and always undocumented `return_result` parameter everywhere else 26 | * Dropped support for go-IPFS 0.4.x; the minimum supported version is now 0.5.0 27 | 28 | 29 | py-ipfs-http-client 0.7.0 (15.03.2021) 30 | -------------------------------------- 31 | 32 | * No changes compared to 0.7.0a1 – breaking changes delayed to the unknown future 33 | 34 | 35 | py-ipfs-http-client 0.7.0a1 (14.10.2020) 36 | ---------------------------------------- 37 | 38 | * Added support for optional arguments of the `.dag.*` APIs (by João Meyer) 39 | * Compatibility bumped to go-IPFS 0.7.x (by Jan Rydzewski and other community members bugging me) 40 | * The 0.7 series is not stable yet; expect some breaking changes before the final release! 41 | 42 | 43 | py-ipfs-http-client 0.6.1 (26.08.2020) 44 | -------------------------------------- 45 | 46 | * Added typings for most of the public and private API and enabled type checking with `mypy` 47 | * Added support for connecting to the IPFS daemon using Unix domain sockets (implemented for both the requests and HTTPx backend) 48 | * Deprecated `.repo.gc(…)`'s `return_result` parameter in favour of the newly introduced `quiet` parameter to match the newer HTTP API 49 | * If you use the undocumented `return_result` parameter anywhere else, consider such use deprecated; support for this parameter will be removed in 0.7.X everywhere 50 | * Rationale: This parameter used to map to using the HTTP HEAD method to perform the given request without any reply being returned, but this feature has been dropped from the API with go-IPFS 0.5. 51 | * Implemented DAG APIs for go-IPFS 0.5+: `.dag.get`, `.dag.put`, `.dag.imprt` and `.dag.export` 52 | 53 | Bugfixes: 54 | 55 | * The value of the `timeout` parameter on `ipfshttpclient.{connect,Client}` is no longer ignored when using the `requests` HTTP backend (default) 56 | * (The per-API-call `timeout` parameter was unaffected by this.)
57 | * The HTTPx HTTP backend now properly applies address family restrictions encoded as part of the daemon MultiAddr (this needed a minor upstream change) 58 | 59 | py-ipfs-http-client 0.6.0 (30.06.2020) 60 | -------------------------------------- 61 | 62 | **Breaking changes in this release**: 63 | 64 | * The *recursive* parameter of `.add()` is no longer ignored and now enforces its default value of `False` (explicitly set it to `True` for the previous behaviour) 65 | * The glob pattern strings that may be passed to the `.add()` pattern parameter now actually behave like recursive glob patterns (see [the Python documentation](https://docs.python.org/3/library/glob.html) for how exactly) 66 | * Most functions that previously returned a dict with the raw JSON response now return a custom mapping type instead 67 | * This mapping type supports the original getitem syntax (`result["ItemName"]`) unchanged, but if you need an actual dictionary object you need to call `.as_json()` on it 68 | * In the future, response-specific subtypes with Pythonic accessors and object-specific methods will hopefully be added 69 | * HTTP basic authentication data to send to the API daemon must now be set as an `auth=(username, password)` tuple rather than using separate `username=` and `password=` parameters (see the example below)
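For example, connecting with HTTP basic authentication now looks like this (a minimal sketch – the daemon address and credentials below are placeholders):

```python
import ipfshttpclient

client = ipfshttpclient.connect(
	"/dns/ipfs.example.com/tcp/5001/https",
	auth=("basicauthuser", "basicauthpassword"),  # previously: username=…, password=…
)
```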
70 | 71 | Other changes: 72 | 73 | * Added support for go-IPFS 0.5.x 74 | * Adding directories with `.add()` has been greatly reworked: 75 | * It's now possible to specify arbitrary rules on which objects to include in a directory tree by passing a custom matcher object to the *pattern* parameter 76 | * The new *period_special* parameter allows toggling whether glob patterns match dot-files implicitly and defaults to `True` (previously it was effectively `False`) 77 | * The new *follow_symlinks* parameter similarly determines whether symbolic links will be followed when scanning directory trees and defaults to `False` (the previous default on Unix, though this likely wasn't intentional) 78 | * `.add()` will now limit its scan to the directories required to match the given glob patterns (passing in regular expression objects will still scan the tree unconditionally however) – custom matchers have full control over which directories are visited 79 | * The requests-based HTTP backend has been supplemented by another backend based on [HTTPx](https://www.python-httpx.org/) for Python 3.6+ 80 | * Due to a minor limitation within the library (no ability to apply address family restrictions during name resolution) this is currently included as a preview and must be enabled manually. To do this, ensure that the `httpx` library is installed in your Python environment and run your program with the environment variable *PY_IPFS_HTTP_CLIENT_PREFER_HTTPX* set to *yes*. 81 | * In the hopefully not too distant future, HTTPx will be used to finally provide async/await support for this library. 82 | 83 | py-ipfs-http-client 0.4.12 (21.05.2019) 84 | --------------------------------------- 85 | 86 | Bug fix release: 87 | 88 | * Fix compatibility with `urllib3` 1.25.* when connecting to HTTPS API servers 89 | 90 | py-ipfs-http-client 0.4.11 (13.05.2019) 91 | --------------------------------------- 92 | 93 | (Most of the following was also released as version 0.4.10 the previous day, but that release was never advertised and some issues were quickly found that necessitated a new release.) 94 | 95 | This release features several breaking changes compared to the previous *py-ipfs-api* library: 96 | 97 | * A new import name: `ipfsapi` → `ipfshttpclient` (thanks to @AlibabasMerchant) 98 | * The client API is now structured according to the [IPFS interface core specification](https://github.com/ipfs/interface-ipfs-core/tree/master/SPEC) 99 | * Daemon location is now described using [Multiaddr](https://github.com/multiformats/multiaddr) 100 | * Some deprecated methods have been dropped: 101 | * `bitswap_unwant`: API endpoint dropped by *go-ipfs* 102 | * `{get,set}_pyobj`: Can too easily be abused for arbitrary code execution, use `pickle.{loads,dumps}` if you really need this 103 | * `file_ls`: Long deprecated by *go-ipfs* and scheduled for removal, use plain `ls` instead 104 | 105 | Some new features added in this release: 106 | 107 | * Adding large directories doesn't read them all into memory any more before sending them to the daemon 108 | * API documentation has been improved 109 | * TCP connections may now be reused between API requests 110 | * `.add_json` now adds data as UTF-8 rather than using Unicode-escapes for shorter/more canonical data representation (thanks to @emmnx) 111 | * Several parameters have been added to existing methods: 112 | * Using [filestore](https://github.com/ipfs-filestore/go-ipfs/tree/master/filestore) is now possible (thanks to @radfish) 113 | * Universal per-call `offline` parameter added (thanks to @radfish) 114 | * Universal per-call `return_result` parameter added to issue `HEAD` requests and suppress results for speed (thanks to @loardcirth) 115 | * Universal per-call `timeout` parameter added (thanks to @AlibabasMerchant) 116 | * `.add`: `nocopy` & `raw_leaves` (thanks to @radfish) 117 | * `.ls`: `paths` (thanks to @radfish) 118 | * `.name.publish`: `allow_offline` (thanks to @radfish) 119 | * `.name.resolve`: `dht_record_count` & `dht_timeout` (thanks to @radfish) 120 | 121 | *go-ipfs* 0.4.20 has been blacklisted for having known compatibility problems, but 0.4.19 and 0.4.21 are OK. 122 | 123 | py-ipfs-api 0.4.4 (13.05.2019) 124 | ------------------------------ 125 | 126 | * Reimplement library as thin wrapper around the new *py-ipfs-http-client* library with helpful warnings about how to upgrade 127 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Andrew Stocker 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | 2 | # Release 3 | 4 | The release process uses `flit` to build the package and `Sphinx` to generate documentation, 5 | publishes the documentation to an IPFS server (obtaining an IPFS hash), and then links 6 | the IPFS hash (which varies based on content) to a static IPNS name. 7 | 8 | The IPNS name requires a private key, which is controlled by the project owners and not 9 | available to the public. 10 | 11 | All steps can be completed up to, but not including, linking the IPFS hash to IPNS. 12 | 13 | 14 | ## Pre-Requisites 15 | 16 | * On Debian 17 | * Python 3.8+ (or typings will be incomplete) 18 | 19 | 20 | ## One-Time Setup 21 | 22 | Install the release tools into your virtual environment: 23 | 24 | $ pip install -r tools/release/requirements.txt 25 | 26 | Source: [tools/release/requirements.txt](tools/release/requirements.txt) 27 | 28 | 29 | ### The Go IPFS daemon 30 | 31 | Yes, we use IPFS to host our documentation. If you haven't already, you can download it here: 32 | https://ipfs.io/docs/install/ 33 | 34 | 35 | ### A dedicated IPNS key for publishing 36 | 37 | For publishing the documentation, an IPNS key used only for this task should be 38 | generated if there is no such key already: 39 | 40 | `ipfs key gen --type ed25519 ipfs-http-client` 41 | 42 | This key will need to be copied to all other servers hosting the IPNS link. 43 | Without the private key, other servers can host the IPFS files, but not the IPNS link. 44 | 45 | At the time of writing the officially used key is: *12D3KooWEqnTdgqHnkkwarSrJjeMP2ZJiADWLYADaNvUb6SQNyPF* 46 | 47 | 48 | # Steps 49 | 50 | ## Update the source code 51 | 52 | 1. Make a GIT commit 53 | * Incrementing the version number in `ipfshttpclient/version.py` 54 | * Completing the currently open `CHANGELOG.md` entry 55 | 56 | `git commit -m "Release version 0.X.Y" ipfshttpclient/version.py CHANGELOG.md` 57 | 58 | 2. After the change is merged into master, pull master 59 | 60 | 3. Tag the GIT commit with the version number using an annotated and signed tag: 61 | 62 | `git tag --sign -m "Release version 0.X.Y" 0.X.Y` 63 | 64 | 4. Push the new tag 65 | 66 | 67 | ## Upload the new version to PyPI 68 | 69 | Run: 70 | 71 | $ flit build && flit publish 72 | 73 | ## Re-generate and publish the documentation 74 | 75 | Run: 76 | 77 | $ python docs/publish.py ipns-key-id 78 | 79 | The command will also print a command line that may be used to mirror the generated 80 | documentation on systems other than the current one.
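That printed command has the following general shape (illustrative only – `publish.py` fills in the actual content hash and IPNS name):

    $ ipfs pin add <hash-main> && ipfs name publish -k <ipns-name> /ipfs/<hash-main>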
81 | 82 | If you don't have the IPNS private key, you can still exercise the documentation 83 | generation and publish process: 84 | 85 | $ python docs/publish.py 86 | 87 | If you are publishing to an IPFS server that is remote and protected by an HTTP reverse proxy 88 | with TLS and basic authentication, run this instead: 89 | 90 | $ PY_IPFS_HTTP_CLIENT_DEFAULT_ADDR=/dns/yourserver.tld/tcp/5001/https PY_IPFS_HTTP_CLIENT_DEFAULT_USERNAME=basicauthuser PY_IPFS_HTTP_CLIENT_DEFAULT_PASSWORD=basicauthpassword python docs/publish.py ipns-key-id 91 | 92 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = -W --keep-going 6 | SPHINXBUILD = sphinx-build 7 | PAPER = a4 8 | BUILDDIR = build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 16 | 17 | .PHONY: help 18 | help: 19 | @echo "Please use \`make <target>' where <target> is one of" 20 | @echo " html to make standalone HTML files" 21 | @echo " dirhtml to make HTML files named index.html in directories" 22 | @echo " singlehtml to make a single large HTML file" 23 | @echo " pickle to make pickle files" 24 | @echo " json to make JSON files" 25 | @echo " htmlhelp to make HTML files and an HTML help project" 26 | @echo " qthelp to make HTML files and a qthelp project" 27 | @echo " applehelp to make an Apple Help Book" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " epub3 to make an epub3" 31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 32 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 33 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 34 | @echo " text to make text files" 35 | @echo " man to make manual pages" 36 | @echo " texinfo to make Texinfo files" 37 | @echo " info to make Texinfo files and run them through makeinfo" 38 | @echo " gettext to make PO message catalogs" 39 | @echo " changes to make an overview of all changed/added/deprecated items" 40 | @echo " xml to make Docutils-native XML files" 41 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 42 | @echo " linkcheck to check all external links for integrity" 43 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 44 | @echo " coverage to run coverage check of the documentation (if enabled)" 45 | @echo " dummy to check syntax errors of document sources" 46 | 47 | .PHONY: clean 48 | clean: 49 | rm -rf $(BUILDDIR)/* 50 | 51 | .PHONY: html 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | .PHONY: dirhtml 58 | dirhtml: 59 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 60 | @echo 61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 62 | 63 | .PHONY: singlehtml 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
68 | 69 | .PHONY: pickle 70 | pickle: 71 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 72 | @echo 73 | @echo "Build finished; now you can process the pickle files." 74 | 75 | .PHONY: json 76 | json: 77 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 78 | @echo 79 | @echo "Build finished; now you can process the JSON files." 80 | 81 | .PHONY: htmlhelp 82 | htmlhelp: 83 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 84 | @echo 85 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 86 | ".hhp project file in $(BUILDDIR)/htmlhelp." 87 | 88 | .PHONY: qthelp 89 | qthelp: 90 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 91 | @echo 92 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 93 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 94 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PythonIPFSHTTPClient.qhcp" 95 | @echo "To view the help file:" 96 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PythonIPFSHTTPClient.qhc" 97 | 98 | .PHONY: applehelp 99 | applehelp: 100 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 101 | @echo 102 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 103 | @echo "N.B. You won't be able to view it unless you put it in" \ 104 | "~/Library/Documentation/Help or install it in your application" \ 105 | "bundle." 106 | 107 | .PHONY: devhelp 108 | devhelp: 109 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 110 | @echo 111 | @echo "Build finished." 112 | @echo "To view the help file:" 113 | @echo "# mkdir -p $$HOME/.local/share/devhelp/PythonIPFSHTTPClient" 114 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PythonIPFSHTTPClient" 115 | @echo "# devhelp" 116 | 117 | .PHONY: epub 118 | epub: 119 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 120 | @echo 121 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 122 | 123 | .PHONY: epub3 124 | epub3: 125 | $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 126 | @echo 127 | @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." 128 | 129 | .PHONY: latex 130 | latex: 131 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 132 | @echo 133 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 134 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 135 | "(use \`make latexpdf' here to do that automatically)." 136 | 137 | .PHONY: latexpdf 138 | latexpdf: 139 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 140 | @echo "Running LaTeX files through pdflatex..." 141 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 142 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 143 | 144 | .PHONY: latexpdfja 145 | latexpdfja: 146 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 147 | @echo "Running LaTeX files through platex and dvipdfmx..." 148 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 149 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 150 | 151 | .PHONY: text 152 | text: 153 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 154 | @echo 155 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 156 | 157 | .PHONY: man 158 | man: 159 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 160 | @echo 161 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 
162 | 163 | .PHONY: texinfo 164 | texinfo: 165 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 166 | @echo 167 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 168 | @echo "Run \`make' in that directory to run these through makeinfo" \ 169 | "(use \`make info' here to do that automatically)." 170 | 171 | .PHONY: info 172 | info: 173 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 174 | @echo "Running Texinfo files through makeinfo..." 175 | make -C $(BUILDDIR)/texinfo info 176 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 177 | 178 | .PHONY: gettext 179 | gettext: 180 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 181 | @echo 182 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 183 | 184 | .PHONY: changes 185 | changes: 186 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 187 | @echo 188 | @echo "The overview file is in $(BUILDDIR)/changes." 189 | 190 | .PHONY: linkcheck 191 | linkcheck: 192 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 193 | @echo 194 | @echo "Link check complete; look for any errors in the above output " \ 195 | "or in $(BUILDDIR)/linkcheck/output.txt." 196 | 197 | .PHONY: doctest 198 | doctest: 199 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 200 | @echo "Testing of doctests in the sources finished, look at the " \ 201 | "results in $(BUILDDIR)/doctest/output.txt." 202 | 203 | .PHONY: coverage 204 | coverage: 205 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 206 | @echo "Testing of coverage in the sources finished, look at the " \ 207 | "results in $(BUILDDIR)/coverage/python.txt." 208 | 209 | .PHONY: xml 210 | xml: 211 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 212 | @echo 213 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 214 | 215 | .PHONY: pseudoxml 216 | pseudoxml: 217 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 218 | @echo 219 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 220 | 221 | .PHONY: dummy 222 | dummy: 223 | $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy 224 | @echo 225 | @echo "Build finished. Dummy builder generates no files." 226 | -------------------------------------------------------------------------------- /docs/http_client_ref.md: -------------------------------------------------------------------------------- 1 | HTTP Client Reference 2 | -------------------- 3 | 4 | All commands are accessed through the ``ipfshttpclient.Client`` class. 5 | 6 | ### Exceptions 7 | 8 | ```eval_rst 9 | .. automodule:: ipfshttpclient.exceptions 10 | :members: 11 | ``` 12 | 13 | 14 | ### Utility Functions 15 | 16 | ```eval_rst 17 | .. data:: ipfshttpclient.DEFAULT_ADDR 18 | 19 | The default IPFS API daemon location the client library will attempt to 20 | connect to. By default this will have a value of ``multiaddr.Multiaddr("/dns/localhost/tcp/5001/http")``. 21 | 22 | This may be overwritten on a per-client-instance basis using 23 | the ``addr`` parameter of the :func:`~ipfshttpclient.connect` function. 24 | 25 | .. data:: ipfshttpclient.DEFAULT_BASE 26 | 27 | The default HTTP URL path prefix (or “base”) that the client library will use. 28 | By default this will have a value of ``"api/v0"``. 29 | 30 | This may be overwritten on a per-client-instance basis using the ``base`` 31 | parameter of the :func:`~ipfshttpclient.connect` function. 32 | 33 | .. 
autofunction:: ipfshttpclient.connect(addr=DEFAULT_ADDR, base=DEFAULT_BASE) 34 | 35 | .. autofunction:: ipfshttpclient.assert_version 36 | 37 | ``` 38 | 39 | ### The API Client 40 | 41 | All methods accept the following parameters in their `kwargs`: 42 | 43 | * **offline** ([**`bool`**](https://docs.python.org/3/library/functions.html#bool)) – Prevent the daemon from communicating with any remote IPFS node while performing the requested action? 44 | * **opts** ([**`dict`**](https://docs.python.org/3/library/stdtypes.html#dict)) – A mapping of custom IPFS API parameters to be sent along with the regular parameters generated by the client library 45 | * Values specified here will always override their respective counterparts 46 | of the client library itself. 47 | * **stream** ([**`bool`**](https://docs.python.org/3/library/functions.html#bool)) – Return results incrementally as they arrive? 48 | * Each method called with `stream=True` will return a generator instead 49 | of the documented value. If the return type is of type `list` then each 50 | item of the given list will be yielded separately; if it is of type 51 | `bytes` then arbitrary bags of bytes will be yielded that together form 52 | a stream; finally, if it is of type `dict` then the single dictionary item 53 | will be yielded once. 54 | * **timeout** ([**`float`**](https://docs.python.org/3/library/functions.html#float)) – The number of seconds to wait for a daemon reply before giving up (see the example below)
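For example, the universal `timeout` and `stream` parameters can be passed to any method like this (a minimal sketch, assuming a daemon is reachable at the default address):

```python
import ipfshttpclient

client = ipfshttpclient.connect()

# Give up if the daemon takes more than 5 seconds to reply
print(client.version(timeout=5))

# With `stream=True` a list-returning method yields each item
# incrementally as it arrives instead of returning one big list
for pinned in client.pin.ls(type="recursive", stream=True):
	print(pinned)
```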
55 | 56 | ```eval_rst 57 | .. autoclientclass:: ipfshttpclient.Client 58 | :members: 59 | :inherited-members: 60 | :undoc-members: 61 | 62 | ``` 63 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | Python IPFS HTTP Client 2 | ======================= 3 | 4 | Contents 5 | -------- 6 | 7 | * [HTTP Client Reference](http_client_ref.md) 8 | * [Internal HTTP Client Reference](internal_ref.md) 9 | 10 | Indices and tables 11 | ------------------ 12 | 13 | ```eval_rst 14 | * :ref:`genindex` 15 | * :ref:`modindex` 16 | * :ref:`search` 17 | ``` 18 | 19 | -------------------------------------------------------------------------------- /docs/internal_ref.md: -------------------------------------------------------------------------------- 1 | Internal HTTP Client Reference 2 | ------------------------------ 3 | 4 | ### `encoding` 5 | 6 | ```eval_rst 7 | .. automodule:: ipfshttpclient.encoding 8 | :members: 9 | :show-inheritance: 10 | 11 | ``` 12 | 13 | ### `http` 14 | 15 | ```eval_rst 16 | .. automodule:: ipfshttpclient.http 17 | :members: 18 | :show-inheritance: 19 | 20 | ``` 21 | 22 | ### `multipart` 23 | 24 | ```eval_rst 25 | .. automodule:: ipfshttpclient.multipart 26 | :members: 27 | :show-inheritance: 28 | 29 | ``` 30 | 31 | ### `utils` 32 | 33 | ```eval_rst 34 | .. automodule:: ipfshttpclient.utils 35 | :members: 36 | :show-inheritance: 37 | 38 | ``` 39 | -------------------------------------------------------------------------------- /docs/publish.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import os 3 | from pathlib import Path 4 | 5 | import sphinx.cmd.build 6 | import sys 7 | import typing as ty 8 | 9 | script_dir = Path(__file__).absolute().parent 10 | sys.path.insert(0, str(script_dir.parent)) 11 | 12 | import ipfshttpclient 13 | 14 | os.chdir(script_dir) 15 | 16 | 17 | def main(argv: ty.List[str]) -> int: 18 | if len(argv) >= 1: 19 | ipns_key = argv[0] 20 | else: 21 | ipns_key = None 22 | 23 | print("Usage: {0} [IPNS-key]".format(os.path.basename(__file__))) 24 | print() 25 | print('To connect to a remote IPFS daemon, set environment variables:') 26 | print() 27 | print(' PY_IPFS_HTTP_CLIENT_DEFAULT_ADDR') 28 | print(' PY_IPFS_HTTP_CLIENT_DEFAULT_USERNAME') 29 | print(' PY_IPFS_HTTP_CLIENT_DEFAULT_PASSWORD') 30 | print() 31 | print("!! Continuing without publishing to IPNS !!") 32 | print() 33 | 34 | return publish( 35 | ipns_key=ipns_key 36 | ) 37 | 38 | 39 | def publish(ipns_key: ty.Optional[str]) -> int: 40 | # Invoke Sphinx like the Makefile does 41 | result = sphinx.cmd.build.build_main([ 42 | "-b", "html", 43 | "-d", "build/doctrees", 44 | ".", "build/html", 45 | "-W", "--keep-going" 46 | ]) 47 | 48 | if result != 0: 49 | return result 50 | 51 | print() 52 | print(f"Exporting files to IPFS server at {ipfshttpclient.DEFAULT_ADDR}…") 53 | client = ipfshttpclient.connect() 54 | print('Adding files…') 55 | hash_docs = client.add("build/html", recursive=True, raw_leaves=True, pin=False)[-1]["Hash"] 56 | print('Getting directory hash…') 57 | hash_main = client.object.new("unixfs-dir")["Hash"] 58 | print('Getting docs hash…') 59 | hash_main = client.object.patch.add_link(hash_main, "docs", hash_docs)["Hash"] 60 | print(f'Pinning docs hash {hash_main}…') 61 | client.pin.add(hash_main) 62 | 63 | print("Final IPFS path:") 64 | print(f' /ipfs/{hash_main}') 65 | print(f' https://ipfs.io/ipfs/{hash_main}') 66 | 67 | if ipns_key: 68 | print() 69 | print("Exporting files to IPNS…") 70 | name_main = client.name.publish(hash_main, key=ipns_key)["Name"] 71 | print("Final IPNS path: /ipns/{0}".format(name_main)) 72 | 73 | print() 74 | print("Run the following commandline on all systems that mirror this documentation:") 75 | print(" ipfs pin add {0} && ipfs name publish -k {1} /ipfs/{0}".format(hash_main, name_main)) 76 | 77 | return 0 78 | 79 | 80 | if __name__ == "__main__": 81 | sys.exit(main(sys.argv[1:])) 82 | -------------------------------------------------------------------------------- /docs/py-cid.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-shipyard/py-ipfs-http-client/f260c1803369b395150dd598de838850da184403/docs/py-cid.inv -------------------------------------------------------------------------------- /ipfshttpclient/__init__.py: -------------------------------------------------------------------------------- 1 | """Python IPFS HTTP CLIENT library""" 2 | 3 | 4 | from .version import __version__ 5 | 6 | ################################### 7 | # Import stable HTTP CLIENT parts # 8 | ################################### 9 | from .
import exceptions 10 | 11 | from .client import DEFAULT_ADDR, \ 12 | DEFAULT_BASE, \ 13 | DEFAULT_USERNAME, \ 14 | DEFAULT_PASSWORD 15 | from .client import VERSION_MINIMUM, VERSION_MAXIMUM 16 | from .client import Client, assert_version, connect 17 | -------------------------------------------------------------------------------- /ipfshttpclient/client/bitswap.py: -------------------------------------------------------------------------------- 1 | import typing as ty 2 | 3 | from . import base 4 | 5 | 6 | class Section(base.SectionBase): 7 | @base.returns_single_item(base.ResponseBase) 8 | def wantlist(self, peer: ty.Optional[str] = None, **kwargs: base.CommonArgs): 9 | """Returns blocks currently on the bitswap wantlist 10 | 11 | .. code-block:: python 12 | 13 | >>> client.bitswap.wantlist() 14 | {'Keys': [ 15 | 'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb', 16 | 'QmdCWFLDXqgdWQY9kVubbEHBbkieKd3uo7MtCm7nTZZE9K', 17 | 'QmVQ1XvYGF19X4eJqz1s7FJYJqAxFC4oqh3vWJJEXn66cp' 18 | ]} 19 | 20 | Parameters 21 | ---------- 22 | peer 23 | Peer to show wantlist for 24 | 25 | Returns 26 | ------- 27 | dict 28 | 29 | +------+----------------------------------------------------+ 30 | | Keys | List of blocks the connected daemon is looking for | 31 | +------+----------------------------------------------------+ 32 | """ 33 | args = (peer,) 34 | return self._client.request('/bitswap/wantlist', args, decoder='json', **kwargs) 35 | 36 | 37 | @base.returns_single_item(base.ResponseBase) 38 | def stat(self, **kwargs: base.CommonArgs): 39 | """Returns some diagnostic information from the bitswap agent 40 | 41 | .. code-block:: python 42 | 43 | >>> client.bitswap.stat() 44 | {'BlocksReceived': 96, 45 | 'DupBlksReceived': 73, 46 | 'DupDataReceived': 2560601, 47 | 'ProviderBufLen': 0, 48 | 'Peers': [ 49 | 'QmNZFQRxt9RMNm2VVtuV2Qx7q69bcMWRVXmr5CEkJEgJJP', 50 | 'QmNfCubGpwYZAQxX8LQDsYgB48C4GbfZHuYdexpX9mbNyT', 51 | 'QmNfnZ8SCs3jAtNPc8kf3WJqJqSoX7wsX7VqkLdEYMao4u', 52 | … 53 | ], 54 | 'Wantlist': [ 55 | 'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb', 56 | 'QmdCWFLDXqgdWQY9kVubbEHBbkieKd3uo7MtCm7nTZZE9K', 57 | 'QmVQ1XvYGF19X4eJqz1s7FJYJqAxFC4oqh3vWJJEXn66cp' 58 | ] 59 | } 60 | 61 | Returns 62 | ------- 63 | dict 64 | Statistics, peers and wanted blocks 65 | """ 66 | return self._client.request('/bitswap/stat', decoder='json', **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/block.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | from .. import multipart 4 | from .. import utils 5 | 6 | 7 | class Section(base.SectionBase): 8 | """Interacting with raw IPFS blocks""" 9 | 10 | def get(self, cid: base.cid_t, **kwargs: base.CommonArgs): 11 | r"""Returns the raw contents of a block 12 | 13 | .. code-block:: python 14 | 15 | >>> client.block.get('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') 16 | b'\x121\n"\x12 \xdaW>\x14\xe5\xc1\xf6\xe4\x92\xd1 … \n\x02\x08\x01' 17 | 18 | Parameters 19 | ---------- 20 | cid 21 | The CID of an existing block to get 22 | 23 | Returns 24 | ------- 25 | bytes 26 | Contents of the requested block 27 | """ 28 | args = (str(cid),) 29 | return self._client.request('/block/get', args, **kwargs) 30 | 31 | 32 | @base.returns_single_item(base.ResponseBase) 33 | def put(self, file: utils.clean_file_t, 34 | **kwargs: base.CommonArgs): 35 | """Stores the contents of the given file object as an IPFS block 36 | 37 | .. 
code-block:: python 38 | 39 | >>> client.block.put(io.BytesIO(b'Mary had a little lamb')) 40 | {'Key': 'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb', 41 | 'Size': 22} 42 | 43 | Parameters 44 | ---------- 45 | file 46 | The data to be stored as an IPFS block 47 | 48 | Returns 49 | ------- 50 | dict 51 | Information about the new block 52 | 53 | See :meth:`~ipfshttpclient.Client.block.stat` 54 | """ 55 | body, headers = multipart.stream_files(file, chunk_size=self.chunk_size) 56 | return self._client.request('/block/put', decoder='json', data=body, 57 | headers=headers, **kwargs) 58 | 59 | 60 | @base.returns_single_item(base.ResponseBase) 61 | def stat(self, cid: base.cid_t, **kwargs: base.CommonArgs): 62 | """Returns a dict with the size of the block with the given hash. 63 | 64 | .. code-block:: python 65 | 66 | >>> client.block.stat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') 67 | {'Key': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D', 68 | 'Size': 258} 69 | 70 | Parameters 71 | ---------- 72 | cid 73 | The CID of an existing block to stat 74 | 75 | Returns 76 | ------- 77 | dict 78 | Information about the requested block 79 | """ 80 | args = (str(cid),) 81 | return self._client.request('/block/stat', args, decoder='json', **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/bootstrap.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | 4 | class Section(base.SectionBase): 5 | @base.returns_single_item(base.ResponseBase) 6 | def add(self, peer: base.multiaddr_t, *peers: base.multiaddr_t, 7 | **kwargs: base.CommonArgs): 8 | """Adds peers to the bootstrap list 9 | 10 | Parameters 11 | ---------- 12 | peer 13 | IPFS Multiaddr of a peer to add to the list 14 | 15 | Returns 16 | ------- 17 | dict 18 | """ 19 | args = (str(peer), *(str(p) for p in peers)) 20 | return self._client.request('/bootstrap/add', args, decoder='json', **kwargs) 21 | 22 | 23 | @base.returns_single_item(base.ResponseBase) 24 | def list(self, **kwargs: base.CommonArgs): 25 | """Returns the addresses of peers used during initial discovery of the 26 | IPFS network 27 | 28 | Peers are output in the format ``<multiaddr>/<peerID>``. 29 | 30 | ..
code-block:: python 31 | 32 | >>> client.bootstrap.list() 33 | {'Peers': [ 34 | '/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYER … uvuJ', 35 | '/ip4/104.236.176.52/tcp/4001/ipfs/QmSoLnSGccFuZQJzRa … ca9z', 36 | '/ip4/104.236.179.241/tcp/4001/ipfs/QmSoLPppuBtQSGwKD … KrGM', 37 | … 38 | '/ip4/178.62.61.185/tcp/4001/ipfs/QmSoLMeWqB7YGVLJN3p … QBU3' 39 | ]} 40 | 41 | Returns 42 | ------- 43 | dict 44 | 45 | +-------+-------------------------------+ 46 | | Peers | List of known bootstrap peers | 47 | +-------+-------------------------------+ 48 | """ 49 | return self._client.request('/bootstrap', decoder='json', **kwargs) 50 | 51 | 52 | @base.returns_single_item(base.ResponseBase) 53 | def rm(self, peer: base.multiaddr_t, *peers: base.multiaddr_t, 54 | **kwargs: base.CommonArgs): 55 | """Removes peers from the bootstrap list 56 | 57 | Parameters 58 | ---------- 59 | peer 60 | IPFS Multiaddr of a peer to remove from the list 61 | 62 | Returns 63 | ------- 64 | dict 65 | """ 66 | args = (str(peer), *(str(p) for p in peers)) 67 | return self._client.request('/bootstrap/rm', args, decoder='json', **kwargs) 68 | -------------------------------------------------------------------------------- /ipfshttpclient/client/config.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | from .. import utils 4 | 5 | 6 | class Section(base.SectionBase): 7 | @base.returns_single_item(base.ResponseBase) 8 | def get(self, **kwargs: base.CommonArgs): 9 | #TODO: Support the optional `key` parameter 10 | """Returns the currently used node configuration 11 | 12 | .. code-block:: python 13 | 14 | >>> config = client.config.get() 15 | >>> config['Addresses'] 16 | {'API': '/ip4/127.0.0.1/tcp/5001', 17 | 'Gateway': '/ip4/127.0.0.1/tcp/8080', 18 | 'Swarm': ['/ip4/0.0.0.0/tcp/4001', '/ip6/::/tcp/4001']}, 19 | >>> config['Discovery'] 20 | {'MDNS': {'Enabled': True, 'Interval': 10}} 21 | 22 | Returns 23 | ------- 24 | dict 25 | The entire IPFS daemon configuration 26 | """ 27 | return self._client.request('/config/show', decoder='json', **kwargs) 28 | 29 | 30 | @base.returns_single_item(base.ResponseBase) 31 | def replace(self, config: utils.json_dict_t, **kwargs: base.CommonArgs): 32 | """Replaces the existing configuration with a new configuration tree 33 | 34 | Make sure to back up the config file first if necessary, as this 35 | operation cannot be undone. 36 | """ 37 | return self._client.request('/config/replace', (config,), decoder='json', **kwargs) 38 | 39 | 40 | @base.returns_single_item(base.ResponseBase) 41 | def set(self, key: str, value: utils.json_value_t = None, **kwargs: base.CommonArgs): 42 | """Adds or replaces a single configuration value 43 | 44 | .. code-block:: python 45 | 46 | >>> client.config.set("Addresses.Gateway") 47 | {'Key': 'Addresses.Gateway', 'Value': '/ip4/127.0.0.1/tcp/8080'} 48 | >>> client.config.set("Addresses.Gateway", "/ip4/127.0.0.1/tcp/8081") 49 | {'Key': 'Addresses.Gateway', 'Value': '/ip4/127.0.0.1/tcp/8081'} 50 | 51 | Parameters 52 | ---------- 53 | key 54 | The key of the configuration entry (e.g.
"Addresses.API") 55 | value 56 | The value to set the configuration entry to 57 | 58 | Returns 59 | ------- 60 | dict 61 | 62 | +-------+---------------------------------------------+ 63 | | Key | The requested configuration key | 64 | +-------+---------------------------------------------+ 65 | | Value | The new value of the this configuration key | 66 | +-------+---------------------------------------------+ 67 | """ 68 | args = (key, value) 69 | return self._client.request('/config', args, decoder='json', **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/dag.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | from .. import multipart 4 | from .. import utils 5 | 6 | 7 | class Section(base.SectionBase): 8 | @base.returns_single_item(base.ResponseBase) 9 | def get(self, cid: base.cid_t, **kwargs: base.CommonArgs): 10 | """Retrieves the contents of a DAG node 11 | 12 | .. code-block:: python 13 | 14 | >>> client.dag.get('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') 15 | {'Data': '\x08\x01', 16 | 'Links': [ 17 | {'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV', 18 | 'Name': 'Makefile', 'Size': 174}, 19 | {'Hash': 'QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX', 20 | 'Name': 'example', 'Size': 1474}, 21 | {'Hash': 'QmZAL3oHMQYqsV61tGvoAVtQLs1WzRe1zkkamv9qxqnDuK', 22 | 'Name': 'home', 'Size': 3947}, 23 | {'Hash': 'QmZNPyKVriMsZwJSNXeQtVQSNU4v4KEKGUQaMT61LPahso', 24 | 'Name': 'lib', 'Size': 268261}, 25 | {'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY', 26 | 'Name': 'published-version', 'Size': 55} 27 | ]} 28 | 29 | Parameters 30 | ---------- 31 | cid 32 | Key of the object to retrieve, in CID format 33 | 34 | Returns 35 | ------- 36 | dict 37 | Cid with the address of the dag object 38 | """ 39 | args = (str(cid),) 40 | return self._client.request('/dag/get', args, decoder='json', **kwargs) 41 | 42 | @base.returns_single_item(base.ResponseBase) 43 | def put(self, data: utils.clean_file_t, format: str = 'cbor', 44 | input_enc: str = 'json', **kwargs: base.CommonArgs): 45 | """Decodes the given input file as a DAG object and returns their key 46 | 47 | .. code-block:: python 48 | 49 | >>> client.dag.put(io.BytesIO(b''' 50 | ... { 51 | ... "Data": "another", 52 | ... "Links": [ { 53 | ... "Name": "some link", 54 | ... "Hash": "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCV … R39V", 55 | ... "Size": 8 56 | ... } ] 57 | ... }''')) 58 | {'Cid': { 59 | '/': 'bafyreifgjgbmtykld2e3yncey3naek5xad3h4m2pxmo3of376qxh54qk34' 60 | } 61 | } 62 | 63 | Parameters 64 | ---------- 65 | data 66 | IO stream object of path to a file containing the data to put 67 | format 68 | Format that the object will be added as. Default: cbor 69 | input_enc 70 | Format that the input object will be. Default: json 71 | 72 | Returns 73 | ------- 74 | dict 75 | Cid with the address of the dag object 76 | """ 77 | opts = {'format': format, 'input-enc': input_enc} 78 | kwargs.setdefault('opts', {}).update(opts) 79 | body, headers = multipart.stream_files(data, chunk_size=self.chunk_size) 80 | return self._client.request('/dag/put', decoder='json', data=body, 81 | headers=headers, **kwargs) 82 | 83 | @base.returns_single_item(base.ResponseBase) 84 | def resolve(self, cid: base.cid_t, **kwargs: base.CommonArgs): 85 | """Resolves a DAG node from its CID, returning its address and remaining path 86 | 87 | .. 
code-block:: python 88 | 89 | >>> client.dag.resolve('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') 90 | {'Cid': { 91 | '/': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' 92 | } 93 | } 94 | 95 | Parameters 96 | ---------- 97 | cid 98 | Key of the object to resolve, in CID format 99 | 100 | Returns 101 | ------- 102 | dict 103 | Cid with the address of the dag object 104 | """ 105 | args = (str(cid),) 106 | return self._client.request('/dag/resolve', args, decoder='json', **kwargs) 107 | 108 | @base.returns_single_item(base.ResponseBase) 109 | def imprt(self, data: utils.clean_file_t, **kwargs: base.CommonArgs): 110 | """Imports a .car file with a DAG into IPFS 111 | 112 | .. code-block:: python 113 | 114 | >>> with open('data.car', 'rb') as file: 115 | ... client.dag.imprt(file) 116 | {'Root': { 117 | 'Cid': { 118 | '/': 'bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya' 119 | } 120 | } 121 | } 122 | 123 | *Note*: This method is named ``.imprt`` (rather than ``.import``) to avoid causing a Python 124 | :exc:`SyntaxError` due to ``import`` being a reserved keyword in Python. 125 | 126 | Parameters 127 | ---------- 128 | data 129 | IO stream object with data that should be imported 130 | 131 | Returns 132 | ------- 133 | dict 134 | Dictionary with the root CID of the DAG imported 135 | """ 136 | body, headers = multipart.stream_files(data, chunk_size=self.chunk_size) 137 | return self._client.request('/dag/import', decoder='json', data=body, 138 | headers=headers, **kwargs) 139 | 140 | def export(self, cid: str, **kwargs: base.CommonArgs): 141 | """Exports a DAG into a .car file format 142 | 143 | .. code-block:: python 144 | 145 | >>> data = client.dag.export('bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya') 146 | 147 | *Note*: When exporting larger DAG structures, remember that you can set the *stream* 148 | parameter to ``True`` on any method to have it return results incrementally. 149 | 150 | Parameters 151 | ---------- 152 | cid 153 | Key of the object to export, in CID format 154 | 155 | Returns 156 | ------- 157 | bytes 158 | DAG in a .car format 159 | """ 160 | args = (str(cid),) 161 | return self._client.request('/dag/export', args, **kwargs)
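	# Example (a sketch, not part of the library): the universal ``stream=True``
	# parameter can be used to spool a large DAG export to disk without
	# buffering the entire .car file in memory:
	#
	#     with open("backup.car", "wb") as f:
	#         for chunk in client.dag.export(cid, stream=True):
	#             f.write(chunk)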
162 | -------------------------------------------------------------------------------- /ipfshttpclient/client/dht.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | from .. import exceptions 4 | 5 | 6 | class Section(base.SectionBase): 7 | @base.returns_single_item(base.ResponseBase) 8 | def findpeer(self, peer_id: str, *peer_ids: str, **kwargs: base.CommonArgs): 9 | """Queries the DHT for all of the associated multiaddresses 10 | 11 | .. code-block:: python 12 | 13 | >>> client.dht.findpeer("QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZN … MTLZ") 14 | [{'ID': 'QmfVGMFrwW6AV6fTWmD6eocaTybffqAvkVLXQEFrYdk6yc', 15 | 'Extra': '', 'Type': 6, 'Responses': None}, 16 | {'ID': 'QmTKiUdjbRjeN9yPhNhG1X38YNuBdjeiV9JXYWzCAJ4mj5', 17 | 'Extra': '', 'Type': 6, 'Responses': None}, 18 | {'ID': 'QmTGkgHSsULk8p3AKTAqKixxidZQXFyF7mCURcutPqrwjQ', 19 | 'Extra': '', 'Type': 6, 'Responses': None}, 20 | … 21 | {'ID': '', 'Extra': '', 'Type': 2, 22 | 'Responses': [ 23 | {'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ', 24 | 'Addrs': [ 25 | '/ip4/10.9.8.1/tcp/4001', 26 | '/ip6/::1/tcp/4001', 27 | '/ip4/164.132.197.107/tcp/4001', 28 | '/ip4/127.0.0.1/tcp/4001']} 29 | ]}] 30 | 31 | Parameters 32 | ---------- 33 | peer_id 34 | The ID of the peer to search for 35 | 36 | Returns 37 | ------- 38 | dict 39 | List of multiaddrs 40 | """ 41 | args = (peer_id,) + peer_ids 42 | return self._client.request('/dht/findpeer', args, decoder='json', **kwargs) 43 | 44 | 45 | @base.returns_multiple_items(base.ResponseBase) 46 | def findprovs(self, cid: base.cid_t, *cids: base.cid_t, **kwargs: base.CommonArgs): 47 | """Finds peers in the DHT that can provide a specific value 48 | 49 | .. code-block:: python 50 | 51 | >>> client.dht.findprovs("QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQu … mpW2") 52 | [{'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ', 53 | 'Extra': '', 'Type': 6, 'Responses': None}, 54 | {'ID': 'QmaK6Aj5WXkfnWGoWq7V8pGUYzcHPZp4jKQ5JtmRvSzQGk', 55 | 'Extra': '', 'Type': 6, 'Responses': None}, 56 | {'ID': 'QmdUdLu8dNvr4MVW1iWXxKoQrbG6y1vAVWPdkeGK4xppds', 57 | 'Extra': '', 'Type': 6, 'Responses': None}, 58 | … 59 | {'ID': '', 'Extra': '', 'Type': 4, 'Responses': [ 60 | {'ID': 'QmVgNoP89mzpgEAAqK8owYoDEyB97Mk … E9Uc', 'Addrs': None} 61 | ]}, 62 | {'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ', 63 | 'Extra': '', 'Type': 1, 'Responses': [ 64 | {'ID': 'QmSHXfsmN3ZduwFDjeqBn1C8b1tcLkxK6yd … waXw', 'Addrs': [ 65 | '/ip4/127.0.0.1/tcp/4001', 66 | '/ip4/172.17.0.8/tcp/4001', 67 | '/ip6/::1/tcp/4001', 68 | '/ip4/52.32.109.74/tcp/1028' 69 | ]} 70 | ]}] 71 | 72 | Parameters 73 | ---------- 74 | cid 75 | The DHT key to find providers for 76 | 77 | Returns 78 | ------- 79 | dict 80 | List of provider Peer IDs 81 | """ 82 | args = (str(cid),) + tuple(str(c) for c in cids) 83 | return self._client.request('/dht/findprovs', args, decoder='json', **kwargs) 84 | 85 | 86 | @base.returns_single_item(base.ResponseBase) 87 | def get(self, key: str, *keys: str, **kwargs: base.CommonArgs): 88 | """Queries the DHT for its best value related to a given key 89 | 90 | There may be several different values for a given key stored in the 91 | DHT; in this context *best* means the record that is most desirable. 92 | There is no one metric for *best*: it depends entirely on the key type. 93 | For IPNS, *best* is the record that is both valid and has the highest 94 | sequence number (freshest). Different key types may specify other rules 95 | for what they consider to be the *best*.
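		.. code-block:: python

			>>> # A sketch – the IPNS key below is a placeholder peer ID and
			>>> # the (elided) return value is illustrative only
			>>> client.dht.get("/ipns/QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ")
			'…'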
96 |
97 | Parameters
98 | ----------
99 | key
100 | One or more keys whose values should be looked up
101 |
102 | Returns
103 | -------
104 | str
105 | """
106 | args = (key,) + keys
107 | res = self._client.request('/dht/get', args, decoder='json', **kwargs)
108 |
109 | if isinstance(res, dict) and "Extra" in res:
110 | return res["Extra"]
111 | else:
112 | for r in res:
113 | if "Extra" in r and len(r["Extra"]) > 0:
114 | return r["Extra"]
115 | raise exceptions.Error("empty response from DHT")
116 |
117 |
118 | #TODO: Implement `provide(cid)`
119 |
120 |
121 | @base.returns_multiple_items(base.ResponseBase)
122 | def put(self, key: str, value: str, **kwargs: base.CommonArgs):
123 | """Writes a key/value pair to the DHT
124 |
125 | Given a key of the form ``/foo/bar`` and a value of any form, this will
126 | write that value to the DHT with that key.
127 |
128 | Keys have two parts: a keytype (foo) and the key name (bar). IPNS uses
129 | the ``/ipns/`` keytype, and expects the key name to be a Peer ID. IPNS
130 | entries are formatted with a special structure.
131 |
132 | You may only use keytypes that are supported in your ``ipfs`` binary:
133 | ``go-ipfs`` currently only supports the ``/ipns/`` keytype. Unless you
134 | have a relatively deep understanding of the key's internal structure,
135 | you likely want to use :meth:`~ipfshttpclient.Client.name.publish`
136 | instead.
137 |
138 | Value is arbitrary text.
139 |
140 | .. code-block:: python
141 |
142 | >>> client.dht.put("QmVgNoP89mzpgEAAqK8owYoDEyB97Mkc … E9Uc", "test123")
143 | [{'ID': 'QmfLy2aqbhU1RqZnGQyqHSovV8tDufLUaPfN1LNtg5CvDZ',
144 | 'Extra': '', 'Type': 5, 'Responses': None},
145 | {'ID': 'QmZ5qTkNvvZ5eFq9T4dcCEK7kX8L7iysYEpvQmij9vokGE',
146 | 'Extra': '', 'Type': 5, 'Responses': None},
147 | {'ID': 'QmYqa6QHCbe6eKiiW6YoThU5yBy8c3eQzpiuW22SgVWSB8',
148 | 'Extra': '', 'Type': 6, 'Responses': None},
149 | …
150 | {'ID': 'QmP6TAKVDCziLmx9NV8QGekwtf7ZMuJnmbeHMjcfoZbRMd',
151 | 'Extra': '', 'Type': 1, 'Responses': []}]
152 |
153 | Parameters
154 | ----------
155 | key
156 | A unique identifier
157 | value
158 | Arbitrary text to associate with the input (2048 bytes or less)
159 |
160 | Returns
161 | -------
162 | list
163 | """
164 | args = (key, value)
165 | return self._client.request('/dht/put', args, decoder='json', **kwargs)
166 |
167 |
168 | @base.returns_multiple_items(base.ResponseBase)
169 | def query(self, peer_id: str, *peer_ids: str, **kwargs: base.CommonArgs):
170 | """Finds the closest Peer IDs to a given Peer ID by querying the DHT.
171 |
172 | ..
code-block:: python 173 | 174 | >>> client.dht.query("/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDM … uvuJ") 175 | [{'ID': 'QmPkFbxAQ7DeKD5VGSh9HQrdS574pyNzDmxJeGrRJxoucF', 176 | 'Extra': '', 'Type': 2, 'Responses': None}, 177 | {'ID': 'QmR1MhHVLJSLt9ZthsNNhudb1ny1WdhY4FPW21ZYFWec4f', 178 | 'Extra': '', 'Type': 2, 'Responses': None}, 179 | {'ID': 'Qmcwx1K5aVme45ab6NYWb52K2TFBeABgCLccC7ntUeDsAs', 180 | 'Extra': '', 'Type': 2, 'Responses': None}, 181 | … 182 | {'ID': 'QmYYy8L3YD1nsF4xtt4xmsc14yqvAAnKksjo3F3iZs5jPv', 183 | 'Extra': '', 'Type': 1, 'Responses': []}] 184 | 185 | Parameters 186 | ---------- 187 | peer_id 188 | The peerID to run the query against 189 | 190 | Returns 191 | ------- 192 | dict 193 | List of peers IDs 194 | """ 195 | args = (peer_id,) + peer_ids 196 | return self._client.request('/dht/query', args, decoder='json', **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/key.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | 4 | class Section(base.SectionBase): 5 | #TODO: Add `export(name, password)` 6 | 7 | 8 | @base.returns_single_item(base.ResponseBase) 9 | def gen(self, key_name: str, type: str, size: int = 2048, 10 | **kwargs: base.CommonArgs): 11 | """Adds a new public key that can be used for 12 | :meth:`~ipfshttpclient.Client.name.publish` 13 | 14 | .. code-block:: python 15 | 16 | >>> client.key.gen('example_key_name') 17 | {'Name': 'example_key_name', 18 | 'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'} 19 | 20 | Parameters 21 | ---------- 22 | key_name 23 | Name of the new Key to be generated. Used to reference the Keys. 24 | type 25 | Type of key to generate. The current possible keys types are: 26 | 27 | * ``"rsa"`` 28 | * ``"ed25519"`` 29 | size 30 | Bitsize of key to generate 31 | 32 | Returns 33 | ------- 34 | dict 35 | 36 | +------+---------------------------------------------------+ 37 | | Name | The name of the newly generated key | 38 | +------+---------------------------------------------------+ 39 | | Id | The key ID/fingerprint of the newly generated key | 40 | +------+---------------------------------------------------+ 41 | """ 42 | 43 | opts = {"type": type, "size": size} 44 | kwargs.setdefault("opts", {}).update(opts) 45 | args = (key_name,) 46 | 47 | return self._client.request('/key/gen', args, decoder='json', **kwargs) 48 | 49 | 50 | #TODO: Add `import(name, pam, password)` 51 | 52 | 53 | @base.returns_single_item(base.ResponseBase) 54 | def list(self, **kwargs: base.CommonArgs): 55 | """Returns a list of all available IPNS keys 56 | 57 | .. code-block:: python 58 | 59 | >>> client.key.list() 60 | {'Keys': [ 61 | {'Name': 'self', 62 | 'Id': 'QmQf22bZar3WKmojipms22PkXH1MZGmvsqzQtuSvQE3uhm'}, 63 | {'Name': 'example_key_name', 64 | 'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'} 65 | ]} 66 | 67 | Returns 68 | ------- 69 | dict 70 | 71 | +------+--------------------------------------------------------+ 72 | | Keys | List of dictionaries with Names and Ids of public keys | 73 | +------+--------------------------------------------------------+ 74 | """ 75 | return self._client.request('/key/list', decoder='json', **kwargs) 76 | 77 | 78 | @base.returns_single_item(base.ResponseBase) 79 | def rename(self, key_name: str, new_key_name: str, **kwargs: base.CommonArgs): 80 | """Rename an existing key 81 | 82 | .. 
code-block:: python 83 | 84 | >>> client.key.rename("bla", "personal") 85 | {"Was": "bla", 86 | "Now": "personal", 87 | "Id": "QmeyrRNxXaasZaoDXcCZgryoBCga9shaHQ4suHAYXbNZF3", 88 | "Overwrite": False} 89 | 90 | Parameters 91 | ---------- 92 | key_name 93 | Current name of the key to rename 94 | new_key_name 95 | New name of the key 96 | 97 | Returns 98 | ------- 99 | dict 100 | Information about the key renameal 101 | """ 102 | args = (key_name, new_key_name) 103 | return self._client.request( 104 | '/key/rename', args, decoder='json', **kwargs 105 | ) 106 | 107 | 108 | @base.returns_single_item(base.ResponseBase) 109 | def rm(self, key_name: str, *key_names: str, **kwargs: base.CommonArgs): 110 | """Removes one or more keys 111 | 112 | .. code-block:: python 113 | 114 | >>> client.key.rm("bla") 115 | {"Keys": [ 116 | {"Name": "bla", 117 | "Id": "QmfJpR6paB6h891y7SYXGe6gapyNgepBeAYMbyejWA4FWA"} 118 | ]} 119 | 120 | Parameters 121 | ---------- 122 | key_name 123 | Name of the key(s) to remove. 124 | 125 | Returns 126 | ------- 127 | dict 128 | 129 | +------+--------------------------------------------------+ 130 | | Keys | List of key names and IDs that have been removed | 131 | +------+--------------------------------------------------+ 132 | """ 133 | args = (key_name,) + key_names 134 | return self._client.request('/key/rm', args, decoder='json', **kwargs) 135 | -------------------------------------------------------------------------------- /ipfshttpclient/client/miscellaneous.py: -------------------------------------------------------------------------------- 1 | import typing as ty 2 | 3 | from . import base 4 | 5 | from .. import exceptions 6 | 7 | 8 | class Base(base.ClientBase): 9 | @base.returns_single_item(base.ResponseBase) 10 | def dns(self, domain_name: str, recursive: bool = False, 11 | **kwargs: base.CommonArgs): 12 | """Resolves DNS links to their referenced dweb-path 13 | 14 | CIDs are hard to remember, but domain names are usually easy to 15 | remember. To create memorable aliases for CIDs, DNS TXT records 16 | can point to other DNS links, IPFS objects, IPNS keys, etc. 17 | This command resolves those links to the referenced object. 18 | 19 | For example, with this DNS TXT record:: 20 | 21 | >>> import dns.resolver 22 | >>> a = dns.resolver.query("ipfs.io", "TXT") 23 | >>> a.response.answer[0].items[0].to_text() 24 | '"dnslink=/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n"' 25 | 26 | The resolver will give:: 27 | 28 | >>> client.dns("ipfs.io") 29 | {'Path': '/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n'} 30 | 31 | Parameters 32 | ---------- 33 | domain_name 34 | The domain-name name to resolve 35 | recursive 36 | Resolve until the name is not a DNS link 37 | 38 | Returns 39 | ------- 40 | dict 41 | 42 | +------+-------------------------------------+ 43 | | Path | Resource were a DNS entry points to | 44 | +------+-------------------------------------+ 45 | """ 46 | kwargs.setdefault("opts", {})["recursive"] = recursive 47 | 48 | args = (domain_name,) 49 | return self._client.request('/dns', args, decoder='json', **kwargs) 50 | 51 | 52 | @base.returns_single_item(base.ResponseBase) 53 | def id(self, peer: ty.Optional[str] = None, **kwargs: base.CommonArgs): 54 | """Returns general information of an IPFS Node 55 | 56 | Returns the PublicKey, ProtocolVersion, ID, AgentVersion and 57 | Addresses of the connected daemon or some other node. 58 | 59 | .. 
code-block:: python 60 | 61 | >>> client.id() 62 | {'ID': 'QmVgNoP89mzpgEAAqK8owYoDEyB97MkcGvoWZir8otE9Uc', 63 | 'PublicKey': 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggE … BAAE=', 64 | 'AgentVersion': 'go-libp2p/3.3.4', 65 | 'ProtocolVersion': 'ipfs/0.1.0', 66 | 'Addresses': [ 67 | '/ip4/127.0.0.1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owYo … E9Uc', 68 | '/ip4/10.1.0.172/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owY … E9Uc', 69 | '/ip4/172.18.0.1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owY … E9Uc', 70 | '/ip6/::1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owYoDEyB97 … E9Uc', 71 | '/ip6/fccc:7904:b05b:a579:957b:deef:f066:cad9/tcp/400 … E9Uc', 72 | '/ip6/fd56:1966:efd8::212/tcp/4001/ipfs/QmVgNoP89mzpg … E9Uc', 73 | '/ip6/fd56:1966:efd8:0:def1:34d0:773:48f/tcp/4001/ipf … E9Uc', 74 | '/ip6/2001:db8:1::1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8 … E9Uc', 75 | '/ip4/77.116.233.54/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8 … E9Uc', 76 | '/ip4/77.116.233.54/tcp/10842/ipfs/QmVgNoP89mzpgEAAqK … E9Uc']} 77 | 78 | Parameters 79 | ---------- 80 | peer 81 | Peer.ID of the node to look up (local node if ``None``) 82 | 83 | Returns 84 | ------- 85 | dict 86 | Information about the IPFS node 87 | """ 88 | args = (peer,) if peer is not None else () 89 | return self._client.request('/id', args, decoder='json', **kwargs) 90 | 91 | 92 | #TODO: isOnline() 93 | 94 | 95 | @base.returns_multiple_items(base.ResponseBase) 96 | def ping(self, peer: str, *peers: str, count: int = 10, 97 | **kwargs: base.CommonArgs): 98 | """Provides round-trip latency information for the routing system. 99 | 100 | Finds nodes via the routing system, sends pings, waits for pongs, 101 | and prints out round-trip latency information. 102 | 103 | .. code-block:: python 104 | 105 | >>> client.ping("QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n") 106 | [{'Success': True, 'Time': 0, 107 | 'Text': 'Looking up peer QmTzQ1JRkWErjk39mryYw2WVaphAZN … c15n'}, 108 | {'Success': False, 'Time': 0, 109 | 'Text': 'Peer lookup error: routing: not found'}] 110 | 111 | .. hint:: 112 | 113 | Pass ``stream=True`` to receive ping progress reports as they 114 | arrive. 115 | 116 | Parameters 117 | ---------- 118 | peer 119 | ID of peer(s) to be pinged 120 | count 121 | Number of ping messages to send 122 | 123 | Returns 124 | ------- 125 | list 126 | Progress reports from the ping 127 | """ 128 | kwargs.setdefault("opts", {})["count"] = count 129 | 130 | args = (peer,) + peers 131 | return self._client.request('/ping', args, decoder='json', **kwargs) 132 | 133 | 134 | @base.returns_single_item(base.ResponseBase) 135 | def resolve(self, path: str, recursive: bool = False, 136 | **kwargs: base.CommonArgs): 137 | """Resolves an dweb-path and return the path of the referenced item 138 | 139 | There are a number of mutable name protocols that can link among 140 | themselves and into IPNS. For example IPNS references can (currently) 141 | point at an IPFS object, and DNS links can point at other DNS links, 142 | IPNS entries, or IPFS objects. This command accepts any of these 143 | identifiers. 144 | 145 | .. 
code-block:: python 146 | 147 | >>> client.resolve("/ipfs/QmTkzDwWqPbnAh5YiV5VwcTLnGdw … ca7D/Makefile") 148 | {'Path': '/ipfs/Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV'} 149 | >>> client.resolve("/ipns/ipfs.io") 150 | {'Path': '/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n'} 151 | 152 | Parameters 153 | ---------- 154 | path 155 | The name to resolve 156 | recursive 157 | Resolve until the result is an IPFS name 158 | 159 | Returns 160 | ------- 161 | dict 162 | 163 | +------+-------------------------------------+ 164 | | Path | IPFS path of the requested resource | 165 | +------+-------------------------------------+ 166 | """ 167 | kwargs.setdefault("opts", {})["recursive"] = recursive 168 | 169 | args = (path,) 170 | return self._client.request('/resolve', args, decoder='json', **kwargs) 171 | 172 | 173 | @base.returns_no_item 174 | def stop(self): 175 | """Stops the connected IPFS daemon instance 176 | 177 | Sending any further requests after this will fail with 178 | :class:`~ipfshttpclient.exceptions.ConnectionError`, unless you start 179 | another IPFS daemon instance at the same address. 180 | """ 181 | try: 182 | self._client.request('/shutdown') 183 | except exceptions.ConnectionError: 184 | # Sometimes the daemon kills the connection before sending a 185 | # response causing an incorrect `ConnectionError` to bubble 186 | pass 187 | 188 | 189 | @base.returns_single_item(base.ResponseBase) 190 | def version(self, **kwargs: base.CommonArgs): 191 | """Returns the software versions of the currently connected node 192 | 193 | .. code-block:: python 194 | 195 | >>> client.version() 196 | {'Version': '0.4.3-rc2', 'Repo': '4', 'Commit': '', 197 | 'System': 'amd64/linux', 'Golang': 'go1.6.2'} 198 | 199 | Returns 200 | ------- 201 | dict 202 | Daemon and system version information 203 | """ 204 | return self._client.request('/version', decoder='json', **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/name.py: -------------------------------------------------------------------------------- 1 | import typing as ty 2 | 3 | from . import base 4 | 5 | 6 | class Section(base.SectionBase): 7 | @base.returns_single_item(base.ResponseBase) 8 | def publish(self, ipfs_path: str, 9 | resolve: bool = True, lifetime: ty.Union[str, int] = "24h", 10 | ttl: ty.Union[str, int] = None, key: str = None, 11 | allow_offline: bool = False, **kwargs: base.CommonArgs): 12 | """Publishes an object to IPNS 13 | 14 | IPNS is a PKI namespace, where names are the hashes of public keys, and 15 | the private key enables publishing new (signed) values. In publish, the 16 | default value of *name* is your own identity public key. 17 | 18 | .. code-block:: python 19 | 20 | >>> client.name.publish('/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZK … GZ5d') 21 | {'Value': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d', 22 | 'Name': 'QmVgNoP89mzpgEAAqK8owYoDEyB97MkcGvoWZir8otE9Uc'} 23 | 24 | Parameters 25 | ---------- 26 | ipfs_path 27 | IPFS path of the object to be published 28 | allow_offline 29 | When offline, save the IPNS record to the the local 30 | datastore without broadcasting to the network instead 31 | of simply failing. 32 | lifetime 33 | Time duration that the record will be valid for 34 | 35 | Accepts durations such as ``"300s"``, ``"1.5h"`` or ``"2h45m"``. 
36 | Valid units are: 37 | 38 | * ``"ns"`` 39 | * ``"us"`` (or ``"µs"``) 40 | * ``"ms"`` 41 | * ``"s"`` 42 | * ``"m"`` 43 | * ``"h"`` 44 | resolve 45 | Resolve given path before publishing 46 | ttl 47 | Time duration this record should be cached for. 48 | Same syntax like 'lifetime' option. (experimental feature) 49 | key 50 | Name of the key to be used, as listed by 'ipfs key list'. 51 | 52 | Returns 53 | ------- 54 | dict 55 | 56 | +-------+----------------------------------------------------------+ 57 | | Name | Key ID of the key to which the given value was published | 58 | +-------+----------------------------------------------------------+ 59 | | Value | Value that was published | 60 | +-------+----------------------------------------------------------+ 61 | """ 62 | opts = {"lifetime": str(lifetime), 63 | "resolve": resolve, 64 | "allow-offline": allow_offline} 65 | if ttl: 66 | opts["ttl"] = str(ttl) 67 | if key: 68 | opts["key"] = key 69 | kwargs.setdefault("opts", {}).update(opts) 70 | 71 | args = (ipfs_path,) 72 | return self._client.request('/name/publish', args, decoder='json', **kwargs) 73 | 74 | 75 | @base.returns_single_item(base.ResponseBase) 76 | def resolve(self, name: str = None, recursive: bool = False, 77 | nocache: bool = False, dht_record_count: ty.Optional[int] = None, 78 | dht_timeout: ty.Optional[ty.Union[str, int]] = None, 79 | **kwargs: base.CommonArgs): 80 | """Retrieves the value currently published at the given IPNS name 81 | 82 | IPNS is a PKI namespace, where names are the hashes of public keys, and 83 | the private key enables publishing new (signed) values. In resolve, the 84 | default value of ``name`` is your own identity public key. 85 | 86 | .. code-block:: python 87 | 88 | >>> client.name.resolve() 89 | {'Path': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d'} 90 | 91 | Parameters 92 | ---------- 93 | name 94 | The IPNS name to resolve (defaults to the connected node) 95 | recursive 96 | Resolve until the result is not an IPFS name (default: false) 97 | nocache 98 | Do not use cached entries (default: false) 99 | dht_record_count 100 | Number of records to request for DHT resolution. 101 | dht_timeout 102 | Maximum time to collect values during DHT resolution, e.g. "30s". 103 | 104 | For the exact syntax see the ``lifetime`` argument on 105 | :meth:`~ipfshttpclient.Client.name.publish`. Set this parameter to 106 | ``0`` to disable the timeout. 107 | 108 | Returns 109 | ------- 110 | dict 111 | 112 | +------+--------------------------------------+ 113 | | Path | The resolved value of the given name | 114 | +------+--------------------------------------+ 115 | """ 116 | opts = {"recursive": recursive, "nocache": nocache} 117 | if dht_record_count is not None: 118 | opts["dht-record-count"] = str(dht_record_count) 119 | if dht_timeout is not None: 120 | opts["dht-timeout"] = str(dht_timeout) 121 | 122 | kwargs.setdefault("opts", {}).update(opts) 123 | args = (name,) if name is not None else () 124 | return self._client.request('/name/resolve', args, decoder='json', **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/pin.py: -------------------------------------------------------------------------------- 1 | from . 
import base
2 |
3 |
4 | class Section(base.SectionBase):
5 | @base.returns_single_item(base.ResponseBase)
6 | def add(self, path: base.cid_t, *paths: base.cid_t, recursive: bool = True,
7 | **kwargs: base.CommonArgs):
8 | """Pins objects to the node's local repository
9 |
10 | Stores IPFS object(s) from a given path in the local repository.
11 |
12 | .. code-block:: python
13 |
14 | >>> client.pin.add("QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d")
15 | {'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']}
16 |
17 | Parameters
18 | ----------
19 | path
20 | Path to object(s) to be pinned
21 | recursive
22 | Recursively pin the objects linked to by the specified object(s)
23 |
24 | Returns
25 | -------
26 | dict
27 |
28 | +------+-----------------------------------------------------------+
29 | | Pins | List of IPFS objects that have been pinned by this action |
30 | +------+-----------------------------------------------------------+
31 | """
32 | kwargs.setdefault("opts", {})["recursive"] = recursive
33 |
34 | args = (str(path), *(str(p) for p in paths))
35 | return self._client.request('/pin/add', args, decoder='json', **kwargs)
36 |
37 |
38 | @base.returns_single_item(base.ResponseBase)
39 | def ls(self, *paths: base.cid_t, type: str = "all", **kwargs: base.CommonArgs):
40 | """Lists objects pinned in the local repository
41 |
42 | By default, all pinned objects are returned, but the ``type`` flag or
43 | arguments can restrict that to a specific pin type or to some specific
44 | objects respectively. In particular the ``type="recursive"`` argument will
45 | only list objects added with ``.pin.add(…)`` (or similar) and will greatly
46 | speed processing as obtaining this list does *not* require a complete
47 | repository metadata scan.
48 |
49 | .. code-block:: python
50 |
51 | >>> client.pin.ls()
52 | {'Keys': {
53 | 'QmNNPMA1eGUbKxeph6yqV8ZmRkdVat … YMuz': {'Type': 'recursive'},
54 | 'QmNPZUCeSN5458Uwny8mXSWubjjr6J … kP5e': {'Type': 'recursive'},
55 | 'QmNg5zWpRMxzRAVg7FTQ3tUxVbKj8E … gHPz': {'Type': 'indirect'},
56 | …
57 | 'QmNiuVapnYCrLjxyweHeuk6Xdqfvts … wCCe': {'Type': 'indirect'}
58 | }}
59 |
60 | >>> # While the above works you should always try to use `type="recursive"`
61 | >>> # instead as it will greatly speed up processing and only lists
62 | >>> # explicit pins (added with `.pin.add(…)` or similar), rather
63 | >>> # than all objects that won't be removed as part of `.repo.gc()`:
64 | >>> client.pin.ls(type="recursive")
65 | {'Keys': {
66 | 'QmNNPMA1eGUbKxeph6yqV8ZmRkdVat … YMuz': {'Type': 'recursive'},
67 | 'QmNPZUCeSN5458Uwny8mXSWubjjr6J … kP5e': {'Type': 'recursive'},
68 | …
69 | }}
70 |
71 | >>> client.pin.ls('/ipfs/QmNNPMA1eGUbKxeph6yqV8ZmRkdVat … YMuz')
72 | {'Keys': {
73 | 'QmNNPMA1eGUbKxeph6yqV8ZmRkdVat … YMuz': {'Type': 'recursive'}}}
74 |
75 | >>> client.pin.ls('/ipfs/QmdBCSn4UJP82MjhRVwpABww48tXL3 … mA6z')
76 | ipfshttpclient.exceptions.ErrorResponse:
77 | path '/ipfs/QmdBCSn4UJP82MjhRVwpABww48tXL3 … mA6z' is not pinned
78 |
79 | Parameters
80 | ----------
81 | paths
82 | The IPFS paths or CIDs to search for
83 |
84 | If none are passed, return information about all pinned objects.
85 | If any of the passed CIDs is not pinned, then remote will
86 | return an error and an :exc:`ErrorResponse` exception will be raised.
87 | type
88 | The type of pinned keys to list.
Can be:
89 |
90 | * ``"direct"``
91 | * ``"indirect"``
92 | * ``"recursive"``
93 | * ``"all"``
94 |
95 | Raises
96 | ------
97 | ~ipfshttpclient.exceptions.ErrorResponse
98 | Remote returned an error. Remote will return an error
99 | if any of the passed CIDs is not pinned. In this case,
100 | the exception will contain 'not pinned' in its args[0].
101 |
102 | Returns
103 | -------
104 | dict
105 |
106 | +------+--------------------------------------------------------------+
107 | | Keys | Mapping of IPFS object names currently pinned to their types |
108 | +------+--------------------------------------------------------------+
109 | """
110 | kwargs.setdefault("opts", {})["type"] = type
111 |
112 | args = tuple(str(p) for p in paths)
113 | return self._client.request('/pin/ls', args, decoder='json', **kwargs)
114 |
115 |
116 | @base.returns_single_item(base.ResponseBase)
117 | def rm(self, path: base.cid_t, *paths: base.cid_t, recursive: bool = True,
118 | **kwargs: base.CommonArgs):
119 | """Removes a pinned object from local storage
120 |
121 | Removes the pin from the given object allowing it to be garbage
122 | collected if needed. That is, depending on the node configuration
123 | it may not actually be garbage collected anytime soon, or at all,
124 | unless you manually clean up the local repository using
125 | :meth:`~ipfshttpclient.Client.repo.gc`.
126 |
127 | Also note that if an object is pinned both directly (that is, its type
128 | is ``"recursive"``) and indirectly (meaning that it is referenced
129 | by another object that is still pinned), it may not be removed at all
130 | by this.
131 |
132 | .. code-block:: python
133 |
134 | >>> client.pin.rm('QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d')
135 | {'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']}
136 |
137 | Parameters
138 | ----------
139 | path
140 | Path to object(s) to be unpinned
141 | recursive
142 | Recursively unpin the object linked to by the specified object(s)
143 |
144 | Returns
145 | -------
146 | dict
147 |
148 | +------+-------------------------------------------------------------+
149 | | Pins | List of IPFS objects that have been unpinned by this action |
150 | +------+-------------------------------------------------------------+
151 | """
152 | kwargs.setdefault("opts", {})["recursive"] = recursive
153 |
154 | args = (str(path), *(str(p) for p in paths))
155 | return self._client.request('/pin/rm', args, decoder='json', **kwargs)
156 |
157 |
158 | @base.returns_single_item(base.ResponseBase)
159 | def update(self, from_path: base.cid_t, to_path: base.cid_t, *,
160 | unpin: bool = True, **kwargs: base.CommonArgs):
161 | """Replaces one pin with another
162 |
163 | Updates one pin to another, making sure that all objects in the new pin
164 | are local. Then removes the old pin. This is an optimized version of
165 | first using :meth:`~ipfshttpclient.Client.pin.add` to add a new pin
166 | for an object and then using :meth:`~ipfshttpclient.Client.pin.rm` to
167 | remove the pin for the old object.
168 |
169 | .. code-block:: python
170 |
171 | >>> client.pin.update("QmXMqez83NU77ifmcPs5CkNRTMQksBLkyfBf4H5g1NZ52P",
172 | ... "QmUykHAi1aSjMzHw3KmBoJjqRUQYNkFXm8K1y7ZsJxpfPH")
173 | {"Pins": ["/ipfs/QmXMqez83NU77ifmcPs5CkNRTMQksBLkyfBf4H5g1NZ52P",
174 | "/ipfs/QmUykHAi1aSjMzHw3KmBoJjqRUQYNkFXm8K1y7ZsJxpfPH"]}
175 |
176 | Parameters
177 | ----------
178 | from_path
179 | Path to the old object
180 | to_path
181 | Path to the new object to be pinned
182 | unpin
183 | Should the pin of the old object be removed?
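For illustration, a sketch that keeps the old pin alive alongside the new one (CIDs reused from the example above):

.. code-block:: python

    >>> client.pin.update("QmXMqez83NU77ifmcPs5CkNRTMQksBLkyfBf4H5g1NZ52P",
    ...                   "QmUykHAi1aSjMzHw3KmBoJjqRUQYNkFXm8K1y7ZsJxpfPH",
    ...                   unpin=False)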
184 |
185 | Returns
186 | -------
187 | dict
188 |
189 | +------+-------------------------------------------------------------+
190 | | Pins | List of IPFS objects that have been affected by this action |
191 | +------+-------------------------------------------------------------+
192 | """
193 | kwargs.setdefault("opts", {})["unpin"] = unpin
194 |
195 | args = (str(from_path), str(to_path))
196 | return self._client.request('/pin/update', args, decoder='json', **kwargs)
197 |
198 |
199 | @base.returns_multiple_items(base.ResponseBase, stream=True)
200 | def verify(self, path: base.cid_t, *paths: base.cid_t, verbose: bool = False,
201 | **kwargs: base.CommonArgs):
202 | """Verifies that all recursive pins are completely available in the local
203 | repository
204 |
205 | Scan the repo for pinned object graphs and check their integrity.
206 | Issues will be reported back with a helpful human-readable error
207 | message to aid in error recovery. This is useful to help recover
208 | from datastore corruptions (such as when accidentally deleting
209 | files added using the filestore backend).
210 |
211 | This function returns an iterator that has to be exhausted or closed
212 | using either a context manager (``with``-statement) or its
213 | ``.close()`` method.
214 |
215 | .. code-block:: python
216 |
217 | >>> with client.pin.verify("QmN…TTZ", verbose=True) as pin_verify_iter:
218 | ... for item in pin_verify_iter:
219 | ... print(item)
220 | ...
221 | {"Cid":"QmVkNdzCBukBRdpyFiKPyL2R15qPExMr9rV9RFV2kf9eeV","Ok":True}
222 | {"Cid":"QmbPzQruAEFjUU3gQfupns6b8USr8VrD9H71GrqGDXQSxm","Ok":True}
223 | {"Cid":"Qmcns1nUvbeWiecdGDPw8JxWeUfxCV8JKhTfgzs3F8JM4P","Ok":True}
224 | …
225 |
226 | Parameters
227 | ----------
228 | path
229 | Path to object(s) to be checked
230 | verbose
231 | Also report status of items that were OK?
232 |
233 | Returns
234 | -------
235 | Iterable[dict]
236 |
237 | +-----+----------------------------------------------------+
238 | | Cid | IPFS object ID checked |
239 | +-----+----------------------------------------------------+
240 | | Ok | Whether the given object was successfully verified |
241 | +-----+----------------------------------------------------+
242 | """
243 | kwargs.setdefault("opts", {})["verbose"] = verbose
244 |
245 | args = (str(path), *(str(p) for p in paths))
246 | return self._client.request('/pin/verify', args, decoder='json', stream=True, **kwargs)
--------------------------------------------------------------------------------
/ipfshttpclient/client/repo.py:
--------------------------------------------------------------------------------
1 | from . import base
2 |
3 |
4 | class Section(base.SectionBase):
5 | @base.returns_multiple_items(base.ResponseBase)
6 | def gc(self, *, quiet: bool = False, **kwargs: base.CommonArgs):
7 | """Removes stored objects that are not pinned from the repo
8 |
9 | .. code-block:: python
10 |
11 | >>> client.repo.gc()
12 | [{'Key': 'QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQuwaHG2mpW2'},
13 | {'Key': 'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k'},
14 | {'Key': 'QmRVBnxUCsD57ic5FksKYadtyUbMsyo9KYQKKELajqAp4q'},
15 | …
16 | {'Key': 'QmYp4TeCurXrhsxnzt5wqLqqUz8ZRg5zsc7GuUrUSDtwzP'}]
17 |
18 | Performs a garbage collection sweep of the local set of
19 | stored objects and removes ones that are not pinned in order
20 | to reclaim hard disk space. Returns the hashes of all collected
21 | objects.
22 |
23 | Parameters
24 | ----------
25 | quiet
26 | Should the client avoid downloading the list of removed objects?
27 |
28 | Passing ``True`` to this parameter often causes the GC process to
29 | speed up tremendously as it will also avoid generating the list of
30 | removed objects in the connected daemon at all.
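A sketch of a quiet sweep; as noted above, the daemon then skips generating the removal list, so little to no per-object output should be expected:

.. code-block:: python

    >>> client.repo.gc(quiet=True)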
31 |
32 | Returns
33 | -------
34 | list
35 | List of IPFS objects that have been removed
36 | """
37 | kwargs.setdefault("opts", {})["quiet"] = quiet
38 |
39 | return self._client.request('/repo/gc', decoder='json', **kwargs)
40 |
41 |
42 | @base.returns_single_item(base.ResponseBase)
43 | def stat(self, **kwargs: base.CommonArgs):
44 | """Returns local repository status information
45 |
46 | .. code-block:: python
47 |
48 | >>> client.repo.stat()
49 | {'NumObjects': 354,
50 | 'RepoPath': '…/.local/share/ipfs',
51 | 'Version': 'fs-repo@4',
52 | 'RepoSize': 13789310}
53 |
54 | Returns
55 | -------
56 | dict
57 | General information about the IPFS file repository
58 |
59 | +------------+-------------------------------------------------+
60 | | NumObjects | Number of objects in the local repo. |
61 | +------------+-------------------------------------------------+
62 | | RepoPath | The path to the repo being currently used. |
63 | +------------+-------------------------------------------------+
64 | | RepoSize | Size in bytes that the repo is currently using. |
65 | +------------+-------------------------------------------------+
66 | | Version | The repo version. |
67 | +------------+-------------------------------------------------+
68 | """
69 | return self._client.request('/repo/stat', decoder='json', **kwargs)
70 |
71 |
72 | #TODO: `version()`
73 |
--------------------------------------------------------------------------------
/ipfshttpclient/client/swarm.py:
--------------------------------------------------------------------------------
1 | from . import base
2 |
3 |
4 | class FiltersSection(base.SectionBase):
5 | @base.returns_single_item(base.ResponseBase)
6 | def add(self, address: base.multiaddr_t, *addresses: base.multiaddr_t,
7 | **kwargs: base.CommonArgs):
8 | """Adds a given multiaddr filter to the filter/ignore list
9 |
10 | This will add an address filter to the daemon's swarm. Filters applied
11 | this way will not persist across daemon restarts; to achieve that, add
12 | your filters to the configuration file.
13 |
14 | .. code-block:: python
15 |
16 | >>> client.swarm.filters.add("/ip4/192.168.0.0/ipcidr/16")
17 | {'Strings': ['/ip4/192.168.0.0/ipcidr/16']}
18 |
19 | Parameters
20 | ----------
21 | address
22 | Multiaddr to avoid connecting to
23 |
24 | Returns
25 | -------
26 | dict
27 |
28 | +---------+-----------------------------+
29 | | Strings | List of swarm filters added |
30 | +---------+-----------------------------+
31 | """
32 | args = (str(address), *(str(a) for a in addresses))
33 | return self._client.request('/swarm/filters/add', args, decoder='json', **kwargs)
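# An illustrative sketch (the CIDR ranges below are hypothetical): thanks to
# the variadic signature above, several filters may also be added in one call:
#
#     client.swarm.filters.add("/ip4/192.168.0.0/ipcidr/16",
#                              "/ip4/10.0.0.0/ipcidr/8")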
34 |
35 |
36 | @base.returns_single_item(base.ResponseBase)
37 | def rm(self, address: base.multiaddr_t, *addresses: base.multiaddr_t,
38 | **kwargs: base.CommonArgs):
39 | """Removes a given multiaddr filter from the filter list
40 |
41 | This will remove an address filter from the daemon's swarm. Filters
42 | removed this way will not persist across daemon restarts; to achieve
43 | that, remove your filters from the configuration file.
44 |
45 | .. code-block:: python
46 |
47 | >>> client.swarm.filters.rm("/ip4/192.168.0.0/ipcidr/16")
48 | {'Strings': ['/ip4/192.168.0.0/ipcidr/16']}
49 |
50 | Parameters
51 | ----------
52 | address
53 | Multiaddr filter to remove
54 |
55 | Returns
56 | -------
57 | dict
58 |
59 | +---------+-------------------------------+
60 | | Strings | List of swarm filters removed |
61 | +---------+-------------------------------+
62 | """
63 | args = (str(address), *(str(a) for a in addresses))
64 | return self._client.request('/swarm/filters/rm', args, decoder='json', **kwargs)
65 |
66 |
67 | class Section(base.SectionBase):
68 | filters = base.SectionProperty(FiltersSection)
69 |
70 |
71 | @base.returns_single_item(base.ResponseBase)
72 | def addrs(self, **kwargs: base.CommonArgs):
73 | """Returns the addresses of currently connected peers by peer id
74 |
75 | .. code-block:: python
76 |
77 | >>> pprint(client.swarm.addrs())
78 | {'Addrs': {
79 | 'QmNMVHJTSZHTWMWBbmBrQgkA1hZPWYuVJx2DpSGESWW6Kn': [
80 | '/ip4/10.1.0.1/tcp/4001',
81 | '/ip4/127.0.0.1/tcp/4001',
82 | '/ip4/51.254.25.16/tcp/4001',
83 | '/ip6/2001:41d0:b:587:3cae:6eff:fe40:94d8/tcp/4001',
84 | '/ip6/2001:470:7812:1045::1/tcp/4001',
85 | '/ip6/::1/tcp/4001',
86 | '/ip6/fc02:2735:e595:bb70:8ffc:5293:8af8:c4b7/tcp/4001',
87 | '/ip6/fd00:7374:6172:100::1/tcp/4001',
88 | '/ip6/fd20:f8be:a41:0:c495:aff:fe7e:44ee/tcp/4001',
89 | '/ip6/fd20:f8be:a41::953/tcp/4001'],
90 | 'QmNQsK1Tnhe2Uh2t9s49MJjrz7wgPHj4VyrZzjRe8dj7KQ': [
91 | '/ip4/10.16.0.5/tcp/4001',
92 | '/ip4/127.0.0.1/tcp/4001',
93 | '/ip4/172.17.0.1/tcp/4001',
94 | '/ip4/178.62.107.36/tcp/4001',
95 | '/ip6/::1/tcp/4001'],
96 | …
97 | }}
98 |
99 | Returns
100 | -------
101 | dict
102 | Multiaddrs of peers by peer id
103 |
104 | +-------+-----------------------------------------------------------+
105 | | Addrs | Mapping of PeerIDs to a list of its advertised multiaddrs |
106 | +-------+-----------------------------------------------------------+
107 | """
108 | return self._client.request('/swarm/addrs', decoder='json', **kwargs)
109 |
110 |
111 | @base.returns_single_item(base.ResponseBase)
112 | def connect(self, address: base.multiaddr_t, *addresses: base.multiaddr_t,
113 | **kwargs: base.CommonArgs):
114 | """Attempts to connect to a peer at the given multiaddr
115 |
116 | This will open a new direct connection to a peer address. The address
117 | format is an IPFS multiaddr, e.g.::
118 |
119 | /ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ
120 |
121 | .. code-block:: python
122 |
123 | >>> client.swarm.connect("/ip4/104.131.131.82/tcp/4001/ipfs/Qma … uvuJ")
124 | {'Strings': ['connect QmaCpDMGvV2BGHeYERUEnRQAwe3 … uvuJ success']}
125 |
126 | Parameters
127 | ----------
128 | address
129 | Address of peer to connect to
130 |
131 | Returns
132 | -------
133 | dict
134 | Textual connection status report
135 | """
136 | args = (str(address), *(str(a) for a in addresses))
137 | return self._client.request('/swarm/connect', args, decoder='json', **kwargs)
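# A short sketch tying these calls together (multiaddr shortened as in the
# examples above): connect to a peer, then check it shows up in ``swarm.peers()``:
#
#     client.swarm.connect("/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDM … uvuJ")
#     any("QmaCpDM" in addr for addr in client.swarm.peers()["Strings"])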
138 |
139 |
140 | @base.returns_single_item(base.ResponseBase)
141 | def disconnect(self, address: base.multiaddr_t, *addresses: base.multiaddr_t,
142 | **kwargs: base.CommonArgs):
143 | """Closes any open connection to a given multiaddr
144 |
145 | This will close a connection to a peer address. The address format is
146 | an IPFS multiaddr::
147 |
148 | /ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ
149 |
150 | The disconnect is not permanent; if IPFS needs to talk to that address
151 | later, it will reconnect. To avoid this, add a filter for the given
152 | address before disconnecting.
153 |
154 | .. code-block:: python
155 |
156 | >>> client.swarm.disconnect("/ip4/104.131.131.82/tcp/4001/ipfs/Qm … uJ")
157 | {'Strings': ['disconnect QmaCpDMGvV2BGHeYERUEnRQA … uvuJ success']}
158 |
159 | Parameters
160 | ----------
161 | address
162 | Address of peer to disconnect from
163 |
164 | Returns
165 | -------
166 | dict
167 | Textual connection status report
168 | """
169 | args = (str(address), *(str(a) for a in addresses))
170 | return self._client.request('/swarm/disconnect', args, decoder='json', **kwargs)
171 |
172 |
173 | @base.returns_single_item(base.ResponseBase)
174 | def peers(self, **kwargs: base.CommonArgs):
175 | """Returns the addresses & IDs of currently connected peers
176 |
177 | .. code-block:: python
178 |
179 | >>> client.swarm.peers()
180 | {'Strings': [
181 | '/ip4/101.201.40.124/tcp/40001/ipfs/QmZDYAhmMDtnoC6XZ … kPZc',
182 | '/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYER … uvuJ',
183 | '/ip4/104.223.59.174/tcp/4001/ipfs/QmeWdgoZezpdHz1PX8 … 1jB6',
184 | …
185 | '/ip6/fce3: … :f140/tcp/43901/ipfs/QmSoLnSGccFuZQJzRa … ca9z'
186 | ]}
187 |
188 | Returns
189 | -------
190 | dict
191 |
192 | +---------+----------------------------------------------------+
193 | | Strings | List of Multiaddrs that the daemon is connected to |
194 | +---------+----------------------------------------------------+
195 | """
196 | return self._client.request('/swarm/peers', decoder='json', **kwargs)
197 |
--------------------------------------------------------------------------------
/ipfshttpclient/client/unstable.py:
--------------------------------------------------------------------------------
1 | from . import base
2 |
3 |
4 | class LogSection(base.SectionBase):
5 | @base.returns_single_item(base.ResponseBase)
6 | def level(self, subsystem: str, level: str, **kwargs: base.CommonArgs):
7 | r"""Changes the logging output level for a given subsystem
8 |
9 | **This API is subject to future change or removal!**
10 |
11 | .. code-block:: python
12 |
13 | >>> client.unstable.log.level("path", "info")
14 | {"Message": "Changed log level of 'path' to 'info'\n"}
15 |
16 | Parameters
17 | ----------
18 | subsystem
19 | The subsystem logging identifier (Use ``"all"`` for all subsystems)
20 | level
21 | The desired logging level. Must be one of:
22 |
23 | * ``"debug"``
24 | * ``"info"``
25 | * ``"warning"``
26 | * ``"error"``
27 | * ``"fatal"``
28 | * ``"panic"``
29 |
30 | Returns
31 | -------
32 | dict
33 |
34 | +---------+-----------------------+
35 | | Message | Textual status report |
36 | +---------+-----------------------+
37 | """
38 | args = (subsystem, level)
39 | return self._client.request('/log/level', args,
40 | decoder='json', **kwargs)
41 |
42 |
43 | @base.returns_single_item(base.ResponseBase)
44 | def ls(self, **kwargs: base.CommonArgs):
45 | """Lists the available logging subsystems
46 |
47 | **This API is subject to future change or removal!**
48 |
49 | ..
code-block:: python 50 | 51 | >>> client.unstable.log.ls() 52 | {'Strings': [ 53 | 'github.com/ipfs/go-libp2p/p2p/host', 'net/identify', 54 | 'merkledag', 'providers', 'routing/record', 'chunk', 'mfs', 55 | 'ipns-repub', 'flatfs', 'ping', 'mockrouter', 'dagio', 56 | 'cmds/files', 'blockset', 'engine', 'mocknet', 'config', 57 | 'commands/http', 'cmd/ipfs', 'command', 'conn', 'gc', 58 | 'peerstore', 'core', 'coreunix', 'fsrepo', 'core/server', 59 | 'boguskey', 'github.com/ipfs/go-libp2p/p2p/host/routed', 60 | 'diagnostics', 'namesys', 'fuse/ipfs', 'node', 'secio', 61 | 'core/commands', 'supernode', 'mdns', 'path', 'table', 62 | 'swarm2', 'peerqueue', 'mount', 'fuse/ipns', 'blockstore', 63 | 'github.com/ipfs/go-libp2p/p2p/host/basic', 'lock', 'nat', 64 | 'importer', 'corerepo', 'dht.pb', 'pin', 'bitswap_network', 65 | 'github.com/ipfs/go-libp2p/p2p/protocol/relay', 'peer', 66 | 'transport', 'dht', 'offlinerouting', 'tarfmt', 'eventlog', 67 | 'ipfsaddr', 'github.com/ipfs/go-libp2p/p2p/net/swarm/addr', 68 | 'bitswap', 'reprovider', 'supernode/proxy', 'crypto', 'tour', 69 | 'commands/cli', 'blockservice']} 70 | 71 | Returns 72 | ------- 73 | dict 74 | 75 | +---------+-----------------------------------+ 76 | | Strings | List of daemon logging subsystems | 77 | +---------+-----------------------------------+ 78 | """ 79 | return self._client.request('/log/ls', decoder='json', **kwargs) 80 | 81 | 82 | @base.returns_multiple_items(base.ResponseBase, stream=True) 83 | def tail(self, **kwargs: base.CommonArgs): 84 | r"""Streams log outputs as they are generated 85 | 86 | **This API is subject to future change or removal!** 87 | 88 | This function returns an iterator that needs to be closed using a 89 | context manager (``with``-statement) or using the ``.close()`` method. 90 | 91 | .. code-block:: python 92 | 93 | >>> with client.unstable.log.tail() as log_tail_iter: 94 | ... for item in log_tail_iter: 95 | ... print(item) 96 | ... 97 | {"event":"updatePeer","system":"dht", 98 | "peerID":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", 99 | "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", 100 | "time":"2016-08-22T13:25:27.43353297Z"} 101 | {"event":"handleAddProviderBegin","system":"dht", 102 | "peer":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", 103 | "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", 104 | "time":"2016-08-22T13:25:27.433642581Z"} 105 | {"event":"handleAddProvider","system":"dht","duration":91704, 106 | "key":"QmNT9Tejg6t57Vs8XM2TVJXCwevWiGsZh3kB4HQXUZRK1o", 107 | "peer":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", 108 | "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", 109 | "time":"2016-08-22T13:25:27.433747513Z"} 110 | {"event":"updatePeer","system":"dht", 111 | "peerID":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", 112 | "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", 113 | "time":"2016-08-22T13:25:27.435843012Z"} 114 | … 115 | 116 | Returns 117 | ------- 118 | Iterable[dict] 119 | """ 120 | return self._client.request('/log/tail', decoder='json', 121 | stream=True, **kwargs) 122 | 123 | 124 | 125 | class RefsSection(base.SectionBase): 126 | @base.returns_multiple_items(base.ResponseBase) 127 | def __call__(self, cid: base.cid_t, **kwargs: base.CommonArgs): 128 | """Returns the hashes of objects referenced by the given hash 129 | 130 | **This API is subject to future change or removal!** You likely want to 131 | use :meth:`~ipfshttpclient.object.links` instead. 132 | 133 | .. 
code-block:: python 134 | 135 | >>> client.unstable.refs('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') 136 | [{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 … cNMV', 'Err': ''}, 137 | … 138 | {'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi … eXJY', 'Err': ''}] 139 | 140 | Parameters 141 | ---------- 142 | cid 143 | Path to the object(s) to list refs from 144 | 145 | Returns 146 | ------- 147 | list 148 | """ 149 | args = (str(cid),) 150 | return self._client.request('/refs', args, decoder='json', **kwargs) 151 | 152 | 153 | @base.returns_multiple_items(base.ResponseBase) 154 | def local(self, **kwargs: base.CommonArgs): 155 | """Returns the hashes of all local objects 156 | 157 | **This API is subject to future change or removal!** 158 | 159 | .. code-block:: python 160 | 161 | >>> client.unstable.refs.local() 162 | [{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 … cNMV', 'Err': ''}, 163 | … 164 | {'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi … eXJY', 'Err': ''}] 165 | 166 | Returns 167 | ------- 168 | list 169 | """ 170 | return self._client.request('/refs/local', decoder='json', **kwargs) 171 | 172 | 173 | 174 | class Section(base.SectionBase): 175 | """Features that are subject to change and are only provided for convenience""" 176 | log = base.SectionProperty(LogSection) 177 | refs = base.SectionProperty(RefsSection) -------------------------------------------------------------------------------- /ipfshttpclient/encoding.py: -------------------------------------------------------------------------------- 1 | """Classes for encoding and decoding datastreams into object values""" 2 | import abc 3 | import codecs 4 | import typing as ty 5 | import json 6 | 7 | from . import exceptions 8 | from . import utils 9 | 10 | 11 | if ty.TYPE_CHECKING: 12 | import typing_extensions as ty_ext 13 | else: 14 | from . import utils as ty_ext 15 | 16 | 17 | T = ty.TypeVar("T") 18 | 19 | 20 | def empty_gen() -> ty.Generator[T, None, None]: 21 | """A generator that yields nothing""" 22 | if False: # pragma: no branch 23 | yield ty.cast(T, None) # type: ignore[unreachable] 24 | 25 | 26 | class Encoding(ty.Generic[T], metaclass=abc.ABCMeta): 27 | """Abstract base for a data parser/encoder interface""" 28 | #name: str 29 | is_stream = False # type: bool 30 | 31 | @abc.abstractmethod 32 | def parse_partial(self, raw: bytes) -> ty.Generator[T, ty.Any, ty.Any]: 33 | """Parses the given data and yields all complete data sets that can 34 | be built from this. 
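For illustration, a sketch using the concrete ``Json`` codec defined further below (the chunk boundary is arbitrary; a buffered partial document is only yielded once later input completes it):

.. code-block:: python

    >>> codec = Json()
    >>> list(codec.parse_partial(b'{"a": 1}\n{"b"'))
    [{'a': 1}]
    >>> list(codec.parse_partial(b': 2}'))
    [{'b': 2}]
    >>> list(codec.parse_finalize())
    []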
35 | 36 | Raises 37 | ------ 38 | ~ipfshttpclient.exceptions.DecodingError 39 | 40 | Parameters 41 | ---------- 42 | raw 43 | Data to be parsed 44 | """ 45 | 46 | def parse_finalize(self) -> ty.Generator[T, ty.Any, ty.Any]: 47 | """Finalizes parsing based on remaining buffered data and yields the 48 | remaining data sets 49 | 50 | Raises 51 | ------ 52 | ~ipfshttpclient.exceptions.DecodingError 53 | """ 54 | return empty_gen() 55 | 56 | @abc.abstractmethod 57 | def encode(self, obj: T) -> bytes: 58 | """Serializes the given Python object to a bytes string 59 | 60 | Raises 61 | ------ 62 | ~ipfshttpclient.exceptions.EncodingError 63 | 64 | Parameters 65 | ---------- 66 | obj 67 | Object to be encoded 68 | """ 69 | 70 | 71 | class Dummy(Encoding[bytes]): 72 | """Dummy parser/encoder that does nothing""" 73 | name = "none" 74 | is_stream = True 75 | 76 | def parse_partial(self, raw: bytes) -> ty.Generator[bytes, ty.Any, ty.Any]: 77 | """Yields the data passed into this method 78 | 79 | Parameters 80 | ---------- 81 | raw 82 | Any kind of data 83 | """ 84 | yield raw 85 | 86 | def encode(self, obj: bytes) -> bytes: 87 | """Returns the bytes representation of the data passed into this 88 | function 89 | 90 | Parameters 91 | ---------- 92 | obj 93 | Any Python object 94 | """ 95 | return obj 96 | 97 | 98 | class Json(Encoding[utils.json_value_t]): 99 | """JSON parser/encoder that handles concatenated JSON""" 100 | name = 'json' 101 | 102 | def __init__(self) -> None: 103 | self._buffer = [] # type: ty.List[ty.Optional[str]] 104 | self._decoder1 = codecs.getincrementaldecoder('utf-8')() 105 | self._decoder2 = json.JSONDecoder() 106 | self._lasterror = None # type: ty.Optional[ValueError] 107 | 108 | # It works just fine and I don't want to rewrite it just because mypy doesn't understand… 109 | @ty.no_type_check 110 | def parse_partial(self, data: bytes) -> ty.Generator[utils.json_value_t, ty.Any, ty.Any]: 111 | """Incrementally decodes JSON data sets into Python objects. 112 | 113 | Raises 114 | ------ 115 | ~ipfshttpclient.exceptions.DecodingError 116 | """ 117 | try: 118 | # Python requires all JSON data to text strings 119 | lines = self._decoder1.decode(data, False).split("\n") 120 | 121 | # Add first input line to last buffer line, if applicable, to 122 | # handle cases where the JSON string has been chopped in half 123 | # at the network level due to streaming 124 | if len(self._buffer) > 0 and self._buffer[-1] is not None: 125 | self._buffer[-1] += lines[0] 126 | self._buffer.extend(lines[1:]) 127 | else: 128 | self._buffer.extend(lines) 129 | except UnicodeDecodeError as error: 130 | raise exceptions.DecodingError('json', error) from error 131 | 132 | # Process data buffer 133 | index = 0 134 | try: 135 | # Process each line as separate buffer 136 | #PERF: This way the `.lstrip()` call becomes almost always a NOP 137 | # even if it does return a different string it will only 138 | # have to allocate a new buffer for the currently processed 139 | # line. 140 | while index < len(self._buffer): 141 | while self._buffer[index]: 142 | # Make sure buffer does not start with whitespace 143 | #PERF: `.lstrip()` does not reallocate if the string does 144 | # not actually start with whitespace. 
145 | self._buffer[index] = self._buffer[index].lstrip()
146 |
147 | # Handle case where the remainder of the line contained
148 | # only whitespace
149 | if not self._buffer[index]:
150 | self._buffer[index] = None
151 | continue
152 |
153 | # Try decoding the partial data buffer and return results
154 | # from this
155 | #
156 | # Use `pragma: no branch` as the final loop iteration will always
157 | # raise if parsing didn't work out, rather than falling through
158 | # to the `yield obj` line.
159 | data = self._buffer[index]
160 | for index2 in range(index, len(self._buffer)):  # pragma: no branch
161 | # If decoding doesn't succeed with the currently
162 | # selected buffer (very unlikely with our current
163 | # class of input data) then retry with appending
164 | # any other pending pieces of input data
165 | # This will happen with JSON data that contains
166 | # arbitrary new-lines: "{1:\n2,\n3:4}"
167 | if index2 > index:
168 | data += "\n" + self._buffer[index2]
169 |
170 | try:
171 | (obj, offset) = self._decoder2.raw_decode(data)
172 | except ValueError:
173 | # Treat error as fatal if we have already added
174 | # the final buffer to the input
175 | if (index2 + 1) == len(self._buffer):
176 | raise
177 | else:
178 | index = index2
179 | break
180 |
181 | # Decoding succeeded – yield result and shorten buffer
182 | yield obj
183 | if offset < len(self._buffer[index]):
184 | self._buffer[index] = self._buffer[index][offset:]
185 | else:
186 | self._buffer[index] = None
187 | index += 1
188 | except ValueError as error:
189 | # It is unfortunately not possible to reliably detect whether
190 | # parsing ended because of an error *within* the JSON string, or
191 | # an unexpected *end* of the JSON string.
192 | # We therefore have to assume that any error that occurs here
193 | # *might* be related to the JSON parser hitting EOF and therefore
194 | # have to postpone error reporting until `parse_finalize` is
195 | # called.
196 | self._lasterror = error
197 | finally:
198 | # Remove all processed buffers
199 | del self._buffer[0:index]
200 |
201 | def parse_finalize(self) -> ty.Generator[utils.json_value_t, ty.Any, ty.Any]:
202 | """Raises errors for incomplete buffered data that could not be parsed
203 | because the end of the input data has been reached.
204 | 205 | Raises 206 | ------ 207 | ~ipfshttpclient.exceptions.DecodingError 208 | """ 209 | try: 210 | try: 211 | # Raise exception for remaining bytes in bytes decoder 212 | self._decoder1.decode(b'', True) 213 | except UnicodeDecodeError as error: 214 | raise exceptions.DecodingError('json', error) from error 215 | 216 | # Late raise errors that looked like they could have been fixed if 217 | # the caller had provided more data 218 | if self._buffer and self._lasterror: 219 | raise exceptions.DecodingError('json', self._lasterror) from self._lasterror 220 | finally: 221 | # Reset state 222 | self._buffer = [] 223 | self._lasterror = None 224 | self._decoder1.reset() 225 | 226 | return empty_gen() 227 | 228 | def encode(self, obj: utils.json_value_t) -> bytes: 229 | """Returns ``obj`` serialized as JSON formatted bytes 230 | 231 | Raises 232 | ------ 233 | ~ipfshttpclient.exceptions.EncodingError 234 | 235 | Parameters 236 | ---------- 237 | obj 238 | JSON serializable Python object 239 | """ 240 | try: 241 | result = json.dumps(obj, sort_keys=True, indent=None, 242 | separators=(',', ':'), ensure_ascii=False) 243 | return result.encode("utf-8") 244 | except (UnicodeEncodeError, TypeError) as error: 245 | raise exceptions.EncodingError('json', error) from error 246 | 247 | 248 | # encodings supported by the IPFS api (default is JSON) 249 | __encodings = { 250 | Dummy.name: Dummy, 251 | Json.name: Json, 252 | } # type: ty.Dict[str, ty.Type[Encoding[ty.Any]]] 253 | 254 | 255 | @ty.overload 256 | def get_encoding(name: ty_ext.Literal["none"]) -> Dummy: 257 | ... 258 | 259 | @ty.overload # noqa: E302 260 | def get_encoding(name: ty_ext.Literal["json"]) -> Json: 261 | ... 262 | 263 | def get_encoding(name: str) -> Encoding[ty.Any]: # noqa: E302 264 | """Returns an Encoder object for the given encoding name 265 | 266 | Raises 267 | ------ 268 | ~ipfshttpclient.exceptions.EncoderMissingError 269 | 270 | Parameters 271 | ---------- 272 | name 273 | Encoding name. 
Supported options: 274 | 275 | * ``"none"`` 276 | * ``"json"`` 277 | """ 278 | try: 279 | return __encodings[name.lower()]() 280 | except KeyError: 281 | raise exceptions.EncoderMissingError(name) from None 282 | -------------------------------------------------------------------------------- /ipfshttpclient/exceptions.py: -------------------------------------------------------------------------------- 1 | """ 2 | The class hierarchy for exceptions is:: 3 | 4 | builtins.Exception 5 | ├── builtins.Warning 6 | │ └── VersionMismatch 7 | └── Error 8 | ├── AddressError 9 | ├── EncoderError 10 | │ ├── EncoderMissingError 11 | │ ├── EncodingError 12 | │ └── DecodingError 13 | ├── CommunicationError 14 | │ ├── ProtocolError 15 | │ ├── StatusError 16 | │ ├── ErrorResponse 17 | │ │ └── PartialErrorResponse 18 | │ ├── ConnectionError 19 | │ └── TimeoutError 20 | └── MatcherSpecInvalidError 21 | 22 | """ 23 | 24 | import typing as ty 25 | 26 | import multiaddr.exceptions # type: ignore[import] 27 | 28 | 29 | class Error(Exception): 30 | """Base class for all exceptions in this module.""" 31 | __slots__ = () 32 | 33 | 34 | class AddressError(Error, multiaddr.exceptions.Error): # type: ignore[no-any-unimported, misc] 35 | """Raised when the provided daemon location Multiaddr does not match any 36 | of the supported patterns.""" 37 | __slots__ = ("addr",) 38 | 39 | addr: ty.Union[str, bytes] 40 | 41 | def __init__(self, addr: ty.Union[str, bytes]) -> None: 42 | self.addr = addr 43 | Error.__init__(self, "Unsupported Multiaddr pattern: {0!r}".format(addr)) 44 | 45 | 46 | class VersionMismatch(Warning): 47 | """Raised when daemon version is not supported by this client version.""" 48 | __slots__ = ("current", "minimum", "maximum") 49 | 50 | current: ty.Sequence[int] 51 | minimum: ty.Sequence[int] 52 | maximum: ty.Sequence[int] 53 | 54 | def __init__(self, current: ty.Sequence[int], minimum: ty.Sequence[int], 55 | maximum: ty.Sequence[int]) -> None: 56 | self.current = current 57 | self.minimum = minimum 58 | self.maximum = maximum 59 | 60 | msg = "Unsupported daemon version '{}' (not in range: {} ≤ … < {})".format( 61 | ".".join(map(str, current)), ".".join(map(str, minimum)), ".".join(map(str, maximum)) 62 | ) 63 | super().__init__(msg) 64 | 65 | 66 | ############### 67 | # encoding.py # 68 | ############### 69 | class EncoderError(Error): 70 | """Base class for all encoding and decoding related errors.""" 71 | __slots__ = ("encoder_name",) 72 | 73 | encoder_name: str 74 | 75 | def __init__(self, message: str, encoder_name: str) -> None: 76 | self.encoder_name = encoder_name 77 | 78 | super().__init__(message) 79 | 80 | 81 | class EncoderMissingError(EncoderError): 82 | """Raised when a requested encoder class does not actually exist.""" 83 | __slots__ = () 84 | 85 | def __init__(self, encoder_name: str) -> None: 86 | super().__init__("Unknown encoder: '{}'".format(encoder_name), encoder_name) 87 | 88 | 89 | class EncodingError(EncoderError): 90 | """Raised when encoding a Python object into a byte string has failed 91 | due to some problem with the input data.""" 92 | __slots__ = ("original",) 93 | 94 | original: Exception 95 | 96 | def __init__(self, encoder_name: str, original: Exception) -> None: 97 | self.original = original 98 | 99 | super().__init__("Object encoding error: {}".format(original), encoder_name) 100 | 101 | 102 | class DecodingError(EncoderError): 103 | """Raised when decoding a byte string to a Python object has failed due to 104 | some problem with the input data.""" 105 | __slots__ 
= ("original",) 106 | 107 | original: Exception 108 | 109 | def __init__(self, encoder_name: str, original: Exception) -> None: 110 | self.original = original 111 | 112 | super().__init__("Object decoding error: {}".format(original), encoder_name) 113 | 114 | 115 | ################## 116 | # filescanner.py # 117 | ################## 118 | 119 | class MatcherSpecInvalidError(Error, TypeError): 120 | """ 121 | An attempt was made to build a matcher using matcher_from_spec, but an invalid 122 | specification was provided. 123 | """ 124 | 125 | def __init__(self, invalid_spec: ty.Any) -> None: 126 | super().__init__( 127 | f"Don't know how to create a Matcher from spec {invalid_spec!r}" 128 | ) 129 | 130 | 131 | ########### 132 | # http.py # 133 | ########### 134 | class CommunicationError(Error): 135 | """Base class for all network communication related errors.""" 136 | __slots__ = ("original",) 137 | 138 | original: ty.Optional[Exception] 139 | 140 | def __init__(self, original: ty.Optional[Exception], 141 | _message: ty.Optional[str] = None) -> None: 142 | self.original = original 143 | 144 | if _message: 145 | msg = _message 146 | else: 147 | msg = "{}: {}".format(type(original).__name__, str(original)) 148 | super().__init__(msg) 149 | 150 | 151 | class ProtocolError(CommunicationError): 152 | """Raised when parsing the response from the daemon has failed. 153 | 154 | This can most likely occur if the service on the remote end isn't in fact 155 | an IPFS daemon.""" 156 | __slots__ = () 157 | 158 | 159 | class StatusError(CommunicationError): 160 | """Raised when the daemon responds with an error to our request.""" 161 | __slots__ = () 162 | 163 | 164 | class ErrorResponse(StatusError): 165 | """Raised when the daemon has responded with an error message because the 166 | requested operation could not be carried out.""" 167 | __slots__ = () 168 | 169 | def __init__(self, message: str, original: ty.Optional[Exception]) -> None: 170 | super().__init__(original, message) 171 | 172 | 173 | class PartialErrorResponse(ErrorResponse): 174 | """Raised when the daemon has responded with an error message after having 175 | already returned some data.""" 176 | __slots__ = () 177 | 178 | def __init__(self, message: str, original: ty.Optional[Exception] = None) -> None: 179 | super().__init__(message, original) 180 | 181 | 182 | class ConnectionError(CommunicationError): 183 | """Raised when connecting to the service has failed on the socket layer.""" 184 | __slots__ = () 185 | 186 | 187 | class TimeoutError(CommunicationError): 188 | """Raised when the daemon didn't respond in time.""" 189 | __slots__ = () 190 | -------------------------------------------------------------------------------- /ipfshttpclient/http.py: -------------------------------------------------------------------------------- 1 | """Default HTTP client selection proxy""" 2 | import os 3 | import typing as ty 4 | 5 | from .http_common import ( 6 | ClientSyncBase, 7 | StreamDecodeIteratorSync, 8 | 9 | addr_t, auth_t, cookies_t, headers_t, params_t, reqdata_sync_t, timeout_t, 10 | workarounds_t, 11 | ) 12 | 13 | 14 | __all__ = ( 15 | "addr_t", "auth_t", "cookies_t", "headers_t", "params_t", "reqdata_sync_t", 16 | "timeout_t", "workarounds_t", 17 | 18 | "build_client_sync", 19 | "StreamDecodeIteratorSync", 20 | ) 21 | 22 | PREFER_HTTPX = (os.environ.get("PY_IPFS_HTTP_CLIENT_PREFER_HTTPX", "no").lower() 23 | not in ("0", "f", "false", "n", "no")) 24 | 25 | if PREFER_HTTPX: # pragma: http-backend=httpx 26 | try: 27 | from . 
import http_httpx as _backend 28 | except ImportError: 29 | from . import http_requests as _backend # type: ignore[no-redef] 30 | else: # pragma: http-backend=requests 31 | try: 32 | from . import http_requests as _backend # type: ignore[no-redef] 33 | except ImportError: # pragma: no cover 34 | from . import http_httpx as _backend 35 | 36 | 37 | def build_client_sync( # type: ignore[no-any-unimported] 38 | addr: addr_t, 39 | base: str, 40 | offline: bool = False, 41 | auth: auth_t = None, 42 | cookies: cookies_t = None, 43 | headers: headers_t = None, 44 | timeout: timeout_t = 120 45 | ) -> ClientSyncBase[ty.Any]: 46 | 47 | return _backend.ClientSync( 48 | addr=addr, 49 | base=base, 50 | offline=offline, 51 | auth=auth, 52 | cookies=cookies, 53 | headers=headers or ty.cast(ty.Dict[str, str], {}), 54 | timeout=timeout 55 | ) 56 | -------------------------------------------------------------------------------- /ipfshttpclient/http_httpx.py: -------------------------------------------------------------------------------- 1 | """HTTP client for API requests based on HTTPx 2 | 3 | This will be supplemented by an asynchronous version based on HTTPx's 4 | asynchronous API soon™. 5 | """ 6 | 7 | import math 8 | import socket 9 | import typing as ty 10 | 11 | import httpcore 12 | import httpx 13 | 14 | from . import encoding 15 | from . import exceptions 16 | from .http_common import ( 17 | ClientSyncBase, multiaddr_to_url_data, 18 | 19 | addr_t, auth_t, cookies_t, headers_t, params_t, reqdata_sync_t, timeout_t, 20 | Closable, 21 | ) 22 | 23 | 24 | if ty.TYPE_CHECKING: 25 | import httpx._types 26 | import typing_extensions 27 | 28 | # By using the precise types from HTTPx we'll also get type errors if our 29 | # types become somehow incompatible with the ones from that library 30 | RequestArgs = typing_extensions.TypedDict("RequestArgs", { 31 | "auth": "httpx._types.AuthTypes", 32 | "cookies": "httpx._types.CookieTypes", 33 | "headers": "httpx._types.HeaderTypes", 34 | "timeout": "httpx._types.TimeoutTypes", 35 | "params": "httpx._types.QueryParamTypes", 36 | }, total=False) 37 | else: 38 | RequestArgs = ty.Dict[str, ty.Any] 39 | 40 | 41 | def map_args_to_httpx( 42 | *, 43 | auth: auth_t = None, 44 | cookies: cookies_t = None, 45 | headers: headers_t = None, 46 | params: params_t = None, 47 | timeout: timeout_t = None, 48 | ) -> RequestArgs: 49 | kwargs: RequestArgs = {} 50 | 51 | if auth is not None: 52 | kwargs["auth"] = auth 53 | 54 | if cookies is not None: 55 | kwargs["cookies"] = cookies 56 | 57 | if headers is not None: 58 | kwargs["headers"] = headers 59 | 60 | if timeout is not None: 61 | if isinstance(timeout, tuple): 62 | kwargs["timeout"] = ( 63 | timeout[0] if timeout[0] < math.inf else None, 64 | timeout[1] if timeout[1] < math.inf else None, 65 | None, 66 | None, 67 | ) 68 | else: 69 | kwargs["timeout"] = timeout if timeout < math.inf else None 70 | 71 | if params is not None: 72 | kwargs["params"] = list(params) 73 | 74 | return kwargs 75 | 76 | 77 | class ClientSync(ClientSyncBase[httpx.Client]): 78 | __slots__ = ("_session_base", "_session_kwargs", "_session_laddr", "_session_uds_path") 79 | _session_base: "httpx._types.URLTypes" 80 | _session_kwargs: RequestArgs 81 | _session_laddr: ty.Optional[str] 82 | _session_uds_path: ty.Optional[str] 83 | 84 | def _init(self, addr: addr_t, base: str, *, # type: ignore[no-any-unimported] 85 | auth: auth_t, 86 | cookies: cookies_t, 87 | headers: headers_t, 88 | params: params_t, 89 | timeout: timeout_t) -> None: 90 | base_url: str 91 | 
uds_path: ty.Optional[str] 92 | family: socket.AddressFamily 93 | host_numeric: bool 94 | base_url, uds_path, family, host_numeric = multiaddr_to_url_data(addr, base) 95 | 96 | self._session_laddr = None 97 | self._session_uds_path = None 98 | if family != socket.AF_UNSPEC: 99 | if family == socket.AF_INET: 100 | self._session_laddr = "0.0.0.0" 101 | elif family == socket.AF_INET6: 102 | self._session_laddr = "::" 103 | elif family == socket.AF_UNIX: 104 | self._session_uds_path = uds_path 105 | else: 106 | assert False, ("multiaddr_to_url_data should only return a socket " 107 | "address family of AF_INET, AF_INET6, AF_UNIX or AF_UNSPEC") 108 | 109 | self._session_base = base_url 110 | self._session_kwargs = map_args_to_httpx( 111 | auth=auth, 112 | cookies=cookies, 113 | headers=headers, 114 | params=params, 115 | timeout=timeout, 116 | ) 117 | 118 | def _make_session(self) -> httpx.Client: 119 | connection_pool = httpcore.SyncConnectionPool( 120 | local_address = self._session_laddr, 121 | uds = self._session_uds_path, 122 | 123 | #XXX: Argument values duplicated from httpx._client.Client._init_transport: 124 | keepalive_expiry = 5.0, #XXX: Value duplicated from httpx._client.KEEPALIVE_EXPIRY 125 | max_connections = 100, #XXX: Value duplicated from httpx._config.DEFAULT_LIMITS 126 | max_keepalive_connections = 20, #XXX: Value duplicated from httpx._config.DEFAULT_LIMITS 127 | ssl_context = httpx.create_ssl_context(trust_env=True), 128 | ) 129 | return httpx.Client(**self._session_kwargs, 130 | base_url = self._session_base, 131 | transport = connection_pool) 132 | 133 | def _do_raise_for_status(self, response: httpx.Response) -> None: 134 | try: 135 | response.raise_for_status() 136 | except httpx.HTTPError as error: 137 | content: ty.List[object] = [] 138 | try: 139 | decoder: encoding.Json = encoding.get_encoding("json") 140 | for chunk in response.iter_bytes(): 141 | content += list(decoder.parse_partial(chunk)) 142 | content += list(decoder.parse_finalize()) 143 | except exceptions.DecodingError: 144 | pass 145 | 146 | # If we have decoded an error response from the server, 147 | # use that as the exception message; otherwise, just pass 148 | # the exception on to the caller.
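# For orientation, a sketch of the error body a go-IPFS daemon typically
# returns (field values invented for illustration, not from a real node):
#
#     {"Message": "invalid path \"...\"", "Code": 0, "Type": "error"}
#
# i.e. exactly one JSON object carrying a "Message" key, which is what the
# check below looks for.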
149 | if len(content) == 1 \ 150 | and isinstance(content[0], dict) \ 151 | and "Message" in content[0]: 152 | msg: str = content[0]["Message"] 153 | raise exceptions.ErrorResponse(msg, error) from error 154 | else: 155 | raise exceptions.StatusError(error) from error 156 | 157 | def _request( 158 | self, method: str, path: str, params: ty.Sequence[ty.Tuple[str, str]], *, 159 | auth: auth_t, 160 | data: reqdata_sync_t, 161 | headers: headers_t, 162 | timeout: timeout_t, 163 | chunk_size: ty.Optional[int], 164 | ) -> ty.Tuple[ty.List[Closable], ty.Generator[bytes, ty.Any, ty.Any]]: 165 | # Ensure path is relative so that it is resolved relative to the base 166 | while path.startswith("/"): 167 | path = path[1:] 168 | 169 | # Determine session object to use; initialize `closables` first so the 170 | # cleanup handler below can always iterate it, even on early failure 171 | closables: ty.List[Closable] = [] 172 | session: httpx.Client 173 | try: 174 | closables, session = self._access_session() 175 | # Do HTTP request (synchronously) and map exceptions 176 | try: 177 | res: httpx.Response = session.stream( 178 | method=method, 179 | url=path, 180 | **map_args_to_httpx( 181 | params=params, 182 | auth=auth, 183 | headers=headers, 184 | timeout=timeout, 185 | ), 186 | data=data, 187 | ).__enter__() 188 | closables.insert(0, res) 189 | except (httpx.ConnectTimeout, httpx.ReadTimeout, httpx.WriteTimeout) as error: 190 | raise exceptions.TimeoutError(error) from error 191 | except httpx.NetworkError as error: 192 | raise exceptions.ConnectionError(error) from error 193 | except httpx.ProtocolError as error: 194 | raise exceptions.ProtocolError(error) from error 195 | 196 | # Raise exception for response status 197 | # (optionally incorporating the response message, if available) 198 | self._do_raise_for_status(res) 199 | 200 | return closables, res.iter_bytes() # type: ignore[return-value] #FIXME: httpx 201 | except: 202 | for closable in closables: 203 | closable.close() 204 | raise -------------------------------------------------------------------------------- /ipfshttpclient/http_requests.py: -------------------------------------------------------------------------------- 1 | """HTTP client for API requests based on good old requests library 2 | 3 | This exists mainly for Python 3.5 compatibility. 4 | """ 5 | 6 | import math 7 | import http.client 8 | import os 9 | import typing as ty 10 | import urllib.parse 11 | 12 | import urllib3.exceptions # type: ignore[import] 13 | 14 | from . import encoding 15 | from . import exceptions 16 | from .http_common import ( 17 | ClientSyncBase, multiaddr_to_url_data, 18 | 19 | addr_t, auth_t, cookies_t, headers_t, params_t, reqdata_sync_t, timeout_t, 20 | Closable, 21 | ) 22 | 23 | PATCH_REQUESTS = (os.environ.get("PY_IPFS_HTTP_CLIENT_PATCH_REQUESTS", "yes").lower() 24 | not in ("false", "no")) 25 | if PATCH_REQUESTS: 26 | from .
import requests_wrapper as requests 27 | elif not ty.TYPE_CHECKING: # pragma: no cover (always enabled in production) 28 | import requests 29 | 30 | 31 | def map_args_to_requests( 32 | *, 33 | auth: auth_t = None, 34 | cookies: cookies_t = None, 35 | headers: headers_t = None, 36 | params: params_t = None, 37 | timeout: timeout_t = None 38 | ) -> ty.Dict[str, ty.Any]: 39 | kwargs = {} # type: ty.Dict[str, ty.Any] 40 | 41 | if auth is not None: 42 | kwargs["auth"] = auth 43 | 44 | if cookies is not None: 45 | kwargs["cookies"] = cookies 46 | 47 | if headers is not None: 48 | kwargs["headers"] = headers 49 | 50 | if timeout is not None: 51 | if isinstance(timeout, tuple): 52 | timeout_ = ( 53 | timeout[0] if timeout[0] < math.inf else None, 54 | timeout[1] if timeout[1] < math.inf else None, 55 | ) # type: ty.Union[ty.Optional[float], ty.Tuple[ty.Optional[float], ty.Optional[float]]] 56 | else: 57 | timeout_ = timeout if timeout < math.inf else None 58 | kwargs["timeout"] = timeout_ 59 | 60 | if params is not None: 61 | kwargs["params"] = {} 62 | for name, value in params: 63 | if name not in kwargs["params"]: 64 | kwargs["params"][name] = value 65 | elif not isinstance(kwargs["params"][name], list): 66 | kwargs["params"][name] = [kwargs["params"][name], value] 67 | else: 68 | kwargs["params"][name].append(value) 69 | 70 | return kwargs 71 | 72 | 73 | class ClientSync(ClientSyncBase[requests.Session]): # type: ignore[name-defined] 74 | __slots__ = ("_base_url", "_default_timeout", "_request_proxies", "_session_props") 75 | #_base_url: str 76 | #_default_timeout: timeout_t 77 | #_request_proxies: ty.Optional[ty.Dict[str, str]] 78 | #_session_props: ty.Dict[str, ty.Any] 79 | 80 | def _init(self, addr: addr_t, base: str, *, # type: ignore[no-any-unimported] 81 | auth: auth_t, 82 | cookies: cookies_t, 83 | headers: headers_t, 84 | params: params_t, 85 | timeout: timeout_t) -> None: 86 | self._base_url, uds_path, family, host_numeric = multiaddr_to_url_data(addr, base) 87 | 88 | self._session_props = map_args_to_requests( 89 | auth=auth, 90 | cookies=cookies, 91 | headers=headers, 92 | params=params, 93 | ) 94 | self._default_timeout = timeout 95 | if PATCH_REQUESTS: # pragma: no branch (always enabled in production) 96 | self._session_props["family"] = family 97 | 98 | # Ensure that no proxy lookups are done for the UDS pseudo-hostname 99 | # 100 | # I'm well aware of the `.proxies` attribute of the session object: As it turns out, 101 | # setting *that* attribute will *not* bypass system proxy resolution – only the 102 | # per-request keyword-argument can do *that*…! 
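# Sketch of the mapping built below for a hypothetical daemon socket at
# /run/ipfs/api.sock (path assumed purely for illustration):
#
#     {"no_proxy": "%2Frun%2Fipfs%2Fapi.sock"}
#
# The percent-quoted UDS path equals the pseudo-hostname embedded in the
# request URL, so proxy resolution is bypassed for exactly that host.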
103 | self._request_proxies = None # type: ty.Optional[ty.Dict[str, str]] 104 | if uds_path: 105 | self._request_proxies = { 106 | "no_proxy": urllib.parse.quote(uds_path, safe=""), 107 | } 108 | 109 | def _make_session(self) -> requests.Session: # type: ignore[name-defined] 110 | session = requests.Session() # type: ignore[attr-defined] 111 | try: 112 | for name, value in self._session_props.items(): 113 | setattr(session, name, value) 114 | return session 115 | # It is very unlikely that this would ever error, but if it does try our 116 | # best to prevent a leak 117 | except: # pragma: no cover 118 | session.close() 119 | raise 120 | 121 | def _do_raise_for_status(self, response: requests.Response) -> None: # type: ignore[name-defined] 122 | try: 123 | response.raise_for_status() 124 | except requests.exceptions.HTTPError as error: # type: ignore[attr-defined] 125 | content = [] 126 | try: 127 | decoder = encoding.get_encoding("json") 128 | for chunk in response.iter_content(chunk_size=None): 129 | content += list(decoder.parse_partial(chunk)) 130 | content += list(decoder.parse_finalize()) 131 | except exceptions.DecodingError: 132 | pass 133 | 134 | # If we have decoded an error response from the server, 135 | # use that as the exception message; otherwise, just pass 136 | # the exception on to the caller. 137 | if len(content) == 1 \ 138 | and isinstance(content[0], dict) \ 139 | and "Message" in content[0]: 140 | msg = content[0]["Message"] 141 | raise exceptions.ErrorResponse(msg, error) from error 142 | else: 143 | raise exceptions.StatusError(error) from error 144 | 145 | def _request( 146 | self, method: str, path: str, params: ty.Sequence[ty.Tuple[str, str]], *, 147 | auth: auth_t, 148 | data: reqdata_sync_t, 149 | headers: headers_t, 150 | timeout: timeout_t, 151 | chunk_size: ty.Optional[int] 152 | ) -> ty.Tuple[ty.List[Closable], ty.Generator[bytes, ty.Any, ty.Any]]: 153 | # Ensure path is relative so that it is resolved relative to the base 154 | while path.startswith("/"): 155 | path = path[1:] 156 | 157 | url = urllib.parse.urljoin(self._base_url, path) 158 | 159 | # Determine session object to use; initialize `closables` first so the cleanup handler below can always iterate it 160 | closables = [] # type: ty.List[Closable] 161 | try: 162 | closables, session = self._access_session() 163 | # Do HTTP request (synchronously) and map exceptions 164 | try: 165 | res = session.request( 166 | method=method, 167 | url=url, 168 | **map_args_to_requests( 169 | params=params, 170 | auth=auth, 171 | headers=headers, 172 | timeout=(timeout if timeout is not None else self._default_timeout), 173 | ), 174 | proxies=self._request_proxies, 175 | data=data, 176 | stream=True, 177 | ) 178 | closables.insert(0, res) 179 | except (requests.ConnectTimeout, requests.Timeout) as error: # type: ignore[attr-defined] 180 | raise exceptions.TimeoutError(error) from error 181 | except requests.ConnectionError as error: # type: ignore[attr-defined] 182 | # Report protocol violations separately 183 | # 184 | # This used to happen because requests wouldn't catch 185 | # `http.client.HTTPException` at all; now we recreate 186 | # this behaviour manually if we detect it. 187 | if isinstance(error.args[0], urllib3.exceptions.ProtocolError): 188 | raise exceptions.ProtocolError(error.args[0]) from error.args[0] 189 | 190 | raise exceptions.ConnectionError(error) from error 191 | # Looks like the following error doesn't happen anymore with modern requests?
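# Summary of the exception mapping implemented by this try-block
# (left: what requests raises, right: what users of this library see):
#
#     requests.ConnectTimeout / requests.Timeout   -> exceptions.TimeoutError
#     requests.ConnectionError wrapping a urllib3
#         ProtocolError                            -> exceptions.ProtocolError
#     any other requests.ConnectionError           -> exceptions.ConnectionError
#     http.client.HTTPException (legacy case)      -> exceptions.ProtocolError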
192 | except http.client.HTTPException as error: # pragma: no cover 193 | raise exceptions.ProtocolError(error) from error 194 | 195 | # Raise exception for response status 196 | # (optionally incorporating the response message, if available) 197 | self._do_raise_for_status(res) 198 | 199 | return closables, res.iter_content(chunk_size=chunk_size) 200 | except: 201 | for closable in closables: 202 | closable.close() 203 | raise -------------------------------------------------------------------------------- /ipfshttpclient/requests_wrapper.py: -------------------------------------------------------------------------------- 1 | # type: ignore 2 | """Exposes the full ``requests`` HTTP library API, while adding an extra 3 | ``family`` parameter to all HTTP request operations that may be used to restrict 4 | the address family used when resolving a domain-name to an IP address. 5 | """ 6 | import socket 7 | import urllib.parse 8 | 9 | import requests 10 | import requests.adapters 11 | import urllib3 12 | import urllib3.connection 13 | import urllib3.exceptions 14 | import urllib3.poolmanager 15 | import urllib3.util.connection 16 | 17 | AF2NAME = { 18 | int(socket.AF_INET): "ip4", 19 | int(socket.AF_INET6): "ip6", 20 | } 21 | if hasattr(socket, "AF_UNIX"): 22 | AF2NAME[int(socket.AF_UNIX)] = "unix" 23 | NAME2AF = {name: af for af, name in AF2NAME.items()} 24 | 25 | 26 | # This function is copied from urllib3/util/connection.py (that in turn copied 27 | # it from socket.py in the Python 2.7 standard library test suite) and accepts 28 | # an extra `family` parameter that specifies the allowed address families for 29 | # name resolution. 30 | # 31 | # The entire remainder of this file after this only exists to ensure that this 32 | # `family` parameter is exposed all the way up to request's `Session` interface, 33 | # storing it as part of the URL scheme while traversing most of the layers. 34 | def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, 35 | source_address=None, socket_options=None, 36 | family=socket.AF_UNSPEC): 37 | host, port = address 38 | if host.startswith('['): 39 | host = host.strip('[]') 40 | err = None 41 | 42 | if not family or family == socket.AF_UNSPEC: 43 | family = urllib3.util.connection.allowed_gai_family() 44 | 45 | # Extension for Unix domain sockets 46 | if hasattr(socket, "AF_UNIX") and family == socket.AF_UNIX: 47 | gai_result = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", host)] 48 | else: 49 | gai_result = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM) 50 | 51 | for res in gai_result: 52 | af, socktype, proto, canonname, sa = res 53 | sock = None 54 | try: 55 | sock = socket.socket(af, socktype, proto) 56 | 57 | # If provided, set socket level options before connecting. 
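# (The AF_UNIX guard on the next line is needed because the usual default
# options, such as TCP_NODELAY, are TCP-level settings that would fail when
# applied to a Unix domain socket.)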
58 | if socket_options is not None and family != getattr(socket, "AF_UNIX", NotImplemented): 59 | for opt in socket_options: 60 | sock.setsockopt(*opt) 61 | 62 | if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: 63 | sock.settimeout(timeout) 64 | if source_address: 65 | sock.bind(source_address) 66 | sock.connect(sa) 67 | return sock 68 | except OSError as e: 69 | err = e 70 | if sock is not None: 71 | sock.close() 72 | sock = None 73 | 74 | if err is not None: 75 | raise err 76 | 77 | raise OSError("getaddrinfo returns an empty list") 78 | 79 | 80 | # Override the `urllib3` low-level Connection objects that do the actual work 81 | # of speaking HTTP 82 | def _kw_scheme_to_family(kw, base_scheme): 83 | family = socket.AF_UNSPEC 84 | scheme = kw.pop("scheme", None) 85 | if isinstance(scheme, str): 86 | parts = scheme.rsplit("+", 1) 87 | if len(parts) == 2 and parts[0] == base_scheme: 88 | family = NAME2AF.get(parts[1], family) 89 | return family 90 | 91 | 92 | class ConnectionOverrideMixin: 93 | def _new_conn(self): 94 | extra_kw = { 95 | "family": self.family 96 | } 97 | if self.source_address: 98 | extra_kw['source_address'] = self.source_address 99 | 100 | if self.socket_options: 101 | extra_kw['socket_options'] = self.socket_options 102 | 103 | try: 104 | dns_host = getattr(self, "_dns_host", self.host) 105 | if hasattr(socket, "AF_UNIX") and extra_kw["family"] == socket.AF_UNIX: 106 | dns_host = urllib.parse.unquote(dns_host) 107 | conn = create_connection( 108 | (dns_host, self.port), self.timeout, **extra_kw) 109 | except socket.timeout: 110 | raise urllib3.exceptions.ConnectTimeoutError( 111 | self, "Connection to %s timed out. (connect timeout=%s)" % 112 | (self.host, self.timeout)) 113 | except OSError as e: 114 | raise urllib3.exceptions.NewConnectionError( 115 | self, "Failed to establish a new connection: %s" % e) 116 | 117 | return conn 118 | 119 | 120 | class HTTPConnection(ConnectionOverrideMixin, urllib3.connection.HTTPConnection): 121 | def __init__(self, *args, **kw): 122 | self.family = _kw_scheme_to_family(kw, "http") 123 | super().__init__(*args, **kw) 124 | 125 | 126 | class HTTPSConnection(ConnectionOverrideMixin, urllib3.connection.HTTPSConnection): 127 | def __init__(self, *args, **kw): 128 | self.family = _kw_scheme_to_family(kw, "https") 129 | super().__init__(*args, **kw) 130 | 131 | 132 | # Override the higher-level `urllib3` ConnectionPool objects that instantiate 133 | # one or more Connection objects and dispatch work between them 134 | class HTTPConnectionPool(urllib3.HTTPConnectionPool): 135 | ConnectionCls = HTTPConnection 136 | 137 | 138 | class HTTPSConnectionPool(urllib3.HTTPSConnectionPool): 139 | ConnectionCls = HTTPSConnection 140 | 141 | 142 | # Override the highest-level `urllib3` PoolManager to also properly support the 143 | # address family extended scheme values in URLs and pass these scheme values on 144 | # to the individual ConnectionPool objects 145 | class PoolManager(urllib3.PoolManager): 146 | def __init__(self, *args, **kwargs): 147 | super().__init__(*args, **kwargs) 148 | 149 | # Additionally to adding our variant of the usual HTTP and HTTPS 150 | # pool classes, also add these for some variants of the default schemes 151 | # that are limited to some specific address family only 152 | self.pool_classes_by_scheme = {} 153 | for scheme, ConnectionPool in (("http", HTTPConnectionPool), ("https", HTTPSConnectionPool)): 154 | self.pool_classes_by_scheme[scheme] = ConnectionPool 155 | for name in AF2NAME.values(): 156 | 
self.pool_classes_by_scheme["{0}+{1}".format(scheme, name)] = ConnectionPool 157 | self.key_fn_by_scheme["{0}+{1}".format(scheme, name)] = self.key_fn_by_scheme[scheme] 158 | 159 | # These next two are only required to ensure that our custom `scheme` values 160 | # will be passed down to the `*ConnectionPool`s and finally to the actual 161 | # `*Connection`s as a parameter 162 | def _new_pool(self, scheme, host, port, request_context=None): 163 | # Copied from `urllib3` to *not* suppress the `scheme` parameter 164 | pool_cls = self.pool_classes_by_scheme[scheme] 165 | if request_context is None: 166 | request_context = self.connection_pool_kw.copy() 167 | 168 | for key in ('host', 'port'): 169 | request_context.pop(key, None) 170 | 171 | if scheme == "http" or scheme.startswith("http+"): 172 | for kw in urllib3.poolmanager.SSL_KEYWORDS: 173 | request_context.pop(kw, None) 174 | 175 | return pool_cls(host, port, **request_context) 176 | 177 | def connection_from_pool_key(self, pool_key, request_context=None): 178 | # Copied from `urllib3` so that we continue to ensure that this will 179 | # call `_new_pool` 180 | with self.pools.lock: 181 | pool = self.pools.get(pool_key) 182 | if pool: 183 | return pool 184 | 185 | scheme = request_context['scheme'] 186 | host = request_context['host'] 187 | port = request_context['port'] 188 | pool = self._new_pool(scheme, host, port, request_context=request_context) 189 | self.pools[pool_key] = pool 190 | return pool 191 | 192 | 193 | # Override the lower-level `requests` adapter that invokes the `urllib3` 194 | # PoolManager objects 195 | class HTTPAdapter(requests.adapters.HTTPAdapter): 196 | def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs): 197 | # save these values for pickling (copied from `requests`) 198 | self._pool_connections = connections 199 | self._pool_maxsize = maxsize 200 | self._pool_block = block 201 | 202 | self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, 203 | block=block, strict=True, **pool_kwargs) 204 | 205 | 206 | # Override the highest-level `requests` Session object to accept the `family` 207 | # parameter for any request and encode its value as part of the URL scheme 208 | # when passing it down to the adapter 209 | class Session(requests.Session): 210 | def __init__(self, *args, **kwargs): 211 | super().__init__(*args, **kwargs) 212 | self.family = socket.AF_UNSPEC 213 | 214 | # Additionally to mounting our variant of the usual HTTP and HTTPS 215 | # adapter, also mount it for some variants of the default schemes that 216 | # are limited to some specific address family only 217 | adapter = HTTPAdapter() 218 | for scheme in ("http", "https"): 219 | self.mount("{0}://".format(scheme), adapter) 220 | for name in AF2NAME.values(): 221 | self.mount("{0}+{1}://".format(scheme, name), adapter) 222 | 223 | def request(self, method, url, *args, **kwargs): 224 | family = kwargs.pop("family", self.family) 225 | if family != socket.AF_UNSPEC: 226 | # Inject provided address family value as extension to scheme 227 | url = urllib.parse.urlparse(url) 228 | url = url._replace(scheme="{0}+{1}".format(url.scheme, AF2NAME[int(family)])) 229 | url = url.geturl() 230 | return super().request(method, url, *args, **kwargs) 231 | 232 | 233 | session = Session 234 | 235 | 236 | # Import other `requests` stuff to make the top-level API of this module more compatible 237 | from requests import ( 238 | __title__, __description__, __url__, __version__, __build__, __author__, 239 | __author_email__, __license__,
__copyright__, __cake__, 240 | 241 | exceptions, utils, packages, codes, 242 | Request, Response, PreparedRequest, 243 | RequestException, Timeout, URLRequired, TooManyRedirects, HTTPError, 244 | ConnectionError, FileModeWarning, ConnectTimeout, ReadTimeout 245 | ) 246 | 247 | 248 | # Re-implement the top-level “session-less” API 249 | def request(method, url, **kwargs): 250 | with Session() as session: 251 | return session.request(method=method, url=url, **kwargs) 252 | 253 | 254 | def get(url, params=None, **kwargs): 255 | kwargs.setdefault('allow_redirects', True) 256 | return request('get', url, params=params, **kwargs) 257 | 258 | 259 | def options(url, **kwargs): 260 | kwargs.setdefault('allow_redirects', True) 261 | return request('options', url, **kwargs) 262 | 263 | 264 | def head(url, **kwargs): 265 | kwargs.setdefault('allow_redirects', False) 266 | return request('head', url, **kwargs) 267 | 268 | 269 | def post(url, data=None, json=None, **kwargs): 270 | return request('post', url, data=data, json=json, **kwargs) 271 | 272 | 273 | def put(url, data=None, **kwargs): 274 | return request('put', url, data=data, **kwargs) 275 | 276 | 277 | def patch(url, data=None, **kwargs): 278 | return request('patch', url, data=data, **kwargs) 279 | 280 | 281 | def delete(url, **kwargs): 282 | return request('delete', url, **kwargs) 283 | -------------------------------------------------------------------------------- /ipfshttpclient/utils.py: -------------------------------------------------------------------------------- 1 | """A module to handle generic operations. 2 | """ 3 | import mimetypes 4 | import os 5 | import sys 6 | import typing as ty 7 | from functools import wraps 8 | 9 | if ty.TYPE_CHECKING: 10 | import typing_extensions as ty_ext 11 | else: 12 | ty_ext = ty 13 | 14 | AnyStr = ty.TypeVar('AnyStr', bytes, str) 15 | T = ty.TypeVar("T") 16 | 17 | if sys.version_info >= (3, 8): #PY38+ 18 | Literal = ty_ext.Literal 19 | Protocol = ty_ext.Protocol 20 | 21 | Literal_True = ty.Literal[True] 22 | Literal_False = ty.Literal[False] 23 | else: #PY37- 24 | class Literal(ty.Generic[T]): 25 | ... 26 | 27 | class Protocol: 28 | ... 29 | 30 | Literal_True = Literal_False = bool 31 | 32 | # `os.PathLike` only has a type param while type checking 33 | if ty.TYPE_CHECKING: 34 | PathLike = os.PathLike 35 | PathLike_str = os.PathLike[str] 36 | PathLike_bytes = os.PathLike[bytes] 37 | else: 38 | class PathLike(Protocol, ty.Generic[AnyStr]): 39 | def __fspath__(self) -> AnyStr: 40 | ... 
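# Any object implementing __fspath__ satisfies this protocol structurally;
# a minimal sketch (hypothetical class, not used anywhere in this codebase):
#
#     class ExamplePath:
#         def __fspath__(self) -> str:
#             return "/tmp/example"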
41 | 42 | PathLike_str = PathLike_bytes = os.PathLike 43 | 44 | path_str_t = ty.Union[str, PathLike_str] 45 | path_bytes_t = ty.Union[bytes, PathLike_bytes] 46 | path_t = ty.Union[path_str_t, path_bytes_t] 47 | AnyPath = ty.TypeVar("AnyPath", str, PathLike_str, bytes, PathLike_bytes) 48 | 49 | path_types = (str, bytes, os.PathLike,) 50 | path_obj_types = (os.PathLike,) 51 | 52 | 53 | # work around GH/mypy/mypy#731: no recursive structural types yet 54 | json_primitive_t = ty.Union[bool, float, int, str] 55 | 56 | 57 | # noqa: N802 58 | class json_list_t(ty.List["json_value_t"]): 59 | pass 60 | 61 | 62 | # noqa: N802 63 | class json_dict_t(ty.Dict[str, "json_value_t"]): 64 | pass 65 | 66 | 67 | json_value_t = ty.Union[ 68 | json_primitive_t, 69 | json_list_t, 70 | json_dict_t 71 | ] 72 | 73 | 74 | def maybe_fsencode(val: str, ref: AnyStr) -> AnyStr: 75 | """Encodes the string *val* using the system filesystem encoding if *ref* 76 | is of type :any:`bytes`""" 77 | 78 | if isinstance(ref, bytes): 79 | return os.fsencode(val) 80 | else: 81 | return val 82 | 83 | 84 | def guess_mimetype(filename: str) -> str: 85 | """Guesses the mimetype of a file based on the given ``filename``. 86 | 87 | .. code-block:: python 88 | 89 | >>> guess_mimetype('example.txt') 90 | 'text/plain' 91 | >>> guess_mimetype('/foo/bar/example') 92 | 'application/octet-stream' 93 | 94 | Parameters 95 | ---------- 96 | filename 97 | The file name or path for which the mimetype is to be guessed 98 | """ 99 | fn = os.path.basename(filename) 100 | return mimetypes.guess_type(fn)[0] or 'application/octet-stream' 101 | 102 | 103 | clean_file_t = ty.Union[path_t, ty.IO[bytes], int] 104 | 105 | 106 | def clean_file(file: clean_file_t) -> ty.Tuple[ty.IO[bytes], bool]: 107 | """Returns a tuple containing a file-like object and a close indicator 108 | 109 | This ensures the given file is opened and keeps track of files that should 110 | be closed after use (files that were not open prior to this function call). 111 | 112 | Raises 113 | ------ 114 | OSError 115 | Accessing the given file path failed 116 | 117 | Parameters 118 | ---------- 119 | file 120 | A filepath or file-like object that may or may not need to be 121 | opened 122 | """ 123 | if isinstance(file, int): 124 | return os.fdopen(file, 'rb', closefd=False), True 125 | elif not hasattr(file, 'read'): 126 | file = ty.cast(path_t, file) # Cannot be ty.IO[bytes] without `.read()` 127 | return open(file, 'rb'), True 128 | else: 129 | file = ty.cast(ty.IO[bytes], file) # Must be ty.IO[bytes] 130 | return file, False 131 | 132 | 133 | def clean_files(files: ty.Union[clean_file_t, ty.Iterable[clean_file_t]]) \ 134 | -> ty.Generator[ty.Tuple[ty.IO[bytes], bool], ty.Any, ty.Any]: 135 | """Generates tuples with a file-like object and a close indicator 136 | 137 | This is a generator of tuples, where the first element is the file object 138 | and the second element is a boolean which is True if this module opened the 139 | file (and thus should close it). 
140 | 141 | Raises 142 | ------ 143 | OSError 144 | Accessing the given file path failed 145 | 146 | Parameters 147 | ---------- 148 | files 149 | Collection or single instance of a filepath and file-like object 150 | """ 151 | if not isinstance(files, path_types) and not hasattr(files, "read"): 152 | for f in ty.cast(ty.Iterable[clean_file_t], files): 153 | yield clean_file(f) 154 | else: 155 | yield clean_file(ty.cast(clean_file_t, files)) 156 | 157 | 158 | F = ty.TypeVar("F", bound=ty.Callable[..., ty.Dict[str, ty.Any]]) 159 | 160 | 161 | class return_field(ty.Generic[T]): 162 | """Decorator that returns the given field of a json response. 163 | 164 | Parameters 165 | ---------- 166 | field 167 | The response field to be returned for all invocations 168 | """ 169 | __slots__ = ("field",) 170 | 171 | field: str 172 | 173 | def __init__(self, field: str) -> None: 174 | self.field = field 175 | 176 | def __call__(self, cmd: F) -> ty.Callable[..., T]: 177 | """Wraps a command so that only a specified field is returned. 178 | 179 | Parameters 180 | ---------- 181 | cmd 182 | A command that is intended to be wrapped 183 | """ 184 | @wraps(cmd) 185 | def wrapper(*args: ty.Any, **kwargs: ty.Any) -> T: 186 | """Returns the specified field as returned by the wrapped function 187 | 188 | Parameters 189 | ---------- 190 | args 191 | Positional parameters to pass to the wrapped callable 192 | kwargs 193 | Named parameter to pass to the wrapped callable 194 | """ 195 | res = cmd(*args, **kwargs) # type: ty.Dict[str, T] 196 | return res[self.field] 197 | return wrapper 198 | -------------------------------------------------------------------------------- /ipfshttpclient/version.py: -------------------------------------------------------------------------------- 1 | # _Versioning scheme:_ 2 | # The major and minor version of each release correspond to the supported 3 | # IPFS daemon version. The revision number will be updated whenever we make 4 | # a new release for the `py-ipfs-http-client` for that daemon version. 5 | # 6 | # Example: The first client version to support the `0.4.x`-series of the IPFS 7 | # HTTP API will have version `0.4.0`, the second version will have version 8 | # `0.4.1` and so on. When IPFS `0.5.0` is released, the first client version 9 | # to support it will also be released as `0.5.0`. 10 | 11 | __version__ = "0.8.0a2" 12 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["flit-core >=2,<4"] 3 | build-backend = "flit_core.buildapi" 4 | 5 | [tool.flit.metadata] 6 | module = "ipfshttpclient" 7 | 8 | author = "py-ipfs-http-client team" 9 | author-email = "" 10 | home-page = "https://ipfs.io/ipns/12D3KooWEqnTdgqHnkkwarSrJjeMP2ZJiADWLYADaNvUb6SQNyPF/" 11 | keywords = "ipfs storage distribution development" 12 | license = "MIT License" 13 | description-file = "README.md" 14 | 15 | # Notes: `typing.NoReturn` was introduced post-release in Python 3.5.4 and 3.6.2 and had 16 | # a critical bug (https://bugs.python.org/issue34921) in 3.7.0 to 3.7.1. So the 17 | # compatible versions below reflect the range of Python versions with working 18 | # `typing.NoReturn` function signature support. (Also, many other `typing` module 19 | # items were only introduced post-release in 3.6 and version restrictions on these 20 | # versions ensure that those are all available as well.) 
21 | # 22 | # Maintain this concurrently with verify.sh 23 | requires-python = ">=3.6.2,!=3.7.0,!=3.7.1" 24 | requires = [ 25 | "multiaddr (>=0.0.7)", 26 | "requests (>=2.11)" 27 | ] 28 | 29 | classifiers = [ 30 | "Development Status :: 3 - Alpha", 31 | 32 | # Indicate who your project is intended for 33 | "Intended Audience :: Developers", 34 | "Intended Audience :: Information Technology", 35 | "Intended Audience :: Science/Research", 36 | 37 | "Topic :: Internet", 38 | "Topic :: Scientific/Engineering", 39 | "Topic :: System :: Filesystems", 40 | "Topic :: System :: Networking", 41 | 42 | # Pick your license as you wish (should match "license" above) 43 | "License :: OSI Approved :: MIT License", 44 | 45 | # Specify the Python versions you support here. In particular, ensure 46 | # that you indicate whether you support Python 2, Python 3 or both. 47 | "Programming Language :: Python :: 3 :: Only", 48 | "Programming Language :: Python :: 3.6", 49 | "Programming Language :: Python :: 3.7", 50 | "Programming Language :: Python :: 3.8", 51 | "Programming Language :: Python :: 3.9", 52 | ] 53 | 54 | [tool.flit.metadata.urls] 55 | Documentation = "https://ipfs.io/ipns/12D3KooWEqnTdgqHnkkwarSrJjeMP2ZJiADWLYADaNvUb6SQNyPF/docs/" 56 | -------------------------------------------------------------------------------- /test/combine-coverage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import glob 3 | import os 4 | import pathlib 5 | 6 | import coverage 7 | 8 | # Monkey-patch `coverage` to not randomly delete files 9 | import coverage.data 10 | coverage.data.file_be_gone = lambda *a: None 11 | 12 | # Switch working directory to project directory 13 | BASE_PATH = pathlib.Path(__file__).parent.parent 14 | DATA_PATH = BASE_PATH / "coverage" 15 | os.chdir(str(BASE_PATH)) 16 | 17 | 18 | cov = coverage.Coverage() 19 | 20 | # Load the most recent coverage data collected for each test platform 21 | cov.combine(glob.glob("build/test-py*/cov_raw"), strict=True) 22 | 23 | cov.report() 24 | cov.html_report(directory=str(DATA_PATH / "cov_html")) 25 | cov.xml_report(outfile=str(DATA_PATH / "cov.xml")) 26 | -------------------------------------------------------------------------------- /test/functional/.gitattributes: -------------------------------------------------------------------------------- 1 | # Don't change the line-ending style of test files on Windows as this changes the test hashes 2 | /fake_dir/** text eol=lf 3 | -------------------------------------------------------------------------------- /test/functional/conftest.py: -------------------------------------------------------------------------------- 1 | # Note that this file is special in that py.test will automatically import this file and gather 2 | # its list of fixtures even if it is not directly imported into the corresponding test case. 
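# For example, a functional test module can simply declare
#
#     def test_something(client):
#         ...
#
# and pytest will inject the ``client`` fixture defined further below, even
# though the test module never imports this file.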
3 | 4 | import pathlib 5 | import pytest 6 | import sys 7 | import typing as ty 8 | 9 | import ipfshttpclient 10 | 11 | 12 | TEST_DIR: pathlib.Path = pathlib.Path(__file__).parent 13 | 14 | 15 | @pytest.fixture(scope='session') 16 | def fake_dir() -> pathlib.Path: 17 | return TEST_DIR.joinpath('fake_dir') 18 | 19 | 20 | @pytest.fixture(scope='session') 21 | def ipfs_is_available() -> bool: 22 | """ 23 | Return whether the IPFS daemon is reachable or not 24 | """ 25 | 26 | try: 27 | with ipfshttpclient.connect(): 28 | pass 29 | except ipfshttpclient.exceptions.Error as e: 30 | print('\nFailed to connect to IPFS client', file=sys.stderr) 31 | print(e, file=sys.stderr) 32 | 33 | return False 34 | else: 35 | return True 36 | 37 | 38 | def sort_by_key(items, key="Name"): 39 | return sorted(items, key=lambda x: x[key]) 40 | 41 | 42 | def _generate_client( 43 | ipfs_is_available: bool, 44 | offline: bool 45 | ) -> ty.Generator[ipfshttpclient.Client, None, None]: 46 | if ipfs_is_available: 47 | with ipfshttpclient.Client(offline=offline) as client: 48 | yield client 49 | else: 50 | pytest.skip("Running IPFS node required") 51 | 52 | 53 | @pytest.fixture(scope="function") 54 | def client(ipfs_is_available: bool): 55 | yield from _generate_client(ipfs_is_available, False) 56 | 57 | 58 | @pytest.fixture(scope="function") 59 | def offline_client(ipfs_is_available: bool): 60 | yield from _generate_client(ipfs_is_available, True) 61 | 62 | 63 | @pytest.fixture(scope="module") 64 | def module_offline_client(ipfs_is_available: bool): 65 | yield from _generate_client(ipfs_is_available, True) 66 | 67 | 68 | @pytest.fixture 69 | def cleanup_pins(client): 70 | pinned = set(client.pin.ls(type="recursive")["Keys"]) 71 | 72 | yield 73 | 74 | for multihash in client.pin.ls(type="recursive")["Keys"]: 75 | if multihash not in pinned: 76 | client.pin.rm(multihash) 77 | 78 | 79 | @pytest.fixture 80 | def daemon(): 81 | """Result replaced by plugin in `run-tests.py` with the subprocess object of 82 | the spawned daemon.""" 83 | return None 84 | -------------------------------------------------------------------------------- /test/functional/fake_dir/fsdfgh: -------------------------------------------------------------------------------- 1 | dsadsad 2 | -------------------------------------------------------------------------------- /test/functional/fake_dir/popoiopiu: -------------------------------------------------------------------------------- 1 | oooofiopfsdpio 2 | -------------------------------------------------------------------------------- /test/functional/fake_dir/test2/fssdf: -------------------------------------------------------------------------------- 1 | dsdsdsadsdsad 2 | -------------------------------------------------------------------------------- /test/functional/fake_dir/test2/high/five/dummy: -------------------------------------------------------------------------------- 1 | 😉 2 | -------------------------------------------------------------------------------- /test/functional/fake_dir/test2/llllg: -------------------------------------------------------------------------------- 1 | dsdsadjs 2 | -------------------------------------------------------------------------------- /test/functional/fake_dir/test3/ppppoooooooooo: -------------------------------------------------------------------------------- 1 | dsasasd 2 | -------------------------------------------------------------------------------- /test/functional/fake_dir_almost_empty/.gitignore: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-shipyard/py-ipfs-http-client/f260c1803369b395150dd598de838850da184403/test/functional/fake_dir_almost_empty/.gitignore -------------------------------------------------------------------------------- /test/functional/fake_json/data.car: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ipfs-shipyard/py-ipfs-http-client/f260c1803369b395150dd598de838850da184403/test/functional/fake_json/data.car -------------------------------------------------------------------------------- /test/functional/fake_json/links.json: -------------------------------------------------------------------------------- 1 | { 2 | "Data": "another", 3 | "Links": [ { 4 | "Name": "some link", 5 | "Hash": "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V", 6 | "Size": 8 7 | } ] 8 | } 9 | -------------------------------------------------------------------------------- /test/functional/fake_json/no_links.json: -------------------------------------------------------------------------------- 1 | { 2 | "Data": "abc" 3 | } 4 | -------------------------------------------------------------------------------- /test/functional/test_bitswap.py: -------------------------------------------------------------------------------- 1 | def test_wantlist(client): 2 | result = client.bitswap.wantlist(peer="QmdkJZUWnVkEc6yfptVu4LWY8nHkEnGwsxqQ233QSGj8UP") 3 | assert "Keys" in result 4 | 5 | 6 | def test_stat(client): 7 | result = client.bitswap.stat() 8 | assert "Wantlist" in result -------------------------------------------------------------------------------- /test/functional/test_block.py: -------------------------------------------------------------------------------- 1 | import cid 2 | import io 3 | import pytest 4 | 5 | import conftest 6 | 7 | TEST1_FILEPATH = conftest.TEST_DIR / "fake_dir" / "fsdfgh" 8 | TEST1_CID_STR = "QmPevo2B1pwvDyuZyJbWVfhwkaGPee3f1kX36wFmqx1yna" 9 | TEST1_SIZE = 8 10 | 11 | TEST2_CONTENT = b"Hello World!" 
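# (Background note: the "bafkrei…" string below should be the base32 CIDv1 of
# this raw block — "raw" codec with a sha2-256 multihash — which is why
# test_put_str stores it with opts={"format": "raw"}.)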
12 | TEST2_CID_STR = "bafkreid7qoywk77r7rj3slobqfekdvs57qwuwh5d2z3sqsw52iabe3mqne" 13 | TEST2_CID_OBJ = cid.make_cid(TEST2_CID_STR) 14 | TEST2_SIZE = len(TEST2_CONTENT) 15 | 16 | 17 | @pytest.mark.dependency() 18 | def test_put(client): 19 | expected_keys = {"Key", "Size"} 20 | res = client.block.put(TEST1_FILEPATH) 21 | assert set(res.keys()).issuperset(expected_keys) 22 | assert res["Key"] == TEST1_CID_STR 23 | 24 | 25 | @pytest.mark.dependency(depends=["test_put"]) 26 | def test_stat(client): 27 | expected_keys = {"Key", "Size"} 28 | res = client.block.stat(TEST1_CID_STR) 29 | assert set(res.keys()).issuperset(expected_keys) 30 | 31 | 32 | @pytest.mark.dependency(depends=["test_put"]) 33 | def test_get(client): 34 | assert len(client.block.get(TEST1_CID_STR)) == TEST1_SIZE 35 | 36 | 37 | @pytest.mark.dependency() 38 | def test_put_str(client): 39 | expected_keys = {"Key", "Size"} 40 | res = client.block.put(io.BytesIO(TEST2_CONTENT), opts={"format": "raw"}) 41 | assert set(res.keys()).issuperset(expected_keys) 42 | assert res["Key"] == TEST2_CID_STR 43 | 44 | 45 | @pytest.mark.dependency(depends=["test_put_str"]) 46 | def test_stat_cid_obj(client): 47 | assert len(client.block.get(TEST2_CID_OBJ)) == TEST2_SIZE -------------------------------------------------------------------------------- /test/functional/test_dag.py: -------------------------------------------------------------------------------- 1 | import io 2 | 3 | import conftest 4 | 5 | 6 | def test_put_get_resolve(client): 7 | data = io.BytesIO(br'{"links": []}') 8 | response = client.dag.put(data) 9 | 10 | assert 'Cid' in response 11 | assert '/' in response['Cid'] 12 | assert response['Cid']['/'] == 'bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya' 13 | 14 | response = client.dag.get('bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya') 15 | 16 | assert 'links' in response 17 | assert response['links'] == [] 18 | 19 | response = client.dag.resolve('bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya') 20 | 21 | assert 'Cid' in response 22 | assert response['Cid']['/'] == 'bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya' 23 | 24 | 25 | def test_import_export(client): 26 | # This file was created by inserting a simple JSON object into IPFS and 27 | # exporting it using `ipfs dag export > file.car` 28 | data_car = conftest.TEST_DIR / 'fake_json' / 'data.car' 29 | 30 | with open(data_car, 'rb') as file: 31 | response = client.dag.imprt(file) 32 | 33 | assert 'Root' in response 34 | assert 'Cid' in response['Root'] 35 | assert '/' in response['Root']['Cid'] 36 | 37 | cid = response['Root']['Cid'] 38 | assert cid['/'] == 'bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya' 39 | 40 | data = client.dag.export('bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya') 41 | 42 | with open(data_car, 'rb') as file: 43 | assert data == file.read() 44 | -------------------------------------------------------------------------------- /test/functional/test_key.py: -------------------------------------------------------------------------------- 1 | def test_add_list_rename_rm(client): 2 | # Remove keys if they already exist 3 | key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"])) 4 | if "ipfshttpclient-test-rsa" in key_list: 5 | client.key.rm("ipfshttpclient-test-rsa") 6 | if "ipfshttpclient-test-ed" in key_list: 7 | client.key.rm("ipfshttpclient-test-ed") 8 | 9 | # Add new RSA and ED25519 key 10 | key1 = client.key.gen("ipfshttpclient-test-rsa", "rsa")["Name"] 11 | key2 = 
client.key.gen("ipfshttpclient-test-ed", "ed25519")["Name"] 12 | 13 | # Validate the keys exist now 14 | key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"])) 15 | assert key1 in key_list 16 | assert key2 in key_list 17 | 18 | # Rename the EC key 19 | key2_new = client.key.rename(key2, "ipfshttpclient-test-ed2")["Now"] 20 | 21 | # Validate that the key was successfully renamed 22 | key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"])) 23 | assert key1 in key_list 24 | assert key2 not in key_list 25 | assert key2_new in key_list 26 | 27 | # Drop both keys with one request 28 | client.key.rm(key1, key2_new) 29 | 30 | # Validate that the keys are gone again 31 | key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"])) 32 | assert key1 not in key_list 33 | assert key2_new not in key_list -------------------------------------------------------------------------------- /test/functional/test_miscellaneous.py: -------------------------------------------------------------------------------- 1 | import platform 2 | import time 3 | 4 | import pytest 5 | 6 | 7 | 8 | def test_version(client): 9 | expected_keys = {"Repo", "Commit", "Version"} 10 | resp_version = client.version() 11 | assert set(resp_version.keys()).issuperset(expected_keys) 12 | 13 | 14 | def test_id(client): 15 | expected_keys = {"PublicKey", "ProtocolVersion", "ID", "AgentVersion", "Addresses"} 16 | resp_id = client.id() 17 | assert set(resp_id.keys()).issuperset(expected_keys) 18 | 19 | 20 | ################# 21 | # Shutdown test # 22 | ################# 23 | 24 | @pytest.mark.order("last") 25 | def test_daemon_stop(daemon, client): 26 | # The value for the `daemon` “fixture” is injected using a pytest plugin 27 | # with access to the created daemon subprocess object defined directly 28 | # in the `test/run-test.py` file 29 | if not daemon: 30 | return 31 | 32 | def daemon_is_running(): 33 | return daemon.poll() is None 34 | 35 | # Daemon should still be running at this point 36 | assert daemon_is_running() 37 | 38 | # Send stop request 39 | client.stop() 40 | 41 | # Wait for daemon process to disappear 42 | # 43 | #XXX: Wait up to 2mins for slow go-IPFS in Travis CI Windows to shut down 44 | for _ in range(10000 if not platform.win32_ver()[0] else 120000): 45 | if not daemon_is_running(): 46 | break 47 | time.sleep(0.001) 48 | 49 | # Daemon should not be running anymore 50 | assert not daemon_is_running() 51 | -------------------------------------------------------------------------------- /test/functional/test_name.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | def get_key(client, key_name): 5 | keys = client.key.list()["Keys"] 6 | for k in keys: 7 | if k["Name"] == key_name: 8 | return k 9 | raise Exception("Unknown key: %s" % key_name) 10 | 11 | 12 | def hash_to_path(ns, h): 13 | assert "/" not in h 14 | assert h == h.strip() 15 | return "/" + ns + "/" + h 16 | 17 | 18 | def hash_to_ipfs_path(h): 19 | return hash_to_path("ipfs", h) 20 | 21 | 22 | def hash_to_ipns_path(h): 23 | return hash_to_path("ipns", h) 24 | 25 | 26 | class Resources: 27 | def __init__(self, offline_client): 28 | self.client = offline_client 29 | 30 | 31 | def __enter__(self): 32 | self.key_self = get_key(self.client, "self") 33 | self.key_test1 = self.client.key.gen("ipfshttpclient-test-name-1", "rsa") 34 | self.key_test2 = self.client.key.gen("ipfshttpclient-test-name-2", "rsa") 35 | self.msg1 = hash_to_ipfs_path(self.client.add_str("Mary had a 
little lamb")) 36 | self.msg2 = hash_to_ipfs_path(self.client.add_str("Mary had a little alpaca")) 37 | self.msg3 = hash_to_ipfs_path(self.client.add_str("Mary had a little goat")) 38 | return self 39 | 40 | 41 | def __exit__(self, t, v, tb): 42 | self.client.pin.rm(self.msg1, self.msg2, self.msg3) 43 | self.client.key.rm(self.key_test1["Name"], self.key_test2["Name"]) 44 | 45 | 46 | class PublishedMapping: 47 | def __init__(self, name, path): 48 | self.name = name 49 | self.path = path 50 | 51 | 52 | @pytest.fixture(scope="module") 53 | def resources(module_offline_client): 54 | with Resources(module_offline_client) as resources: 55 | yield resources 56 | 57 | 58 | @pytest.fixture(scope="module") 59 | def published_mapping(module_offline_client, resources): 60 | # we're not testing publish here, pass whatever args we want 61 | resp = module_offline_client.name.publish( 62 | resources.msg3, 63 | key=resources.key_test2["Name"], resolve=False, 64 | lifetime="5m", ttl="5m", allow_offline=True) 65 | return PublishedMapping(resp["Name"], resp["Value"]) 66 | 67 | 68 | def check_resolve(resp, path): 69 | assert resp["Path"] == path 70 | 71 | 72 | def check_publish(offline_client, response_path, resolved_path, key, resp): 73 | 74 | name = resp["Name"] 75 | assert name == key["Id"] 76 | assert resp["Value"] == response_path 77 | 78 | # we're not testing resolve here, pass whatever args we want 79 | resolve_resp = offline_client.name.resolve( 80 | name, 81 | recursive=True, dht_record_count=0, dht_timeout="1s", 82 | offline=True) 83 | check_resolve(resolve_resp, resolved_path) 84 | 85 | 86 | def test_publish_self(offline_client, resources): 87 | resp = offline_client.name.publish(resources.msg1, allow_offline=True) 88 | check_publish(offline_client, resources.msg1, resources.msg1, 89 | resources.key_self, resp) 90 | 91 | 92 | def test_publish_params(offline_client, resources): 93 | resp = offline_client.name.publish(resources.msg1, 94 | lifetime="25h", ttl="1m", 95 | allow_offline=True) 96 | check_publish(offline_client, resources.msg1, resources.msg1, 97 | resources.key_self, resp) 98 | 99 | 100 | def test_publish_key(offline_client, resources): 101 | resp = offline_client.name.publish( 102 | resources.msg2, 103 | key=resources.key_test1["Name"], allow_offline=True) 104 | check_publish(offline_client, resources.msg2, resources.msg2, 105 | resources.key_test1, resp) 106 | 107 | 108 | def test_publish_indirect(offline_client, resources, published_mapping): 109 | path = hash_to_ipns_path(published_mapping.name) 110 | resp = offline_client.name.publish(path, 111 | resolve=True, allow_offline=True) 112 | check_publish(offline_client, path, published_mapping.path, 113 | resources.key_self, resp) 114 | 115 | 116 | def test_resolve(offline_client, published_mapping): 117 | check_resolve(offline_client.name.resolve(published_mapping.name), 118 | published_mapping.path) 119 | 120 | 121 | def test_resolve_recursive(offline_client, published_mapping): 122 | inner_path = hash_to_ipns_path(published_mapping.name) 123 | res = offline_client.name.publish(inner_path, 124 | resolve=False, allow_offline=True) 125 | outer_path = res["Name"] 126 | 127 | resp = offline_client.name.resolve(outer_path, recursive=True) 128 | check_resolve(resp, published_mapping.path) 129 | 130 | 131 | def test_resolve_params(offline_client, published_mapping): 132 | resp = offline_client.name.resolve( 133 | published_mapping.name, 134 | nocache=True, dht_record_count=1, dht_timeout="180s", 135 | offline=True) 136 | check_resolve(resp, 
published_mapping.path) 137 | -------------------------------------------------------------------------------- /test/functional/test_object.py: -------------------------------------------------------------------------------- 1 | import conftest 2 | import pytest 3 | 4 | 5 | 6 | 7 | def test_new(client): 8 | expected_keys = {"Hash"} 9 | res = client.object.new() 10 | assert set(res.keys()).issuperset(expected_keys) 11 | 12 | 13 | def test_stat(client): 14 | expected_keys = {"Hash", "CumulativeSize", "DataSize", "NumLinks", "LinksSize", "BlockSize"} 15 | resource = client.add_str("Mary had a little lamb") 16 | resp_stat = client.object.stat(resource) 17 | assert set(resp_stat.keys()).issuperset(expected_keys) 18 | 19 | 20 | def test_put_get(client): 21 | # Set paths to test json files 22 | path_no_links = conftest.TEST_DIR / "fake_json" / "no_links.json" 23 | path_links = conftest.TEST_DIR / "fake_json" / "links.json" 24 | 25 | # Put the json objects on the DAG 26 | no_links = client.object.put(path_no_links) 27 | links = client.object.put(path_links) 28 | 29 | # Verify the correct content was put 30 | assert no_links["Hash"] == "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V" 31 | assert links["Hash"] == "QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm" 32 | 33 | # Get the objects from the DAG 34 | get_no_links = client.object.get("QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V") 35 | get_links = client.object.get("QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm") 36 | 37 | # Verify the objects we put have been gotten 38 | assert get_no_links["Data"] == "abc" 39 | assert get_links["Data"] == "another" 40 | assert get_links["Links"][0]["Name"] == "some link" 41 | 42 | 43 | def test_links(client): 44 | # Set paths to test json files 45 | path_links = conftest.TEST_DIR / "fake_json" / "links.json" 46 | 47 | # Put json object on the DAG and get its links 48 | client.object.put(path_links) 49 | links = client.object.links("QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm") 50 | 51 | # Verify the correct link has been gotten 52 | assert links["Links"][0]["Hash"] == "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V" 53 | 54 | 55 | def test_data(client): 56 | # Set paths to test json files 57 | path_links = conftest.TEST_DIR / "fake_json" / "links.json" 58 | 59 | # Put json objects on the DAG and get its data 60 | client.object.put(path_links) 61 | data = client.object.data("QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm") 62 | 63 | # Verify the correct bytes have been gotten 64 | assert data == b"another" 65 | 66 | 67 | # Instead of writing our own test file generation just make a proxy to piggyback off test_files 68 | @pytest.mark.dependency(depends=["test/functional/test_files.py::test_add_recursive"], 69 | scope='session') 70 | def test_prepare_test_files(client): 71 | pass 72 | 73 | 74 | @pytest.mark.dependency(depends=["test_prepare_test_files"]) 75 | def test_patch_append_data(client): 76 | """Warning, this test depends on the contents of 77 | test/functional/fake_dir/fsdfgh 78 | """ 79 | result = client.object.patch.append_data( 80 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", 81 | conftest.TEST_DIR / "fake_dir" / "fsdfgh" 82 | ) 83 | assert result == {"Hash": "QmcUsyoGVxWoQgYKgmLaDBGm8J3eHWfchMh3oDUD5FrrtN"} 84 | 85 | 86 | @pytest.mark.dependency(depends=["test_prepare_test_files"]) 87 | def test_patch_add_link(client): 88 | """Warning, this test depends on the contents of 89 | test/functional/fake_dir/fsdfgh 90 | """ 91 | result = client.object.patch.add_link( 92 | 
"QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", "self", 93 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n" 94 | ) 95 | assert result == {"Hash": "QmbWSr7YXBLcF23VVb7yPvUuogUPn46GD7gXftXC6mmsNM"} 96 | 97 | 98 | @pytest.mark.dependency(depends=["test_prepare_test_files"]) 99 | def test_patch_rm_link(client): 100 | """Warning, this test depends on the contents of 101 | test/functional/fake_dir/fsdfgh 102 | """ 103 | result = client.object.patch.rm_link( 104 | "QmbWSr7YXBLcF23VVb7yPvUuogUPn46GD7gXftXC6mmsNM", "self" 105 | ) 106 | assert result == {"Hash": "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n"} 107 | 108 | 109 | @pytest.mark.dependency(depends=["test_prepare_test_files"]) 110 | def test_patch_set_data(client): 111 | """Warning, this test depends on the contents of 112 | test/functional/fake_dir/popoiopiu 113 | """ 114 | result = client.object.patch.set_data( 115 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", 116 | conftest.TEST_DIR / "fake_dir" / "popoiopiu" 117 | ) 118 | assert result == {"Hash": "QmV4QR7MCBj5VTi6ddHmXPyjWGzbaKEtX2mx7axA5PA13G"} 119 | 120 | 121 | @pytest.mark.dependency(depends=["test_prepare_test_files"]) 122 | def test_diff_same(client): 123 | """Warning, this test depends on the contents of 124 | test/functional/fake_dir/popoiopiu 125 | """ 126 | result = client.object.diff( 127 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", 128 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n" 129 | ) 130 | assert result == {'Changes': []} 131 | 132 | 133 | @pytest.mark.dependency(depends=["test_prepare_test_files"]) 134 | def test_diff_different_files(client): 135 | """Warning, this test depends on the contents of 136 | test/functional/fake_dir/fsdfgh 137 | test/functional/fake_dir/popoiopiu 138 | """ 139 | result = client.object.diff( 140 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", 141 | "QmV4QR7MCBj5VTi6ddHmXPyjWGzbaKEtX2mx7axA5PA13G" 142 | ) 143 | assert result == {'Changes': [{ 144 | 'Type': 2, 145 | 'Path': '', 146 | 'Before': {'/': 'QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n'}, 147 | 'After': {'/': 'QmV4QR7MCBj5VTi6ddHmXPyjWGzbaKEtX2mx7axA5PA13G'}}]} -------------------------------------------------------------------------------- /test/functional/test_other.py: -------------------------------------------------------------------------------- 1 | import ipfshttpclient 2 | 3 | 4 | def test_ipfs_node_available(ipfs_is_available): 5 | """ 6 | Dummy test to ensure that running the tests without a daemon produces a failure, since we 7 | think it's unlikely that people running tests want this 8 | """ 9 | assert ipfs_is_available, \ 10 | "Functional tests require an IPFS node to be available at: {0}" \ 11 | .format(ipfshttpclient.DEFAULT_ADDR) 12 | 13 | 14 | def test_add_json(client, cleanup_pins): 15 | data = {"Action": "Open", "Type": "PR", "Name": "IPFS", "Pubkey": 7} 16 | res = client.add_json(data) 17 | 18 | assert data == client.get_json(res) 19 | 20 | # have to test the string added to IPFS, deserializing JSON will not 21 | # test order of keys 22 | assert '{"Action":"Open","Name":"IPFS","Pubkey":7,"Type":"PR"}' == client.cat(res).decode("utf-8") 23 | -------------------------------------------------------------------------------- /test/functional/test_pin.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | 3 | import pytest 4 | 5 | import ipfshttpclient.exceptions 6 | 7 | 8 | class Resources: 9 | def __init__(self, client, source_folder: pathlib.Path) -> None: 10 | self.msg = 
client.add_str("Mary had a little lamb") 11 | self.msg2 = client.add_str("Mary had a little alpaca") 12 | resp_add = client.add(source_folder, recursive=True) 13 | self.fake_dir_hashes = [el["Hash"] for el in resp_add if "Hash" in el] 14 | for resp in resp_add: 15 | if resp["Name"] == "fake_dir": 16 | self.fake_dir_hash = resp["Hash"] 17 | elif resp["Name"] == "fake_dir/test2": 18 | self.fake_dir_test2_hash = resp["Hash"] 19 | 20 | 21 | @pytest.fixture 22 | def resources(client, fake_dir): 23 | return Resources(client, source_folder=fake_dir) 24 | 25 | 26 | def is_pinned(client, path): 27 | try: 28 | resp = client.pin.ls(path) 29 | assert path.split("/")[-1] in resp["Keys"] 30 | except ipfshttpclient.exceptions.ErrorResponse as exc: 31 | error_msg = exc.args[0] 32 | if "not pinned" in error_msg: 33 | return False 34 | raise 35 | return True 36 | 37 | 38 | def test_ls_void(client, resources): 39 | pins = client.pin.ls()["Keys"] 40 | assert len(pins) >= 2 41 | assert resources.msg in pins 42 | assert resources.msg2 in pins 43 | 44 | 45 | def test_ls_single(client, resources): 46 | pins = client.pin.ls(resources.msg)["Keys"] 47 | assert len(pins) == 1 48 | assert resources.msg in pins 49 | 50 | 51 | def test_ls_multiple(client, resources): 52 | pins = client.pin.ls(resources.msg, resources.msg2)["Keys"] 53 | assert len(pins) == 2 54 | assert resources.msg in pins 55 | assert resources.msg2 in pins 56 | 57 | 58 | def test_ls_add_rm_single(client, resources): 59 | # Get pinned objects at start. 60 | pins_begin = client.pin.ls()["Keys"] 61 | 62 | # Unpin the resource if already pinned. 63 | if resources.msg in pins_begin.keys(): 64 | client.pin.rm(resources.msg) 65 | 66 | # No matter what, the resource should not be pinned at this point 67 | assert resources.msg not in client.pin.ls()["Keys"] 68 | assert not is_pinned(client, resources.msg) 69 | 70 | for option in (True, False): 71 | # Pin the resource. 
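# (first with recursive=True, then with recursive=False; the pin "Type" reported by the daemon is asserted to match below)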
72 | resp_add = client.pin.add(resources.msg, recursive=option) 73 | pins_after_add = client.pin.ls()["Keys"] 74 | assert resp_add["Pins"] == [resources.msg] 75 | assert resources.msg in pins_after_add 76 | if option: 77 | assert pins_after_add[resources.msg]["Type"] == "recursive" 78 | else: 79 | assert pins_after_add[resources.msg]["Type"] != "recursive" 80 | 81 | # Unpin the resource 82 | resp_rm = client.pin.rm(resources.msg) 83 | pins_after_rm = client.pin.ls()["Keys"] 84 | assert resp_rm["Pins"] == [resources.msg] 85 | assert resources.msg not in pins_after_rm 86 | 87 | # Get pinned objects at end 88 | pins_end = client.pin.ls()["Keys"] 89 | 90 | # Compare pinned items from start to finish of test 91 | assert resources.msg not in pins_end.keys() 92 | assert not is_pinned(client, resources.msg) 93 | 94 | 95 | def test_ls_add_rm_directory(client, resources): 96 | # Remove fake_dir if it had previously been pinned 97 | if resources.fake_dir_hash in client.pin.ls(type="recursive")["Keys"].keys(): 98 | client.pin.rm(resources.fake_dir_hash) 99 | 100 | # Make sure it was actually removed 101 | assert resources.fake_dir_hash not in client.pin.ls()["Keys"].keys() 102 | 103 | # Add "fake_dir" recursively 104 | client.pin.add(resources.fake_dir_hash) 105 | 106 | # Make sure all of its contents appear on the list of pinned objects 107 | pins_after_add = client.pin.ls()["Keys"].keys() 108 | assert set(pins_after_add).issuperset(set(resources.fake_dir_hashes)) 109 | 110 | # Clean up 111 | client.pin.rm(resources.fake_dir_hash) 112 | pins_end = client.pin.ls(type="recursive")["Keys"].keys() 113 | assert resources.fake_dir_hash not in pins_end 114 | 115 | 116 | def test_add_update_verify_rm(client, resources): 117 | # Get pinned objects at start 118 | pins_begin = client.pin.ls(type="recursive")["Keys"].keys() 119 | 120 | # Remove fake_dir and the demo resource if they had previously been pinned 121 | if resources.fake_dir_hash in pins_begin: 122 | client.pin.rm(resources.fake_dir_hash) 123 | if resources.fake_dir_test2_hash in pins_begin: 124 | client.pin.rm(resources.fake_dir_test2_hash) 125 | 126 | # Ensure that none of the above are pinned anymore 127 | pins_after_rm = client.pin.ls(type="recursive")["Keys"].keys() 128 | assert resources.fake_dir_hash not in pins_after_rm 129 | assert resources.fake_dir_test2_hash not in pins_after_rm 130 | 131 | # Add pin for sub-directory 132 | client.pin.add(resources.fake_dir_test2_hash) 133 | 134 | # Replace it with a pin for the entire fake dir 135 | client.pin.update(resources.fake_dir_test2_hash, resources.fake_dir_hash) 136 | 137 | # Ensure that the sub-directory is not pinned directly anymore 138 | pins_after_update = client.pin.ls(type="recursive")["Keys"].keys() 139 | assert resources.fake_dir_test2_hash not in pins_after_update 140 | assert resources.fake_dir_hash in pins_after_update 141 | 142 | # Now add a pin to the sub-directory from the parent directory 143 | client.pin.update(resources.fake_dir_hash, resources.fake_dir_test2_hash, unpin=False) 144 | 145 | # Check integrity of all directory content hashes and whether all 146 | # directory contents have been processed in doing this 147 | hashes = [] 148 | for result in client.pin.verify(resources.fake_dir_hash, verbose=True): 149 | assert result["Ok"] 150 | hashes.append(result["Cid"]) 151 | assert resources.fake_dir_hash in hashes 152 | 153 | # Ensure that both directories are now recursively pinned 154 | pins_after_update2 = client.pin.ls(type="recursive")["Keys"].keys() 155 | assert resources.fake_dir_test2_hash in pins_after_update2 
156 | assert resources.fake_dir_hash in pins_after_update2 157 | 158 | # Clean up 159 | client.pin.rm(resources.fake_dir_hash, resources.fake_dir_test2_hash) 160 | -------------------------------------------------------------------------------- /test/functional/test_repo.py: -------------------------------------------------------------------------------- 1 | def test_stat(client): 2 | # Verify that the correct key-value pairs are returned 3 | stat = client.repo.stat() 4 | assert sorted(stat.keys()) == [ 5 | "NumObjects", "RepoPath", "RepoSize", 6 | "StorageMax", "Version" 7 | ] 8 | 9 | 10 | def test_gc(client): 11 | # Add and unpin an object to be garbage collected 12 | garbage = client.add_str("Test String") 13 | client.pin.rm(garbage) 14 | 15 | # Collect the garbage object with object count before and after 16 | orig_objs = client.repo.stat()["NumObjects"] 17 | gc = client.repo.gc() 18 | cur_objs = client.repo.stat()["NumObjects"] 19 | 20 | # Verify the garbage object was collected 21 | assert orig_objs > cur_objs 22 | keys = [el["Key"]["/"] for el in gc] 23 | assert garbage in keys 24 | -------------------------------------------------------------------------------- /test/functional/test_unstable.py: -------------------------------------------------------------------------------- 1 | import collections.abc 2 | import conftest 3 | from threading import Timer 4 | import time 5 | 6 | 7 | ################## 8 | # Daemon Logging # 9 | ################## 10 | 11 | def test_log_ls_level(client): 12 | """ 13 | Unfortunately there is no way of knowing the logging levels prior 14 | to this test. This makes it impossible to guarantee that the logging 15 | levels are the same as before the test was run. 16 | """ 17 | # Retrieves the list of logging subsystems for a running daemon. 18 | resp_ls = client.unstable.log.ls() 19 | # The response should be a dictionary with only one key ('Strings'). 20 | assert "Strings" in resp_ls 21 | 22 | # Sets the logging level to 'error' for the first subsystem found. 23 | sub = resp_ls["Strings"][0] 24 | resp_level = client.unstable.log.level(sub, "error") 25 | assert resp_level["Message"] == "Changed log level of '{0}' to 'error'\n".format(sub) 26 | 27 | 28 | def test_log_tail(client): 29 | 30 | # Generate some events in the log, but only after we start listening 31 | TIME_TO_LOG_TAIL = 2 # time it takes to send request and start listening 32 | TIME_TO_GC = 2 # time it takes for GC to complete 33 | t = Timer(TIME_TO_LOG_TAIL, client.repo.gc) 34 | t.start() 35 | 36 | # Gets the response object. 37 | with client.unstable.log.tail(timeout=5) as log_tail_iter: 38 | # In case the log was not empty, we may return earlier 39 | # than the timer. If we return while the GC is still 40 | # running, we risk racing with test exit, so wait. 41 | t.cancel() 42 | time.sleep(TIME_TO_GC) 43 | 44 | # The log should have been parsed into a dictionary object with 45 | # various keys depending on the event that occurred. 
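# (e.g. something like {"event": ..., "system": ..., "time": ...}; the exact keys vary between daemon versions, which is why only the mapping type is asserted here)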
46 | assert isinstance(next(log_tail_iter), collections.abc.Mapping) 47 | 48 | 49 | ############ 50 | # Refs API # 51 | ############ 52 | 53 | REFS_RESULT = [ 54 | {"Err": "", "Ref": "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX"}, 55 | {"Err": "", "Ref": "QmYAhvKYu46rh5NcHzeu6Bhc7NG9SqkF9wySj2jvB74Rkv"}, 56 | {"Err": "", "Ref": "QmStL6TPbJfMHQhHjoVT93kCynVx3GwLf7xwgrtScqABhU"}, 57 | {"Err": "", "Ref": "QmRphRr6ULDEj7YnXpLdnxhnPiVjv5RDtGX3er94Ec6v4Q"} 58 | ] 59 | 60 | 61 | def test_refs_local_1(client): 62 | with open(str(conftest.TEST_DIR / "fake_dir" / "fsdfgh"), "rb") as fp: 63 | res = client.add(fp, pin=False) 64 | 65 | assert res["Hash"] == "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX" 66 | 67 | assert res["Hash"] not in client.pin.ls(type="recursive") 68 | assert res["Hash"] in list(map(lambda i: i["Ref"], client.unstable.refs.local())) 69 | 70 | 71 | def test_refs_local_2(client): 72 | res = client.add(conftest.TEST_DIR / "fake_dir" / "fsdfgh", pin=False) 73 | 74 | assert res["Hash"] == "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX" 75 | 76 | assert res["Hash"] not in client.pin.ls(type="recursive") 77 | assert res["Hash"] in list(map(lambda i: i["Ref"], client.unstable.refs.local())) 78 | 79 | 80 | def test_refs(client, cleanup_pins): 81 | res = client.add(conftest.TEST_DIR / "fake_dir", recursive=True) 82 | assert res[-1]["Hash"] == "QmNx8xVu9mpdz9k6etbh2S8JwZygatsZVCH4XhgtfUYAJi" 83 | 84 | refs = client.unstable.refs(res[-1]["Hash"]) 85 | assert conftest.sort_by_key(REFS_RESULT, "Ref") == conftest.sort_by_key(refs, "Ref") -------------------------------------------------------------------------------- /test/run-tests.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import contextlib 4 | import itertools 5 | import locale 6 | import os 7 | import pathlib 8 | import random 9 | import shutil 10 | import subprocess 11 | import sys 12 | import tempfile 13 | 14 | import pytest 15 | 16 | 17 | ###################### 18 | # Test configuration # 19 | ###################### 20 | 21 | # Environment name as used by `tox` 22 | ENVNAME = "py{}{}".format(sys.version_info.major, sys.version_info.minor) 23 | 24 | # Determine project base directory and testing path 25 | BASE_PATH = pathlib.Path(__file__).parent.parent 26 | TEST_PATH = BASE_PATH / "build" / "test-{}".format(ENVNAME) 27 | IPFS_PATH = TEST_PATH / "ipfs-path" 28 | 29 | ADDR = "/ip4/127.0.0.1/tcp/{0}".format(random.randrange(40000, 65535)) 30 | 31 | 32 | ########################### 33 | # Set up test environment # 34 | ########################### 35 | 36 | # Add project directory to PYTHONPATH 37 | sys.path.insert(0, str(BASE_PATH)) 38 | 39 | # Switch working directory to project directory 40 | os.chdir(str(BASE_PATH)) 41 | 42 | # Export environment variables required for testing 43 | os.environ["IPFS_PATH"] = str(IPFS_PATH) 44 | os.environ["PY_IPFS_HTTP_CLIENT_DEFAULT_ADDR"] = str(ADDR) 45 | 46 | # Make sure the IPFS data directory exists and is empty 47 | with contextlib.suppress(FileNotFoundError): 48 | shutil.rmtree(str(IPFS_PATH)) 49 | 50 | with contextlib.suppress(FileExistsError): 51 | os.makedirs(str(IPFS_PATH)) 52 | 53 | # Initialize the IPFS data directory 54 | subprocess.call(["ipfs", "init"]) 55 | subprocess.call(["ipfs", "config", "Addresses.Gateway", ""]) 56 | subprocess.call(["ipfs", "config", "Addresses.API", ADDR]) 57 | subprocess.call(["ipfs", "config", "--bool", "Experimental.FilestoreEnabled", "true"]) 58 | 59 | 60 | ################ 61 | # 
Start daemon # 62 | ################ 63 | 64 | extra_args = { 65 | "encoding": locale.getpreferredencoding() 66 | } 67 | 68 | # Spawn IPFS daemon in data directory 69 | print("Starting IPFS daemon on {0}…".format(ADDR), file=sys.stderr) 70 | DAEMON = subprocess.Popen( 71 | ["ipfs", "daemon", "--enable-pubsub-experiment"], 72 | stdout=subprocess.PIPE, 73 | stderr=subprocess.STDOUT, 74 | **extra_args 75 | ) 76 | 77 | 78 | class DaemonProcessPlugin: 79 | """Tiny pytest plugin to inject daemon object reference as test “fixture” value.""" 80 | @pytest.hookimpl(hookwrapper=True) 81 | def pytest_pyfunc_call(self, pyfuncitem): 82 | if "daemon" in pyfuncitem.funcargs: 83 | pyfuncitem.funcargs["daemon"] = DAEMON 84 | yield 85 | 86 | 87 | # Wait for daemon to start up 88 | for line in DAEMON.stdout: 89 | print("\t{0}".format(line), end="", file=sys.stderr) 90 | if line.strip() == "Daemon is ready": 91 | break 92 | 93 | #XXX: This design could deadlock the test run if the daemon were to produce more 94 | # output than fits into its output pipe before shutdown 95 | 96 | 97 | ################## 98 | # Run test suite # 99 | ################## 100 | 101 | PYTEST_CODE = 1 102 | try: 103 | # Make sure all required pytest plugins are loaded up-front 104 | os.environ["PYTEST_PLUGINS"] = ",".join([ 105 | "cid", 106 | "dependency", 107 | "localserver", 108 | "pytest_cov", 109 | "pytest_mock", 110 | "pytest_order", 111 | ]) 112 | 113 | with tempfile.NamedTemporaryFile("r+") as coveragerc: 114 | coverage_args = [] 115 | if os.name != "nt": 116 | PREFER_HTTPX = (os.environ.get("PY_IPFS_HTTP_CLIENT_PREFER_HTTPX", "no").lower() 117 | not in ("0", "f", "false", "n", "no")) 118 | 119 | # Assemble list of files to exclude from coverage analysis 120 | omitted_files = [ 121 | "ipfshttpclient/requests_wrapper.py", 122 | ] 123 | if PREFER_HTTPX and sys.version_info >= (3, 6): 124 | omitted_files.append("ipfshttpclient/http_requests.py") 125 | else: #PY35: Fallback to old requests-based code instead of HTTPX 126 | omitted_files.append("ipfshttpclient/http_httpx.py") 127 | 128 | # Assemble list of coverage data exclusion patterns (also escape the 129 | # hash sign [#] as it has a special meaning [comment] in the generated 130 | # configuration file) 131 | exclusions = [ 132 | # Add the standard coverage exclusion statement 133 | r"pragma:\s+no\s+cover", 134 | 135 | # Ignore typing-only branches 136 | r"if\s+(?:[A-Za-z]+\s*[.]\s*)?TYPE_CHECKING\s*:", 137 | 138 | # Ignore dummy ellipsis expression line 139 | r"^\s*\.\.\.\s*$", 140 | ] 141 | if sys.version_info.major == 2: 142 | exclusions.append(r"\#PY3") 143 | else: 144 | # Exclude the past 145 | exclusions.append(r"\#PY2") 146 | # Exclude code only used for compatibility with a previous Python version 147 | exclusions.append(r"\#PY3({0})([^\d+]|$)".format( 148 | "|".join(map(str, range(0, sys.version_info.minor))) 149 | )) 150 | # Exclude code only used in future Python versions 151 | exclusions.append(r"\#PY3({0})\+".format( 152 | "|".join(map(str, range(sys.version_info.minor + 1, 20))) 153 | )) 154 | 155 | if PREFER_HTTPX and sys.version_info >= (3, 6): 156 | exclusions.append(r"\# pragma: http-backend=requests") 157 | else: #PY35: Fallback to old requests-based code instead of HTTPX 158 | exclusions.append(r"\# pragma: http-backend=httpx") 159 | 160 | # Create temporary file with extended *coverage.py* configuration data 161 | coveragerc.file.writelines( 162 | map( 163 | lambda s: s + "\n", 164 | itertools.chain( 165 | ( 166 | "[run]", 167 | "omit =", 168 | ), 169 | 
map(lambda s: "\t" + s, omitted_files), 170 | ( 171 | "[report]", 172 | "# Exclude lines specific to some other Python version from coverage", 173 | "exclude_lines =", 174 | ), 175 | map(lambda s: "\t" + s, exclusions)))) 176 | coveragerc.file.flush() 177 | 178 | coverage_args = [ 179 | "--cov=ipfshttpclient", 180 | "--cov-branch", 181 | "--cov-config={0}".format(coveragerc.name), 182 | "--no-cov-on-fail", 183 | "--cov-fail-under=90", 184 | "--cov-report=term", 185 | "--cov-report=html:{}".format(str(TEST_PATH / "cov_html")), 186 | "--cov-report=xml:{}".format(str(TEST_PATH / "cov.xml")), 187 | ] 188 | 189 | # Launch pytest in-process 190 | PYTEST_CODE = pytest.main([ 191 | "--verbose", 192 | ] + coverage_args + sys.argv[1:], plugins=[DaemonProcessPlugin()]) 193 | finally: 194 | try: 195 | # Move coverage file to test directory (so that the coverage files of different 196 | # versions can be merged later on) 197 | shutil.move(str(BASE_PATH / ".coverage"), str(TEST_PATH / "cov_raw")) 198 | except FileNotFoundError: 199 | pass # Early crash in pytest or Windows – no coverage data generated 200 | 201 | # Make sure daemon was terminated during the tests 202 | if DAEMON.poll() is None: # "if DAEMON is running" 203 | DAEMON.kill() 204 | 205 | print("IPFS daemon was still running after test!", file=sys.stderr) 206 | 207 | output = list(DAEMON.stdout) 208 | if output: 209 | print("IPFS daemon printed extra messages:", file=sys.stderr) 210 | for line in output: 211 | print("\t{0}".format(line), end="", file=sys.stderr) 212 | 213 | sys.exit(PYTEST_CODE) 214 | -------------------------------------------------------------------------------- /test/unit/test_client.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import ipfshttpclient 4 | 5 | 6 | def test_assert_version(): 7 | with pytest.warns(None) as warnings: 8 | # Minimum required version 9 | ipfshttpclient.assert_version("0.1.0", "0.1.0", "0.2.0", ["0.1.2"]) 10 | 11 | assert len(warnings) == 0 12 | 13 | # Too high version 14 | with pytest.warns(ipfshttpclient.exceptions.VersionMismatch): 15 | ipfshttpclient.assert_version("0.2.0", "0.1.0", "0.2.0", ["0.1.2"]) 16 | 17 | # Too low version 18 | with pytest.warns(ipfshttpclient.exceptions.VersionMismatch): 19 | ipfshttpclient.assert_version("0.0.5", "0.1.0", "0.2.0", ["0.1.2"]) 20 | 21 | # Blacklisted version 22 | with pytest.warns(ipfshttpclient.exceptions.VersionMismatch): 23 | ipfshttpclient.assert_version("0.1.2-1", "0.1.0", "0.2.0", ["0.1.2"]) 24 | 25 | 26 | def test_client_session_param(): 27 | client = ipfshttpclient.Client(session=True) 28 | assert client._client._session is not None 29 | try: 30 | with pytest.raises(Exception): 31 | with client: 32 | pass # Should fail because a session is already open 33 | assert client._client._session is not None 34 | finally: 35 | client.close() 36 | assert client._client._session is None 37 | 38 | 39 | def test_client_session_context(): 40 | client = ipfshttpclient.Client() 41 | assert client._client._session is None 42 | with client: 43 | assert client._client._session is not None 44 | assert client._client._session is None -------------------------------------------------------------------------------- /test/unit/test_encoding.py: -------------------------------------------------------------------------------- 1 | """Test the generic data encoding and decoding module.""" 2 | import json 3 | 4 | import pytest 5 | import typing as ty 6 | 7 | import ipfshttpclient.encoding 8 | import 
ipfshttpclient.exceptions 9 | import ipfshttpclient.utils 10 | 11 | 12 | @pytest.fixture 13 | def json_encoder(): 14 | return ipfshttpclient.encoding.Json() 15 | 16 | 17 | def test_dummy_encoder(): 18 | """Tests if the dummy encoder does its trivial job.""" 19 | dummy_encoder = ipfshttpclient.encoding.Dummy() 20 | 21 | for v in (b"123", b"4", b"ddjlflsdmlflsdfjlfjlfdsjldfs"): 22 | assert dummy_encoder.encode(v) == v 23 | 24 | assert list(dummy_encoder.parse_partial(v)) == [v] 25 | assert list(dummy_encoder.parse_finalize()) == [] 26 | 27 | 28 | def test_json_parse_partial(json_encoder): 29 | """Tests if feeding parts of JSON strings in the right order to the JSON parser produces the right results.""" 30 | data1 = {'key1': 'value1'} 31 | data2 = {'key2': 'value2'} 32 | 33 | # Try single fragmented data set 34 | data1_binary = json.dumps(data1).encode("utf-8") 35 | assert list(json_encoder.parse_partial(data1_binary[:8])) == [] 36 | assert list(json_encoder.parse_partial(data1_binary[8:])) == [data1] 37 | assert list(json_encoder.parse_finalize()) == [] 38 | 39 | # Try multiple data sets contained in whitespace 40 | data2_binary = json.dumps(data2).encode("utf-8") 41 | data2_final = b" " + data1_binary + b" \r\n " + data2_binary + b" " 42 | assert list(json_encoder.parse_partial(data2_final)) == [data1, data2] 43 | assert list(json_encoder.parse_finalize()) == [] 44 | 45 | # String containing broken UTF-8 46 | with pytest.raises(ipfshttpclient.exceptions.DecodingError): 47 | list(json_encoder.parse_partial(b'{"hello": "\xc3ber world!"}')) 48 | assert list(json_encoder.parse_finalize()) == [] 49 | 50 | 51 | def test_json_with_newlines(json_encoder): 52 | """Tests if feeding partial JSON strings with line breaks behaves as expected.""" 53 | data1 = '{"key1":\n"value1",\n' 54 | data2 = '"key2":\n\n\n"value2"\n}' 55 | 56 | data_expected = json.loads(data1 + data2) 57 | 58 | assert list(json_encoder.parse_partial(data1.encode("utf-8"))) == [] 59 | assert list(json_encoder.parse_partial(data2.encode("utf-8"))) == [data_expected] 60 | assert list(json_encoder.parse_finalize()) == [] 61 | 62 | 63 | def test_json_parse_incomplete(json_encoder): 64 | """Tests if feeding the JSON parser incomplete data correctly produces an error.""" 65 | list(json_encoder.parse_partial(b'{"bla":')) 66 | with pytest.raises(ipfshttpclient.exceptions.DecodingError): 67 | json_encoder.parse_finalize() 68 | 69 | list(json_encoder.parse_partial(b'{"\xc3')) # Incomplete UTF-8 sequence 70 | with pytest.raises(ipfshttpclient.exceptions.DecodingError): 71 | json_encoder.parse_finalize() 72 | 73 | 74 | def test_json_encode(json_encoder): 75 | """Tests serialization of an object into a JSON formatted UTF-8 string.""" 76 | 77 | data = ty.cast( 78 | ipfshttpclient.utils.json_dict_t, 79 | {'key': 'value with Ünicøde characters ☺'} 80 | ) 81 | 82 | assert json_encoder.encode(data) == \ 83 | b'{"key":"value with \xc3\x9cnic\xc3\xb8de characters \xe2\x98\xba"}' 84 | 85 | 86 | def test_json_encode_invalid_surrogate(json_encoder): 87 | """Tests that serializing an object containing an invalid surrogate raises an EncodingError.""" 88 | 89 | data = ty.cast( 90 | ipfshttpclient.utils.json_dict_t, 91 | {'key': 'value with Ünicøde characters and disallowed surrogate: \uDC00'} 92 | ) 93 | with pytest.raises(ipfshttpclient.exceptions.EncodingError): 94 | json_encoder.encode(data) 95 | 96 | 97 | def test_json_encode_invalid_type(json_encoder): 98 | """Tests that serializing an object containing a non-JSON-encodable type raises an EncodingError.""" 99 | 100 | data = ty.cast( 101 | 
ipfshttpclient.utils.json_dict_t, 102 | {'key': b'value that is not JSON encodable'} 103 | ) 104 | 105 | with pytest.raises(ipfshttpclient.exceptions.EncodingError): 106 | json_encoder.encode(data) 107 | 108 | 109 | def test_get_encoder_by_name(): 110 | """Tests the process of obtaining an Encoder object given the named encoding.""" 111 | encoder = ipfshttpclient.encoding.get_encoding('json') 112 | assert encoder.name == 'json' 113 | 114 | 115 | def test_get_invalid_encoder(): 116 | """Tests the exception handling given an invalid named encoding.""" 117 | with pytest.raises(ipfshttpclient.exceptions.EncoderMissingError): 118 | ipfshttpclient.encoding.get_encoding('fake') 119 | -------------------------------------------------------------------------------- /test/unit/test_exceptions.py: -------------------------------------------------------------------------------- 1 | 2 | from ipfshttpclient.exceptions import MatcherSpecInvalidError, Error 3 | from ipfshttpclient.filescanner import Matcher 4 | 5 | 6 | def test_matcher_spec_invalid_error_message(): 7 | ex = MatcherSpecInvalidError('junk') 8 | assert ex.args[0] == f"Don't know how to create a {Matcher.__name__} from spec 'junk'" 9 | 10 | 11 | def test_matcher_spec_invalid_error_multiple_inheritance(): 12 | ex = MatcherSpecInvalidError('wrong') 13 | 14 | # Base class of all exceptions in this library 15 | assert isinstance(ex, Error) 16 | 17 | # Base class of type errors 18 | assert isinstance(ex, TypeError) 19 | -------------------------------------------------------------------------------- /test/unit/test_filescanner.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import re 3 | import sys 4 | import typing as ty 5 | 6 | import pytest 7 | 8 | from datetime import datetime 9 | 10 | from ipfshttpclient import filescanner 11 | 12 | from ipfshttpclient.exceptions import MatcherSpecInvalidError 13 | from ipfshttpclient.filescanner import FSNodeEntry 14 | from ipfshttpclient.filescanner import FSNodeType 15 | 16 | 17 | TEST_FILE_DIR: str = os.path.join(os.path.dirname(__file__), "..", "functional") 18 | 19 | 20 | def test_fs_node_entry_as_repr() -> None: 21 | entry = FSNodeEntry(type=FSNodeType.FILE, path='b', relpath='c', name='d', parentfd=123) 22 | 23 | assert ( 24 | repr(entry) 25 | == 26 | "FSNodeEntry(type=<FSNodeType.FILE: 1>, path='b', relpath='c', name='d', parentfd=123)" 27 | ) 28 | 29 | 30 | def test_fs_node_entry_as_str() -> None: 31 | entry = FSNodeEntry(type=FSNodeType.FILE, path='b', relpath='c', name='d', parentfd=123) 32 | 33 | assert str(entry) == 'b' 34 | 35 | 36 | @pytest.mark.parametrize("pattern,expected,kwargs", [ 37 | ("literal", [r"(?![.])(?s:literal)\Z"], {}), 38 | (b"literal", [br"(?![.])(?s:literal)\Z"], {}), 39 | ("*.a", [r"(?![.])(?s:.*\.a)\Z"], {}), 40 | (b"*.a", [br"(?![.])(?s:.*\.a)\Z"], {}), 41 | ("*/**/*.dir/**/**/.hidden", [r"(?![.])(?s:.*)\Z", None, r"(?![.])(?s:.*\.dir)\Z", None, None, r"(?s:\.hidden)\Z"], {}), 42 | ("*/**/*.dir/**/**/.hidden", [r"(?s:.*)\Z", None, r"(?s:.*\.dir)\Z", None, None, r"(?s:\.hidden)\Z"], {"period_special": False}), 43 | ("././/////////./*.a", [r"(?![.])(?s:.*\.a)\Z"], {}), 44 | (b"././/////////./*.a", [br"(?![.])(?s:.*\.a)\Z"], {}), 45 | ("*/*.a", [r"(?![.])(?s:.*)\Z", r"(?![.])(?s:.*\.a)\Z"], {}), 46 | ("*/*.a", [r"(?s:.*)\Z", r"(?s:.*\.a)\Z"], {"period_special": False}), 47 | ]) 48 | def test_glob_compile(pattern: ty.AnyStr, expected: ty.List[ty.AnyStr], kwargs: ty.Dict[str, bool]): 49 | matcher = filescanner.GlobMatcher(pattern, 
**kwargs) 50 | assert list(map(lambda r: r.pattern if r is not None else None, matcher._pat)) == expected 51 | 52 | 53 | def test_glob_sep_normalize(monkeypatch): 54 | monkeypatch.setattr(os.path, "sep", "#") 55 | monkeypatch.setattr(os.path, "altsep", "~") 56 | 57 | assert len(filescanner.GlobMatcher("a#b~c")._pat) == 3 58 | 59 | monkeypatch.setattr(os.path, "altsep", None) 60 | 61 | assert len(filescanner.GlobMatcher("a#b~c")._pat) == 2 62 | 63 | 64 | # Possible hypothesis test: Parsing glob should never fail, except in the following 3 cases. 65 | 66 | @pytest.mark.skipif(sys.flags.optimize, reason="Glob error asserts are stripped from optimized code") 67 | @pytest.mark.parametrize("pattern", [ 68 | "../*", 69 | b"../*", 70 | "/absolute/file/path", 71 | b"/absolute/file/path", 72 | ]) 73 | def test_glob_errors(pattern): 74 | with pytest.raises(AssertionError): 75 | filescanner.GlobMatcher(pattern) 76 | 77 | 78 | def test_glob_not_implemented(): 79 | with pytest.raises(NotImplementedError): 80 | filescanner.GlobMatcher("*/.**") 81 | 82 | 83 | @pytest.mark.parametrize("pattern,path,is_dir,descend,report,kwargs", [ 84 | # Basic literal path tests 85 | ("literal", "other", False, False, False, {}), 86 | ("literal", "literal", False, False, True, {}), 87 | ("literal", "literal/more", False, False, False, {}), 88 | (b"literal", b"other", False, False, False, {}), 89 | (b"literal", b"literal", False, False, True, {}), 90 | (b"literal", b"literal/more", False, False, False, {}), 91 | ("literal/more", "other", False, False, False, {}), 92 | ("literal/more", "literal", False, True, False, {}), 93 | ("literal/more", "literal", True, True, True, {}), 94 | ("literal/more", "literal/more", False, False, True, {}), 95 | (b"literal/more", b"other", False, False, False, {}), 96 | (b"literal/more", b"literal", False, True, False, {}), 97 | (b"literal/more", b"literal", True, True, True, {}), 98 | (b"literal/more", b"literal/more", False, False, True, {}), 99 | ("literal/more", "other", False, False, False, {"recursive": False}), 100 | ("literal/more", "literal", False, False, False, {"recursive": False}), 101 | ("literal/more", "literal", True, False, True, {"recursive": False}), 102 | ("literal/more", "literal/more", False, False, False, {"recursive": False}), 103 | 104 | # Test basic leading-period handling 105 | ("*.a", ".a", False, False, False, {}), 106 | ("*.a", ".a", False, False, True, {"period_special": False}), 107 | ("*.a", ".a", True, False, False, {}), 108 | ("*.a", ".a", True, False, True, {"period_special": False}), 109 | 110 | # Test leading-period with trailing slash handling 111 | ("*.a/", ".a", False, False, False, {}), 112 | ("*.a/", ".a", False, False, False, {"period_special": False}), 113 | ("*.a/", ".a", True, False, False, {}), 114 | ("*.a/", ".a", True, False, True, {"period_special": False}), 115 | 116 | # Tests for double-star recursion with premium leading-period shenanigans 117 | ("*/**/*.dir/**/**/.hidden", ".dir/.hidden", False, False, False, {}), 118 | ("*/**/*.dir/**/**/.hidden", "a/.dir/.hidden", False, True, False, {}), 119 | ("*/**/*.dir/**/**/.hidden", "a/b.dir/.hidden", False, True, True, {}), 120 | ("*/**/*.dir/**/**/.hidden", "a/u/v/w/b.dir/c/d/e/f/.hidden", False, True, True, {}), 121 | ("**", ".a", False, True, False, {}), 122 | (filescanner.GlobMatcher("**"), ".a", False, True, False, {}), 123 | 124 | # Regular expression test 125 | (re.compile(r"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), "Camera/IMG-0169.jpeg", False, True, True, {}), 126 | 
(re.compile(r"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), "Camera", True, True, True, {}), 127 | (re.compile(r"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), "Camera/Thumbs.db", False, True, False, {}), 128 | (re.compile(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), b"Camera/IMG-0169.jpeg", False, True, True, {}), 129 | (re.compile(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), b"Camera", True, True, True, {}), 130 | (re.compile(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), b"Camera/Thumbs.db", False, True, False, {}), 131 | (filescanner.ReMatcher(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), b"Camera/Thumbs.db", False, True, False, {}), 132 | 133 | # Multiple patterns 134 | (["*/**/*.dir/**/**/.hidden", re.compile(r"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$")], "Camera/IMG-1279.jpeg", False, True, True, {}), 135 | ([b"*/**/*.dir/**/**/.hidden", re.compile(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$")], b"Camera/IMG-1279.jpeg", False, True, True, {}), 136 | (["*/**/*.dir/**/**/.hidden", re.compile(r"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$")], "a/.dir/.hidden", False, True, False, {}), 137 | ([b"*/**/*.dir/**/**/.hidden", re.compile(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$")], b"a/.dir/.hidden", False, True, False, {}), 138 | (["*/**/*.dir/**/**/.hidden", re.compile(r"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$")], "a/b.dir/.hidden", False, True, True, {}), 139 | ([b"*/**/*.dir/**/**/.hidden", re.compile(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$")], b"a/b.dir/.hidden", False, True, True, {}), 140 | 141 | # Edge case: No patterns 142 | ([], "???", False, False, False, {}), 143 | ([], b"???", False, False, False, {}), 144 | ]) 145 | def test_glob_matching( 146 | monkeypatch, 147 | pattern: ty.Union[ty.AnyStr, filescanner.re_pattern_t, ty.List[ty.Union[ty.AnyStr, filescanner.re_pattern_t]]], 148 | path: ty.AnyStr, 149 | is_dir: bool, 150 | descend: bool, 151 | report: bool, 152 | kwargs: ty.Dict[str, bool] 153 | ): 154 | # Hopefully useless sanity check 155 | assert os.path.sep == "/" or os.path.altsep == "/" 156 | 157 | slash = "/" if isinstance(path, str) else b"/" # type: ty.AnyStr 158 | sep = os.path.sep if isinstance(path, str) else os.fsencode(os.path.sep) # type: ty.AnyStr 159 | 160 | path = path.replace(slash, sep) 161 | 162 | matcher = filescanner.matcher_from_spec(pattern, **kwargs) 163 | assert matcher.should_descend(path) is descend 164 | assert matcher.should_report(path, is_dir=is_dir) is report 165 | 166 | 167 | @pytest.mark.parametrize('spec', [123, datetime.now()]) 168 | def test_matcher_from_spec_rejects_invalid_spec_type(spec: ty.Any) -> None: 169 | with pytest.raises(MatcherSpecInvalidError): 170 | filescanner.matcher_from_spec(spec) 171 | 172 | 173 | def test_matcher_from_spec_builds_recursive_glob_matcher(): 174 | actual = filescanner.matcher_from_spec('*.py') 175 | 176 | assert isinstance(actual, filescanner.GlobMatcher) 177 | 178 | 179 | def test_matcher_from_spec_builds_recursive_open_matcher(): 180 | actual = filescanner.matcher_from_spec(None) 181 | 182 | assert isinstance(actual, filescanner.MatchAll) 183 | 184 | 185 | def test_matcher_from_spec_builds_non_recursive_glob_matcher(): 186 | actual = filescanner.matcher_from_spec('*.py', recursive=False) 187 | 188 | assert isinstance(actual, filescanner.NoRecusionAdapterMatcher) 189 | assert isinstance(actual._child, filescanner.GlobMatcher) 190 | 191 | 192 | def test_matcher_from_spec_builds_non_recursive_open_matcher(): 193 | actual = filescanner.matcher_from_spec(None, recursive=False) 194 | 195 | assert isinstance(actual, filescanner.NoRecusionAdapterMatcher) 196 | assert 
isinstance(actual._child, filescanner.MatchAll) 197 | 198 | 199 | def test_walk_fd_unsupported(monkeypatch): 200 | monkeypatch.setattr(filescanner, "HAVE_FWALK", False) 201 | 202 | with pytest.raises(NotImplementedError): 203 | filescanner.walk(0) 204 | 205 | 206 | def test_walk_instaclose(mocker): 207 | close_spy = mocker.spy(filescanner.walk, "close") 208 | 209 | with filescanner.walk("."): 210 | pass 211 | 212 | close_spy.assert_called_once() 213 | 214 | 215 | @pytest.mark.parametrize("path,pattern,expected", [ 216 | (TEST_FILE_DIR + os.path.sep + "fake_dir_almost_empty" + os.path.sep, None, [ 217 | (FSNodeType.DIRECTORY, ".", "."), 218 | (FSNodeType.FILE, ".gitignore", ".gitignore"), 219 | ]), 220 | (TEST_FILE_DIR + os.path.sep + "fake_dir", ["test2", "test3"], [ 221 | (FSNodeType.DIRECTORY, ".", "."), 222 | (FSNodeType.DIRECTORY, "test2", "test2"), 223 | (FSNodeType.DIRECTORY, "test3", "test3"), 224 | ]), 225 | ]) 226 | def test_walk( 227 | monkeypatch, 228 | path: str, 229 | pattern: ty.Optional[filescanner.match_spec_t[str]], 230 | expected: ty.List[filescanner.FSNodeEntry[str]] 231 | ) -> None: 232 | def assert_results() -> None: 233 | result = [(e.type, e.relpath, e.name) for e in filescanner.walk(path, pattern)] 234 | assert sorted(result, key=lambda r: r[1]) == expected 235 | 236 | assert_results() 237 | 238 | # Check again with plain `os.walk` if the current platform supports `os.fwalk` 239 | if filescanner.HAVE_FWALK: 240 | monkeypatch.setattr(filescanner, "HAVE_FWALK", False) 241 | 242 | assert_results() 243 | 244 | 245 | def test_supports_fd(): 246 | assert (filescanner.walk in filescanner.supports_fd) is filescanner.HAVE_FWALK 247 | -------------------------------------------------------------------------------- /test/unit/test_http_httpx.py: -------------------------------------------------------------------------------- 1 | # Only add tests to this file if they really are specific to the behaviour 2 | # of this backend. For cross-backend or `http_common.py` tests use 3 | # `test_http.py` instead. 
4 | import http.cookiejar 5 | import math 6 | import pytest 7 | 8 | pytest.importorskip("ipfshttpclient.http_httpx") 9 | import ipfshttpclient.http_httpx 10 | 11 | 12 | cookiejar = http.cookiejar.CookieJar() 13 | 14 | 15 | @pytest.mark.parametrize("kwargs,expected", [ 16 | ({}, {}), 17 | 18 | ({ 19 | "auth": ("user", "pass"), 20 | "cookies": cookiejar, 21 | "headers": {"name": "value"}, 22 | "params": (("name", "value"),), 23 | "timeout": (math.inf, math.inf), 24 | }, { 25 | "auth": ("user", "pass"), 26 | "cookies": cookiejar, 27 | "headers": {"name": "value"}, 28 | "params": [("name", "value")], 29 | "timeout": (None, None, None, None), 30 | }), 31 | 32 | ({ 33 | "auth": ("user", b"pass"), 34 | "cookies": {"name": "value"}, 35 | "headers": ((b"name", b"value"),), 36 | "timeout": 34, 37 | }, { 38 | "auth": ("user", b"pass"), 39 | "cookies": {"name": "value"}, 40 | "headers": ((b"name", b"value"),), 41 | "timeout": 34, 42 | }), 43 | ]) 44 | def test_map_args_to_httpx(kwargs, expected): 45 | assert ipfshttpclient.http_httpx.map_args_to_httpx(**kwargs) == expected 46 | 47 | 48 | @pytest.mark.parametrize("args,kwargs,expected_kwargs,expected_base,expected_laddr", [ 49 | (("/dns/localhost/tcp/5001/http", "api/v0"), {}, { 50 | "params": [("stream-channels", "true")], 51 | }, "http://localhost:5001/api/v0/", None), 52 | 53 | (("/dns6/ietf.org/tcp/443/https", "/base/"), { 54 | "auth": ("user", "pass"), 55 | "cookies": cookiejar, 56 | "headers": {"name": "value"}, 57 | "offline": True, 58 | "timeout": (math.inf, math.inf), 59 | }, { 60 | "auth": ("user", "pass"), 61 | "cookies": cookiejar, 62 | "headers": {"name": "value"}, 63 | "params": [("offline", "true"), ("stream-channels", "true")], 64 | "timeout": (None, None, None, None), 65 | }, "https://ietf.org:443/base/", "::"), 66 | ]) 67 | def test_client_args_to_session_kwargs(args, kwargs, expected_kwargs, expected_base, expected_laddr): 68 | client = ipfshttpclient.http_httpx.ClientSync(*args, **kwargs) 69 | assert client._session_kwargs == expected_kwargs 70 | assert client._session_base == expected_base 71 | assert client._session_laddr == expected_laddr 72 | -------------------------------------------------------------------------------- /test/unit/test_http_requests.py: -------------------------------------------------------------------------------- 1 | # Only add tests to this file if they really are specific to the behaviour 2 | # of this backend. For cross-backend or `http_common.py` tests use 3 | # `test_http.py` instead. 
4 | import http.cookiejar 5 | import math 6 | import socket 7 | 8 | import pytest 9 | 10 | pytest.importorskip("ipfshttpclient.http_requests") 11 | import ipfshttpclient.http_requests 12 | 13 | 14 | cookiejar = http.cookiejar.CookieJar() 15 | 16 | @pytest.mark.parametrize("kwargs,expected", [ 17 | ({}, {}), 18 | 19 | ({ 20 | "auth": ("user", "pass"), 21 | "cookies": cookiejar, 22 | "headers": {"name": "value"}, 23 | "params": (("name", "value"),), 24 | "timeout": (math.inf, math.inf), 25 | }, { 26 | "auth": ("user", "pass"), 27 | "cookies": cookiejar, 28 | "headers": {"name": "value"}, 29 | "params": {"name": "value"}, 30 | "timeout": (None, None), 31 | }), 32 | 33 | ({ 34 | "auth": ("user", b"pass"), 35 | "cookies": {"name": "value"}, 36 | "headers": ((b"name", b"value"),), 37 | "timeout": 34, 38 | }, { 39 | "auth": ("user", b"pass"), 40 | "cookies": {"name": "value"}, 41 | "headers": ((b"name", b"value"),), 42 | "timeout": 34, 43 | }), 44 | ]) 45 | def test_map_args_to_requests(kwargs, expected): 46 | assert ipfshttpclient.http_requests.map_args_to_requests(**kwargs) == expected 47 | 48 | @pytest.mark.parametrize("args,kwargs,expected_base,expected_props,expected_timeout", [ 49 | (("/dns/localhost/tcp/5001/http", "api/v0"), {}, "http://localhost:5001/api/v0/", { 50 | "family": socket.AF_UNSPEC, 51 | "params": {'stream-channels': 'true'}, 52 | }, None), 53 | 54 | (("/dns6/ietf.org/tcp/443/https", "/base/"), { 55 | "auth": ("user", "pass"), 56 | "cookies": cookiejar, 57 | "headers": {"name": "value"}, 58 | "offline": True, 59 | "timeout": (math.inf, math.inf), 60 | }, "https://ietf.org:443/base/", { 61 | "family": socket.AF_INET6, 62 | "auth": ("user", "pass"), 63 | "cookies": cookiejar, 64 | "headers": {"name": "value"}, 65 | "params": {'offline': 'true', 'stream-channels': 'true'}, 66 | }, (math.inf, math.inf)), 67 | ]) 68 | def test_client_args_to_session_props(args, kwargs, expected_base, expected_props, expected_timeout): 69 | client = ipfshttpclient.http_requests.ClientSync(*args, **kwargs) 70 | assert client._base_url == expected_base 71 | assert client._session_props == expected_props 72 | assert client._default_timeout == expected_timeout -------------------------------------------------------------------------------- /test/unit/test_utils.py: -------------------------------------------------------------------------------- 1 | """Tox unit tests for utils.py. 2 | 3 | Classes: 4 | TestUtils -- defines a set of unit tests for utils.py 5 | """ 6 | 7 | import io 8 | import os.path 9 | import sys 10 | import unittest 11 | 12 | import ipfshttpclient.utils as utils 13 | 14 | class TestUtils(unittest.TestCase): 15 | """Contains unit tests for utils.py. 16 | 17 | Public methods: 18 | test_guess_mimetype -- tests utils.guess_mimetype() 19 | test_clean_file_opened -- tests utils.clean_file() with a stringIO object 20 | test_clean_file_unopened_textpath -- tests utils.clean_file() with a text string filepath 21 | test_clean_file_unopened_binarypath -- tests utils.clean_file() with a binary string filepath 22 | test_clean_files_single -- tests utils.clean_files() with a filepath 23 | test_clean_files_list -- tests utils.clean_files() with a list of files 24 | test_return_field_init -- tests utils.return_field.__init__() 25 | test_return_field_call -- tests utils.return_field.__call__() 26 | 27 | """ 28 | def test_guess_mimetype(self): 29 | """Tests utils.guess_mimetype(). 30 | 31 | Guesses the mimetype of the requirements.txt file 32 | located in the project's root directory. 
33 | """ 34 | path = os.path.join(os.path.dirname(__file__), 35 | "..", "..", "requirements.txt") 36 | assert utils.guess_mimetype(path) == "text/plain" 37 | 38 | def test_clean_file_opened(self): 39 | """Tests utils.clean_file() with a stringIO object.""" 40 | string_io = io.StringIO('Mary had a little lamb') 41 | f, opened = utils.clean_file(string_io) 42 | assert hasattr(f, 'read') 43 | assert not opened 44 | # Closing stringIO after test assertions. 45 | f.close() 46 | 47 | def test_clean_file_unopened_textpath(self): 48 | """Tests utils.clean_file() with a text string filepath. 49 | 50 | This test relies on the openability of the file 'fsdfgh' 51 | located in 'test/functional/fake_dir'. 52 | """ 53 | path = os.path.dirname(__file__) 54 | path = os.path.join(path, "..", "functional", "fake_dir", "fsdfgh") 55 | f, opened = utils.clean_file(path) 56 | assert hasattr(f, 'read') 57 | assert opened 58 | # Closing file after test assertions. 59 | f.close() 60 | 61 | def test_clean_file_unopened_binarypath(self): 62 | """Tests utils.clean_file() with a binary string filepath. 63 | 64 | This test relies on the openability of the file 'fsdfgh' 65 | located in 'test/functional/fake_dir'. 66 | """ 67 | path = os.fsencode(os.path.dirname(__file__)) 68 | path = os.path.join(path, b"..", b"functional", b"fake_dir", b"fsdfgh") 69 | f, opened = utils.clean_file(path) 70 | assert hasattr(f, 'read') 71 | assert opened 72 | # Closing file after test assertions. 73 | f.close() 74 | 75 | def test_clean_files_single(self): 76 | """Tests utils.clean_files() with a singular filepath. 77 | 78 | This test relies on the openability of the file 'fsdfgh' 79 | located in 'test/functional/fake_dir'. 80 | """ 81 | path = os.path.join(os.path.dirname(__file__), 82 | "..", "functional", "fake_dir", "fsdfgh") 83 | gen = utils.clean_files(path) 84 | for tup in gen: 85 | assert hasattr(tup[0], 'read') 86 | assert tup[1] 87 | # Closing file after test assertions. 88 | tup[0].close() 89 | 90 | def test_clean_files_list(self): 91 | """Tests utils.clean_files() with a list of files/stringIO objects.""" 92 | path = os.path.join(os.path.dirname(__file__), 93 | "..", "functional", "fake_dir", "fsdfgh") 94 | string_io = io.StringIO('Mary had a little lamb') 95 | files = [path, string_io] 96 | gen = utils.clean_files(files) 97 | for i in range(0, 2): 98 | tup = next(gen) 99 | assert hasattr(tup[0], 'read') 100 | if i == 0: 101 | assert tup[1] 102 | else: 103 | assert not tup[1] 104 | # Closing files/stringIO objects after test assertions. 
105 | tup[0].close() 106 | 107 | def test_return_field_init(self): 108 | """Tests utils.return_field.__init__().""" 109 | return_field = utils.return_field('Hash') 110 | assert return_field.field == 'Hash' 111 | 112 | def test_return_field_call(self): 113 | """Tests utils.return_field.__call__().""" 114 | expected_hash = 'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab' 115 | 116 | @utils.return_field('Hash') 117 | def wrapper(string, *args, **kwargs): 118 | resp = {'Hash': expected_hash, 'string': string} 119 | return resp 120 | assert wrapper('Mary had a little lamb') == expected_hash 121 | -------------------------------------------------------------------------------- /tools/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | PROGNAME="${0}" 5 | 6 | usage() 7 | { 8 | echo "Usage: ${PROGNAME} [--install]" 9 | echo 10 | echo " --install Install the pre-commit hook" 11 | echo " -h, --help Display this help and exit" 12 | echo 13 | echo "Without any options the pre-commit checks are run." 14 | } 15 | 16 | if [ $# -gt 0 ]; 17 | then 18 | case "${1}" in 19 | "--install") 20 | top_dir="$(git rev-parse --show-toplevel)" 21 | git_dir="$(git rev-parse --git-dir)" 22 | 23 | if [ -f "${git_dir}/hooks/pre-commit" ]; 24 | then 25 | echo "ERROR: found existing pre-commit hook; " \ 26 | "cowardly giving up." >&2 27 | exit 1 28 | fi 29 | 30 | echo " • Installing pre-commit hook to ${git_dir}/hooks" 31 | ln -s "${top_dir}/tools/pre-commit" "${git_dir}/hooks/pre-commit" 32 | 33 | echo " • Enabling GIT hook Unicode support" 34 | git config --local --type=bool hooks.allownonascii true 35 | exit 36 | ;; 37 | 38 | "-h"|"--help") 39 | usage 40 | exit 0 41 | ;; 42 | 43 | *) 44 | echo "${PROGNAME}: Unknown option “${1}”" >&2 45 | echo >&2 46 | usage >&2 47 | exit 2 48 | ;; 49 | esac 50 | fi 51 | 52 | # Run code style and type tests before accepting a commit 53 | tox -e styleck -e typeck -------------------------------------------------------------------------------- /tools/release/requirements.txt: -------------------------------------------------------------------------------- 1 | flit>=3.0 2 | Sphinx~=3.0 3 | sphinx_autodoc_typehints 4 | recommonmark~=0.5.0 5 | 6 | -------------------------------------------------------------------------------- /tools/verify/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG PYTHON_VERSION 2 | 3 | FROM python:${PYTHON_VERSION}-slim 4 | 5 | RUN pip install --upgrade pip 6 | RUN pip install tox 7 | 8 | # Mount the source code here, instead of to /usr/src/app. 9 | # Otherwise, tox will fail due to folder being read-only. 10 | # Mount only the source code; avoid mounting working folders. 
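# (validate.sh below bind-mounts the project's source folders and config files read-only into /source for exactly this reason)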
11 | 12 | RUN mkdir /source 13 | ADD entrypoint.sh / 14 | 15 | ENTRYPOINT ["/entrypoint.sh"] 16 | -------------------------------------------------------------------------------- /tools/verify/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cp -r /source/* /usr/src/app/ 4 | 5 | # Quote "$@" so that arguments containing whitespace survive the hand-off 6 | exec "$@" 7 | 8 | -------------------------------------------------------------------------------- /tools/verify/validate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | python_version=$1 6 | script_path=$(dirname "$0") 7 | source=$(realpath "$script_path/../..") 8 | tag=py-ipfs-http-client-verify:$python_version 9 | 10 | pushd "$script_path" 11 | 12 | echo "Building validator for Python $python_version..." 13 | 14 | docker build --build-arg PYTHON_VERSION="$python_version" -t "$tag" . 15 | 16 | echo "Validating version $python_version" 17 | 18 | docker run \ 19 | -it \ 20 | -v "$source/docs":/source/docs:ro \ 21 | -v "$source/ipfshttpclient":/source/ipfshttpclient:ro \ 22 | -v "$source/test":/source/test:ro \ 23 | -v "$source/pyproject.toml":/source/pyproject.toml:ro \ 24 | -v "$source/README.md":/source/README.md:ro \ 25 | -v "$source/tox.ini":/source/tox.ini:ro \ 26 | -w /usr/src/app \ 27 | "$tag" \ 28 | tox -e styleck -e typeck 29 | 30 | popd 31 | 32 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # For more information about tox, see https://tox.readthedocs.io/en/latest/ 2 | [tox] 3 | minversion = 3.3 4 | envlist = 5 | py3, 6 | py3-httpx, 7 | styleck, 8 | typeck 9 | 10 | # Enable slower `isolated_build` for PEP-518 compatibility 11 | isolated_build = true 12 | 13 | 14 | [testenv] 15 | deps = 16 | pytest ~= 6.2 17 | pytest-cov ~= 2.11 18 | pytest-dependency ~= 0.5 19 | pytest-localserver ~= 0.5 20 | pytest-mock ~= 3.5 21 | pytest-order ~= 0.8 22 | 23 | pytest-cid ~= 1.1 24 | py-cid 25 | 26 | whitelist_externals = ipfs 27 | passenv = IPFS_* PY_IPFS_HTTP_CLIENT_* 28 | commands = 29 | python -X utf8 "{toxinidir}/test/run-tests.py" {posargs} 30 | 31 | # Silence warning about not inheriting PYTHONPATH 32 | setenv = 33 | PYTHONPATH = 34 | 35 | 36 | [testenv:py3-httpx] 37 | deps-exclusive = 38 | httpx (~= 0.14.0) 39 | httpcore (~= 0.10.2) 40 | deps = 41 | {[testenv]deps} 42 | {[testenv:py3-httpx]deps-exclusive} 43 | setenv = 44 | {[testenv]setenv} 45 | PY_IPFS_HTTP_CLIENT_PREFER_HTTPX = yes 46 | commands = 47 | python -X utf8 "{toxinidir}/test/run-tests.py" {posargs} 48 | 49 | 50 | [testenv:styleck] 51 | isolated_build = false 52 | skipsdist = true 53 | 54 | deps = 55 | flake8 ~= 3.7 56 | flake8-tabs ~= 2.2 , >= 2.2.1 57 | commands = 58 | flake8 {posargs} 59 | 60 | 61 | [testenv:typeck] 62 | skip_install = true 63 | deps = 64 | mypy ~= 0.812 65 | pytest ~= 6.2 66 | {[testenv:py3-httpx]deps-exclusive} 67 | commands = 68 | mypy --config-file=tox.ini {posargs} -p ipfshttpclient 69 | 70 | # Pass down TERM environment variable to allow mypy output to be colorized 71 | # See: https://github.com/tox-dev/tox/issues/1441 72 | passenv = TERM 73 | 74 | 75 | [testenv:coverage] 76 | deps = 77 | coverage 78 | commands = 79 | python "{toxinidir}/test/combine-coverage.py" {posargs} 80 | 81 | 82 | #TODO: Migrate away from this file to `pyproject.toml` once `flake8`, `mypy` and `pytest` support using it: 83 | # * flake8: 
https://gitlab.com/pycqa/flake8/issues/428 (considering flakehell and flake9 here) 84 | # * mypy: https://github.com/python/mypy/issues/5205 85 | # * pytest: https://github.com/pytest-dev/pytest/issues/1556 (will be part of 6.0) 86 | 87 | 88 | [flake8] 89 | exclude = .git,.tox,+junk,coverage,dist,doc,*egg,build,tools,test/unit,docs,*__init__.py,venv 90 | 91 | # E221: Multiple spaces before operator 92 | # E241: Multiple spaces after ',': Breaks element alignment collections 93 | # E251: Spaces around '=' on parameter assignment 94 | # E262: Inline comment should start with '# ': Breaks tagged comments (ie: '#TODO: ') 95 | # E265: Block comment should start with '# ': ^ 96 | # E266: Too many leading '#' for block comment: Breaks declaring mega-blocks (ie: '### Section') 97 | # E303: More than 2 consecutive newlines 98 | # E722: Using bare except for cleanup-on-error is fine 99 | # (see bug report at https://github.com/PyCQA/pycodestyle/issues/703) 100 | # W292: No newline at end of file 101 | # W391: Blank line at end of file (sometimes triggered instead of the above!?) 102 | # F403: `from import *` used; unable to detect undefined names ←– Probably should be fixed… 103 | # F811: PyFlakes bug: `@ty.overload` annotation is not detected to mean `@typing.overload` 104 | # (see bug report at https://github.com/PyCQA/pyflakes/issues/561) 105 | ignore = E221,E241,E251,E262,E265,E266,E303,E722,W292,W391,F403,F811 106 | use-flake8-tabs = true 107 | max-line-length = 100 108 | tab-width = 4 109 | 110 | # E701: Multiple statements on one line 111 | # - requests_wrapper.py: Lots of symbols exported that we specifically don't use but that make sense in a reusable module 112 | # - test_*.py: Aligning `assert … not in …` and `assert … in …` kind of statements 113 | per-file-ignores = 114 | ./ipfshttpclient/requests_wrapper.py:E401,E402,F401 115 | ./test/functional/test_*.py:E272 116 | 117 | 118 | [mypy] 119 | # CLI behaviour 120 | color_output = true 121 | show_error_codes = true 122 | pretty = true 123 | 124 | # Include package directories without `__init__.py` 125 | namespace_packages = true 126 | 127 | # Extra strictness 128 | disallow_any_unimported = true 129 | #disallow_any_expr = true 130 | #disallow_any_decorated = true # Mostly OK, but fails at custom decorators 131 | disallow_any_generics = true 132 | disallow_subclassing_any = true 133 | 134 | #disallow_untyped_calls = true # Fails at many trio APIs that aren't typed yet 135 | disallow_untyped_defs = true 136 | 137 | strict_optional = true 138 | 139 | warn_redundant_casts = true 140 | warn_unused_ignores = true 141 | warn_return_any = true 142 | warn_unreachable = true 143 | 144 | [mypy-ipfshttpclient.client.*] 145 | ignore_errors = True 146 | 147 | [mypy-ipfshttpclient.client.base] 148 | ignore_errors = False 149 | 150 | 151 | [pytest] 152 | addopts = -ra --verbose 153 | console_output_style = progress 154 | testpaths = 155 | ipfshttpclient 156 | test/unit 157 | test/functional 158 | -------------------------------------------------------------------------------- /verify.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | function validate() { 6 | ./tools/verify/validate.sh "$1" 7 | } 8 | 9 | if [ -z "$1" ]; then 10 | echo "Validating minimum point release of each supported minor version..." 11 | 12 | # Maintain this concurrently with [tool.flit.metadata].requires-python in pyproject.toml. 
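# For example, when a new minor version gains support there, a matching line such as "validate 3.10.0" would be appended below.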
13 | validate 3.6.2 14 | validate 3.7.2 15 | validate 3.8.0 16 | validate 3.9.0 17 | else 18 | echo "Validating only $1..." 19 | validate "$1" 20 | fi 21 | --------------------------------------------------------------------------------