├── .editorconfig ├── .gitignore ├── .travis.yml ├── CHANGELOG.md ├── LICENSE ├── README.md ├── docs ├── Makefile ├── conf.py ├── http_client_ref.md ├── index.md ├── internal_ref.md ├── publish.py ├── py-cid.inv └── releasing.md ├── ipfshttpclient ├── __init__.py ├── client │ ├── __init__.py │ ├── base.py │ ├── bitswap.py │ ├── block.py │ ├── bootstrap.py │ ├── config.py │ ├── dag.py │ ├── dht.py │ ├── files.py │ ├── key.py │ ├── miscellaneous.py │ ├── name.py │ ├── object.py │ ├── pin.py │ ├── pubsub.py │ ├── repo.py │ ├── swarm.py │ └── unstable.py ├── encoding.py ├── exceptions.py ├── filescanner.py ├── filescanner_ty.pyi ├── http.py ├── http_common.py ├── http_httpx.py ├── http_requests.py ├── multipart.py ├── requests_wrapper.py ├── utils.py └── version.py ├── pyproject.toml ├── test ├── combine-coverage.py ├── functional │ ├── .gitattributes │ ├── conftest.py │ ├── fake_dir │ │ ├── fsdfgh │ │ ├── popoiopiu │ │ ├── test2 │ │ │ ├── fssdf │ │ │ ├── high │ │ │ │ └── five │ │ │ │ │ └── dummy │ │ │ └── llllg │ │ └── test3 │ │ │ └── ppppoooooooooo │ ├── fake_dir_almost_empty │ │ └── .gitignore │ ├── fake_json │ │ ├── data.car │ │ ├── links.json │ │ └── no_links.json │ ├── test_bitswap.py │ ├── test_block.py │ ├── test_dag.py │ ├── test_files.py │ ├── test_key.py │ ├── test_miscellaneous.py │ ├── test_name.py │ ├── test_object.py │ ├── test_other.py │ ├── test_pin.py │ ├── test_pubsub.py │ ├── test_repo.py │ └── test_unstable.py ├── run-tests.py └── unit │ ├── test_client.py │ ├── test_encoding.py │ ├── test_filescanner.py │ ├── test_http.py │ ├── test_http_httpx.py │ ├── test_http_requests.py │ ├── test_multipart.py │ └── test_utils.py ├── tools └── pre-commit └── tox.ini /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [**] 4 | end_of_line = lf 5 | insert_final_newline = false 6 | 7 | charset = utf-8 8 | indent_style = tab 9 | indent_brace_style = 1TBS 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Temporary files used by some editors 2 | *.swp 3 | *~ 4 | 5 | # Temporary files created during Python file loading 6 | *.pyc 7 | **/__pycache__/ 8 | 9 | # Project storage of some editors 10 | /.idea 11 | /.project 12 | /.settings 13 | /.vscode 14 | 15 | # Stuff that never was meant to be public 16 | /+junk 17 | 18 | # Build artefacts 19 | /coverage/ 20 | /build/ 21 | /dist/ 22 | 23 | # Documentation build artefacts 24 | docs/build/ 25 | 26 | # Testing artefacts 27 | go-ipfs/ 28 | .coverage 29 | .pytest_cache/ 30 | .tox/ -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # Config file for automatic testing at travis-ci.com 2 | 3 | language: python 4 | dist: bionic # VirtualEnv is too old on xenial 5 | 6 | matrix: 7 | include: 8 | - python: "3.5" 9 | - python: "3.6" 10 | - python: "3.7" 11 | - python: "3.8" 12 | - python: "pypy3" 13 | - python: "3.8" 14 | env: IPFS_VERSION=compat 15 | - python: "3.8" 16 | env: TOXENV=py3-httpx 17 | - python: "3.8" 18 | env: TOXENV=styleck 19 | before_install: ":" 20 | - python: "3.8" 21 | env: TOXENV=typeck 22 | before_install: ":" 23 | 24 | # Testing on macOS/Darwin tends to be much slower so only test the bare minimum 25 | # 26 | # Minimum testing version is 3.6, since the 3.5 binaries from python.org fail 27 | # with TLS error 
when trying to install `tox`. 28 | # 29 | # When changing any version here also update the relevant checksum below with 30 | # the values found on the https://python.org/ website. 31 | - os: osx 32 | language: shell 33 | env: PYTHON_VERSION=3.6.8-macosx10.9 34 | - os: osx 35 | language: shell 36 | env: PYTHON_VERSION=3.8.2-macosx10.9 37 | 38 | # Minimum testing version for Windows is 3.7 due to `-X utf8` flag support 39 | # being a life-saver for unbreaking the stupid INI parser used by all the 40 | # Python testing tools 41 | # 42 | # Python version numbers must exactly match a version in chocolatey as that 43 | # tool does not support version ranges at this time. 44 | - os: windows 45 | language: shell 46 | env: PYTHON_VERSION=3.7.6.20200110 47 | - os: windows 48 | language: shell 49 | env: PYTHON_VERSION=latest 50 | 51 | # Ensure go-IPFS is available for testing 52 | before_install: 53 | - |- 54 | ### ==== MODIFY THIS WHEN CHANGING TARGET OR MINIMUM IPFS VERSION ==== ### 55 | case "${IPFS_VERSION:-latest}" in 56 | latest) # Currently targeted version 57 | VERSION=0.7.0 58 | SHA512_LINUX=1d5910f27e8d7ea333145f15c6edcbacc1e8db3a99365f0847467bdfa7c73f4d7a05562e46be8e932056c8324ed0769ca1b6758dfb0ac4c2e1b6066b57c4a086 59 | SHA512_DARWIN=d864b58e832ce49df7ef77c8012ce9e6e7585f693c03ba8e4ebf86f772eebf0d6a00dde279cdc0f16250ad20bac6f67db6b3966848c3e3bcbdc4b4d2dee1cd89 60 | SHA512_WINDOWS=2262220e0502f00d6d429cfd16d2f0c55fa73cafb100bd72589fd1f7d97b3527dc7d49d60460650796d754f2aa4b03ba07753457691ef7d1a10d10857b819045 61 | ;; 62 | compat) # Earliest supported version 63 | VERSION=0.4.23 64 | SHA512_LINUX=5eebebd4d4628a01c3b6615d96123a5c744f64da18fc0950e00d99a36abb02eee694c1bb67549341a645ebb99f30de9198c33b556cdee2609013409a510d1d2b 65 | ;; 66 | esac 67 | ### ------------------------------ END ------------------------------- ### 68 | 69 | set -u 70 | 71 | # Derive usable parameters from the above constants 72 | case "${TRAVIS_OS_NAME}" in 73 | linux) 74 | export IPFS_DL_PATH="go-ipfs/v${VERSION}/go-ipfs_v${VERSION}_linux-amd64.tar.gz" 75 | export IPFS_DL_SHA512="${SHA512_LINUX}" 76 | ;; 77 | osx) 78 | export IPFS_DL_PATH="go-ipfs/v${VERSION}/go-ipfs_v${VERSION}_darwin-amd64.tar.gz" 79 | export IPFS_DL_SHA512="${SHA512_DARWIN}" 80 | 81 | # Make the `sha512sum` command available under that name 82 | export PATH="$(echo /usr/local/Cellar/coreutils/*/libexec/gnubin):${PATH}" 83 | ;; 84 | windows) 85 | export IPFS_DL_PATH="go-ipfs/v${VERSION}/go-ipfs_v${VERSION}_windows-amd64.zip" 86 | export IPFS_DL_SHA512="${SHA512_WINDOWS}" 87 | ;; 88 | esac 89 | export IPFS_DL_BASENAME="${IPFS_DL_PATH##*/}" 90 | 91 | set +u 92 | 93 | # Download the daemon application 94 | - wget "https://dist.ipfs.io/${IPFS_DL_PATH}" 2>&1 95 | # Verify its checksum 96 | - echo "${IPFS_DL_SHA512} ${IPFS_DL_BASENAME}" | sha512sum -c 97 | # Extract verified archive 98 | - |- 99 | if [[ "${IPFS_DL_BASENAME}" =~ .*\.zip ]]; 100 | then 101 | unzip "${IPFS_DL_BASENAME}" 102 | else 103 | tar -xvf "${IPFS_DL_BASENAME}" 104 | fi 105 | # Add IPFS daemon to search path 106 | - export PATH="${PWD}/go-ipfs:${PATH}" 107 | 108 | install: 109 | # Install suitable Python version for testing on Darwin and Windows; 110 | # and fixup the environment whichever way required 111 | - |- 112 | export PYTHON_VERSION="${PYTHON_VERSION:-${TRAVIS_PYTHON_VERSION}}" 113 | 114 | ### ====== MODIFY THIS WHEN CHANGING MACOS PYTHON TEST VERSIONS ====== ### 115 | case "${PYTHON_VERSION}" in 116 | 3.6.8-macosx10.9) MD5_MACOS="786c4d9183c754f58751d52f509bc971" ;; 117 | 
3.8.2-macosx10.9) MD5_MACOS="f12203128b5c639dc08e5a43a2812cc7" ;; 118 | esac 119 | ### ------------------------------ END ------------------------------- ### 120 | 121 | set -eu 122 | if [[ "${TRAVIS_OS_NAME}" = "osx" ]]; 123 | then 124 | # Download and install official Python macOS installation package 125 | wget "https://www.python.org/ftp/python/${PYTHON_VERSION%%-*}/python-${PYTHON_VERSION}.pkg" -O /tmp/python.pkg 126 | echo "${MD5_MACOS} /tmp/python.pkg" | md5sum -c 127 | sudo installer -pkg /tmp/python.pkg -target / 128 | elif [[ "${TRAVIS_OS_NAME}" = "windows" ]]; 129 | then 130 | # Install Windows Python from chocolatey 131 | VERSION_FLAG="" # Use latest version 132 | if [[ "${PYTHON_VERSION:-latest}" != latest ]]; 133 | then # Use specific version 134 | VERSION_FLAG="--version=${PYTHON_VERSION}" 135 | fi 136 | choco install python ${VERSION_FLAG} 137 | 138 | # Fix up Windows line endings incorrectly applied to test files 139 | find test/functional/fake_dir -type f -exec dos2unix \{\} \+ 140 | 141 | # Export sanely named python3 shell command 142 | python3() { 143 | py -3 -X utf8 "$@" 144 | } 145 | export -f python3 146 | fi 147 | set +eu 148 | 149 | # Install the test runner 150 | - python3 -m pip install tox 151 | 152 | # Fixup the tox environment name for PyPy 153 | - |- 154 | if [[ -z "${TOXENV+set}" && "${PYTHON_VERSION}" =~ pypy.* ]]; 155 | then 156 | export TOXENV=pypy3 157 | fi 158 | 159 | script: python3 -m tox -e "${TOXENV:-py3}" 160 | 161 | cache: 162 | pip: true 163 | directories: 164 | - $HOME/AppData/Local/Temp/chocolatey 165 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | py-ipfs-http-client 0.X.X (XX.XX.20XX) 2 | -------------------------------------- 3 | 4 | * (None yet) 5 | 6 | 7 | py-ipfs-http-client 0.7.0b1 (14.10.2020) 8 | ---------------------------------------- 9 | 10 | * Added support for optional arguments of the `.dag.*` APIs (by João Meyer) 11 | * Compatibility bumped to go-IPFS 0.7.x (by Jan Rydzewski and other community members bugging me) 12 | * The 0.7 series is not stable yet, expect some breaking changes before the final release! 13 | 14 | 15 | py-ipfs-http-client 0.6.1 (26.08.2020) 16 | -------------------------------------- 17 | 18 | * Added typings for most of the public and private API and enabled type checking with `mypy` 19 | * Added support for connecting to the IPFS daemon using Unix domain sockets (implemented for both the requests and HTTPx backends) 20 | * Deprecated `.repo.gc(…)`'s `return_result` parameter in favour of the newly introduced `quiet` parameter to match the newer HTTP API 21 | * If you use the undocumented `return_result` parameter anywhere else, consider such use deprecated; support for this parameter will be removed in 0.7.X everywhere 22 | * Rationale: This parameter used to map to using the HTTP HEAD method to perform the given request without any reply being returned, but this feature was dropped from the API with go-IPFS 0.5. 23 | * Implemented the DAG APIs for go-IPFS 0.5+: `.dag.get`, `.dag.put`, `.dag.imprt` and `.dag.export` 24 | 25 | Bugfixes: 26 | 27 | * The value of the `timeout` parameter on `ipfshttpclient.{connect,Client}` is no longer ignored when using the `requests` HTTP backend (default) 28 | * (The per-API-call `timeout` parameter was unaffected by this.)
29 | * The HTTPx HTTP backend now properly applies address family restrictions encoded as part of the daemon MultiAddr (needed a minor upstream change) 30 | 31 | py-ipfs-http-client 0.6.0 (30.06.2020) 32 | -------------------------------------- 33 | 34 | **Breaking changes in this release**: 35 | 36 | * The *recursive* parameter of `.add()` is no longer ignored and now enforces its default value of `False` (explicitly set it to `True` for the previous behaviour) 37 | * The glob pattern strings that may be passed to the `.add()` pattern parameter now actually behave like recursive glob patterns (see [the Python documentation](https://docs.python.org/3/library/glob.html) for how exactly) 38 | * Most functions previously returning a dict with the raw JSON response now return a custom mapping type instead (see the sketch after this list) 39 | * This mapping type supports the original getitem syntax (`result["ItemName"]`) unchanged, but if you need an actual dictionary object you need to call `.as_json()` on it 40 | * In the future, response-specific subtypes with Pythonic accessors and object-specific methods will hopefully be added 41 | * HTTP basic authentication data to send to the API daemon must now be set as an `auth=(username, password)` tuple rather than using separate `username=` and `password=` parameters 42 |
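As a rough migration sketch for the last two points (the daemon address and credentials below are placeholders, not part of this project):

```py
import ipfshttpclient

# 0.6.0+: pass credentials as a single `auth` tuple instead of
# separate `username=` and `password=` parameters
client = ipfshttpclient.connect(
	"/dns/ipfs-api.example.com/tcp/443/https",
	auth=("username", "password"),
)

result = client.id()
print(result["ID"])       # getitem syntax works unchanged
plain = result.as_json()  # call this if an actual dict is needed
```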
43 | Other changes: 44 | 45 | * Added support for go-IPFS 0.5.x 46 | * Adding directories with `.add()` has been greatly reworked: 47 | * It's now possible to specify arbitrary rules for which objects to include in a directory tree by passing a custom matcher object to the *pattern* parameter 48 | * The new *period_special* parameter allows toggling whether glob patterns match dot-files implicitly and defaults to `True` (previously it was effectively `False`) 49 | * The new *follow_symlinks* parameter similarly determines whether symbolic links will be followed when scanning directory trees and defaults to `False` (the previous default on Unix, albeit this likely wasn't intentional) 50 | * `.add()` will now limit its scan to the directories required to match the given glob patterns (passing in regular expression objects will still scan the tree unconditionally however) – custom matchers have full control over which directories are visited 51 | * The requests-based HTTP backend has been supplemented by another backend based on [HTTPx](https://www.python-httpx.org/) for Python 3.6+ 52 | * Due to a minor limitation within the library (no ability to apply address family restrictions during name resolution) this is currently included as a preview and must be manually enabled: ensure that the `httpx` library is installed in your Python environment and run your program with the environment variable *PY_IPFS_HTTP_CLIENT_PREFER_HTTPX* set to *yes*. 53 | * In the hopefully not too distant future, HTTPx will be used to finally provide async/await support for this library. 54 | 55 | py-ipfs-http-client 0.4.12 (21.05.2019) 56 | --------------------------------------- 57 | 58 | Bug fix release: 59 | 60 | * Fixed compatibility with `urllib3` 1.25.* when connecting to HTTPS API servers 61 | 62 | py-ipfs-http-client 0.4.11 (13.05.2019) 63 | --------------------------------------- 64 | 65 | (Most of the following was also released as version 0.4.10 the previous day, but that release was never advertised and some issues were quickly found that necessitated a new release.) 66 | 67 | This release features several breaking changes compared to the previous *py-ipfs-api* library: 68 | 69 | * A new import name: `ipfsapi` → `ipfshttpclient` (thanks to @AlibabasMerchant) 70 | * The client API is now structured according to the [IPFS interface core specification](https://github.com/ipfs/interface-ipfs-core/tree/master/SPEC) 71 | * Daemon location is now described using [Multiaddr](https://github.com/multiformats/multiaddr) 72 | * Some deprecated methods have been dropped: 73 | * `bitswap_unwant`: API endpoint dropped by *go-ipfs* 74 | * `{get,set}_pyobj`: Can too easily be abused for arbitrary code execution, use `pickle.{loads,dumps}` if you really need this 75 | * `file_ls`: Long deprecated by *go-ipfs* and scheduled for removal, use plain `ls` instead 76 | 77 | Some new features added in this release: 78 | 79 | * Adding large directories doesn't read them all into memory anymore before sending them to the daemon 80 | * API documentation has been improved 81 | * TCP connections may now be reused between API requests 82 | * `.add_json` now adds data as UTF-8 rather than using Unicode-escapes, for a shorter/more-canonical data representation (thanks to @emmnx) 83 | * Several parameters have been added to existing methods: 84 | * Using [filestore](https://github.com/ipfs-filestore/go-ipfs/tree/master/filestore) is now possible (thanks to @radfish) 85 | * Universal per-call `offline` parameter added (thanks to @radfish) 86 | * Universal per-call `return_result` parameter added to issue `HEAD` requests and suppress results for speed (thanks to @loardcirth) 87 | * Universal per-call `timeout` parameter added (thanks to @AlibabasMerchant) 88 | * `.add`: `nocopy` & `raw_leaves` (thanks to @radfish) 89 | * `.ls`: `paths` (thanks to @radfish) 90 | * `.name.publish`: `allow_offline` (thanks to @radfish) 91 | * `.name.resolve`: `dht_record_count` & `dht_timeout` (thanks to @radfish) 92 | 93 | *go-ipfs* 0.4.20 has been blacklisted for having known compatibility problems, but 0.4.19 and 0.4.21 are OK. 94 | 95 | py-ipfs-api 0.4.4 (13.05.2019) 96 | ------------------------------ 97 | 98 | * Reimplemented the library as a thin wrapper around the new *py-ipfs-http-client* library, with helpful warnings about how to upgrade 99 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Andrew Stocker 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # py-ipfs-http-client 2 | 3 | [![Made by the IPFS Community](https://img.shields.io/badge/made%20by-IPFS%20Community-blue.svg?style=flat-square)](https://docs.ipfs.io/community/) 4 | [![IRC #py-ipfs on chat.freenode.net](https://img.shields.io/badge/freenode%20IRC-%23py--ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23py-ipfs) 5 | [![Matrix #py-ipfs:ninetailed.ninja](https://img.shields.io/matrix/py-ipfs:ninetailed.ninja?color=blue&label=matrix%20chat&server_fqdn=matrix.ninetailed.ninja&style=flat-square)](https://matrix.to/#/#py-ipfs:ninetailed.ninja?via=matrix.ninetailed.ninja&via=librem.one) 6 | [![Standard README Compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme) 7 | [![PyPI Package ipfshttpclient](https://img.shields.io/pypi/dm/ipfshttpclient.svg?style=flat-square)](https://pypi.python.org/pypi/ipfshttpclient) 8 | [![Build Status](https://img.shields.io/travis/com/ipfs-shipyard/py-ipfs-http-client/master.svg?style=flat-square)](https://travis-ci.com/github/ipfs-shipyard/py-ipfs-http-client) 9 | 10 | ![Python IPFS HTTP Client Library](https://ipfs.io/ipfs/QmQJ68PFMDdAsgCZvA1UVzzn18asVcf7HVvCDgpjiSCAse) 11 | 12 | Check out [the HTTP Client reference](https://ipfs.io/ipns/12D3KooWEqnTdgqHnkkwarSrJjeMP2ZJiADWLYADaNvUb6SQNyPF/docs/) for the full command reference. 13 | 14 | **Note**: The `ipfsapi` PIP package and Python module have both been renamed to `ipfshttpclient`! 15 | See the [relevant section of the CHANGELOG](CHANGELOG.md#py-ipfs-http-client-0411-13052019) for details. There is also an `ipfsApi` library from which this library originated; it is completely 16 | unmaintained and does not work with any recent go-IPFS version. 17 | 18 | **Note**: This library occasionally has to change to stay compatible with the IPFS HTTP API. 19 | Currently, this library is tested against [go-ipfs v0.7.0](https://github.com/ipfs/go-ipfs/releases/tag/v0.7.0). 20 | We strive to support the last 5 releases of go-IPFS at any given time; go-IPFS v0.4.23 is therefore 21 | the oldest supported version at this time.
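If you are unsure which daemon version your client is actually talking to, you can query it directly; a quick sketch, assuming a daemon running at the default local address (`connect()` itself already validates the daemon version against the supported range):

```py
>>> import ipfshttpclient
>>> client = ipfshttpclient.connect()
>>> client.version()['Version']
'0.7.0'
```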
22 | 23 | ## Table of Contents 24 | 25 | - [Install](#install) 26 | - [Usage](#usage) 27 | - [Documentation](#documentation) 28 | - [Migrating from 0.4.x to 0.6.0](#migrating-from-04x-to-060) 29 | - [Featured Projects](#featured-projects) 30 | - [Contributing](#contributing) 31 | - [Bug reports](#bug-reports) 32 | - [Pull requests](#pull-requests) 33 | - [Chat with Us (IRC/Matrix)](#chat-with-us-ircmatrix) 34 | - [License](#license) 35 | 36 | ## Install 37 | 38 | Install with pip: 39 | 40 | ```sh 41 | pip install ipfshttpclient 42 | ``` 43 | 44 | ### Development install from Source 45 | 46 | ```sh 47 | # Clone the source repository 48 | git clone https://github.com/ipfs/py-ipfs-http-client.git 49 | cd py-ipfs-http-client 50 | 51 | # Link ipfs-api-client into your Python Path 52 | flit install --pth-file 53 | ``` 54 | 55 | ## Usage 56 | 57 | Basic use-case (requires a running instance of the IPFS daemon): 58 | 59 | ```py 60 | >>> import ipfshttpclient 61 | >>> client = ipfshttpclient.connect()  # Connects to: /dns/localhost/tcp/5001/http 62 | >>> res = client.add('test.txt') 63 | >>> res 64 | {'Hash': 'QmWxS5aNTFEc9XbMX1ASvLET1zrqEaTssqt33rVZQCQb22', 'Name': 'test.txt'} 65 | >>> client.cat(res['Hash']) 66 | 'fdsafkljdskafjaksdjf\n' 67 | ``` 68 | 69 | *Please note*: You should specify the address of an IPFS *API server*; using the address of a *gateway* (such as the public `ipfs.io` one at `/dns/ipfs.io/tcp/443/https`) will only give you [extremely limited access](https://github.com/ipfs/go-ipfs/blob/master/docs/gateway.md#read-only-api) and may not work at all. If you are only interested in downloading IPFS content through public gateway servers, then this library is unlikely to be of much help. 70 | 71 | For real-world scripts you can reuse TCP connections by using a context manager or by manually closing the session after use: 72 | 73 | ```py 74 | import ipfshttpclient 75 | 76 | # Share TCP connections using a context manager 77 | with ipfshttpclient.connect() as client: 78 | 	hash = client.add('test.txt')['Hash'] 79 | 	print(client.stat(hash)) 80 | 81 | # Share TCP connections until the client session is closed 82 | class SomeObject: 83 | 	def __init__(self): 84 | 		self._client = ipfshttpclient.connect(session=True) 85 | 86 | 	def do_something(self): 87 | 		hash = self._client.add('test.txt')['Hash'] 88 | 		print(self._client.stat(hash)) 89 | 90 | 	def close(self):  # Call this when you're done 91 | 		self._client.close() 92 | ``` 93 | 94 | Administrative functions: 95 | 96 | ```py 97 | >>> client.id() 98 | {'Addresses': ['/ip4/127.0.0.1/tcp/4001/ipfs/QmS2C4MjZsv2iP1UDMMLCYqJ4WeJw8n3vXx1VKxW1UbqHS', 99 | '/ip6/::1/tcp/4001/ipfs/QmS2C4MjZsv2iP1UDMMLCYqJ4WeJw8n3vXx1VKxW1UbqHS'], 100 | 'AgentVersion': 'go-ipfs/0.4.10', 101 | 'ID': 'QmS2C4MjZsv2iP1UDMMLCYqJ4WeJw8n3vXx1VKxW1UbqHS', 102 | 'ProtocolVersion': 'ipfs/0.1.0', 103 | 'PublicKey': 'CAASpgIwgg ...
3FcjAgMBAAE='} 104 | ``` 105 | 106 | Pass in API options: 107 | 108 | ```py 109 | >>> client.pin.ls(type='all') 110 | {'Keys': {'QmNMELyizsfFdNZW3yKTi1SE2pErifwDTXx6vvQBfwcJbU': {'Count': 1, 111 | 'Type': 'indirect'}, 112 | 'QmNQ1h6o1xJARvYzwmySPsuv9L5XfzS4WTvJSTAWwYRSd8': {'Count': 1, 113 | 'Type': 'indirect'}, 114 | … 115 | ``` 116 | 117 | Add a directory and match against a filename pattern: 118 | 119 | ```py 120 | >>> client.add('photos', pattern='*.jpg') 121 | [{'Hash': 'QmcqBstfu5AWpXUqbucwimmWdJbu89qqYmE3WXVktvaXhX', 122 | 'Name': 'photos/photo1.jpg'}, 123 | {'Hash': 'QmSbmgg7kYwkSNzGLvWELnw1KthvTAMszN5TNg3XQ799Fu', 124 | 'Name': 'photos/photo2.jpg'}, 125 | {'Hash': 'Qma6K85PJ8dN3qWjxgsDNaMjWjTNy8ygUWXH2kfoq9bVxH', 126 | 'Name': 'photos/photo3.jpg'}] 127 | ``` 128 | 129 | Or add a directory recursively: 130 | 131 | ```py 132 | >>> client.add('fake_dir', recursive=True) 133 | [{'Hash': 'QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX', 134 | 'Name': 'fake_dir/fsdfgh'}, 135 | {'Hash': 'QmNuvmuFeeWWpxjCQwLkHshr8iqhGLWXFzSGzafBeawTTZ', 136 | 'Name': 'fake_dir/test2/llllg'}, 137 | {'Hash': 'QmX1dd5DtkgoiYRKaPQPTCtXArUu4jEZ62rJBUcd5WhxAZ', 138 | 'Name': 'fake_dir/test2'}, 139 | {'Hash': 'Qmenzb5J4fR9c69BbpbBhPTSp2Snjthu2hKPWGPPJUHb9M', 140 | 'Name': 'fake_dir'}] 141 | ``` 142 | 143 | This module also contains some helper functions for adding strings and JSON to IPFS: 144 | 145 | ```py 146 | >>> lst = [1, 77, 'lol'] 147 | >>> client.add_json(lst) 148 | 'QmQ4R5cCUYBWiJpNL7mFe4LDrwD6qBr5Re17BoRAY9VNpd' 149 | >>> client.get_json(_) 150 | [1, 77, 'lol'] 151 | ``` 152 | 153 | Use an IPFS server with basic auth (replace username and password with real creds): 154 | 155 | ```py 156 | >>> import ipfshttpclient 157 | >>> client = ipfshttpclient.connect('/dns/ipfs-api.example.com/tcp/443/https', auth=("username", "password")) 158 | ``` 159 | 160 | Pass custom headers to the IPFS daemon with each request: 161 | ```py 162 | >>> import ipfshttpclient 163 | >>> headers = {"CustomHeader": "foobar"} 164 | >>> client = ipfshttpclient.connect('/dns/ipfs-api.example.com/tcp/443/https', headers=headers) 165 | ``` 166 | 167 | Connect to the IPFS daemon using a Unix domain socket (plain HTTP only): 168 | ```py 169 | >>> import ipfshttpclient 170 | >>> client = ipfshttpclient.connect("/unix/run/ipfs/ipfs.sock") 171 | ``` 172 | 173 | 174 | 175 | ## Documentation 176 | 177 | Documentation (currently mostly API documentation unfortunately) is available on IPFS: 178 | 179 | https://ipfs.io/ipns/12D3KooWEqnTdgqHnkkwarSrJjeMP2ZJiADWLYADaNvUb6SQNyPF/docs/ 180 | 181 | The `ipfs` [command-line Client documentation](https://ipfs.io/docs/commands/) may also be useful in some cases. 182 | 183 | ### Migrating from `0.4.x` to `0.6.0` 184 | 185 | Please see the CHANGELOG for [the minor breaking changes between these releases](CHANGELOG.md#py-ipfs-http-client-060-30062020). 186 | 187 | ## Featured Projects 188 | 189 | Projects that currently use py-ipfs-http-client. If your project isn't here, feel free to submit a PR to add it! 190 | 191 | - [InterPlanetary Wayback](https://github.com/oduwsdl/ipwb) interfaces web archive ([WARC](https://www.iso.org/standard/44717.html)) files for distributed indexing and replay using IPFS. 192 | 193 | ## Contributing 194 | 195 | ### Easy Tasks 196 | 197 | Over time many smaller day-to-day tasks have piled up (mostly supporting some 198 | newer APIs). 
If you want to help out without getting too involved, picking up one 199 | of the tasks on our [help wanted issue list](https://github.com/ipfs-shipyard/py-ipfs-http-client/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) 200 | would go a long way towards making this library more feature-complete. 👍 201 | 202 | ### Bug reports 203 | 204 | You can submit bug reports using the 205 | [GitHub issue tracker](https://github.com/ipfs/py-ipfs-http-client/issues). 206 | 207 | ### Pull requests 208 | 209 | Pull requests are welcome. Before submitting a new pull request, please 210 | make sure that your code passes both the 211 | [code formatting](https://www.python.org/dev/peps/pep-0008/) 212 | (PEP-8 with tab indentation) and 213 | [typing](https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html) 214 | (PEP-484 using mypy) checks: 215 | 216 | $ tox -e styleck -e typeck 217 | 218 | as well as the unit tests: 219 | 220 | $ tox -e py3 -e py3-httpx 221 | 222 | If you are unsure, don't hesitate to just submit your code; a human will 223 | take a look. 🙂 224 | 225 | If you can, please make sure to include new unit tests for new features or 226 | changes in behavior. We aim to bring coverage to 100% at some point. 227 | 228 | #### Installing the pre-commit Hook 229 | 230 | You can arrange for the code style and typing tests to be run automatically 231 | before each commit by installing the GIT `pre-commit` hook: 232 | 233 | $ ./tools/pre-commit --install 234 | 235 | ### Chat with Us (IRC/Matrix) 236 | 237 | You can find us on [#py-ipfs on chat.freenode.org](http://webchat.freenode.net/?channels=%23py-ipfs) 238 | or in our [Matrix chat room](https://matrix.to/#/#py-ipfs:ninetailed.ninja?via=ninetailed.ninja&via=librem.one). 239 | Join us if you have any suggestions, questions or if you just want to discuss 240 | IPFS and Python in general. 241 | 242 | Please note that the channel is not monitored all the time and hence you may 243 | only receive a reply to your message later that day. Using Matrix makes it 244 | easier to stay connected in the background, so please prefer the Matrix option 245 | or use an IRC bouncer. 246 | 247 | ## License 248 | 249 | This code is distributed under the terms of the [MIT license](https://opensource.org/licenses/MIT). Details can be found in the file 250 | [LICENSE](LICENSE) in this repository. 251 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = a4 8 | BUILDDIR = build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
16 | 17 | .PHONY: help 18 | help: 19 | @echo "Please use \`make ' where is one of" 20 | @echo " html to make standalone HTML files" 21 | @echo " dirhtml to make HTML files named index.html in directories" 22 | @echo " singlehtml to make a single large HTML file" 23 | @echo " pickle to make pickle files" 24 | @echo " json to make JSON files" 25 | @echo " htmlhelp to make HTML files and a HTML help project" 26 | @echo " qthelp to make HTML files and a qthelp project" 27 | @echo " applehelp to make an Apple Help Book" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " epub3 to make an epub3" 31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 32 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 33 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 34 | @echo " text to make text files" 35 | @echo " man to make manual pages" 36 | @echo " texinfo to make Texinfo files" 37 | @echo " info to make Texinfo files and run them through makeinfo" 38 | @echo " gettext to make PO message catalogs" 39 | @echo " changes to make an overview of all changed/added/deprecated items" 40 | @echo " xml to make Docutils-native XML files" 41 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 42 | @echo " linkcheck to check all external links for integrity" 43 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 44 | @echo " coverage to run coverage check of the documentation (if enabled)" 45 | @echo " dummy to check syntax errors of document sources" 46 | 47 | .PHONY: clean 48 | clean: 49 | rm -rf $(BUILDDIR)/* 50 | 51 | .PHONY: html 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | .PHONY: dirhtml 58 | dirhtml: 59 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 60 | @echo 61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 62 | 63 | .PHONY: singlehtml 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | .PHONY: pickle 70 | pickle: 71 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 72 | @echo 73 | @echo "Build finished; now you can process the pickle files." 74 | 75 | .PHONY: json 76 | json: 77 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 78 | @echo 79 | @echo "Build finished; now you can process the JSON files." 80 | 81 | .PHONY: htmlhelp 82 | htmlhelp: 83 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 84 | @echo 85 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 86 | ".hhp project file in $(BUILDDIR)/htmlhelp." 87 | 88 | .PHONY: qthelp 89 | qthelp: 90 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 91 | @echo 92 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 93 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 94 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PythonIPFSHTTPClient.qhcp" 95 | @echo "To view the help file:" 96 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PythonIPFSHTTPClient.qhc" 97 | 98 | .PHONY: applehelp 99 | applehelp: 100 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 101 | @echo 102 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 103 | @echo "N.B. 
You won't be able to view it unless you put it in" \ 104 | "~/Library/Documentation/Help or install it in your application" \ 105 | "bundle." 106 | 107 | .PHONY: devhelp 108 | devhelp: 109 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 110 | @echo 111 | @echo "Build finished." 112 | @echo "To view the help file:" 113 | @echo "# mkdir -p $$HOME/.local/share/devhelp/PythonIPFSHTTPClient" 114 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PythonIPFSHTTPClient" 115 | @echo "# devhelp" 116 | 117 | .PHONY: epub 118 | epub: 119 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 120 | @echo 121 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 122 | 123 | .PHONY: epub3 124 | epub3: 125 | $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 126 | @echo 127 | @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." 128 | 129 | .PHONY: latex 130 | latex: 131 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 132 | @echo 133 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 134 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 135 | "(use \`make latexpdf' here to do that automatically)." 136 | 137 | .PHONY: latexpdf 138 | latexpdf: 139 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 140 | @echo "Running LaTeX files through pdflatex..." 141 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 142 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 143 | 144 | .PHONY: latexpdfja 145 | latexpdfja: 146 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 147 | @echo "Running LaTeX files through platex and dvipdfmx..." 148 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 149 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 150 | 151 | .PHONY: text 152 | text: 153 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 154 | @echo 155 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 156 | 157 | .PHONY: man 158 | man: 159 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 160 | @echo 161 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 162 | 163 | .PHONY: texinfo 164 | texinfo: 165 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 166 | @echo 167 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 168 | @echo "Run \`make' in that directory to run these through makeinfo" \ 169 | "(use \`make info' here to do that automatically)." 170 | 171 | .PHONY: info 172 | info: 173 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 174 | @echo "Running Texinfo files through makeinfo..." 175 | make -C $(BUILDDIR)/texinfo info 176 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 177 | 178 | .PHONY: gettext 179 | gettext: 180 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 181 | @echo 182 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 183 | 184 | .PHONY: changes 185 | changes: 186 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 187 | @echo 188 | @echo "The overview file is in $(BUILDDIR)/changes." 189 | 190 | .PHONY: linkcheck 191 | linkcheck: 192 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 193 | @echo 194 | @echo "Link check complete; look for any errors in the above output " \ 195 | "or in $(BUILDDIR)/linkcheck/output.txt." 
196 | 197 | .PHONY: doctest 198 | doctest: 199 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 200 | @echo "Testing of doctests in the sources finished, look at the " \ 201 | "results in $(BUILDDIR)/doctest/output.txt." 202 | 203 | .PHONY: coverage 204 | coverage: 205 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 206 | @echo "Testing of coverage in the sources finished, look at the " \ 207 | "results in $(BUILDDIR)/coverage/python.txt." 208 | 209 | .PHONY: xml 210 | xml: 211 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 212 | @echo 213 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 214 | 215 | .PHONY: pseudoxml 216 | pseudoxml: 217 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 218 | @echo 219 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 220 | 221 | .PHONY: dummy 222 | dummy: 223 | $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy 224 | @echo 225 | @echo "Build finished. Dummy builder generates no files." 226 | -------------------------------------------------------------------------------- /docs/http_client_ref.md: -------------------------------------------------------------------------------- 1 | HTTP Client Reference 2 | --------------------- 3 | 4 | All commands are accessed through the ``ipfshttpclient.Client`` class. 5 | 6 | ### Exceptions 7 | 8 | ```eval_rst 9 | .. automodule:: ipfshttpclient.exceptions 10 | :members: 11 | ``` 12 | 13 | 14 | ### Utility Functions 15 | 16 | ```eval_rst 17 | .. data:: ipfshttpclient.DEFAULT_ADDR 18 | 19 | The default IPFS API daemon location the client library will attempt to 20 | connect to. By default this will have a value of ``multiaddr.Multiaddr("/dns/localhost/tcp/5001/http")``. 21 | 22 | This may be overwritten on a per-client-instance basis using 23 | the ``addr`` parameter of the :func:`~ipfshttpclient.connect` function. 24 | 25 | .. data:: ipfshttpclient.DEFAULT_BASE 26 | 27 | The default HTTP URL path prefix (or “base”) that the client library will use. 28 | By default this will have a value of ``"api/v0"``. 29 | 30 | This may be overwritten on a per-client-instance basis using the ``base`` 31 | parameter of the :func:`~ipfshttpclient.connect` function. 32 | 33 | .. autofunction:: ipfshttpclient.connect(addr=DEFAULT_ADDR, base=DEFAULT_BASE) 34 | 35 | .. autofunction:: ipfshttpclient.assert_version 36 | 37 | ``` 38 | 39 | ### The API Client 40 | 41 | All methods accept the following parameters in their `kwargs`: 42 | 43 | * **offline** ([**`bool`**](https://docs.python.org/3/library/functions.html#bool)) – Prevent the daemon from communicating with any remote IPFS node while performing the requested action? 44 | * **opts** ([**`dict`**](https://docs.python.org/3/library/stdtypes.html#dict)) – A mapping of custom IPFS API parameters to be sent along with the regular parameters generated by the client library 45 | * Values specified here will always override their respective counterparts 46 | of the client library itself. 47 | * **stream** ([**`bool`**](https://docs.python.org/3/library/functions.html#bool)) – Return results incrementally as they arrive? 48 | * Each method called with `stream=True` will return a generator instead 49 | of the documented value. If the return type is of type `list` then each 50 | item of the given list will be yielded separately; if it is of type 51 | `bytes` then arbitrary bags of bytes will be yielded that together form 52 | a stream; finally, if it is of type `dict` then the single dictionary item 53 | will be yielded once. 54 | * **timeout** ([**`float`**](https://docs.python.org/3/library/functions.html#float)) – The number of seconds to wait for a daemon reply before giving up 55 |
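For example, streaming a file's contents chunk-by-chunk instead of buffering the complete reply in memory might look like the following sketch (the CID is the one from the README example):

```py
import ipfshttpclient

with ipfshttpclient.connect() as client:
	# With stream=True, cat() yields arbitrary chunks of bytes as they
	# arrive instead of returning a single large bytes object
	for chunk in client.cat('QmWxS5aNTFEc9XbMX1ASvLET1zrqEaTssqt33rVZQCQb22', stream=True):
		print("Received {0} bytes".format(len(chunk)))
```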
56 | ```eval_rst 57 | .. autoclientclass:: ipfshttpclient.Client 58 | :members: 59 | :inherited-members: 60 | :undoc-members: 61 | 62 | ``` 63 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | Python IPFS HTTP Client's documentation! 2 | ======================================== 3 | 4 | Contents 5 | -------- 6 | 7 | * [HTTP Client Reference](http_client_ref.md) 8 | * [Internal HTTP Client Reference](internal_ref.md) 9 | 10 | Indices and tables 11 | ------------------ 12 | 13 | ```eval_rst 14 | * :ref:`genindex` 15 | * :ref:`modindex` 16 | * :ref:`search` 17 | ``` 18 | 19 | -------------------------------------------------------------------------------- /docs/internal_ref.md: -------------------------------------------------------------------------------- 1 | Internal HTTP Client Reference 2 | ------------------------------ 3 | 4 | ### `encoding` 5 | 6 | ```eval_rst 7 | .. automodule:: ipfshttpclient.encoding 8 | :members: 9 | :show-inheritance: 10 | 11 | ``` 12 | 13 | ### `http` 14 | 15 | ```eval_rst 16 | .. automodule:: ipfshttpclient.http 17 | :members: 18 | :show-inheritance: 19 | 20 | ``` 21 | 22 | ### `multipart` 23 | 24 | ```eval_rst 25 | .. automodule:: ipfshttpclient.multipart 26 | :members: 27 | :show-inheritance: 28 | 29 | ``` 30 | 31 | ### `utils` 32 | 33 | ```eval_rst 34 | .. automodule:: ipfshttpclient.utils 35 | :members: 36 | :show-inheritance: 37 | 38 | ``` 39 | -------------------------------------------------------------------------------- /docs/publish.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import os 3 | import sys 4 | __dir__ = os.path.dirname(__file__) 5 | sys.path.insert(0, os.path.join(__dir__, "..")) 6 | 7 | import sphinx.cmd.build 8 | import ipfshttpclient 9 | 10 | # Ensure working directory is script directory 11 | os.chdir(__dir__) 12 | 13 | def main(argv=sys.argv[1:], program=sys.argv[0]): 14 | if len(argv) != 1: 15 | print("Usage: {0} [IPNS-key]".format(os.path.basename(program))) 16 | print() 17 | print("!!
Continuing without publishing to IPNS !!") 18 | print() 19 | 20 | # Invoke Sphinx like the Makefile does 21 | result = sphinx.cmd.build.build_main(["-b", "html", "-d", "build/doctrees", ".", "build/html"]) 22 | if result != 0: 23 | return result 24 | 25 | print() 26 | print("Exporting files to IPFS…") 27 | client = ipfshttpclient.connect() 28 | hash_docs = client.add("build/html", recursive=True, raw_leaves=True, pin=False)[-1]["Hash"] 29 | hash_main = client.object.new("unixfs-dir")["Hash"] 30 | hash_main = client.object.patch.add_link(hash_main, "docs", hash_docs)["Hash"] 31 | client.pin.add(hash_main) 32 | print("Final IPFS path: /ipfs/{0}".format(hash_main)) 33 | 34 | if len(argv) == 1: 35 | key = argv[0] 36 | print() 37 | print("Exporting files to IPNS…") 38 | name_main = client.name.publish(hash_main, key=key)["Name"] 39 | print("Final IPNS path: /ipns/{0}".format(name_main)) 40 | 41 | print() 42 | print("Run the following commandline on all systems that mirror this documentation:") 43 | print(" ipfs pin add {0} && ipfs name publish -k {1} /ipfs/{0}".format(hash_main, name_main)) 44 | 45 | return 0 46 | 47 | if __name__ == "__main__": 48 | sys.exit(main()) -------------------------------------------------------------------------------- /docs/py-cid.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iosifpeterfi/py-ipfs-http-client/bcec97aa83cf0b0348d8e160c3f68dc8495dbc1b/docs/py-cid.inv -------------------------------------------------------------------------------- /docs/releasing.md: -------------------------------------------------------------------------------- 1 | Since releasing new versions is currently a somewhat complicated task, the current procedure 2 | (as of 17.02.2020) is outlined in this document. 3 | 4 | All of this has only been tested on Debian 11 (Linux). 5 | 6 | # Prerequisites 7 | 8 | ## Building and updating the project 9 | 10 | ### The `flit` project manager 11 | 12 | APT line: `sudo apt install python3-pip && sudo pip3 install flit` 13 | DNF line: `sudo dnf install python3-flit` 14 | 15 | *Note*: Version `2.0+` of `flit` is required! 16 | 17 | ## Building the documentation 18 | 19 | ### Sphinx & the `recommonmark` preprocessor 20 | 21 | Sphinx is the standard documentation framework for Python. Recommonmark is an extension that allows 22 | Sphinx to process Markdown documentation as if it were reStructuredText. 23 | 24 | 25 | 26 | 27 | At least Sphinx 3.0 with the `sphinx_autodoc_typehints` and reCommonMark 0.5 plugins is required, 28 | so install them using PIP: 29 | 30 | `pip3 install Sphinx~=3.0 sphinx_autodoc_typehints recommonmark~=0.5.0` 31 | 32 | For best results, Sphinx should be run with Python 3.8+ or typings will be incomplete. 33 | 34 | ## Hosting Documentation 35 | 36 | **Both of the following need to be on the device that will *host the documentation*, not the one 37 | that will build it**: 38 | 39 | ### The Go IPFS daemon 40 | 41 | Yes, we use IPFS to host our documentation. In case you haven't already, you can download it here: 42 | https://ipfs.io/docs/install/ 43 | 44 | ### A dedicated IPNS key for publishing 45 | 46 | For publishing the documentation, an IPNS key used only for this task should be 47 | generated if there is no such key already: 48 | 49 | `ipfs key gen --type ed25519 ipfs-http-client` 50 | 51 | This key will need to be copied to all other systems if the documentation is to 52 | be published on these as well.
52 | 53 | At the time of writing the officially used key is: *12D3KooWEqnTdgqHnkkwarSrJjeMP2ZJiADWLYADaNvUb6SQNyPF* 54 | 55 | 56 | 57 | # Steps when releasing a new version 58 | 59 | ## Update the source code 60 | 61 | 1. Make a GIT commit incrementing the version number in `ipfshttpclient/version.py` and completing the currently open `CHANGELOG.md` entry: 62 | `git commit -m "Release version 0.X.Y" ipfshttpclient/version.py CHANGELOG.md` 63 | 2. Tag the GIT commit with the version number using an annotated and signed tag: 64 | `git tag --sign -m "Release version 0.X.Y" 0.X.Y` 65 | 3. Push the new version 66 | 67 | ## Upload the new version to PyPI 68 | 69 | Run: `flit build && flit publish` 70 | 71 | ## Re-generate and publish the documentation 72 | 73 | Run: `docs/publish.py ipfs-http-client` (where `ipfs-http-client` is the IPNS key ID) 74 | 75 | The command will also print a commandline that may be used to mirror the generated 76 | documentation on systems other than the current one. 77 | -------------------------------------------------------------------------------- /ipfshttpclient/__init__.py: -------------------------------------------------------------------------------- 1 | """Python IPFS HTTP CLIENT library""" 2 | 3 | 4 | from .version import __version__ 5 | 6 | ################################### 7 | # Import stable HTTP CLIENT parts # 8 | ################################### 9 | from . import exceptions 10 | 11 | from .client import DEFAULT_ADDR, DEFAULT_BASE 12 | from .client import VERSION_MINIMUM, VERSION_MAXIMUM 13 | from .client import Client, assert_version, connect 14 | -------------------------------------------------------------------------------- /ipfshttpclient/client/bitswap.py: -------------------------------------------------------------------------------- 1 | import typing as ty 2 | 3 | from . import base 4 | 5 | 6 | class Section(base.SectionBase): 7 | @base.returns_single_item(base.ResponseBase) 8 | def wantlist(self, peer: ty.Optional[str] = None, **kwargs: base.CommonArgs): 9 | """Returns blocks currently on the bitswap wantlist 10 | 11 | .. code-block:: python 12 | 13 | >>> client.bitswap.wantlist() 14 | {'Keys': [ 15 | 'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb', 16 | 'QmdCWFLDXqgdWQY9kVubbEHBbkieKd3uo7MtCm7nTZZE9K', 17 | 'QmVQ1XvYGF19X4eJqz1s7FJYJqAxFC4oqh3vWJJEXn66cp' 18 | ]} 19 | 20 | Parameters 21 | ---------- 22 | peer 23 | Peer to show wantlist for 24 | 25 | Returns 26 | ------- 27 | dict 28 | 29 | +------+----------------------------------------------------+ 30 | | Keys | List of blocks the connected daemon is looking for | 31 | +------+----------------------------------------------------+ 32 | """ 33 | args = (peer,) 34 | return self._client.request('/bitswap/wantlist', args, decoder='json', **kwargs) 35 | 36 | 37 | @base.returns_single_item(base.ResponseBase) 38 | def stat(self, **kwargs: base.CommonArgs): 39 | """Returns some diagnostic information from the bitswap agent 40 | 41 | ..
code-block:: python 42 | 43 | >>> client.bitswap.stat() 44 | {'BlocksReceived': 96, 45 | 'DupBlksReceived': 73, 46 | 'DupDataReceived': 2560601, 47 | 'ProviderBufLen': 0, 48 | 'Peers': [ 49 | 'QmNZFQRxt9RMNm2VVtuV2Qx7q69bcMWRVXmr5CEkJEgJJP', 50 | 'QmNfCubGpwYZAQxX8LQDsYgB48C4GbfZHuYdexpX9mbNyT', 51 | 'QmNfnZ8SCs3jAtNPc8kf3WJqJqSoX7wsX7VqkLdEYMao4u', 52 | … 53 | ], 54 | 'Wantlist': [ 55 | 'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb', 56 | 'QmdCWFLDXqgdWQY9kVubbEHBbkieKd3uo7MtCm7nTZZE9K', 57 | 'QmVQ1XvYGF19X4eJqz1s7FJYJqAxFC4oqh3vWJJEXn66cp' 58 | ] 59 | } 60 | 61 | Returns 62 | ------- 63 | dict 64 | Statistics, peers and wanted blocks 65 | """ 66 | return self._client.request('/bitswap/stat', decoder='json', **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/block.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | from .. import multipart 4 | from .. import utils 5 | 6 | 7 | class Section(base.SectionBase): 8 | """Interacting with raw IPFS blocks""" 9 | 10 | def get(self, cid: base.cid_t, **kwargs: base.CommonArgs): 11 | r"""Returns the raw contents of a block 12 | 13 | .. code-block:: python 14 | 15 | >>> client.block.get('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') 16 | b'\x121\n"\x12 \xdaW>\x14\xe5\xc1\xf6\xe4\x92\xd1 … \n\x02\x08\x01' 17 | 18 | Parameters 19 | ---------- 20 | cid 21 | The CID of an existing block to get 22 | 23 | Returns 24 | ------- 25 | bytes 26 | Contents of the requested block 27 | """ 28 | args = (str(cid),) 29 | return self._client.request('/block/get', args, **kwargs) 30 | 31 | 32 | @base.returns_single_item(base.ResponseBase) 33 | def put(self, file: utils.clean_file_t, 34 | **kwargs: base.CommonArgs): 35 | """Stores the contents of the given file object as an IPFS block 36 | 37 | .. code-block:: python 38 | 39 | >>> client.block.put(io.BytesIO(b'Mary had a little lamb')) 40 | {'Key': 'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb', 41 | 'Size': 22} 42 | 43 | Parameters 44 | ---------- 45 | file 46 | The data to be stored as an IPFS block 47 | 48 | Returns 49 | ------- 50 | dict 51 | Information about the new block 52 | 53 | See :meth:`~ipfshttpclient.Client.block.stat` 54 | """ 55 | body, headers = multipart.stream_files(file, chunk_size=self.chunk_size) 56 | return self._client.request('/block/put', decoder='json', data=body, 57 | headers=headers, **kwargs) 58 | 59 | 60 | @base.returns_single_item(base.ResponseBase) 61 | def stat(self, cid: base.cid_t, **kwargs: base.CommonArgs): 62 | """Returns a dict with the size of the block with the given hash. 63 | 64 | .. code-block:: python 65 | 66 | >>> client.block.stat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') 67 | {'Key': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D', 68 | 'Size': 258} 69 | 70 | Parameters 71 | ---------- 72 | cid 73 | The CID of an existing block to stat 74 | 75 | Returns 76 | ------- 77 | dict 78 | Information about the requested block 79 | """ 80 | args = (str(cid),) 81 | return self._client.request('/block/stat', args, decoder='json', **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/bootstrap.py: -------------------------------------------------------------------------------- 1 | from . 
import base 2 | 3 | 4 | class Section(base.SectionBase): 5 | @base.returns_single_item(base.ResponseBase) 6 | def add(self, peer: base.multiaddr_t, *peers: base.multiaddr_t, 7 | **kwargs: base.CommonArgs): 8 | """Adds peers to the bootstrap list 9 | 10 | Parameters 11 | ---------- 12 | peer 13 | IPFS Multiaddr of a peer to add to the list 14 | 15 | Returns 16 | ------- 17 | dict 18 | """ 19 | args = (str(peer), *(str(p) for p in peers)) 20 | return self._client.request('/bootstrap/add', args, decoder='json', **kwargs) 21 | 22 | 23 | @base.returns_single_item(base.ResponseBase) 24 | def list(self, **kwargs: base.CommonArgs): 25 | """Returns the addresses of peers used during initial discovery of the 26 | IPFS network 27 | 28 | Peers are output in the format ``<multiaddr>/<peerID>``. 29 | 30 | .. code-block:: python 31 | 32 | >>> client.bootstrap.list() 33 | {'Peers': [ 34 | '/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYER … uvuJ', 35 | '/ip4/104.236.176.52/tcp/4001/ipfs/QmSoLnSGccFuZQJzRa … ca9z', 36 | '/ip4/104.236.179.241/tcp/4001/ipfs/QmSoLPppuBtQSGwKD … KrGM', 37 | … 38 | '/ip4/178.62.61.185/tcp/4001/ipfs/QmSoLMeWqB7YGVLJN3p … QBU3' 39 | ]} 40 | 41 | Returns 42 | ------- 43 | dict 44 | 45 | +-------+-------------------------------+ 46 | | Peers | List of known bootstrap peers | 47 | +-------+-------------------------------+ 48 | """ 49 | return self._client.request('/bootstrap', decoder='json', **kwargs) 50 | 51 | 52 | @base.returns_single_item(base.ResponseBase) 53 | def rm(self, peer: base.multiaddr_t, *peers: base.multiaddr_t, 54 | **kwargs: base.CommonArgs): 55 | """Removes peers from the bootstrap list 56 | 57 | Parameters 58 | ---------- 59 | peer 60 | IPFS Multiaddr of a peer to remove from the list 61 | 62 | Returns 63 | ------- 64 | dict 65 | """ 66 | args = (str(peer), *(str(p) for p in peers)) 67 | return self._client.request('/bootstrap/rm', args, decoder='json', **kwargs) 68 | -------------------------------------------------------------------------------- /ipfshttpclient/client/config.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | from .. import utils 4 | 5 | 6 | class Section(base.SectionBase): 7 | @base.returns_single_item(base.ResponseBase) 8 | def get(self, **kwargs: base.CommonArgs): 9 | #TODO: Support the optional `key` parameter 10 | """Returns the currently used node configuration 11 | 12 | .. code-block:: python 13 | 14 | >>> config = client.config.get() 15 | >>> config['Addresses'] 16 | {'API': '/ip4/127.0.0.1/tcp/5001', 17 | 'Gateway': '/ip4/127.0.0.1/tcp/8080', 18 | 'Swarm': ['/ip4/0.0.0.0/tcp/4001', '/ip6/::/tcp/4001']}, 19 | >>> config['Discovery'] 20 | {'MDNS': {'Enabled': True, 'Interval': 10}} 21 | 22 | Returns 23 | ------- 24 | dict 25 | The entire IPFS daemon configuration 26 | """ 27 | return self._client.request('/config/show', decoder='json', **kwargs) 28 | 29 | 30 | @base.returns_single_item(base.ResponseBase) 31 | def replace(self, config: utils.json_dict_t, **kwargs: base.CommonArgs): 32 | """Replaces the existing configuration with a new configuration tree 33 | 34 | Make sure to back up the config file first if necessary, as this 35 | operation cannot be undone. 36 | """ 37 | return self._client.request('/config/replace', (config,), decoder='json', **kwargs) 38 | 39 | 40 | @base.returns_single_item(base.ResponseBase) 41 | def set(self, key: str, value: utils.json_value_t = None, **kwargs: base.CommonArgs): 42 | """Adds or replaces a single configuration value 43 | 44 | ..
code-block:: python 45 | 46 | >>> client.config.set("Addresses.Gateway") 47 | {'Key': 'Addresses.Gateway', 'Value': '/ip4/127.0.0.1/tcp/8080'} 48 | >>> client.config.set("Addresses.Gateway", "/ip4/127.0.0.1/tcp/8081") 49 | {'Key': 'Addresses.Gateway', 'Value': '/ip4/127.0.0.1/tcp/8081'} 50 | 51 | Parameters 52 | ---------- 53 | key 54 | The key of the configuration entry (e.g. "Addresses.API") 55 | value 56 | The value to set the configuration entry to 57 | 58 | Returns 59 | ------- 60 | dict 61 | 62 | +-------+---------------------------------------------+ 63 | | Key | The requested configuration key | 64 | +-------+---------------------------------------------+ 65 | | Value | The new value of this configuration key     | 66 | +-------+---------------------------------------------+ 67 | """ 68 | args = (key, value) 69 | return self._client.request('/config', args, decoder='json', **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/dag.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | from .. import multipart 4 | from .. import utils 5 | 6 | 7 | class Section(base.SectionBase): 8 | @base.returns_single_item(base.ResponseBase) 9 | def get(self, cid: base.cid_t, **kwargs: base.CommonArgs): 10 | """Retrieves the contents of a DAG node 11 | 12 | .. code-block:: python 13 | 14 | >>> client.dag.get('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') 15 | {'Data': '\x08\x01', 16 | 'Links': [ 17 | {'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV', 18 | 'Name': 'Makefile', 'Size': 174}, 19 | {'Hash': 'QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX', 20 | 'Name': 'example', 'Size': 1474}, 21 | {'Hash': 'QmZAL3oHMQYqsV61tGvoAVtQLs1WzRe1zkkamv9qxqnDuK', 22 | 'Name': 'home', 'Size': 3947}, 23 | {'Hash': 'QmZNPyKVriMsZwJSNXeQtVQSNU4v4KEKGUQaMT61LPahso', 24 | 'Name': 'lib', 'Size': 268261}, 25 | {'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY', 26 | 'Name': 'published-version', 'Size': 55} 27 | ]} 28 | 29 | Parameters 30 | ---------- 31 | cid 32 | Key of the object to retrieve, in CID format 33 | 34 | Returns 35 | ------- 36 | dict 37 | Contents of the requested DAG node 38 | """ 39 | args = (str(cid),) 40 | return self._client.request('/dag/get', args, decoder='json', **kwargs) 41 | 42 | @base.returns_single_item(base.ResponseBase) 43 | def put(self, data: utils.clean_file_t, format: str = 'cbor', 44 | input_enc: str = 'json', **kwargs: base.CommonArgs): 45 | """Decodes the given input file as a DAG object and returns its key 46 | 47 | .. code-block:: python 48 | 49 | >>> client.dag.put(io.BytesIO(b''' 50 | ... { 51 | ... "Data": "another", 52 | ... "Links": [ { 53 | ... "Name": "some link", 54 | ... "Hash": "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCV … R39V", 55 | ... "Size": 8 56 | ... } ] 57 | ... }''')) 58 | {'Cid': { 59 | '/': 'bafyreifgjgbmtykld2e3yncey3naek5xad3h4m2pxmo3of376qxh54qk34' 60 | } 61 | } 62 | 63 | Parameters 64 | ---------- 65 | data 66 | IO stream object or path to a file containing the data to put 67 | format 68 | Format that the object will be added as. Default: cbor 69 | input_enc 70 | Format that the input object will be.
Default: json 71 | 72 | Returns 73 | ------- 74 | dict 75 | Cid with the address of the dag object 76 | """ 77 | opts = {'format': format, 'input-enc': input_enc} 78 | kwargs.setdefault('opts', {}).update(opts) 79 | body, headers = multipart.stream_files(data, chunk_size=self.chunk_size) 80 | return self._client.request('/dag/put', decoder='json', data=body, 81 | headers=headers, **kwargs) 82 | 83 | @base.returns_single_item(base.ResponseBase) 84 | def resolve(self, cid: base.cid_t, **kwargs: base.CommonArgs): 85 | """Resolves a DAG node from its CID, returning its address and remaining path 86 | 87 | .. code-block:: python 88 | 89 | >>> client.dag.resolve('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') 90 | {'Cid': { 91 | '/': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' 92 | } 93 | } 94 | 95 | Parameters 96 | ---------- 97 | cid 98 | Key of the object to resolve, in CID format 99 | 100 | Returns 101 | ------- 102 | dict 103 | Cid with the address of the dag object 104 | """ 105 | args = (str(cid),) 106 | return self._client.request('/dag/resolve', args, decoder='json', **kwargs) 107 | 108 | @base.returns_single_item(base.ResponseBase) 109 | def imprt(self, data: utils.clean_file_t, **kwargs: base.CommonArgs): 110 | """Imports a .car file with a DAG into IPFS 111 | 112 | .. code-block:: python 113 | 114 | >>> with open('data.car', 'rb') as file: 115 | ... client.dag.imprt(file) 116 | {'Root': { 117 | 'Cid': { 118 | '/': 'bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya' 119 | } 120 | } 121 | } 122 | 123 | *Note*: This method is named ``.imprt`` (rather than ``.import``) to avoid causing a Python 124 | :exc:`SyntaxError` due to ``import`` being a reserved keyword in Python. 125 | 126 | Parameters 127 | ---------- 128 | data 129 | IO stream object with data that should be imported 130 | 131 | Returns 132 | ------- 133 | dict 134 | Dictionary with the root CID of the DAG imported 135 | """ 136 | body, headers = multipart.stream_files(data, chunk_size=self.chunk_size) 137 | return self._client.request('/dag/import', decoder='json', data=body, 138 | headers=headers, **kwargs) 139 | 140 | def export(self, cid: str, **kwargs: base.CommonArgs): 141 | """Exports a DAG into a .car file format 142 | 143 | .. code-block:: python 144 | 145 | >>> data = client.dag.export('bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya') 146 | 147 | *Note*: When exporting larger DAG structures, remember that you can set the *stream* 148 | parameter to ``True`` on any method to have it return results incrementally. 149 | 150 | Parameters 151 | ---------- 152 | cid 153 | Key of the object to export, in CID format 154 | 155 | Returns 156 | ------- 157 | bytes 158 | DAG in a .car format 159 | """ 160 | args = (str(cid),) 161 | return self._client.request('/dag/export', args, **kwargs) 162 | -------------------------------------------------------------------------------- /ipfshttpclient/client/dht.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | from .. import exceptions 4 | 5 | 6 | class Section(base.SectionBase): 7 | @base.returns_single_item(base.ResponseBase) 8 | def findpeer(self, peer_id: str, *peer_ids: str, **kwargs: base.CommonArgs): 9 | """Queries the DHT for all of the associated multiaddresses 10 | 11 | .. 
code-block:: python 12 | 13 | >>> client.dht.findpeer("QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZN … MTLZ") 14 | [{'ID': 'QmfVGMFrwW6AV6fTWmD6eocaTybffqAvkVLXQEFrYdk6yc', 15 | 'Extra': '', 'Type': 6, 'Responses': None}, 16 | {'ID': 'QmTKiUdjbRjeN9yPhNhG1X38YNuBdjeiV9JXYWzCAJ4mj5', 17 | 'Extra': '', 'Type': 6, 'Responses': None}, 18 | {'ID': 'QmTGkgHSsULk8p3AKTAqKixxidZQXFyF7mCURcutPqrwjQ', 19 | 'Extra': '', 'Type': 6, 'Responses': None}, 20 | … 21 | {'ID': '', 'Extra': '', 'Type': 2, 22 | 'Responses': [ 23 | {'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ', 24 | 'Addrs': [ 25 | '/ip4/10.9.8.1/tcp/4001', 26 | '/ip6/::1/tcp/4001', 27 | '/ip4/164.132.197.107/tcp/4001', 28 | '/ip4/127.0.0.1/tcp/4001']} 29 | ]}] 30 | 31 | Parameters 32 | ---------- 33 | peer_id 34 | The ID of the peer to search for 35 | 36 | Returns 37 | ------- 38 | dict 39 | List of multiaddrs 40 | """ 41 | args = (peer_id,) + peer_ids 42 | return self._client.request('/dht/findpeer', args, decoder='json', **kwargs) 43 | 44 | 45 | @base.returns_multiple_items(base.ResponseBase) 46 | def findprovs(self, cid: base.cid_t, *cids: base.cid_t, **kwargs: base.CommonArgs): 47 | """Finds peers in the DHT that can provide a specific value 48 | 49 | .. code-block:: python 50 | 51 | >>> client.dht.findprovs("QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQu … mpW2") 52 | [{'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ', 53 | 'Extra': '', 'Type': 6, 'Responses': None}, 54 | {'ID': 'QmaK6Aj5WXkfnWGoWq7V8pGUYzcHPZp4jKQ5JtmRvSzQGk', 55 | 'Extra': '', 'Type': 6, 'Responses': None}, 56 | {'ID': 'QmdUdLu8dNvr4MVW1iWXxKoQrbG6y1vAVWPdkeGK4xppds', 57 | 'Extra': '', 'Type': 6, 'Responses': None}, 58 | … 59 | {'ID': '', 'Extra': '', 'Type': 4, 'Responses': [ 60 | {'ID': 'QmVgNoP89mzpgEAAqK8owYoDEyB97Mk … E9Uc', 'Addrs': None} 61 | ]}, 62 | {'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ', 63 | 'Extra': '', 'Type': 1, 'Responses': [ 64 | {'ID': 'QmSHXfsmN3ZduwFDjeqBn1C8b1tcLkxK6yd … waXw', 'Addrs': [ 65 | '/ip4/127.0.0.1/tcp/4001', 66 | '/ip4/172.17.0.8/tcp/4001', 67 | '/ip6/::1/tcp/4001', 68 | '/ip4/52.32.109.74/tcp/1028' 69 | ]} 70 | ]}] 71 | 72 | Parameters 73 | ---------- 74 | cid 75 | The DHT key to find providers for 76 | 77 | Returns 78 | ------- 79 | dict 80 | List of provider Peer IDs 81 | """ 82 | args = (str(cid),) + tuple(str(c) for c in cids) 83 | return self._client.request('/dht/findprovs', args, decoder='json', **kwargs) 84 | 85 | 86 | @base.returns_single_item(base.ResponseBase) 87 | def get(self, key: str, *keys: str, **kwargs: base.CommonArgs): 88 | """Queries the DHT for its best value related to given key 89 | 90 | There may be several different values for a given key stored in the 91 | DHT; in this context *best* means the record that is most desirable. 92 | There is no one metric for *best*: it depends entirely on the key type. 93 | For IPNS, *best* is the record that is both valid and has the highest 94 | sequence number (freshest). Different key types may specify other rules 95 | for what they consider to be the *best*. 
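A minimal usage sketch (illustrative only; the peer ID below is borrowed from the ``client.id()`` example elsewhere in this documentation, and the record contents depend entirely on the key type):

.. code-block:: python

    >>> # Hypothetical call: look up the best IPNS record for a peer
    >>> client.dht.get("/ipns/QmVgNoP89mzpgEAAqK8owYoDEyB97MkcGvoWZir8otE9Uc")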
96 | 97 | Parameters 98 | ---------- 99 | key 100 | One or more keys whose values should be looked up 101 | 102 | Returns 103 | ------- 104 | str 105 | """ 106 | args = (key,) + keys 107 | res = self._client.request('/dht/get', args, decoder='json', **kwargs) 108 | 109 | if isinstance(res, dict) and "Extra" in res: 110 | return res["Extra"] 111 | else: 112 | for r in res: 113 | if "Extra" in r and len(r["Extra"]) > 0: 114 | return r["Extra"] 115 | raise exceptions.Error("empty response from DHT") 116 | 117 | 118 | #TODO: Implement `provide(cid)` 119 | 120 | 121 | @base.returns_multiple_items(base.ResponseBase) 122 | def put(self, key: str, value: str, **kwargs: base.CommonArgs): 123 | """Writes a key/value pair to the DHT 124 | 125 | Given a key of the form ``/foo/bar`` and a value of any form, this will 126 | write that value to the DHT with that key. 127 | 128 | Keys have two parts: a keytype (foo) and the key name (bar). IPNS uses 129 | the ``/ipns/`` keytype, and expects the key name to be a Peer ID. IPNS 130 | entries are formatted with a special structure. 131 | 132 | You may only use keytypes that are supported in your ``ipfs`` binary: 133 | ``go-ipfs`` currently only supports the ``/ipns/`` keytype. Unless you 134 | have a relatively deep understanding of the key's internal structure, 135 | you likely want to use :meth:`~ipfshttpclient.Client.name_publish` 136 | instead. 137 | 138 | Value is arbitrary text. 139 | 140 | .. code-block:: python 141 | 142 | >>> client.dht.put("QmVgNoP89mzpgEAAqK8owYoDEyB97Mkc … E9Uc", "test123") 143 | [{'ID': 'QmfLy2aqbhU1RqZnGQyqHSovV8tDufLUaPfN1LNtg5CvDZ', 144 | 'Extra': '', 'Type': 5, 'Responses': None}, 145 | {'ID': 'QmZ5qTkNvvZ5eFq9T4dcCEK7kX8L7iysYEpvQmij9vokGE', 146 | 'Extra': '', 'Type': 5, 'Responses': None}, 147 | {'ID': 'QmYqa6QHCbe6eKiiW6YoThU5yBy8c3eQzpiuW22SgVWSB8', 148 | 'Extra': '', 'Type': 6, 'Responses': None}, 149 | … 150 | {'ID': 'QmP6TAKVDCziLmx9NV8QGekwtf7ZMuJnmbeHMjcfoZbRMd', 151 | 'Extra': '', 'Type': 1, 'Responses': []}] 152 | 153 | Parameters 154 | ---------- 155 | key 156 | A unique identifier 157 | value 158 | Arbitrary text to associate with the input (2048 bytes or less) 159 | 160 | Returns 161 | ------- 162 | list 163 | """ 164 | args = (key, value) 165 | return self._client.request('/dht/put', args, decoder='json', **kwargs) 166 | 167 | 168 | @base.returns_multiple_items(base.ResponseBase) 169 | def query(self, peer_id: str, *peer_ids: str, **kwargs: base.CommonArgs): 170 | """Finds the closest Peer IDs to a given Peer ID by querying the DHT. 171 | 172 | .. 
code-block:: python 173 | 174 | >>> client.dht.query("/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDM … uvuJ") 175 | [{'ID': 'QmPkFbxAQ7DeKD5VGSh9HQrdS574pyNzDmxJeGrRJxoucF', 176 | 'Extra': '', 'Type': 2, 'Responses': None}, 177 | {'ID': 'QmR1MhHVLJSLt9ZthsNNhudb1ny1WdhY4FPW21ZYFWec4f', 178 | 'Extra': '', 'Type': 2, 'Responses': None}, 179 | {'ID': 'Qmcwx1K5aVme45ab6NYWb52K2TFBeABgCLccC7ntUeDsAs', 180 | 'Extra': '', 'Type': 2, 'Responses': None}, 181 | … 182 | {'ID': 'QmYYy8L3YD1nsF4xtt4xmsc14yqvAAnKksjo3F3iZs5jPv', 183 | 'Extra': '', 'Type': 1, 'Responses': []}] 184 | 185 | Parameters 186 | ---------- 187 | peer_id 188 | The peerID to run the query against 189 | 190 | Returns 191 | ------- 192 | dict 193 | List of peer IDs 194 | """ 195 | args = (peer_id,) + peer_ids 196 | return self._client.request('/dht/query', args, decoder='json', **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/key.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | 4 | class Section(base.SectionBase): 5 | #TODO: Add `export(name, password)` 6 | 7 | 8 | @base.returns_single_item(base.ResponseBase) 9 | def gen(self, key_name: str, type: str, size: int = 2048, 10 | **kwargs: base.CommonArgs): 11 | """Adds a new public key that can be used for 12 | :meth:`~ipfshttpclient.Client.name.publish` 13 | 14 | .. code-block:: python 15 | 16 | >>> client.key.gen('example_key_name') 17 | {'Name': 'example_key_name', 18 | 'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'} 19 | 20 | Parameters 21 | ---------- 22 | key_name 23 | Name of the new key to be generated. Used to reference the key. 24 | type 25 | Type of key to generate. The current possible key types are: 26 | 27 | * ``"rsa"`` 28 | * ``"ed25519"`` 29 | size 30 | Bitsize of key to generate 31 | 32 | Returns 33 | ------- 34 | dict 35 | 36 | +------+---------------------------------------------------+ 37 | | Name | The name of the newly generated key | 38 | +------+---------------------------------------------------+ 39 | | Id | The key ID/fingerprint of the newly generated key | 40 | +------+---------------------------------------------------+ 41 | """ 42 | 43 | opts = {"type": type, "size": size} 44 | kwargs.setdefault("opts", {}).update(opts) 45 | args = (key_name,) 46 | 47 | return self._client.request('/key/gen', args, decoder='json', **kwargs) 48 | 49 | 50 | #TODO: Add `import(name, pam, password)` 51 | 52 | 53 | @base.returns_single_item(base.ResponseBase) 54 | def list(self, **kwargs: base.CommonArgs): 55 | """Returns a list of all available IPNS keys 56 | 57 | .. code-block:: python 58 | 59 | >>> client.key.list() 60 | {'Keys': [ 61 | {'Name': 'self', 62 | 'Id': 'QmQf22bZar3WKmojipms22PkXH1MZGmvsqzQtuSvQE3uhm'}, 63 | {'Name': 'example_key_name', 64 | 'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'} 65 | ]} 66 | 67 | Returns 68 | ------- 69 | dict 70 | 71 | +------+--------------------------------------------------------+ 72 | | Keys | List of dictionaries with Names and Ids of public keys | 73 | +------+--------------------------------------------------------+ 74 | """ 75 | return self._client.request('/key/list', decoder='json', **kwargs) 76 | 77 | 78 | @base.returns_single_item(base.ResponseBase) 79 | def rename(self, key_name: str, new_key_name: str, **kwargs: base.CommonArgs): 80 | """Rename an existing key 81 | 82 | .. 
code-block:: python 83 | 84 | >>> client.key.rename("bla", "personal") 85 | {"Was": "bla", 86 | "Now": "personal", 87 | "Id": "QmeyrRNxXaasZaoDXcCZgryoBCga9shaHQ4suHAYXbNZF3", 88 | "Overwrite": False} 89 | 90 | Parameters 91 | ---------- 92 | key_name 93 | Current name of the key to rename 94 | new_key_name 95 | New name of the key 96 | 97 | Returns 98 | ------- 99 | dict 100 | Information about the key rename operation 101 | """ 102 | args = (key_name, new_key_name) 103 | return self._client.request( 104 | '/key/rename', args, decoder='json', **kwargs 105 | ) 106 | 107 | 108 | @base.returns_single_item(base.ResponseBase) 109 | def rm(self, key_name: str, *key_names: str, **kwargs: base.CommonArgs): 110 | """Removes one or more keys 111 | 112 | .. code-block:: python 113 | 114 | >>> client.key.rm("bla") 115 | {"Keys": [ 116 | {"Name": "bla", 117 | "Id": "QmfJpR6paB6h891y7SYXGe6gapyNgepBeAYMbyejWA4FWA"} 118 | ]} 119 | 120 | Parameters 121 | ---------- 122 | key_name 123 | Name of the key(s) to remove. 124 | 125 | Returns 126 | ------- 127 | dict 128 | 129 | +------+--------------------------------------------------+ 130 | | Keys | List of key names and IDs that have been removed | 131 | +------+--------------------------------------------------+ 132 | """ 133 | args = (key_name,) + key_names 134 | return self._client.request('/key/rm', args, decoder='json', **kwargs) 135 | -------------------------------------------------------------------------------- /ipfshttpclient/client/miscellaneous.py: -------------------------------------------------------------------------------- 1 | import typing as ty 2 | 3 | from . import base 4 | 5 | from .. import exceptions 6 | 7 | 8 | class Base(base.ClientBase): 9 | @base.returns_single_item(base.ResponseBase) 10 | def dns(self, domain_name: str, recursive: bool = False, 11 | **kwargs: base.CommonArgs): 12 | """Resolves DNS links to their referenced dweb-path 13 | 14 | CIDs are hard to remember, but domain names are usually easy to 15 | remember. To create memorable aliases for CIDs, DNS TXT records 16 | can point to other DNS links, IPFS objects, IPNS keys, etc. 17 | This command resolves those links to the referenced object. 18 | 19 | For example, with this DNS TXT record:: 20 | 21 | >>> import dns.resolver 22 | >>> a = dns.resolver.query("ipfs.io", "TXT") 23 | >>> a.response.answer[0].items[0].to_text() 24 | '"dnslink=/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n"' 25 | 26 | The resolver will give:: 27 | 28 | >>> client.dns("ipfs.io") 29 | {'Path': '/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n'} 30 | 31 | Parameters 32 | ---------- 33 | domain_name 34 | The domain name to resolve 35 | recursive 36 | Resolve until the name is not a DNS link 37 | 38 | Returns 39 | ------- 40 | dict 41 | 42 | +------+----------------------------------------+ 43 | | Path | Resource where the DNS entry points to | 44 | +------+----------------------------------------+ 45 | """ 46 | kwargs.setdefault("opts", {})["recursive"] = recursive 47 | 48 | args = (domain_name,) 49 | return self._client.request('/dns', args, decoder='json', **kwargs) 50 | 51 | 52 | @base.returns_single_item(base.ResponseBase) 53 | def id(self, peer: ty.Optional[str] = None, **kwargs: base.CommonArgs): 54 | """Returns general information of an IPFS Node 55 | 56 | Returns the PublicKey, ProtocolVersion, ID, AgentVersion and 57 | Addresses of the connected daemon or some other node. 58 | 59 | .. 
code-block:: python 60 | 61 | >>> client.id() 62 | {'ID': 'QmVgNoP89mzpgEAAqK8owYoDEyB97MkcGvoWZir8otE9Uc', 63 | 'PublicKey': 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggE … BAAE=', 64 | 'AgentVersion': 'go-libp2p/3.3.4', 65 | 'ProtocolVersion': 'ipfs/0.1.0', 66 | 'Addresses': [ 67 | '/ip4/127.0.0.1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owYo … E9Uc', 68 | '/ip4/10.1.0.172/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owY … E9Uc', 69 | '/ip4/172.18.0.1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owY … E9Uc', 70 | '/ip6/::1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owYoDEyB97 … E9Uc', 71 | '/ip6/fccc:7904:b05b:a579:957b:deef:f066:cad9/tcp/400 … E9Uc', 72 | '/ip6/fd56:1966:efd8::212/tcp/4001/ipfs/QmVgNoP89mzpg … E9Uc', 73 | '/ip6/fd56:1966:efd8:0:def1:34d0:773:48f/tcp/4001/ipf … E9Uc', 74 | '/ip6/2001:db8:1::1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8 … E9Uc', 75 | '/ip4/77.116.233.54/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8 … E9Uc', 76 | '/ip4/77.116.233.54/tcp/10842/ipfs/QmVgNoP89mzpgEAAqK … E9Uc']} 77 | 78 | Parameters 79 | ---------- 80 | peer 81 | Peer.ID of the node to look up (local node if ``None``) 82 | 83 | Returns 84 | ------- 85 | dict 86 | Information about the IPFS node 87 | """ 88 | args = (peer,) if peer is not None else () 89 | return self._client.request('/id', args, decoder='json', **kwargs) 90 | 91 | 92 | #TODO: isOnline() 93 | 94 | 95 | @base.returns_multiple_items(base.ResponseBase) 96 | def ping(self, peer: str, *peers: str, count: int = 10, 97 | **kwargs: base.CommonArgs): 98 | """Provides round-trip latency information for the routing system. 99 | 100 | Finds nodes via the routing system, sends pings, waits for pongs, 101 | and prints out round-trip latency information. 102 | 103 | .. code-block:: python 104 | 105 | >>> client.ping("QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n") 106 | [{'Success': True, 'Time': 0, 107 | 'Text': 'Looking up peer QmTzQ1JRkWErjk39mryYw2WVaphAZN … c15n'}, 108 | {'Success': False, 'Time': 0, 109 | 'Text': 'Peer lookup error: routing: not found'}] 110 | 111 | .. hint:: 112 | 113 | Pass ``stream=True`` to receive ping progress reports as they 114 | arrive. 115 | 116 | Parameters 117 | ---------- 118 | peer 119 | ID of peer(s) to be pinged 120 | count 121 | Number of ping messages to send 122 | 123 | Returns 124 | ------- 125 | list 126 | Progress reports from the ping 127 | """ 128 | kwargs.setdefault("opts", {})["count"] = count 129 | 130 | args = (peer,) + peers 131 | return self._client.request('/ping', args, decoder='json', **kwargs) 132 | 133 | 134 | @base.returns_single_item(base.ResponseBase) 135 | def resolve(self, path: str, recursive: bool = False, 136 | **kwargs: base.CommonArgs): 137 | """Resolves an dweb-path and return the path of the referenced item 138 | 139 | There are a number of mutable name protocols that can link among 140 | themselves and into IPNS. For example IPNS references can (currently) 141 | point at an IPFS object, and DNS links can point at other DNS links, 142 | IPNS entries, or IPFS objects. This command accepts any of these 143 | identifiers. 144 | 145 | .. 
code-block:: python 146 | 147 | >>> client.resolve("/ipfs/QmTkzDwWqPbnAh5YiV5VwcTLnGdw … ca7D/Makefile") 148 | {'Path': '/ipfs/Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV'} 149 | >>> client.resolve("/ipns/ipfs.io") 150 | {'Path': '/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n'} 151 | 152 | Parameters 153 | ---------- 154 | path 155 | The name to resolve 156 | recursive 157 | Resolve until the result is an IPFS name 158 | 159 | Returns 160 | ------- 161 | dict 162 | 163 | +------+-------------------------------------+ 164 | | Path | IPFS path of the requested resource | 165 | +------+-------------------------------------+ 166 | """ 167 | kwargs.setdefault("opts", {})["recursive"] = recursive 168 | 169 | args = (path,) 170 | return self._client.request('/resolve', args, decoder='json', **kwargs) 171 | 172 | 173 | @base.returns_no_item 174 | def stop(self): 175 | """Stops the connected IPFS daemon instance 176 | 177 | Sending any further requests after this will fail with 178 | :class:`~ipfshttpclient.exceptions.ConnectionError`, unless you start 179 | another IPFS daemon instance at the same address. 180 | """ 181 | try: 182 | self._client.request('/shutdown') 183 | except exceptions.ConnectionError: 184 | # Sometimes the daemon kills the connection before sending a 185 | # response causing an incorrect `ConnectionError` to bubble 186 | pass 187 | 188 | 189 | @base.returns_single_item(base.ResponseBase) 190 | def version(self, **kwargs: base.CommonArgs): 191 | """Returns the software versions of the currently connected node 192 | 193 | .. code-block:: python 194 | 195 | >>> client.version() 196 | {'Version': '0.4.3-rc2', 'Repo': '4', 'Commit': '', 197 | 'System': 'amd64/linux', 'Golang': 'go1.6.2'} 198 | 199 | Returns 200 | ------- 201 | dict 202 | Daemon and system version information 203 | """ 204 | return self._client.request('/version', decoder='json', **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/name.py: -------------------------------------------------------------------------------- 1 | import typing as ty 2 | 3 | from . import base 4 | 5 | 6 | class Section(base.SectionBase): 7 | @base.returns_single_item(base.ResponseBase) 8 | def publish(self, ipfs_path: str, 9 | resolve: bool = True, lifetime: ty.Union[str, int] = "24h", 10 | ttl: ty.Union[str, int] = None, key: str = None, 11 | allow_offline: bool = False, **kwargs: base.CommonArgs): 12 | """Publishes an object to IPNS 13 | 14 | IPNS is a PKI namespace, where names are the hashes of public keys, and 15 | the private key enables publishing new (signed) values. In publish, the 16 | default value of *name* is your own identity public key. 17 | 18 | .. code-block:: python 19 | 20 | >>> client.name.publish('/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZK … GZ5d') 21 | {'Value': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d', 22 | 'Name': 'QmVgNoP89mzpgEAAqK8owYoDEyB97MkcGvoWZir8otE9Uc'} 23 | 24 | Parameters 25 | ---------- 26 | ipfs_path 27 | IPFS path of the object to be published 28 | allow_offline 29 | When offline, save the IPNS record to the local 30 | datastore without broadcasting to the network instead 31 | of simply failing. 32 | lifetime 33 | Time duration that the record will be valid for 34 | 35 | Accepts durations such as ``"300s"``, ``"1.5h"`` or ``"2h45m"``. 
36 | Valid units are: 37 | 38 | * ``"ns"`` 39 | * ``"us"`` (or ``"µs"``) 40 | * ``"ms"`` 41 | * ``"s"`` 42 | * ``"m"`` 43 | * ``"h"`` 44 | resolve 45 | Resolve given path before publishing 46 | ttl 47 | Time duration this record should be cached for. 48 | Same syntax as the 'lifetime' option (experimental feature). 49 | key 50 | Name of the key to be used, as listed by 'ipfs key list'. 51 | 52 | Returns 53 | ------- 54 | dict 55 | 56 | +-------+----------------------------------------------------------+ 57 | | Name | Key ID of the key to which the given value was published | 58 | +-------+----------------------------------------------------------+ 59 | | Value | Value that was published | 60 | +-------+----------------------------------------------------------+ 61 | """ 62 | opts = {"lifetime": str(lifetime), 63 | "resolve": resolve, 64 | "allow-offline": allow_offline} 65 | if ttl: 66 | opts["ttl"] = str(ttl) 67 | if key: 68 | opts["key"] = key 69 | kwargs.setdefault("opts", {}).update(opts) 70 | 71 | args = (ipfs_path,) 72 | return self._client.request('/name/publish', args, decoder='json', **kwargs) 73 | 74 | 75 | @base.returns_single_item(base.ResponseBase) 76 | def resolve(self, name: str = None, recursive: bool = False, 77 | nocache: bool = False, dht_record_count: ty.Optional[int] = None, 78 | dht_timeout: ty.Optional[ty.Union[str, int]] = None, 79 | **kwargs: base.CommonArgs): 80 | """Retrieves the value currently published at the given IPNS name 81 | 82 | IPNS is a PKI namespace, where names are the hashes of public keys, and 83 | the private key enables publishing new (signed) values. In resolve, the 84 | default value of ``name`` is your own identity public key. 85 | 86 | .. code-block:: python 87 | 88 | >>> client.name.resolve() 89 | {'Path': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d'} 90 | 91 | Parameters 92 | ---------- 93 | name 94 | The IPNS name to resolve (defaults to the connected node) 95 | recursive 96 | Resolve until the result is not an IPFS name (default: false) 97 | nocache 98 | Do not use cached entries (default: false) 99 | dht_record_count 100 | Number of records to request for DHT resolution. 101 | dht_timeout 102 | Maximum time to collect values during DHT resolution, e.g. "30s". 103 | 104 | For the exact syntax see the ``lifetime`` argument on 105 | :meth:`~ipfshttpclient.Client.name.publish`. Set this parameter to 106 | ``0`` to disable the timeout. 107 | 108 | Returns 109 | ------- 110 | dict 111 | 112 | +------+--------------------------------------+ 113 | | Path | The resolved value of the given name | 114 | +------+--------------------------------------+ 115 | """ 116 | opts = {"recursive": recursive, "nocache": nocache} 117 | if dht_record_count is not None: 118 | opts["dht-record-count"] = str(dht_record_count) 119 | if dht_timeout is not None: 120 | opts["dht-timeout"] = str(dht_timeout) 121 | 122 | kwargs.setdefault("opts", {}).update(opts) 123 | args = (name,) if name is not None else () 124 | return self._client.request('/name/resolve', args, decoder='json', **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/pin.py: -------------------------------------------------------------------------------- 1 | from . 
import base 2 | 3 | 4 | class Section(base.SectionBase): 5 | @base.returns_single_item(base.ResponseBase) 6 | def add(self, path: base.cid_t, *paths: base.cid_t, recursive: bool = True, 7 | **kwargs: base.CommonArgs): 8 | """Pins objects to the node's local repository 9 | 10 | Stores IPFS object(s) from a given path in the local repository. 11 | 12 | .. code-block:: python 13 | 14 | >>> client.pin.add("QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d") 15 | {'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']} 16 | 17 | Parameters 18 | ---------- 19 | path 20 | Path to object(s) to be pinned 21 | recursive 22 | Recursively pin objects linked to by the specified object(s) 23 | 24 | Returns 25 | ------- 26 | dict 27 | 28 | +------+-----------------------------------------------------------+ 29 | | Pins | List of IPFS objects that have been pinned by this action | 30 | +------+-----------------------------------------------------------+ 31 | """ 32 | kwargs.setdefault("opts", {})["recursive"] = recursive 33 | 34 | args = (str(path), *(str(p) for p in paths)) 35 | return self._client.request('/pin/add', args, decoder='json', **kwargs) 36 | 37 | 38 | @base.returns_single_item(base.ResponseBase) 39 | def ls(self, *paths: base.cid_t, type: str = "all", **kwargs: base.CommonArgs): 40 | """Lists objects pinned in the local repository 41 | 42 | By default, all pinned objects are returned, but the ``type`` flag or 43 | arguments can restrict that to a specific pin type or to some specific 44 | objects respectively. In particular the ``type="recursive"`` argument will 45 | only list objects added with ``.pin.add(…)`` (or similar) and will greatly 46 | speed up processing as obtaining this list does *not* require a complete 47 | repository metadata scan. 48 | 49 | .. code-block:: python 50 | 51 | >>> client.pin.ls() 52 | {'Keys': { 53 | 'QmNNPMA1eGUbKxeph6yqV8ZmRkdVat … YMuz': {'Type': 'recursive'}, 54 | 'QmNPZUCeSN5458Uwny8mXSWubjjr6J … kP5e': {'Type': 'recursive'}, 55 | 'QmNg5zWpRMxzRAVg7FTQ3tUxVbKj8E … gHPz': {'Type': 'indirect'}, 56 | … 57 | 'QmNiuVapnYCrLjxyweHeuk6Xdqfvts … wCCe': {'Type': 'indirect'} 58 | }} 59 | 60 | >>> # While the above works you should always try to use `type="recursive"` 61 | >>> # instead as it will greatly speed up processing and only lists 62 | >>> # explicit pins (added with `.pin.add(…)` or similar), rather 63 | >>> # than all objects that won't be removed as part of `.repo.gc()`: 64 | >>> client.pin.ls(type="recursive") 65 | {'Keys': { 66 | 'QmNNPMA1eGUbKxeph6yqV8ZmRkdVat … YMuz': {'Type': 'recursive'}, 67 | 'QmNPZUCeSN5458Uwny8mXSWubjjr6J … kP5e': {'Type': 'recursive'}, 68 | … 69 | }} 70 | 71 | >>> client.pin.ls('/ipfs/QmNNPMA1eGUbKxeph6yqV8ZmRkdVat … YMuz') 72 | {'Keys': { 73 | 'QmNNPMA1eGUbKxeph6yqV8ZmRkdVat … YMuz': {'Type': 'recursive'}}} 74 | 75 | >>> client.pin.ls('/ipfs/QmdBCSn4UJP82MjhRVwpABww48tXL3 … mA6z') 76 | ipfshttpclient.exceptions.ErrorResponse: 77 | path '/ipfs/QmdBCSn4UJP82MjhRVwpABww48tXL3 … mA6z' is not pinned 78 | 79 | Parameters 80 | ---------- 81 | paths 82 | The IPFS paths or CIDs to search for 83 | 84 | If none are passed, return information about all pinned objects. 85 | If any of the passed CIDs is not pinned, then remote will 86 | return an error and an :exc:`ErrorResponse` exception will be raised. 87 | type 88 | The type of pinned keys to list. 
Can be: 89 | 90 | * ``"direct"`` 91 | * ``"indirect"`` 92 | * ``"recursive"`` 93 | * ``"all"`` 94 | 95 | Raises 96 | ------ 97 | ~ipfshttpclient.exceptions.ErrorResponse 98 | Remote returned an error. Remote will return an error 99 | if any of the passed CIDs is not pinned. In this case, 100 | the exception will contain 'not pinned' in its args[0]. 101 | 102 | Returns 103 | ------- 104 | dict 105 | 106 | +------+--------------------------------------------------------------+ 107 | | Keys | Mapping of IPFS object names currently pinned to their types | 108 | +------+--------------------------------------------------------------+ 109 | """ 110 | kwargs.setdefault("opts", {})["type"] = type 111 | 112 | args = tuple(str(p) for p in paths) 113 | return self._client.request('/pin/ls', args, decoder='json', **kwargs) 114 | 115 | 116 | @base.returns_single_item(base.ResponseBase) 117 | def rm(self, path: base.cid_t, *paths: base.cid_t, recursive: bool = True, 118 | **kwargs: base.CommonArgs): 119 | """Removes a pinned object from local storage 120 | 121 | Removes the pin from the given object allowing it to be garbage 122 | collected if needed. That is, depending on the node configuration 123 | it may not be garbage collected anytime soon or at all unless you manually 124 | clean up the local repository using :meth:`~ipfshttpclient.Client.repo.gc`. 125 | 126 | Also note that if an object is pinned both directly (that is, its type 127 | is ``"recursive"``) and indirectly (meaning that it is referenced 128 | by another object that is still pinned), it may not be removed at all 129 | after this. 130 | 131 | .. code-block:: python 132 | 133 | >>> client.pin.rm('QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d') 134 | {'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']} 135 | 136 | Parameters 137 | ---------- 138 | path 139 | Path to object(s) to be unpinned 140 | recursive 141 | Recursively unpin the object linked to by the specified object(s) 142 | 143 | Returns 144 | ------- 145 | dict 146 | 147 | +------+-------------------------------------------------------------+ 148 | | Pins | List of IPFS objects that have been unpinned by this action | 149 | +------+-------------------------------------------------------------+ 150 | """ 151 | kwargs.setdefault("opts", {})["recursive"] = recursive 152 | 153 | args = (str(path), *(str(p) for p in paths)) 154 | return self._client.request('/pin/rm', args, decoder='json', **kwargs) 155 | 156 | 157 | @base.returns_single_item(base.ResponseBase) 158 | def update(self, from_path: base.cid_t, to_path: base.cid_t, *, 159 | unpin: bool = True, **kwargs: base.CommonArgs): 160 | """Replaces one pin with another 161 | 162 | Updates one pin to another, making sure that all objects in the new pin 163 | are local. Then removes the old pin. This is an optimized version of 164 | first using :meth:`~ipfshttpclient.Client.pin.add` to add a new pin 165 | for an object and then using :meth:`~ipfshttpclient.Client.pin.rm` to remove 166 | the pin for the old object. 167 | 168 | .. code-block:: python 169 | 170 | >>> client.pin.update("QmXMqez83NU77ifmcPs5CkNRTMQksBLkyfBf4H5g1NZ52P", 171 | ... "QmUykHAi1aSjMzHw3KmBoJjqRUQYNkFXm8K1y7ZsJxpfPH") 172 | {"Pins": ["/ipfs/QmXMqez83NU77ifmcPs5CkNRTMQksBLkyfBf4H5g1NZ52P", 173 | "/ipfs/QmUykHAi1aSjMzHw3KmBoJjqRUQYNkFXm8K1y7ZsJxpfPH"]} 174 | 175 | Parameters 176 | ---------- 177 | from_path 178 | Path to the old object 179 | to_path 180 | Path to the new object to be pinned 181 | unpin 182 | Should the pin of the old object be removed? 
183 | 184 | Returns 185 | ------- 186 | dict 187 | 188 | +------+-------------------------------------------------------------+ 189 | | Pins | List of IPFS objects that have been affected by this action | 190 | +------+-------------------------------------------------------------+ 191 | """ 192 | kwargs.setdefault("opts", {})["unpin"] = unpin 193 | 194 | args = (str(from_path), str(to_path)) 195 | return self._client.request('/pin/update', args, decoder='json', **kwargs) 196 | 197 | 198 | @base.returns_multiple_items(base.ResponseBase, stream=True) 199 | def verify(self, path: base.cid_t, *paths: base.cid_t, verbose: bool = False, 200 | **kwargs: base.CommonArgs): 201 | """Verifies that all recursive pins are completely available in the local 202 | repository 203 | 204 | Scan the repo for pinned object graphs and check their integrity. 205 | Issues will be reported back with a helpful human-readable error 206 | message to aid in error recovery. This is useful to help recover 207 | from datastore corruptions (such as when accidentally deleting 208 | files added using the filestore backend). 209 | 210 | This function returns an iterator that has to be exhausted or closed 211 | using either a context manager (``with``-statement) or its 212 | ``.close()`` method. 213 | 214 | .. code-block:: python 215 | 216 | >>> with client.pin.verify("QmN…TTZ", verbose=True) as pin_verify_iter: 217 | ... for item in pin_verify_iter: 218 | ... print(item) 219 | ... 220 | {"Cid":"QmVkNdzCBukBRdpyFiKPyL2R15qPExMr9rV9RFV2kf9eeV","Ok":True} 221 | {"Cid":"QmbPzQruAEFjUU3gQfupns6b8USr8VrD9H71GrqGDXQSxm","Ok":True} 222 | {"Cid":"Qmcns1nUvbeWiecdGDPw8JxWeUfxCV8JKhTfgzs3F8JM4P","Ok":True} 223 | … 224 | 225 | Parameters 226 | ---------- 227 | path 228 | Path to object(s) to be checked 229 | verbose 230 | Also report status of items that were OK? 231 | 232 | Returns 233 | ------- 234 | Iterable[dict] 235 | 236 | +-----+----------------------------------------------------+ 237 | | Cid | IPFS object ID checked | 238 | +-----+----------------------------------------------------+ 239 | | Ok | Whether the given object was successfully verified | 240 | +-----+----------------------------------------------------+ 241 | """ 242 | kwargs.setdefault("opts", {})["verbose"] = verbose 243 | 244 | args = (str(path), *(str(p) for p in paths)) 245 | return self._client.request('/pin/verify', args, decoder='json', stream=True, **kwargs) -------------------------------------------------------------------------------- /ipfshttpclient/client/pubsub.py: -------------------------------------------------------------------------------- 1 | import typing as ty 2 | 3 | from . import base 4 | 5 | 6 | class SubChannel: 7 | """Wrapper for a pubsub subscription object that allows for easy 8 | closing of subscriptions. 9 | """ 10 | def __init__(self, sub): 11 | self.__sub = sub # type: str 12 | 13 | def read_message(self): 14 | return next(self.__sub) 15 | 16 | def __iter__(self): 17 | return self.__sub 18 | 19 | def close(self): 20 | self.__sub.close() 21 | 22 | def __enter__(self): 23 | return self 24 | 25 | def __exit__(self, *a): 26 | self.close() 27 | 28 | 29 | class Section(base.SectionBase): 30 | @base.returns_single_item(base.ResponseBase) 31 | def ls(self, **kwargs: base.CommonArgs): 32 | """Lists subscribed topics by name 33 | 34 | This method returns data that contains a list of 35 | all topics the user is subscribed to. In order 36 | to subscribe to a topic ``pubsub.subscribe`` must be called. 37 | 38 | .. 
code-block:: python 39 | 40 | # subscribe to a channel 41 | >>> with client.pubsub.subscribe("hello") as sub: 42 | ... client.pubsub.ls() 43 | { 44 | 'Strings' : ["hello"] 45 | } 46 | 47 | Returns 48 | ------- 49 | dict 50 | 51 | +---------+-------------------------------------------------+ 52 | | Strings | List of topics the IPFS daemon is subscribed to | 53 | +---------+-------------------------------------------------+ 54 | """ 55 | return self._client.request('/pubsub/ls', decoder='json', **kwargs) 56 | 57 | 58 | @base.returns_single_item(base.ResponseBase) 59 | def peers(self, topic: ty.Optional[str] = None, **kwargs: base.CommonArgs): 60 | """Lists the peers we are pubsubbing with 61 | 62 | Lists the IDs of other IPFS users who we 63 | are connected to via some topic. Without specifying 64 | a topic, IPFS peers from all subscribed topics 65 | will be returned in the data. If a topic is specified 66 | only the IPFS IDs of the peers from the specified 67 | topic will be returned in the data. 68 | 69 | .. code-block:: python 70 | 71 | >>> client.pubsub.peers() 72 | {'Strings': 73 | [ 74 | 'QmPbZ3SDgmTNEB1gNSE9DEf4xT8eag3AFn5uo7X39TbZM8', 75 | 'QmQKiXYzoFpiGZ93DaFBFDMDWDJCRjXDARu4wne2PRtSgA', 76 | ... 77 | 'QmepgFW7BHEtU4pZJdxaNiv75mKLLRQnPi1KaaXmQN4V1a' 78 | ] 79 | } 80 | 81 | ## with a topic 82 | 83 | # subscribe to a channel 84 | >>> with client.pubsub.subscribe('hello') as sub: 85 | ... client.pubsub.peers(topic='hello') 86 | {'String': 87 | [ 88 | 'QmPbZ3SDgmTNEB1gNSE9DEf4xT8eag3AFn5uo7X39TbZM8', 89 | ... 90 | # other peers connected to the same channel 91 | ] 92 | } 93 | 94 | Parameters 95 | ---------- 96 | topic 97 | The topic to list connected peers of 98 | (defaults to None which lists peers for all topics) 99 | 100 | Returns 101 | ------- 102 | dict 103 | 104 | +---------+-------------------------------------------------+ 105 | | Strings | List of PeerIDs of peers we are pubsubbing with | 106 | +---------+-------------------------------------------------+ 107 | """ 108 | args = (topic,) if topic is not None else () 109 | return self._client.request('/pubsub/peers', args, decoder='json', **kwargs) 110 | 111 | 112 | @base.returns_no_item 113 | def publish(self, topic: str, payload: str, **kwargs: base.CommonArgs): 114 | """Publish a message to a given pubsub topic 115 | 116 | Publishing will publish the given payload (string) to 117 | everyone currently subscribed to the given topic. 118 | 119 | All data (including the ID of the publisher) is automatically 120 | base64 encoded when published. 121 | 122 | .. code-block:: python 123 | 124 | # publishes the message 'message' to the topic 'hello' 125 | >>> client.pubsub.publish('hello', 'message') 126 | [] 127 | 128 | Parameters 129 | ---------- 130 | topic 131 | Topic to publish to 132 | payload 133 | Data to be published to the given topic 134 | 135 | Returns 136 | ------- 137 | list 138 | An empty list 139 | """ 140 | args = (topic, payload) 141 | return self._client.request('/pubsub/pub', args, decoder='json', **kwargs) 142 | 143 | 144 | def subscribe(self, topic: str, discover: bool = False, **kwargs: base.CommonArgs): 145 | """Subscribes to messages on a given topic 146 | 147 | Subscribing to a topic in IPFS means anytime 148 | a message is published to a topic, the subscribers 149 | will be notified of the publication. 150 | 151 | The connection with the pubsub topic is opened and read. 152 | The Subscription returned should be used inside a context 153 | manager to ensure that it is closed properly and not left 154 | hanging. 
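If a context manager cannot be used, the subscription may instead be closed manually; a minimal sketch using only the ``SubChannel`` methods defined above (``client`` is assumed to be a connected client instance):

.. code-block:: python

    >>> sub = client.pubsub.subscribe('testing')
    >>> try:
    ...     message = sub.read_message()  # blocks until one message arrives
    ... finally:
    ...     sub.close()

The usual context-manager form follows.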
155 | 156 | .. code-block:: python 157 | 158 | >>> sub = client.pubsub.subscribe('testing') 159 | >>> with client.pubsub.subscribe('testing') as sub: 160 | ... # publish a message 'hello' to the topic 'testing' 161 | ... client.pubsub.publish('testing', 'hello') 162 | ... for message in sub: 163 | ... print(message) 164 | ... # Stop reading the subscription after 165 | ... # we receive one publication 166 | ... break 167 | {'from': '', 168 | 'data': 'aGVsbG8=', 169 | 'topicIDs': ['testing']} 170 | 171 | # NOTE: in order to receive published data 172 | # you must already be subscribed to the topic at publication 173 | # time. 174 | 175 | Parameters 176 | ---------- 177 | topic 178 | Name of a topic to subscribe to 179 | 180 | discover 181 | Try to discover other peers subscribed to the same topic 182 | (defaults to False) 183 | 184 | Returns 185 | ------- 186 | :class:`SubChannel` 187 | Generator wrapped in a context manager that maintains a 188 | connection stream to the given topic. 189 | """ 190 | args = (topic, discover) 191 | return SubChannel(self._client.request('/pubsub/sub', args, stream=True, decoder='json')) -------------------------------------------------------------------------------- /ipfshttpclient/client/repo.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | from . import base 4 | 5 | 6 | class Section(base.SectionBase): 7 | @base.returns_multiple_items(base.ResponseBase) 8 | def gc(self, *, quiet: bool = False, return_result: bool = True, **kwargs: base.CommonArgs): 9 | """Removes stored objects that are not pinned from the repo 10 | 11 | .. code-block:: python 12 | 13 | >>> client.repo.gc() 14 | [{'Key': 'QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQuwaHG2mpW2'}, 15 | {'Key': 'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k'}, 16 | {'Key': 'QmRVBnxUCsD57ic5FksKYadtyUbMsyo9KYQKKELajqAp4q'}, 17 | … 18 | {'Key': 'QmYp4TeCurXrhsxnzt5wqLqqUz8ZRg5zsc7GuUrUSDtwzP'}] 19 | 20 | Performs a garbage collection sweep of the local set of 21 | stored objects and removes ones that are not pinned in order 22 | to reclaim hard disk space. Returns the hashes of all collected 23 | objects. 24 | 25 | Parameters 26 | ---------- 27 | quiet 28 | Should the client avoid downloading the list of removed objects? 29 | 30 | Passing ``True`` to this parameter often causes the GC process to 31 | speed up tremendously as it will also avoid generating the list of 32 | removed objects in the connected daemon at all. 33 | return_result 34 | If ``False`` this is a legacy alias for ``quiet=True``. 35 | 36 | (Will be dropped in py-ipfs-http-client 0.7.x!) 37 | 38 | Returns 39 | ------- 40 | dict 41 | List of IPFS objects that have been removed 42 | """ 43 | if not return_result: 44 | warnings.warn("Parameter `return_result` of `.repo.gc(…)` is deprecated " 45 | "in favour of the newer `quiet` parameter", DeprecationWarning) 46 | 47 | quiet = quiet or not return_result 48 | 49 | if "use_http_head_for_no_result" not in self._client.workarounds: 50 | # go-ipfs 0.4.22- does not support the quiet option yet 51 | kwargs.setdefault("opts", {})["quiet"] = quiet 52 | 53 | kwargs.setdefault("return_result", not quiet) 54 | 55 | return self._client.request('/repo/gc', decoder='json', **kwargs) 56 | 57 | 58 | @base.returns_single_item(base.ResponseBase) 59 | def stat(self, **kwargs: base.CommonArgs): 60 | """Returns local repository status information 61 | 62 | .. 
code-block:: python 63 | 64 | >>> client.repo.stat() 65 | {'NumObjects': 354, 66 | 'RepoPath': '…/.local/share/ipfs', 67 | 'Version': 'fs-repo@4', 68 | 'RepoSize': 13789310} 69 | 70 | Returns 71 | ------- 72 | dict 73 | General information about the IPFS file repository 74 | 75 | +------------+-------------------------------------------------+ 76 | | NumObjects | Number of objects in the local repo. | 77 | +------------+-------------------------------------------------+ 78 | | RepoPath | The path to the repo being currently used. | 79 | +------------+-------------------------------------------------+ 80 | | RepoSize | Size in bytes that the repo is currently using. | 81 | +------------+-------------------------------------------------+ 82 | | Version | The repo version. | 83 | +------------+-------------------------------------------------+ 84 | """ 85 | return self._client.request('/repo/stat', decoder='json', **kwargs) 86 | 87 | 88 | #TODO: `version()` 89 | -------------------------------------------------------------------------------- /ipfshttpclient/client/swarm.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | 4 | class FiltersSection(base.SectionBase): 5 | @base.returns_single_item(base.ResponseBase) 6 | def add(self, address: base.multiaddr_t, *addresses: base.multiaddr_t, 7 | **kwargs: base.CommonArgs): 8 | """Adds a given multiaddr filter to the filter/ignore list 9 | 10 | This will add an address filter to the daemon's swarm. Filters applied 11 | this way will not persist across daemon reboots; to achieve that, add your 12 | filters to the configuration file. 13 | 14 | .. code-block:: python 15 | 16 | >>> client.swarm.filters.add("/ip4/192.168.0.0/ipcidr/16") 17 | {'Strings': ['/ip4/192.168.0.0/ipcidr/16']} 18 | 19 | Parameters 20 | ---------- 21 | address 22 | Multiaddr to avoid connecting to 23 | 24 | Returns 25 | ------- 26 | dict 27 | 28 | +---------+-----------------------------+ 29 | | Strings | List of swarm filters added | 30 | +---------+-----------------------------+ 31 | """ 32 | args = (str(address), *(str(a) for a in addresses)) 33 | return self._client.request('/swarm/filters/add', args, decoder='json', **kwargs) 34 | 35 | 36 | @base.returns_single_item(base.ResponseBase) 37 | def rm(self, address: base.multiaddr_t, *addresses: base.multiaddr_t, 38 | **kwargs: base.CommonArgs): 39 | """Removes a given multiaddr filter from the filter list 40 | 41 | This will remove an address filter from the daemon's swarm. Filters 42 | removed this way will not persist across daemon reboots; to achieve that, 43 | remove your filters from the configuration file. 44 | 45 | .. code-block:: python 46 | 47 | >>> client.swarm.filters.rm("/ip4/192.168.0.0/ipcidr/16") 48 | {'Strings': ['/ip4/192.168.0.0/ipcidr/16']} 49 | 50 | Parameters 51 | ---------- 52 | address 53 | Multiaddr filter to remove 54 | 55 | Returns 56 | ------- 57 | dict 58 | 59 | +---------+-------------------------------+ 60 | | Strings | List of swarm filters removed | 61 | +---------+-------------------------------+ 62 | """ 63 | args = (str(address), *(str(a) for a in addresses)) 64 | return self._client.request('/swarm/filters/rm', args, decoder='json', **kwargs) 65 | 66 | 67 | class Section(base.SectionBase): 68 | filters = base.SectionProperty(FiltersSection) 69 | 70 | 71 | @base.returns_single_item(base.ResponseBase) 72 | def addrs(self, **kwargs: base.CommonArgs): 73 | """Returns the addresses of currently connected peers by peer id 74 | 75 | .. 
code-block:: python 76 | 77 | >>> pprint(client.swarm.addrs()) 78 | {'Addrs': { 79 | 'QmNMVHJTSZHTWMWBbmBrQgkA1hZPWYuVJx2DpSGESWW6Kn': [ 80 | '/ip4/10.1.0.1/tcp/4001', 81 | '/ip4/127.0.0.1/tcp/4001', 82 | '/ip4/51.254.25.16/tcp/4001', 83 | '/ip6/2001:41d0:b:587:3cae:6eff:fe40:94d8/tcp/4001', 84 | '/ip6/2001:470:7812:1045::1/tcp/4001', 85 | '/ip6/::1/tcp/4001', 86 | '/ip6/fc02:2735:e595:bb70:8ffc:5293:8af8:c4b7/tcp/4001', 87 | '/ip6/fd00:7374:6172:100::1/tcp/4001', 88 | '/ip6/fd20:f8be:a41:0:c495:aff:fe7e:44ee/tcp/4001', 89 | '/ip6/fd20:f8be:a41::953/tcp/4001'], 90 | 'QmNQsK1Tnhe2Uh2t9s49MJjrz7wgPHj4VyrZzjRe8dj7KQ': [ 91 | '/ip4/10.16.0.5/tcp/4001', 92 | '/ip4/127.0.0.1/tcp/4001', 93 | '/ip4/172.17.0.1/tcp/4001', 94 | '/ip4/178.62.107.36/tcp/4001', 95 | '/ip6/::1/tcp/4001'], 96 | … 97 | }} 98 | 99 | Returns 100 | ------- 101 | dict 102 | Multiaddrs of peers by peer id 103 | 104 | +-------+-----------------------------------------------------------+ 105 | | Addrs | Mapping of PeerIDs to a list of its advertised multiaddrs | 106 | +-------+-----------------------------------------------------------+ 107 | """ 108 | return self._client.request('/swarm/addrs', decoder='json', **kwargs) 109 | 110 | 111 | @base.returns_single_item(base.ResponseBase) 112 | def connect(self, address: base.multiaddr_t, *addresses: base.multiaddr_t, 113 | **kwargs: base.CommonArgs): 114 | """Attempts to connect to a peer at the given multiaddr 115 | 116 | This will open a new direct connection to a peer address. The address 117 | format is an IPFS multiaddr, e.g.:: 118 | 119 | /ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ 120 | 121 | .. code-block:: python 122 | 123 | >>> client.swarm.connect("/ip4/104.131.131.82/tcp/4001/ipfs/Qma … uvuJ") 124 | {'Strings': ['connect QmaCpDMGvV2BGHeYERUEnRQAwe3 … uvuJ success']} 125 | 126 | Parameters 127 | ---------- 128 | address 129 | Address of peer to connect to 130 | 131 | Returns 132 | ------- 133 | dict 134 | Textual connection status report 135 | """ 136 | args = (str(address), *(str(a) for a in addresses)) 137 | return self._client.request('/swarm/connect', args, decoder='json', **kwargs) 138 | 139 | 140 | @base.returns_single_item(base.ResponseBase) 141 | def disconnect(self, address: base.multiaddr_t, *addresses: base.multiaddr_t, 142 | **kwargs: base.CommonArgs): 143 | """Closes any open connection to a given multiaddr 144 | 145 | This will close a connection to a peer address. The address format is 146 | an IPFS multiaddr:: 147 | 148 | /ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ 149 | 150 | The disconnect is not permanent; if IPFS needs to talk to that address 151 | later, it will reconnect. To avoid this, add a filter for the given 152 | address before disconnecting. 153 | 154 | .. code-block:: python 155 | 156 | >>> client.swarm.disconnect("/ip4/104.131.131.82/tcp/4001/ipfs/Qm … uJ") 157 | {'Strings': ['disconnect QmaCpDMGvV2BGHeYERUEnRQA … uvuJ success']} 158 | 159 | Parameters 160 | ---------- 161 | address 162 | Address of peer to disconnect from 163 | 164 | Returns 165 | ------- 166 | dict 167 | Textual connection status report 168 | """ 169 | args = (str(address), *(str(a) for a in addresses)) 170 | return self._client.request('/swarm/disconnect', args, decoder='json', **kwargs) 171 | 172 | 173 | @base.returns_single_item(base.ResponseBase) 174 | def peers(self, **kwargs: base.CommonArgs): 175 | """Returns the addresses & IDs of currently connected peers 176 | 177 | .. 
code-block:: python 178 | 179 | >>> client.swarm.peers() 180 | {'Strings': [ 181 | '/ip4/101.201.40.124/tcp/40001/ipfs/QmZDYAhmMDtnoC6XZ … kPZc', 182 | '/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYER … uvuJ', 183 | '/ip4/104.223.59.174/tcp/4001/ipfs/QmeWdgoZezpdHz1PX8 … 1jB6', 184 | … 185 | '/ip6/fce3: … :f140/tcp/43901/ipfs/QmSoLnSGccFuZQJzRa … ca9z' 186 | ]} 187 | 188 | Returns 189 | ------- 190 | dict 191 | 192 | +---------+----------------------------------------------------+ 193 | | Strings | List of Multiaddrs that the daemon is connected to | 194 | +---------+----------------------------------------------------+ 195 | """ 196 | return self._client.request('/swarm/peers', decoder='json', **kwargs) 197 | -------------------------------------------------------------------------------- /ipfshttpclient/client/unstable.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | 4 | class LogSection(base.SectionBase): 5 | @base.returns_single_item(base.ResponseBase) 6 | def level(self, subsystem: str, level: str, **kwargs: base.CommonArgs): 7 | r"""Changes the logging output level for a given subsystem 8 | 9 | **This API is subject to future change or removal!** 10 | 11 | .. code-block:: python 12 | 13 | >>> client.unstable.log.level("path", "info") 14 | {"Message": "Changed log level of 'path' to 'info'\n"} 15 | 16 | Parameters 17 | ---------- 18 | subsystem 19 | The subsystem logging identifier (Use ``"all"`` for all subsystems) 20 | level 21 | The desired logging level. Must be one of: 22 | 23 | * ``"debug"`` 24 | * ``"info"`` 25 | * ``"warning"`` 26 | * ``"error"`` 27 | * ``"fatal"`` 28 | * ``"panic"`` 29 | 30 | Returns 31 | ------- 32 | dict 33 | 34 | +--------+-----------------------+ 35 | | Status | Textual status report | 36 | +--------+-----------------------+ 37 | """ 38 | args = (subsystem, level) 39 | return self._client.request('/log/level', args, 40 | decoder='json', **kwargs) 41 | 42 | 43 | @base.returns_single_item(base.ResponseBase) 44 | def ls(self, **kwargs: base.CommonArgs): 45 | """Lists the available logging subsystems 46 | 47 | **This API is subject to future change or removal!** 48 | 49 | .. 
code-block:: python 50 | 51 | >>> client.unstable.log.ls() 52 | {'Strings': [ 53 | 'github.com/ipfs/go-libp2p/p2p/host', 'net/identify', 54 | 'merkledag', 'providers', 'routing/record', 'chunk', 'mfs', 55 | 'ipns-repub', 'flatfs', 'ping', 'mockrouter', 'dagio', 56 | 'cmds/files', 'blockset', 'engine', 'mocknet', 'config', 57 | 'commands/http', 'cmd/ipfs', 'command', 'conn', 'gc', 58 | 'peerstore', 'core', 'coreunix', 'fsrepo', 'core/server', 59 | 'boguskey', 'github.com/ipfs/go-libp2p/p2p/host/routed', 60 | 'diagnostics', 'namesys', 'fuse/ipfs', 'node', 'secio', 61 | 'core/commands', 'supernode', 'mdns', 'path', 'table', 62 | 'swarm2', 'peerqueue', 'mount', 'fuse/ipns', 'blockstore', 63 | 'github.com/ipfs/go-libp2p/p2p/host/basic', 'lock', 'nat', 64 | 'importer', 'corerepo', 'dht.pb', 'pin', 'bitswap_network', 65 | 'github.com/ipfs/go-libp2p/p2p/protocol/relay', 'peer', 66 | 'transport', 'dht', 'offlinerouting', 'tarfmt', 'eventlog', 67 | 'ipfsaddr', 'github.com/ipfs/go-libp2p/p2p/net/swarm/addr', 68 | 'bitswap', 'reprovider', 'supernode/proxy', 'crypto', 'tour', 69 | 'commands/cli', 'blockservice']} 70 | 71 | Returns 72 | ------- 73 | dict 74 | 75 | +---------+-----------------------------------+ 76 | | Strings | List of daemon logging subsystems | 77 | +---------+-----------------------------------+ 78 | """ 79 | return self._client.request('/log/ls', decoder='json', **kwargs) 80 | 81 | 82 | @base.returns_multiple_items(base.ResponseBase, stream=True) 83 | def tail(self, **kwargs: base.CommonArgs): 84 | r"""Streams log outputs as they are generated 85 | 86 | **This API is subject to future change or removal!** 87 | 88 | This function returns an iterator that needs to be closed using a 89 | context manager (``with``-statement) or using the ``.close()`` method. 90 | 91 | .. code-block:: python 92 | 93 | >>> with client.unstable.log.tail() as log_tail_iter: 94 | ... for item in log_tail_iter: 95 | ... print(item) 96 | ... 97 | {"event":"updatePeer","system":"dht", 98 | "peerID":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", 99 | "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", 100 | "time":"2016-08-22T13:25:27.43353297Z"} 101 | {"event":"handleAddProviderBegin","system":"dht", 102 | "peer":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", 103 | "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", 104 | "time":"2016-08-22T13:25:27.433642581Z"} 105 | {"event":"handleAddProvider","system":"dht","duration":91704, 106 | "key":"QmNT9Tejg6t57Vs8XM2TVJXCwevWiGsZh3kB4HQXUZRK1o", 107 | "peer":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", 108 | "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", 109 | "time":"2016-08-22T13:25:27.433747513Z"} 110 | {"event":"updatePeer","system":"dht", 111 | "peerID":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", 112 | "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", 113 | "time":"2016-08-22T13:25:27.435843012Z"} 114 | … 115 | 116 | Returns 117 | ------- 118 | Iterable[dict] 119 | """ 120 | return self._client.request('/log/tail', decoder='json', 121 | stream=True, **kwargs) 122 | 123 | 124 | 125 | class RefsSection(base.SectionBase): 126 | @base.returns_multiple_items(base.ResponseBase) 127 | def __call__(self, cid: base.cid_t, **kwargs: base.CommonArgs): 128 | """Returns the hashes of objects referenced by the given hash 129 | 130 | **This API is subject to future change or removal!** You likely want to 131 | use :meth:`~ipfshttpclient.object.links` instead. 132 | 133 | .. 
code-block:: python 134 | 135 | >>> client.unstable.refs('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') 136 | [{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 … cNMV', 'Err': ''}, 137 | … 138 | {'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi … eXJY', 'Err': ''}] 139 | 140 | Parameters 141 | ---------- 142 | cid 143 | Path to the object(s) to list refs from 144 | 145 | Returns 146 | ------- 147 | list 148 | """ 149 | args = (str(cid),) 150 | return self._client.request('/refs', args, decoder='json', **kwargs) 151 | 152 | 153 | @base.returns_multiple_items(base.ResponseBase) 154 | def local(self, **kwargs: base.CommonArgs): 155 | """Returns the hashes of all local objects 156 | 157 | **This API is subject to future change or removal!** 158 | 159 | .. code-block:: python 160 | 161 | >>> client.unstable.refs.local() 162 | [{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 … cNMV', 'Err': ''}, 163 | … 164 | {'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi … eXJY', 'Err': ''}] 165 | 166 | Returns 167 | ------- 168 | list 169 | """ 170 | return self._client.request('/refs/local', decoder='json', **kwargs) 171 | 172 | 173 | 174 | class Section(base.SectionBase): 175 | """Features that are subject to change and are only provided for convenience""" 176 | log = base.SectionProperty(LogSection) 177 | refs = base.SectionProperty(RefsSection) -------------------------------------------------------------------------------- /ipfshttpclient/encoding.py: -------------------------------------------------------------------------------- 1 | """Classes for encoding and decoding datastreams into object values""" 2 | import abc 3 | import codecs 4 | import typing as ty 5 | import json 6 | 7 | from . import exceptions 8 | from . import utils 9 | 10 | 11 | if ty.TYPE_CHECKING: 12 | import typing_extensions as ty_ext 13 | else: 14 | from . import utils as ty_ext 15 | 16 | 17 | T = ty.TypeVar("T") 18 | 19 | 20 | def empty_gen() -> ty.Generator[T, None, None]: 21 | """A generator that yields nothing""" 22 | if False: # pragma: no branch 23 | yield ty.cast(T, None) # type: ignore[unreachable] 24 | 25 | 26 | class Encoding(ty.Generic[T], metaclass=abc.ABCMeta): 27 | """Abstract base for a data parser/encoder interface""" 28 | #name: str 29 | is_stream = False # type: bool 30 | 31 | @abc.abstractmethod 32 | def parse_partial(self, raw: bytes) -> ty.Generator[T, ty.Any, ty.Any]: 33 | """Parses the given data and yields all complete data sets that can 34 | be built from this. 
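A sketch of how implementations of this interface are typically driven (``chunks`` and ``handle`` are hypothetical stand-ins for an iterable of received ``bytes`` and a result callback; neither is part of this module):

.. code-block:: python

    parser = Json()                          # any concrete Encoding subclass
    for chunk in chunks:                     # raw bytes as they arrive
        for obj in parser.parse_partial(chunk):
            handle(obj)                      # only complete data sets are yielded
    for obj in parser.parse_finalize():      # flush whatever remains buffered
        handle(obj)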
35 | 
36 | 		Raises
37 | 		------
38 | 		~ipfshttpclient.exceptions.DecodingError
39 | 
40 | 		Parameters
41 | 		----------
42 | 		raw
43 | 			Data to be parsed
44 | 		"""
45 | 
46 | 	def parse_finalize(self) -> ty.Generator[T, ty.Any, ty.Any]:
47 | 		"""Finalizes parsing based on remaining buffered data and yields the
48 | 		remaining data sets
49 | 
50 | 		Raises
51 | 		------
52 | 		~ipfshttpclient.exceptions.DecodingError
53 | 		"""
54 | 		return empty_gen()
55 | 
56 | 	@abc.abstractmethod
57 | 	def encode(self, obj: T) -> bytes:
58 | 		"""Serializes the given Python object to a bytes string
59 | 
60 | 		Raises
61 | 		------
62 | 		~ipfshttpclient.exceptions.EncodingError
63 | 
64 | 		Parameters
65 | 		----------
66 | 		obj
67 | 			Object to be encoded
68 | 		"""
69 | 
70 | 
71 | class Dummy(Encoding[bytes]):
72 | 	"""Dummy parser/encoder that does nothing"""
73 | 	name = "none"
74 | 	is_stream = True
75 | 
76 | 	def parse_partial(self, raw: bytes) -> ty.Generator[bytes, ty.Any, ty.Any]:
77 | 		"""Yields the data passed into this method
78 | 
79 | 		Parameters
80 | 		----------
81 | 		raw
82 | 			Any kind of data
83 | 		"""
84 | 		yield raw
85 | 
86 | 	def encode(self, obj: bytes) -> bytes:
87 | 		"""Returns the bytes representation of the data passed into this
88 | 		function
89 | 
90 | 		Parameters
91 | 		----------
92 | 		obj
93 | 			Any Python object
94 | 		"""
95 | 		return obj
96 | 
97 | 
98 | class Json(Encoding[utils.json_value_t]):
99 | 	"""JSON parser/encoder that handles concatenated JSON"""
100 | 	name = 'json'
101 | 
102 | 	def __init__(self) -> None:
103 | 		self._buffer = [] # type: ty.List[ty.Optional[str]]
104 | 		self._decoder1 = codecs.getincrementaldecoder('utf-8')()
105 | 		self._decoder2 = json.JSONDecoder()
106 | 		self._lasterror = None # type: ty.Optional[ValueError]
107 | 
108 | 	@ty.no_type_check # It works just fine and I don't want to rewrite it just
109 | 	                  # because mypy doesn't understand… # noqa: E114, E116
110 | 	def parse_partial(self, data: bytes) -> ty.Generator[utils.json_value_t, ty.Any, ty.Any]:
111 | 		"""Incrementally decodes JSON data sets into Python objects.
112 | 
113 | 		Raises
114 | 		------
115 | 		~ipfshttpclient.exceptions.DecodingError
116 | 		"""
117 | 		try:
118 | 			# Python requires all JSON data to be text strings
119 | 			lines = self._decoder1.decode(data, False).split("\n")
120 | 
121 | 			# Add first input line to last buffer line, if applicable, to
122 | 			# handle cases where the JSON string has been chopped in half
123 | 			# at the network level due to streaming
124 | 			if len(self._buffer) > 0 and self._buffer[-1] is not None:
125 | 				self._buffer[-1] += lines[0]
126 | 				self._buffer.extend(lines[1:])
127 | 			else:
128 | 				self._buffer.extend(lines)
129 | 		except UnicodeDecodeError as error:
130 | 			raise exceptions.DecodingError('json', error) from error
131 | 
132 | 		# Process data buffer
133 | 		index = 0
134 | 		try:
135 | 			# Process each line as a separate buffer
136 | 			#PERF: This way the `.lstrip()` call almost always becomes a NOP,
137 | 			#      and even if it does return a different string, it will only
138 | 			#      have to allocate a new buffer for the currently processed
139 | 			#      line.
140 | 			while index < len(self._buffer):
141 | 				while self._buffer[index]:
142 | 					# Make sure buffer does not start with whitespace
143 | 					#PERF: `.lstrip()` does not reallocate if the string does
144 | 					#      not actually start with whitespace.
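					# As an illustrative walkthrough of the buffering scheme
					# above (the input values are hypothetical, not taken from
					# this module): feeding the two network chunks
					# b'{"a": 1}\n{"b"' and b': 2}' into a fresh parser yields
					# the object from the complete first line right away,
					# buffers the dangling '{"b"' fragment and only completes
					# it on the next call:
					#
					#     parser = Json()
					#     list(parser.parse_partial(b'{"a": 1}\n{"b"'))  # -> [{'a': 1}]
					#     list(parser.parse_partial(b': 2}'))            # -> [{'b': 2}]
					#     list(parser.parse_finalize())                  # -> []
					#
					# Had the stream ended after the first chunk instead,
					# `parse_finalize()` would have raised
					# `~ipfshttpclient.exceptions.DecodingError`.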
145 | 					self._buffer[index] = self._buffer[index].lstrip()
146 | 
147 | 					# Handle case where the remainder of the line contained
148 | 					# only whitespace
149 | 					if not self._buffer[index]:
150 | 						self._buffer[index] = None
151 | 						continue
152 | 
153 | 					# Try decoding the partial data buffer and return results
154 | 					# from this
155 | 					#
156 | 					# Use `pragma: no branch` as the final loop iteration will always
157 | 					# raise if parsing didn't work out, rather than falling through
158 | 					# to the `yield obj` line.
159 | 					data = self._buffer[index]
160 | 					for index2 in range(index, len(self._buffer)): # pragma: no branch
161 | 						# If decoding doesn't succeed with the currently
162 | 						# selected buffer (very unlikely with our current
163 | 						# class of input data), then retry with appending
164 | 						# any other pending pieces of input data.
165 | 						# This will happen with JSON data that contains
166 | 						# arbitrary new-lines: "{1:\n2,\n3:4}"
167 | 						if index2 > index:
168 | 							data += "\n" + self._buffer[index2]
169 | 
170 | 						try:
171 | 							(obj, offset) = self._decoder2.raw_decode(data)
172 | 						except ValueError:
173 | 							# Treat error as fatal if we have already added
174 | 							# the final buffer to the input
175 | 							if (index2 + 1) == len(self._buffer):
176 | 								raise
177 | 						else:
178 | 							index = index2
179 | 							break
180 | 
181 | 					# Decoding succeeded – yield result and shorten buffer
182 | 					yield obj
183 | 					if offset < len(self._buffer[index]):
184 | 						self._buffer[index] = self._buffer[index][offset:]
185 | 					else:
186 | 						self._buffer[index] = None
187 | 				index += 1
188 | 		except ValueError as error:
189 | 			# It is unfortunately not possible to reliably detect whether
190 | 			# parsing ended because of an error *within* the JSON string, or
191 | 			# an unexpected *end* of the JSON string.
192 | 			# We therefore have to assume that any error that occurs here
193 | 			# *might* be related to the JSON parser hitting EOF and therefore
194 | 			# have to postpone error reporting until `parse_finalize` is
195 | 			# called.
196 | 			self._lasterror = error
197 | 		finally:
198 | 			# Remove all processed buffers
199 | 			del self._buffer[0:index]
200 | 
201 | 	def parse_finalize(self) -> ty.Generator[utils.json_value_t, ty.Any, ty.Any]:
202 | 		"""Raises errors for incomplete buffered data that could not be parsed
203 | 		because the end of the input data has been reached.
204 | 205 | Raises 206 | ------ 207 | ~ipfshttpclient.exceptions.DecodingError 208 | """ 209 | try: 210 | try: 211 | # Raise exception for remaining bytes in bytes decoder 212 | self._decoder1.decode(b'', True) 213 | except UnicodeDecodeError as error: 214 | raise exceptions.DecodingError('json', error) from error 215 | 216 | # Late raise errors that looked like they could have been fixed if 217 | # the caller had provided more data 218 | if self._buffer and self._lasterror: 219 | raise exceptions.DecodingError('json', self._lasterror) from self._lasterror 220 | finally: 221 | # Reset state 222 | self._buffer = [] 223 | self._lasterror = None 224 | self._decoder1.reset() 225 | 226 | return empty_gen() 227 | 228 | def encode(self, obj: utils.json_value_t) -> bytes: 229 | """Returns ``obj`` serialized as JSON formatted bytes 230 | 231 | Raises 232 | ------ 233 | ~ipfshttpclient.exceptions.EncodingError 234 | 235 | Parameters 236 | ---------- 237 | obj 238 | JSON serializable Python object 239 | """ 240 | try: 241 | result = json.dumps(obj, sort_keys=True, indent=None, 242 | separators=(',', ':'), ensure_ascii=False) 243 | return result.encode("utf-8") 244 | except (UnicodeEncodeError, TypeError) as error: 245 | raise exceptions.EncodingError('json', error) from error 246 | 247 | 248 | # encodings supported by the IPFS api (default is JSON) 249 | __encodings = { 250 | Dummy.name: Dummy, 251 | Json.name: Json, 252 | } # type: ty.Dict[str, ty.Type[Encoding[ty.Any]]] 253 | 254 | 255 | @ty.overload 256 | def get_encoding(name: ty_ext.Literal["none"]) -> Dummy: 257 | ... 258 | 259 | @ty.overload # noqa: E302 260 | def get_encoding(name: ty_ext.Literal["json"]) -> Json: 261 | ... 262 | 263 | def get_encoding(name: str) -> Encoding[ty.Any]: # noqa: E302 264 | """Returns an Encoder object for the given encoding name 265 | 266 | Raises 267 | ------ 268 | ~ipfshttpclient.exceptions.EncoderMissingError 269 | 270 | Parameters 271 | ---------- 272 | name 273 | Encoding name. 
Supported options:
274 | 
275 | 		 * ``"none"``
276 | 		 * ``"json"``
277 | 	"""
278 | 	try:
279 | 		return __encodings[name.lower()]()
280 | 	except KeyError:
281 | 		raise exceptions.EncoderMissingError(name) from None
282 | 
--------------------------------------------------------------------------------
/ipfshttpclient/exceptions.py:
--------------------------------------------------------------------------------
1 | """
2 | The class hierarchy for exceptions is::
3 | 
4 | 	Error
5 | 	 ├── VersionMismatch
6 | 	 ├── AddressError
7 | 	 ├── EncoderError
8 | 	 │    ├── EncoderMissingError
9 | 	 │    ├── EncodingError
10 | 	 │    └── DecodingError
11 | 	 └── CommunicationError
12 | 	      ├── ProtocolError
13 | 	      ├── StatusError
14 | 	      ├── ErrorResponse
15 | 	      │    └── PartialErrorResponse
16 | 	      ├── ConnectionError
17 | 	      └── TimeoutError
18 | 
19 | """
20 | import typing as ty
21 | 
22 | import multiaddr.exceptions # type: ignore[import]
23 | 
24 | 
25 | class Error(Exception):
26 | 	"""Base class for all exceptions in this module."""
27 | 	__slots__ = ()
28 | 
29 | 
30 | class AddressError(Error, multiaddr.exceptions.Error): # type: ignore[no-any-unimported, misc]
31 | 	"""Raised when the provided daemon location Multiaddr does not match any
32 | 	of the supported patterns."""
33 | 	__slots__ = ("addr",)
34 | 	#addr: ty.Union[str, bytes]
35 | 
36 | 	def __init__(self, addr: ty.Union[str, bytes]) -> None:
37 | 		self.addr = addr # type: ty.Union[str, bytes]
38 | 		Error.__init__(self, "Unsupported Multiaddr pattern: {0!r}".format(addr))
39 | 
40 | 
41 | class VersionMismatch(Error):
42 | 	"""Raised when the daemon version is not supported by this client version."""
43 | 	__slots__ = ("current", "minimum", "maximum")
44 | 	#current: ty.Sequence[int]
45 | 	#minimum: ty.Sequence[int]
46 | 	#maximum: ty.Sequence[int]
47 | 
48 | 	def __init__(self, current: ty.Sequence[int], minimum: ty.Sequence[int],
49 | 	             maximum: ty.Sequence[int]) -> None:
50 | 		self.current = current # type: ty.Sequence[int]
51 | 		self.minimum = minimum # type: ty.Sequence[int]
52 | 		self.maximum = maximum # type: ty.Sequence[int]
53 | 
54 | 		msg = "Unsupported daemon version '{}' (not in range: {} ≤ … < {})".format(
55 | 			".".join(map(str, current)), ".".join(map(str, minimum)), ".".join(map(str, maximum))
56 | 		)
57 | 		super().__init__(msg)
58 | 
59 | 
60 | ###############
61 | # encoding.py #
62 | ###############
63 | class EncoderError(Error):
64 | 	"""Base class for all encoding and decoding related errors."""
65 | 	__slots__ = ("encoder_name",)
66 | 	#encoder_name: str
67 | 
68 | 	def __init__(self, message: str, encoder_name: str) -> None:
69 | 		self.encoder_name = encoder_name # type: str
70 | 
71 | 		super().__init__(message)
72 | 
73 | 
74 | class EncoderMissingError(EncoderError):
75 | 	"""Raised when a requested encoder class does not actually exist."""
76 | 	__slots__ = ()
77 | 
78 | 	def __init__(self, encoder_name: str) -> None:
79 | 		super().__init__("Unknown encoder: '{}'".format(encoder_name), encoder_name)
80 | 
81 | 
82 | class EncodingError(EncoderError):
83 | 	"""Raised when encoding a Python object into a byte string has failed
84 | 	due to some problem with the input data."""
85 | 	__slots__ = ("original",)
86 | 	#original: Exception
87 | 
88 | 	def __init__(self, encoder_name: str, original: Exception) -> None:
89 | 		self.original = original # type: Exception
90 | 
91 | 		super().__init__("Object encoding error: {}".format(original), encoder_name)
92 | 
93 | 
94 | class DecodingError(EncoderError):
95 | 	"""Raised when decoding a byte string to a Python object has failed due to
96 | 	some problem with the input data."""
97 | 	
__slots__ = ("original",) 98 | #original: Exception 99 | 100 | def __init__(self, encoder_name: str, original: Exception) -> None: 101 | self.original = original # type: Exception 102 | 103 | super().__init__("Object decoding error: {}".format(original), encoder_name) 104 | 105 | 106 | ########### 107 | # http.py # 108 | ########### 109 | class CommunicationError(Error): 110 | """Base class for all network communication related errors.""" 111 | __slots__ = ("original",) 112 | #original: ty.Optional[Exception] 113 | 114 | def __init__(self, original: ty.Optional[Exception], 115 | _message: ty.Optional[str] = None) -> None: 116 | self.original = original # type: ty.Optional[Exception] 117 | 118 | msg = "" # type: str 119 | if _message: 120 | msg = _message 121 | else: 122 | msg = "{}: {}".format(type(original).__name__, str(original)) 123 | super().__init__(msg) 124 | 125 | 126 | class ProtocolError(CommunicationError): 127 | """Raised when parsing the response from the daemon has failed. 128 | 129 | This can most likely occur if the service on the remote end isn't in fact 130 | an IPFS daemon.""" 131 | __slots__ = () 132 | 133 | 134 | class StatusError(CommunicationError): 135 | """Raised when the daemon responds with an error to our request.""" 136 | __slots__ = () 137 | 138 | 139 | class ErrorResponse(StatusError): 140 | """Raised when the daemon has responded with an error message because the 141 | requested operation could not be carried out.""" 142 | __slots__ = () 143 | 144 | def __init__(self, message: str, original: ty.Optional[Exception]) -> None: 145 | super().__init__(original, message) 146 | 147 | 148 | class PartialErrorResponse(ErrorResponse): 149 | """Raised when the daemon has responded with an error message after having 150 | already returned some data.""" 151 | __slots__ = () 152 | 153 | def __init__(self, message: str, original: ty.Optional[Exception] = None) -> None: 154 | super().__init__(message, original) 155 | 156 | 157 | class ConnectionError(CommunicationError): 158 | """Raised when connecting to the service has failed on the socket layer.""" 159 | __slots__ = () 160 | 161 | 162 | class TimeoutError(CommunicationError): 163 | """Raised when the daemon didn't respond in time.""" 164 | __slots__ = () -------------------------------------------------------------------------------- /ipfshttpclient/filescanner_ty.pyi: -------------------------------------------------------------------------------- 1 | import enum 2 | import typing as ty 3 | 4 | class FSNodeType(enum.Enum): 5 | FILE = enum.auto() 6 | DIRECTORY = enum.auto() 7 | 8 | class FSNodeEntry(ty.Generic[ty.AnyStr], ty.NamedTuple): 9 | type: FSNodeType 10 | path: ty.AnyStr 11 | relpath: ty.AnyStr 12 | name: ty.AnyStr 13 | parentfd: ty.Optional[int] -------------------------------------------------------------------------------- /ipfshttpclient/http.py: -------------------------------------------------------------------------------- 1 | """Default HTTP client selection proxy""" 2 | import os 3 | 4 | from .http_common import ( 5 | StreamDecodeIteratorSync, 6 | 7 | addr_t, auth_t, cookies_t, headers_t, params_t, reqdata_sync_t, timeout_t, 8 | workarounds_t, 9 | ) 10 | 11 | 12 | __all__ = ( 13 | "addr_t", "auth_t", "cookies_t", "headers_t", "params_t", "reqdata_sync_t", 14 | "timeout_t", "workarounds_t", 15 | 16 | "ClientSync", 17 | "StreamDecodeIteratorSync", 18 | ) 19 | 20 | PREFER_HTTPX = (os.environ.get("PY_IPFS_HTTP_CLIENT_PREFER_HTTPX", "no").lower() 21 | not in ("0", "f", "false", "n", "no")) 22 | if 
PREFER_HTTPX: # pragma: http-backend=httpx 23 | try: #PY36+ 24 | from . import http_httpx as _backend 25 | except (ImportError, SyntaxError): #PY35 26 | from . import http_requests as _backend # type: ignore[no-redef] 27 | else: # pragma: http-backend=requests 28 | try: 29 | from . import http_requests as _backend # type: ignore[no-redef] 30 | except ImportError: # pragma: no cover 31 | from . import http_httpx as _backend 32 | 33 | ClientSync = _backend.ClientSync -------------------------------------------------------------------------------- /ipfshttpclient/http_httpx.py: -------------------------------------------------------------------------------- 1 | """HTTP client for API requests based on HTTPx 2 | 3 | This will be supplemented by an asynchronous version based on HTTPx's 4 | asynchronous API soon™. 5 | """ 6 | 7 | import math 8 | import socket 9 | import typing as ty 10 | 11 | import httpcore 12 | import httpx 13 | 14 | from . import encoding 15 | from . import exceptions 16 | from .http_common import ( 17 | ClientSyncBase, multiaddr_to_url_data, 18 | 19 | addr_t, auth_t, cookies_t, headers_t, params_t, reqdata_sync_t, timeout_t, 20 | Closable, 21 | ) 22 | 23 | 24 | if ty.TYPE_CHECKING: 25 | import httpx._types 26 | import typing_extensions 27 | 28 | # By using the precise types from HTTPx we'll also get type errors if our 29 | # types become somehow incompatible with the ones from that library 30 | RequestArgs = typing_extensions.TypedDict("RequestArgs", { 31 | "auth": "httpx._types.AuthTypes", 32 | "cookies": "httpx._types.CookieTypes", 33 | "headers": "httpx._types.HeaderTypes", 34 | "timeout": "httpx._types.TimeoutTypes", 35 | "params": "httpx._types.QueryParamTypes", 36 | }, total=False) 37 | else: 38 | RequestArgs = ty.Dict[str, ty.Any] 39 | 40 | 41 | def map_args_to_httpx( 42 | *, 43 | auth: auth_t = None, 44 | cookies: cookies_t = None, 45 | headers: headers_t = None, 46 | params: params_t = None, 47 | timeout: timeout_t = None, 48 | ) -> RequestArgs: 49 | kwargs: RequestArgs = {} 50 | 51 | if auth is not None: 52 | kwargs["auth"] = auth 53 | 54 | if cookies is not None: 55 | kwargs["cookies"] = cookies 56 | 57 | if headers is not None: 58 | kwargs["headers"] = headers 59 | 60 | if timeout is not None: 61 | if isinstance(timeout, tuple): 62 | kwargs["timeout"] = ( 63 | timeout[0] if timeout[0] < math.inf else None, 64 | timeout[1] if timeout[1] < math.inf else None, 65 | None, 66 | None, 67 | ) 68 | else: 69 | kwargs["timeout"] = timeout if timeout < math.inf else None 70 | 71 | if params is not None: 72 | kwargs["params"] = list(params) 73 | 74 | return kwargs 75 | 76 | 77 | class ClientSync(ClientSyncBase[httpx.Client]): 78 | __slots__ = ("_session_base", "_session_kwargs", "_session_laddr", "_session_uds_path") 79 | _session_base: "httpx._types.URLTypes" 80 | _session_kwargs: RequestArgs 81 | _session_laddr: ty.Optional[str] 82 | _session_uds_path: ty.Optional[str] 83 | 84 | def _init(self, addr: addr_t, base: str, *, # type: ignore[no-any-unimported] 85 | auth: auth_t, 86 | cookies: cookies_t, 87 | headers: headers_t, 88 | params: params_t, 89 | timeout: timeout_t) -> None: 90 | base_url: str 91 | uds_path: ty.Optional[str] 92 | family: socket.AddressFamily 93 | host_numeric: bool 94 | base_url, uds_path, family, host_numeric = multiaddr_to_url_data(addr, base) 95 | 96 | self._session_laddr = None 97 | self._session_uds_path = None 98 | if family != socket.AF_UNSPEC: 99 | if family == socket.AF_INET: 100 | self._session_laddr = "0.0.0.0" 101 | elif family == 
socket.AF_INET6:
102 | 				self._session_laddr = "::"
103 | 			elif family == socket.AF_UNIX:
104 | 				self._session_uds_path = uds_path
105 | 			else:
106 | 				assert False, ("multiaddr_to_url_data should only return a socket "
107 | 				               "address family of AF_INET, AF_INET6, AF_UNIX or AF_UNSPEC")
108 | 
109 | 		self._session_base = base_url
110 | 		self._session_kwargs = map_args_to_httpx(
111 | 			auth=auth,
112 | 			cookies=cookies,
113 | 			headers=headers,
114 | 			params=params,
115 | 			timeout=timeout,
116 | 		)
117 | 
118 | 	def _make_session(self) -> httpx.Client:
119 | 		connection_pool = httpcore.SyncConnectionPool(
120 | 			local_address = self._session_laddr,
121 | 			uds = self._session_uds_path,
122 | 
123 | 			#XXX: Argument values duplicated from httpx._client.Client._init_transport:
124 | 			keepalive_expiry = 5.0, #XXX: Value duplicated from httpx._client.KEEPALIVE_EXPIRY
125 | 			max_connections = 100, #XXX: Value duplicated from httpx._config.DEFAULT_LIMITS
126 | 			max_keepalive_connections = 20, #XXX: Value duplicated from httpx._config.DEFAULT_LIMITS
127 | 			ssl_context = httpx.create_ssl_context(trust_env=True),
128 | 		)
129 | 		return httpx.Client(**self._session_kwargs,
130 | 		                    base_url = self._session_base,
131 | 		                    transport = connection_pool)
132 | 
133 | 	def _do_raise_for_status(self, response: httpx.Response) -> None:
134 | 		try:
135 | 			response.raise_for_status()
136 | 		except httpx.HTTPError as error:
137 | 			content: ty.List[object] = []
138 | 			try:
139 | 				decoder: encoding.Json = encoding.get_encoding("json")
140 | 				for chunk in response.iter_bytes():
141 | 					content += list(decoder.parse_partial(chunk))
142 | 				content += list(decoder.parse_finalize())
143 | 			except exceptions.DecodingError:
144 | 				pass
145 | 
146 | 			# If we have decoded an error response from the server,
147 | 			# use that as the exception message; otherwise, just pass
148 | 			# the exception on to the caller.
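			# For reference, a typical go-IPFS error body decodes to a single
			# object of roughly this shape (the values here are examples only):
			#
			#     {"Message": "invalid 'ipfs ref' path", "Code": 0, "Type": "error"}
			#
			# which is exactly the situation the check below detects.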
149 | if len(content) == 1 \ 150 | and isinstance(content[0], dict) \ 151 | and "Message" in content[0]: 152 | msg: str = content[0]["Message"] 153 | raise exceptions.ErrorResponse(msg, error) from error 154 | else: 155 | raise exceptions.StatusError(error) from error 156 | 157 | def _request( 158 | self, method: str, path: str, params: ty.Sequence[ty.Tuple[str, str]], *, 159 | auth: auth_t, 160 | data: reqdata_sync_t, 161 | headers: headers_t, 162 | timeout: timeout_t, 163 | chunk_size: ty.Optional[int], 164 | ) -> ty.Tuple[ty.List[Closable], ty.Generator[bytes, ty.Any, ty.Any]]: 165 | # Ensure path is relative so that it is resolved relative to the base 166 | while path.startswith("/"): 167 | path = path[1:] 168 | 169 | try: 170 | # Determine session object to use 171 | closables: ty.List[Closable] 172 | session: httpx.Client 173 | closables, session = self._access_session() 174 | 175 | # Do HTTP request (synchronously) and map exceptions 176 | try: 177 | res: httpx.Response = session.stream( 178 | method=method, 179 | url=path, 180 | **map_args_to_httpx( 181 | params=params, 182 | auth=auth, 183 | headers=headers, 184 | timeout=timeout, 185 | ), 186 | data=data, 187 | ).__enter__() 188 | closables.insert(0, res) 189 | except (httpx.ConnectTimeout, httpx.ReadTimeout, httpx.WriteTimeout) as error: 190 | raise exceptions.TimeoutError(error) from error 191 | except httpx.NetworkError as error: 192 | raise exceptions.ConnectionError(error) from error 193 | except httpx.ProtocolError as error: 194 | raise exceptions.ProtocolError(error) from error 195 | 196 | # Raise exception for response status 197 | # (optionally incorporating the response message, if available) 198 | self._do_raise_for_status(res) 199 | 200 | return closables, res.iter_bytes() # type: ignore[return-value] #FIXME: httpx 201 | except: 202 | for closable in closables: 203 | closable.close() 204 | raise -------------------------------------------------------------------------------- /ipfshttpclient/http_requests.py: -------------------------------------------------------------------------------- 1 | """HTTP client for API requests based on good old requests library 2 | 3 | This exists mainly for Python 3.5 compatibility. 4 | """ 5 | 6 | import math 7 | import http.client 8 | import os 9 | import typing as ty 10 | import urllib.parse 11 | 12 | import urllib3.exceptions # type: ignore[import] 13 | 14 | from . import encoding 15 | from . import exceptions 16 | from .http_common import ( 17 | ClientSyncBase, multiaddr_to_url_data, 18 | 19 | addr_t, auth_t, cookies_t, headers_t, params_t, reqdata_sync_t, timeout_t, 20 | Closable, 21 | ) 22 | 23 | PATCH_REQUESTS = (os.environ.get("PY_IPFS_HTTP_CLIENT_PATCH_REQUESTS", "yes").lower() 24 | not in ("false", "no")) 25 | if PATCH_REQUESTS: 26 | from . 
import requests_wrapper as requests 27 | elif not ty.TYPE_CHECKING: # pragma: no cover (always enabled in production) 28 | import requests 29 | 30 | 31 | def map_args_to_requests( 32 | *, 33 | auth: auth_t = None, 34 | cookies: cookies_t = None, 35 | headers: headers_t = None, 36 | params: params_t = None, 37 | timeout: timeout_t = None 38 | ) -> ty.Dict[str, ty.Any]: 39 | kwargs = {} # type: ty.Dict[str, ty.Any] 40 | 41 | if auth is not None: 42 | kwargs["auth"] = auth 43 | 44 | if cookies is not None: 45 | kwargs["cookies"] = cookies 46 | 47 | if headers is not None: 48 | kwargs["headers"] = headers 49 | 50 | if timeout is not None: 51 | if isinstance(timeout, tuple): 52 | timeout_ = ( 53 | timeout[0] if timeout[0] < math.inf else None, 54 | timeout[1] if timeout[1] < math.inf else None, 55 | ) # type: ty.Union[ty.Optional[float], ty.Tuple[ty.Optional[float], ty.Optional[float]]] 56 | else: 57 | timeout_ = timeout if timeout < math.inf else None 58 | kwargs["timeout"] = timeout_ 59 | 60 | if params is not None: 61 | kwargs["params"] = {} 62 | for name, value in params: 63 | if name not in kwargs["params"]: 64 | kwargs["params"][name] = value 65 | elif not isinstance(kwargs["params"][name], list): 66 | kwargs["params"][name] = [kwargs["params"][name], value] 67 | else: 68 | kwargs["params"][name].append(value) 69 | 70 | return kwargs 71 | 72 | 73 | class ClientSync(ClientSyncBase[requests.Session]): # type: ignore[name-defined] 74 | __slots__ = ("_base_url", "_default_timeout", "_request_proxies", "_session_props") 75 | #_base_url: str 76 | #_default_timeout: timeout_t 77 | #_request_proxies: ty.Optional[ty.Dict[str, str]] 78 | #_session_props: ty.Dict[str, ty.Any] 79 | 80 | def _init(self, addr: addr_t, base: str, *, # type: ignore[no-any-unimported] 81 | auth: auth_t, 82 | cookies: cookies_t, 83 | headers: headers_t, 84 | params: params_t, 85 | timeout: timeout_t) -> None: 86 | self._base_url, uds_path, family, host_numeric = multiaddr_to_url_data(addr, base) 87 | 88 | self._session_props = map_args_to_requests( 89 | auth=auth, 90 | cookies=cookies, 91 | headers=headers, 92 | params=params, 93 | ) 94 | self._default_timeout = timeout 95 | if PATCH_REQUESTS: # pragma: no branch (always enabled in production) 96 | self._session_props["family"] = family 97 | 98 | # Ensure that no proxy lookups are done for the UDS pseudo-hostname 99 | # 100 | # I'm well aware of the `.proxies` attribute of the session object: As it turns out, 101 | # setting *that* attribute will *not* bypass system proxy resolution – only the 102 | # per-request keyword-argument can do *that*…! 
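		#
		# A minimal sketch of that distinction (illustrative only, not code
		# used by this module):
		#
		#     session = requests.Session()
		#     session.proxies = {"no_proxy": "unix-socket-host"}  # env proxies still consulted
		#     session.get(url, proxies={"no_proxy": "unix-socket-host"})  # actually bypassed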
103 | 		self._request_proxies = None # type: ty.Optional[ty.Dict[str, str]]
104 | 		if uds_path:
105 | 			self._request_proxies = {
106 | 				"no_proxy": urllib.parse.quote(uds_path, safe=""),
107 | 			}
108 | 
109 | 	def _make_session(self) -> requests.Session: # type: ignore[name-defined]
110 | 		session = requests.Session() # type: ignore[attr-defined]
111 | 		try:
112 | 			for name, value in self._session_props.items():
113 | 				setattr(session, name, value)
114 | 			return session
115 | 		# It is very unlikely that this would ever error, but if it does, try our
116 | 		# best to prevent a leak
117 | 		except: # pragma: no cover
118 | 			session.close()
119 | 			raise
120 | 
121 | 	def _do_raise_for_status(self, response: requests.Response) -> None: # type: ignore[name-defined]
122 | 		try:
123 | 			response.raise_for_status()
124 | 		except requests.exceptions.HTTPError as error: # type: ignore[attr-defined]
125 | 			content = []
126 | 			try:
127 | 				decoder = encoding.get_encoding("json")
128 | 				for chunk in response.iter_content(chunk_size=None):
129 | 					content += list(decoder.parse_partial(chunk))
130 | 				content += list(decoder.parse_finalize())
131 | 			except exceptions.DecodingError:
132 | 				pass
133 | 
134 | 			# If we have decoded an error response from the server,
135 | 			# use that as the exception message; otherwise, just pass
136 | 			# the exception on to the caller.
137 | 			if len(content) == 1 \
138 | 			   and isinstance(content[0], dict) \
139 | 			   and "Message" in content[0]:
140 | 				msg = content[0]["Message"]
141 | 				raise exceptions.ErrorResponse(msg, error) from error
142 | 			else:
143 | 				raise exceptions.StatusError(error) from error
144 | 
145 | 	def _request(
146 | 			self, method: str, path: str, params: ty.Sequence[ty.Tuple[str, str]], *,
147 | 			auth: auth_t,
148 | 			data: reqdata_sync_t,
149 | 			headers: headers_t,
150 | 			timeout: timeout_t,
151 | 			chunk_size: ty.Optional[int]
152 | 	) -> ty.Tuple[ty.List[Closable], ty.Generator[bytes, ty.Any, ty.Any]]:
153 | 		# Ensure path is relative so that it is resolved relative to the base
154 | 		while path.startswith("/"):
155 | 			path = path[1:]
156 | 
157 | 		url = urllib.parse.urljoin(self._base_url, path)
158 | 
159 | 		try:
160 | 			# Determine session object to use
161 | 			closables, session = self._access_session()
162 | 
163 | 			# Do HTTP request (synchronously) and map exceptions
164 | 			try:
165 | 				res = session.request(
166 | 					method=method,
167 | 					url=url,
168 | 					**map_args_to_requests(
169 | 						params=params,
170 | 						auth=auth,
171 | 						headers=headers,
172 | 						timeout=(timeout if timeout is not None else self._default_timeout),
173 | 					),
174 | 					proxies=self._request_proxies,
175 | 					data=data,
176 | 					stream=True,
177 | 				)
178 | 				closables.insert(0, res)
179 | 			except (requests.ConnectTimeout, requests.Timeout) as error: # type: ignore[attr-defined]
180 | 				raise exceptions.TimeoutError(error) from error
181 | 			except requests.ConnectionError as error: # type: ignore[attr-defined]
182 | 				# Report protocol violations separately
183 | 				#
184 | 				# This used to happen because requests wouldn't catch
185 | 				# `http.client.HTTPException` at all; now we recreate
186 | 				# this behaviour manually if we detect it.
187 | 				if isinstance(error.args[0], urllib3.exceptions.ProtocolError):
188 | 					raise exceptions.ProtocolError(error.args[0]) from error.args[0]
189 | 
190 | 				raise exceptions.ConnectionError(error) from error
191 | 			# Looks like the following error doesn't happen anymore with modern requests?
192 | except http.client.HTTPException as error: # pragma: no cover 193 | raise exceptions.ProtocolError(error) from error 194 | 195 | # Raise exception for response status 196 | # (optionally incorporating the response message, if available) 197 | self._do_raise_for_status(res) 198 | 199 | return closables, res.iter_content(chunk_size=chunk_size) 200 | except: 201 | for closable in closables: 202 | closable.close() 203 | raise -------------------------------------------------------------------------------- /ipfshttpclient/utils.py: -------------------------------------------------------------------------------- 1 | """A module to handle generic operations. 2 | """ 3 | import mimetypes 4 | import os 5 | import pathlib 6 | import sys 7 | import typing as ty 8 | from functools import wraps 9 | 10 | if ty.TYPE_CHECKING: 11 | import typing_extensions as ty_ext 12 | else: 13 | ty_ext = ty 14 | 15 | if sys.version_info >= (3, 8): #PY38+ 16 | Literal = ty_ext.Literal 17 | Protocol = ty_ext.Protocol 18 | 19 | Literal_True = ty.Literal[True] 20 | Literal_False = ty.Literal[False] 21 | else: #PY37- 22 | class Literal(ty.Generic[ty.T]): 23 | ... 24 | 25 | class Protocol: 26 | ... 27 | 28 | Literal_True = Literal_False = bool 29 | 30 | if sys.version_info >= (3, 6): #PY36+ 31 | # `os.PathLike` only has a type param while type checking 32 | if ty.TYPE_CHECKING: 33 | PathLike = os.PathLike 34 | PathLike_str = os.PathLike[str] 35 | PathLike_bytes = os.PathLike[bytes] 36 | else: 37 | class PathLike(Protocol, ty.Generic[ty.AnyStr]): 38 | def __fspath__(self) -> ty.AnyStr: 39 | ... 40 | 41 | PathLike_str = PathLike_bytes = os.PathLike 42 | 43 | path_str_t = ty.Union[str, PathLike_str] 44 | path_bytes_t = ty.Union[bytes, PathLike_bytes] 45 | path_t = ty.Union[path_str_t, path_bytes_t] 46 | AnyPath = ty.TypeVar("AnyPath", str, PathLike_str, bytes, PathLike_bytes) 47 | 48 | path_types = (str, bytes, os.PathLike,) 49 | path_obj_types = (os.PathLike,) 50 | 51 | @ty.overload 52 | def convert_path(path: ty.AnyStr) -> ty.AnyStr: 53 | ... 54 | 55 | @ty.overload 56 | def convert_path(path: PathLike_str) -> PathLike_str: 57 | ... 58 | 59 | @ty.overload 60 | def convert_path(path: PathLike_bytes) -> PathLike_bytes: 61 | ... 62 | 63 | @ty.overload 64 | def convert_path(path: AnyPath) -> AnyPath: 65 | ... 66 | 67 | def convert_path(path: AnyPath) -> AnyPath: 68 | # Not needed since all system APIs also accept an `os.PathLike` 69 | return path 70 | else: #PY35 71 | class PathLike(pathlib.PurePath, ty.Generic[ty.AnyStr]): 72 | ... 73 | 74 | path_str_t = ty.Union[str, pathlib.PurePath] 75 | path_bytes_t = ty.Union[bytes] 76 | path_t = ty.Union[path_str_t, path_bytes_t] 77 | AnyPath = ty.TypeVar("AnyPath", str, pathlib.PurePath, bytes) 78 | 79 | path_types = (str, bytes, pathlib.PurePath,) 80 | path_obj_types = (pathlib.PurePath,) 81 | 82 | # Independently maintained forward-port of `pathlib` for Py27 and others 83 | try: 84 | import pathlib2 85 | path_types += (pathlib2.PurePath,) 86 | path_obj_types += (pathlib2.PurePath,) 87 | except ImportError: 88 | pass 89 | 90 | @ty.overload 91 | def convert_path(path: path_str_t) -> str: 92 | ... 93 | 94 | @ty.overload 95 | def convert_path(path: path_bytes_t) -> bytes: 96 | ... 
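	# Illustrative behaviour of the implementation below (assuming a POSIX
	# platform; these calls are examples, not part of the module):
	#
	#     convert_path(pathlib.PurePath("a/b"))  # -> "a/b" (plain str for system APIs)
	#     convert_path(b"a/b")                   # -> b"a/b" (returned unchanged)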
97 | 98 | def convert_path(path: path_t) -> ty.Union[str, bytes]: 99 | # `pathlib`'s PathLike objects need to be treated specially and 100 | # converted to strings when interacting with system APIs 101 | return str(path) if isinstance(path, path_obj_types) else path 102 | 103 | 104 | # work around GH/mypy/mypy#731: no recursive structural types yet 105 | json_primitive_t = ty.Union[bool, float, int, str] 106 | json_value_t = ty.Union[ 107 | json_primitive_t, 108 | "json_list_t", 109 | "json_dict_t" 110 | ] 111 | 112 | 113 | class json_list_t(ty.List[json_value_t]): 114 | pass 115 | 116 | 117 | class json_dict_t(ty.Dict[str, json_value_t]): 118 | pass 119 | 120 | 121 | def maybe_fsencode(val: str, ref: ty.AnyStr) -> ty.AnyStr: 122 | """Encodes the string *val* using the system filesystem encoding if *ref* is 123 | of type :type:`bytes`""" 124 | if isinstance(ref, bytes): 125 | return os.fsencode(val) 126 | else: 127 | return val 128 | 129 | 130 | def guess_mimetype(filename: str) -> str: 131 | """Guesses the mimetype of a file based on the given ``filename``. 132 | 133 | .. code-block:: python 134 | 135 | >>> guess_mimetype('example.txt') 136 | 'text/plain' 137 | >>> guess_mimetype('/foo/bar/example') 138 | 'application/octet-stream' 139 | 140 | Parameters 141 | ---------- 142 | filename 143 | The file name or path for which the mimetype is to be guessed 144 | """ 145 | fn = os.path.basename(filename) 146 | return mimetypes.guess_type(fn)[0] or 'application/octet-stream' 147 | 148 | 149 | clean_file_t = ty.Union[path_t, ty.IO[bytes], int] 150 | 151 | 152 | def clean_file(file: clean_file_t) -> ty.Tuple[ty.IO[bytes], bool]: 153 | """Returns a tuple containing a file-like object and a close indicator 154 | 155 | This ensures the given file is opened and keeps track of files that should 156 | be closed after use (files that were not open prior to this function call). 157 | 158 | Raises 159 | ------ 160 | OSError 161 | Accessing the given file path failed 162 | 163 | Parameters 164 | ---------- 165 | file 166 | A filepath or file-like object that may or may not need to be 167 | opened 168 | """ 169 | if isinstance(file, int): 170 | return os.fdopen(file, 'rb', closefd=False), True 171 | elif not hasattr(file, 'read'): 172 | file = ty.cast(path_t, file) # Cannot be ty.IO[bytes] without `.read()` 173 | return open(convert_path(file), 'rb'), True 174 | else: 175 | file = ty.cast(ty.IO[bytes], file) # Must be ty.IO[bytes] 176 | return file, False 177 | 178 | 179 | def clean_files(files: ty.Union[clean_file_t, ty.Iterable[clean_file_t]]) \ 180 | -> ty.Generator[ty.Tuple[ty.IO[bytes], bool], ty.Any, ty.Any]: 181 | """Generates tuples with a file-like object and a close indicator 182 | 183 | This is a generator of tuples, where the first element is the file object 184 | and the second element is a boolean which is True if this module opened the 185 | file (and thus should close it). 
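	A hypothetical usage sketch (``do_something_with`` is a stand-in and not
	part of this module):

	.. code-block:: python

		for file, need_close in clean_files(["a.bin", open("b.bin", "rb")]):
			try:
				do_something_with(file)
			finally:
				if need_close:
					file.close()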
186 | 187 | Raises 188 | ------ 189 | OSError 190 | Accessing the given file path failed 191 | 192 | Parameters 193 | ---------- 194 | files 195 | Collection or single instance of a filepath and file-like object 196 | """ 197 | if not isinstance(files, path_types) and not hasattr(files, "read"): 198 | for f in ty.cast(ty.Iterable[clean_file_t], files): 199 | yield clean_file(f) 200 | else: 201 | yield clean_file(ty.cast(clean_file_t, files)) 202 | 203 | 204 | T = ty.TypeVar("T") 205 | F = ty.TypeVar("F", bound=ty.Callable[..., ty.Dict[str, T]]) 206 | 207 | 208 | class return_field(ty.Generic[T]): 209 | """Decorator that returns the given field of a json response. 210 | 211 | Parameters 212 | ---------- 213 | field 214 | The response field to be returned for all invocations 215 | """ 216 | __slots__ = ("field",) 217 | #field: str 218 | 219 | def __init__(self, field: str) -> None: 220 | self.field = field # type: str 221 | 222 | def __call__(self, cmd: F) -> ty.Callable[..., T]: 223 | """Wraps a command so that only a specified field is returned. 224 | 225 | Parameters 226 | ---------- 227 | cmd 228 | A command that is intended to be wrapped 229 | """ 230 | @wraps(cmd) 231 | def wrapper(*args: ty.Any, **kwargs: ty.Any) -> T: 232 | """Returns the specified field as returned by the wrapped function 233 | 234 | Parameters 235 | ---------- 236 | args 237 | Positional parameters to pass to the wrapped callable 238 | kwargs 239 | Named parameter to pass to the wrapped callable 240 | """ 241 | res = cmd(*args, **kwargs) # type: ty.Dict[str, T] 242 | return res[self.field] 243 | return wrapper -------------------------------------------------------------------------------- /ipfshttpclient/version.py: -------------------------------------------------------------------------------- 1 | # _Versioning scheme:_ 2 | # The major and minor version of each release correspond to the supported 3 | # IPFS daemon version. The revision number will be updated whenever we make 4 | # a new release for the `py-ipfs-http-client` for that daemon version. 5 | # 6 | # Example: The first client version to support the `0.4.x`-series of the IPFS 7 | # HTTP API will have version `0.4.0`, the second version will have version 8 | # `0.4.1` and so on. When IPFS `0.5.0` is released, the first client version 9 | # to support it will also be released as `0.5.0`. 10 | 11 | __version__ = "0.7.0a1" 12 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["flit-core >=2,<4"] 3 | build-backend = "flit_core.buildapi" 4 | 5 | [tool.flit.metadata] 6 | module = "ipfshttpclient" 7 | 8 | author = "py-ipfs-http-client team" 9 | author-email = "" 10 | home-page = "https://ipfs.io/ipns/12D3KooWEqnTdgqHnkkwarSrJjeMP2ZJiADWLYADaNvUb6SQNyPF/" 11 | keywords = "ipfs storage distribution development" 12 | license = "MIT License" 13 | description-file = "README.md" 14 | 15 | # Notes: `typing.NoReturn` was introduced post-release in Python 3.5.4 and 3.6.2 and had 16 | # a critical bug (https://bugs.python.org/issue34921) in 3.7.0 to 3.7.1. So the 17 | # compatible versions below reflect the range of Python versions with working 18 | # `typing.NoReturn` function signature support. (Also, many other `typing` module 19 | # items were only introduced post-release of Python 3.5 and 3.6 and version 20 | # restrictions on these versions ensure that those are all available as well.) 
21 | requires-python = ">=3.5.4,!=3.6.0,!=3.6.1,!=3.7.0,!=3.7.1" 22 | requires = [ 23 | "multiaddr (>=0.0.7)", 24 | "requests (>=2.11)" 25 | ] 26 | 27 | classifiers = [ 28 | "Development Status :: 3 - Alpha", 29 | 30 | # Indicate who your project is intended for 31 | "Intended Audience :: Developers", 32 | "Intended Audience :: Information Technology", 33 | "Intended Audience :: Science/Research", 34 | 35 | "Topic :: Internet", 36 | "Topic :: Scientific/Engineering", 37 | "Topic :: System :: Filesystems", 38 | "Topic :: System :: Networking", 39 | 40 | # Pick your license as you wish (should match "license" above) 41 | "License :: OSI Approved :: MIT License", 42 | 43 | # Specify the Python versions you support here. In particular, ensure 44 | # that you indicate whether you support Python 2, Python 3 or both. 45 | "Programming Language :: Python :: 3 :: Only", 46 | "Programming Language :: Python :: 3.5", 47 | "Programming Language :: Python :: 3.6", 48 | "Programming Language :: Python :: 3.7", 49 | "Programming Language :: Python :: 3.8" 50 | ] 51 | 52 | [tool.flit.metadata.urls] 53 | Documentation = "https://ipfs.io/ipns/12D3KooWEqnTdgqHnkkwarSrJjeMP2ZJiADWLYADaNvUb6SQNyPF/docs/" 54 | 55 | -------------------------------------------------------------------------------- /test/combine-coverage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import glob 3 | import os 4 | import pathlib 5 | 6 | import coverage 7 | 8 | # Monkey-patch `coverage` to not randomly delete files 9 | import coverage.data 10 | coverage.data.file_be_gone = lambda *a: None 11 | 12 | # Switch working directory to project directory 13 | BASE_PATH = pathlib.Path(__file__).parent.parent 14 | DATA_PATH = BASE_PATH / "coverage" 15 | os.chdir(str(BASE_PATH)) 16 | 17 | 18 | cov = coverage.Coverage() 19 | 20 | # Load the most recent coverage data collected for each test platform 21 | cov.combine(glob.glob("build/test-py*/cov_raw"), strict=True) 22 | 23 | cov.report() 24 | cov.html_report(directory=str(DATA_PATH / "cov_html")) 25 | cov.xml_report(outfile=str(DATA_PATH / "cov.xml")) 26 | -------------------------------------------------------------------------------- /test/functional/.gitattributes: -------------------------------------------------------------------------------- 1 | # Don't change the line-ending style of test files on Windows as this changes the test hashes 2 | /fake_dir/** text eol=lf 3 | -------------------------------------------------------------------------------- /test/functional/conftest.py: -------------------------------------------------------------------------------- 1 | # Note that this file is special in that py.test will automatically import this file and gather 2 | # its list of fixtures even if it is not directly imported into the corresponding test case. 
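#
# A minimal usage sketch (hypothetical test module; assumes a running IPFS
# daemon): test functions simply accept one of the fixtures defined below as
# a parameter, without ever importing this file:
#
#     def test_add_and_cat(client):
#         resource = client.add_str("Hello")
#         assert client.cat(resource) == b"Hello"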
3 | import pathlib
4 | 
5 | import pytest
6 | 
7 | import ipfshttpclient
8 | 
9 | 
10 | TEST_DIR = pathlib.Path(__file__).parent
11 | 
12 | 
13 | __is_available = None
14 | def is_available():  # noqa
15 | 	"""
16 | 	Return whether the IPFS daemon is reachable or not
17 | 	"""
18 | 	global __is_available
19 | 
20 | 	if not isinstance(__is_available, bool):
21 | 		try:
22 | 			ipfshttpclient.connect()
23 | 		except ipfshttpclient.exceptions.Error as error:
24 | 			__is_available = False
25 | 
26 | 			# Make sure version incompatibility is displayed to users
27 | 			if isinstance(error, ipfshttpclient.exceptions.VersionMismatch):
28 | 				raise
29 | 		else:
30 | 			__is_available = True
31 | 
32 | 	return __is_available
33 | 
34 | 
35 | def sort_by_key(items, key="Name"):
36 | 	return sorted(items, key=lambda x: x[key])
37 | 
38 | 
39 | def get_client(offline=False):
40 | 	if is_available():
41 | 		return ipfshttpclient.Client(offline=offline)
42 | 	else:
43 | 		pytest.skip("Running IPFS node required")
44 | 
45 | 
46 | @pytest.fixture(scope="function")
47 | def client():
48 | 	"""Create a client with function lifetime to connect to the IPFS daemon.
49 | 
50 | 	Each test function should instantiate a fresh client, so use this
51 | 	fixture in test functions."""
52 | 	with get_client() as client:
53 | 		yield client
54 | 
55 | 
56 | @pytest.fixture(scope="function")
57 | def offline_client():
58 | 	"""Create a client in offline mode with function lifetime"""
59 | 	with get_client(offline=True) as client:
60 | 		yield client
61 | 
62 | 
63 | @pytest.fixture(scope="module")
64 | def module_client():
65 | 	"""Create a client with a module lifetime to connect to the IPFS daemon.
66 | 
67 | 	For module-scope fixtures that need a client, if the client is to be created
68 | 	automatically using a fixture (to keep client creation code centralized
69 | 	here), that client-creating fixture must also be module-scope, so use
70 | 	this fixture in module-scoped fixtures."""
71 | 	with get_client() as client:
72 | 		yield client
73 | 
74 | 
75 | @pytest.fixture(scope="module")
76 | def module_offline_client():
77 | 	"""Create a client in offline mode with module lifetime."""
78 | 	with get_client(offline=True) as client:
79 | 		yield client
80 | 
81 | 
82 | @pytest.fixture
83 | def cleanup_pins(client):
84 | 	pinned = set(client.pin.ls(type="recursive")["Keys"])
85 | 
86 | 	yield
87 | 
88 | 	for multihash in client.pin.ls(type="recursive")["Keys"]:
89 | 		if multihash not in pinned:
90 | 			client.pin.rm(multihash)
91 | 
92 | 
93 | @pytest.fixture
94 | def daemon():
95 | 	"""Result replaced by plugin in `run-tests.py` with the subprocess object of
96 | 	the spawned daemon."""
97 | 	return None
98 | 
--------------------------------------------------------------------------------
/test/functional/fake_dir/fsdfgh:
--------------------------------------------------------------------------------
1 | dsadsad
2 | 
--------------------------------------------------------------------------------
/test/functional/fake_dir/popoiopiu:
--------------------------------------------------------------------------------
1 | oooofiopfsdpio
2 | 
--------------------------------------------------------------------------------
/test/functional/fake_dir/test2/fssdf:
--------------------------------------------------------------------------------
1 | dsdsdsadsdsad
2 | 
--------------------------------------------------------------------------------
/test/functional/fake_dir/test2/high/five/dummy:
--------------------------------------------------------------------------------
1 | 😉
2 | 
-------------------------------------------------------------------------------- /test/functional/fake_dir/test2/llllg: -------------------------------------------------------------------------------- 1 | dsdsadjs 2 | -------------------------------------------------------------------------------- /test/functional/fake_dir/test3/ppppoooooooooo: -------------------------------------------------------------------------------- 1 | dsasasd 2 | -------------------------------------------------------------------------------- /test/functional/fake_dir_almost_empty/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iosifpeterfi/py-ipfs-http-client/bcec97aa83cf0b0348d8e160c3f68dc8495dbc1b/test/functional/fake_dir_almost_empty/.gitignore -------------------------------------------------------------------------------- /test/functional/fake_json/data.car: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/iosifpeterfi/py-ipfs-http-client/bcec97aa83cf0b0348d8e160c3f68dc8495dbc1b/test/functional/fake_json/data.car -------------------------------------------------------------------------------- /test/functional/fake_json/links.json: -------------------------------------------------------------------------------- 1 | { 2 | "Data": "another", 3 | "Links": [ { 4 | "Name": "some link", 5 | "Hash": "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V", 6 | "Size": 8 7 | } ] 8 | } 9 | -------------------------------------------------------------------------------- /test/functional/fake_json/no_links.json: -------------------------------------------------------------------------------- 1 | { 2 | "Data": "abc" 3 | } 4 | -------------------------------------------------------------------------------- /test/functional/test_bitswap.py: -------------------------------------------------------------------------------- 1 | def test_wantlist(client): 2 | result = client.bitswap.wantlist(peer="QmdkJZUWnVkEc6yfptVu4LWY8nHkEnGwsxqQ233QSGj8UP") 3 | assert "Keys" in result 4 | 5 | 6 | def test_stat(client): 7 | result = client.bitswap.stat() 8 | assert "Wantlist" in result -------------------------------------------------------------------------------- /test/functional/test_block.py: -------------------------------------------------------------------------------- 1 | import cid 2 | import io 3 | import pytest 4 | 5 | import conftest 6 | 7 | TEST1_FILEPATH = conftest.TEST_DIR / "fake_dir" / "fsdfgh" 8 | TEST1_CID_STR = "QmPevo2B1pwvDyuZyJbWVfhwkaGPee3f1kX36wFmqx1yna" 9 | TEST1_SIZE = 8 10 | 11 | TEST2_CONTENT = b"Hello World!" 
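# For context: the CID defined below is the CIDv1 identifier of TEST2_CONTENT
# when stored as a single "raw" block. Roughly (a sketch of the usual
# multihash layout, not something these tests exercise):
#
#     import hashlib
#     digest = hashlib.sha256(TEST2_CONTENT).digest()
#     # multihash = b"\x12\x20" + digest  (sha2-256, 32-byte digest)
#     # CID       = multibase-base32 of: 0x01 (CIDv1) + 0x55 ("raw") + multihash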
12 | TEST2_CID_STR = "bafkreid7qoywk77r7rj3slobqfekdvs57qwuwh5d2z3sqsw52iabe3mqne" 13 | TEST2_CID_OBJ = cid.make_cid(TEST2_CID_STR) 14 | TEST2_SIZE = len(TEST2_CONTENT) 15 | 16 | 17 | @pytest.mark.dependency() 18 | def test_put(client): 19 | expected_keys = {"Key", "Size"} 20 | res = client.block.put(TEST1_FILEPATH) 21 | assert set(res.keys()).issuperset(expected_keys) 22 | assert res["Key"] == TEST1_CID_STR 23 | 24 | 25 | @pytest.mark.dependency(depends=["test_put"]) 26 | def test_stat(client): 27 | expected_keys = {"Key", "Size"} 28 | res = client.block.stat(TEST1_CID_STR) 29 | assert set(res.keys()).issuperset(expected_keys) 30 | 31 | 32 | @pytest.mark.dependency(depends=["test_put"]) 33 | def test_get(client): 34 | assert len(client.block.get(TEST1_CID_STR)) == TEST1_SIZE 35 | 36 | 37 | @pytest.mark.dependency() 38 | def test_put_str(client): 39 | expected_keys = {"Key", "Size"} 40 | res = client.block.put(io.BytesIO(TEST2_CONTENT), opts={"format": "raw"}) 41 | assert set(res.keys()).issuperset(expected_keys) 42 | assert res["Key"] == TEST2_CID_STR 43 | 44 | 45 | @pytest.mark.dependency(depends=["test_put_str"]) 46 | def test_stat_cid_obj(client): 47 | assert len(client.block.get(TEST2_CID_OBJ)) == TEST2_SIZE -------------------------------------------------------------------------------- /test/functional/test_dag.py: -------------------------------------------------------------------------------- 1 | import io 2 | 3 | import pytest 4 | 5 | import conftest 6 | 7 | 8 | def test_put_get_resolve(client): 9 | version = tuple(map(int, client.version()["Version"].split('-', 1)[0].split('.'))) 10 | if version < (0, 5): 11 | pytest.skip("IPFS DAG APIs first appeared in go-IPFS 0.5") 12 | 13 | data = io.BytesIO(br'{"links": []}') 14 | response = client.dag.put(data) 15 | 16 | assert 'Cid' in response 17 | assert '/' in response['Cid'] 18 | assert response['Cid']['/'] == 'bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya' 19 | 20 | response = client.dag.get('bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya') 21 | 22 | assert 'links' in response 23 | assert response['links'] == [] 24 | 25 | response = client.dag.resolve('bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya') 26 | 27 | assert 'Cid' in response 28 | assert response['Cid']['/'] == 'bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya' 29 | 30 | 31 | def test_import_export(client): 32 | version = tuple(map(int, client.version()["Version"].split('-', 1)[0].split('.'))) 33 | if version < (0, 5): 34 | pytest.skip("IPFS DAG APIs first appeared in go-IPFS 0.5") 35 | 36 | # This file was created by inserting a simple JSON object into IPFS and 37 | # exporting it using `ipfs dag export > file.car` 38 | data_car = conftest.TEST_DIR / 'fake_json' / 'data.car' 39 | data_car = str(data_car) #PY35 40 | 41 | with open(data_car, 'rb') as file: 42 | response = client.dag.imprt(file) 43 | 44 | assert 'Root' in response 45 | assert 'Cid' in response['Root'] 46 | assert '/' in response['Root']['Cid'] 47 | 48 | cid = response['Root']['Cid'] 49 | assert cid['/'] == 'bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya' 50 | 51 | data = client.dag.export('bafyreidepjmjhvhlvp5eyxqpmyyi7rxwvl7wsglwai3cnvq63komq4tdya') 52 | 53 | with open(data_car, 'rb') as file: 54 | assert data == file.read() 55 | -------------------------------------------------------------------------------- /test/functional/test_key.py: -------------------------------------------------------------------------------- 1 | def 
test_add_list_rename_rm(client): 2 | # Remove keys if they already exist 3 | key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"])) 4 | if "ipfshttpclient-test-rsa" in key_list: 5 | client.key.rm("ipfshttpclient-test-rsa") 6 | if "ipfshttpclient-test-ed" in key_list: 7 | client.key.rm("ipfshttpclient-test-ed") 8 | 9 | # Add new RSA and ED25519 key 10 | key1 = client.key.gen("ipfshttpclient-test-rsa", "rsa")["Name"] 11 | key2 = client.key.gen("ipfshttpclient-test-ed", "ed25519")["Name"] 12 | 13 | # Validate the keys exist now 14 | key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"])) 15 | assert key1 in key_list 16 | assert key2 in key_list 17 | 18 | # Rename the EC key 19 | key2_new = client.key.rename(key2, "ipfshttpclient-test-ed2")["Now"] 20 | 21 | # Validate that the key was successfully renamed 22 | key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"])) 23 | assert key1 in key_list 24 | assert key2 not in key_list 25 | assert key2_new in key_list 26 | 27 | # Drop both keys with one request 28 | client.key.rm(key1, key2_new) 29 | 30 | # Validate that the keys are gone again 31 | key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"])) 32 | assert key1 not in key_list 33 | assert key2_new not in key_list -------------------------------------------------------------------------------- /test/functional/test_miscellaneous.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import pytest 4 | 5 | 6 | 7 | def test_version(client): 8 | expected_keys = {"Repo", "Commit", "Version"} 9 | resp_version = client.version() 10 | assert set(resp_version.keys()).issuperset(expected_keys) 11 | 12 | 13 | def test_id(client): 14 | expected_keys = {"PublicKey", "ProtocolVersion", "ID", "AgentVersion", "Addresses"} 15 | resp_id = client.id() 16 | assert set(resp_id.keys()).issuperset(expected_keys) 17 | 18 | 19 | ################# 20 | # Shutdown test # 21 | ################# 22 | 23 | @pytest.mark.last 24 | def test_daemon_stop(daemon, client): 25 | # The value for the `daemon` “fixture” is injected using a pytest plugin 26 | # with access to the created daemon subprocess object defined directly 27 | # in the `test/run-test.py` file 28 | if not daemon: 29 | pytest.skip("Not started using `test/run-test.py`") 30 | 31 | def daemon_is_running(): 32 | return daemon.poll() is None 33 | 34 | # Daemon should still be running at this point 35 | assert daemon_is_running() 36 | 37 | # Send stop request 38 | client.stop() 39 | 40 | # Wait for daemon process to disappear 41 | for _ in range(10000): 42 | if not daemon_is_running(): 43 | break 44 | time.sleep(0.001) 45 | 46 | # Daemon should not be running anymore 47 | assert not daemon_is_running() 48 | -------------------------------------------------------------------------------- /test/functional/test_name.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | 4 | def get_key(client, key_name): 5 | keys = client.key.list()["Keys"] 6 | for k in keys: 7 | if k["Name"] == key_name: 8 | return k 9 | raise Exception("Unknown key: %s" % key_name) 10 | 11 | 12 | def hash_to_path(ns, h): 13 | assert "/" not in h 14 | assert h == h.strip() 15 | return "/" + ns + "/" + h 16 | 17 | 18 | def hash_to_ipfs_path(h): 19 | return hash_to_path("ipfs", h) 20 | 21 | 22 | def hash_to_ipns_path(h): 23 | return hash_to_path("ipns", h) 24 | 25 | 26 | class Resources: 27 | def __init__(self, offline_client): 28 | self.client = 
offline_client 29 | 30 | 31 | def __enter__(self): 32 | self.key_self = get_key(self.client, "self") 33 | self.key_test1 = self.client.key.gen("ipfshttpclient-test-name-1", "rsa") 34 | self.key_test2 = self.client.key.gen("ipfshttpclient-test-name-2", "rsa") 35 | self.msg1 = hash_to_ipfs_path(self.client.add_str("Mary had a little lamb")) 36 | self.msg2 = hash_to_ipfs_path(self.client.add_str("Mary had a little alpaca")) 37 | self.msg3 = hash_to_ipfs_path(self.client.add_str("Mary had a little goat")) 38 | return self 39 | 40 | 41 | def __exit__(self, t, v, tb): 42 | self.client.pin.rm(self.msg1, self.msg2, self.msg3) 43 | self.client.key.rm(self.key_test1["Name"], self.key_test2["Name"]) 44 | 45 | 46 | class PublishedMapping: 47 | def __init__(self, name, path): 48 | self.name = name 49 | self.path = path 50 | 51 | 52 | @pytest.fixture(scope="module") 53 | def resources(module_offline_client): 54 | with Resources(module_offline_client) as resources: 55 | yield resources 56 | 57 | 58 | @pytest.fixture(scope="module") 59 | def published_mapping(module_offline_client, resources): 60 | # we're not testing publish here, pass whatever args we want 61 | resp = module_offline_client.name.publish( 62 | resources.msg3, 63 | key=resources.key_test2["Name"], resolve=False, 64 | lifetime="5m", ttl="5m", allow_offline=True) 65 | return PublishedMapping(resp["Name"], resp["Value"]) 66 | 67 | 68 | def check_resolve(resp, path): 69 | assert resp["Path"] == path 70 | 71 | 72 | def check_publish(offline_client, response_path, resolved_path, key, resp): 73 | 74 | name = resp["Name"] 75 | assert name == key["Id"] 76 | assert resp["Value"] == response_path 77 | 78 | # we're not testing resolve here, pass whatever args we want 79 | resolve_resp = offline_client.name.resolve( 80 | name, 81 | recursive=True, dht_record_count=0, dht_timeout="1s", 82 | offline=True) 83 | check_resolve(resolve_resp, resolved_path) 84 | 85 | 86 | def test_publish_self(offline_client, resources): 87 | resp = offline_client.name.publish(resources.msg1, allow_offline=True) 88 | check_publish(offline_client, resources.msg1, resources.msg1, 89 | resources.key_self, resp) 90 | 91 | 92 | def test_publish_params(offline_client, resources): 93 | resp = offline_client.name.publish(resources.msg1, 94 | lifetime="25h", ttl="1m", 95 | allow_offline=True) 96 | check_publish(offline_client, resources.msg1, resources.msg1, 97 | resources.key_self, resp) 98 | 99 | 100 | def test_publish_key(offline_client, resources): 101 | resp = offline_client.name.publish( 102 | resources.msg2, 103 | key=resources.key_test1["Name"], allow_offline=True) 104 | check_publish(offline_client, resources.msg2, resources.msg2, 105 | resources.key_test1, resp) 106 | 107 | 108 | def test_publish_indirect(offline_client, resources, published_mapping): 109 | path = hash_to_ipns_path(published_mapping.name) 110 | resp = offline_client.name.publish(path, 111 | resolve=True, allow_offline=True) 112 | check_publish(offline_client, path, published_mapping.path, 113 | resources.key_self, resp) 114 | 115 | 116 | def test_resolve(offline_client, published_mapping): 117 | check_resolve(offline_client.name.resolve(published_mapping.name), 118 | published_mapping.path) 119 | 120 | 121 | def test_resolve_recursive(offline_client, published_mapping): 122 | inner_path = hash_to_ipns_path(published_mapping.name) 123 | res = offline_client.name.publish(inner_path, 124 | resolve=False, allow_offline=True) 125 | outer_path = res["Name"] 126 | 127 | resp = 
offline_client.name.resolve(outer_path, recursive=True) 128 | check_resolve(resp, published_mapping.path) 129 | 130 | 131 | def test_resolve_params(offline_client, published_mapping): 132 | resp = offline_client.name.resolve( 133 | published_mapping.name, 134 | nocache=True, dht_record_count=1, dht_timeout="180s", 135 | offline=True) 136 | check_resolve(resp, published_mapping.path) 137 | -------------------------------------------------------------------------------- /test/functional/test_object.py: -------------------------------------------------------------------------------- 1 | import conftest 2 | import pytest 3 | 4 | 5 | 6 | 7 | def test_new(client): 8 | expected_keys = {"Hash"} 9 | res = client.object.new() 10 | assert set(res.keys()).issuperset(expected_keys) 11 | 12 | 13 | def test_stat(client): 14 | expected_keys = {"Hash", "CumulativeSize", "DataSize", "NumLinks", "LinksSize", "BlockSize"} 15 | resource = client.add_str("Mary had a little lamb") 16 | resp_stat = client.object.stat(resource) 17 | assert set(resp_stat.keys()).issuperset(expected_keys) 18 | 19 | 20 | def test_put_get(client): 21 | # Set paths to test json files 22 | path_no_links = conftest.TEST_DIR / "fake_json" / "no_links.json" 23 | path_links = conftest.TEST_DIR / "fake_json" / "links.json" 24 | 25 | # Put the json objects on the DAG 26 | no_links = client.object.put(path_no_links) 27 | links = client.object.put(path_links) 28 | 29 | # Verify the correct content was put 30 | assert no_links["Hash"] == "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V" 31 | assert links["Hash"] == "QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm" 32 | 33 | # Get the objects from the DAG 34 | get_no_links = client.object.get("QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V") 35 | get_links = client.object.get("QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm") 36 | 37 | # Verify the objects we put have been gotten 38 | assert get_no_links["Data"] == "abc" 39 | assert get_links["Data"] == "another" 40 | assert get_links["Links"][0]["Name"] == "some link" 41 | 42 | 43 | def test_links(client): 44 | # Set paths to test json files 45 | path_links = conftest.TEST_DIR / "fake_json" / "links.json" 46 | 47 | # Put json object on the DAG and get its links 48 | client.object.put(path_links) 49 | links = client.object.links("QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm") 50 | 51 | # Verify the correct link has been gotten 52 | assert links["Links"][0]["Hash"] == "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V" 53 | 54 | 55 | def test_data(client): 56 | # Set paths to test json files 57 | path_links = conftest.TEST_DIR / "fake_json" / "links.json" 58 | 59 | # Put json objects on the DAG and get its data 60 | client.object.put(path_links) 61 | data = client.object.data("QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm") 62 | 63 | # Verify the correct bytes have been gotten 64 | assert data == b"another" 65 | 66 | 67 | # Instead of writing our own test file generation just make a proxy to piggyback off test_files 68 | @pytest.mark.dependency(depends=["test/functional/test_files.py::test_add_recursive"], 69 | scope='session') 70 | def test_prepare_test_files(client): 71 | pass 72 | 73 | 74 | @pytest.mark.dependency(depends=["test_prepare_test_files"]) 75 | def test_patch_append_data(client): 76 | """Warning, this test depends on the contents of 77 | test/functional/fake_dir/fsdfgh 78 | """ 79 | result = client.object.patch.append_data( 80 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", 81 | conftest.TEST_DIR / "fake_dir" / "fsdfgh" 82 | ) 83 | 
assert result == {"Hash": "QmcUsyoGVxWoQgYKgmLaDBGm8J3eHWfchMh3oDUD5FrrtN"} 84 | 85 | 86 | @pytest.mark.dependency(depends=["test_prepare_test_files"]) 87 | def test_patch_add_link(client): 88 | """Warning, this test depends on the contents of 89 | test/functional/fake_dir/fsdfgh 90 | """ 91 | result = client.object.patch.add_link( 92 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", "self", 93 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n" 94 | ) 95 | assert result == {"Hash": "QmbWSr7YXBLcF23VVb7yPvUuogUPn46GD7gXftXC6mmsNM"} 96 | 97 | 98 | @pytest.mark.dependency(depends=["test_prepare_test_files"]) 99 | def test_patch_rm_link(client): 100 | """Warning, this test depends on the contents of 101 | test/functional/fake_dir/fsdfgh 102 | """ 103 | result = client.object.patch.rm_link( 104 | "QmbWSr7YXBLcF23VVb7yPvUuogUPn46GD7gXftXC6mmsNM", "self" 105 | ) 106 | assert result == {"Hash": "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n"} 107 | 108 | 109 | @pytest.mark.dependency(depends=["test_prepare_test_files"]) 110 | def test_patch_set_data(client): 111 | """Warning, this test depends on the contents of 112 | test/functional/fake_dir/popoiopiu 113 | """ 114 | result = client.object.patch.set_data( 115 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", 116 | conftest.TEST_DIR / "fake_dir" / "popoiopiu" 117 | ) 118 | assert result == {"Hash": "QmV4QR7MCBj5VTi6ddHmXPyjWGzbaKEtX2mx7axA5PA13G"} 119 | 120 | 121 | @pytest.mark.dependency(depends=["test_prepare_test_files"]) 122 | def test_diff_same(client): 123 | """Warning, this test depends on the contents of 124 | test/functional/fake_dir/popoiopiu 125 | """ 126 | result = client.object.diff( 127 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", 128 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n" 129 | ) 130 | assert result == {'Changes': []} 131 | 132 | 133 | @pytest.mark.dependency(depends=["test_prepare_test_files"]) 134 | def test_diff_different_files(client): 135 | """Warning, this test depends on the contents of 136 | test/functional/fake_dir/fsdfgh 137 | test/functional/fake_dir/popoiopiu 138 | """ 139 | result = client.object.diff( 140 | "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", 141 | "QmV4QR7MCBj5VTi6ddHmXPyjWGzbaKEtX2mx7axA5PA13G" 142 | ) 143 | assert result == {'Changes': [{ 144 | 'Type': 2, 145 | 'Path': '', 146 | 'Before': {'/': 'QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n'}, 147 | 'After': {'/': 'QmV4QR7MCBj5VTi6ddHmXPyjWGzbaKEtX2mx7axA5PA13G'}}]} -------------------------------------------------------------------------------- /test/functional/test_other.py: -------------------------------------------------------------------------------- 1 | import ipfshttpclient 2 | 3 | import conftest 4 | 5 | 6 | def test_ipfs_node_available(): 7 | """ 8 | Dummy test to ensure that running the tests without a daemon produces a failure, since we 9 | think it's unlikely that people running tests want this 10 | """ 11 | assert conftest.is_available(), \ 12 | "Functional tests require an IPFS node to be available at: {0}" \ 13 | .format(ipfshttpclient.DEFAULT_ADDR) 14 | 15 | 16 | def test_add_json(client, cleanup_pins): 17 | data = {"Action": "Open", "Type": "PR", "Name": "IPFS", "Pubkey": 7} 18 | res = client.add_json(data) 19 | 20 | assert data == client.get_json(res) 21 | 22 | # have to test the string added to IPFS, deserializing JSON will not 23 | # test order of keys 24 | assert '{"Action":"Open","Name":"IPFS","Pubkey":7,"Type":"PR"}' == client.cat(res).decode("utf-8") 
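Note on `test_add_json` above: the byte-exact `cat` assertion only holds because `add_json` serializes with sorted keys and compact separators. A minimal sketch of that canonical form using only the standard `json` module (assuming the client's encoder behaves equivalently):

    import json

    data = {"Action": "Open", "Type": "PR", "Name": "IPFS", "Pubkey": 7}
    # Sorted keys plus no whitespace reproduce the exact string asserted above
    canonical = json.dumps(data, sort_keys=True, separators=(",", ":"))
    assert canonical == '{"Action":"Open","Name":"IPFS","Pubkey":7,"Type":"PR"}'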
-------------------------------------------------------------------------------- /test/functional/test_pin.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import ipfshttpclient.exceptions 4 | 5 | 6 | class Resources: 7 | def __init__(self, client): 8 | self.msg = client.add_str("Mary had a little lamb") 9 | self.msg2 = client.add_str("Mary had a little alpaca") 10 | resp_add = client.add("test/functional/fake_dir", recursive=True) 11 | self.fake_dir_hashes = [el["Hash"] for el in resp_add if "Hash" in el] 12 | for resp in resp_add: 13 | if resp["Name"] == "fake_dir": 14 | self.fake_dir_hash = resp["Hash"] 15 | elif resp["Name"] == "fake_dir/test2": 16 | self.fake_dir_test2_hash = resp["Hash"] 17 | 18 | @pytest.fixture # noqa 19 | def resources(client): 20 | return Resources(client) 21 | 22 | 23 | def is_pinned(client, path): 24 | error_msg = None 25 | try: 26 | resp = client.pin.ls(path) 27 | assert path.split("/")[-1] in resp["Keys"] 28 | except ipfshttpclient.exceptions.ErrorResponse as exc: 29 | error_msg = exc.args[0] 30 | if "not pinned" in error_msg: 31 | return False 32 | raise 33 | return True 34 | 35 | 36 | def test_ls_void(client, resources): 37 | pins = client.pin.ls()["Keys"] 38 | assert len(pins) >= 2 39 | assert resources.msg in pins 40 | assert resources.msg2 in pins 41 | 42 | 43 | def test_ls_single(client, resources): 44 | pins = client.pin.ls(resources.msg)["Keys"] 45 | assert len(pins) == 1 46 | assert resources.msg in pins 47 | 48 | 49 | def test_ls_multiple(client, resources): 50 | pins = client.pin.ls(resources.msg, resources.msg2)["Keys"] 51 | assert len(pins) == 2 52 | assert resources.msg in pins 53 | assert resources.msg2 in pins 54 | 55 | 56 | def test_ls_add_rm_single(client, resources): 57 | # Get pinned objects at start. 58 | pins_begin = client.pin.ls()["Keys"] 59 | 60 | # Unpin the resource if already pinned. 61 | if resources.msg in pins_begin.keys(): 62 | client.pin.rm(resources.msg) 63 | 64 | # No matter what, the resource should not be pinned at this point 65 | assert resources.msg not in client.pin.ls()["Keys"] 66 | assert not is_pinned(client, resources.msg) 67 | 68 | for option in (True, False): 69 | # Pin the resource. 
70 | resp_add = client.pin.add(resources.msg, recursive=option) 71 | pins_after_add = client.pin.ls()["Keys"] 72 | assert resp_add["Pins"] == [resources.msg] 73 | assert resources.msg in pins_after_add 74 | if option: 75 | assert pins_after_add[resources.msg]["Type"] == "recursive" 76 | else: 77 | assert pins_after_add[resources.msg]["Type"] != "recursive" 78 | 79 | # Unpin the resource 80 | resp_rm = client.pin.rm(resources.msg) 81 | pins_after_rm = client.pin.ls()["Keys"] 82 | assert resp_rm["Pins"] == [resources.msg] 83 | assert resources.msg not in pins_after_rm 84 | 85 | # Get pinned objects at end 86 | pins_end = client.pin.ls()["Keys"] 87 | 88 | # Compare pinned items from start to finish of test 89 | assert resources.msg not in pins_end.keys() 90 | assert not is_pinned(client, resources.msg) 91 | 92 | 93 | def test_ls_add_rm_directory(client, resources): 94 | # Remove fake_dir if it had previously been pinned 95 | if resources.fake_dir_hash in client.pin.ls(type="recursive")["Keys"].keys(): 96 | client.pin.rm(resources.fake_dir_hash) 97 | 98 | # Make sure I removed it 99 | assert resources.fake_dir_hash not in client.pin.ls()["Keys"].keys() 100 | 101 | # Add "fake_dir" recursively 102 | client.pin.add(resources.fake_dir_hash) 103 | 104 | # Make sure all appear on the list of pinned objects 105 | pins_after_add = client.pin.ls()["Keys"].keys() 106 | assert set(pins_after_add).issuperset(set(resources.fake_dir_hashes)) 107 | 108 | # Clean up 109 | client.pin.rm(resources.fake_dir_hash) 110 | pins_end = client.pin.ls(type="recursive")["Keys"].keys() 111 | assert resources.fake_dir_hash not in pins_end 112 | 113 | 114 | def test_add_update_verify_rm(client, resources): 115 | # Get pinned objects at start 116 | pins_begin = client.pin.ls(type="recursive")["Keys"].keys() 117 | 118 | # Remove fake_dir and demo resource if they had previously been pinned 119 | if resources.fake_dir_hash in pins_begin: 120 | client.pin.rm(resources.fake_dir_hash) 121 | if resources.fake_dir_test2_hash in pins_begin: 122 | client.pin.rm(resources.fake_dir_test2_hash) 123 | 124 | # Ensure that none of the above are pinned anymore 125 | pins_after_rm = client.pin.ls(type="recursive")["Keys"].keys() 126 | assert resources.fake_dir_hash not in pins_after_rm 127 | assert resources.fake_dir_test2_hash not in pins_after_rm 128 | 129 | # Add pin for sub-directory 130 | client.pin.add(resources.fake_dir_test2_hash) 131 | 132 | # Replace it with a pin for the entire fake dir 133 | client.pin.update(resources.fake_dir_test2_hash, resources.fake_dir_hash) 134 | 135 | # Ensure that the sub-directory is not pinned directly anymore 136 | pins_after_update = client.pin.ls(type="recursive")["Keys"].keys() 137 | assert resources.fake_dir_test2_hash not in pins_after_update 138 | assert resources.fake_dir_hash in pins_after_update 139 | 140 | # Now add a pin to the sub-directory from the parent directory 141 | client.pin.update(resources.fake_dir_hash, resources.fake_dir_test2_hash, unpin=False) 142 | 143 | # Check integrity of all directory content hashes and whether all 144 | # directory contents have been processed in doing this 145 | hashes = [] 146 | for result in client.pin.verify(resources.fake_dir_hash, verbose=True): 147 | assert result["Ok"] 148 | hashes.append(result["Cid"]) 149 | assert resources.fake_dir_hash in hashes 150 | 151 | # Ensure that both directories are now recursively pinned 152 | pins_after_update2 = client.pin.ls(type="recursive")["Keys"].keys() 153 | assert resources.fake_dir_test2_hash in pins_after_update2
154 | assert resources.fake_dir_hash in pins_after_update2 155 | 156 | # Clean up 157 | client.pin.rm(resources.fake_dir_hash, resources.fake_dir_test2_hash) -------------------------------------------------------------------------------- /test/functional/test_pubsub.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | 3 | import pytest 4 | 5 | 6 | 7 | @pytest.fixture 8 | def pubsub_topic(): 9 | """ 10 | Creates a unique topic for testing purposes 11 | """ 12 | return "{}.testing".format(uuid.uuid4()) 13 | 14 | 15 | 16 | def test_publish_subscribe(client, pubsub_topic): 17 | """ 18 | We test both publishing and subscribing at 19 | the same time because we cannot verify that 20 | something has been properly published unless 21 | we subscribe to that channel and receive it. 22 | Likewise, we cannot accurately test a subscription 23 | without publishing something on the topic we are subscribed 24 | to. 25 | """ 26 | # the message that will be published 27 | message = "hello" 28 | 29 | expected_data = "aGVsbG8=" 30 | expected_topicIDs = [pubsub_topic] 31 | 32 | # get the subscription stream 33 | with client.pubsub.subscribe(pubsub_topic) as sub: 34 | # make sure something was actually returned from the subscription 35 | assert sub is not None 36 | 37 | # publish a message to topic 38 | client.pubsub.publish(pubsub_topic, message) 39 | 40 | # get the message 41 | sub_data = sub.read_message() 42 | 43 | # assert that the returned dict has the following keys 44 | assert "data" in sub_data 45 | assert "topicIDs" in sub_data 46 | 47 | assert sub_data["data"] == expected_data 48 | assert sub_data["topicIDs"] == expected_topicIDs 49 | 50 | 51 | def test_ls(client, pubsub_topic): 52 | """ 53 | Tests ls; assumes we are able 54 | to at least subscribe to a topic 55 | """ 56 | expected_return = {"Strings": [pubsub_topic]} 57 | 58 | # subscribe to the test topic 59 | sub = client.pubsub.subscribe(pubsub_topic) 60 | 61 | channels = None 62 | try: 63 | # grab the channels we're subscribed to 64 | channels = client.pubsub.ls() 65 | finally: 66 | sub.close() 67 | 68 | assert channels == expected_return 69 | 70 | 71 | def test_peers(client): 72 | """ 73 | Not sure how to test this since it fully depends 74 | on who we're connected to. We may not even have 75 | any peers 76 | """ 77 | peers = client.pubsub.peers() 78 | 79 | # make sure the Strings key is in the map that's returned 80 | assert "Strings" in peers 81 | 82 | # ensure the value of "Strings" is a list. 83 | # The list may or may not be empty.
84 | assert isinstance(peers["Strings"], list) -------------------------------------------------------------------------------- /test/functional/test_repo.py: -------------------------------------------------------------------------------- 1 | def test_stat(client): 2 | # Verify that the correct key-value pairs are returned 3 | stat = client.repo.stat() 4 | assert sorted(stat.keys()) == [ 5 | "NumObjects", "RepoPath", "RepoSize", 6 | "StorageMax", "Version" 7 | ] 8 | 9 | 10 | def test_gc(client): 11 | # Add and unpin an object to be garbage collected 12 | garbage = client.add_str("Test String") 13 | client.pin.rm(garbage) 14 | 15 | # Collect the garbage object with object count before and after 16 | orig_objs = client.repo.stat()["NumObjects"] 17 | gc = client.repo.gc() 18 | cur_objs = client.repo.stat()["NumObjects"] 19 | 20 | # Verify the garbage object was collected 21 | assert orig_objs > cur_objs 22 | keys = [el["Key"]["/"] for el in gc] 23 | assert garbage in keys 24 | 25 | 26 | def test_gc_no_result(client): 27 | # Add and unpin an object to be garbage collected 28 | garbage = client.add_str("Test String") 29 | client.pin.rm(garbage) 30 | 31 | # Collect the garbage object with object count before and after 32 | orig_objs = client.repo.stat()["NumObjects"] 33 | gc = client.repo.gc(quiet=True) 34 | cur_objs = client.repo.stat()["NumObjects"] 35 | 36 | # Verify the garbage object was collected 37 | assert orig_objs > cur_objs 38 | assert gc is None 39 | -------------------------------------------------------------------------------- /test/functional/test_unstable.py: -------------------------------------------------------------------------------- 1 | import collections.abc 2 | import conftest 3 | from threading import Timer 4 | import time 5 | 6 | 7 | ################## 8 | # Daemon Logging # 9 | ################## 10 | 11 | def test_log_ls_level(client): 12 | """ 13 | Unfortunately there is no way of knowing the logging levels prior 14 | to this test. This makes it impossible to guarantee that the logging 15 | levels are the same as before the test was run. 16 | """ 17 | # Retrieves the list of logging subsystems for a running daemon. 18 | resp_ls = client.unstable.log.ls() 19 | # The response should be a dictionary with only one key ('Strings'). 20 | assert "Strings" in resp_ls 21 | 22 | # Sets the logging level to 'error' for the first subsystem found. 23 | sub = resp_ls["Strings"][0] 24 | resp_level = client.unstable.log.level(sub, "error") 25 | assert resp_level["Message"] == "Changed log level of '{0}' to 'error'\n".format(sub) 26 | 27 | 28 | def test_log_tail(client): 29 | 30 | # Generate some events in the log, but only after we start listening 31 | TIME_TO_LOG_TAIL = 2 # time it takes to send request and start listening 32 | TIME_TO_GC = 2 # time it takes for GC to complete 33 | t = Timer(TIME_TO_LOG_TAIL, client.repo.gc) 34 | t.start() 35 | 36 | # Gets the response object. 37 | with client.unstable.log.tail(timeout=5) as log_tail_iter: 38 | # In case the log was not empty, we may return earlier 39 | # than the timer. If we return while the GC is still 40 | # running, we risk racing with test exit, so wait. 41 | t.cancel() 42 | time.sleep(TIME_TO_GC) 43 | 44 | # The log should have been parsed into a dictionary object with 45 | # various keys depending on the event that occurred. 
46 | assert isinstance(next(log_tail_iter), collections.abc.Mapping) 47 | 48 | 49 | ############ 50 | # Refs API # 51 | ############ 52 | 53 | REFS_RESULT = [ 54 | {"Err": "", "Ref": "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX"}, 55 | {"Err": "", "Ref": "QmYAhvKYu46rh5NcHzeu6Bhc7NG9SqkF9wySj2jvB74Rkv"}, 56 | {"Err": "", "Ref": "QmStL6TPbJfMHQhHjoVT93kCynVx3GwLf7xwgrtScqABhU"}, 57 | {"Err": "", "Ref": "QmRphRr6ULDEj7YnXpLdnxhnPiVjv5RDtGX3er94Ec6v4Q"} 58 | ] 59 | 60 | 61 | def test_refs_local_1(client): 62 | with open(str(conftest.TEST_DIR / "fake_dir" / "fsdfgh"), "rb") as fp: 63 | res = client.add(fp, pin=False) 64 | 65 | assert res["Hash"] == "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX" 66 | 67 | assert res["Hash"] not in client.pin.ls(type="recursive") 68 | assert res["Hash"] in list(map(lambda i: i["Ref"], client.unstable.refs.local())) 69 | 70 | 71 | def test_refs_local_2(client): 72 | res = client.add(conftest.TEST_DIR / "fake_dir" / "fsdfgh", pin=False) 73 | 74 | assert res["Hash"] == "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX" 75 | 76 | assert res["Hash"] not in client.pin.ls(type="recursive") 77 | assert res["Hash"] in list(map(lambda i: i["Ref"], client.unstable.refs.local())) 78 | 79 | 80 | def test_refs(client, cleanup_pins): 81 | res = client.add(conftest.TEST_DIR / "fake_dir", recursive=True) 82 | assert res[-1]["Hash"] == "QmNx8xVu9mpdz9k6etbh2S8JwZygatsZVCH4XhgtfUYAJi" 83 | 84 | refs = client.unstable.refs(res[-1]["Hash"]) 85 | assert conftest.sort_by_key(REFS_RESULT, "Ref") == conftest.sort_by_key(refs, "Ref") -------------------------------------------------------------------------------- /test/run-tests.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import contextlib 4 | import itertools 5 | import locale 6 | import os 7 | import pathlib 8 | import random 9 | import shutil 10 | import subprocess 11 | import sys 12 | import tempfile 13 | 14 | import pytest 15 | 16 | 17 | if not hasattr(contextlib, "suppress"): 18 | """ 19 | Polyfill for ``contextlib.suppress`` 20 | """ 21 | @contextlib.contextmanager 22 | def _contextlib_suppress(*exceptions): 23 | try: 24 | yield 25 | except exceptions: 26 | pass 27 | contextlib.suppress = _contextlib_suppress 28 | 29 | 30 | ###################### 31 | # Test configuration # 32 | ###################### 33 | 34 | # Environment name as used by `tox` 35 | ENVNAME = "py{}{}".format(sys.version_info.major, sys.version_info.minor) 36 | 37 | # Determine project base directory and testing path 38 | BASE_PATH = pathlib.Path(__file__).parent.parent 39 | TEST_PATH = BASE_PATH / "build" / "test-{}".format(ENVNAME) 40 | IPFS_PATH = TEST_PATH / "ipfs-path" 41 | 42 | ADDR = "/ip4/127.0.0.1/tcp/{0}".format(random.randrange(40000, 65535)) 43 | 44 | 45 | ########################### 46 | # Set up test environment # 47 | ########################### 48 | 49 | # Add project directory to PYTHONPATH 50 | sys.path.insert(0, str(BASE_PATH)) 51 | 52 | # Switch working directory to project directory 53 | os.chdir(str(BASE_PATH)) 54 | 55 | # Export environment variables required for testing 56 | os.environ["IPFS_PATH"] = str(IPFS_PATH) 57 | os.environ["PY_IPFS_HTTP_CLIENT_DEFAULT_ADDR"] = str(ADDR) 58 | 59 | # Make sure the IPFS data directory exists and is empty 60 | with contextlib.suppress(FileNotFoundError): 61 | shutil.rmtree(str(IPFS_PATH)) 62 | 63 | with contextlib.suppress(FileExistsError): 64 | os.makedirs(str(IPFS_PATH)) 65 | 66 | # Initialize the IPFS data directory 67 | 
subprocess.call(["ipfs", "init"]) 68 | subprocess.call(["ipfs", "config", "Addresses.Gateway", ""]) 69 | subprocess.call(["ipfs", "config", "Addresses.API", ADDR]) 70 | subprocess.call(["ipfs", "config", "--bool", "Experimental.FilestoreEnabled", "true"]) 71 | 72 | 73 | ################ 74 | # Start daemon # 75 | ################ 76 | 77 | extra_args = {} 78 | if sys.version_info >= (3, 6, 0): 79 | extra_args["encoding"] = locale.getpreferredencoding() 80 | else: #PY35: `subprocess.Popen` encoding parameter missing 81 | extra_args["universal_newlines"] = True 82 | 83 | # Spawn IPFS daemon in data directory 84 | print("Starting IPFS daemon on {0}…".format(ADDR), file=sys.stderr) 85 | DAEMON = subprocess.Popen( 86 | ["ipfs", "daemon", "--enable-pubsub-experiment"], 87 | stdout=subprocess.PIPE, 88 | stderr=subprocess.STDOUT, 89 | **extra_args 90 | ) 91 | 92 | 93 | class DaemonProcessPlugin: 94 | """Tiny pytest plugin to inject daemon object reference as test “fixture” value.""" 95 | @pytest.hookimpl(hookwrapper=True) 96 | def pytest_pyfunc_call(self, pyfuncitem): 97 | if "daemon" in pyfuncitem.funcargs: 98 | pyfuncitem.funcargs["daemon"] = DAEMON 99 | yield 100 | 101 | 102 | # Wait for daemon to start up 103 | for line in DAEMON.stdout: 104 | print("\t{0}".format(line), end="", file=sys.stderr) 105 | if line.strip() == "Daemon is ready": 106 | break 107 | 108 | #XXX: This design could deadlock the test run if the daemon were to produce more 109 | # output than fits into its output pipe before shutdown 110 | 111 | 112 | ################## 113 | # Run test suite # 114 | ################## 115 | 116 | PYTEST_CODE = 1 117 | try: 118 | # Make sure all required pytest plugins are loaded up-front 119 | os.environ["PYTEST_PLUGINS"] = ",".join([ 120 | "cid", 121 | "dependency", 122 | "localserver", 123 | "pytest_cov", 124 | "pytest_mock", 125 | "pytest_ordering", 126 | ]) 127 | 128 | with tempfile.NamedTemporaryFile("r+") as coveragerc: 129 | coverage_args = [] 130 | if os.name != "nt": 131 | PREFER_HTTPX = (os.environ.get("PY_IPFS_HTTP_CLIENT_PREFER_HTTPX", "no").lower() 132 | not in ("0", "f", "false", "n", "no")) 133 | 134 | # Assemble list of files to exclude from coverage analysis 135 | omitted_files = [ 136 | "ipfshttpclient/requests_wrapper.py", 137 | ] 138 | if PREFER_HTTPX and sys.version_info >= (3, 6): 139 | omitted_files.append("ipfshttpclient/http_requests.py") 140 | else: #PY35: Fallback to old requests-based code instead of HTTPX 141 | omitted_files.append("ipfshttpclient/http_httpx.py") 142 | 143 | # Assemble list of coverage data exclusion patterns (also escape the 144 | # hash sign [#] as it has a special meaning [comment] in the generated 145 | # configuration file) 146 | exclusions = [ 147 | # Add the standard coverage exclusion statement 148 | r"pragma:\s+no\s+cover", 149 | 150 | # Ignore typing-only branches 151 | r"if\s+(?:[A-Za-z]+\s*[.]\s*)?TYPE_CHECKING\s*:", 152 | 153 | # Ignore dummy ellipsis expression line 154 | r"^\s*\.\.\.\s*$", 155 | ] 156 | if sys.version_info.major == 2: 157 | exclusions.append(r"\#PY3") 158 | else: 159 | # Exclude the past 160 | exclusions.append(r"\#PY2") 161 | # Exclude code only used for compatiblity with a previous Python version 162 | exclusions.append(r"\#PY3({0})([^\d+]|$)".format( 163 | "|".join(map(str, range(0, sys.version_info.minor))) 164 | )) 165 | # Exclude code only used in future Python versions 166 | exclusions.append(r"\#PY3({0})\+".format( 167 | "|".join(map(str, range(sys.version_info.minor + 1, 20))) 168 | )) 169 | 170 | if 
PREFER_HTTPX and sys.version_info >= (3, 6): 171 | exclusions.append(r"\# pragma: http-backend=requests") 172 | else: #PY35: Fallback to old requests-based code instead of HTTPX 173 | exclusions.append(r"\# pragma: http-backend=httpx") 174 | 175 | # Create temporary file with extended *coverage.py* configuration data 176 | coveragerc.file.writelines(map(lambda s: s + "\n", itertools.chain(( 177 | "[run]", 178 | "omit =", 179 | ), map(lambda s: "\t" + s, omitted_files), 180 | ( 181 | "[report]", 182 | "# Exclude lines specific to some other Python version from coverage", 183 | "exclude_lines =", 184 | ), map(lambda s: "\t" + s, exclusions)))) 185 | coveragerc.file.flush() 186 | 187 | coverage_args = [ 188 | "--cov=ipfshttpclient", 189 | "--cov-branch", 190 | "--cov-config={0}".format(coveragerc.name), 191 | "--no-cov-on-fail", 192 | "--cov-fail-under=90", 193 | "--cov-report=term", 194 | "--cov-report=html:{}".format(str(TEST_PATH / "cov_html")), 195 | "--cov-report=xml:{}".format(str(TEST_PATH / "cov.xml")), 196 | ] 197 | 198 | # Launch pytest in-process 199 | PYTEST_CODE = pytest.main([ 200 | "--verbose", 201 | ] + coverage_args + sys.argv[1:], plugins=[DaemonProcessPlugin()]) 202 | finally: 203 | try: 204 | # Move coverage file to test directory (so that the coverage files of different 205 | # versions can be merged later on) 206 | shutil.move(str(BASE_PATH / ".coverage"), str(TEST_PATH / "cov_raw")) 207 | except FileNotFoundError: 208 | pass # Early crash in pytest or Windows – no coverage data generated 209 | 210 | # Make sure daemon was terminated during the tests 211 | if DAEMON.poll() is None: # "if DAEMON is running" 212 | DAEMON.kill() 213 | 214 | print("IPFS daemon was still running after test!", file=sys.stderr) 215 | 216 | output = list(DAEMON.stdout) 217 | if output: 218 | print("IPFS daemon printed extra messages:", file=sys.stderr) 219 | for line in output: 220 | print("\t{0}".format(line), end="", file=sys.stderr) 221 | 222 | sys.exit(PYTEST_CODE) 223 | -------------------------------------------------------------------------------- /test/unit/test_client.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import ipfshttpclient 4 | 5 | 6 | def test_assert_version(): 7 | # Minimum required version 8 | ipfshttpclient.assert_version("0.1.0", "0.1.0", "0.2.0", ["0.1.2"]) 9 | 10 | # Too high version 11 | with pytest.raises(ipfshttpclient.exceptions.VersionMismatch): 12 | ipfshttpclient.assert_version("0.2.0", "0.1.0", "0.2.0", ["0.1.2"]) 13 | 14 | # Too low version 15 | with pytest.raises(ipfshttpclient.exceptions.VersionMismatch): 16 | ipfshttpclient.assert_version("0.0.5", "0.1.0", "0.2.0", ["0.1.2"]) 17 | 18 | # Blacklisted version 19 | with pytest.raises(ipfshttpclient.exceptions.VersionMismatch): 20 | ipfshttpclient.assert_version("0.1.2-1", "0.1.0", "0.2.0", ["0.1.2"]) 21 | 22 | 23 | def test_client_session_param(): 24 | client = ipfshttpclient.Client(session=True) 25 | assert client._client._session is not None 26 | try: 27 | with pytest.raises(Exception): 28 | with client: 29 | pass # Should fail because a session is already open 30 | assert client._client._session is not None 31 | finally: 32 | client.close() 33 | assert client._client._session is None 34 | 35 | 36 | def test_client_session_context(): 37 | client = ipfshttpclient.Client() 38 | assert client._client._session is None 39 | with client: 40 | assert client._client._session is not None 41 | assert client._client._session is None 42 | 
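Note on the two session tests above: together they pin down the session lifecycle — `Client(session=True)` opens the session eagerly (re-entering the client as a context manager then fails, and `close()` must be called explicitly), while a plain `Client()` only holds a session for the duration of a `with` block. A usage sketch derived solely from that observed behaviour (connecting to the default daemon address is assumed):

    import ipfshttpclient

    # Long-lived session: the caller owns it and must close it explicitly
    client = ipfshttpclient.Client(session=True)
    try:
        pass  # issue any number of requests over the shared session
    finally:
        client.close()

    # Scoped session: opened on __enter__, closed again on __exit__
    with ipfshttpclient.Client() as client:
        pass  # the session only lives for this block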
-------------------------------------------------------------------------------- /test/unit/test_encoding.py: -------------------------------------------------------------------------------- 1 | """Test the generic data encoding and decoding module.""" 2 | import json 3 | 4 | import pytest 5 | 6 | import ipfshttpclient.encoding 7 | import ipfshttpclient.exceptions 8 | 9 | 10 | 11 | @pytest.fixture 12 | def json_encoder(): 13 | return ipfshttpclient.encoding.Json() 14 | 15 | 16 | def test_dummy_encoder(): 17 | """Tests if the dummy encoder does its trivial job""" 18 | dummy_encoder = ipfshttpclient.encoding.Dummy() 19 | 20 | for v in (b"123", b"4", b"ddjlflsdmlflsdfjlfjlfdsjldfs"): 21 | assert dummy_encoder.encode(v) == v 22 | 23 | assert list(dummy_encoder.parse_partial(v)) == [v] 24 | assert list(dummy_encoder.parse_finalize()) == [] 25 | 26 | 27 | def test_json_parse_partial(json_encoder): 28 | """Tests if feeding parts of JSON strings in the right order to the JSON parser produces the right results.""" 29 | data1 = {'key1': 'value1'} 30 | data2 = {'key2': 'value2'} 31 | 32 | # Try single fragmented data set 33 | data1_binary = json.dumps(data1).encode("utf-8") 34 | assert list(json_encoder.parse_partial(data1_binary[:8])) == [] 35 | assert list(json_encoder.parse_partial(data1_binary[8:])) == [data1] 36 | assert list(json_encoder.parse_finalize()) == [] 37 | 38 | # Try multiple data sets surrounded by whitespace 39 | data2_binary = json.dumps(data2).encode("utf-8") 40 | data2_final = b" " + data1_binary + b" \r\n " + data2_binary + b" " 41 | assert list(json_encoder.parse_partial(data2_final)) == [data1, data2] 42 | assert list(json_encoder.parse_finalize()) == [] 43 | 44 | # String containing broken UTF-8 45 | with pytest.raises(ipfshttpclient.exceptions.DecodingError): 46 | list(json_encoder.parse_partial(b'{"hello": "\xc3ber world!"}')) 47 | assert list(json_encoder.parse_finalize()) == [] 48 | 49 | 50 | def test_json_with_newlines(json_encoder): 51 | """Tests if feeding partial JSON strings with line breaks behaves as expected.""" 52 | data1 = '{"key1":\n"value1",\n' 53 | data2 = '"key2":\n\n\n"value2"\n}' 54 | 55 | data_expected = json.loads(data1 + data2) 56 | 57 | assert list(json_encoder.parse_partial(data1.encode("utf-8"))) == [] 58 | assert list(json_encoder.parse_partial(data2.encode("utf-8"))) == [data_expected] 59 | assert list(json_encoder.parse_finalize()) == [] 60 | 61 | 62 | def test_json_parse_incomplete(json_encoder): 63 | """Tests if feeding the JSON parser incomplete data correctly produces an error.""" 64 | list(json_encoder.parse_partial(b'{"bla":')) 65 | with pytest.raises(ipfshttpclient.exceptions.DecodingError): 66 | json_encoder.parse_finalize() 67 | 68 | list(json_encoder.parse_partial(b'{"\xc3')) # Incomplete UTF-8 sequence 69 | with pytest.raises(ipfshttpclient.exceptions.DecodingError): 70 | json_encoder.parse_finalize() 71 | 72 | 73 | def test_json_encode(json_encoder): 74 | """Tests serialization of an object into a JSON formatted UTF-8 string.""" 75 | data = {'key': 'value with Ünicøde characters ☺'} 76 | assert json_encoder.encode(data) == \ 77 | b'{"key":"value with \xc3\x9cnic\xc3\xb8de characters \xe2\x98\xba"}' 78 | 79 | def test_json_encode_invalid_surrogate(json_encoder): 80 | """Tests that encoding a string containing a disallowed surrogate raises an EncodingError.""" 81 | data = {'key': 'value with Ünicøde characters and disallowed surrogate: \uDC00'} 82 | with pytest.raises(ipfshttpclient.exceptions.EncodingError): 83 | json_encoder.encode(data) 84 | 85 | def
test_json_encode_invalid_type(json_encoder): 86 | """Tests that encoding a value of a non-JSON-serializable type raises an EncodingError.""" 87 | data = {'key': b'value that is not JSON encodable'} 88 | with pytest.raises(ipfshttpclient.exceptions.EncodingError): 89 | json_encoder.encode(data) 90 | 91 | 92 | def test_get_encoder_by_name(): 93 | """Tests the process of obtaining an Encoder object given the named encoding.""" 94 | encoder = ipfshttpclient.encoding.get_encoding('json') 95 | assert encoder.name == 'json' 96 | 97 | 98 | def test_get_invalid_encoder(): 99 | """Tests the exception handling given an invalid named encoding.""" 100 | with pytest.raises(ipfshttpclient.exceptions.EncoderMissingError): 101 | ipfshttpclient.encoding.get_encoding('fake') 102 | -------------------------------------------------------------------------------- /test/unit/test_filescanner.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import re 3 | import sys 4 | import typing as ty 5 | 6 | import pytest 7 | 8 | from ipfshttpclient import filescanner 9 | 10 | 11 | TEST_FILE_DIR = os.path.join(os.path.dirname(__file__), "..", "functional") # type: str 12 | 13 | 14 | @pytest.mark.skipif(sys.version_info < (3, 6), reason="fnmatch.translate output changed in Python 3.6+") 15 | @pytest.mark.parametrize("pattern,expected,kwargs", [ 16 | ("literal", [r"(?![.])(?s:literal)\Z"], {}), 17 | (b"literal", [br"(?![.])(?s:literal)\Z"], {}), 18 | ("*.a", [r"(?![.])(?s:.*\.a)\Z"], {}), 19 | (b"*.a", [br"(?![.])(?s:.*\.a)\Z"], {}), 20 | ("*/**/*.dir/**/**/.hidden", [r"(?![.])(?s:.*)\Z", None, r"(?![.])(?s:.*\.dir)\Z", None, None, r"(?s:\.hidden)\Z"], {}), 21 | ("*/**/*.dir/**/**/.hidden", [r"(?s:.*)\Z", None, r"(?s:.*\.dir)\Z", None, None, r"(?s:\.hidden)\Z"], {"period_special": False}), 22 | ("././/////////./*.a", [r"(?![.])(?s:.*\.a)\Z"], {}), 23 | (b"././/////////./*.a", [br"(?![.])(?s:.*\.a)\Z"], {}), 24 | ("*/*.a", [r"(?![.])(?s:.*)\Z", r"(?![.])(?s:.*\.a)\Z"], {}), 25 | ("*/*.a", [r"(?s:.*)\Z", r"(?s:.*\.a)\Z"], {"period_special": False}), 26 | ]) 27 | def test_glob_compile(pattern: ty.AnyStr, expected: ty.List[ty.AnyStr], kwargs: ty.Dict[str, bool]): 28 | matcher = filescanner.GlobMatcher(pattern, **kwargs) 29 | assert list(map(lambda r: r.pattern if r is not None else None, matcher._pat)) == expected 30 | 31 | 32 | def test_glob_sep_normalize(monkeypatch): 33 | monkeypatch.setattr(os.path, "sep", "#") 34 | monkeypatch.setattr(os.path, "altsep", "~") 35 | 36 | assert len(filescanner.GlobMatcher("a#b~c")._pat) == 3 37 | 38 | monkeypatch.setattr(os.path, "altsep", None) 39 | 40 | assert len(filescanner.GlobMatcher("a#b~c")._pat) == 2 41 | 42 | 43 | # Possible hypothesis test: Parsing glob should never fail, except in the following 3 cases.
44 | 45 | @pytest.mark.skipif(sys.flags.optimize, reason="Glob error asserts are stripped from optimized code") 46 | @pytest.mark.parametrize("pattern", [ 47 | "../*", 48 | b"../*", 49 | "/absolute/file/path", 50 | b"/absolute/file/path", 51 | ]) 52 | def test_glob_errors(pattern): 53 | with pytest.raises(AssertionError): 54 | filescanner.GlobMatcher(pattern) 55 | 56 | 57 | def test_glob_not_implemented(): 58 | with pytest.raises(NotImplementedError): 59 | filescanner.GlobMatcher("*/.**") 60 | 61 | 62 | @pytest.mark.parametrize("pattern,path,is_dir,descend,report,kwargs", [ 63 | # Basic literal path tests 64 | ("literal", "other", False, False, False, {}), 65 | ("literal", "literal", False, False, True, {}), 66 | ("literal", "literal/more", False, False, False, {}), 67 | (b"literal", b"other", False, False, False, {}), 68 | (b"literal", b"literal", False, False, True, {}), 69 | (b"literal", b"literal/more", False, False, False, {}), 70 | ("literal/more", "other", False, False, False, {}), 71 | ("literal/more", "literal", False, True, False, {}), 72 | ("literal/more", "literal", True, True, True, {}), 73 | ("literal/more", "literal/more", False, False, True, {}), 74 | (b"literal/more", b"other", False, False, False, {}), 75 | (b"literal/more", b"literal", False, True, False, {}), 76 | (b"literal/more", b"literal", True, True, True, {}), 77 | (b"literal/more", b"literal/more", False, False, True, {}), 78 | ("literal/more", "other", False, False, False, {"recursive": False}), 79 | ("literal/more", "literal", False, False, False, {"recursive": False}), 80 | ("literal/more", "literal", True, False, True, {"recursive": False}), 81 | ("literal/more", "literal/more", False, False, False, {"recursive": False}), 82 | 83 | # Test basic leading-period handling 84 | ("*.a", ".a", False, False, False, {}), 85 | ("*.a", ".a", False, False, True, {"period_special": False}), 86 | ("*.a", ".a", True, False, False, {}), 87 | ("*.a", ".a", True, False, True, {"period_special": False}), 88 | 89 | # Test leading-period with trailing slash handling 90 | ("*.a/", ".a", False, False, False, {}), 91 | ("*.a/", ".a", False, False, False, {"period_special": False}), 92 | ("*.a/", ".a", True, False, False, {}), 93 | ("*.a/", ".a", True, False, True, {"period_special": False}), 94 | 95 | # Tests for double-star recursion with premium leading-period shenanigans 96 | ("*/**/*.dir/**/**/.hidden", ".dir/.hidden", False, False, False, {}), 97 | ("*/**/*.dir/**/**/.hidden", "a/.dir/.hidden", False, True, False, {}), 98 | ("*/**/*.dir/**/**/.hidden", "a/b.dir/.hidden", False, True, True, {}), 99 | ("*/**/*.dir/**/**/.hidden", "a/u/v/w/b.dir/c/d/e/f/.hidden", False, True, True, {}), 100 | ("**", ".a", False, True, False, {}), 101 | (filescanner.GlobMatcher("**"), ".a", False, True, False, {}), 102 | 103 | # Regular expression test 104 | (re.compile(r"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), "Camera/IMG-0169.jpeg", False, True, True, {}), 105 | (re.compile(r"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), "Camera", True, True, True, {}), 106 | (re.compile(r"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), "Camera/Thumbs.db", False, True, False, {}), 107 | (re.compile(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), b"Camera/IMG-0169.jpeg", False, True, True, {}), 108 | (re.compile(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), b"Camera", True, True, True, {}), 109 | (re.compile(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), b"Camera/Thumbs.db", False, True, False, {}), 110 | (filescanner.ReMatcher(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$"), b"Camera/Thumbs.db", False, True, False, 
{}), 111 | 112 | # Multiple patterns 113 | (["*/**/*.dir/**/**/.hidden", re.compile(r"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$")], "Camera/IMG-1279.jpeg", False, True, True, {}), 114 | ([b"*/**/*.dir/**/**/.hidden", re.compile(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$")], b"Camera/IMG-1279.jpeg", False, True, True, {}), 115 | (["*/**/*.dir/**/**/.hidden", re.compile(r"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$")], "a/.dir/.hidden", False, True, False, {}), 116 | ([b"*/**/*.dir/**/**/.hidden", re.compile(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$")], b"a/.dir/.hidden", False, True, False, {}), 117 | (["*/**/*.dir/**/**/.hidden", re.compile(r"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$")], "a/b.dir/.hidden", False, True, True, {}), 118 | ([b"*/**/*.dir/**/**/.hidden", re.compile(br"[^/\\]+[/\\](IMG-\d{0,4}\.jpeg)?$")], b"a/b.dir/.hidden", False, True, True, {}), 119 | 120 | # Edge case: No patterns 121 | ([], "???", False, False, False, {}), 122 | ([], b"???", False, False, False, {}), 123 | ]) 124 | def test_glob_matching( 125 | monkeypatch, 126 | pattern: ty.Union[ty.AnyStr, filescanner.re_pattern_t, ty.List[ty.Union[ty.AnyStr, filescanner.re_pattern_t]]], 127 | path: ty.AnyStr, 128 | is_dir: bool, 129 | descend: bool, 130 | report: bool, 131 | kwargs: ty.Dict[str, bool] 132 | ): 133 | # Hopefully useless sanity check 134 | assert os.path.sep == "/" or os.path.altsep == "/" 135 | 136 | slash = "/" if isinstance(path, str) else b"/" # type: ty.AnyStr 137 | sep = os.path.sep if isinstance(path, str) else os.fsencode(os.path.sep) # type: ty.AnyStr 138 | 139 | path = path.replace(slash, sep) 140 | 141 | matcher = filescanner.matcher_from_spec(pattern, **kwargs) 142 | assert matcher.should_descend(path) is descend 143 | assert matcher.should_report(path, is_dir=is_dir) is report 144 | 145 | 146 | def test_walk_fd_unsupported(monkeypatch): 147 | monkeypatch.setattr(filescanner, "HAVE_FWALK", False) 148 | 149 | with pytest.raises(NotImplementedError): 150 | filescanner.walk(0) 151 | 152 | 153 | def test_walk_instaclose(mocker): 154 | close_spy = mocker.spy(filescanner.walk, "close") 155 | 156 | with filescanner.walk("."): 157 | pass 158 | 159 | close_spy.assert_called_once() 160 | 161 | 162 | @pytest.mark.parametrize("path,pattern,kwargs,expected", [ 163 | (TEST_FILE_DIR + os.path.sep + "fake_dir_almost_empty" + os.path.sep, None, {}, [ 164 | (filescanner.FSNodeType.DIRECTORY, ".", "."), 165 | (filescanner.FSNodeType.FILE, ".gitignore", ".gitignore"), 166 | ]), 167 | (TEST_FILE_DIR + os.path.sep + "fake_dir", ["test2", "test3"], {}, [ 168 | (filescanner.FSNodeType.DIRECTORY, ".", "."), 169 | (filescanner.FSNodeType.DIRECTORY, "test2", "test2"), 170 | (filescanner.FSNodeType.DIRECTORY, "test3", "test3"), 171 | ]), 172 | ]) 173 | def test_walk(monkeypatch, path: str, pattern: None, kwargs: ty.Dict[str, bool], expected: ty.List[filescanner.FSNodeEntry]): 174 | result = [(e.type, e.relpath, e.name) for e in filescanner.walk(path, pattern, **kwargs)] 175 | assert sorted(result, key=lambda r: r[1]) == expected 176 | 177 | # Check again with plain `os.walk` if the current platform supports `os.fwalk` 178 | if filescanner.HAVE_FWALK: 179 | monkeypatch.setattr(filescanner, "HAVE_FWALK", False) 180 | 181 | result = [(e.type, e.relpath, e.name) for e in filescanner.walk(path, pattern, **kwargs)] 182 | assert sorted(result, key=lambda r: r[1]) == expected 183 | 184 | 185 | def test_supports_fd(): 186 | assert (filescanner.walk in filescanner.supports_fd) is filescanner.HAVE_FWALK 187 | 
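Note on the `walk()` tests above: the scanner is iterated directly, exposes a `close()` method (spied on in `test_walk_instaclose`, alongside context-manager support), and yields entries carrying `type`, `relpath` and `name` attributes. A minimal usage sketch based only on what these tests exercise (path relative to the repository root):

    from ipfshttpclient import filescanner

    # Same fixture directory and patterns as the second parametrized test_walk case
    scanner = filescanner.walk("test/functional/fake_dir", ["test2", "test3"])
    try:
        for entry in scanner:
            print(entry.type, entry.relpath, entry.name)
    finally:
        scanner.close()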
-------------------------------------------------------------------------------- /test/unit/test_http_httpx.py: -------------------------------------------------------------------------------- 1 | # Only add tests to this file if they really are specific to the behaviour 2 | # of this backend. For cross-backend or `http_common.py` tests use 3 | # `test_http.py` instead. 4 | import http.cookiejar 5 | import math 6 | import sys 7 | 8 | import pytest 9 | 10 | if sys.version_info <= (3, 6): 11 | pytest.skip("HTTPx requires Python 3.6+", allow_module_level=True) 12 | pytest.importorskip("ipfshttpclient.http_httpx") 13 | import ipfshttpclient.http_httpx 14 | 15 | 16 | cookiejar = http.cookiejar.CookieJar() 17 | 18 | @pytest.mark.parametrize("kwargs,expected", [ 19 | ({}, {}), 20 | 21 | ({ 22 | "auth": ("user", "pass"), 23 | "cookies": cookiejar, 24 | "headers": {"name": "value"}, 25 | "params": (("name", "value"),), 26 | "timeout": (math.inf, math.inf), 27 | }, { 28 | "auth": ("user", "pass"), 29 | "cookies": cookiejar, 30 | "headers": {"name": "value"}, 31 | "params": [("name", "value")], 32 | "timeout": (None, None, None, None), 33 | }), 34 | 35 | ({ 36 | "auth": ("user", b"pass"), 37 | "cookies": {"name": "value"}, 38 | "headers": ((b"name", b"value"),), 39 | "timeout": 34, 40 | }, { 41 | "auth": ("user", b"pass"), 42 | "cookies": {"name": "value"}, 43 | "headers": ((b"name", b"value"),), 44 | "timeout": 34, 45 | }), 46 | ]) 47 | def test_map_args_to_httpx(kwargs, expected): 48 | assert ipfshttpclient.http_httpx.map_args_to_httpx(**kwargs) == expected 49 | 50 | @pytest.mark.parametrize("args,kwargs,expected_kwargs,expected_base,expected_laddr", [ 51 | (("/dns/localhost/tcp/5001/http", "api/v0"), {}, { 52 | "params": [("stream-channels", "true")], 53 | }, "http://localhost:5001/api/v0/", None), 54 | 55 | (("/dns6/ietf.org/tcp/443/https", "/base/"), { 56 | "auth": ("user", "pass"), 57 | "cookies": cookiejar, 58 | "headers": {"name": "value"}, 59 | "offline": True, 60 | "timeout": (math.inf, math.inf), 61 | }, { 62 | "auth": ("user", "pass"), 63 | "cookies": cookiejar, 64 | "headers": {"name": "value"}, 65 | "params": [("offline", "true"), ("stream-channels", "true")], 66 | "timeout": (None, None, None, None), 67 | }, "https://ietf.org:443/base/", "::"), 68 | ]) 69 | def test_client_args_to_session_kwargs(args, kwargs, expected_kwargs, expected_base, expected_laddr): 70 | client = ipfshttpclient.http_httpx.ClientSync(*args, **kwargs) 71 | assert client._session_kwargs == expected_kwargs 72 | assert client._session_base == expected_base 73 | assert client._session_laddr == expected_laddr -------------------------------------------------------------------------------- /test/unit/test_http_requests.py: -------------------------------------------------------------------------------- 1 | # Only add tests to this file if they really are specific to the behaviour 2 | # of this backend. For cross-backend or `http_common.py` tests use 3 | # `test_http.py` instead. 
4 | import http.cookiejar 5 | import math 6 | import socket 7 | 8 | import pytest 9 | 10 | pytest.importorskip("ipfshttpclient.http_requests") 11 | import ipfshttpclient.http_requests 12 | 13 | 14 | cookiejar = http.cookiejar.CookieJar() 15 | 16 | @pytest.mark.parametrize("kwargs,expected", [ 17 | ({}, {}), 18 | 19 | ({ 20 | "auth": ("user", "pass"), 21 | "cookies": cookiejar, 22 | "headers": {"name": "value"}, 23 | "params": (("name", "value"),), 24 | "timeout": (math.inf, math.inf), 25 | }, { 26 | "auth": ("user", "pass"), 27 | "cookies": cookiejar, 28 | "headers": {"name": "value"}, 29 | "params": {"name": "value"}, 30 | "timeout": (None, None), 31 | }), 32 | 33 | ({ 34 | "auth": ("user", b"pass"), 35 | "cookies": {"name": "value"}, 36 | "headers": ((b"name", b"value"),), 37 | "timeout": 34, 38 | }, { 39 | "auth": ("user", b"pass"), 40 | "cookies": {"name": "value"}, 41 | "headers": ((b"name", b"value"),), 42 | "timeout": 34, 43 | }), 44 | ]) 45 | def test_map_args_to_requests(kwargs, expected): 46 | assert ipfshttpclient.http_requests.map_args_to_requests(**kwargs) == expected 47 | 48 | @pytest.mark.parametrize("args,kwargs,expected1,expected2,expected3", [ 49 | (("/dns/localhost/tcp/5001/http", "api/v0"), {}, "http://localhost:5001/api/v0/", { 50 | "family": socket.AF_UNSPEC, 51 | "params": {'stream-channels': 'true'}, 52 | }, None), 53 | 54 | (("/dns6/ietf.org/tcp/443/https", "/base/"), { 55 | "auth": ("user", "pass"), 56 | "cookies": cookiejar, 57 | "headers": {"name": "value"}, 58 | "offline": True, 59 | "timeout": (math.inf, math.inf), 60 | }, "https://ietf.org:443/base/", { 61 | "family": socket.AF_INET6, 62 | "auth": ("user", "pass"), 63 | "cookies": cookiejar, 64 | "headers": {"name": "value"}, 65 | "params": {'offline': 'true', 'stream-channels': 'true'}, 66 | }, (math.inf, math.inf)), 67 | ]) 68 | def test_client_args_to_session_props(args, kwargs, expected1, expected2, expected3): 69 | client = ipfshttpclient.http_requests.ClientSync(*args, **kwargs) 70 | assert client._base_url == expected1 71 | assert client._session_props == expected2 72 | assert client._default_timeout == expected3 -------------------------------------------------------------------------------- /test/unit/test_utils.py: -------------------------------------------------------------------------------- 1 | """Tox unit tests for utils.py. 2 | 3 | Classes: 4 | TestUtils -- defines a set of unit tests for utils.py 5 | """ 6 | 7 | import io 8 | import os.path 9 | import sys 10 | import unittest 11 | 12 | import ipfshttpclient.utils as utils 13 | 14 | class TestUtils(unittest.TestCase): 15 | """Contains unit tests for utils.py. 16 | 17 | Public methods: 18 | test_guess_mimetype -- tests utils.guess_mimetype() 19 | test_clean_file_opened -- tests utils.clean_file() with a stringIO object 20 | test_clean_file_unopened_textpath -- tests utils.clean_file() with a text filepath 21 | test_clean_file_unopened_binarypath -- tests utils.clean_file() with a binary filepath 22 | test_clean_files_single -- tests utils.clean_files() with a filepath 23 | test_clean_files_list -- tests utils.clean_files() with a list of files 24 | test_return_field_init -- tests utils.return_field.__init__() 25 | test_return_field_call -- tests utils.return_field.__call__() 26 | 27 | """ 28 | def test_guess_mimetype(self): 29 | """Tests utils.guess_mimetype(). 30 | 31 | Guesses the mimetype of the requirements.txt file 32 | located in the project's root directory.
33 | """ 34 | path = os.path.join(os.path.dirname(__file__), 35 | "..", "..", "requirements.txt") 36 | assert utils.guess_mimetype(path) == "text/plain" 37 | 38 | def test_clean_file_opened(self): 39 | """Tests utils.clean_file() with a stringIO object.""" 40 | string_io = io.StringIO('Mary had a little lamb') 41 | f, opened = utils.clean_file(string_io) 42 | assert hasattr(f, 'read') 43 | assert not opened 44 | # Closing stringIO after test assertions. 45 | f.close() 46 | 47 | def test_clean_file_unopened_textpath(self): 48 | """Tests utils.clean_file() with a text string filepath. 49 | 50 | This test relies on the openability of the file 'fsdfgh' 51 | located in 'test/functional/fake_dir'. 52 | """ 53 | path = os.path.dirname(__file__) 54 | path = os.path.join(path, "..", "functional", "fake_dir", "fsdfgh") 55 | f, opened = utils.clean_file(path) 56 | assert hasattr(f, 'read') 57 | assert opened 58 | # Closing file after test assertions. 59 | f.close() 60 | 61 | def test_clean_file_unopened_binarypath(self): 62 | """Tests utils.clean_file() with a binary string filepath. 63 | 64 | This test relies on the openability of the file 'fsdfgh' 65 | located in 'test/functional/fake_dir'. 66 | """ 67 | path = os.fsencode(os.path.dirname(__file__)) 68 | path = os.path.join(path, b"..", b"functional", b"fake_dir", b"fsdfgh") 69 | f, opened = utils.clean_file(path) 70 | assert hasattr(f, 'read') 71 | assert opened 72 | # Closing file after test assertions. 73 | f.close() 74 | 75 | def test_clean_files_single(self): 76 | """Tests utils.clean_files() with a singular filepath. 77 | 78 | This test relies on the openability of the file 'fsdfgh' 79 | located in 'test/functional/fake_dir'. 80 | """ 81 | path = os.path.join(os.path.dirname(__file__), 82 | "..", "functional", "fake_dir", "fsdfgh") 83 | gen = utils.clean_files(path) 84 | for tup in gen: 85 | assert hasattr(tup[0], 'read') 86 | assert tup[1] 87 | # Closing file after test assertions. 88 | tup[0].close() 89 | 90 | def test_clean_files_list(self): 91 | """Tests utils.clean_files() with a list of files/stringIO objects.""" 92 | path = os.path.join(os.path.dirname(__file__), 93 | "..", "functional", "fake_dir", "fsdfgh") 94 | string_io = io.StringIO('Mary had a little lamb') 95 | files = [path, string_io] 96 | gen = utils.clean_files(files) 97 | for i in range(0, 2): 98 | tup = next(gen) 99 | assert hasattr(tup[0], 'read') 100 | if i == 0: 101 | assert tup[1] 102 | else: 103 | assert not tup[1] 104 | # Closing files/stringIO objects after test assertions. 
105 | tup[0].close() 106 | 107 | def test_return_field_init(self): 108 | """Tests utils.return_field.__init__().""" 109 | return_field = utils.return_field('Hash') 110 | assert return_field.field == 'Hash' 111 | 112 | def test_return_field_call(self): 113 | """Tests utils.return_field.__call__().""" 114 | expected_hash = 'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab' 115 | 116 | @utils.return_field('Hash') 117 | def wrapper(string, *args, **kwargs): 118 | resp = {'Hash': expected_hash, 'string': string} 119 | return resp 120 | assert wrapper('Mary had a little lamb') == expected_hash 121 | -------------------------------------------------------------------------------- /tools/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -eu 3 | 4 | PROGNAME="${0}" 5 | 6 | usage() 7 | { 8 | echo "Usage: ${PROGNAME} [--install]" 9 | echo 10 | echo " --install Install the pre-commit hook" 11 | echo " -h, --help Display this help and exit" 12 | echo 13 | echo "Without any options the pre-commit checks are run." 14 | } 15 | 16 | if [ $# -gt 0 ]; 17 | then 18 | case "${1}" in 19 | "--install") 20 | top_dir="$(git rev-parse --show-toplevel)" 21 | git_dir="$(git rev-parse --git-dir)" 22 | 23 | if [ -f "${git_dir}/hooks/pre-commit" ]; 24 | then 25 | echo "ERROR: found existing pre-commit hook; " \ 26 | "cowardly giving up." >&2 27 | exit 1 28 | fi 29 | 30 | echo " • Installing pre-commit hook to ${git_dir}/hooks" 31 | ln -s "${top_dir}/tools/pre-commit" "${git_dir}/hooks/pre-commit" 32 | 33 | echo " • Enabling GIT hook Unicode support" 34 | git config --local --type=bool hooks.allownonascii true 35 | exit 36 | ;; 37 | 38 | "-h"|"--help") 39 | usage 40 | exit 0 41 | ;; 42 | 43 | *) 44 | echo "${PROGNAME}: Unknown option “${1}”" >&2 45 | echo >&2 46 | usage >&2 47 | exit 2 48 | ;; 49 | esac 50 | fi 51 | 52 | # Run code style and type tests before accepting a commit 53 | tox -e styleck -e typeck -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # For more information about tox, see https://tox.readthedocs.io/en/latest/ 2 | [tox] 3 | minversion = 3.3 4 | envlist = 5 | py3, 6 | py3-httpx, 7 | styleck, 8 | typeck 9 | 10 | # Enable slower `isolated_build` for PEP-518 compatibility 11 | isolated_build = true 12 | 13 | 14 | [testenv] 15 | deps = 16 | pytest ~= 5.0 17 | pytest-cov ~= 2.6 18 | pytest-dependency ~= 0.4 19 | pytest-localserver ~= 0.5 20 | pytest-mock ~= 1.10 21 | pytest-ordering ~= 0.6 22 | 23 | pytest-cid ~= 1.1 24 | py-cid 25 | 26 | mock 27 | whitelist_externals = ipfs 28 | passenv = IPFS_* PY_IPFS_HTTP_CLIENT_* 29 | commands = 30 | python -X utf8 "{toxinidir}/test/run-tests.py" {posargs} 31 | 32 | # Silence warning about not inheriting PYTHONPATH 33 | setenv = 34 | PYTHONPATH = 35 | 36 | 37 | [testenv:py35] 38 | # Fix missing dependencies when checking Python 3.5 39 | deps = 40 | {[testenv]deps} 41 | multiaddr (>=0.0.7) 42 | requests (>=2.11) 43 | 44 | 45 | [testenv:py3-httpx] 46 | deps-exclusive = 47 | httpx (~= 0.14.0) 48 | httpcore (~= 0.10.2) # Has Unix domain socket support 49 | deps = 50 | {[testenv]deps} 51 | {[testenv:py3-httpx]deps-exclusive} 52 | setenv = 53 | {[testenv]setenv} 54 | PY_IPFS_HTTP_CLIENT_PREFER_HTTPX = yes 55 | commands = 56 | python -X utf8 "{toxinidir}/test/run-tests.py" {posargs} 57 | 58 | 59 | [testenv:styleck] 60 | isolated_build = false 61 | skipsdist = true 62 | 63 | deps = 64 |
flake8 ~= 3.7 65 | flake8-tabs ~= 2.2 , >= 2.2.1 66 | commands = 67 | flake8 {posargs} 68 | 69 | 70 | [testenv:typeck] 71 | skip_install = true 72 | deps = 73 | mypy ~= 0.790 74 | pytest ~= 5.0 75 | {[testenv:py3-httpx]deps-exclusive} 76 | commands = 77 | mypy --config-file=tox.ini {posargs} -p ipfshttpclient 78 | 79 | # Pass down TERM environment variable to allow mypy output to be colorized 80 | # See: https://github.com/tox-dev/tox/issues/1441 81 | passenv = TERM 82 | 83 | 84 | [testenv:coverage] 85 | deps = 86 | coverage 87 | commands = 88 | python "{toxinidir}/test/combine-coverage.py" {posargs} 89 | 90 | 91 | #TODO: Migrate away from this file to `pyproject.toml` once `flake8`, `mypy` and `pytest` support using it: 92 | # * flake8: https://gitlab.com/pycqa/flake8/issues/428 (considering flakehell and flake9 here) 93 | # * mypy: https://github.com/python/mypy/issues/5205 94 | # * pytest: https://github.com/pytest-dev/pytest/issues/1556 (will be part of 6.0) 95 | 96 | 97 | [flake8] 98 | exclude = .git,.tox,+junk,coverage,dist,doc,*egg,build,tools,test/unit,docs,*__init__.py 99 | 100 | # E221: Multiple spaces before operator 101 | # E241: Multiple spaces after ',': Breaks element alignment in collections 102 | # E251: Spaces around '=' on parameter assignment 103 | # E262: Inline comment should start with '# ': Breaks tagged comments (ie: '#TODO: ') 104 | # E265: Block comment should start with '# ': ^ 105 | # E266: Too many leading '#' for block comment: Breaks declaring mega-blocks (ie: '### Section') 106 | # E303: More than 2 consecutive newlines 107 | # E722: Using bare except for cleanup-on-error is fine 108 | # (see bug report at https://github.com/PyCQA/pycodestyle/issues/703) 109 | # W292: No newline at end of file 110 | # W391: Blank line at end of file (sometimes triggered instead of the above!?)
111 | # F403: `from import *` used; unable to detect undefined names ←– Probably should be fixed… 112 | # F811: PyFlakes bug: `@ty.overload` annotation is not detected to mean `@typing.overload` 113 | # (see bug report at https://github.com/PyCQA/pyflakes/issues/561) 114 | ignore = E221,E241,E251,E262,E265,E266,E303,E722,W292,W391,F403,F811 115 | use-flake8-tabs = true 116 | max-line-length = 100 117 | tab-width = 4 118 | 119 | # E701: Multiple statements on one line 120 | # - requests_wrapper.py: Lots of symbols exported that we specifically don't use but that make sense in a reusable module 121 | # - test_*.py: Aligning `assert … not in …` and `assert … in …` kind of statements 122 | per-file-ignores = 123 | ./ipfshttpclient/requests_wrapper.py:E401,E402,F401 124 | ./test/functional/test_*.py:E272 125 | 126 | 127 | [mypy] 128 | # CLI behaviour 129 | color_output = true 130 | show_error_codes = true 131 | pretty = true 132 | 133 | # Include package directories without `__init__.py` 134 | namespace_packages = true 135 | 136 | # Extra strictness 137 | disallow_any_unimported = true 138 | #disallow_any_expr = true 139 | #disallow_any_decorated = true # Mostly OK, but fails at custom decorators 140 | disallow_any_generics = true 141 | disallow_subclassing_any = true 142 | 143 | #disallow_untyped_calls = true # Fails at many trio APIs that aren't typed yet 144 | disallow_untyped_defs = true 145 | 146 | strict_optional = true 147 | 148 | warn_redundant_casts = true 149 | warn_unused_ignores = true 150 | warn_return_any = true 151 | warn_unreachable = true 152 | 153 | [mypy-ipfshttpclient.client.*] 154 | ignore_errors = True 155 | 156 | [mypy-ipfshttpclient.client.base] 157 | ignore_errors = False 158 | 159 | 160 | [pytest] 161 | addopts = -ra --verbose 162 | console_output_style = progress 163 | testpaths = 164 | ipfshttpclient 165 | test/unit 166 | test/functional 167 | 168 | #XXX: Drop standalone mock once we're 3.6+-only 169 | mock_use_standalone_module = true 170 | --------------------------------------------------------------------------------
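Note: taken together, `tools/pre-commit` and this `tox.ini` describe the expected local workflow — `tox -e styleck -e typeck` mirrors what the commit hook runs, `tox -e py3` and `tox -e py3-httpx` drive the functional suite against both HTTP backends (with `test/run-tests.py` spawning its own daemon on a random port), and `tox -e coverage` merges the per-environment coverage files via `test/combine-coverage.py`.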