├── .gitignore ├── LICENSE.txt ├── README.md ├── examples ├── debian-11.2.0-amd64-netinst.iso.torrent └── ubuntu-21.10-desktop-amd64.iso.torrent ├── icons ├── about.svg ├── add.svg ├── directory.svg ├── file.svg ├── logo.svg ├── pause.svg ├── remove.svg └── resume.svg ├── requirements.txt ├── screenshot.png ├── torrent_cli.py ├── torrent_client ├── __init__.py ├── algorithms │ ├── __init__.py │ ├── announcer.py │ ├── downloader.py │ ├── peer_manager.py │ ├── speed_measurer.py │ ├── torrent_manager.py │ └── uploader.py ├── control │ ├── __init__.py │ ├── client.py │ ├── formatters.py │ ├── manager.py │ └── server.py ├── file_structure.py ├── models.py ├── network │ ├── __init__.py │ ├── peer_tcp_client.py │ ├── peer_tcp_server.py │ └── tracker_clients │ │ ├── __init__.py │ │ ├── base.py │ │ ├── http.py │ │ └── udp.py └── utils.py └── torrent_gui.py /.gitignore: -------------------------------------------------------------------------------- 1 | experiments/ 2 | downloads/ 3 | samples/ 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | env/ 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *,cover 50 | .hypothesis/ 51 | 52 | # Translations 53 | *.mo 54 | *.pot 55 | 56 | # Django stuff: 57 | *.log 58 | 59 | # Sphinx documentation 60 | docs/_build/ 61 | 62 | # PyBuilder 63 | target/ 64 | 65 | 66 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 67 | 68 | *.iml 69 | 70 | ## Directory-based project format: 71 | .idea/ 72 | # if you remove the above rule, at least ignore the following: 73 | 74 | # User-specific stuff: 75 | # .idea/workspace.xml 76 | # .idea/tasks.xml 77 | # .idea/dictionaries 78 | # .idea/shelf 79 | 80 | # Sensitive or high-churn files: 81 | # .idea/dataSources.ids 82 | # .idea/dataSources.xml 83 | # .idea/sqlDataSources.xml 84 | # .idea/dynamic.xml 85 | # .idea/uiDesigner.xml 86 | 87 | # Gradle: 88 | # .idea/gradle.xml 89 | # .idea/libraries 90 | 91 | # Mongo Explorer plugin: 92 | # .idea/mongoSettings.xml 93 | 94 | ## File-based project format: 95 | *.ipr 96 | *.iws 97 | 98 | ## Plugin-specific files: 99 | 100 | # IntelliJ 101 | /out/ 102 | 103 | # mpeltonen/sbt-idea plugin 104 | .idea_modules/ 105 | 106 | # JIRA plugin 107 | atlassian-ide-plugin.xml 108 | 109 | # Crashlytics plugin (for Android Studio and IntelliJ) 110 | com_crashlytics_export_strings.xml 111 | crashlytics.properties 112 | crashlytics-build.properties 113 | fabric.properties 114 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Alexander Borzunov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation 
files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | bit-torrent 2 | =========== 3 | 4 | Simple BitTorrent client built with Python's asyncio 5 | 6 |  7 | 8 | Features 9 | -------- 10 | 11 | * Downloading torrents and sharing received data 12 | * Graphical interface (supports Drag'n'Drop and can be assigned to *.torrent files in system "Open with..." dialog) 13 | * Console interface 14 | * Pausing torrents, watching progress, download and upload speed, ETA 15 | * Selecting which files in a torrent you want to download 16 | * Saving state between program restarts 17 | 18 | Implemented specifications: 19 | 20 | * The BitTorrent Protocol Specification ([BEP 0003][] with some additions from [the community spec][]) 21 | * Multitracker Metadata Extension ([BEP 0012][]) 22 | * *(partially)* UDP Tracker Protocol ([BEP 0015][]) 23 | * Tracker Returns Compact Peer Lists ([BEP 0023][]) 24 | 25 | [BEP 0003]: http://www.bittorrent.org/beps/bep_0003.html 26 | [the community spec]: https://wiki.theory.org/BitTorrentSpecification 27 | [BEP 0012]: http://www.bittorrent.org/beps/bep_0012.html 28 | [BEP 0015]: http://www.bittorrent.org/beps/bep_0015.html 29 | [BEP 0023]: http://www.bittorrent.org/beps/bep_0023.html 30 | 31 | Architecture 32 | ------------ 33 | 34 | In this project, I tried to avoid threads and use only asynchronous I/O. As a result, all algorithms and network interaction work in one thread running an asyncio event loop, but there are still a few additional threads: 35 | 36 | * Non-blocking disk I/O [isn't supported][asyncio-fs] by asyncio. To prevent freezes for up to a second 37 | during disk writing, blocking I/O runs in a [ThreadPoolExecutor][]. 38 | * PyQt GUI runs in the main thread and invokes an asyncio event loop in a separate [QThread][]. Another option is 39 | to use a Qt event loop in asyncio with [quamash][], but this increases UI reaction time, and the Qt event loop 40 | may be less efficient than asyncio's default one. 41 | 42 | [asyncio-fs]: https://github.com/python/asyncio/wiki/ThirdParty#filesystem 43 | [ThreadPoolExecutor]: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor 44 | [QThread]: https://doc.qt.io/qt-5/qthread.html 45 | [quamash]: https://github.com/harvimt/quamash 46 | 47 | Program sources depend on Python 3.5+ features: they are annotated with type hints ([PEP 0484][]) and 48 | use coroutines with `async`/`await` syntax ([PEP 0492][]). 
49 | 50 | [PEP 0484]: https://www.python.org/dev/peps/pep-0484/ 51 | [PEP 0492]: https://www.python.org/dev/peps/pep-0492/ 52 | 53 | Installation 54 | ------------ 55 | 56 | The program requires Python 3.5+ and works on Linux, macOS, and Windows. You also need PyQt 5 to run GUI. 57 | 58 | On Ubuntu 16.04 or newer, run: 59 | 60 | ```bash 61 | sudo apt-get install python3-pip python3-pyqt5 62 | git clone https://github.com/borzunov/bit-torrent.git && cd bit-torrent 63 | sudo python3 -m pip install -r requirements.txt 64 | ``` 65 | 66 | On macOS, run: 67 | 68 | ```bash 69 | python3 -m pip install PyQt5 70 | git clone https://github.com/borzunov/bit-torrent.git && cd bit-torrent 71 | python3 -m pip install -r requirements.txt 72 | ``` 73 | 74 | Usage 75 | ----- 76 | 77 | ### Graphical interface 78 | 79 | Run: 80 | 81 | python3 torrent_gui.py 82 | 83 | If torrent files are provided as command line arguments, corresponding adding dialogs will be opened. 84 | 85 | ### Console interface 86 | 87 | 1. Start a daemon: 88 | 89 | ./torrent_cli.py start & 90 | 91 | 2. *(optional)* Look at a list of files in a torrent you want to download: 92 | 93 | ./torrent_cli.py show ./examples/debian-11.2.0-amd64-netinst.iso.torrent 94 | 95 | 3. Specify a download directory and add the torrent to the daemon: 96 | 97 | ./torrent_cli.py add ./examples/debian-11.2.0-amd64-netinst.iso.torrent -d ./downloads 98 | 99 | If the torrent contains more than one file, you can select which files you want to download 100 | using `--include` and `--exclude` options. For more information run: 101 | 102 | ./torrent_cli.py add --help 103 | 104 | 4. Watch torrent status: 105 | 106 | watch ./torrent_cli.py status 107 | 108 | Add `-v` to increase output verbosity. 109 | 110 | You also can add more torrents, pause, resume, and remove them. For more information run: 111 | 112 | ./torrent_cli.py --help 113 | 114 | 5. To stop the daemon run: 115 | 116 | ./torrent_cli.py stop 117 | 118 | The daemon will restore its state after restart. 119 | 120 | ### Debug mode 121 | 122 | You can enable a verbose debug mode for GUI and CLI daemons by adding `--debug` flag after the script name. 123 | 124 | You may also want to enable asyncio debug mode. 
This is done as follows: 125 | 126 | PYTHONASYNCIODEBUG=1 python3 -Wdefault torrent_gui.py --debug 127 | 128 | Author 129 | ------ 130 | 131 | Copyright © 2016-2017 Alexander Borzunov 132 | -------------------------------------------------------------------------------- /examples/debian-11.2.0-amd64-netinst.iso.torrent: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/borzunov/bit-torrent/5cc475d786eb1c5fa20fa0807fe3413f158e500a/examples/debian-11.2.0-amd64-netinst.iso.torrent -------------------------------------------------------------------------------- /examples/ubuntu-21.10-desktop-amd64.iso.torrent: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/borzunov/bit-torrent/5cc475d786eb1c5fa20fa0807fe3413f158e500a/examples/ubuntu-21.10-desktop-amd64.iso.torrent -------------------------------------------------------------------------------- /icons/about.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 48 | -------------------------------------------------------------------------------- /icons/add.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 43 | -------------------------------------------------------------------------------- /icons/directory.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 47 | -------------------------------------------------------------------------------- /icons/file.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 50 | -------------------------------------------------------------------------------- /icons/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 86 | -------------------------------------------------------------------------------- /icons/pause.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 40 | -------------------------------------------------------------------------------- /icons/remove.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 50 | -------------------------------------------------------------------------------- /icons/resume.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 41 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohttp==3.7.4 2 | async-timeout==3.0.0 3 | bencodepy==0.9.5 4 | bitarray==0.8.1 5 | chardet==3.0.4 6 | multidict==4.5 7 | sip==5.1.0 8 | -------------------------------------------------------------------------------- /screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/borzunov/bit-torrent/5cc475d786eb1c5fa20fa0807fe3413f158e500a/screenshot.png -------------------------------------------------------------------------------- /torrent_cli.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import asyncio 5 | import logging 6 | import os 7 | import re 8 | import signal 9 | import sys 10 | from contextlib import closing, suppress 11 | from functools import partial 12 | from typing import 
List 13 | 14 | from torrent_client.control import ControlManager, ControlClient, ControlServer, DaemonExit, formatters 15 | from torrent_client.models import TorrentInfo, TorrentState 16 | 17 | 18 | logging.basicConfig(format='%(levelname)s %(asctime)s %(name)-23s %(message)s', datefmt='%H:%M:%S') 19 | 20 | 21 | async def check_daemon_absence(): 22 | try: 23 | async with ControlClient(): 24 | pass 25 | except RuntimeError: 26 | pass 27 | else: 28 | raise RuntimeError('The daemon is already running') 29 | 30 | 31 | def run_daemon(_): 32 | with closing(asyncio.get_event_loop()) as loop: 33 | loop.run_until_complete(check_daemon_absence()) 34 | 35 | control = ControlManager() 36 | loop.run_until_complete(control.start()) 37 | 38 | try: 39 | control.load_state() 40 | except Exception as err: 41 | logging.warning('Failed to load program state: %r', err) 42 | control.invoke_state_dumps() 43 | 44 | stopping = False 45 | 46 | def stop_daemon(server: ControlServer): 47 | nonlocal stopping 48 | if stopping: 49 | return 50 | stopping = True 51 | 52 | stop_task = asyncio.ensure_future(asyncio.wait([server.stop(), server.control.stop()])) 53 | stop_task.add_done_callback(lambda fut: loop.stop()) 54 | 55 | control_server = ControlServer(control, stop_daemon) 56 | loop.run_until_complete(control_server.start()) 57 | 58 | if os.name == 'posix': 59 | for sig in (signal.SIGINT, signal.SIGTERM): 60 | loop.add_signal_handler(sig, partial(stop_daemon, control_server)) 61 | 62 | loop.run_forever() 63 | 64 | 65 | def show_handler(args): 66 | torrent_info = TorrentInfo.from_file(args.filename, download_dir=None) 67 | content_description = formatters.join_lines( 68 | formatters.format_title(torrent_info.download_info, True) + 69 | formatters.format_content(torrent_info)) 70 | print(content_description, end='') 71 | 72 | 73 | PATH_SPLIT_RE = re.compile(r'/|{}'.format(re.escape(os.path.sep))) 74 | 75 | 76 | async def add_handler(args): 77 | torrents = [TorrentInfo.from_file(filename, download_dir=args.download_dir) for filename in args.filenames] 78 | 79 | if args.include: 80 | paths = args.include 81 | mode = 'whitelist' 82 | elif args.exclude: 83 | paths = args.exclude 84 | mode = 'blacklist' 85 | else: 86 | paths = None 87 | mode = None 88 | if mode is not None: 89 | if len(torrents) > 1: 90 | raise ValueError('Can\'t handle "--include" and "--exclude" when several files are added') 91 | torrent_info = torrents[0] 92 | if torrent_info.download_info.single_file_mode: 93 | raise ValueError("Can't select files in a single-file torrent") 94 | 95 | paths = [PATH_SPLIT_RE.split(path) for path in paths] 96 | torrent_info.download_info.select_files(paths, mode) 97 | 98 | async with ControlClient() as client: 99 | for info in torrents: 100 | await client.execute(partial(ControlManager.add, torrent_info=info)) 101 | 102 | 103 | async def control_action_handler(args): 104 | action = getattr(ControlManager, args.action) 105 | torrents = [TorrentInfo.from_file(filename, download_dir=None) for filename in args.filenames] 106 | # FIXME: Execute action with all torrents if torrents == [] 107 | 108 | async with ControlClient() as client: 109 | for info in torrents: 110 | await client.execute(partial(action, info_hash=info.download_info.info_hash)) 111 | 112 | 113 | def status_server_handler(manager: ControlManager) -> List[TorrentState]: 114 | torrents = manager.get_torrents() 115 | torrents.sort(key=lambda info: info.download_info.suggested_name) 116 | return [TorrentState(torrent_info) for torrent_info in torrents] 117 | 118 | 
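# Handlers like status_server_handler above are not executed in the CLI
# process: ControlClient.execute() sends the callable to the running daemon,
# which invokes it with its ControlManager and returns the result (or the
# raised exception) over the control connection. The coroutines below are the
# CLI-side halves of those round trips.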
119 | async def status_handler(args): 120 | async with ControlClient() as client: 121 | torrent_states = await client.execute(status_server_handler) 122 | if not torrent_states: 123 | print('No torrents added') 124 | return 125 | 126 | paragraphs = [formatters.join_lines(formatters.format_title(state, args.verbose) + 127 | formatters.format_status(state, args.verbose)) 128 | for state in torrent_states] 129 | print('\n'.join(paragraphs).rstrip()) 130 | 131 | 132 | def stop_server_handler(_: ControlManager): 133 | raise DaemonExit() 134 | 135 | 136 | async def stop_handler(_): 137 | async with ControlClient() as client: 138 | with suppress(DaemonExit): 139 | await client.execute(stop_server_handler) 140 | 141 | 142 | DEFAULT_DOWNLOAD_DIR = 'downloads' 143 | 144 | 145 | def run_in_event_loop(coro_function, args): 146 | with closing(asyncio.get_event_loop()) as loop: 147 | loop.run_until_complete(coro_function(args)) 148 | 149 | 150 | def main(): 151 | parser = argparse.ArgumentParser(description='A prototype of BitTorrent client (console management tool)') 152 | parser.add_argument('--debug', action='store_true', 153 | help='Show debug messages') 154 | parser.set_defaults(func=lambda args: print('Use option "--help" to show usage.', file=sys.stderr)) 155 | subparsers = parser.add_subparsers(description='Specify an action before "--help" to show parameters for it.', 156 | metavar='ACTION', dest='action') 157 | 158 | subparser = subparsers.add_parser('start', help='Start the daemon') 159 | subparser.set_defaults(func=run_daemon) 160 | 161 | subparser = subparsers.add_parser('stop', help='Stop the daemon') 162 | subparser.set_defaults(func=partial(run_in_event_loop, stop_handler)) 163 | 164 | subparser = subparsers.add_parser('show', help="Show contents of a torrent file " 165 | "(you don't need to start the daemon for that)") 166 | subparser.add_argument('filename', help='Torrent file name') 167 | subparser.set_defaults(func=show_handler) 168 | 169 | subparser = subparsers.add_parser('add', help='Add torrent from file') 170 | subparser.add_argument('filenames', nargs='+', 171 | help='Torrent file names') 172 | subparser.add_argument('-d', '--download-dir', default=DEFAULT_DOWNLOAD_DIR, 173 | help='Download directory') 174 | group = subparser.add_mutually_exclusive_group() 175 | group.add_argument('--include', action='append', 176 | help='Download only files and directories specified in "--include" options') 177 | group.add_argument('--exclude', action='append', 178 | help='Download all files and directories except those that specified in "--exclude" options') 179 | subparser.set_defaults(func=partial(run_in_event_loop, add_handler)) 180 | 181 | control_commands = ['pause', 'resume', 'remove'] 182 | for command_name in control_commands: 183 | subparser = subparsers.add_parser(command_name, help='{} torrent from file'.format(command_name.capitalize())) 184 | subparser.add_argument('filenames', nargs='+', 185 | help='Torrent file names') 186 | subparser.set_defaults(func=partial(run_in_event_loop, control_action_handler)) 187 | 188 | subparser = subparsers.add_parser('status', help='Show status of all torrents') 189 | subparser.add_argument('-v', '--verbose', action='store_true', 190 | help='Increase output verbosity') 191 | subparser.set_defaults(func=partial(run_in_event_loop, status_handler)) 192 | 193 | arguments = parser.parse_args() 194 | if not arguments.debug: 195 | logging.disable(logging.INFO) 196 | try: 197 | arguments.func(arguments) 198 | except (IOError, ValueError, RuntimeError) as 
e: 199 | print('Error: {}'.format(e), file=sys.stderr) 200 | 201 | 202 | if __name__ == '__main__': 203 | sys.exit(main()) 204 | -------------------------------------------------------------------------------- /torrent_client/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/borzunov/bit-torrent/5cc475d786eb1c5fa20fa0807fe3413f158e500a/torrent_client/__init__.py -------------------------------------------------------------------------------- /torrent_client/algorithms/__init__.py: -------------------------------------------------------------------------------- 1 | from torrent_client.algorithms.torrent_manager import * 2 | -------------------------------------------------------------------------------- /torrent_client/algorithms/announcer.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from typing import Optional 4 | 5 | from torrent_client.algorithms.peer_manager import PeerManager 6 | from torrent_client.models import TorrentInfo 7 | from torrent_client.network import BaseTrackerClient, EventType, create_tracker_client 8 | 9 | 10 | class Announcer: 11 | def __init__(self, torrent_info: TorrentInfo, our_peer_id: bytes, server_port: int, logger: logging.Logger, 12 | peer_manager: PeerManager): 13 | self._torrent_info = torrent_info 14 | self._download_info = torrent_info.download_info 15 | self._our_peer_id = our_peer_id 16 | self._server_port = server_port 17 | 18 | self._logger = logger 19 | self._peer_manager = peer_manager 20 | 21 | self._last_tracker_client = None 22 | self._more_peers_requested = asyncio.Event() 23 | self._task = None # type: Optional[asyncio.Task] 24 | 25 | @property 26 | def last_tracker_client(self) -> BaseTrackerClient: 27 | return self._last_tracker_client 28 | 29 | @property 30 | def more_peers_requested(self) -> asyncio.Event: 31 | return self._more_peers_requested 32 | 33 | FAKE_SERVER_PORT = 6881 34 | DEFAULT_MIN_INTERVAL = 90 35 | 36 | async def try_to_announce(self, event: EventType) -> bool: 37 | server_port = self._server_port if self._server_port is not None else Announcer.FAKE_SERVER_PORT 38 | 39 | tier = None 40 | url = None 41 | lift_url = False 42 | try: 43 | for tier in self._torrent_info.announce_list: 44 | for url in tier: 45 | try: 46 | client = create_tracker_client(url, self._download_info, self._our_peer_id) 47 | await client.announce(server_port, event) 48 | except asyncio.CancelledError: 49 | raise 50 | except Exception as e: 51 | self._logger.info('announce to "%s" failed: %r', url, e) 52 | else: 53 | peer_count = len(client.peers) if client.peers else 'no' 54 | self._logger.debug('announce to "%s" succeed (%s peers, interval = %s, min_interval = %s)', 55 | url, peer_count, client.interval, client.min_interval) 56 | 57 | self._last_tracker_client = client 58 | lift_url = True 59 | return True 60 | return False 61 | finally: 62 | if lift_url: 63 | tier.remove(url) 64 | tier.insert(0, url) 65 | 66 | async def execute(self): 67 | try: 68 | while True: 69 | if self._last_tracker_client.min_interval is not None: 70 | min_interval = self._last_tracker_client.min_interval 71 | else: 72 | min_interval = min(Announcer.DEFAULT_MIN_INTERVAL, self._last_tracker_client.interval) 73 | await asyncio.sleep(min_interval) 74 | 75 | default_interval = self._last_tracker_client.interval 76 | try: 77 | await asyncio.wait_for(self._more_peers_requested.wait(), default_interval - min_interval) 78 | 
more_peers = True 79 | self._more_peers_requested.clear() 80 | except asyncio.TimeoutError: 81 | more_peers = False 82 | 83 | await self.try_to_announce(EventType.none) 84 | # TODO: if more_peers, maybe rerequest in case of exception 85 | 86 | self._peer_manager.connect_to_peers(self._last_tracker_client.peers, more_peers) 87 | finally: 88 | await self.try_to_announce(EventType.stopped) 89 | -------------------------------------------------------------------------------- /torrent_client/algorithms/downloader.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import hashlib 3 | import logging 4 | import random 5 | import time 6 | from collections import deque, OrderedDict 7 | from math import ceil 8 | from typing import List, Optional, Tuple, Iterator 9 | 10 | from torrent_client.algorithms.announcer import Announcer 11 | from torrent_client.algorithms.peer_manager import PeerData, PeerManager 12 | from torrent_client.file_structure import FileStructure 13 | from torrent_client.models import BlockRequestFuture, Peer, TorrentInfo, TorrentState 14 | from torrent_client.network import EventType 15 | from torrent_client.utils import floor_to, import_signals 16 | 17 | 18 | QObject, pyqtSignal = import_signals() 19 | 20 | 21 | class NotEnoughPeersError(RuntimeError): 22 | pass 23 | 24 | 25 | class NoRequestsError(RuntimeError): 26 | pass 27 | 28 | 29 | class Downloader(QObject): 30 | if pyqtSignal: 31 | progress = pyqtSignal() 32 | 33 | def __init__(self, torrent_info: TorrentInfo, our_peer_id: bytes, 34 | logger: logging.Logger, file_structure: FileStructure, 35 | peer_manager: PeerManager, announcer: Announcer): 36 | super().__init__() 37 | 38 | self._torrent_info = torrent_info 39 | self._download_info = torrent_info.download_info 40 | self._our_peer_id = our_peer_id 41 | 42 | self._logger = logger 43 | self._file_structure = file_structure 44 | self._peer_manager = peer_manager 45 | self._announcer = announcer 46 | 47 | self._request_executors = [] # type: List[asyncio.Task] 48 | 49 | self._executors_processed_requests = [] # type: List[List[BlockRequestFuture]] 50 | 51 | self._non_started_pieces = None # type: List[int] 52 | self._download_start_time = None # type: float 53 | 54 | self._piece_block_queue = OrderedDict() 55 | 56 | self._endgame_mode = False 57 | self._tasks_waiting_for_more_peers = 0 58 | self._request_deque_relevant = asyncio.Event() 59 | 60 | self._last_piece_finish_signal_time = None # type: Optional[float] 61 | 62 | REQUEST_LENGTH = 2 ** 14 63 | 64 | def _get_piece_position(self, index: int) -> Tuple[int, int]: 65 | piece_offset = index * self._download_info.piece_length 66 | cur_piece_length = self._download_info.get_real_piece_length(index) 67 | return piece_offset, cur_piece_length 68 | 69 | FLAG_TRANSMISSION_TIMEOUT = 0.5 70 | 71 | def _send_cancels(self, request: BlockRequestFuture): 72 | performers = request.prev_performers 73 | if request.performer is not None: 74 | performers.add(request.performer) 75 | source = request.result() 76 | peer_data = self._peer_manager.peer_data 77 | for peer in performers - {source}: 78 | if peer in peer_data: 79 | peer_data[peer].client.send_request(request, cancel=True) 80 | 81 | def _start_downloading_piece(self, piece_index: int): 82 | piece_info = self._download_info.pieces[piece_index] 83 | 84 | blocks_expected = piece_info.blocks_expected 85 | request_deque = deque() 86 | for block_begin in range(0, piece_info.length, Downloader.REQUEST_LENGTH): 87 | block_end = 
min(block_begin + Downloader.REQUEST_LENGTH, piece_info.length) 88 | block_length = block_end - block_begin 89 | request = BlockRequestFuture(piece_index, block_begin, block_length) 90 | request.add_done_callback(self._send_cancels) 91 | 92 | blocks_expected.add(request) 93 | request_deque.append(request) 94 | self._piece_block_queue[piece_index] = request_deque 95 | 96 | self._download_info.interesting_pieces.add(piece_index) 97 | peer_data = self._peer_manager.peer_data 98 | for peer in piece_info.owners: 99 | peer_data[peer].client.am_interested = True 100 | 101 | concurrent_peers_count = sum(1 for peer, data in peer_data.items() if data.queue_size) 102 | self._logger.debug('piece %s started (owned by %s alive peers, concurrency: %s peers)', 103 | piece_index, len(piece_info.owners), concurrent_peers_count) 104 | 105 | PIECE_FINISH_SIGNAL_MIN_INTERVAL = 1 106 | 107 | def _finish_downloading_piece(self, piece_index: int): 108 | piece_info = self._download_info.pieces[piece_index] 109 | 110 | piece_info.mark_as_downloaded() 111 | self._download_info.downloaded_piece_count += 1 112 | 113 | self._download_info.interesting_pieces.remove(piece_index) 114 | peer_data = self._peer_manager.peer_data 115 | for peer in piece_info.owners: 116 | client = peer_data[peer].client 117 | for index in self._download_info.interesting_pieces: 118 | if client.piece_owned[index]: 119 | break 120 | else: 121 | client.am_interested = False 122 | 123 | for data in peer_data.values(): 124 | data.client.send_have(piece_index) 125 | 126 | self._logger.debug('piece %s finished', piece_index) 127 | 128 | torrent_state = TorrentState(self._torrent_info) 129 | self._logger.info('progress %.1lf%% (%s / %s pieces)', floor_to(torrent_state.progress * 100, 1), 130 | self._download_info.downloaded_piece_count, torrent_state.selected_piece_count) 131 | 132 | if pyqtSignal and self._download_info.downloaded_piece_count < torrent_state.selected_piece_count: 133 | cur_time = time.time() 134 | if self._last_piece_finish_signal_time is None or \ 135 | cur_time - self._last_piece_finish_signal_time >= Downloader.PIECE_FINISH_SIGNAL_MIN_INTERVAL: 136 | self.progress.emit() 137 | self._last_piece_finish_signal_time = time.time() 138 | # If the signal isn't emitted, the GUI will be updated after the next speed measurement anyway 139 | 140 | async def _validate_piece(self, piece_index: int): 141 | piece_info = self._download_info.pieces[piece_index] 142 | 143 | assert piece_info.are_all_blocks_downloaded() 144 | 145 | piece_offset, cur_piece_length = self._get_piece_position(piece_index) 146 | data = await self._file_structure.read(piece_offset, cur_piece_length) 147 | actual_digest = hashlib.sha1(data).digest() 148 | if actual_digest == piece_info.piece_hash: 149 | self._finish_downloading_piece(piece_index) 150 | return 151 | 152 | peer_data = self._peer_manager.peer_data 153 | for peer in piece_info.sources: 154 | self._download_info.increase_distrust(peer) 155 | if self._download_info.is_banned(peer): 156 | self._logger.info('Host %s banned', peer.host) 157 | peer_data[peer].client_task.cancel() 158 | 159 | piece_info.reset_content() 160 | self._start_downloading_piece(piece_index) 161 | 162 | self._logger.debug('piece %s not valid, redownloading', piece_index) 163 | 164 | _INF = float('inf') 165 | 166 | HANG_PENALTY_DURATION = 10 167 | HANG_PENALTY_COEFF = 100 168 | 169 | def get_peer_download_rate(self, peer: Peer) -> int: 170 | data = self._peer_manager.peer_data[peer] 171 | 172 | rate = data.client.downloaded # To reach 
maximal download speed 173 | if data.hanged_time is not None and time.time() - data.hanged_time <= Downloader.HANG_PENALTY_DURATION: 174 | rate //= Downloader.HANG_PENALTY_COEFF 175 | return rate 176 | 177 | DOWNLOAD_PEER_COUNT = 15 178 | 179 | def _request_piece_blocks(self, max_pending_count: int, piece_index: int) -> Iterator[BlockRequestFuture]: 180 | if not max_pending_count: 181 | return 182 | piece_info = self._download_info.pieces[piece_index] 183 | peer_data = self._peer_manager.peer_data 184 | 185 | request_deque = self._piece_block_queue[piece_index] 186 | performer = None 187 | performer_data = None 188 | pending_count = 0 189 | while request_deque: 190 | request = request_deque[0] 191 | if request.done(): 192 | request_deque.popleft() 193 | 194 | yield request 195 | continue 196 | 197 | if performer is None or not performer_data.is_free(): 198 | available_peers = {peer for peer in piece_info.owners 199 | if peer_data[peer].is_available()} 200 | if not available_peers: 201 | return 202 | performer = max(available_peers, key=self.get_peer_download_rate) 203 | performer_data = peer_data[performer] 204 | request_deque.popleft() 205 | performer_data.queue_size += 1 206 | 207 | request.performer = performer 208 | performer_data.client.send_request(request) 209 | yield request 210 | 211 | pending_count += 1 212 | if pending_count == max_pending_count: 213 | return 214 | 215 | RAREST_PIECE_COUNT_TO_SELECT = 10 216 | 217 | def _select_new_piece(self, *, force: bool) -> Optional[int]: 218 | is_appropriate = PeerData.is_free if force else PeerData.is_available 219 | appropriate_peers = {peer for peer, data in self._peer_manager.peer_data.items() if is_appropriate(data)} 220 | if not appropriate_peers: 221 | return None 222 | 223 | pieces = self._download_info.pieces 224 | available_pieces = [index for index in self._non_started_pieces 225 | if appropriate_peers & pieces[index].owners] 226 | if not available_pieces: 227 | return None 228 | 229 | available_pieces.sort(key=lambda index: len(pieces[index].owners)) 230 | piece_count_to_select = min(len(available_pieces), Downloader.RAREST_PIECE_COUNT_TO_SELECT) 231 | return available_pieces[random.randint(0, piece_count_to_select - 1)] 232 | 233 | _typical_piece_length = 2 ** 20 234 | _requests_per_piece = ceil(_typical_piece_length / REQUEST_LENGTH) 235 | _desired_request_stock = DOWNLOAD_PEER_COUNT * PeerData.DOWNLOAD_REQUEST_QUEUE_SIZE 236 | DESIRED_PIECE_STOCK = ceil(_desired_request_stock / _requests_per_piece) 237 | 238 | def _request_blocks(self, max_pending_count: int) -> List[BlockRequestFuture]: 239 | result = [] 240 | pending_count = 0 241 | consumed_pieces = [] 242 | try: 243 | for piece_index, request_deque in self._piece_block_queue.items(): 244 | piece_requests = list(self._request_piece_blocks(max_pending_count - pending_count, piece_index)) 245 | result += piece_requests 246 | pending_count += sum(1 for request in piece_requests if not request.done()) 247 | if not request_deque: 248 | consumed_pieces.append(piece_index) 249 | if pending_count == max_pending_count: 250 | return result 251 | 252 | piece_stock = len(self._piece_block_queue) - len(consumed_pieces) 253 | piece_stock_small = (piece_stock < Downloader.DESIRED_PIECE_STOCK) 254 | new_piece_index = self._select_new_piece(force=piece_stock_small) 255 | if new_piece_index is not None: 256 | self._non_started_pieces.remove(new_piece_index) 257 | self._start_downloading_piece(new_piece_index) 258 | 259 | result += list(self._request_piece_blocks(max_pending_count - 
pending_count, new_piece_index)) 260 | if not self._piece_block_queue[new_piece_index]: 261 | consumed_pieces.append(new_piece_index) 262 | finally: 263 | for piece_index in consumed_pieces: 264 | del self._piece_block_queue[piece_index] 265 | 266 | if not result: 267 | if not self._piece_block_queue and not self._non_started_pieces: 268 | raise NoRequestsError('No more undistributed requests') 269 | raise NotEnoughPeersError('No peers to perform a request') 270 | return result 271 | 272 | DOWNLOAD_PEERS_ACTIVE_TO_REQUEST_MORE_PEERS = 2 273 | 274 | NO_PEERS_SLEEP_TIME = 3 275 | STARTING_DURATION = 5 276 | NO_PEERS_SLEEP_TIME_ON_STARTING = 1 277 | 278 | RECONNECT_TIMEOUT = 50 279 | 280 | async def _wait_more_peers(self): 281 | self._tasks_waiting_for_more_peers += 1 282 | download_peers_active = Downloader.DOWNLOAD_PEER_COUNT - self._tasks_waiting_for_more_peers 283 | if download_peers_active <= Downloader.DOWNLOAD_PEERS_ACTIVE_TO_REQUEST_MORE_PEERS and \ 284 | len(self._peer_manager.peer_data) < PeerManager.MAX_PEERS_TO_ACTIVELY_CONNECT: 285 | cur_time = time.time() 286 | if self._peer_manager.last_connecting_time is None or \ 287 | cur_time - self._peer_manager.last_connecting_time >= Downloader.RECONNECT_TIMEOUT: 288 | # This can recover connections to peers after temporary loss of Internet connection 289 | self._logger.info('trying to reconnect to peers') 290 | self._peer_manager.connect_to_peers(self._announcer.last_tracker_client.peers, True) 291 | 292 | self._announcer.more_peers_requested.set() 293 | 294 | if time.time() - self._download_start_time <= Downloader.STARTING_DURATION: 295 | sleep_time = Downloader.NO_PEERS_SLEEP_TIME_ON_STARTING 296 | else: 297 | sleep_time = Downloader.NO_PEERS_SLEEP_TIME 298 | await asyncio.sleep(sleep_time) 299 | self._tasks_waiting_for_more_peers -= 1 300 | 301 | def _get_non_finished_pieces(self) -> List[int]: 302 | pieces = self._download_info.pieces 303 | return [i for i in range(self._download_info.piece_count) 304 | if pieces[i].selected and not pieces[i].downloaded] 305 | 306 | async def _wait_more_requests(self): 307 | if not self._endgame_mode: 308 | self._logger.info('entering endgame mode (remaining pieces: %s)', 309 | ', '.join(map(str, self._get_non_finished_pieces()))) 310 | self._endgame_mode = True 311 | 312 | await self._request_deque_relevant.wait() 313 | 314 | REQUEST_TIMEOUT = 6 315 | REQUEST_TIMEOUT_ENDGAME = 1 316 | 317 | async def _execute_block_requests(self, processed_requests: List[BlockRequestFuture]): 318 | while True: 319 | try: 320 | max_pending_count = PeerData.DOWNLOAD_REQUEST_QUEUE_SIZE - len(processed_requests) 321 | if max_pending_count > 0: 322 | processed_requests += self._request_blocks(max_pending_count) 323 | except NotEnoughPeersError: 324 | if not processed_requests: 325 | await self._wait_more_peers() 326 | continue 327 | except NoRequestsError: 328 | if not processed_requests: 329 | if not any(self._executors_processed_requests): 330 | self._request_deque_relevant.set() 331 | return 332 | await self._wait_more_requests() 333 | continue 334 | 335 | if self._endgame_mode: 336 | request_timeout = Downloader.REQUEST_TIMEOUT_ENDGAME 337 | else: 338 | request_timeout = Downloader.REQUEST_TIMEOUT 339 | requests_done, requests_pending = await asyncio.wait( 340 | processed_requests, return_when=asyncio.FIRST_COMPLETED, timeout=request_timeout) 341 | 342 | peer_data = self._peer_manager.peer_data 343 | if len(requests_pending) < len(processed_requests): 344 | pieces = self._download_info.pieces 345 | for request in 
requests_done: 346 | if request.performer in peer_data: 347 | peer_data[request.performer].queue_size -= 1 348 | 349 | piece_info = pieces[request.piece_index] 350 | if not piece_info.validating and not piece_info.downloaded and not piece_info.blocks_expected: 351 | piece_info.validating = True 352 | await self._validate_piece(request.piece_index) 353 | piece_info.validating = False 354 | processed_requests.clear() 355 | processed_requests += list(requests_pending) 356 | else: 357 | hanged_peers = {request.performer for request in requests_pending} & set(peer_data.keys()) 358 | cur_time = time.time() 359 | for peer in hanged_peers: 360 | peer_data[peer].hanged_time = cur_time 361 | if hanged_peers: 362 | self._logger.debug('peers %s hanged', ', '.join(map(str, hanged_peers))) 363 | 364 | for request in requests_pending: 365 | if request.performer in peer_data: 366 | peer_data[request.performer].queue_size -= 1 367 | request.prev_performers.add(request.performer) 368 | request.performer = None 369 | 370 | self._piece_block_queue.setdefault(request.piece_index, deque()).append(request) 371 | processed_requests.clear() 372 | self._request_deque_relevant.set() 373 | self._request_deque_relevant.clear() 374 | 375 | async def run(self): 376 | self._non_started_pieces = self._get_non_finished_pieces() 377 | self._download_start_time = time.time() 378 | if not self._non_started_pieces: 379 | self._download_info.complete = True 380 | return 381 | 382 | random.shuffle(self._non_started_pieces) 383 | 384 | for _ in range(Downloader.DOWNLOAD_PEER_COUNT): 385 | processed_requests = [] 386 | self._executors_processed_requests.append(processed_requests) 387 | self._request_executors.append(asyncio.ensure_future(self._execute_block_requests(processed_requests))) 388 | 389 | await asyncio.wait(self._request_executors) 390 | 391 | self._download_info.complete = True 392 | await self._announcer.try_to_announce(EventType.completed) 393 | self._logger.info('file download complete') 394 | 395 | if pyqtSignal: 396 | self.progress.emit() 397 | 398 | # for peer, data in self._peer_manager.peer_data.items(): 399 | # if data.client.is_seed(): 400 | # data.client_task.cancel() 401 | 402 | async def stop(self): 403 | for task in self._request_executors: 404 | task.cancel() 405 | if self._request_executors: 406 | await asyncio.wait(self._request_executors) 407 | -------------------------------------------------------------------------------- /torrent_client/algorithms/peer_manager.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import time 4 | from typing import Dict, Optional, Sequence 5 | 6 | from torrent_client.file_structure import FileStructure 7 | from torrent_client.models import Peer, TorrentInfo 8 | from torrent_client.network import PeerTCPClient 9 | 10 | 11 | class PeerData: 12 | DOWNLOAD_REQUEST_QUEUE_SIZE = 150 13 | 14 | def __init__(self, client: PeerTCPClient, client_task: asyncio.Task, connected_time: float): 15 | self._client = client 16 | self._client_task = client_task 17 | self._connected_time = connected_time 18 | self.hanged_time = None # type: Optional[float] 19 | self.queue_size = 0 20 | 21 | @property 22 | def client(self) -> PeerTCPClient: 23 | return self._client 24 | 25 | @property 26 | def client_task(self) -> asyncio.Task: 27 | return self._client_task 28 | 29 | @property 30 | def connected_time(self) -> float: 31 | return self._connected_time 32 | 33 | def is_free(self) -> bool: 34 | return self.queue_size < 
PeerData.DOWNLOAD_REQUEST_QUEUE_SIZE 35 | 36 | def is_available(self) -> bool: 37 | return self.is_free() and not self._client.peer_choking 38 | 39 | 40 | class PeerManager: 41 | def __init__(self, torrent_info: TorrentInfo, our_peer_id: bytes, 42 | logger: logging.Logger, file_structure: FileStructure): 43 | # self._torrent_info = torrent_info 44 | self._download_info = torrent_info.download_info 45 | self._statistics = self._download_info.session_statistics 46 | self._our_peer_id = our_peer_id 47 | 48 | self._logger = logger 49 | self._file_structure = file_structure 50 | 51 | self._peer_data = {} 52 | self._client_executors = {} # type: Dict[Peer, asyncio.Task] 53 | self._keeping_alive_executor = None # type: Optional[asyncio.Task] 54 | self._last_connecting_time = None # type: Optional[float] 55 | 56 | @property 57 | def peer_data(self) -> Dict[Peer, PeerData]: 58 | return self._peer_data 59 | 60 | @property 61 | def last_connecting_time(self) -> int: 62 | return self._last_connecting_time 63 | 64 | async def _execute_peer_client(self, peer: Peer, client: PeerTCPClient, *, need_connect: bool): 65 | try: 66 | if need_connect: 67 | await client.connect(self._download_info, self._file_structure) 68 | else: 69 | client.confirm_info_hash(self._download_info, self._file_structure) 70 | 71 | self._peer_data[peer] = PeerData(client, asyncio.current_task(), time.time()) 72 | self._statistics.peer_count += 1 73 | 74 | await client.run() 75 | except asyncio.CancelledError: 76 | raise 77 | except Exception as e: 78 | self._logger.debug('%s disconnected because of %r', peer, e) 79 | finally: 80 | if peer in self._peer_data: 81 | self._statistics.peer_count -= 1 82 | del self._peer_data[peer] 83 | 84 | for info in self._download_info.pieces: 85 | if peer in info.owners: 86 | info.owners.remove(peer) 87 | if peer in self._statistics.peer_last_download: 88 | del self._statistics.peer_last_download[peer] 89 | if peer in self._statistics.peer_last_upload: 90 | del self._statistics.peer_last_upload[peer] 91 | 92 | client.close() 93 | 94 | del self._client_executors[peer] 95 | 96 | KEEP_ALIVE_TIMEOUT = 2 * 60 97 | 98 | async def _execute_keeping_alive(self): 99 | while True: 100 | await asyncio.sleep(PeerManager.KEEP_ALIVE_TIMEOUT) 101 | 102 | self._logger.debug('broadcasting keep-alives to %s alive peers', len(self._peer_data)) 103 | for data in self._peer_data.values(): 104 | data.client.send_keep_alive() 105 | 106 | MAX_PEERS_TO_ACTIVELY_CONNECT = 30 107 | MAX_PEERS_TO_ACCEPT = 55 108 | 109 | def connect_to_peers(self, peers: Sequence[Peer], force: bool): 110 | peers = list({peer for peer in peers 111 | if peer not in self._client_executors and not self._download_info.is_banned(peer)}) 112 | if force: 113 | max_peers_count = PeerManager.MAX_PEERS_TO_ACCEPT 114 | else: 115 | max_peers_count = PeerManager.MAX_PEERS_TO_ACTIVELY_CONNECT 116 | peers_to_connect_count = max(max_peers_count - len(self._peer_data), 0) 117 | self._logger.debug('trying to connect to %s new peers', min(len(peers), peers_to_connect_count)) 118 | 119 | for peer in peers[:peers_to_connect_count]: 120 | client = PeerTCPClient(self._our_peer_id, peer) 121 | self._client_executors[peer] = asyncio.ensure_future( 122 | self._execute_peer_client(peer, client, need_connect=True)) 123 | 124 | self._last_connecting_time = time.time() 125 | 126 | def accept_client(self, peer: Peer, client: PeerTCPClient): 127 | if len(self._peer_data) > PeerManager.MAX_PEERS_TO_ACCEPT or self._download_info.is_banned(peer) or \ 128 | peer in 
self._client_executors: 129 | client.close() 130 | return 131 | self._logger.debug('accepted connection from %s', peer) 132 | 133 | self._client_executors[peer] = asyncio.ensure_future( 134 | self._execute_peer_client(peer, client, need_connect=False)) 135 | 136 | def invoke(self): 137 | self._keeping_alive_executor = asyncio.ensure_future(self._execute_keeping_alive()) 138 | 139 | async def stop(self): 140 | tasks = [] 141 | if self._keeping_alive_executor is not None: 142 | tasks.append(self._keeping_alive_executor) 143 | tasks += list(self._client_executors.values()) 144 | 145 | for task in tasks: 146 | task.cancel() 147 | if tasks: 148 | await asyncio.wait(tasks) 149 | -------------------------------------------------------------------------------- /torrent_client/algorithms/speed_measurer.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from collections import deque 3 | 4 | from torrent_client.models import SessionStatistics 5 | from torrent_client.utils import import_signals 6 | 7 | 8 | QObject, pyqtSignal = import_signals() 9 | 10 | 11 | class SpeedMeasurer(QObject): 12 | if pyqtSignal: 13 | updated = pyqtSignal() 14 | 15 | def __init__(self, statistics: SessionStatistics): 16 | super().__init__() 17 | 18 | self._statistics = statistics 19 | 20 | SPEED_MEASUREMENT_PERIOD = 60 21 | SPEED_UPDATE_TIMEOUT = 2 22 | 23 | assert SPEED_MEASUREMENT_PERIOD % SPEED_UPDATE_TIMEOUT == 0 24 | 25 | async def execute(self): 26 | max_queue_length = SpeedMeasurer.SPEED_MEASUREMENT_PERIOD // SpeedMeasurer.SPEED_UPDATE_TIMEOUT 27 | 28 | downloaded_queue = deque() 29 | uploaded_queue = deque() 30 | while True: 31 | downloaded_queue.append(self._statistics.downloaded_per_session) 32 | uploaded_queue.append(self._statistics.uploaded_per_session) 33 | 34 | if len(downloaded_queue) > 1: 35 | period_in_seconds = (len(downloaded_queue) - 1) * SpeedMeasurer.SPEED_UPDATE_TIMEOUT 36 | downloaded_per_period = downloaded_queue[-1] - downloaded_queue[0] 37 | uploaded_per_period = uploaded_queue[-1] - uploaded_queue[0] 38 | self._statistics.download_speed = downloaded_per_period / period_in_seconds 39 | self._statistics.upload_speed = uploaded_per_period / period_in_seconds 40 | 41 | if len(downloaded_queue) > max_queue_length: 42 | downloaded_queue.popleft() 43 | uploaded_queue.popleft() 44 | 45 | if pyqtSignal: 46 | self.updated.emit() 47 | 48 | await asyncio.sleep(SpeedMeasurer.SPEED_UPDATE_TIMEOUT) 49 | -------------------------------------------------------------------------------- /torrent_client/algorithms/torrent_manager.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import random 4 | from typing import List, Optional 5 | 6 | from torrent_client.algorithms.announcer import Announcer 7 | from torrent_client.algorithms.downloader import Downloader 8 | from torrent_client.algorithms.peer_manager import PeerManager 9 | from torrent_client.algorithms.speed_measurer import SpeedMeasurer 10 | from torrent_client.algorithms.uploader import Uploader 11 | from torrent_client.file_structure import FileStructure 12 | from torrent_client.models import Peer, TorrentInfo, DownloadInfo 13 | from torrent_client.network import EventType, PeerTCPClient 14 | from torrent_client.utils import import_signals 15 | 16 | 17 | QObject, pyqtSignal = import_signals() 18 | 19 | 20 | __all__ = ['TorrentManager'] 21 | 22 | 23 | class TorrentManager(QObject): 24 | if pyqtSignal: 25 | state_changed = 
pyqtSignal() 26 | 27 | LOGGER_LEVEL = logging.DEBUG 28 | SHORT_NAME_LEN = 19 29 | 30 | def __init__(self, torrent_info: TorrentInfo, our_peer_id: bytes, server_port: Optional[int]): 31 | super().__init__() 32 | 33 | self._torrent_info = torrent_info 34 | download_info = torrent_info.download_info # type: DownloadInfo 35 | download_info.reset_run_state() 36 | download_info.reset_stats() 37 | 38 | short_name = download_info.suggested_name 39 | if len(short_name) > TorrentManager.SHORT_NAME_LEN: 40 | short_name = short_name[:TorrentManager.SHORT_NAME_LEN] + '..' 41 | self._logger = logging.getLogger('"{}"'.format(short_name)) 42 | self._logger.setLevel(TorrentManager.LOGGER_LEVEL) 43 | 44 | self._executors = [] # type: List[asyncio.Task] 45 | 46 | self._file_structure = FileStructure(torrent_info.download_dir, torrent_info.download_info) 47 | 48 | self._peer_manager = PeerManager(torrent_info, our_peer_id, self._logger, self._file_structure) 49 | self._announcer = Announcer(torrent_info, our_peer_id, server_port, self._logger, self._peer_manager) 50 | self._downloader = Downloader(torrent_info, our_peer_id, self._logger, self._file_structure, 51 | self._peer_manager, self._announcer) 52 | self._uploader = Uploader(torrent_info, self._logger, self._peer_manager) 53 | self._speed_measurer = SpeedMeasurer(torrent_info.download_info.session_statistics) 54 | if pyqtSignal: 55 | self._downloader.progress.connect(self.state_changed) 56 | self._speed_measurer.updated.connect(self.state_changed) 57 | 58 | ANNOUNCE_FAILED_SLEEP_TIME = 3 59 | 60 | def _shuffle_announce_tiers(self): 61 | for tier in self._torrent_info.announce_list: 62 | random.shuffle(tier) 63 | 64 | async def run(self): 65 | self._shuffle_announce_tiers() 66 | while not await self._announcer.try_to_announce(EventType.started): 67 | await asyncio.sleep(TorrentManager.ANNOUNCE_FAILED_SLEEP_TIME) 68 | 69 | self._peer_manager.connect_to_peers(self._announcer.last_tracker_client.peers, True) 70 | 71 | self._executors += [asyncio.ensure_future(coro) for coro in [ 72 | self._announcer.execute(), 73 | self._uploader.execute(), 74 | self._speed_measurer.execute(), 75 | ]] 76 | 77 | self._peer_manager.invoke() 78 | await self._downloader.run() 79 | 80 | def accept_client(self, peer: Peer, client: PeerTCPClient): 81 | self._peer_manager.accept_client(peer, client) 82 | 83 | async def stop(self): 84 | await self._downloader.stop() 85 | await self._peer_manager.stop() 86 | 87 | executors = [task for task in self._executors if task is not None] 88 | for task in reversed(executors): 89 | task.cancel() 90 | if executors: 91 | await asyncio.wait(executors) 92 | -------------------------------------------------------------------------------- /torrent_client/algorithms/uploader.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import itertools 3 | import logging 4 | import random 5 | import time 6 | from typing import List, Iterable, cast 7 | 8 | from torrent_client.algorithms.peer_manager import PeerManager 9 | from torrent_client.models import Peer, TorrentInfo 10 | from torrent_client.utils import humanize_size 11 | 12 | 13 | class Uploader: 14 | def __init__(self, torrent_info: TorrentInfo, logger: logging.Logger, peer_manager: PeerManager): 15 | self._download_info = torrent_info.download_info 16 | self._statistics = self._download_info.session_statistics 17 | 18 | self._logger = logger 19 | self._peer_manager = peer_manager 20 | 21 | CHOKING_CHANGING_TIME = 10 22 | UPLOAD_PEER_COUNT = 4 23 | 
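# Choking schedule: every CHOKING_CHANGING_TIME seconds peers are ranked by
# get_peer_upload_rate() and unchoked in that order until UPLOAD_PEER_COUNT
# interested peers are unchoked. Every ITERS_PER_OPTIMISTIC_UNCHOKING-th round
# one extra peer is unchoked at random ("optimistic unchoking"), with recently
# connected peers weighted CONNECTED_RECENTLY_COEFF times higher.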
24 | ITERS_PER_OPTIMISTIC_UNCHOKING = 3 25 | CONNECTED_RECENTLY_THRESHOLD = 60 26 | CONNECTED_RECENTLY_COEFF = 3 27 | 28 | def _select_optimistically_unchoked(self, peers: Iterable[Peer]) -> Peer: 29 | cur_time = time.time() 30 | connected_recently = [] 31 | remaining_peers = [] 32 | peer_data = self._peer_manager.peer_data 33 | for peer in peers: 34 | if cur_time - peer_data[peer].connected_time <= Uploader.CONNECTED_RECENTLY_THRESHOLD: 35 | connected_recently.append(peer) 36 | else: 37 | remaining_peers.append(peer) 38 | 39 | max_index = len(remaining_peers) + Uploader.CONNECTED_RECENTLY_COEFF * len(connected_recently) - 1 40 | index = random.randint(0, max_index) 41 | if index < len(remaining_peers): 42 | return remaining_peers[index] 43 | return connected_recently[(index - len(remaining_peers)) % len(connected_recently)] 44 | 45 | def get_peer_upload_rate(self, peer: Peer) -> int: 46 | data = self._peer_manager.peer_data[peer] 47 | 48 | rate = data.client.downloaded # We owe them for downloading 49 | if self._download_info.complete: 50 | rate += data.client.uploaded # To reach maximal upload speed 51 | return rate 52 | 53 | async def execute(self): 54 | prev_unchoked_peers = set() 55 | optimistically_unchoked = None 56 | for i in itertools.count(): 57 | peer_data = self._peer_manager.peer_data 58 | alive_peers = list(sorted(peer_data.keys(), key=self.get_peer_upload_rate, reverse=True)) 59 | cur_unchoked_peers = set() 60 | interested_count = 0 61 | 62 | if Uploader.UPLOAD_PEER_COUNT: 63 | if i % Uploader.ITERS_PER_OPTIMISTIC_UNCHOKING == 0: 64 | if alive_peers: 65 | optimistically_unchoked = self._select_optimistically_unchoked(alive_peers) 66 | else: 67 | optimistically_unchoked = None 68 | 69 | if optimistically_unchoked is not None and optimistically_unchoked in peer_data: 70 | cur_unchoked_peers.add(optimistically_unchoked) 71 | if peer_data[optimistically_unchoked].client.peer_interested: 72 | interested_count += 1 73 | 74 | for peer in cast(List[Peer], alive_peers): 75 | if interested_count == Uploader.UPLOAD_PEER_COUNT: 76 | break 77 | if peer_data[peer].client.peer_interested: 78 | interested_count += 1 79 | 80 | cur_unchoked_peers.add(peer) 81 | 82 | for peer in prev_unchoked_peers - cur_unchoked_peers: 83 | if peer in peer_data: 84 | peer_data[peer].client.am_choking = True 85 | for peer in cur_unchoked_peers: 86 | peer_data[peer].client.am_choking = False 87 | self._logger.debug('now %s peers are unchoked (total_uploaded = %s)', len(cur_unchoked_peers), 88 | humanize_size(self._statistics.total_uploaded)) 89 | 90 | await asyncio.sleep(Uploader.CHOKING_CHANGING_TIME) 91 | 92 | prev_unchoked_peers = cur_unchoked_peers 93 | -------------------------------------------------------------------------------- /torrent_client/control/__init__.py: -------------------------------------------------------------------------------- 1 | from torrent_client.control.client import * 2 | from torrent_client.control.manager import * 3 | from torrent_client.control.server import * 4 | -------------------------------------------------------------------------------- /torrent_client/control/client.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from typing import Callable, TypeVar 4 | 5 | from torrent_client.control.manager import ControlManager 6 | from torrent_client.control.server import ControlServer 7 | 8 | 9 | __all__ = ['ControlClient'] 10 | 11 | 12 | logger = logging.getLogger(__name__) 13 | 
logger.setLevel(logging.INFO) 14 | 15 | 16 | T = TypeVar('T') 17 | 18 | 19 | class ControlClient: 20 | def __init__(self): 21 | self._reader = None # type: asyncio.StreamReader 22 | self._writer = None # type: asyncio.StreamWriter 23 | 24 | async def connect(self): 25 | for port in ControlServer.PORT_RANGE: 26 | try: 27 | self._reader, self._writer = await asyncio.open_connection(host=ControlServer.HOST, port=port) 28 | 29 | message = await self._reader.readexactly(len(ControlServer.HANDSHAKE_MESSAGE)) 30 | if message != ControlServer.HANDSHAKE_MESSAGE: 31 | raise RuntimeError('Unknown control server protocol') 32 | except Exception as e: 33 | self.close() 34 | self._reader = None 35 | self._writer = None 36 | logger.debug('failed to connect to port %s: %r', port, e) 37 | else: 38 | break 39 | else: 40 | raise RuntimeError("Can't connect to the control server (run \"./torrent_cli.py start\" first)") 41 | 42 | async def execute(self, action: Callable[[ControlManager], T]) -> T: 43 | ControlServer.send_object(action, self._writer) 44 | result = await ControlServer.receive_object(self._reader) 45 | 46 | if isinstance(result, Exception): 47 | raise result 48 | return result 49 | 50 | def close(self): 51 | if self._writer is not None: 52 | self._writer.close() 53 | 54 | async def __aenter__(self) -> 'ControlClient': 55 | await self.connect() 56 | return self 57 | 58 | async def __aexit__(self, exc_type, exc_val, exc_tb): 59 | self.close() 60 | -------------------------------------------------------------------------------- /torrent_client/control/formatters.py: -------------------------------------------------------------------------------- 1 | from math import floor 2 | from typing import Iterable, List, Union 3 | 4 | from torrent_client.models import DownloadInfo, TorrentInfo, TorrentState 5 | from torrent_client.utils import humanize_size, humanize_speed, floor_to, humanize_time 6 | 7 | 8 | COLUMN_WIDTH = 30 9 | INDENT = ' ' * 4 10 | PROGRESS_BAR_WIDTH = 50 11 | 12 | 13 | def join_lines(lines: Iterable[str]) -> str: 14 | return ''.join(line[:-1].ljust(COLUMN_WIDTH) if line.endswith('\t') else line for line in lines) 15 | 16 | 17 | def format_title(info: Union[DownloadInfo, TorrentState], long_format: bool) -> List[str]: 18 | lines = ['Name: {}\n'.format(info.suggested_name)] 19 | if long_format: 20 | lines.append('ID: {}\n'.format(info.info_hash.hex())) 21 | return lines 22 | 23 | 24 | def format_content(torrent_info: TorrentInfo) -> List[str]: 25 | download_info = torrent_info.download_info # type: DownloadInfo 26 | 27 | lines = ['Announce URLs:\n'] 28 | for i, tier in enumerate(torrent_info.announce_list): 29 | lines.append(INDENT + 'Tier {}: {}\n'.format(i + 1, ', '.join(tier))) 30 | 31 | total_size_repr = humanize_size(download_info.total_size) 32 | if download_info.single_file_mode: 33 | lines.append('Content: single file ({})\n'.format(total_size_repr)) 34 | else: 35 | lines.append('Content: {} files (total {})\n'.format(len(download_info.files), total_size_repr)) 36 | for file_info in download_info.files: 37 | lines.append(INDENT + '{} ({})\n'.format('/'.join(file_info.path), humanize_size(file_info.length))) 38 | return lines 39 | 40 | 41 | MIN_SPEED_TO_SHOW_ETA = 100 * 2 ** 10 # bytes/s 42 | 43 | 44 | def format_status(state: TorrentState, long_format: bool) -> List[str]: 45 | lines = [] 46 | 47 | if long_format: 48 | lines.append('Selected: {}/{} files ({}/{} pieces)\n'.format( 49 | state.selected_file_count, state.total_file_count, state.selected_piece_count, 
state.total_piece_count)) 50 | lines.append('Directory: {}\n'.format(state.download_dir)) 51 | 52 | if state.paused: 53 | general_status = 'Paused\n' 54 | elif state.complete: 55 | general_status = 'Uploading\n' 56 | else: 57 | general_status = 'Downloading\t' 58 | lines.append('State: ' + general_status) 59 | if not state.paused and not state.complete: 60 | eta_seconds = state.eta_seconds 61 | lines.append('ETA: {}\n'.format(humanize_time(eta_seconds) if eta_seconds is not None else 'unknown')) 62 | 63 | lines.append('Download from: {}/{} peers\t'.format(state.downloading_peer_count, state.total_peer_count)) 64 | lines.append('Upload to: {}/{} peers\n'.format(state.uploading_peer_count, state.total_peer_count)) 65 | 66 | lines.append('Download speed: {}\t'.format( 67 | humanize_speed(state.download_speed) if state.download_speed is not None else 'unknown')) 68 | lines.append('Upload speed: {}\n'.format( 69 | humanize_speed(state.upload_speed) if state.upload_speed is not None else 'unknown')) 70 | 71 | lines.append('Size: {}/{}\t'.format(humanize_size(state.downloaded_size), humanize_size(state.selected_size))) 72 | lines.append('Ratio: {:.1f}\n'.format(state.ratio)) 73 | 74 | progress = state.progress 75 | progress_bar = ('#' * floor(progress * PROGRESS_BAR_WIDTH)).ljust(PROGRESS_BAR_WIDTH) 76 | lines.append('Progress: {:5.1f}% [{}]\n'.format(floor_to(progress * 100, 1), progress_bar)) 77 | 78 | return lines 79 | -------------------------------------------------------------------------------- /torrent_client/control/manager.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import copy 3 | import logging 4 | import os 5 | import pickle 6 | from typing import Dict, List, Optional 7 | 8 | from torrent_client.algorithms import TorrentManager 9 | from torrent_client.models import generate_peer_id, TorrentInfo, TorrentState 10 | from torrent_client.network import PeerTCPServer 11 | from torrent_client.utils import import_signals 12 | 13 | 14 | QObject, pyqtSignal = import_signals() 15 | 16 | 17 | __all__ = ['ControlManager'] 18 | 19 | 20 | state_filename = os.path.expanduser('~/.torrent_gui_state') 21 | 22 | 23 | logger = logging.getLogger(__name__) 24 | logger.setLevel(logging.DEBUG) 25 | 26 | 27 | class ControlManager(QObject): 28 | if pyqtSignal: 29 | torrents_suggested = pyqtSignal(list) 30 | torrent_added = pyqtSignal(TorrentState) 31 | torrent_changed = pyqtSignal(TorrentState) 32 | torrent_removed = pyqtSignal(bytes) 33 | 34 | def __init__(self): 35 | super().__init__() 36 | 37 | self._our_peer_id = generate_peer_id() 38 | 39 | self._torrents = {} # type: Dict[bytes, TorrentInfo] 40 | self._torrent_managers = {} # type: Dict[bytes, TorrentManager] 41 | 42 | self._server = PeerTCPServer(self._our_peer_id, self._torrent_managers) 43 | 44 | self._torrent_manager_executors = {} # type: Dict[bytes, asyncio.Task] 45 | self._state_updating_executor = None # type: Optional[asyncio.Task] 46 | 47 | self.last_torrent_dir = None # type: Optional[str] 48 | self.last_download_dir = None # type: Optional[str] 49 | 50 | def get_torrents(self) -> List[TorrentInfo]: 51 | return list(self._torrents.values()) 52 | 53 | async def start(self): 54 | await self._server.start() 55 | 56 | def _start_torrent_manager(self, torrent_info: TorrentInfo): 57 | info_hash = torrent_info.download_info.info_hash 58 | 59 | manager = TorrentManager(torrent_info, self._our_peer_id, self._server.port) 60 | if pyqtSignal: 61 | manager.state_changed.connect(lambda: 
self.torrent_changed.emit(TorrentState(torrent_info))) 62 | self._torrent_managers[info_hash] = manager 63 | self._torrent_manager_executors[info_hash] = asyncio.ensure_future(manager.run()) 64 | 65 | def add(self, torrent_info: TorrentInfo): 66 | info_hash = torrent_info.download_info.info_hash 67 | if info_hash in self._torrents: 68 | raise ValueError('This torrent is already added') 69 | 70 | if not torrent_info.paused: 71 | self._start_torrent_manager(torrent_info) 72 | self._torrents[info_hash] = torrent_info 73 | 74 | if pyqtSignal: 75 | self.torrent_added.emit(TorrentState(torrent_info)) 76 | 77 | def resume(self, info_hash: bytes): 78 | if info_hash not in self._torrents: 79 | raise ValueError('Torrent not found') 80 | torrent_info = self._torrents[info_hash] 81 | if not torrent_info.paused: 82 | raise ValueError('The torrent is already running') 83 | 84 | self._start_torrent_manager(torrent_info) 85 | 86 | torrent_info.paused = False 87 | 88 | if pyqtSignal: 89 | self.torrent_changed.emit(TorrentState(torrent_info)) 90 | 91 | async def _stop_torrent_manager(self, info_hash: bytes): 92 | manager_executor = self._torrent_manager_executors[info_hash] 93 | manager_executor.cancel() 94 | try: 95 | await manager_executor 96 | except asyncio.CancelledError: 97 | pass 98 | del self._torrent_manager_executors[info_hash] 99 | 100 | manager = self._torrent_managers[info_hash] 101 | del self._torrent_managers[info_hash] 102 | await manager.stop() 103 | 104 | async def remove(self, info_hash: bytes): 105 | if info_hash not in self._torrents: 106 | raise ValueError('Torrent not found') 107 | torrent_info = self._torrents[info_hash] 108 | 109 | del self._torrents[info_hash] 110 | if not torrent_info.paused: 111 | await self._stop_torrent_manager(info_hash) 112 | 113 | if pyqtSignal: 114 | self.torrent_removed.emit(info_hash) 115 | 116 | async def pause(self, info_hash: bytes): 117 | if info_hash not in self._torrents: 118 | raise ValueError('Torrent not found') 119 | torrent_info = self._torrents[info_hash] 120 | if torrent_info.paused: 121 | raise ValueError('The torrent is already paused') 122 | 123 | await self._stop_torrent_manager(info_hash) 124 | 125 | torrent_info.paused = True 126 | 127 | if pyqtSignal: 128 | self.torrent_changed.emit(TorrentState(torrent_info)) 129 | 130 | def _dump_state(self): 131 | torrent_list = [] 132 | for manager, torrent_info in self._torrents.items(): 133 | torrent_info = copy.copy(torrent_info) 134 | torrent_info.download_info = copy.copy(torrent_info.download_info) 135 | torrent_info.download_info.reset_run_state() 136 | torrent_list.append(torrent_info) 137 | 138 | try: 139 | with open(state_filename, 'wb') as f: 140 | pickle.dump((self.last_torrent_dir, self.last_download_dir, torrent_list), f) 141 | logger.info('state saved (%s torrents)', len(torrent_list)) 142 | except Exception as err: 143 | logger.warning('Failed to save state: %r', err) 144 | 145 | STATE_UPDATE_INTERVAL = 5 * 60 146 | 147 | async def _execute_state_updates(self): 148 | while True: 149 | await asyncio.sleep(ControlManager.STATE_UPDATE_INTERVAL) 150 | 151 | self._dump_state() 152 | 153 | def invoke_state_dumps(self): 154 | self._state_updating_executor = asyncio.ensure_future(self._execute_state_updates()) 155 | 156 | def load_state(self): 157 | if not os.path.isfile(state_filename): 158 | return 159 | 160 | with open(state_filename, 'rb') as f: 161 | self.last_torrent_dir, self.last_download_dir, torrent_list = pickle.load(f) 162 | 163 | for torrent_info in torrent_list: 164 | 
self.add(torrent_info) 165 | logger.info('state recovered (%s torrents)', len(torrent_list)) 166 | 167 | async def stop(self): 168 | await self._server.stop() 169 | 170 | tasks = list(self._torrent_manager_executors.values()) 171 | if self._state_updating_executor is not None: 172 | tasks.append(self._state_updating_executor) 173 | 174 | for task in tasks: 175 | task.cancel() 176 | if tasks: 177 | await asyncio.wait(tasks) 178 | 179 | if self._torrent_managers: 180 | await asyncio.wait([manager.stop() for manager in self._torrent_managers.values()]) 181 | 182 | if self._state_updating_executor is not None: # Only if we have loaded starting state 183 | self._dump_state() 184 | -------------------------------------------------------------------------------- /torrent_client/control/server.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import pickle 4 | import struct 5 | from typing import Any, cast, Callable, Optional 6 | 7 | from torrent_client.control.manager import ControlManager 8 | 9 | 10 | __all__ = ['ControlServer', 'DaemonExit'] 11 | 12 | 13 | logger = logging.getLogger(__name__) 14 | logger.setLevel(logging.DEBUG) 15 | 16 | 17 | class DaemonExit(Exception): 18 | pass 19 | 20 | 21 | class ControlServer: 22 | def __init__(self, control: ControlManager, daemon_stop_handler: Optional[Callable[['ControlServer'], None]]): 23 | self._control = control 24 | self._daemon_stop_handler = daemon_stop_handler 25 | 26 | self._server = None 27 | 28 | @property 29 | def control(self) -> ControlManager: 30 | return self._control 31 | 32 | HANDSHAKE_MESSAGE = b'bit-torrent:ControlServer\n' 33 | 34 | LENGTH_FMT = '!I' 35 | 36 | @staticmethod 37 | async def receive_object(reader: asyncio.StreamReader) -> Any: 38 | length_data = await reader.readexactly(struct.calcsize(ControlServer.LENGTH_FMT)) 39 | (length,) = struct.unpack(ControlServer.LENGTH_FMT, length_data) 40 | data = await reader.readexactly(length) 41 | return pickle.loads(data) 42 | 43 | @staticmethod 44 | def send_object(obj: Any, writer: asyncio.StreamWriter): 45 | data = pickle.dumps(obj) 46 | length_data = struct.pack(ControlServer.LENGTH_FMT, len(data)) 47 | writer.write(length_data) 48 | writer.write(data) 49 | 50 | async def _accept(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter): 51 | addr_repr = ':'.join(map(str, writer.get_extra_info('peername'))) 52 | logger.info('accepted connection from %s', addr_repr) 53 | 54 | try: 55 | writer.write(ControlServer.HANDSHAKE_MESSAGE) 56 | 57 | while True: 58 | # FIXME: maybe do not allow to execute arbitrary object 59 | action = cast(Callable[[ControlManager], Any], await ControlServer.receive_object(reader)) 60 | 61 | try: 62 | result = action(self._control) 63 | if asyncio.iscoroutine(result): 64 | result = await result 65 | except asyncio.CancelledError: 66 | raise 67 | except Exception as e: 68 | result = e 69 | 70 | ControlServer.send_object(result, writer) 71 | 72 | if isinstance(result, DaemonExit): 73 | logger.info('stop command received') 74 | if self._daemon_stop_handler is not None: 75 | self._daemon_stop_handler(self) 76 | return 77 | except asyncio.IncompleteReadError: 78 | pass 79 | except asyncio.CancelledError: 80 | raise 81 | except Exception as e: 82 | logger.warning('%s disconnected because of %r', addr_repr, e) 83 | finally: 84 | writer.close() 85 | 86 | HOST = '127.0.0.1' 87 | PORT_RANGE = range(6995, 6999 + 1) 88 | 89 | async def start(self): 90 | for port in 
ControlServer.PORT_RANGE: 91 | try: 92 | self._server = await asyncio.start_server(self._accept, host=ControlServer.HOST, port=port) 93 | except asyncio.CancelledError: 94 | raise 95 | except Exception as e: 96 | logger.debug('exception on starting server on port %s: %r', port, e) 97 | else: 98 | logger.info('server started on port %s', port) 99 | return 100 | else: 101 | raise RuntimeError('Failed to start a control server') 102 | 103 | async def stop(self): 104 | if self._server is not None: 105 | self._server.close() 106 | await self._server.wait_closed() 107 | logger.info('server stopped') 108 | -------------------------------------------------------------------------------- /torrent_client/file_structure.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import functools 3 | import os 4 | from bisect import bisect_right 5 | from contextlib import closing 6 | from typing import Iterable, BinaryIO, Tuple 7 | 8 | from torrent_client.models import DownloadInfo 9 | 10 | 11 | def delegate_to_executor(func): 12 | @functools.wraps(func) 13 | async def wrapper(self: 'FileStructure', *args, acquire_lock=True, **kwargs): 14 | if acquire_lock: 15 | await self.lock.acquire() 16 | try: 17 | return await self._loop.run_in_executor(None, functools.partial(func, self, *args, **kwargs)) 18 | finally: 19 | if acquire_lock: 20 | self.lock.release() 21 | 22 | return wrapper 23 | 24 | 25 | class FileStructure: 26 | def __init__(self, download_dir: str, download_info: DownloadInfo): 27 | self._download_info = download_info 28 | 29 | self._loop = asyncio.get_event_loop() 30 | self._lock = asyncio.Lock() 31 | self._paths = [] 32 | self._offsets = [] 33 | offset = 0 34 | 35 | for file in download_info.files: 36 | path = os.path.join(download_dir, download_info.suggested_name, *file.path) 37 | directory = os.path.dirname(path) 38 | if not os.path.isdir(directory): 39 | os.makedirs(os.path.normpath(directory)) 40 | if not os.path.isfile(path): 41 | with open(path, 'w') as f: 42 | f.truncate(file.length) 43 | 44 | self._paths.append(path) 45 | self._offsets.append(offset) 46 | offset += file.length 47 | 48 | self._offsets.append(offset) # Fake entry for convenience 49 | 50 | @property 51 | def lock(self) -> asyncio.Lock: 52 | return self._lock 53 | 54 | def _iter_files(self, offset: int, data_length: int, mode: str) -> Iterable[Tuple[BinaryIO, int, int]]: 55 | if offset < 0 or offset + data_length > self._download_info.total_size: 56 | raise IndexError('Data position out of range') 57 | 58 | # Find rightmost file which start offset less than or equal to `offset` 59 | index = bisect_right(self._offsets, offset) - 1 60 | 61 | while data_length != 0: 62 | file_start_offset = self._offsets[index] 63 | file_end_offset = self._offsets[index + 1] 64 | file_pos = offset - file_start_offset 65 | bytes_to_operate = min(file_end_offset - offset, data_length) 66 | 67 | with open(self._paths[index], mode) as f: 68 | yield f, file_pos, bytes_to_operate 69 | 70 | offset += bytes_to_operate 71 | data_length -= bytes_to_operate 72 | index += 1 73 | 74 | @delegate_to_executor 75 | def read(self, offset: int, length: int): 76 | result = [] 77 | for f, file_pos, bytes_to_operate in self._iter_files(offset, length, 'rb'): 78 | f.seek(file_pos) 79 | result.append(f.read(bytes_to_operate)) 80 | return b''.join(result) 81 | 82 | @delegate_to_executor 83 | def write(self, offset: int, data: memoryview): 84 | for f, file_pos, bytes_to_operate in self._iter_files(offset, len(data), 
'r+b'): 85 | f.seek(file_pos) 86 | f.write(data[:bytes_to_operate]) 87 | 88 | data = data[bytes_to_operate:] 89 | -------------------------------------------------------------------------------- /torrent_client/models.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import copy 3 | import hashlib 4 | import random 5 | import socket 6 | import struct 7 | import time 8 | from collections import OrderedDict 9 | from math import ceil 10 | from typing import List, Set, cast, Optional, Dict, Union, Any, Iterator 11 | 12 | import bencodepy 13 | from bitarray import bitarray 14 | 15 | from torrent_client.utils import grouper 16 | 17 | 18 | def generate_peer_id(): 19 | return bytes(random.randint(0, 255) for _ in range(20)) 20 | 21 | 22 | class Peer: 23 | def __init__(self, host: str, port: int, peer_id: bytes=None): 24 | # FIXME: Need we typecheck for the case of malicious data? 25 | 26 | self._host = host 27 | self._port = port 28 | self.peer_id = peer_id 29 | 30 | self._hash = hash((host, port)) # Important for performance 31 | 32 | @property 33 | def host(self) -> str: 34 | return self._host 35 | 36 | @property 37 | def port(self) -> int: 38 | return self._port 39 | 40 | def __eq__(self, other): 41 | if not isinstance(other, Peer): 42 | return False 43 | return self._host == other._host and self._port == other._port 44 | 45 | def __hash__(self): 46 | return self._hash 47 | 48 | @classmethod 49 | def from_dict(cls, dictionary: OrderedDict): 50 | return cls(dictionary[b'ip'].decode(), dictionary[b'port'], dictionary.get(b'peer id')) 51 | 52 | @classmethod 53 | def from_compact_form(cls, data: bytes): 54 | ip, port = struct.unpack('!4sH', data) 55 | host = socket.inet_ntoa(ip) 56 | return cls(host, port) 57 | 58 | def __repr__(self): 59 | return '{}:{}'.format(self._host, self._port) 60 | 61 | 62 | def get_utf8(dictionary: OrderedDict, key: bytes): 63 | assert isinstance(key, bytes) 64 | 65 | suffixed_key = key + b'.utf-8' 66 | if suffixed_key in dictionary: 67 | return dictionary[suffixed_key] 68 | return dictionary[key] 69 | 70 | 71 | class FileInfo: 72 | def __init__(self, length: int, path: List[str], *, md5sum: str=None): 73 | self._length = length 74 | self._path = path 75 | self._md5sum = md5sum 76 | 77 | self.offset = None 78 | self.selected = True 79 | 80 | @property 81 | def length(self) -> int: 82 | return self._length 83 | 84 | @property 85 | def path(self) -> List[str]: 86 | return self._path 87 | 88 | @property 89 | def md5sum(self) -> str: 90 | return self._md5sum 91 | 92 | @classmethod 93 | def from_dict(cls, dictionary: OrderedDict): 94 | try: 95 | path = list(map(bytes.decode, get_utf8(dictionary, b'path'))) 96 | except KeyError: 97 | path = [] 98 | 99 | return cls(dictionary[b'length'], path, md5sum=dictionary.get(b'md5sum')) 100 | 101 | 102 | class BlockRequest: 103 | def __init__(self, piece_index: int, block_begin: int, block_length: int): 104 | self.piece_index = piece_index 105 | self.block_begin = block_begin 106 | self.block_length = block_length 107 | 108 | def __eq__(self, other): 109 | if not isinstance(other, BlockRequest): 110 | return False 111 | return self.__dict__ == other.__dict__ 112 | 113 | def __hash__(self): 114 | return hash((self.piece_index, self.block_begin, self.block_length)) 115 | 116 | 117 | class BlockRequestFuture(asyncio.Future, BlockRequest): 118 | def __init__(self, piece_index: int, block_begin: int, block_length: int): 119 | asyncio.Future.__init__(self) 120 | 
BlockRequest.__init__(self, piece_index, block_begin, block_length) 121 | 122 | self.prev_performers = set() 123 | self.performer = None 124 | 125 | __eq__ = asyncio.Future.__eq__ 126 | __hash__ = asyncio.Future.__hash__ 127 | 128 | 129 | SHA1_DIGEST_LEN = 20 130 | 131 | 132 | class PieceInfo: 133 | def __init__(self, piece_hash: bytes, length: int): 134 | self._piece_hash = piece_hash 135 | self._length = length 136 | 137 | self.selected = True 138 | self.owners = set() # type: Set[Peer] 139 | 140 | self.validating = False 141 | 142 | self._downloaded = None 143 | self._sources = None 144 | self._block_downloaded = None # type: Optional[bitarray] 145 | self._blocks_expected = None 146 | self.reset_content() 147 | 148 | def reset_content(self): 149 | self._downloaded = False 150 | self._sources = set() 151 | 152 | self._block_downloaded = None 153 | self._blocks_expected = set() 154 | 155 | def reset_run_state(self): 156 | self.owners = set() 157 | 158 | self.validating = False 159 | 160 | self._blocks_expected = set() 161 | 162 | @property 163 | def piece_hash(self) -> bytes: 164 | return self._piece_hash 165 | 166 | @property 167 | def length(self) -> int: 168 | return self._length 169 | 170 | @property 171 | def downloaded(self) -> bool: 172 | return self._downloaded 173 | 174 | @property 175 | def sources(self) -> Set[Peer]: 176 | return self._sources 177 | 178 | @property 179 | def blocks_expected(self) -> Optional[Set[BlockRequestFuture]]: 180 | return self._blocks_expected 181 | 182 | def mark_downloaded_blocks(self, source: Peer, request: BlockRequest): 183 | if self._downloaded: 184 | raise ValueError('The whole piece is already downloaded') 185 | 186 | self._sources.add(source) 187 | 188 | arr = self._block_downloaded 189 | if arr is None: 190 | arr = bitarray(ceil(self._length / DownloadInfo.MARKED_BLOCK_SIZE)) 191 | arr.setall(False) 192 | self._block_downloaded = arr 193 | 194 | mark_begin = ceil(request.block_begin / DownloadInfo.MARKED_BLOCK_SIZE) 195 | if request.block_begin + request.block_length == self._length: 196 | mark_end = len(arr) 197 | else: 198 | mark_end = (request.block_begin + request.block_length) // DownloadInfo.MARKED_BLOCK_SIZE 199 | arr[mark_begin:mark_end] = True 200 | 201 | blocks_expected = cast(Set[BlockRequestFuture], self._blocks_expected) 202 | downloaded_blocks = [] 203 | for fut in blocks_expected: 204 | query_begin = fut.block_begin // DownloadInfo.MARKED_BLOCK_SIZE 205 | query_end = ceil((fut.block_begin + fut.block_length) / DownloadInfo.MARKED_BLOCK_SIZE) 206 | if arr[query_begin:query_end].all(): 207 | downloaded_blocks.append(fut) 208 | fut.set_result(source) 209 | for fut in downloaded_blocks: 210 | blocks_expected.remove(fut) 211 | 212 | def are_all_blocks_downloaded(self) -> bool: 213 | return self._downloaded or (self._block_downloaded is not None and self._block_downloaded.all()) 214 | 215 | def mark_as_downloaded(self): 216 | if self._downloaded: 217 | raise ValueError('The piece is already downloaded') 218 | 219 | self._downloaded = True 220 | 221 | # Delete data structures for this piece to save memory 222 | self._sources = None 223 | self._block_downloaded = None 224 | self._blocks_expected = None 225 | 226 | 227 | class SessionStatistics: 228 | def __init__(self, prev_session_stats: Optional['SessionStatistics']): 229 | self.peer_count = 0 230 | self._peer_last_download = {} 231 | self._peer_last_upload = {} 232 | self._downloaded_per_session = 0 233 | self._uploaded_per_session = 0 234 | self.download_speed = None # type: 
Optional[float] 235 | self.upload_speed = None # type: Optional[float] 236 | 237 | if prev_session_stats is not None: 238 | self._total_downloaded = prev_session_stats.total_downloaded 239 | self._total_uploaded = prev_session_stats.total_uploaded 240 | else: 241 | self._total_downloaded = 0 242 | self._total_uploaded = 0 243 | 244 | @property 245 | def peer_last_download(self) -> Dict[Peer, float]: 246 | return self._peer_last_download 247 | 248 | @property 249 | def peer_last_upload(self) -> Dict[Peer, float]: 250 | return self._peer_last_upload 251 | 252 | @property 253 | def downloaded_per_session(self) -> int: 254 | return self._downloaded_per_session 255 | 256 | @property 257 | def uploaded_per_session(self) -> int: 258 | return self._uploaded_per_session 259 | 260 | PEER_CONSIDERATION_TIME = 10 261 | 262 | @staticmethod 263 | def _get_actual_peer_count(time_dict: Dict[Peer, float]) -> int: 264 | cur_time = time.time() 265 | return sum(1 for t in time_dict.values() if cur_time - t <= SessionStatistics.PEER_CONSIDERATION_TIME) 266 | 267 | @property 268 | def downloading_peer_count(self) -> int: 269 | return SessionStatistics._get_actual_peer_count(self._peer_last_download) 270 | 271 | @property 272 | def uploading_peer_count(self) -> int: 273 | return SessionStatistics._get_actual_peer_count(self._peer_last_upload) 274 | 275 | @property 276 | def total_downloaded(self) -> int: 277 | return self._total_downloaded 278 | 279 | @property 280 | def total_uploaded(self) -> int: 281 | return self._total_uploaded 282 | 283 | def add_downloaded(self, peer: Peer, size: int): 284 | self._peer_last_download[peer] = time.time() 285 | self._downloaded_per_session += size 286 | self._total_downloaded += size 287 | 288 | def add_uploaded(self, peer: Peer, size: int): 289 | self._peer_last_upload[peer] = time.time() 290 | self._uploaded_per_session += size 291 | self._total_uploaded += size 292 | 293 | 294 | FileTreeNode = Union[FileInfo, Dict[str, Any]] 295 | 296 | 297 | class DownloadInfo: 298 | MARKED_BLOCK_SIZE = 2 ** 10 299 | 300 | def __init__(self, info_hash: bytes, 301 | piece_length: int, piece_hashes: List[bytes], suggested_name: str, files: List[FileInfo], *, 302 | private: bool=False): 303 | self.info_hash = info_hash 304 | self.piece_length = piece_length 305 | self.suggested_name = suggested_name 306 | 307 | self.files = files 308 | self._file_tree = {} 309 | self._create_file_tree() 310 | 311 | self.private = private 312 | 313 | assert piece_hashes 314 | self._pieces = [PieceInfo(item, piece_length) for item in piece_hashes[:-1]] 315 | last_piece_length = self.total_size - (len(piece_hashes) - 1) * self.piece_length 316 | self._pieces.append(PieceInfo(piece_hashes[-1], last_piece_length)) 317 | 318 | piece_count = len(piece_hashes) 319 | if ceil(self.total_size / piece_length) != piece_count: 320 | raise ValueError('Invalid count of piece hashes') 321 | 322 | self._interesting_pieces = None 323 | self.downloaded_piece_count = 0 324 | self._complete = False 325 | 326 | self._host_distrust_rates = {} 327 | 328 | self._session_statistics = SessionStatistics(None) 329 | 330 | @property 331 | def single_file_mode(self) -> bool: 332 | return len(self.files) == 1 and not self.files[0].path 333 | 334 | def _create_file_tree(self): 335 | offset = 0 336 | for item in self.files: 337 | item.offset = offset 338 | offset += item.length 339 | 340 | if not item.path: 341 | self._file_tree = item 342 | else: 343 | directory = self._file_tree 344 | for elem in item.path[:-1]: 345 | directory = 
directory.setdefault(elem, {}) 346 | directory[item.path[-1]] = item 347 | 348 | @property 349 | def file_tree(self) -> FileTreeNode: 350 | return self._file_tree 351 | 352 | def _get_file_tree_node(self, path: List[str]) -> FileTreeNode: 353 | result = self._file_tree 354 | try: 355 | for elem in path: 356 | result = result[elem] 357 | except KeyError: 358 | raise ValueError("Path \"{}\" doesn't exist in this torrent".format('/'.join(path))) 359 | return result 360 | 361 | @staticmethod 362 | def _traverse_nodes(node: FileTreeNode) -> Iterator[FileInfo]: 363 | if isinstance(node, FileInfo): 364 | yield node 365 | return 366 | for child in node.values(): 367 | yield from DownloadInfo._traverse_nodes(child) 368 | 369 | def select_files(self, paths: List[List[str]], mode: str): 370 | if mode not in ('whitelist', 'blacklist'): 371 | raise ValueError('Invalid mode "{}"'.format(mode)) 372 | include_paths = (mode == 'whitelist') 373 | 374 | for info in self.pieces: 375 | info.selected = not include_paths 376 | for info in self.files: 377 | info.selected = not include_paths 378 | 379 | segments = [] 380 | for path in paths: 381 | for node in DownloadInfo._traverse_nodes(self._get_file_tree_node(path)): 382 | node.selected = include_paths 383 | segments.append((node.offset, node.length)) 384 | if (include_paths and not segments) or (not include_paths and len(segments) == len(self.files)): 385 | raise ValueError("Can't exclude all files from the torrent") 386 | 387 | segments.sort() 388 | united_segments = [] 389 | for cur_segment in segments: 390 | if united_segments: 391 | last_segment = united_segments[-1] 392 | if last_segment[0] + last_segment[1] == cur_segment[0]: 393 | united_segments[-1] = (last_segment[0], last_segment[1] + cur_segment[1]) 394 | continue 395 | united_segments.append(cur_segment) 396 | 397 | for offset, length in united_segments: 398 | if include_paths: 399 | piece_begin = offset // self.piece_length 400 | piece_end = ceil((offset + length) / self.piece_length) 401 | else: 402 | piece_begin = ceil(offset / self.piece_length) 403 | piece_end = (offset + length) // self.piece_length 404 | 405 | for index in range(piece_begin, piece_end): 406 | self.pieces[index].selected = include_paths 407 | 408 | def reset_run_state(self): 409 | self._pieces = [copy.copy(info) for info in self._pieces] 410 | for info in self._pieces: 411 | info.reset_run_state() 412 | 413 | self._interesting_pieces = set() 414 | 415 | def reset_stats(self): 416 | self._session_statistics = SessionStatistics(self._session_statistics) 417 | 418 | @classmethod 419 | def from_dict(cls, dictionary: OrderedDict): 420 | info_hash = hashlib.sha1(bencodepy.encode(dictionary)).digest() 421 | 422 | if len(dictionary[b'pieces']) % SHA1_DIGEST_LEN != 0: 423 | raise ValueError('Invalid length of "pieces" string') 424 | piece_hashes = grouper(dictionary[b'pieces'], SHA1_DIGEST_LEN) 425 | 426 | if b'files' in dictionary: 427 | files = list(map(FileInfo.from_dict, dictionary[b'files'])) 428 | else: 429 | files = [FileInfo.from_dict(dictionary)] 430 | 431 | return cls(info_hash, 432 | dictionary[b'piece length'], piece_hashes, get_utf8(dictionary, b'name').decode(), files, 433 | private=dictionary.get('private', False)) 434 | 435 | @property 436 | def pieces(self) -> List[PieceInfo]: 437 | return self._pieces 438 | 439 | @property 440 | def piece_count(self) -> int: 441 | return len(self._pieces) 442 | 443 | def get_real_piece_length(self, index: int) -> int: 444 | if index == self.piece_count - 1: 445 | return 
self.total_size - self.piece_length * (self.piece_count - 1) 446 | else: 447 | return self.piece_length 448 | 449 | @property 450 | def total_size(self) -> int: 451 | return sum(file.length for file in self.files) 452 | 453 | @property 454 | def bytes_left(self) -> int: 455 | result = (self.piece_count - self.downloaded_piece_count) * self.piece_length 456 | last_piece_index = self.piece_count - 1 457 | if not self._pieces[last_piece_index].downloaded: 458 | result += self._pieces[last_piece_index].length - self.piece_length 459 | return result 460 | 461 | @property 462 | def interesting_pieces(self) -> Set[int]: 463 | return self._interesting_pieces 464 | 465 | @property 466 | def complete(self) -> bool: 467 | return self._complete 468 | 469 | @complete.setter 470 | def complete(self, value: bool): 471 | if value: 472 | assert all(info.downloaded or not info.selected for info in self._pieces) 473 | self._complete = value 474 | 475 | DISTRUST_RATE_TO_BAN = 5 476 | 477 | def increase_distrust(self, peer: Peer): 478 | self._host_distrust_rates[peer.host] = self._host_distrust_rates.get(peer.host, 0) + 1 479 | 480 | def is_banned(self, peer: Peer) -> bool: 481 | return (peer.host in self._host_distrust_rates and 482 | self._host_distrust_rates[peer.host] >= DownloadInfo.DISTRUST_RATE_TO_BAN) 483 | 484 | @property 485 | def session_statistics(self) -> SessionStatistics: 486 | return self._session_statistics 487 | 488 | 489 | class TorrentInfo: 490 | def __init__(self, download_info: DownloadInfo, announce_list: List[List[str]], *, download_dir: str): 491 | # TODO: maybe implement optional fields 492 | 493 | self.download_info = download_info 494 | self._announce_list = announce_list 495 | 496 | self.download_dir = download_dir 497 | 498 | self.paused = False 499 | 500 | @classmethod 501 | def from_file(cls, filename: str, **kwargs): 502 | dictionary = cast(OrderedDict, bencodepy.decode_from_file(filename)) 503 | download_info = DownloadInfo.from_dict(dictionary[b'info']) 504 | 505 | if b'announce-list' in dictionary: 506 | announce_list = [[url.decode() for url in tier] 507 | for tier in dictionary[b'announce-list']] 508 | else: 509 | announce_list = [[dictionary[b'announce'].decode()]] 510 | 511 | return cls(download_info, announce_list, **kwargs) 512 | 513 | @property 514 | def announce_list(self) -> List[List[str]]: 515 | return self._announce_list 516 | 517 | 518 | class TorrentState: 519 | """This class represents crucial parameters of torrent state. Unlike TorrentInfo and DownloadInfo, 520 | it is too small to serialize (to send it via socket as an answer to `status` command) and 521 | thread-safe (we can pass it to a GUI thread). 
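    All fields are plain values copied from DownloadInfo and its session statistics at
    construction time, so the snapshot does not change while the torrent keeps running.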
522 | """ 523 | 524 | def __init__(self, torrent_info: TorrentInfo): 525 | download_info = torrent_info.download_info 526 | statistics = download_info.session_statistics 527 | 528 | self.suggested_name = download_info.suggested_name 529 | self.info_hash = download_info.info_hash 530 | self.single_file_mode = download_info.single_file_mode 531 | 532 | self.total_piece_count = len(download_info.pieces) 533 | self.selected_piece_count = sum(1 for info in download_info.pieces if info.selected) 534 | 535 | last_piece_info = download_info.pieces[-1] 536 | self.selected_size = self.selected_piece_count * download_info.piece_length 537 | if last_piece_info.selected: 538 | self.selected_size += last_piece_info.length - download_info.piece_length 539 | self.downloaded_size = download_info.downloaded_piece_count * download_info.piece_length 540 | if last_piece_info.downloaded: 541 | self.downloaded_size += last_piece_info.length - download_info.piece_length 542 | 543 | self.total_file_count = len(download_info.files) 544 | self.selected_file_count = sum(1 for info in download_info.files if info.selected) 545 | 546 | self.download_dir = torrent_info.download_dir 547 | 548 | self.paused = torrent_info.paused 549 | self.complete = download_info.complete 550 | 551 | self.total_peer_count = statistics.peer_count 552 | self.downloading_peer_count = statistics.downloading_peer_count 553 | self.uploading_peer_count = statistics.uploading_peer_count 554 | 555 | self.download_speed = statistics.download_speed 556 | self.upload_speed = statistics.upload_speed 557 | 558 | self.total_uploaded = statistics.total_uploaded 559 | self.total_downloaded = statistics.total_downloaded 560 | 561 | MIN_SPEED_TO_CALC_ETA = 100 * 2 ** 10 # = 100 KiB/s 562 | 563 | @property 564 | def eta_seconds(self) -> Optional[int]: 565 | if self.download_speed is not None and self.download_speed >= TorrentState.MIN_SPEED_TO_CALC_ETA: 566 | return (self.selected_size - self.downloaded_size) / self.download_speed 567 | else: 568 | return None 569 | 570 | @property 571 | def ratio(self) -> float: 572 | return self.total_uploaded / self.total_downloaded if self.total_downloaded else 0 573 | 574 | @property 575 | def progress(self) -> float: 576 | return self.downloaded_size / self.selected_size 577 | -------------------------------------------------------------------------------- /torrent_client/network/__init__.py: -------------------------------------------------------------------------------- 1 | from torrent_client.network.peer_tcp_client import * 2 | from torrent_client.network.peer_tcp_server import * 3 | from torrent_client.network.tracker_clients import * 4 | 5 | 6 | -------------------------------------------------------------------------------- /torrent_client/network/peer_tcp_client.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import struct 4 | from enum import Enum 5 | from math import ceil 6 | from typing import Optional, Tuple, List, cast, Sequence 7 | 8 | from bitarray import bitarray 9 | 10 | from torrent_client.file_structure import FileStructure 11 | from torrent_client.models import SHA1_DIGEST_LEN, DownloadInfo, Peer, BlockRequest 12 | 13 | 14 | __all__ = ['PeerTCPClient'] 15 | 16 | 17 | class MessageType(Enum): 18 | choke = 0 19 | unchoke = 1 20 | interested = 2 21 | not_interested = 3 22 | have = 4 23 | bitfield = 5 24 | request = 6 25 | piece = 7 26 | cancel = 8 27 | port = 9 28 | 29 | 30 | class SeedError(Exception): 31 | pass 32 | 33 | 
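# Editor's sketch (not part of the original module): PeerTCPClient below speaks the
# BitTorrent peer wire protocol -- a fixed handshake followed by length-prefixed
# messages of the form <4-byte big-endian length><1-byte message id><payload>.
# For illustration only, a "request" message for one standard 16 KiB block could be
# framed as follows; the helper and its arguments are hypothetical and unused here:
def _example_pack_request(piece_index: int, block_begin: int, block_length: int = 2 ** 14) -> bytes:
    payload = struct.pack('!3I', piece_index, block_begin, block_length)
    # The length prefix counts the 1-byte message id; MessageType.request.value == 6.
    return struct.pack('!IB', len(payload) + 1, MessageType.request.value) + payload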
34 | class PeerTCPClient: 35 | LOGGER_LEVEL = logging.INFO 36 | 37 | def __init__(self, our_peer_id: bytes, peer: Peer): 38 | self._our_peer_id = our_peer_id 39 | self._peer = peer 40 | 41 | self._logger = logging.getLogger('[{}]'.format(peer)) 42 | self._logger.setLevel(PeerTCPClient.LOGGER_LEVEL) 43 | 44 | self._download_info = None # type: DownloadInfo 45 | self._file_structure = None # type: FileStructure 46 | self._piece_owned = None # type: bitarray 47 | 48 | self._am_choking = True 49 | self._am_interested = False 50 | self._peer_choking = True 51 | self._peer_interested = False 52 | 53 | self._downloaded = 0 54 | self._uploaded = 0 55 | 56 | self._reader = None # type: asyncio.StreamReader 57 | self._writer = None # type: asyncio.StreamWriter 58 | self._connected = False 59 | 60 | _handshake_message = b'BitTorrent protocol' 61 | HANDSHAKE_DATA = bytes([len(_handshake_message)]) + _handshake_message 62 | RESERVED_BYTES = b'\0' * 8 63 | 64 | CONNECT_TIMEOUT = 5 65 | READ_TIMEOUT = 5 66 | MAX_SILENCE_DURATION = 3 * 60 67 | WRITE_TIMEOUT = 5 68 | 69 | def _send_protocol_data(self): 70 | self._writer.write(PeerTCPClient.HANDSHAKE_DATA + PeerTCPClient.RESERVED_BYTES) 71 | 72 | async def _receive_protocol_data(self): 73 | data_len = len(PeerTCPClient.HANDSHAKE_DATA) + len(PeerTCPClient.RESERVED_BYTES) 74 | response = await asyncio.wait_for(self._reader.readexactly(data_len), PeerTCPClient.READ_TIMEOUT) 75 | 76 | if response[:len(PeerTCPClient.HANDSHAKE_DATA)] != PeerTCPClient.HANDSHAKE_DATA: 77 | raise ValueError('Unknown protocol') 78 | 79 | def _populate_info(self, download_info: DownloadInfo, file_structure: FileStructure): 80 | self._download_info = download_info 81 | self._file_structure = file_structure 82 | self._piece_owned = bitarray(download_info.piece_count) 83 | self._piece_owned.setall(False) 84 | 85 | self._writer.write(self._download_info.info_hash + self._our_peer_id) 86 | 87 | async def _receive_info(self) -> bytes: 88 | data_len = SHA1_DIGEST_LEN + len(self._our_peer_id) 89 | response = await asyncio.wait_for(self._reader.readexactly(data_len), PeerTCPClient.READ_TIMEOUT) 90 | 91 | actual_info_hash = response[:SHA1_DIGEST_LEN] 92 | actual_peer_id = response[SHA1_DIGEST_LEN:] 93 | if self._our_peer_id == actual_peer_id: 94 | raise ValueError('Connection to ourselves') 95 | if self._peer.peer_id is not None and self._peer.peer_id != actual_peer_id: 96 | raise ValueError('Unexpected peer_id') 97 | self._peer.peer_id = actual_peer_id 98 | 99 | return actual_info_hash 100 | 101 | async def connect(self, download_info: DownloadInfo, file_structure: FileStructure): 102 | self._reader, self._writer = await asyncio.wait_for( 103 | asyncio.open_connection(self._peer.host, self._peer.port), PeerTCPClient.CONNECT_TIMEOUT) 104 | 105 | self._send_protocol_data() 106 | self._populate_info(download_info, file_structure) 107 | 108 | await self._receive_protocol_data() 109 | if await self._receive_info() != download_info.info_hash: 110 | raise ValueError("info_hashes don't match") 111 | 112 | self._send_bitfield() 113 | self._connected = True 114 | 115 | async def accept(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> bytes: 116 | self._reader = reader 117 | self._writer = writer 118 | 119 | self._send_protocol_data() 120 | 121 | await self._receive_protocol_data() 122 | return await self._receive_info() 123 | 124 | def confirm_info_hash(self, download_info: DownloadInfo, file_structure: FileStructure): 125 | self._populate_info(download_info, file_structure) 126 | 
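        # Once the info_hash is confirmed, advertise the pieces we already have so the
        # remote peer can decide whether it is interested in us.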
127 | self._send_bitfield() 128 | self._connected = True 129 | 130 | MAX_MESSAGE_LENGTH = 2 ** 18 131 | 132 | async def _receive_message(self) -> Optional[Tuple[MessageType, memoryview]]: 133 | data = await asyncio.wait_for(self._reader.readexactly(4), PeerTCPClient.MAX_SILENCE_DURATION) 134 | (length,) = struct.unpack('!I', data) 135 | if length == 0: # keep-alive 136 | return None 137 | if length > PeerTCPClient.MAX_MESSAGE_LENGTH: 138 | raise ValueError('Message length is too big') 139 | 140 | data = await asyncio.wait_for(self._reader.readexactly(length), PeerTCPClient.READ_TIMEOUT) 141 | try: 142 | message_id = MessageType(data[0]) 143 | except ValueError: 144 | self._logger.debug('Unknown message type %s', data[0]) 145 | return None 146 | payload = memoryview(data)[1:] 147 | 148 | # self._logger.debug('incoming message %s length=%s', message_id.name, length) 149 | 150 | return message_id, payload 151 | 152 | _KEEP_ALIVE_MESSAGE = b'\0' * 4 153 | 154 | def _send_message(self, message_id: MessageType=None, *payload: List[bytes]): 155 | if message_id is None: # keep-alive 156 | self._writer.write(PeerTCPClient._KEEP_ALIVE_MESSAGE) 157 | return 158 | 159 | length = sum(len(portion) for portion in payload) + 1 160 | # self._logger.debug('outcoming message %s length=%s', message_id.name, length) 161 | 162 | self._writer.write(struct.pack('!IB', length, message_id.value)) 163 | for portion in payload: 164 | self._writer.write(portion) 165 | 166 | @property 167 | def am_choking(self): 168 | return self._am_choking 169 | 170 | @property 171 | def am_interested(self): 172 | return self._am_interested 173 | 174 | def _check_connect(self): 175 | if not self._connected: 176 | raise RuntimeError("Can't change state when the client isn't connected") 177 | 178 | @am_choking.setter 179 | def am_choking(self, value: bool): 180 | self._check_connect() 181 | if self._am_choking != value: 182 | self._am_choking = value 183 | self._send_message(MessageType.choke if value else MessageType.unchoke) 184 | 185 | @am_interested.setter 186 | def am_interested(self, value: bool): 187 | self._check_connect() 188 | if self._am_interested != value: 189 | self._am_interested = value 190 | self._send_message(MessageType.interested if value else MessageType.not_interested) 191 | 192 | @property 193 | def peer_choking(self): 194 | return self._peer_choking 195 | 196 | @property 197 | def peer_interested(self): 198 | return self._peer_interested 199 | 200 | @property 201 | def piece_owned(self) -> Sequence[bool]: 202 | return self._piece_owned 203 | 204 | # def is_seed(self) -> bool: 205 | # return self._piece_owned & self._download_info.piece_selected == self._download_info.piece_selected 206 | 207 | @property 208 | def downloaded(self): 209 | return self._downloaded 210 | 211 | @property 212 | def uploaded(self): 213 | return self._uploaded 214 | 215 | @staticmethod 216 | def _check_payload_len(message_id: MessageType, payload: memoryview, expected_len: int): 217 | if len(payload) != expected_len: 218 | raise ValueError('Invalid payload length on message_id = {} ' 219 | '(expected {}, got {})'.format(message_id.name, expected_len, len(payload))) 220 | 221 | def _handle_setting_states(self, message_id: MessageType, payload: memoryview): 222 | PeerTCPClient._check_payload_len(message_id, payload, 0) 223 | 224 | if message_id == MessageType.choke: 225 | self._peer_choking = True 226 | elif message_id == MessageType.unchoke: 227 | self._peer_choking = False 228 | elif message_id == MessageType.interested: 229 | 
self._peer_interested = True 230 | elif message_id == MessageType.not_interested: 231 | self._peer_interested = False 232 | 233 | def _mark_as_owner(self, piece_index: int): 234 | self._piece_owned[piece_index] = True 235 | self._download_info.pieces[piece_index].owners.add(self._peer) 236 | if piece_index in self._download_info.interesting_pieces: 237 | self.am_interested = True 238 | 239 | def _handle_haves(self, message_id: MessageType, payload: memoryview): 240 | if message_id == MessageType.have: 241 | (index,) = struct.unpack('!I', cast(bytes, payload)) 242 | self._mark_as_owner(index) 243 | elif message_id == MessageType.bitfield: 244 | piece_count = self._download_info.piece_count 245 | PeerTCPClient._check_payload_len(message_id, payload, int(ceil(piece_count / 8))) 246 | 247 | arr = bitarray(endian='big') 248 | arr.frombytes(payload.tobytes()) 249 | for i in range(piece_count): 250 | if arr[i]: 251 | self._mark_as_owner(i) 252 | for i in range(piece_count, len(arr)): 253 | if arr[i]: 254 | raise ValueError('Spare bits in "bitfield" message must be zero') 255 | 256 | # if self._download_info.complete and self.is_seed(): 257 | # raise SeedError('A seed is disconnected because a download is complete') 258 | 259 | MAX_REQUEST_LENGTH = 2 ** 17 260 | 261 | def _check_position_range(self, request: BlockRequest): 262 | if request.piece_index < 0 or request.piece_index >= self._download_info.piece_count: 263 | raise IndexError('Piece index out of range') 264 | end_offset = request.piece_index * self._download_info.piece_length + \ 265 | request.block_begin + request.block_length 266 | if (request.block_begin < 0 or request.block_begin + request.block_length > self._download_info.piece_length or 267 | end_offset > self._download_info.total_size): 268 | raise IndexError('Position in piece out of range') 269 | 270 | async def _handle_requests(self, message_id: MessageType, payload: memoryview): 271 | piece_index, begin, length = struct.unpack('!3I', cast(bytes, payload)) 272 | request = BlockRequest(piece_index, begin, length) 273 | self._check_position_range(request) 274 | 275 | if message_id == MessageType.request: 276 | if length > PeerTCPClient.MAX_REQUEST_LENGTH: 277 | raise ValueError('Requested {} bytes, but the current policy allows to accept requests ' 278 | 'of not more than {} bytes'.format(length, PeerTCPClient.MAX_REQUEST_LENGTH)) 279 | if (self._am_choking or not self._peer_interested or 280 | not self._download_info.pieces[piece_index].downloaded): 281 | # If peer isn't interested but requesting, their peer_interested flag wasn't considered 282 | # when selecting who to unchoke, so we may be not ready to upload to them. 283 | # If requested piece is not downloaded yet, we shouldn't disconnect because our piece_downloaded flag 284 | # could be removed because of file corruption. 
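                # In both situations the request is silently ignored; dropping the connection
                # would be too harsh for a peer that may simply have stale information about us.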
285 | return 286 | 287 | await self._send_block(request) 288 | await self.drain() 289 | elif message_id == MessageType.cancel: 290 | # Now we answer to a request immediately or reject and forget it, 291 | # so there's no need to handle cancel messages 292 | pass 293 | 294 | async def _handle_block(self, payload: memoryview): 295 | if not self._am_interested: 296 | # For example, we can be not interested in pieces from peers with big distrust rate 297 | return 298 | 299 | fmt = '!2I' 300 | piece_index, block_begin = struct.unpack_from(fmt, payload) 301 | block_data = memoryview(payload)[struct.calcsize(fmt):] 302 | block_length = len(block_data) 303 | request = BlockRequest(piece_index, block_begin, block_length) 304 | self._check_position_range(request) 305 | 306 | if not block_length: 307 | return 308 | 309 | async with self._file_structure.lock: 310 | # Manual lock acquiring guarantees that piece validation will not be performed between 311 | # condition checking and piece writing 312 | piece_info = self._download_info.pieces[piece_index] 313 | if piece_info.validating or piece_info.downloaded: 314 | return 315 | 316 | self._downloaded += block_length 317 | self._download_info.session_statistics.add_downloaded(self._peer, block_length) 318 | 319 | await self._file_structure.write(piece_index * self._download_info.piece_length + block_begin, block_data, 320 | acquire_lock=False) 321 | 322 | piece_info.mark_downloaded_blocks(self._peer, request) 323 | 324 | async def run(self): 325 | while True: 326 | message = await self._receive_message() 327 | if message is None: 328 | continue 329 | message_id, payload = message 330 | 331 | if message_id in (MessageType.choke, MessageType.unchoke, 332 | MessageType.interested, MessageType.not_interested): 333 | self._handle_setting_states(message_id, payload) 334 | elif message_id in (MessageType.have, MessageType.bitfield): 335 | self._handle_haves(message_id, payload) 336 | elif message_id in (MessageType.request, MessageType.cancel): 337 | await self._handle_requests(message_id, payload) 338 | elif message_id == MessageType.piece: 339 | await self._handle_block(payload) 340 | elif message_id == MessageType.port: 341 | PeerTCPClient._check_payload_len(message_id, payload, 2) 342 | # TODO: Ignore or implement DHT 343 | 344 | def send_keep_alive(self): 345 | self._send_message(None) 346 | 347 | def _send_bitfield(self): 348 | if self._download_info.downloaded_piece_count: 349 | arr = bitarray([info.downloaded for info in self._download_info.pieces], endian='big') 350 | self._send_message(MessageType.bitfield, arr.tobytes()) 351 | 352 | def send_have(self, piece_index: int): 353 | self._send_message(MessageType.have, struct.pack('!I', piece_index)) 354 | 355 | def send_request(self, request: BlockRequest, cancel: bool=False): 356 | self._check_position_range(request) 357 | if not cancel: 358 | assert self._peer in self._download_info.pieces[request.piece_index].owners 359 | 360 | self._send_message(MessageType.request if not cancel else MessageType.cancel, 361 | struct.pack('!3I', request.piece_index, request.block_begin, request.block_length)) 362 | 363 | async def _send_block(self, request: BlockRequest): 364 | block = await self._file_structure.read( 365 | request.piece_index * self._download_info.piece_length + request.block_begin, request.block_length) 366 | # TODO: Maybe can handle cancels here 367 | 368 | self._send_message(MessageType.piece, struct.pack('!2I', request.piece_index, request.block_begin), block) 369 | 370 | self._uploaded += 
request.block_length 371 | self._download_info.session_statistics.add_uploaded(self._peer, request.block_length) 372 | 373 | async def drain(self): 374 | await asyncio.wait_for(self._writer.drain(), PeerTCPClient.WRITE_TIMEOUT) 375 | 376 | def close(self): 377 | if self._writer is not None: 378 | self._writer.close() 379 | 380 | self._connected = False 381 | -------------------------------------------------------------------------------- /torrent_client/network/peer_tcp_server.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | from typing import Dict 4 | 5 | from torrent_client import algorithms 6 | from torrent_client.models import Peer 7 | from torrent_client.network.peer_tcp_client import PeerTCPClient 8 | 9 | 10 | __all__= ['PeerTCPServer'] 11 | 12 | 13 | logger = logging.getLogger(__name__) 14 | logger.setLevel(logging.DEBUG) 15 | 16 | 17 | class PeerTCPServer: 18 | def __init__(self, our_peer_id: bytes, torrent_managers: Dict[bytes, 'algorithms.TorrentManager']): 19 | self._our_peer_id = our_peer_id 20 | self._torrent_managers = torrent_managers 21 | 22 | self._server = None 23 | self._port = None 24 | 25 | async def _accept(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter): 26 | addr = writer.get_extra_info('peername') 27 | peer = Peer(addr[0], addr[1]) 28 | 29 | client = PeerTCPClient(self._our_peer_id, peer) 30 | 31 | try: 32 | info_hash = await client.accept(reader, writer) 33 | if info_hash not in self._torrent_managers: 34 | raise ValueError('Unknown info_hash') 35 | except Exception as e: 36 | client.close() 37 | 38 | if isinstance(e, asyncio.CancelledError): 39 | raise 40 | else: 41 | logger.debug("%s wasn't accepted because of %r", peer, e) 42 | else: 43 | self._torrent_managers[info_hash].accept_client(peer, client) 44 | 45 | PORT_RANGE = range(6881, 6889 + 1) 46 | 47 | async def start(self): 48 | for port in PeerTCPServer.PORT_RANGE: 49 | try: 50 | self._server = await asyncio.start_server(self._accept, port=port) 51 | except asyncio.CancelledError: 52 | raise 53 | except Exception as e: 54 | logger.debug('exception on starting server on port %s: %r', port, e) 55 | else: 56 | self._port = port 57 | logger.info('server started on port %s', port) 58 | return 59 | else: 60 | logger.warning('failed to start a server') 61 | 62 | @property 63 | def port(self): 64 | return self._port 65 | 66 | async def stop(self): 67 | if self._server is not None: 68 | self._server.close() 69 | await self._server.wait_closed() 70 | logger.info('server stopped') 71 | -------------------------------------------------------------------------------- /torrent_client/network/tracker_clients/__init__.py: -------------------------------------------------------------------------------- 1 | from urllib.parse import urlparse 2 | 3 | from torrent_client.models import DownloadInfo 4 | from torrent_client.network.tracker_clients.base import * 5 | from torrent_client.network.tracker_clients.http import * 6 | from torrent_client.network.tracker_clients.udp import * 7 | 8 | 9 | def create_tracker_client(announce_url: str, download_info: DownloadInfo, our_peer_id: bytes) -> BaseTrackerClient: 10 | parsed_announce_url = urlparse(announce_url) 11 | scheme = parsed_announce_url.scheme 12 | protocols = { 13 | 'http': HTTPTrackerClient, 14 | 'https': HTTPTrackerClient, 15 | 'udp': UDPTrackerClient, 16 | } 17 | if scheme not in protocols: 18 | raise ValueError('announce_url uses unknown protocol "{}"'.format(scheme)) 19 | 
client_class = protocols[scheme] 20 | 21 | return client_class(parsed_announce_url, download_info, our_peer_id) 22 | -------------------------------------------------------------------------------- /torrent_client/network/tracker_clients/base.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from typing import List, Optional 3 | 4 | from torrent_client.models import DownloadInfo, Peer 5 | from torrent_client.utils import grouper 6 | 7 | 8 | __all__ = ['EventType', 'TrackerError', 'BaseTrackerClient'] 9 | 10 | 11 | class EventType(Enum): 12 | none = 0 13 | completed = 1 14 | started = 2 15 | stopped = 3 16 | 17 | 18 | class TrackerError(Exception): 19 | pass 20 | 21 | 22 | class BaseTrackerClient: 23 | def __init__(self, download_info: DownloadInfo, our_peer_id: bytes): 24 | self._download_info = download_info 25 | self._statistics = self._download_info.session_statistics 26 | 27 | self._our_peer_id = our_peer_id 28 | 29 | self.interval = None # type: int 30 | self.min_interval = None # type: Optional[int] 31 | self.seed_count = None # type: int 32 | self.leech_count = None # type: int 33 | self._peers = None 34 | 35 | @property 36 | def peers(self) -> List[Peer]: 37 | return self._peers 38 | 39 | async def announce(self, server_port: int, event: EventType): 40 | raise NotImplementedError 41 | 42 | 43 | def parse_compact_peers_list(data: bytes) -> List[Peer]: 44 | if len(data) % 6 != 0: 45 | raise ValueError('Invalid length of a compact representation of peers') 46 | return list(map(Peer.from_compact_form, grouper(data, 6))) 47 | -------------------------------------------------------------------------------- /torrent_client/network/tracker_clients/http.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import urllib.parse 3 | from collections import OrderedDict 4 | from typing import Optional, cast 5 | 6 | import aiohttp 7 | import async_timeout 8 | import bencodepy 9 | 10 | from torrent_client.models import Peer, DownloadInfo 11 | from torrent_client.network.tracker_clients.base import BaseTrackerClient, TrackerError, parse_compact_peers_list, \ 12 | EventType 13 | 14 | 15 | __all__ = ['HTTPTrackerClient'] 16 | 17 | 18 | logger = logging.getLogger(__name__) 19 | logger.setLevel(logging.DEBUG) 20 | 21 | 22 | class HTTPTrackerClient(BaseTrackerClient): 23 | def __init__(self, url: urllib.parse.ParseResult, download_info: DownloadInfo, our_peer_id: bytes): 24 | super().__init__(download_info, our_peer_id) 25 | self._announce_url = url 26 | if url.scheme not in ('http', 'https'): 27 | raise ValueError('TrackerHTTPClient expects announce_url with HTTP and HTTPS protocol') 28 | 29 | self._tracker_id = None # type: Optional[bytes] 30 | 31 | def _handle_primary_response_fields(self, response: OrderedDict): 32 | if b'failure reason' in response: 33 | raise TrackerError(response[b'failure reason'].decode()) 34 | 35 | self.interval = response[b'interval'] 36 | if b'min interval' in response: 37 | self.min_interval = response[b'min interval'] 38 | if self.min_interval > self.interval: 39 | raise ValueError('Tracker returned min_interval that is greater than a default interval') 40 | 41 | peers = response[b'peers'] 42 | if isinstance(peers, bytes): 43 | self._peers = parse_compact_peers_list(peers) 44 | else: 45 | self._peers = list(map(Peer.from_dict, peers)) 46 | 47 | def _handle_optional_response_fields(self, response: OrderedDict): 48 | if b'warning message' in response: 49 | 
logger.warning('Tracker returned warning message: %s', response[b'warning message'].decode()) 50 | 51 | if b'tracker id' in response: 52 | self._tracker_id = response[b'tracker id'] 53 | if b'complete' in response: 54 | self.seed_count = response[b'complete'] 55 | if b'incomplete' in response: 56 | self.leech_count = response[b'incomplete'] 57 | 58 | REQUEST_TIMEOUT = 5 59 | 60 | async def announce(self, server_port: int, event: EventType): 61 | params = { 62 | 'info_hash': self._download_info.info_hash, 63 | 'peer_id': self._our_peer_id, 64 | 'port': server_port, 65 | 'uploaded': self._statistics.total_uploaded, 66 | 'downloaded': self._statistics.total_downloaded, 67 | 'left': self._download_info.bytes_left, 68 | 'compact': 1, 69 | } 70 | if event != EventType.none: 71 | params['event'] = event.name 72 | if self._tracker_id is not None: 73 | params['trackerid'] = self._tracker_id 74 | 75 | # We call urlencode() manually to properly encode `bytes` (e.g., info_hash) 76 | url = self._announce_url._replace(query=urllib.parse.urlencode(params)) 77 | 78 | with async_timeout.timeout(HTTPTrackerClient.REQUEST_TIMEOUT): 79 | async with aiohttp.ClientSession() as session: 80 | async with session.get(url.geturl()) as conn: 81 | response = await conn.read() 82 | 83 | response = bencodepy.decode(response) 84 | if not response: 85 | if event == EventType.started: 86 | raise ValueError('Tracker returned an empty answer on start announcement') 87 | return 88 | response = cast(OrderedDict, response) 89 | 90 | self._handle_primary_response_fields(response) 91 | self._handle_optional_response_fields(response) 92 | -------------------------------------------------------------------------------- /torrent_client/network/tracker_clients/udp.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | import random 4 | import struct 5 | import urllib.parse 6 | from enum import Enum 7 | from typing import Optional 8 | 9 | from torrent_client.models import DownloadInfo 10 | from torrent_client.network.tracker_clients.base import BaseTrackerClient, EventType, TrackerError, \ 11 | parse_compact_peers_list 12 | 13 | 14 | __all__ = ['UDPTrackerClient'] 15 | 16 | 17 | logger = logging.getLogger(__name__) 18 | logger.setLevel(logging.DEBUG) 19 | 20 | 21 | class DatagramReaderProtocol: 22 | """Implements missing stream API for UDP with asyncio. 
23 | Combines analogs for StreamReaderProtocol and StreamReader classes.""" 24 | 25 | def __init__(self): 26 | self._buffer = bytearray() 27 | self._waiter = None # type: Optional[asyncio.Future] 28 | self._connection_lost = False 29 | self._exception = None # type: Exception 30 | 31 | def connection_made(self, transport: asyncio.DatagramTransport): 32 | pass 33 | 34 | async def recv(self) -> bytes: 35 | if self._waiter is not None: 36 | raise RuntimeError('Another coroutine is already waiting for incoming data') 37 | 38 | if self._exception is None and not self._connection_lost and not self._buffer: 39 | self._waiter = asyncio.Future() 40 | try: 41 | await self._waiter 42 | finally: 43 | self._waiter = None 44 | if self._exception is not None: 45 | exc = self._exception 46 | self._exception = None 47 | raise exc 48 | if self._connection_lost: 49 | raise ConnectionResetError('Connection lost') 50 | 51 | buffer = self._buffer 52 | self._buffer = bytearray() 53 | return buffer 54 | 55 | def _wakeup_waiter(self): 56 | if self._waiter is not None: 57 | self._waiter.set_result(None) 58 | 59 | def datagram_received(self, data: bytes, addr: tuple): 60 | self._buffer.extend(data) 61 | self._wakeup_waiter() 62 | 63 | def error_received(self, exc: Exception): 64 | self._exception = exc 65 | self._wakeup_waiter() 66 | 67 | def connection_lost(self, exc: Exception): 68 | self._connection_lost = True 69 | self._exception = exc 70 | self._wakeup_waiter() 71 | 72 | 73 | class ActionType(Enum): 74 | connect = 0 75 | announce = 1 76 | scrape = 2 # TODO: not implemented yet 77 | error = 3 78 | 79 | 80 | def pack(*data) -> bytes: 81 | assert len(data) % 2 == 0 82 | 83 | common_format = '!' + ''.join(fmt for fmt in data[::2]) 84 | values = [elem for elem in data[1::2]] 85 | return struct.pack(common_format, *values) 86 | 87 | 88 | class UDPTrackerClient(BaseTrackerClient): 89 | def __init__(self, url: urllib.parse.ParseResult, download_info: DownloadInfo, our_peer_id: bytes, 90 | *, loop: asyncio.AbstractEventLoop=None): 91 | super().__init__(download_info, our_peer_id) 92 | if url.scheme != 'udp': 93 | raise ValueError('TrackerUDPClient expects announce_url with UDP protocol') 94 | self._host = url.hostname 95 | self._port = url.port 96 | 97 | self._loop = asyncio.get_event_loop() if loop is None else loop 98 | 99 | self._key = random.randint(0, 2 ** 32 - 1) # TODO: maybe implement the same key in HTTPTrackerClient 100 | # > An additional client identification mechanism that is not shared with any peers. 101 | # > It is intended to allow a client to prove their identity should their IP address change. 102 | # Source: https://wiki.theory.org/BitTorrentSpecification#Tracker_Request_Parameters 103 | 104 | MAGIC_CONNECTION_ID = 0x41727101980 105 | 106 | RESPONSE_HEADER_FMT = '!II' 107 | RESPONSE_HEADER_LEN = struct.calcsize(RESPONSE_HEADER_FMT) 108 | 109 | @staticmethod 110 | def _check_response(response: bytes, expected_transaction_id: bytes, expected_action: ActionType): 111 | actual_action, actual_transaction_id = struct.unpack_from(UDPTrackerClient.RESPONSE_HEADER_FMT, response) 112 | 113 | if actual_transaction_id != expected_transaction_id: 114 | raise ValueError('Unexpected transaction ID') 115 | # TODO: lock for announcements to one server? 116 | # Or both sockets will receive data and one will just skip a wrong packet? 
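# Illustrative note: BEP 15 replies start with a 32-bit action followed by the
# 32-bit transaction_id from the request (the '!II' RESPONSE_HEADER_FMT unpacked
# above); only the payload after this 8-byte header differs between actions.
# A hypothetical error reply could look like
#     struct.pack('!II', ActionType.error.value, transaction_id) + b'torrent not registered'
# and the code below turns such a reply into a TrackerError with the decoded message.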
117 | 118 | actual_action = ActionType(actual_action) 119 | if actual_action == ActionType.error: 120 | message = response[UDPTrackerClient.RESPONSE_HEADER_LEN:] 121 | raise TrackerError(message.decode()) 122 | if actual_action != expected_action: 123 | raise ValueError('Unexpected action ID (expected {}, got {})'.format( 124 | expected_action.name, actual_action.name)) 125 | 126 | REQUEST_TIMEOUT = 12 127 | # FIXME: Repeat requests as described in BEP 0015, but remember that we may have other trackers in announce-list 128 | 129 | async def announce(self, server_port: int, event: EventType): 130 | transport, protocol = await self._loop.create_datagram_endpoint( 131 | DatagramReaderProtocol, remote_addr=(self._host, self._port)) 132 | 133 | try: 134 | transaction_id = random.randint(0, 2 ** 32 - 1) 135 | request = pack( 136 | 'Q', UDPTrackerClient.MAGIC_CONNECTION_ID, 137 | 'I', ActionType.connect.value, 138 | 'I', transaction_id, 139 | ) 140 | transport.sendto(request) 141 | 142 | response = await asyncio.wait_for(protocol.recv(), UDPTrackerClient.REQUEST_TIMEOUT) 143 | UDPTrackerClient._check_response(response, transaction_id, ActionType.connect) 144 | (connection_id,) = struct.unpack_from('!Q', response, UDPTrackerClient.RESPONSE_HEADER_LEN) 145 | 146 | request = pack( 147 | 'Q', connection_id, 148 | 'I', ActionType.announce.value, 149 | 'I', transaction_id, 150 | '20s', self._download_info.info_hash, 151 | '20s', self._our_peer_id, 152 | 'Q', self._statistics.total_downloaded, 153 | 'Q', self._download_info.bytes_left, 154 | 'Q', self._statistics.total_uploaded, 155 | 'I', event.value, 156 | 'I', 0, # IP address: default 157 | 'I', self._key, # Key 158 | 'i', -1, # numwant: default 159 | 'H', server_port, 160 | ) 161 | assert len(request) == 98 162 | transport.sendto(request) 163 | 164 | response = await asyncio.wait_for(protocol.recv(), UDPTrackerClient.REQUEST_TIMEOUT) 165 | UDPTrackerClient._check_response(response, transaction_id, ActionType.announce) 166 | fmt = '!3I' 167 | self.interval, self.leech_count, self.seed_count = struct.unpack_from( 168 | fmt, response, UDPTrackerClient.RESPONSE_HEADER_LEN) 169 | self.min_interval = self.interval 170 | 171 | compact_peer_list = response[UDPTrackerClient.RESPONSE_HEADER_LEN + struct.calcsize(fmt):] 172 | self._peers = parse_compact_peers_list(compact_peer_list) 173 | finally: 174 | transport.close() 175 | -------------------------------------------------------------------------------- /torrent_client/utils.py: -------------------------------------------------------------------------------- 1 | from math import floor, log 2 | from typing import List, TypeVar, Sequence 3 | 4 | 5 | T = TypeVar('T', Sequence, memoryview) 6 | 7 | 8 | def grouper(arr: T, group_size: int) -> List[T]: 9 | # Yield successive n-sized chunks from l. 
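# Illustrative example (not from the original source): grouper(b'abcdef', 2)
# returns [b'ab', b'cd', b'ef'], and a trailing chunk may be shorter than
# group_size. Note that this implementation builds a list rather than yielding.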
10 | 11 | return [arr[i:i + group_size] for i in range(0, len(arr), group_size)] 12 | 13 | 14 | UNIT_BASE = 2 ** 10 15 | UNIT_PREFIXES = 'KMG' 16 | 17 | 18 | def humanize_size(size: float) -> str: 19 | if not size: 20 | return 'None' 21 | if size < UNIT_BASE: 22 | return '{:.0f} bytes'.format(size) 23 | unit = floor(log(size, UNIT_BASE)) 24 | unit_name = UNIT_PREFIXES[min(unit, len(UNIT_PREFIXES)) - 1] + 'iB' 25 | return '{:.1f} {}'.format(size / UNIT_BASE ** unit, unit_name) 26 | 27 | 28 | def humanize_speed(speed: int) -> str: 29 | return humanize_size(speed) + '/s' 30 | 31 | 32 | SECONDS_PER_MINUTE = 60 33 | MINUTES_PER_HOUR = 60 34 | 35 | 36 | def humanize_time(total_seconds: int) -> str: 37 | if total_seconds < SECONDS_PER_MINUTE: 38 | return 'less than a minute' 39 | total_minutes = round(total_seconds / SECONDS_PER_MINUTE) 40 | 41 | hours = total_minutes // MINUTES_PER_HOUR 42 | minutes = total_minutes % MINUTES_PER_HOUR 43 | result = '{} min'.format(minutes) 44 | if hours: 45 | result = '{} h '.format(hours) + result 46 | return result 47 | 48 | 49 | def floor_to(x: float, ndigits: int) -> float: 50 | scale = 10 ** ndigits 51 | return floor(x * scale) / scale 52 | 53 | 54 | def import_signals(): 55 | try: 56 | from PyQt5.QtCore import QObject, pyqtSignal 57 | 58 | return QObject, pyqtSignal 59 | except ImportError: 60 | return object, None 61 | -------------------------------------------------------------------------------- /torrent_gui.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import asyncio 5 | import logging 6 | import os 7 | import sys 8 | from contextlib import closing 9 | from functools import partial, partialmethod 10 | from math import floor 11 | from typing import Dict, List, Optional 12 | 13 | # noinspection PyUnresolvedReferences 14 | from PyQt5.QtCore import Qt, QThread, pyqtSignal 15 | # noinspection PyUnresolvedReferences 16 | from PyQt5.QtGui import QIcon, QFont, QDropEvent 17 | # noinspection PyUnresolvedReferences 18 | from PyQt5.QtWidgets import QWidget, QListWidget, QAbstractItemView, QLabel, QVBoxLayout, QProgressBar, \ 19 | QListWidgetItem, QMainWindow, QApplication, QFileDialog, QMessageBox, QDialog, QDialogButtonBox, QTreeWidget, \ 20 | QTreeWidgetItem, QHeaderView, QHBoxLayout, QPushButton, QLineEdit, QAction 21 | 22 | from torrent_client.control import ControlManager, ControlServer, ControlClient 23 | from torrent_client.models import TorrentState, TorrentInfo, FileTreeNode, FileInfo 24 | from torrent_client.utils import humanize_speed, humanize_time, humanize_size 25 | 26 | 27 | logging.basicConfig(format='%(levelname)s %(asctime)s %(name)-23s %(message)s', datefmt='%H:%M:%S') 28 | 29 | 30 | ICON_DIRECTORY = os.path.join(os.path.dirname(__file__), 'icons') 31 | 32 | 33 | def load_icon(name: str): 34 | return QIcon(os.path.join(ICON_DIRECTORY, name + '.svg')) 35 | 36 | 37 | file_icon = load_icon('file') 38 | directory_icon = load_icon('directory') 39 | 40 | 41 | def get_directory(directory: Optional[str]): 42 | return directory if directory is not None else os.getcwd() 43 | 44 | 45 | class TorrentAddingDialog(QDialog): 46 | SELECTION_LABEL_FORMAT = 'Selected {} files ({})' 47 | 48 | def _traverse_file_tree(self, name: str, node: FileTreeNode, parent: QWidget): 49 | item = QTreeWidgetItem(parent) 50 | item.setCheckState(0, Qt.Checked) 51 | item.setText(0, name) 52 | if isinstance(node, FileInfo): 53 | item.setText(1, humanize_size(node.length)) 54 | 
item.setIcon(0, file_icon) 55 | self._file_items.append((node, item)) 56 | return 57 | 58 | item.setIcon(0, directory_icon) 59 | for name, child in node.items(): 60 | self._traverse_file_tree(name, child, item) 61 | 62 | def _get_directory_browse_widget(self): 63 | widget = QWidget() 64 | hbox = QHBoxLayout(widget) 65 | hbox.setContentsMargins(0, 0, 0, 0) 66 | 67 | self._path_edit = QLineEdit(self._download_dir) 68 | self._path_edit.setReadOnly(True) 69 | hbox.addWidget(self._path_edit, 3) 70 | 71 | browse_button = QPushButton('Browse...') 72 | browse_button.clicked.connect(self._browse) 73 | hbox.addWidget(browse_button, 1) 74 | 75 | widget.setLayout(hbox) 76 | return widget 77 | 78 | def _browse(self): 79 | new_download_dir = QFileDialog.getExistingDirectory(self, 'Select download directory', self._download_dir) 80 | if not new_download_dir: 81 | return 82 | 83 | self._download_dir = new_download_dir 84 | self._path_edit.setText(new_download_dir) 85 | 86 | def __init__(self, parent: QWidget, filename: str, torrent_info: TorrentInfo, 87 | control_thread: 'ControlManagerThread'): 88 | super().__init__(parent) 89 | self._torrent_info = torrent_info 90 | download_info = torrent_info.download_info 91 | self._control_thread = control_thread 92 | self._control = control_thread.control 93 | 94 | vbox = QVBoxLayout(self) 95 | 96 | self._download_dir = get_directory(self._control.last_download_dir) 97 | vbox.addWidget(QLabel('Download directory:')) 98 | vbox.addWidget(self._get_directory_browse_widget()) 99 | 100 | vbox.addWidget(QLabel('Announce URLs:')) 101 | 102 | url_tree = QTreeWidget() 103 | url_tree.setVerticalScrollMode(QAbstractItemView.ScrollPerPixel) 104 | url_tree.header().close() 105 | vbox.addWidget(url_tree) 106 | for i, tier in enumerate(torrent_info.announce_list): 107 | tier_item = QTreeWidgetItem(url_tree) 108 | tier_item.setText(0, 'Tier {}'.format(i + 1)) 109 | for url in tier: 110 | url_item = QTreeWidgetItem(tier_item) 111 | url_item.setText(0, url) 112 | url_tree.expandAll() 113 | vbox.addWidget(url_tree, 1) 114 | 115 | file_tree = QTreeWidget() 116 | file_tree.setVerticalScrollMode(QAbstractItemView.ScrollPerPixel) 117 | file_tree.setHeaderLabels(('Name', 'Size')) 118 | file_tree.header().setSectionResizeMode(0, QHeaderView.ResizeToContents) 119 | self._file_items = [] 120 | self._traverse_file_tree(download_info.suggested_name, download_info.file_tree, file_tree) 121 | file_tree.sortItems(0, Qt.AscendingOrder) 122 | file_tree.expandAll() 123 | file_tree.itemClicked.connect(self._update_checkboxes) 124 | vbox.addWidget(file_tree, 3) 125 | 126 | self._selection_label = QLabel(TorrentAddingDialog.SELECTION_LABEL_FORMAT.format( 127 | len(download_info.files), humanize_size(download_info.total_size))) 128 | vbox.addWidget(self._selection_label) 129 | 130 | self._button_box = QDialogButtonBox(self) 131 | self._button_box.setOrientation(Qt.Horizontal) 132 | self._button_box.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok) 133 | self._button_box.button(QDialogButtonBox.Ok).clicked.connect(self.submit_torrent) 134 | self._button_box.button(QDialogButtonBox.Cancel).clicked.connect(self.close) 135 | vbox.addWidget(self._button_box) 136 | 137 | self.setFixedSize(450, 550) 138 | self.setWindowTitle('Adding "{}"'.format(filename)) 139 | 140 | def _set_check_state_to_tree(self, item: QTreeWidgetItem, check_state: Qt.CheckState): 141 | for i in range(item.childCount()): 142 | child = item.child(i) 143 | child.setCheckState(0, check_state) 144 | 
self._set_check_state_to_tree(child, check_state) 145 | 146 | def _update_checkboxes(self, item: QTreeWidgetItem, column: int): 147 | if column != 0: 148 | return 149 | 150 | new_check_state = item.checkState(0) 151 | self._set_check_state_to_tree(item, new_check_state) 152 | 153 | while True: 154 | item = item.parent() 155 | if item is None: 156 | break 157 | 158 | has_checked_children = False 159 | has_partially_checked_children = False 160 | has_unchecked_children = False 161 | for i in range(item.childCount()): 162 | state = item.child(i).checkState(0) 163 | if state == Qt.Checked: 164 | has_checked_children = True 165 | elif state == Qt.PartiallyChecked: 166 | has_partially_checked_children = True 167 | else: 168 | has_unchecked_children = True 169 | 170 | if not has_partially_checked_children and not has_unchecked_children: 171 | new_state = Qt.Checked 172 | elif has_checked_children or has_partially_checked_children: 173 | new_state = Qt.PartiallyChecked 174 | else: 175 | new_state = Qt.Unchecked 176 | item.setCheckState(0, new_state) 177 | 178 | self._update_selection_label() 179 | 180 | def _update_selection_label(self): 181 | selected_file_count = 0 182 | selected_size = 0 183 | for node, item in self._file_items: 184 | if item.checkState(0) == Qt.Checked: 185 | selected_file_count += 1 186 | selected_size += node.length 187 | 188 | ok_button = self._button_box.button(QDialogButtonBox.Ok) 189 | if not selected_file_count: 190 | ok_button.setEnabled(False) 191 | self._selection_label.setText('Nothing to download') 192 | else: 193 | ok_button.setEnabled(True) 194 | self._selection_label.setText(TorrentAddingDialog.SELECTION_LABEL_FORMAT.format( 195 | selected_file_count, humanize_size(selected_size))) 196 | 197 | def submit_torrent(self): 198 | self._torrent_info.download_dir = self._download_dir 199 | self._control.last_download_dir = os.path.abspath(self._download_dir) 200 | 201 | file_paths = [] 202 | for node, item in self._file_items: 203 | if item.checkState(0) == Qt.Checked: 204 | file_paths.append(node.path) 205 | if not self._torrent_info.download_info.single_file_mode: 206 | self._torrent_info.download_info.select_files(file_paths, 'whitelist') 207 | 208 | self._control_thread.loop.call_soon_threadsafe(self._control.add, self._torrent_info) 209 | 210 | self.close() 211 | 212 | 213 | class TorrentListWidgetItem(QWidget): 214 | _name_font = QFont() 215 | _name_font.setBold(True) 216 | 217 | _stats_font = QFont() 218 | _stats_font.setPointSize(10) 219 | 220 | def __init__(self): 221 | super().__init__() 222 | vbox = QVBoxLayout(self) 223 | 224 | self._name_label = QLabel() 225 | self._name_label.setFont(TorrentListWidgetItem._name_font) 226 | vbox.addWidget(self._name_label) 227 | 228 | self._upper_status_label = QLabel() 229 | self._upper_status_label.setFont(TorrentListWidgetItem._stats_font) 230 | vbox.addWidget(self._upper_status_label) 231 | 232 | self._progress_bar = QProgressBar() 233 | self._progress_bar.setFixedHeight(15) 234 | self._progress_bar.setMaximum(1000) 235 | vbox.addWidget(self._progress_bar) 236 | 237 | self._lower_status_label = QLabel() 238 | self._lower_status_label.setFont(TorrentListWidgetItem._stats_font) 239 | vbox.addWidget(self._lower_status_label) 240 | 241 | self._state = None 242 | self._waiting_control_action = False 243 | 244 | @property 245 | def state(self) -> TorrentState: 246 | return self._state 247 | 248 | @state.setter 249 | def state(self, state: TorrentState): 250 | self._state = state 251 | self._update() 252 | 253 | @property 254 
| def waiting_control_action(self) -> bool: 255 | return self._waiting_control_action 256 | 257 | @waiting_control_action.setter 258 | def waiting_control_action(self, value: bool): 259 | self._waiting_control_action = value 260 | self._update() 261 | 262 | def _update(self): 263 | state = self._state 264 | 265 | self._name_label.setText(state.suggested_name) # FIXME: Avoid XSS in all setText calls 266 | 267 | if state.downloaded_size < state.selected_size: 268 | status_text = '{} of {}'.format(humanize_size(state.downloaded_size), humanize_size(state.selected_size)) 269 | else: 270 | status_text = '{} (complete)'.format(humanize_size(state.selected_size)) 271 | status_text += ', Ratio: {:.1f}'.format(state.ratio) 272 | self._upper_status_label.setText(status_text) 273 | 274 | self._progress_bar.setValue(floor(state.progress * 1000)) 275 | 276 | if self.waiting_control_action: 277 | status_text = 'Waiting' 278 | elif state.paused: 279 | status_text = 'Paused' 280 | elif state.complete: 281 | status_text = 'Uploading to {} of {} peers'.format(state.uploading_peer_count, state.total_peer_count) 282 | if state.upload_speed: 283 | status_text += ' on {}'.format(humanize_speed(state.upload_speed)) 284 | else: 285 | status_text = 'Downloading from {} of {} peers'.format( 286 | state.downloading_peer_count, state.total_peer_count) 287 | if state.download_speed: 288 | status_text += ' on {}'.format(humanize_speed(state.download_speed)) 289 | eta_seconds = state.eta_seconds 290 | if eta_seconds is not None: 291 | status_text += ', {} remaining'.format(humanize_time(eta_seconds) if eta_seconds is not None else None) 292 | self._lower_status_label.setText(status_text) 293 | 294 | 295 | class TorrentListWidget(QListWidget): 296 | files_dropped = pyqtSignal(list) 297 | 298 | def __init__(self, parent=None): 299 | super().__init__(parent) 300 | 301 | self.setSelectionMode(QAbstractItemView.ExtendedSelection) 302 | self.setVerticalScrollMode(QAbstractItemView.ScrollPerPixel) 303 | 304 | self.setAcceptDrops(True) 305 | 306 | def drag_handler(self, event: QDropEvent, drop: bool=False): 307 | if event.mimeData().hasUrls(): 308 | event.setDropAction(Qt.CopyAction) 309 | event.accept() 310 | 311 | if drop: 312 | self.files_dropped.emit([url.toLocalFile() for url in event.mimeData().urls()]) 313 | else: 314 | event.ignore() 315 | 316 | dragEnterEvent = drag_handler 317 | dragMoveEvent = drag_handler 318 | dropEvent = partialmethod(drag_handler, drop=True) 319 | 320 | 321 | class MainWindow(QMainWindow): 322 | def __init__(self, control_thread: 'ControlManagerThread'): 323 | super().__init__() 324 | 325 | self._control_thread = control_thread 326 | control = control_thread.control 327 | 328 | toolbar = self.addToolBar('Exits') 329 | toolbar.setToolButtonStyle(Qt.ToolButtonTextBesideIcon) 330 | toolbar.setMovable(False) 331 | 332 | self._add_action = toolbar.addAction(load_icon('add'), 'Add') 333 | self._add_action.triggered.connect(self._add_torrents_triggered) 334 | 335 | self._pause_action = toolbar.addAction(load_icon('pause'), 'Pause') 336 | self._pause_action.setEnabled(False) 337 | self._pause_action.triggered.connect(partial(self._control_action_triggered, control.pause)) 338 | 339 | self._resume_action = toolbar.addAction(load_icon('resume'), 'Resume') 340 | self._resume_action.setEnabled(False) 341 | self._resume_action.triggered.connect(partial(self._control_action_triggered, control.resume)) 342 | 343 | self._remove_action = toolbar.addAction(load_icon('remove'), 'Remove') 344 | 
self._remove_action.setEnabled(False) 345 | self._remove_action.triggered.connect(partial(self._control_action_triggered, control.remove)) 346 | 347 | self._about_action = toolbar.addAction(load_icon('about'), 'About') 348 | self._about_action.triggered.connect(self._show_about) 349 | 350 | self._list_widget = TorrentListWidget() 351 | self._list_widget.itemSelectionChanged.connect(self._update_control_action_state) 352 | self._list_widget.files_dropped.connect(self.add_torrent_files) 353 | self._torrent_to_item = {} # type: Dict[bytes, QListWidgetItem] 354 | 355 | self.setCentralWidget(self._list_widget) 356 | 357 | self.setMinimumSize(550, 450) 358 | self.resize(600, 500) 359 | self.setWindowTitle('BitTorrent Client') 360 | 361 | control_thread.error_happened.connect(self._error_happened) 362 | control.torrents_suggested.connect(self.add_torrent_files) 363 | control.torrent_added.connect(self._add_torrent_item) 364 | control.torrent_changed.connect(self._update_torrent_item) 365 | control.torrent_removed.connect(self._remove_torrent_item) 366 | 367 | self.show() 368 | 369 | def _add_torrent_item(self, state: TorrentState): 370 | widget = TorrentListWidgetItem() 371 | widget.state = state 372 | 373 | item = QListWidgetItem() 374 | item.setIcon(file_icon if state.single_file_mode else directory_icon) 375 | item.setSizeHint(widget.sizeHint()) 376 | item.setData(Qt.UserRole, state.info_hash) 377 | 378 | items_upper = 0 379 | for i in range(self._list_widget.count()): 380 | prev_item = self._list_widget.item(i) 381 | if self._list_widget.itemWidget(prev_item).state.suggested_name > state.suggested_name: 382 | break 383 | items_upper += 1 384 | self._list_widget.insertItem(items_upper, item) 385 | 386 | self._list_widget.setItemWidget(item, widget) 387 | self._torrent_to_item[state.info_hash] = item 388 | 389 | def _update_torrent_item(self, state: TorrentState): 390 | if state.info_hash not in self._torrent_to_item: 391 | return 392 | 393 | widget = self._list_widget.itemWidget(self._torrent_to_item[state.info_hash]) 394 | if widget.state.paused != state.paused: 395 | widget.waiting_control_action = False 396 | widget.state = state 397 | 398 | self._update_control_action_state() 399 | 400 | def _remove_torrent_item(self, info_hash: bytes): 401 | item = self._torrent_to_item[info_hash] 402 | self._list_widget.takeItem(self._list_widget.row(item)) 403 | del self._torrent_to_item[info_hash] 404 | 405 | self._update_control_action_state() 406 | 407 | def _update_control_action_state(self): 408 | self._pause_action.setEnabled(False) 409 | self._resume_action.setEnabled(False) 410 | self._remove_action.setEnabled(False) 411 | for item in self._list_widget.selectedItems(): 412 | widget = self._list_widget.itemWidget(item) 413 | if widget.waiting_control_action: 414 | continue 415 | 416 | if widget.state.paused: 417 | self._resume_action.setEnabled(True) 418 | else: 419 | self._pause_action.setEnabled(True) 420 | self._remove_action.setEnabled(True) 421 | 422 | def _error_happened(self, description: str, err: Exception): 423 | QMessageBox.critical(self, description, str(err)) 424 | 425 | def add_torrent_files(self, paths: List[str]): 426 | for path in paths: 427 | try: 428 | torrent_info = TorrentInfo.from_file(path, download_dir=None) 429 | self._control_thread.control.last_torrent_dir = os.path.abspath(os.path.dirname(path)) 430 | 431 | if torrent_info.download_info.info_hash in self._torrent_to_item: 432 | raise ValueError('This torrent is already added') 433 | except Exception as err: 434 | 
self._error_happened('Failed to add "{}"'.format(path), err) 435 | continue 436 | 437 | TorrentAddingDialog(self, path, torrent_info, self._control_thread).exec() 438 | 439 | def _add_torrents_triggered(self): 440 | paths, _ = QFileDialog.getOpenFileNames(self, 'Add torrents', self._control_thread.control.last_torrent_dir, 441 | 'Torrent file (*.torrent);;All files (*)') 442 | self.add_torrent_files(paths) 443 | 444 | @staticmethod 445 | async def _invoke_control_action(action, info_hash: bytes): 446 | try: 447 | result = action(info_hash) 448 | if asyncio.iscoroutine(result): 449 | await result 450 | except ValueError: 451 | pass 452 | 453 | def _control_action_triggered(self, action): 454 | for item in self._list_widget.selectedItems(): 455 | widget = self._list_widget.itemWidget(item) 456 | if widget.waiting_control_action: 457 | continue 458 | 459 | info_hash = item.data(Qt.UserRole) 460 | asyncio.run_coroutine_threadsafe(MainWindow._invoke_control_action(action, info_hash), 461 | self._control_thread.loop) 462 | widget.waiting_control_action = True 463 | 464 | self._update_control_action_state() 465 | 466 | def _show_about(self): 467 | QMessageBox.about(self, 'About', '
<b>Prototype of a BitTorrent client</b><br/><br/>' 468 | 'Copyright © 2016 Alexander Borzunov<br/><br/>' 469 | 'Icons are made by Google and Freepik from ' 470 | '<a href="http://www.flaticon.com">www.flaticon.com</a>
') 471 | 472 | 473 | class ControlManagerThread(QThread): 474 | error_happened = pyqtSignal(str, Exception) 475 | 476 | def __init__(self): 477 | super().__init__() 478 | 479 | self._loop = None # type: asyncio.AbstractEventLoop 480 | self._control = ControlManager() 481 | self._control_server = ControlServer(self._control, None) 482 | self._stopping = False 483 | 484 | @property 485 | def loop(self) -> asyncio.AbstractEventLoop: 486 | return self._loop 487 | 488 | @property 489 | def control(self) -> ControlManager: 490 | return self._control 491 | 492 | def run(self): 493 | self._loop = asyncio.new_event_loop() 494 | asyncio.set_event_loop(self._loop) 495 | with closing(self._loop): 496 | self._loop.run_until_complete(self._control.start()) 497 | self._loop.run_until_complete(self._control_server.start()) 498 | 499 | try: 500 | self._control.load_state() 501 | except Exception as err: 502 | self.error_happened.emit('Failed to load program state', err) 503 | self._control.invoke_state_dumps() 504 | 505 | self._loop.run_forever() 506 | 507 | def stop(self): 508 | if self._stopping: 509 | return 510 | self._stopping = True 511 | 512 | stop_fut = asyncio.run_coroutine_threadsafe(asyncio.wait([self._control_server.stop(), self._control.stop()]), 513 | self._loop) 514 | stop_fut.add_done_callback(lambda fut: self._loop.stop()) 515 | 516 | self.wait() 517 | 518 | 519 | def suggest_torrents(manager: ControlManager, filenames: List[str]): 520 | manager.torrents_suggested.emit(filenames) 521 | 522 | 523 | async def find_another_daemon(filenames: List[str]) -> bool: 524 | try: 525 | async with ControlClient() as client: 526 | if filenames: 527 | await client.execute(partial(suggest_torrents, filenames=filenames)) 528 | return True 529 | except RuntimeError: 530 | return False 531 | 532 | 533 | def main(): 534 | parser = argparse.ArgumentParser(description='A prototype of BitTorrent client (GUI)') 535 | parser.add_argument('--debug', action='store_true', help='Show debug messages') 536 | parser.add_argument('filenames', nargs='*', help='Torrent file names') 537 | args = parser.parse_args() 538 | 539 | if not args.debug: 540 | logging.disable(logging.INFO) 541 | 542 | app = QApplication(sys.argv) 543 | app.setWindowIcon(load_icon('logo')) 544 | 545 | with closing(asyncio.get_event_loop()) as loop: 546 | if loop.run_until_complete(find_another_daemon(args.filenames)): 547 | if not args.filenames: 548 | QMessageBox.critical(None, 'Failed to start', 'Another program instance is already running') 549 | return 550 | 551 | control_thread = ControlManagerThread() 552 | main_window = MainWindow(control_thread) 553 | 554 | control_thread.start() 555 | app.lastWindowClosed.connect(control_thread.stop) 556 | 557 | main_window.add_torrent_files(args.filenames) 558 | 559 | return app.exec() 560 | 561 | 562 | if __name__ == '__main__': 563 | sys.exit(main()) 564 | --------------------------------------------------------------------------------
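A minimal usage sketch for the tracker-client layer shown above, assuming only APIs visible in this dump (TorrentInfo.from_file, HTTPTrackerClient, EventType, and the interval/peers attributes) and that the torrent's first announce URL is an HTTP(S) tracker; the peer-id format, the listening port, and the torrent path are illustrative assumptions rather than values taken from the project:

import asyncio
import os
import urllib.parse

from torrent_client.models import TorrentInfo
from torrent_client.network.tracker_clients.base import EventType
from torrent_client.network.tracker_clients.http import HTTPTrackerClient


async def announce_once(torrent_path: str) -> None:
    # Parse the metainfo file; download_dir is not needed for a bare announce.
    torrent_info = TorrentInfo.from_file(torrent_path, download_dir=None)

    # Assumed 20-byte peer id; the real client generates its own identifier elsewhere.
    our_peer_id = b'-XX0001-' + os.urandom(12)

    # Take the first URL of the first tier; HTTPTrackerClient expects a ParseResult.
    url = urllib.parse.urlparse(torrent_info.announce_list[0][0])
    client = HTTPTrackerClient(url, torrent_info.download_info, our_peer_id)

    # 6881 is an arbitrary example port for the (not actually running) peer server.
    await client.announce(6881, EventType.started)
    print('interval: {} s, {} peers received'.format(client.interval, len(client.peers)))


if __name__ == '__main__':
    asyncio.get_event_loop().run_until_complete(announce_once('example.torrent'))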