├── .gitignore ├── LICENSE ├── README.md ├── install ├── install.sh └── supervisord.d │ └── tracker.ini └── src ├── BitTornado ├── BT1 │ ├── Choker.py │ ├── Connecter.py │ ├── Downloader.py │ ├── DownloaderFeedback.py │ ├── Encrypter.py │ ├── FileSelector.py │ ├── Filter.py │ ├── HTTPDownloader.py │ ├── NatCheck.py │ ├── PiecePicker.py │ ├── Rerequester.py │ ├── Statistics.py │ ├── Storage.py │ ├── StorageWrapper.py │ ├── StreamCheck.py │ ├── T2T.py │ ├── Uploader.py │ ├── __init__.py │ ├── btformats.py │ ├── fakeopen.py │ ├── makemetafile.py │ └── track.py ├── ConfigDir.py ├── ConfigReader.py ├── ConnChoice.py ├── CreateIcons.py ├── CurrentRateMeasure.py ├── HTTPHandler.py ├── PSYCO.py ├── RateLimiter.py ├── RateMeasure.py ├── RawServer.py ├── ServerPortHandler.py ├── SocketHandler.py ├── __init__.py ├── bencode.py ├── bitfield.py ├── clock.py ├── download_bt1.py ├── inifile.py ├── iprangeparse.py ├── launchmanycore.py ├── natpunch.py ├── parseargs.py ├── parsedir.py ├── piecebuffer.py ├── selectpoll.py ├── subnetparse.py ├── torrentlistparse.py └── zurllib.py ├── murder_client.py ├── murder_make_torrent.py └── murder_tracker.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # filebt 2 | 3 | 基于murder的P2P文件分发工具 4 | 5 | 在Twitter开源的[murder](https://github.com/lg/murder)的基础上,增加一些便于使用的脚本。 6 | 7 | ## 产生背景 8 | 9 | 不管是使用SaltStack还是Ansible,仅支持小文件的分发,在分发大文件的时候速度非常慢,甚至会有卡死的现象,之前用过murder,封装一个方便自己使用的小工具,声明一下,真的没有什么创新,呵呵... 
10 | 11 | ## 部署手册 12 | 13 | 1.克隆filebt代码 14 | ``` 15 | [root@linux-node1 ~]# git clone https://github.com/unixhot/filebt.git 16 | ``` 17 | 18 | 2.部署tracker服务 19 | 20 | ``` 21 | [root@linux-node1 ~]# cd filebt/install/ 22 | [root@linux-node1 install]# ./install.sh tracker 23 | ``` 24 | 25 | ## 使用手册 26 | 27 | 1.生成一个100M的测试文件 28 | ``` 29 | [root@linux-node1 ~]# dd if=/dev/zero of=bigfile.tar.gz count=1 bs=100M 30 | ``` 31 | 32 | 2.为测试文件生成种子文件 33 | ``` 34 | [root@linux-node1 ~]# python /opt/filebt/murder_make_torrent.py bigfile.tar.gz 192.168.56.11:8998 bigfile.tar.gz.torrent 35 | ``` 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /install/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | install_init(){ 4 | INSTALL_PATH="/opt/filebt" 5 | LOG_PATH=${INSTALL_PATH}/logs 6 | DATA_PATH=${INSTALL_PATH}/data 7 | mkdir -p ${INSTALL_PATH} ${LOG_PATH} ${DATA_PATH} 8 | /bin/cp -r ../src/* $INSTALL_PATH 9 | echo "------ code deploy end ------" 10 | } 11 | 12 | tracker(){ 13 | yum install -y supervisor 14 | /bin/cp ./supervisord.d/*.ini /etc/supervisord.d/ 15 | systemctl start supervisord && systemctl enable supervisord 16 | supervisorctl status 17 | echo "------ filebt-tracker------" 18 | netstat -ntlp | grep 8998 19 | } 20 | 21 | usage(){ 22 | echo 23 | echo "Usage: $0 (install | uninstall | tracker)" 24 | echo 25 | } 26 | 27 | 28 | main(){ 29 | case $1 in 30 | install) 31 | install_init; 32 | ;; 33 | tracker) 34 | install_init; 35 | tracker; 36 | ;; 37 | *) 38 | usage; 39 | esac 40 | } 41 | 42 | main $1 43 | -------------------------------------------------------------------------------- /install/supervisord.d/tracker.ini: -------------------------------------------------------------------------------- 1 | [program:filebt-tracker] 2 | command=/usr/bin/python /opt/filebt/murder_tracker.py --dfile /opt/filebt/data/tracker-data-file --port 8998 3 | 
startsecs=0 4 | stopwaitsecs=0 5 | autostart=true 6 | autorestart=true 7 | redirect_stderr=true 8 | stdout_logfile=/opt/filebt/logs/filebt-tracker.log 9 | -------------------------------------------------------------------------------- /src/BitTornado/BT1/Choker.py: -------------------------------------------------------------------------------- 1 | # Written by Bram Cohen 2 | # see LICENSE.txt for license information 3 | 4 | from random import randrange, shuffle 5 | from BitTornado.clock import clock 6 | try: 7 | True 8 | except: 9 | True = 1 10 | False = 0 11 | 12 | class Choker: 13 | def __init__(self, config, schedule, picker, done = lambda: False): 14 | self.config = config 15 | self.round_robin_period = config['round_robin_period'] 16 | self.schedule = schedule 17 | self.picker = picker 18 | self.connections = [] 19 | self.last_preferred = 0 20 | self.last_round_robin = clock() 21 | self.done = done 22 | self.super_seed = False 23 | self.paused = False 24 | schedule(self._round_robin, 5) 25 | 26 | def set_round_robin_period(self, x): 27 | self.round_robin_period = x 28 | 29 | def _round_robin(self): 30 | self.schedule(self._round_robin, 5) 31 | if self.super_seed: 32 | cons = range(len(self.connections)) 33 | to_close = [] 34 | count = self.config['min_uploads']-self.last_preferred 35 | if count > 0: # optimization 36 | shuffle(cons) 37 | for c in cons: 38 | i = self.picker.next_have(self.connections[c], count > 0) 39 | if i is None: 40 | continue 41 | if i < 0: 42 | to_close.append(self.connections[c]) 43 | continue 44 | self.connections[c].send_have(i) 45 | count -= 1 46 | for c in to_close: 47 | c.close() 48 | if self.last_round_robin + self.round_robin_period < clock(): 49 | self.last_round_robin = clock() 50 | for i in xrange(1, len(self.connections)): 51 | c = self.connections[i] 52 | u = c.get_upload() 53 | if u.is_choked() and u.is_interested(): 54 | self.connections = self.connections[i:] + self.connections[:i] 55 | break 56 | self._rechoke() 57 | 58 
| def _rechoke(self): 59 | preferred = [] 60 | maxuploads = self.config['max_uploads'] 61 | if self.paused: 62 | for c in self.connections: 63 | c.get_upload().choke() 64 | return 65 | if maxuploads > 1: 66 | for c in self.connections: 67 | u = c.get_upload() 68 | if not u.is_interested(): 69 | continue 70 | if self.done(): 71 | r = u.get_rate() 72 | else: 73 | d = c.get_download() 74 | r = d.get_rate() 75 | if r < 1000 or d.is_snubbed(): 76 | continue 77 | preferred.append((-r, c)) 78 | self.last_preferred = len(preferred) 79 | preferred.sort() 80 | del preferred[maxuploads-1:] 81 | preferred = [x[1] for x in preferred] 82 | count = len(preferred) 83 | hit = False 84 | to_unchoke = [] 85 | for c in self.connections: 86 | u = c.get_upload() 87 | if c in preferred: 88 | to_unchoke.append(u) 89 | else: 90 | if count < maxuploads or not hit: 91 | to_unchoke.append(u) 92 | if u.is_interested(): 93 | count += 1 94 | hit = True 95 | else: 96 | u.choke() 97 | for u in to_unchoke: 98 | u.unchoke() 99 | 100 | def connection_made(self, connection, p = None): 101 | if p is None: 102 | p = randrange(-2, len(self.connections) + 1) 103 | self.connections.insert(max(p, 0), connection) 104 | self._rechoke() 105 | 106 | def connection_lost(self, connection): 107 | self.connections.remove(connection) 108 | self.picker.lost_peer(connection) 109 | if connection.get_upload().is_interested() and not connection.get_upload().is_choked(): 110 | self._rechoke() 111 | 112 | def interested(self, connection): 113 | if not connection.get_upload().is_choked(): 114 | self._rechoke() 115 | 116 | def not_interested(self, connection): 117 | if not connection.get_upload().is_choked(): 118 | self._rechoke() 119 | 120 | def set_super_seed(self): 121 | while self.connections: # close all connections 122 | self.connections[0].close() 123 | self.picker.set_superseed() 124 | self.super_seed = True 125 | 126 | def pause(self, flag): 127 | self.paused = flag 128 | self._rechoke() 129 | 
-------------------------------------------------------------------------------- /src/BitTornado/BT1/Connecter.py: -------------------------------------------------------------------------------- 1 | # Written by Bram Cohen 2 | # see LICENSE.txt for license information 3 | 4 | from BitTornado.bitfield import Bitfield 5 | from BitTornado.clock import clock 6 | from binascii import b2a_hex 7 | 8 | try: 9 | True 10 | except: 11 | True = 1 12 | False = 0 13 | 14 | DEBUG = False 15 | 16 | def toint(s): 17 | return long(b2a_hex(s), 16) 18 | 19 | def tobinary(i): 20 | return (chr(i >> 24) + chr((i >> 16) & 0xFF) + 21 | chr((i >> 8) & 0xFF) + chr(i & 0xFF)) 22 | 23 | CHOKE = chr(0) 24 | UNCHOKE = chr(1) 25 | INTERESTED = chr(2) 26 | NOT_INTERESTED = chr(3) 27 | # index 28 | HAVE = chr(4) 29 | # index, bitfield 30 | BITFIELD = chr(5) 31 | # index, begin, length 32 | REQUEST = chr(6) 33 | # index, begin, piece 34 | PIECE = chr(7) 35 | # index, begin, piece 36 | CANCEL = chr(8) 37 | 38 | class Connection: 39 | def __init__(self, connection, connecter): 40 | self.connection = connection 41 | self.connecter = connecter 42 | self.got_anything = False 43 | self.next_upload = None 44 | self.outqueue = [] 45 | self.partial_message = None 46 | self.download = None 47 | self.send_choke_queued = False 48 | self.just_unchoked = None 49 | 50 | def get_ip(self, real=False): 51 | return self.connection.get_ip(real) 52 | 53 | def get_id(self): 54 | return self.connection.get_id() 55 | 56 | def get_readable_id(self): 57 | return self.connection.get_readable_id() 58 | 59 | def close(self): 60 | if DEBUG: 61 | print 'connection closed' 62 | self.connection.close() 63 | 64 | def is_locally_initiated(self): 65 | return self.connection.is_locally_initiated() 66 | 67 | def send_interested(self): 68 | self._send_message(INTERESTED) 69 | 70 | def send_not_interested(self): 71 | self._send_message(NOT_INTERESTED) 72 | 73 | def send_choke(self): 74 | if self.partial_message: 75 | 
self.send_choke_queued = True 76 | else: 77 | self._send_message(CHOKE) 78 | self.upload.choke_sent() 79 | self.just_unchoked = 0 80 | 81 | def send_unchoke(self): 82 | if self.send_choke_queued: 83 | self.send_choke_queued = False 84 | if DEBUG: 85 | print 'CHOKE SUPPRESSED' 86 | else: 87 | self._send_message(UNCHOKE) 88 | if ( self.partial_message or self.just_unchoked is None 89 | or not self.upload.interested or self.download.active_requests ): 90 | self.just_unchoked = 0 91 | else: 92 | self.just_unchoked = clock() 93 | 94 | def send_request(self, index, begin, length): 95 | self._send_message(REQUEST + tobinary(index) + 96 | tobinary(begin) + tobinary(length)) 97 | if DEBUG: 98 | print 'sent request: '+str(index)+': '+str(begin)+'-'+str(begin+length) 99 | 100 | def send_cancel(self, index, begin, length): 101 | self._send_message(CANCEL + tobinary(index) + 102 | tobinary(begin) + tobinary(length)) 103 | if DEBUG: 104 | print 'sent cancel: '+str(index)+': '+str(begin)+'-'+str(begin+length) 105 | 106 | def send_bitfield(self, bitfield): 107 | self._send_message(BITFIELD + bitfield) 108 | 109 | def send_have(self, index): 110 | self._send_message(HAVE + tobinary(index)) 111 | 112 | def send_keepalive(self): 113 | self._send_message('') 114 | 115 | def _send_message(self, s): 116 | s = tobinary(len(s))+s 117 | if self.partial_message: 118 | self.outqueue.append(s) 119 | else: 120 | self.connection.send_message_raw(s) 121 | 122 | def send_partial(self, bytes): 123 | if self.connection.closed: 124 | return 0 125 | if self.partial_message is None: 126 | s = self.upload.get_upload_chunk() 127 | if s is None: 128 | return 0 129 | index, begin, piece = s 130 | self.partial_message = ''.join(( 131 | tobinary(len(piece) + 9), PIECE, 132 | tobinary(index), tobinary(begin), piece.tostring() )) 133 | if DEBUG: 134 | print 'sending chunk: '+str(index)+': '+str(begin)+'-'+str(begin+len(piece)) 135 | 136 | if bytes < len(self.partial_message): 137 | 
self.connection.send_message_raw(self.partial_message[:bytes]) 138 | self.partial_message = self.partial_message[bytes:] 139 | return bytes 140 | 141 | q = [self.partial_message] 142 | self.partial_message = None 143 | if self.send_choke_queued: 144 | self.send_choke_queued = False 145 | self.outqueue.append(tobinary(1)+CHOKE) 146 | self.upload.choke_sent() 147 | self.just_unchoked = 0 148 | q.extend(self.outqueue) 149 | self.outqueue = [] 150 | q = ''.join(q) 151 | self.connection.send_message_raw(q) 152 | return len(q) 153 | 154 | def get_upload(self): 155 | return self.upload 156 | 157 | def get_download(self): 158 | return self.download 159 | 160 | def set_download(self, download): 161 | self.download = download 162 | 163 | def backlogged(self): 164 | return not self.connection.is_flushed() 165 | 166 | def got_request(self, i, p, l): 167 | self.upload.got_request(i, p, l) 168 | if self.just_unchoked: 169 | self.connecter.ratelimiter.ping(clock() - self.just_unchoked) 170 | self.just_unchoked = 0 171 | 172 | 173 | 174 | 175 | class Connecter: 176 | def __init__(self, make_upload, downloader, choker, numpieces, 177 | totalup, config, ratelimiter, sched = None): 178 | self.downloader = downloader 179 | self.make_upload = make_upload 180 | self.choker = choker 181 | self.numpieces = numpieces 182 | self.config = config 183 | self.ratelimiter = ratelimiter 184 | self.rate_capped = False 185 | self.sched = sched 186 | self.totalup = totalup 187 | self.rate_capped = False 188 | self.connections = {} 189 | self.external_connection_made = 0 190 | 191 | def how_many_connections(self): 192 | return len(self.connections) 193 | 194 | def connection_made(self, connection): 195 | c = Connection(connection, self) 196 | self.connections[connection] = c 197 | c.upload = self.make_upload(c, self.ratelimiter, self.totalup) 198 | c.download = self.downloader.make_download(c) 199 | self.choker.connection_made(c) 200 | return c 201 | 202 | def connection_lost(self, connection): 203 | 
c = self.connections[connection] 204 | del self.connections[connection] 205 | if c.download: 206 | c.download.disconnected() 207 | self.choker.connection_lost(c) 208 | 209 | def connection_flushed(self, connection): 210 | conn = self.connections[connection] 211 | if conn.next_upload is None and (conn.partial_message is not None 212 | or len(conn.upload.buffer) > 0): 213 | self.ratelimiter.queue(conn) 214 | 215 | def got_piece(self, i): 216 | for co in self.connections.values(): 217 | co.send_have(i) 218 | 219 | def got_message(self, connection, message): 220 | c = self.connections[connection] 221 | t = message[0] 222 | if t == BITFIELD and c.got_anything: 223 | connection.close() 224 | return 225 | c.got_anything = True 226 | if (t in [CHOKE, UNCHOKE, INTERESTED, NOT_INTERESTED] and 227 | len(message) != 1): 228 | connection.close() 229 | return 230 | if t == CHOKE: 231 | c.download.got_choke() 232 | elif t == UNCHOKE: 233 | c.download.got_unchoke() 234 | elif t == INTERESTED: 235 | if not c.download.have.complete(): 236 | c.upload.got_interested() 237 | elif t == NOT_INTERESTED: 238 | c.upload.got_not_interested() 239 | elif t == HAVE: 240 | if len(message) != 5: 241 | connection.close() 242 | return 243 | i = toint(message[1:]) 244 | if i >= self.numpieces: 245 | connection.close() 246 | return 247 | if c.download.got_have(i): 248 | c.upload.got_not_interested() 249 | elif t == BITFIELD: 250 | try: 251 | b = Bitfield(self.numpieces, message[1:]) 252 | except ValueError: 253 | connection.close() 254 | return 255 | if c.download.got_have_bitfield(b): 256 | c.upload.got_not_interested() 257 | elif t == REQUEST: 258 | if len(message) != 13: 259 | connection.close() 260 | return 261 | i = toint(message[1:5]) 262 | if i >= self.numpieces: 263 | connection.close() 264 | return 265 | c.got_request(i, toint(message[5:9]), 266 | toint(message[9:])) 267 | elif t == CANCEL: 268 | if len(message) != 13: 269 | connection.close() 270 | return 271 | i = toint(message[1:5]) 272 | 
if i >= self.numpieces: 273 | connection.close() 274 | return 275 | c.upload.got_cancel(i, toint(message[5:9]), 276 | toint(message[9:])) 277 | elif t == PIECE: 278 | if len(message) <= 9: 279 | connection.close() 280 | return 281 | i = toint(message[1:5]) 282 | if i >= self.numpieces: 283 | connection.close() 284 | return 285 | if c.download.got_piece(i, toint(message[5:9]), message[9:]): 286 | self.got_piece(i) 287 | else: 288 | connection.close() 289 | -------------------------------------------------------------------------------- /src/BitTornado/BT1/DownloaderFeedback.py: -------------------------------------------------------------------------------- 1 | # Written by Bram Cohen 2 | # see LICENSE.txt for license information 3 | 4 | from cStringIO import StringIO 5 | from urllib import quote 6 | from threading import Event 7 | 8 | try: 9 | True 10 | except: 11 | True = 1 12 | False = 0 13 | 14 | class DownloaderFeedback: 15 | def __init__(self, choker, httpdl, add_task, upfunc, downfunc, 16 | ratemeasure, leftfunc, file_length, finflag, sp, statistics, 17 | statusfunc = None, interval = None): 18 | self.choker = choker 19 | self.httpdl = httpdl 20 | self.add_task = add_task 21 | self.upfunc = upfunc 22 | self.downfunc = downfunc 23 | self.ratemeasure = ratemeasure 24 | self.leftfunc = leftfunc 25 | self.file_length = file_length 26 | self.finflag = finflag 27 | self.sp = sp 28 | self.statistics = statistics 29 | self.lastids = [] 30 | self.spewdata = None 31 | self.doneprocessing = Event() 32 | self.doneprocessing.set() 33 | if statusfunc: 34 | self.autodisplay(statusfunc, interval) 35 | 36 | 37 | def _rotate(self): 38 | cs = self.choker.connections 39 | for id in self.lastids: 40 | for i in xrange(len(cs)): 41 | if cs[i].get_id() == id: 42 | return cs[i:] + cs[:i] 43 | return cs 44 | 45 | def spews(self): 46 | l = [] 47 | cs = self._rotate() 48 | self.lastids = [c.get_id() for c in cs] 49 | for c in cs: 50 | a = {} 51 | a['id'] = c.get_readable_id() 52 | 
            a['ip'] = c.get_ip()
            # the choker keeps the optimistic-unchoke candidate at the head of its list
            a['optimistic'] = (c is self.choker.connections[0])
            # 'L' = we initiated the connection, 'R' = remote peer connected to us
            if c.is_locally_initiated():
                a['direction'] = 'L'
            else:
                a['direction'] = 'R'
            u = c.get_upload()
            a['uprate'] = int(u.measure.get_rate())
            a['uinterested'] = u.is_interested()
            a['uchoked'] = u.is_choked()
            d = c.get_download()
            a['downrate'] = int(d.measure.get_rate())
            a['dinterested'] = d.is_interested()
            a['dchoked'] = d.is_choked()
            a['snubbed'] = d.is_snubbed()
            a['utotal'] = d.connection.upload.measure.get_total()
            a['dtotal'] = d.connection.download.measure.get_total()
            # fraction of pieces the peer claims to have (bitfield true-count / length)
            if len(d.connection.download.have) > 0:
                a['completed'] = float(len(d.connection.download.have)-d.connection.download.have.numfalse)/float(len(d.connection.download.have))
            else:
                a['completed'] = 1.0
            a['speed'] = d.connection.download.peermeasure.get_rate()

            l.append(a)

        # append one synthetic row per active HTTP seed so they show up in the spew
        for dl in self.httpdl.get_downloads():
            if dl.goodseed:
                a = {}
                a['id'] = 'http seed'
                a['ip'] = dl.baseurl
                a['optimistic'] = False
                a['direction'] = 'L'
                a['uprate'] = 0
                a['uinterested'] = False
                a['uchoked'] = False
                a['downrate'] = int(dl.measure.get_rate())
                a['dinterested'] = True
                a['dchoked'] = not dl.active
                a['snubbed'] = not dl.active
                a['utotal'] = None
                a['dtotal'] = dl.measure.get_total()
                a['completed'] = 1.0
                a['speed'] = None

                l.append(a)

        return l


    def gather(self, displayfunc = None):
        # Build one snapshot dict of current transfer state for the UI.
        # NOTE(review): the displayfunc parameter is accepted but never used here.
        s = {'stats': self.statistics.update()}
        if self.sp.isSet():
            s['spew'] = self.spews()
        else:
            s['spew'] = None
        s['up'] = self.upfunc()
        if self.finflag.isSet():
            # seeding: only size + upload rate are meaningful
            s['done'] = self.file_length
            return s
        s['down'] = self.downfunc()
        obtained, desired = self.leftfunc()
        s['done'] = obtained
        s['wanted'] = desired
        if desired > 0:
            s['frac'] = float(obtained)/desired
        else:
            s['frac'] = 1.0
        if desired == obtained:
            s['time'] = 0
        else:
            s['time'] = self.ratemeasure.get_time_left(desired-obtained)
        return s


    def display(self, displayfunc):
        # Push a stats snapshot to displayfunc; doneprocessing acts as a
        # re-entrancy guard (skipped entirely if a display is still in flight).
        if not self.doneprocessing.isSet():
            return
        self.doneprocessing.clear()
        stats = self.gather()
        if self.finflag.isSet():
            displayfunc(dpflag = self.doneprocessing,
                upRate = stats['up'],
                statistics = stats['stats'], spew = stats['spew'])
        elif stats['time'] is not None:
            displayfunc(dpflag = self.doneprocessing,
                fractionDone = stats['frac'], sizeDone = stats['done'],
                downRate = stats['down'], upRate = stats['up'],
                statistics = stats['stats'], spew = stats['spew'],
                timeEst = stats['time'])
        else:
            displayfunc(dpflag = self.doneprocessing,
                fractionDone = stats['frac'], sizeDone = stats['done'],
                downRate = stats['down'], upRate = stats['up'],
                statistics = stats['stats'], spew = stats['spew'])


    def autodisplay(self, displayfunc, interval):
        # Start periodic display updates every `interval` seconds.
        self.displayfunc = displayfunc
        self.interval = interval
        self._autodisplay()

    def _autodisplay(self):
        # reschedule first so a failure in display() does not stop the loop
        self.add_task(self._autodisplay, self.interval)
        self.display(self.displayfunc)
--------------------------------------------------------------------------------
/src/BitTornado/BT1/Encrypter.py:
--------------------------------------------------------------------------------
# Written by Bram Cohen
# see LICENSE.txt for license information
#
# Despite the module name, this implements the plaintext BitTorrent wire
# handshake and length-prefixed message framing (no encryption is visible here).

from cStringIO import StringIO
from binascii import b2a_hex
from socket import error as socketerror
from urllib import quote
from traceback import print_exc
try:
    True
except:
    True = 1
    False = 0

# cap on simultaneously half-open (handshaking) outgoing connections
MAX_INCOMPLETE = 8

protocol_name = 'BitTorrent protocol'
option_pattern = chr(0)*8

def toint(s):
    # big-endian byte string -> (long) integer
    return long(b2a_hex(s), 16)

def tobinary(i):
    # integer -> 4-byte big-endian string (inverse of toint for 32-bit values)
    return (chr(i >> 24) + chr((i >> 16) & 0xFF) +
        chr((i >> 8) & 0xFF) + chr(i & 0xFF))

# precomputed byte -> two-hex-digit table used by tohex()
hexchars = '0123456789ABCDEF'
hexmap = []
for i in xrange(256):
    hexmap.append(hexchars[(i&0xF0)/16]+hexchars[i&0x0F])

def tohex(s):
    """Return the uppercase hex representation of byte string s."""
    r = []
    for c in s:
        r.append(hexmap[ord(c)])
    return ''.join(r)

def make_readable(s):
    """Render a peer id for display: quoted if it is URL-safe, hex otherwise."""
    if not s:
        return ''
    if quote(s).find('%') >= 0:
        return tohex(s)
    return '"'+s+'"'


class IncompleteCounter:
    """Process-wide counter of half-open outgoing connections."""
    def __init__(self):
        self.c = 0
    def increment(self):
        self.c += 1
    def decrement(self):
        self.c -= 1
    def toomany(self):
        # True once MAX_INCOMPLETE handshakes are already in flight
        return self.c >= MAX_INCOMPLETE

# shared by every Connection in the process (module-level singleton)
incompletecounter = IncompleteCounter()


# wire format: header, reserved, download id, my id, [length, message]

class Connection:
    """One peer connection during (and after) the BitTorrent handshake.

    Parses the incoming stream as a state machine: next_func consumes the
    next next_len bytes and returns the following (length, handler) pair,
    or None to kill the connection.
    """
    def __init__(self, Encoder, connection, id, ext_handshake=False):
        self.Encoder = Encoder
        self.connection = connection
        self.connecter = Encoder.connecter
        self.id = id
        self.readable_id = make_readable(id)
        # we dialed out iff we already know the remote peer id
        self.locally_initiated = (id != None)
        self.complete = False
        self.keepalive = lambda: None
        self.closed = False
        self.buffer = StringIO()
        if self.locally_initiated:
            incompletecounter.increment()
        if self.locally_initiated or ext_handshake:
            self.connection.write(chr(len(protocol_name)) + protocol_name +
                option_pattern + self.Encoder.download_id)
        if ext_handshake:
            # remote side already sent its header; jump straight to peer id
            self.Encoder.connecter.external_connection_made += 1
            self.connection.write(self.Encoder.my_id)
            self.next_len, self.next_func = 20, self.read_peer_id
        else:
            self.next_len, self.next_func = 1, self.read_header_len
        # give the handshake 15 seconds to complete before closing
        self.Encoder.raw_server.add_task(self._auto_close, 15)

    def get_ip(self, real=False):
        return self.connection.get_ip(real)

    def get_id(self):
        return self.id

    def get_readable_id(self):
        return self.readable_id

    def is_locally_initiated(self):
        return self.locally_initiated

    def is_flushed(self):
        return self.connection.is_flushed()

    # --- handshake state machine: each reader validates one field ---

    def read_header_len(self, s):
        if ord(s) != len(protocol_name):
            return None
        return len(protocol_name), self.read_header

    def read_header(self, s):
        if s != protocol_name:
            return None
        return 8, self.read_reserved

    def read_reserved(self, s):
        # reserved/option bytes are accepted but ignored here
        return 20, self.read_download_id

    def read_download_id(self, s):
        # infohash must match the torrent this Encoder serves
        if s != self.Encoder.download_id:
            return None
        if not self.locally_initiated:
            # incoming connection: reply with our full handshake now
            self.Encoder.connecter.external_connection_made += 1
            self.connection.write(chr(len(protocol_name)) + protocol_name +
                option_pattern + self.Encoder.download_id + self.Encoder.my_id)
        return 20, self.read_peer_id

    def read_peer_id(self, s):
        if not self.id:
            self.id = s
            self.readable_id = make_readable(s)
        else:
            if s != self.id:
                return None
        # Encoder.got_id enforces self-connect / duplicate / ban policy
        self.complete = self.Encoder.got_id(self)
        if not self.complete:
            return None
        if self.locally_initiated:
            self.connection.write(self.Encoder.my_id)
            incompletecounter.decrement()
        c = self.Encoder.connecter.connection_made(self)
        self.keepalive = c.send_keepalive
        return 4, self.read_len

    # --- post-handshake framing: 4-byte length prefix, then payload ---

    def read_len(self, s):
        l = toint(s)
        if l > self.Encoder.max_len:
            return None
        return l, self.read_message

    def read_message(self, s):
        # zero-length message is a keepalive; anything else goes upstream
        if s != '':
            self.connecter.got_message(self, s)
        return 4, self.read_len

    def read_dead(self, s):
        return None

    def _auto_close(self):
        # scheduled at construction: drop connections that never complete
        if not self.complete:
            self.close()

    def close(self):
        if not self.closed:
            self.connection.close()
            self.sever()

    def sever(self):
        self.closed = True
        del self.Encoder.connections[self.connection]
        if self.complete:
            self.connecter.connection_lost(self)
        elif self.locally_initiated:
            incompletecounter.decrement()

    def send_message_raw(self, message):
        if not self.closed:
            self.connection.write(message)

    def data_came_in(self, connection, s):
        """Feed raw socket bytes through the state machine."""
        self.Encoder.measurefunc(len(s))
        while True:
            if self.closed:
                return
            # bytes still needed to complete the current chunk
            i = self.next_len - self.buffer.tell()
            if i > len(s):
                self.buffer.write(s)
                return
            self.buffer.write(s[:i])
            s = s[i:]
            m = self.buffer.getvalue()
            # cStringIO: rewind then truncate to empty the accumulator
            self.buffer.reset()
            self.buffer.truncate()
            try:
                x = self.next_func(m)
            except:
                # park the parser in a dead state before re-raising so no
                # further data is interpreted after a handler blew up
                self.next_len, self.next_func = 1, self.read_dead
                raise
            if x is None:
                self.close()
                return
            self.next_len, self.next_func = x

    def connection_flushed(self, connection):
        if self.complete:
            self.connecter.connection_flushed(self)

    def connection_lost(self, connection):
        if self.Encoder.connections.has_key(connection):
            self.sever()


class Encoder:
    """Connection manager for one torrent: dials peers, accepts incoming
    connections, enforces limits/bans, and owns all Connection objects."""
    def __init__(self, connecter, raw_server, my_id, max_len,
            schedulefunc, keepalive_delay, download_id,
            measurefunc, config):
        self.raw_server = raw_server
        self.connecter = connecter
        self.my_id = my_id
        self.max_len = max_len
        self.schedulefunc = schedulefunc
        self.keepalive_delay = keepalive_delay
        self.download_id = download_id
        self.measurefunc = measurefunc
        self.config = config
        self.connections = {}       # raw connection -> Connection
        self.banned = {}            # ip -> 1
        self.to_connect = []        # queued (dns, id) pairs to dial
        self.paused = False
        # 0 means "unlimited" in the config; use a huge sentinel instead
        if self.config['max_connections'] == 0:
            self.max_connections = 2 ** 30
        else:
            self.max_connections = self.config['max_connections']
        schedulefunc(self.send_keepalives, keepalive_delay)

    def send_keepalives(self):
        # self-rescheduling keepalive tick for all completed connections
        self.schedulefunc(self.send_keepalives, self.keepalive_delay)
        if self.paused:
            return
        for c in self.connections.values():
            c.keepalive()

    def start_connections(self, list):
        """Replace the dial queue with `list` and kick off the drain loop
        if it was not already running."""
        if not self.to_connect:
            self.raw_server.add_task(self._start_connection_from_queue)
        self.to_connect = list

    def _start_connection_from_queue(self):
        # allow extra outgoing dials until someone has connected to us
        if self.connecter.external_connection_made:
            max_initiate = self.config['max_initiate']
        else:
            max_initiate = int(self.config['max_initiate']*1.5)
        cons = len(self.connections)
        if cons >= self.max_connections or cons >= max_initiate:
            delay = 60
        elif self.paused or incompletecounter.toomany():
            delay = 1
        else:
            delay = 0
        # NOTE(review): the current entry is popped and dialed immediately;
        # the computed delay only throttles the *next* dequeue. Stock
        # BitTornado defers the pop itself when delay > 0 — confirm this
        # divergence is intentional in this fork.
        dns, id = self.to_connect.pop(0)
        self.start_connection(dns, id)
        if self.to_connect:
            self.raw_server.add_task(self._start_connection_from_queue, delay)

    def start_connection(self, dns, id):
        """Dial (ip, port) = dns expecting peer id `id`.

        Returns False only on immediate socket error (caller may retry);
        True both on success and on "skipped" (paused/full/self/banned/dup).
        """
        if ( self.paused
             or len(self.connections) >= self.max_connections
             or id == self.my_id
             or self.banned.has_key(dns[0]) ):
            return True
        for v in self.connections.values():
            if v is None:
                continue
            if id and v.id == id:
                return True
            ip = v.get_ip(True)
            # security option: at most one connection per remote IP
            if self.config['security'] and ip != 'unknown' and ip == dns[0]:
                return True
        try:
            c = self.raw_server.start_connection(dns)
            con = Connection(self, c, id)
            self.connections[c] = con
            c.set_handler(con)
        except socketerror:
            return False
        return True

    def _start_connection(self, dns, id):
        # deferred variant: schedule the dial on the event loop
        def foo(self=self, dns=dns, id=id):
            self.start_connection(dns, id)

        self.schedulefunc(foo, 0)

    def got_id(self, connection):
        """Handshake policy check; returns False to reject the connection."""
        if connection.id == self.my_id:
            # connected to ourselves: undo the external-connection credit
            self.connecter.external_connection_made -= 1
            return False
        ip = connection.get_ip(True)
        if self.config['security'] and self.banned.has_key(ip):
            return False
        for v in self.connections.values():
            if connection is not v:
                if connection.id == v.id:
                    # duplicate peer id: keep the existing connection
                    return False
                if self.config['security'] and ip != 'unknown' and ip == v.get_ip(True):
                    # same IP twice: close the older connection, keep this one
                    v.close()
        return True

    def external_connection_made(self, connection):
        """Accept a brand-new incoming socket (handshake not yet read)."""
        if self.paused or len(self.connections) >= self.max_connections:
            connection.close()
            return False
        con = Connection(self, connection, None)
        self.connections[connection] = con
        connection.set_handler(con)
        return True

    def externally_handshaked_connection_made(self, connection, options, already_read):
        """Adopt an incoming socket whose handshake header was already
        consumed elsewhere; replay any leftover bytes through the parser."""
        if self.paused or len(self.connections) >= self.max_connections:
            connection.close()
            return False
        con = Connection(self, connection, None, True)
        self.connections[connection] = con
        connection.set_handler(con)
        if already_read:
            con.data_came_in(con, already_read)
        return True

    def close_all(self):
        for c in self.connections.values():
            c.close()
        self.connections = {}

    def ban(self, ip):
        self.banned[ip] = 1

    def pause(self, flag):
        self.paused = flag
--------------------------------------------------------------------------------
/src/BitTornado/BT1/FileSelector.py:
--------------------------------------------------------------------------------
# Written by John Hoffman
# see LICENSE.txt for license information
#
# Per-file download priorities for multi-file torrents, mapped down to
# per-piece priorities for the piece picker.

from random import shuffle
from traceback import print_exc
try:
    True
except:
    True = 1
    False = 0


class FileSelector:
    def __init__(self, files, piece_length, bufferdir,
                 storage, storagewrapper, sched, failfunc):
        self.files = files
        self.storage = storage
        self.storagewrapper = storagewrapper
        self.sched = sched
        self.failfunc = failfunc
        self.downloader = None
        self.picker = None
        storage.set_bufferdir(bufferdir)

        self.numfiles = len(files)
        self.priority = [1] * self.numfiles     # committed per-file priorities
        self.new_priority = None                # pending, not yet applied
        self.new_partials = None
        # filepieces[f] = tuple of piece indices overlapping file f
        self.filepieces = []
        total = 0L
        for file, length in files:
            if not length:
                self.filepieces.append(())
            else:
                pieces = range( int(total/piece_length),
                                int((total+length-1)/piece_length)+1 )
                self.filepieces.append(tuple(pieces))
            total += length
        self.numpieces = int((total+piece_length-1)/piece_length)
        self.piece_priority = [1] * self.numpieces



    def init_priority(self, new_priority):
        """Validate and stage an initial priority list (before tie_in).

        Each entry must be an int in [-1, 2]; -1 disables the file on disk.
        Returns False on invalid input or disk error.
        """
        try:
            assert len(new_priority) == self.numfiles
            for v in new_priority:
                assert type(v) in (type(0),type(0L))
                assert v >= -1
                assert v <= 2
        except:
#            print_exc()
            return False
        try:
            files_updated = False
            for f in xrange(self.numfiles):
                if new_priority[f] < 0:
                    self.storage.disable_file(f)
                    files_updated = True
            if files_updated:
                self.storage.reset_file_status()
            self.new_priority = new_priority
        except (IOError, OSError), e:
            self.failfunc("can't open partial file for "
                          + self.files[f][0] + ': ' + str(e))
            return False
        return True

    '''
    d['priority'] = [file #1 priority [,file #2 priority...] ]
                    a list of download priorities for each file.
                    Priority may be -1, 0, 1, 2.  -1 = download disabled,
                    0 = highest, 1 = normal, 2 = lowest.
    Also see Storage.pickle and StorageWrapper.pickle for additional keys.
    '''
    def unpickle(self, d):
        # Restore priorities and partial-piece state from a resume dict.
        if d.has_key('priority'):
            if not self.init_priority(d['priority']):
                return
        pieces = self.storage.unpickle(d)
        if not pieces:  # don't bother, nothing restoreable
            return
        new_piece_priority = self._get_piece_priority_list(self.new_priority)
        self.storagewrapper.reblock([i == -1 for i in new_piece_priority])
        self.new_partials = self.storagewrapper.unpickle(d, pieces)

    def tie_in(self, picker, cancelfunc, requestmorefunc, rerequestfunc):
        """Late-bind the piece picker and request callbacks, then apply any
        priorities/partials staged by init_priority/unpickle."""
        self.picker = picker
        self.cancelfunc = cancelfunc
        self.requestmorefunc = requestmorefunc
        self.rerequestfunc = rerequestfunc

        if self.new_priority:
            self.priority = self.new_priority
            self.new_priority = None
            self.new_piece_priority = self._set_piece_priority(self.priority)

        if self.new_partials:
            # shuffled so partially-downloaded pieces are requested in random order
            shuffle(self.new_partials)
            for p in self.new_partials:
                self.picker.requested(p)
        self.new_partials = None

    def _set_files_disabled(self, old_priority, new_priority):
        """Enable/disable files on disk to match the new priorities,
        preserving any already-downloaded data that straddles boundaries.
        Returns False on failure."""
        old_disabled = [p == -1 for p in old_priority]
        new_disabled = [p == -1 for p in new_priority]
        data_to_update = []
        for f in xrange(self.numfiles):
            if new_disabled[f] != old_disabled[f]:
                data_to_update.extend(self.storage.get_piece_update_list(f))
        # snapshot affected data before files are switched over
        buffer = []
        for piece, start, length in data_to_update:
            if self.storagewrapper.has_data(piece):
                data = self.storagewrapper.read_raw(piece, start, length)
                if data is None:
                    return False
                buffer.append((piece, start, data))

        files_updated = False
        try:
            for f in xrange(self.numfiles):
                if new_disabled[f] and not old_disabled[f]:
                    self.storage.disable_file(f)
                    files_updated = True
                if old_disabled[f] and not new_disabled[f]:
                    self.storage.enable_file(f)
                    files_updated = True
        except (IOError, OSError), e:
            # f still names the file that failed (loop variable leaks in py2)
            if new_disabled[f]:
                msg = "can't open partial file for "
            else:
                msg = 'unable to open '
            self.failfunc(msg + self.files[f][0] + ': ' + str(e))
            return False
        if files_updated:
            self.storage.reset_file_status()

        # write the snapshot back and re-verify the touched pieces
        changed_pieces = {}
        for piece, start, data in buffer:
            if not self.storagewrapper.write_raw(piece, start, data):
                return False
            data.release()
            changed_pieces[piece] = 1
        if not self.storagewrapper.doublecheck_data(changed_pieces):
            return False

        return True

    def _get_piece_priority_list(self, file_priority_list):
        """Fold per-file priorities into per-piece priorities.

        A piece shared by several enabled files gets the highest urgency
        (numeric minimum); pieces only in disabled files stay -1.
        """
        l = [-1] * self.numpieces
        for f in xrange(self.numfiles):
            if file_priority_list[f] == -1:
                continue
            for i in self.filepieces[f]:
                if l[i] == -1:
                    l[i] = file_priority_list[f]
                    continue
                l[i] = min(l[i],file_priority_list[f])
        return l

    def _set_piece_priority(self, new_priority):
        """Push new per-piece priorities into the picker, cancel requests for
        newly blocked pieces and request newly unblocked ones."""
        was_complete = self.storagewrapper.am_I_complete()
        new_piece_priority = self._get_piece_priority_list(new_priority)
        pieces = range(self.numpieces)
        shuffle(pieces)     # randomize update order
        new_blocked = []
        new_unblocked = []
        for piece in pieces:
            self.picker.set_priority(piece,new_piece_priority[piece])
            o = self.piece_priority[piece] == -1
            n = new_piece_priority[piece] == -1
            if n and not o:
                new_blocked.append(piece)
            if o and not n:
                new_unblocked.append(piece)
        if new_blocked:
            self.cancelfunc(new_blocked)
        self.storagewrapper.reblock([i == -1 for i in new_piece_priority])
        if new_unblocked:
            self.requestmorefunc(new_unblocked)
        if was_complete and not self.storagewrapper.am_I_complete():
            # re-enabling files made us incomplete again: re-announce as leecher
            self.rerequestfunc()

        return new_piece_priority

    def set_priorities_now(self, new_priority = None):
        if not new_priority:
            new_priority = self.new_priority
            self.new_priority = None    # potential race condition
        if not new_priority:
            return
        old_priority = self.priority
        self.priority = new_priority
        if not self._set_files_disabled(old_priority, new_priority):
            return
        self.piece_priority = self._set_piece_priority(new_priority)

    def set_priorities(self, new_priority):
        # stage and apply asynchronously on the event-loop thread
        self.new_priority = new_priority
        self.sched(self.set_priorities_now)

    def set_priority(self, f, p):
        """Change the priority of a single file f to p."""
        new_priority = self.get_priorities()
        new_priority[f] = p
        self.set_priorities(new_priority)

    def get_priorities(self):
        # prefer the staged list if one is pending
        priority = self.new_priority
        if not priority:
            priority = self.priority    # potential race condition
        return [i for i in priority]

    def __setitem__(self, index, val):
        self.set_priority(index, val)

    def __getitem__(self, index):
        # bare except: falls back to the committed list when new_priority
        # is None (TypeError) — NOTE(review): it also swallows IndexError
        try:
            return self.new_priority[index]
        except:
            return self.priority[index]


    def finish(self):
        # delete partials of files that were never wanted
        for f in xrange(self.numfiles):
            if self.priority[f] == -1:
                self.storage.delete_file(f)

    def pickle(self):
        """Return a resume dict combining priorities with storage state."""
        d = {'priority': self.priority}
        try:
            s = self.storage.pickle()
            sw = self.storagewrapper.pickle()
            for k in s.keys():
                d[k] = s[k]
            for k in sw.keys():
                d[k] = sw[k]
        except (IOError, OSError):
            pass    # best-effort: resume data is optional
        return d
--------------------------------------------------------------------------------
/src/BitTornado/BT1/Filter.py:
--------------------------------------------------------------------------------
class Filter:
    """Tracker request filter stub: check() always allows (returns None)."""
    def __init__(self, callback):
        self.callback = callback

    def check(self, ip, paramslist, headers):

        def params(key, default = None, l = paramslist):
            if l.has_key(key):
                return l[key][0]
            return default

        # None means "no objection"; a string would be an error message
        return None
--------------------------------------------------------------------------------
/src/BitTornado/BT1/HTTPDownloader.py:
--------------------------------------------------------------------------------
# Written by John Hoffman
# see LICENSE.txt for license information
#
# Downloads pieces from HTTP "web seeds" that mirror the torrent payload,
# in parallel with the normal peer swarm.

from BitTornado.CurrentRateMeasure import Measure
from random import randint
from urlparse import urlparse
from httplib import HTTPConnection
from urllib import quote
from threading import Thread
from BitTornado.__init__ import product_name,version_short
try:
    True
except:
    True = 1
    False = 0

EXPIRE_TIME = 60 * 60

VERSION = product_name+'/'+version_short

class haveComplete:
    """Bitfield stand-in that claims every piece (HTTP seeds have it all)."""
    def complete(self):
        return True
    def __getitem__(self, x):
        return True
haveall = haveComplete()

class SingleDownload:
    """Piece fetcher for one HTTP seed URL.

    The download/request_finished cycle runs on the rawserver event loop;
    only the blocking HTTP round-trip (_request) runs in a worker thread.
    """
    def __init__(self, downloader, url):
        self.downloader = downloader
        self.baseurl = url
        try:
            (scheme, self.netloc, path, pars, query, fragment) = urlparse(url)
        except:
            self.downloader.errorfunc('cannot parse http seed address: '+url)
            return
        if scheme != 'http':
            self.downloader.errorfunc('http seed url not http: '+url)
            return
        try:
            self.connection = HTTPConnection(self.netloc)
        except:
            self.downloader.errorfunc('cannot connect to http seed: '+url)
            return
        # rebuild the request path, ready for &piece=/&ranges= to be appended
        self.seedurl = path
        if pars:
            self.seedurl += ';'+pars
        self.seedurl += '?'
        if query:
            self.seedurl += query+'&'
        self.seedurl += 'info_hash='+quote(self.downloader.infohash)

        self.measure = Measure(downloader.max_rate_period)
        self.index = None           # piece currently being fetched
        self.url = ''
        self.requests = []          # (begin, length) sub-requests in flight
        self.request_size = 0
        self.endflag = False        # True -> endgame: accept partial pieces
        self.error = None
        self.retry_period = 30
        self._retry_period = None
        self.errorcount = 0
        self.goodseed = False       # True once a piece has been verified
        self.active = False
        self.cancelled = False
        # stagger startup so multiple seeds don't hit simultaneously
        self.resched(randint(2,10))

    def resched(self, len = None):
        # NOTE: parameter `len` shadows the builtin (kept as-is for fidelity)
        if len is None:
            len = self.retry_period
        if self.errorcount > 3:
            # back off linearly with accumulated errors
            len = len * (self.errorcount - 2)
        self.downloader.rawserver.add_task(self.download, len)

    def _want(self, index):
        if self.endflag:
            return self.downloader.storage.do_I_have_requests(index)
        else:
            return self.downloader.storage.is_unstarted(index)

    def download(self):
        """Pick the next piece and fire off the HTTP request thread."""
        self.cancelled = False
        if self.downloader.picker.am_I_complete():
            self.downloader.downloads.remove(self)
            return
        self.index = self.downloader.picker.next(haveall, self._want)
        if ( self.index is None and not self.endflag
                     and not self.downloader.peerdownloader.has_downloaders() ):
            # nothing unstarted and no peers working: enter endgame mode
            self.endflag = True
            self.index = self.downloader.picker.next(haveall, self._want)
        if self.index is None:
            self.endflag = True
            self.resched()
        else:
            self.url = ( self.seedurl+'&piece='+str(self.index) )
            self._get_requests()
            if self.request_size < self.downloader.storage._piecelen(self.index):
                # only part of the piece is needed: ask for byte ranges
                self.url += '&ranges='+self._request_ranges()
            rq = Thread(target = self._request)
            rq.setDaemon(False)
            rq.start()
            self.active = True

    def _request(self):
        # runs on the worker thread; everything else runs on the event loop
        # pre-import codecs so the lazy import cannot happen mid-request
        import encodings.ascii
        import encodings.punycode
        import encodings.idna

        self.error = None
        self.received_data = None
        try:
            self.connection.request('GET',self.url, None,
                                {'User-Agent': VERSION})
            r = self.connection.getresponse()
            self.connection_status = r.status
            self.received_data = r.read()
        except Exception, e:
            self.error = 'error accessing http seed: '+str(e)
            try:
                self.connection.close()
            except:
                pass
            try:
                self.connection = HTTPConnection(self.netloc)
            except:
                self.connection = None  # will cause an exception and retry next cycle
        # hand the result back to the event-loop thread
        self.downloader.rawserver.add_task(self.request_finished)

    def request_finished(self):
        """Event-loop callback after _request: process data, reschedule."""
        self.active = False
        if self.error is not None:
            if self.goodseed:
                self.downloader.errorfunc(self.error)
            self.errorcount += 1
        if self.received_data:
            self.errorcount = 0
            if not self._got_data():
                self.received_data = None
        if not self.received_data:
            # give the failed sub-requests back to the normal downloader
            self._release_requests()
            self.downloader.peerdownloader.piece_flunked(self.index)
        if self._retry_period:
            self.resched(self._retry_period)
            self._retry_period = None
            return
        self.resched()

    def _got_data(self):
        """Validate the HTTP response and store the piece data.
        Returns True on success."""
        if self.connection_status == 503:   # seed is busy
            try:
                # a 503 body may carry the requested retry interval in seconds
                self.retry_period = max(int(self.received_data),5)
            except:
                pass
            return False
        if self.connection_status != 200:
            self.errorcount += 1
            return False
        self._retry_period = 1
        if len(self.received_data) != self.request_size:
            if self.goodseed:
                self.downloader.errorfunc('corrupt data from http seed - redownloading')
            return False
        self.measure.update_rate(len(self.received_data))
        self.downloader.measurefunc(len(self.received_data))
        if self.cancelled:
            return False
        if not self._fulfill_requests():
            return False
        if not self.goodseed:
            # first verified data from this URL: count it as a seed
            self.goodseed = True
            self.downloader.seedsfound += 1
        if self.downloader.storage.do_I_have(self.index):
            self.downloader.picker.complete(self.index)
            self.downloader.peerdownloader.check_complete(self.index)
            self.downloader.gotpiecefunc(self.index)
        return True

    def _get_requests(self):
        # claim every outstanding sub-request for the chosen piece
        self.requests = []
        self.request_size = 0L
        while self.downloader.storage.do_I_have_requests(self.index):
            r = self.downloader.storage.new_request(self.index)
            self.requests.append(r)
            self.request_size += r[1]
        self.requests.sort()

    def _fulfill_requests(self):
        # received_data is the claimed ranges concatenated in sorted order
        start = 0L
        success = True
        while self.requests:
            begin, length = self.requests.pop(0)
            if not self.downloader.storage.piece_came_in(self.index, begin,
                            self.received_data[start:start+length]):
                success = False
                break
            start += length
        return success

    def _release_requests(self):
        for begin, length in self.requests:
            self.downloader.storage.request_lost(self.index, begin, length)
        self.requests = []

    def _request_ranges(self):
        """Render self.requests as 'a-b,c-d' byte ranges, merging adjacent
        sub-requests into single ranges."""
        s = ''
        begin, length = self.requests[0]
        for begin1, length1 in self.requests[1:]:
            if begin + length == begin1:
                length += length1       # contiguous: extend current range
                continue
            else:
                if s:
                    s += ','
                s += str(begin)+'-'+str(begin+length-1)
                begin, length = begin1, length1
        if s:
            s += ','
        s += str(begin)+'-'+str(begin+length-1)
        return s


class HTTPDownloader:
    """Registry of SingleDownload fetchers for all configured HTTP seeds."""
    def __init__(self, storage, picker, rawserver,
                 finflag, errorfunc, peerdownloader,
                 max_rate_period, infohash, measurefunc, gotpiecefunc):
        self.storage = storage
        self.picker = picker
        self.rawserver = rawserver
        self.finflag = finflag
        self.errorfunc = errorfunc
        self.peerdownloader = peerdownloader
        self.infohash = infohash
        self.max_rate_period = max_rate_period
        self.gotpiecefunc = gotpiecefunc
        self.measurefunc = measurefunc
        self.downloads = []
        self.seedsfound = 0

    def make_download(self, url):
        self.downloads.append(SingleDownload(self, url))
        return self.downloads[-1]

    def get_downloads(self):
        # once finished, report no HTTP downloads (seeding doesn't use them)
        if self.finflag.isSet():
            return []
        return self.downloads

    def cancel_piece_download(self, pieces):
        for d in self.downloads:
            if d.active and d.index in pieces:
                d.cancelled = True
--------------------------------------------------------------------------------
/src/BitTornado/BT1/NatCheck.py:
--------------------------------------------------------------------------------
# Written by Bram Cohen
# see LICENSE.txt for license information
#
# Dials back to a peer and performs a minimal BitTorrent handshake to verify
# the peer is actually connectable (not behind NAT/firewall).

from cStringIO import StringIO
from socket import error as socketerror
from traceback import print_exc
try:
    True
except:
    True = 1
    False = 0

protocol_name = 'BitTorrent protocol'

# wire format: header, reserved, download id, my id, [length, message]

class NatCheck:
    """One connectability probe; reports the verdict via resultfunc."""
    def __init__(self, resultfunc, downloadid, peerid, ip, port, rawserver):
        self.resultfunc = resultfunc
        self.downloadid = downloadid
        self.peerid = peerid
        self.ip = ip
        self.port = port
        self.closed = False
        self.buffer = StringIO()
        self.next_len = 1
        self.next_func = self.read_header_len
        try:
            self.connection = rawserver.start_connection((ip, port), self)
            self.connection.write(chr(len(protocol_name)) + protocol_name +
                (chr(0) * 8) + downloadid)
        except socketerror:
            self.answer(False)
        except IOError:
            self.answer(False)

    def answer(self, result):
        """Close the probe and deliver the verdict exactly once."""
        self.closed = True
        try:
            self.connection.close()
        except AttributeError:
            # start_connection failed before self.connection was set
            pass
        self.resultfunc(result, self.downloadid, self.peerid, self.ip, self.port)

    # --- handshake parsing, same state-machine shape as Encrypter ---

    def read_header_len(self, s):
        if ord(s) != len(protocol_name):
            return None
        return len(protocol_name), self.read_header

    def read_header(self, s):
        if s != protocol_name:
52 | return None 53 | return 8, self.read_reserved 54 | 55 | def read_reserved(self, s): 56 | return 20, self.read_download_id 57 | 58 | def read_download_id(self, s): 59 | if s != self.downloadid: 60 | return None 61 | return 20, self.read_peer_id 62 | 63 | def read_peer_id(self, s): 64 | if s != self.peerid: 65 | return None 66 | self.answer(True) 67 | return None 68 | 69 | def data_came_in(self, connection, s): 70 | while True: 71 | if self.closed: 72 | return 73 | i = self.next_len - self.buffer.tell() 74 | if i > len(s): 75 | self.buffer.write(s) 76 | return 77 | self.buffer.write(s[:i]) 78 | s = s[i:] 79 | m = self.buffer.getvalue() 80 | self.buffer.reset() 81 | self.buffer.truncate() 82 | x = self.next_func(m) 83 | if x is None: 84 | if not self.closed: 85 | self.answer(False) 86 | return 87 | self.next_len, self.next_func = x 88 | 89 | def connection_lost(self, connection): 90 | if not self.closed: 91 | self.closed = True 92 | self.resultfunc(False, self.downloadid, self.peerid, self.ip, self.port) 93 | 94 | def connection_flushed(self, connection): 95 | pass 96 | -------------------------------------------------------------------------------- /src/BitTornado/BT1/Statistics.py: -------------------------------------------------------------------------------- 1 | # Written by Edward Keyes 2 | # see LICENSE.txt for license information 3 | 4 | from threading import Event 5 | try: 6 | True 7 | except: 8 | True = 1 9 | False = 0 10 | 11 | class Statistics_Response: 12 | pass # empty class 13 | 14 | 15 | class Statistics: 16 | def __init__(self, upmeasure, downmeasure, connecter, httpdl, 17 | ratelimiter, rerequest_lastfailed, fdatflag): 18 | self.upmeasure = upmeasure 19 | self.downmeasure = downmeasure 20 | self.connecter = connecter 21 | self.httpdl = httpdl 22 | self.ratelimiter = ratelimiter 23 | self.downloader = connecter.downloader 24 | self.picker = connecter.downloader.picker 25 | self.storage = connecter.downloader.storage 26 | self.torrentmeasure = 
connecter.downloader.totalmeasure 27 | self.rerequest_lastfailed = rerequest_lastfailed 28 | self.fdatflag = fdatflag 29 | self.fdatactive = False 30 | self.piecescomplete = None 31 | self.placesopen = None 32 | self.storage_totalpieces = len(self.storage.hashes) 33 | 34 | 35 | def set_dirstats(self, files, piece_length): 36 | self.piecescomplete = 0 37 | self.placesopen = 0 38 | self.filelistupdated = Event() 39 | self.filelistupdated.set() 40 | frange = xrange(len(files)) 41 | self.filepieces = [[] for x in frange] 42 | self.filepieces2 = [[] for x in frange] 43 | self.fileamtdone = [0.0 for x in frange] 44 | self.filecomplete = [False for x in frange] 45 | self.fileinplace = [False for x in frange] 46 | start = 0L 47 | for i in frange: 48 | l = files[i][1] 49 | if l == 0: 50 | self.fileamtdone[i] = 1.0 51 | self.filecomplete[i] = True 52 | self.fileinplace[i] = True 53 | else: 54 | fp = self.filepieces[i] 55 | fp2 = self.filepieces2[i] 56 | for piece in range(int(start/piece_length), 57 | int((start+l-1)/piece_length)+1): 58 | fp.append(piece) 59 | fp2.append(piece) 60 | start += l 61 | 62 | 63 | def update(self): 64 | s = Statistics_Response() 65 | s.upTotal = self.upmeasure.get_total() 66 | s.downTotal = self.downmeasure.get_total() 67 | s.last_failed = self.rerequest_lastfailed() 68 | s.external_connection_made = self.connecter.external_connection_made 69 | if s.downTotal > 0: 70 | s.shareRating = float(s.upTotal)/s.downTotal 71 | elif s.upTotal == 0: 72 | s.shareRating = 0.0 73 | else: 74 | s.shareRating = -1.0 75 | s.torrentRate = self.torrentmeasure.get_rate() 76 | s.torrentTotal = self.torrentmeasure.get_total() 77 | s.numSeeds = self.picker.seeds_connected 78 | s.numOldSeeds = self.downloader.num_disconnected_seeds() 79 | s.numPeers = len(self.downloader.downloads)-s.numSeeds 80 | s.numCopies = 0.0 81 | for i in self.picker.crosscount: 82 | if i==0: 83 | s.numCopies+=1 84 | else: 85 | s.numCopies+=1-float(i)/self.picker.numpieces 86 | break 87 | if 
self.picker.done: 88 | s.numCopies2 = s.numCopies + 1 89 | else: 90 | s.numCopies2 = 0.0 91 | for i in self.picker.crosscount2: 92 | if i==0: 93 | s.numCopies2+=1 94 | else: 95 | s.numCopies2+=1-float(i)/self.picker.numpieces 96 | break 97 | s.discarded = self.downloader.discarded 98 | s.numSeeds += self.httpdl.seedsfound 99 | s.numOldSeeds += self.httpdl.seedsfound 100 | if s.numPeers == 0 or self.picker.numpieces == 0: 101 | s.percentDone = 0.0 102 | else: 103 | s.percentDone = 100.0*(float(self.picker.totalcount)/self.picker.numpieces)/s.numPeers 104 | 105 | s.backgroundallocating = self.storage.bgalloc_active 106 | s.storage_totalpieces = len(self.storage.hashes) 107 | s.storage_active = len(self.storage.stat_active) 108 | s.storage_new = len(self.storage.stat_new) 109 | s.storage_dirty = len(self.storage.dirty) 110 | numdownloaded = self.storage.stat_numdownloaded 111 | s.storage_justdownloaded = numdownloaded 112 | s.storage_numcomplete = self.storage.stat_numfound + numdownloaded 113 | s.storage_numflunked = self.storage.stat_numflunked 114 | s.storage_isendgame = self.downloader.endgamemode 115 | 116 | s.peers_kicked = self.downloader.kicked.items() 117 | s.peers_banned = self.downloader.banned.items() 118 | 119 | try: 120 | s.upRate = int(self.ratelimiter.upload_rate/1000) 121 | assert s.upRate < 5000 122 | except: 123 | s.upRate = 0 124 | s.upSlots = self.ratelimiter.slots 125 | 126 | if self.piecescomplete is None: # not a multi-file torrent 127 | return s 128 | 129 | if self.fdatflag.isSet(): 130 | if not self.fdatactive: 131 | self.fdatactive = True 132 | else: 133 | self.fdatactive = False 134 | 135 | if self.piecescomplete != self.picker.numgot: 136 | for i in xrange(len(self.filecomplete)): 137 | if self.filecomplete[i]: 138 | continue 139 | oldlist = self.filepieces[i] 140 | newlist = [ piece 141 | for piece in oldlist 142 | if not self.storage.have[piece] ] 143 | if len(newlist) != len(oldlist): 144 | self.filepieces[i] = newlist 145 | 
def tobinary(i):
    """Pack integer i as a 4-byte big-endian string."""
    parts = [chr(i >> 24)]  # top byte unmasked, as in the original: oversized i raises
    for shift in (16, 8, 0):
        parts.append(chr((i >> shift) & 0xFF))
    return ''.join(parts)
def tohex(s):
    """Render byte string s as uppercase hex, via the module-level hexmap table."""
    return ''.join([hexmap[ord(c)] for c in s])
print self.no, 'BAD CANCEL SIZE: '+str(len(s)) 109 | return 4, self.read_len 110 | index = toint(s[1:5]) 111 | begin = toint(s[5:9]) 112 | length = toint(s[9:]) 113 | print self.no, 'Cancel: '+str(index)+': '+str(begin)+'-'+str(begin)+'+'+str(length) 114 | elif m == Connecter.PIECE: 115 | index = toint(s[1:5]) 116 | begin = toint(s[5:9]) 117 | length = len(s)-9 118 | print self.no, 'Piece: '+str(index)+': '+str(begin)+'-'+str(begin)+'+'+str(length) 119 | else: 120 | print self.no, 'Message '+str(ord(m))+' (length '+str(len(s))+')' 121 | return 4, self.read_len 122 | 123 | def write(self, s): 124 | while True: 125 | i = self.next_len - self.buffer.tell() 126 | if i > len(s): 127 | self.buffer.write(s) 128 | return 129 | self.buffer.write(s[:i]) 130 | s = s[i:] 131 | m = self.buffer.getvalue() 132 | self.buffer.reset() 133 | self.buffer.truncate() 134 | x = self.next_func(m) 135 | self.next_len, self.next_func = x 136 | -------------------------------------------------------------------------------- /src/BitTornado/BT1/T2T.py: -------------------------------------------------------------------------------- 1 | # Written by John Hoffman 2 | # see LICENSE.txt for license information 3 | 4 | from Rerequester import Rerequester 5 | from urllib import quote 6 | from threading import Event 7 | from random import randrange 8 | from string import lower 9 | import sys 10 | import __init__ 11 | try: 12 | True 13 | except: 14 | True = 1 15 | False = 0 16 | 17 | DEBUG = True 18 | 19 | 20 | def excfunc(x): 21 | print x 22 | 23 | class T2TConnection: 24 | def __init__(self, myid, tracker, hash, interval, peers, timeout, 25 | rawserver, disallow, isdisallowed): 26 | self.tracker = tracker 27 | self.interval = interval 28 | self.hash = hash 29 | self.operatinginterval = interval 30 | self.peers = peers 31 | self.rawserver = rawserver 32 | self.disallow = disallow 33 | self.isdisallowed = isdisallowed 34 | self.active = True 35 | self.busy = False 36 | self.errors = 0 37 | 
def addtolist(self, peers):
    """Accumulate announced peers into newpeerdata.

    Each entry is re-ordered from (entry[0][0], entry[0][1], entry[1])
    — presumably ((ip, port), peer id); confirm against Rerequester —
    into a flat (peer id, ip, port) tuple.
    """
    converted = [(entry[1], entry[0][0], entry[0][1]) for entry in peers]
    self.newpeerdata.extend(converted)
def harvest(self):
    """Return every peer tuple queued since the last harvest, clearing the queue.

    peerlists holds up to the last 10 announces' worth of peer data
    (appended in callback()); this flattens them into one list.
    """
    # FIX (idiom): the original loop variable was named `list`, shadowing
    # the builtin within this method.  Renamed and flattened with extend.
    collected = []
    for peerlist in self.peerlists:
        collected.extend(peerlist)
    self.peerlists = []
    return collected
148 | # if the new list has no place for it, then it's old, so deactivate it 149 | for tracker, hashdata in self.list.items(): 150 | for hash, t2t in hashdata.items(): 151 | if not newlist.has_key(tracker) or not newlist[tracker].has_key(hash): 152 | t2t.deactivate() # this connection is no longer current 153 | self.oldtorrents += [t2t] 154 | # keep it referenced in case a thread comes along and tries to access. 155 | else: 156 | newlist[tracker][hash] = t2t 157 | if not newlist.has_key(tracker): 158 | self.disallowed[tracker] = False # reset when no torrents on it left 159 | 160 | self.list = newlist 161 | newtorrents = {} 162 | 163 | # step 3: If there are any entries that haven't been initialized yet, do so. 164 | # At the same time, copy all entries onto the by-torrent list. 165 | for tracker, hashdata in newlist.items(): 166 | for hash, t2t in hashdata.items(): 167 | if t2t is None: 168 | hashdata[hash] = T2TConnection(self.trackerid, tracker, hash, 169 | self.interval, self.maxpeers, self.timeout, 170 | self.rawserver, self._disallow, self._isdisallowed) 171 | newtorrents.setdefault(hash,[]) 172 | newtorrents[hash] += [hashdata[hash]] 173 | 174 | self.torrents = newtorrents 175 | 176 | # structures: 177 | # list = {tracker: {hash: T2TConnection, ...}, ...} 178 | # torrents = {hash: [T2TConnection, ...]} 179 | # disallowed = {tracker: flag, ...} 180 | # oldtorrents = [T2TConnection, ...] 
def harvest(self, hash):
    """Collect and drain the queued peer lists of every tracker connection
    registered for this torrent hash.  Returns [] when t2t is disabled."""
    if not self.enabled:
        return []
    collected = []
    for conn in self.torrents[hash]:
        collected.extend(conn.harvest())
    return collected
def got_cancel(self, index, begin, length):
    """Drop a pending upload request from the queue, ignoring unknown ones."""
    entry = (index, begin, length)
    if entry in self.buffer:
        # removes the first occurrence, matching list.remove semantics
        self.buffer.remove(entry)
def has_queries(self):
    """True when this peer is unchoked and at least one request is queued."""
    if self.choked:
        return False
    return len(self.buffer) > 0
def check_peers(message):
    """Validate a bdecoded tracker response dictionary.

    Raises ValueError on any malformed field; returns None when the
    response is acceptable.  A 'failure reason' entry short-circuits
    every other check.
    """
    if type(message) != DictType:
        raise ValueError
    if message.has_key('failure reason'):
        if type(message['failure reason']) != StringType:
            raise ValueError
        return
    peers = message.get('peers')
    if type(peers) == ListType:
        # expanded model: one dict per peer with 'ip', 'port', optional 'peer id'
        for p in peers:
            if type(p) != DictType:
                raise ValueError
            if type(p.get('ip')) != StringType:
                raise ValueError
            port = p.get('port')
            # BUGFIX: the original tested `p <= 0` -- the peer dict itself --
            # which in CPython 2 mixed-type comparison is never true, so
            # zero/negative ports were silently accepted.  The port value is
            # what must be positive.
            if type(port) not in ints or port <= 0:
                raise ValueError
            if p.has_key('peer id'):
                peerid = p['peer id']  # renamed from `id` to avoid shadowing the builtin
                if type(peerid) != StringType or len(peerid) != 20:
                    raise ValueError
    elif type(peers) != StringType or len(peers) % 6 != 0:
        # compact model: packed string of 6-byte entries (4 ip + 2 port)
        raise ValueError
    interval = message.get('interval', 1)
    if type(interval) not in ints or interval <= 0:
        raise ValueError
    minint = message.get('min interval', 1)
    if type(minint) not in ints or minint <= 0:
        raise ValueError
    if type(message.get('tracker id', '')) != StringType:
        raise ValueError
    npeers = message.get('num peers', 0)
    if type(npeers) not in ints or npeers < 0:
        raise ValueError
    dpeers = message.get('done peers', 0)
    if type(dpeers) not in ints or dpeers < 0:
        raise ValueError
    last = message.get('last', 0)
    if type(last) not in ints or last < 0:
        raise ValueError
def getsize(self, file):
    """Length in characters of the named fake file (KeyError if absent)."""
    contents = self.files[file]
    return len(contents)
def print_announcelist_details():
    """Print command-line help text for the announce_list and httpseeds params."""
    help_lines = [
        ' announce_list = optional list of redundant/backup tracker URLs, in the format:',
        ' url[,url...][|url[,url...]...]',
        ' where URLs separated by commas are all tried first',
        ' before the next group of URLs separated by the pipe is checked.',
        " If none is given, it is assumed you don't want one in the metafile.",
        ' If announce_list is given, clients which support it',
        ' will ignore the value.',
        ' Examples:',
        ' http://tracker1.com|http://tracker2.com|http://tracker3.com',
        ' (tries trackers 1-3 in order)',
        ' http://tracker1.com,http://tracker2.com,http://tracker3.com',
        ' (tries trackers 1-3 in a randomly selected order)',
        ' http://tracker1.com|http://backup1.com,http://backup2.com',
        ' (tries tracker 1 first, then tries between the 2 backups randomly)',
        '',
        ' httpseeds = optional list of http-seed URLs, in the format:',
        ' url[|url...]',
    ]
    for line in help_lines:
        print (line)
= default_piece_len_exp 67 | if params.has_key('target') and params['target'] != '': 68 | f = params['target'] 69 | else: 70 | a, b = split(file) 71 | if b == '': 72 | f = a + '.torrent' 73 | else: 74 | f = join(a, b + '.torrent') 75 | 76 | if piece_len_exp == 0: # automatic 77 | size = calcsize(file) 78 | if size > 8L*1024*1024*1024: # > 8 gig = 79 | piece_len_exp = 21 # 2 meg pieces 80 | elif size > 2*1024*1024*1024: # > 2 gig = 81 | piece_len_exp = 20 # 1 meg pieces 82 | elif size > 512*1024*1024: # > 512M = 83 | piece_len_exp = 19 # 512K pieces 84 | elif size > 64*1024*1024: # > 64M = 85 | piece_len_exp = 18 # 256K pieces 86 | elif size > 16*1024*1024: # > 16M = 87 | piece_len_exp = 17 # 128K pieces 88 | elif size > 4*1024*1024: # > 4M = 89 | piece_len_exp = 16 # 64K pieces 90 | else: # < 4M = 91 | piece_len_exp = 15 # 32K pieces 92 | piece_length = 2 ** piece_len_exp 93 | 94 | encoding = None 95 | if params.has_key('filesystem_encoding'): 96 | encoding = params['filesystem_encoding'] 97 | if not encoding: 98 | encoding = ENCODING 99 | if not encoding: 100 | encoding = 'ascii' 101 | 102 | info = makeinfo(file, piece_length, encoding, flag, progress, progress_percent) 103 | if flag.isSet(): 104 | return 105 | check_info(info) 106 | h = open(f, 'wb') 107 | data = {'info': info, 'announce': strip(url), 'creation date': long(time())} 108 | 109 | if params.has_key('comment') and params['comment']: 110 | data['comment'] = params['comment'] 111 | 112 | if params.has_key('real_announce_list'): # shortcut for progs calling in from outside 113 | data['announce-list'] = params['real_announce_list'] 114 | elif params.has_key('announce_list') and params['announce_list']: 115 | l = [] 116 | for tier in params['announce_list'].split('|'): 117 | l.append(tier.split(',')) 118 | data['announce-list'] = l 119 | 120 | if params.has_key('real_httpseeds'): # shortcut for progs calling in from outside 121 | data['httpseeds'] = params['real_httpseeds'] 122 | elif 
def uniconvert(s, e):
    """Decode byte string s using filesystem encoding e, re-encode as UTF-8.

    Raises UnicodeError naming the offending filename when s is not
    valid in encoding e.
    """
    try:
        decoded = unicode(s, e)
    except UnicodeError:
        raise UnicodeError('bad filename: '+s)
    return decoded.encode('utf-8')
def subfiles(d):
    """Recursively enumerate files under directory d.

    Returns a list of (path_components, full_path) pairs, skipping
    entries in the module-level `ignore` list and dotfiles.  Uses an
    explicit stack instead of recursion, mirroring the original.
    """
    found = []
    pending = [([], d)]
    while pending:
        rel, full = pending.pop()
        if isdir(full):
            for entry in listdir(full):
                if entry not in ignore and entry[:1] != '.':
                    # rel + [entry] builds a fresh list, so no copy() needed
                    pending.append((rel + [entry], join(full, entry)))
        else:
            found.append((rel, full))
    return found
'def': 0}, 5 | 'automatic':1}, 6 | {'name':'unlimited', 7 | 'rate':{'min':0, 'max':5000, 'def': 0, 'div': 50}, 8 | 'conn':{'min':4, 'max':100, 'def': 4}}, 9 | {'name':'dialup/isdn', 10 | 'rate':{'min':3, 'max': 8, 'def': 5}, 11 | 'conn':{'min':2, 'max': 3, 'def': 2}, 12 | 'initiate': 12}, 13 | {'name':'dsl/cable slow', 14 | 'rate':{'min':10, 'max': 48, 'def': 13}, 15 | 'conn':{'min':4, 'max': 20, 'def': 4}}, 16 | {'name':'dsl/cable fast', 17 | 'rate':{'min':20, 'max': 100, 'def': 40}, 18 | 'conn':{'min':4, 'max': 30, 'def': 6}}, 19 | {'name':'T1', 20 | 'rate':{'min':100, 'max': 300, 'def':150}, 21 | 'conn':{'min':4, 'max': 40, 'def':10}}, 22 | {'name':'T3+', 23 | 'rate':{'min':400, 'max':2000, 'def':500}, 24 | 'conn':{'min':4, 'max':100, 'def':20}}, 25 | {'name':'seeder', 26 | 'rate':{'min':0, 'max':5000, 'def':0, 'div': 50}, 27 | 'conn':{'min':1, 'max':100, 'def':1}}, 28 | {'name':'SUPER-SEED', 'super-seed':1} 29 | ) 30 | 31 | connChoiceList = map(lambda x:x['name'], connChoices) 32 | -------------------------------------------------------------------------------- /src/BitTornado/CreateIcons.py: -------------------------------------------------------------------------------- 1 | # Generated from bt_MakeCreateIcons - 05/10/04 22:15:33 2 | # T-0.3.0 (BitTornado) 3 | 4 | from binascii import a2b_base64 5 | from zlib import decompress 6 | from os.path import join 7 | 8 | icons = { 9 | "icon_bt.ico": 10 | "eJyt1K+OFEEQx/FaQTh5GDRZhSQpiUHwCrxCBYXFrjyJLXeXEARPsZqUPMm+" + 11 | "AlmP+PGtngoLDji69zMz2zt/qqtr1mxHv7621d4+MnvK/jl66Bl2drV+e7Wz" + 12 | "S/v12A7rY4fDtuvOwfF4tOPXo52/fLLz+WwpWd6nqRXHKXux39sTrtnjNd7g" + 13 | "PW7wGSd860f880kffjvJ2QYS1Zcw4AjcoaA5yRFIFDQXOgKJguZmjkCioB4T" + 14 | "Y2CqxpTXA7sHEgVNEC8RSBQ0gfk7xtknCupgk3EEEgXlNgFHIFHQTMoRSBQ0" + 15 | "E+1ouicKmsk7AomCJiGOQKKgSZIjkChoEucIJAqaZDoCiYImwb4iydULmqQ7" + 16 | "AomC1kLcEQ/jSBQ0i+MIJAqaBXMEElVdi9siOgKJgmZhfWWlVjTddXW/FtsR" + 17 | "SBQ0BeAIJAqaonAEEgVNoTgCiYKmeByBREHaqiVWRtSRrAJzBBIFTdE5AomC" + 18 | 
"phBPpxPP57dVkDfrTl063nUVnWe383fZx9tb3uN+o7U+BLDtuvcQm8d/27Y/" + 19 | "jO3o5/ay+YPv/+f6y30e1OyB7QcsGWFj", 20 | "icon_done.ico": 21 | "eJyt1K2OVEEQhuEaQbJyMWgyCklSEoPgFvYWKigsduRKbLndhCC4itGk5Erm" + 22 | "Fsh4xMdbfSoMOGDpnuf89Jyf6uqaMdvRr69ttbdPzJ6xf4Eeeo6dXa3vXu/s" + 23 | "0n49tsP62OGw7bpzcDwe7fj1aOcvn+x8PltKlg9pasVxyl7u9/aUe/Z4gxu8" + 24 | "xy0+44Rv/Yp/vujDbxc520Ci+hYGHIF7FDQXOQKJguZGRyBR0DzMEUgU1GNi" + 25 | "DEzVmPJ6YfdAoqAJ4hUCiYImMH/HOPtEQR1sMo5AoqDcJuAIJAqaSTkCiYJm" + 26 | "oh1N90RBM3lHIFHQJMQRSBQ0SXIEEgVN4hyBREGTTEcgUdAk2FckuXpBk3RH" + 27 | "IFHQWoh74mEciYJmcRyBREGzYI5AoqprcVtERyBR0Cysr6zUiqa7rh7WYjsC" + 28 | "iYKmAByBREFTFI5AoqApFEcgUdAUjyOQKEhbtcTKiDqSVWCOQKKgKTpHIFHQ" + 29 | "FOLpdOL9fLcK8nY9qUvHu66i8+x2/i77eHfH77h/0VofAth23Xuoz/+2bX8Y" + 30 | "29HP7WXzB+f/5/7Lcx7V7JHtB9dPG3I=", 31 | "black.ico": 32 | "eJzt1zsOgkAYReFLLCztjJ2UlpLY485kOS7DpbgESwqTcQZDghjxZwAfyfl0" + 33 | "LIieGzUWSom/pan840rHnbSUtPHHX9Je9+tAh2ybNe8TZZ/vk8ajJ4zl6JVJ" + 34 | "+xFx+0R03Djx1/2B8bcT9L/bt0+4Wq+4se8e/VTfMvGqb4n3nYiIGz+lvt9s" + 35 | "9EpE2T4xJN4xNFYWU6t+JWXuXDFzTom7SodSyi/S+iwtwjlJ80KaNY/C34rW" + 36 | "aT8nvK5uhF7ohn7Yqfb87kffLAAAAAAAAAAAAAAAAAAAGMUNy7dADg==", 37 | "blue.ico": 38 | "eJzt10EOwUAYhuGv6cLSTux06QD2dTM9jmM4iiNYdiEZ81cIFTWddtDkfbQW" + 39 | "De8XogtS5h9FIf+81H4jLSSt/ekvaavrdaCDez4SZV+PpPHoicBy9ErSfkQ8" + 40 | "fCI6Hjgx6f7A+McJ+r/t95i46xMP7bf8Uz9o4k0/XMT338voP5shK0MkjXcM" + 41 | "YSqam6Qunatyf7Nk7iztaqk8SaujNLfzIM0qKX88ZX8rWmf7Nfa+W8N61rW+" + 42 | "7TR7fverHxYAAAAAAAAAAAAAAAAAAIziApVZ444=", 43 | "green.ico": 44 | "eJzt1zEOgjAAheFHGBzdjJuMHsAdbybxNB7Do3gERwaT2mJIBCOWlqok/yc4" + 45 | "EP1fNDIoZfZRFLLPa5120krS1p72kvZ6XAeGHLtHouzrkTQePOFZDl5J2g+I" + 46 | "+08Exz0nZt2PjH+coP/bvveEaY2L+/VN13/1PSbe9v0FfP+jTP6ziVmJkTQ+" + 47 | "MISZaO6SujSmyu3dkpmbdKil8iptLtLSnWdpUUn58yn3t6J39l/j3tc2XM91" + 48 | "Xd/tNHt296sfFgAAAAAAAAAAAAAAAAAATOIOVLEoDg==", 49 | "red.ico": 50 | "eJzt10EOwUAYhuGv6cLSTux06QD2dTOO4xiO4giWXUjG/BVCRTuddtDkfbQW" + 51 | 
"De8XogtS5h9FIf+81GEjLSSt/ekvaavbdaCVez0SZd+PpPHoicBy9ErSfkQ8" + 52 | "fCI6Hjgx6f7AeOcE/d/2QyceesaD+g1/1u+e+NwPF/H99zL6z2bIyhBJ4y1D" + 53 | "mIb6LqlK5/a5v1syd5F2lVSepdVJmtt5lGZ7KX8+ZX8rGmfzNfa+e8N61rW+" + 54 | "7dR7fverHxYAAAAAAAAAAAAAAAAAAIziCpgs444=", 55 | "white.ico": 56 | "eJzt1zsOgkAYReFLKCztjJ2ULsAed6bLcRnuwYTaJVhSmIwzGBLEiD8D+EjO" + 57 | "p2NB9NyosVBK/C3L5B+XOmykhaS1P/6StrpfBzoUp6J5nyj7fJ80Hj1hLEev" + 58 | "TNqPiNsnouPGib/uD4y/naD/3b59wtV6xY199+in+paJV31LvO9ERNz4KfX9" + 59 | "ZqNXIsr2iSHxjqGxspha9Sspc+f2qXNK3FXalVJ+kVZnaR7OUZrtpbR5FP5W" + 60 | "tE77OeF1dSP0Qjf0w06153c/+mYBAAAAAAAAAAAAAAAAAMAobj//I7s=", 61 | "yellow.ico": 62 | "eJzt1zsOgkAYReFLKCztjJ2ULsAedybLcRkuxSVYUpiM82M0ihGHgVFJzidY" + 63 | "ED03vgqlzN+KQv5+qf1GWkha+9Nf0lbX60AX556ORNnXI2k8eiKwHL2StB8R" + 64 | "D5+IjgdOTLo/MP5xgv5v+8ETd/3iYf2W/+oHTLzth4t4/3sZ/WszZGWIpPGO" + 65 | "IUxE8yupS+eq3H9smTtLu1oqT9LqKM3tPEizSsofT9nfitbZfow979awnnWt" + 66 | "bzvNnt/96osFAAAAAAAAAAAAAAAAAACjuABhjmIs", 67 | "black1.ico": 68 | "eJzt0zEOgkAUANEhFpZSGTstTWzkVt5Cj8ZROAIHMNGPWBCFDYgxMZkHn2Iz" + 69 | "G5YCyOLKc+K54XSANbCPiSV2tOt/qjgW3XtSnN41FH/Qv29Jx/P7qefp7W8P" + 70 | "4z85HQ+9JRG/7BpTft31DPUKyiVcFjEZzQ/TTtdzrWnKmCr6evv780qSJEmS" + 71 | "JEmSJEmSJEmSpPnunVFDcA==", 72 | "green1.ico": 73 | "eJzt0zEKwkAQRuEXLCyTSuy0DHgxb6F4shzFI+QAgpkkFoombowIwvt2Z4vh" + 74 | "X5gtFrJYRUGca/Y7WAFlVLTY0vf/1elxTwqP3xoKf5B/vjIenp+fOs+r/LWT" + 75 | "/uQ34aGpUqQnv+1ygDqHagnHRVRG+2H6unfrtZkq6hz5evP7eSVJkiRJkiRJ" + 76 | "kiRJkiRJ0nwNoWQ+AA==", 77 | "yellow1.ico": 78 | "eJzt0zEKwkAQRuEXLCxNJXZaCl7MW8Sj5SgeIQcQ4oS1UDTJxkhAeN/ubDH8" + 79 | "C7PFQhGrLIlzx/kEW+AYFS0OpP6/atuXPSk8fKsv/EX+/cpweH5+6jyf8kn+" + 80 | "k0fCfVPlyE/+2q2CZgP1Gi6rqILuw6R69uh1mTrqGvlmv/y8kiRJkiRJkiRJ" + 81 | "kiRJkiRpvjsp9L8k", 82 | "alloc.gif": 83 | "eJxz93SzsEw0YRBh+M4ABi0MS3ue///P8H8UjIIRBhR/sjAyMDAx6IAyAihP" + 84 | "MHAcYWDlkPHYsOBgM4ewVsyJDQsPNzEoebF8CHjo0smjH3dmRsDjI33C7Dw3" + 85 | "MiYuOtjNyDShRSNwyemJguJJKhaGS32nGka61Vg2NJyYKRd+bY+nwtMzjbqV" + 86 | 
"Qh84gxMCJgnlL4vJuqJyaa5NfFLNLsNVV2a7syacfVWkHd4bv7RN1ltM7ejm" + 87 | "tMtNZ19Oyb02p8C3aqr3dr2GbXl/7fZyOej5rW653WZ7MzzHZV+v7O2/EZM+" + 88 | "Pt45kbX6ScWHNWfOilo3n5thucXv8org1XF3DRQYrAEWiVY3" 89 | } 90 | 91 | def GetIcons(): 92 | return icons.keys() 93 | 94 | def CreateIcon(icon, savedir): 95 | try: 96 | f = open(join(savedir,icon),"wb") 97 | f.write(decompress(a2b_base64(icons[icon]))) 98 | success = 1 99 | except: 100 | success = 0 101 | try: 102 | f.close() 103 | except: 104 | pass 105 | return success 106 | -------------------------------------------------------------------------------- /src/BitTornado/CurrentRateMeasure.py: -------------------------------------------------------------------------------- 1 | # Written by Bram Cohen 2 | # see LICENSE.txt for license information 3 | 4 | from clock import clock 5 | 6 | class Measure: 7 | def __init__(self, max_rate_period, fudge = 1): 8 | self.max_rate_period = max_rate_period 9 | self.ratesince = clock() - fudge 10 | self.last = self.ratesince 11 | self.rate = 0.0 12 | self.total = 0l 13 | 14 | def update_rate(self, amount): 15 | self.total += amount 16 | t = clock() 17 | self.rate = (self.rate * (self.last - self.ratesince) + 18 | amount) / (t - self.ratesince + 0.0001) 19 | self.last = t 20 | if self.ratesince < t - self.max_rate_period: 21 | self.ratesince = t - self.max_rate_period 22 | 23 | def get_rate(self): 24 | self.update_rate(0) 25 | return self.rate 26 | 27 | def get_rate_noupdate(self): 28 | return self.rate 29 | 30 | def time_until_rate(self, newrate): 31 | if self.rate <= newrate: 32 | return 0 33 | t = clock() - self.ratesince 34 | return ((self.rate * t) / newrate) - t 35 | 36 | def get_total(self): 37 | return self.total -------------------------------------------------------------------------------- /src/BitTornado/HTTPHandler.py: -------------------------------------------------------------------------------- 1 | # Written by Bram Cohen 2 | # see LICENSE.txt for license information 3 | 4 | from 
from cStringIO import StringIO
from sys import stdout
import time
from clock import clock
from gzip import GzipFile
# Compatibility shim for pre-2.3 Pythons without the True/False builtins.
try:
    True
except:
    True = 1
    False = 0

DEBUG = False

# Names used when formatting Apache-style access-log timestamps in
# HTTPHandler.log(); `months` is 1-indexed, hence the leading None.
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

class HTTPConnection:
    # Parses one inbound HTTP request line-by-line through a small state
    # machine (next_func points at the parser for the next line) and sends
    # back the response produced by the owning handler's getfunc.

    def __init__(self, handler, connection):
        self.handler = handler          # owning HTTPHandler
        self.connection = connection    # transport-level connection object
        self.buf = ''                   # bytes received but not yet parsed
        self.closed = False
        self.done = False               # response fully generated
        self.donereading = False        # request head fully parsed
        self.next_func = self.read_type # current state of the line parser

    def get_ip(self):
        return self.connection.get_ip()

    def data_came_in(self, data):
        # Feed raw bytes to the line parser.  Returns False when the caller
        # should shut the connection down (parse error or already closed).
        if self.donereading or self.next_func is None:
            return True
        self.buf += data
        while True:
            try:
                i = self.buf.index('\n')
            except ValueError:
                return True             # no complete line buffered yet
            val = self.buf[:i]
            self.buf = self.buf[i+1:]
            self.next_func = self.next_func(val)
            if self.donereading:
                return True
            if self.next_func is None or self.closed:
                return False

    def read_type(self, data):
        # State 1: the request line — either "METHOD path HTTP/x.y" or the
        # ancient two-token HTTP/0.9 form ("GET path"), flagged via pre1.
        self.header = data.strip()
        words = data.split()
        if len(words) == 3:
            self.command, self.path, garbage = words
            self.pre1 = False
        elif len(words) == 2:
            self.command, self.path = words
            self.pre1 = True
            if self.command != 'GET':
                return None             # the 0.9 form supports GET only
        else:
            return None
        if self.command not in ('HEAD', 'GET'):
            return None
        self.headers = {}
        return self.read_header

    def read_header(self, data):
        # State 2: "Name: value" header lines; a blank line ends the head
        # and dispatches the request to the handler.
        data = data.strip()
        if data == '':
            self.donereading = True
            # Choose gzip only when the client advertised support for it.
            if self.headers.get('accept-encoding','').find('gzip') > -1:
                self.encoding = 'gzip'
            else:
                self.encoding = 'identity'
            r = self.handler.getfunc(self, self.path, self.headers)
            if r is not None:
                self.answer(r)
            return None
        try:
            i = data.index(':')
        except ValueError:
            return None
        self.headers[data[:i].strip().lower()] = data[i+1:].strip()
        if DEBUG:
            print data[:i].strip() + ": " + data[i+1:].strip()
        return self.read_header

    def answer(self, (responsecode, responsestring, headers, data)):
        # Serialize and send the response; gzip-compress the body when the
        # client accepts it and compression actually shrinks the payload.
        if self.closed:
            return
        if self.encoding == 'gzip':
            compressed = StringIO()
            gz = GzipFile(fileobj = compressed, mode = 'wb', compresslevel = 9)
            gz.write(data)
            gz.close()
            cdata = compressed.getvalue()
            if len(cdata) >= len(data):
                self.encoding = 'identity'   # compression didn't help
            else:
                if DEBUG:
                    print "Compressed: %i Uncompressed: %i\n" % (len(cdata),len(data))
                data = cdata
                headers['Content-Encoding'] = 'gzip'

        # i'm abusing the identd field here, but this should be ok
        if self.encoding == 'identity':
            ident = '-'
        else:
            ident = self.encoding
        self.handler.log( self.connection.get_ip(), ident, '-',
                          self.header, responsecode, len(data),
                          self.headers.get('referer','-'),
                          self.headers.get('user-agent','-') )
        self.done = True
        r = StringIO()
        r.write('HTTP/1.0 ' + str(responsecode) + ' ' +
            responsestring + '\r\n')
        # HTTP/0.9 responses carry no status headers at all.
        if not self.pre1:
            headers['Content-Length'] = len(data)
            for key, value in headers.items():
                r.write(key + ': ' + str(value) + '\r\n')
            r.write('\r\n')
        if self.command != 'HEAD':
            r.write(data)
        self.connection.write(r.getvalue())
        if self.connection.is_flushed():
            self.connection.shutdown(1)

class HTTPHandler:
    # RawServer-facing adapter: creates one HTTPConnection per inbound
    # socket and writes an Apache combined-style log line per request.

    def __init__(self, getfunc, minflush):
        self.connections = {}
        self.getfunc = getfunc      # callable(conn, path, headers) -> response
        self.minflush = minflush    # minimum seconds between stdout flushes
        self.lastflush = clock()

    def external_connection_made(self, connection):
        self.connections[connection] = HTTPConnection(self, connection)

    def connection_flushed(self, connection):
        # Once the response is fully written, half-close the socket.
        if self.connections[connection].done:
            connection.shutdown(1)

    def connection_lost(self, connection):
        ec = self.connections[connection]
        ec.closed = True
        # Break reference cycles so the connection object can be collected.
        del ec.connection
        del ec.next_func
        del self.connections[connection]

    def data_came_in(self, connection, data):
        c = self.connections[connection]
        if not c.data_came_in(data) and not c.closed:
            c.connection.shutdown(1)

    def log(self, ip, ident, username, header,
            responsecode, length, referrer, useragent):
        # Apache "combined" log format to stdout, flushed at most once per
        # minflush seconds.
        year, month, day, hour, minute, second, a, b, c = time.localtime(time.time())
        print '%s %s %s [%02d/%3s/%04d:%02d:%02d:%02d] "%s" %i %i "%s" "%s"' % (
            ip, ident, username, day, months[month], year, hour,
            minute, second, header, responsecode, length, referrer, useragent)
        t = clock()
        if t - self.lastflush > self.minflush:
            self.lastflush = t
            stdout.flush()
# -----------------------------------------------------------------------
# /src/BitTornado/PSYCO.py
# edit this file to enable/disable Psyco
#    psyco = 1 -- enabled
#    psyco = 0 -- disabled

psyco = 0
# -----------------------------------------------------------------------
# /src/BitTornado/RateLimiter.py
# Written by Bram Cohen
# see LICENSE.txt for license information

from traceback import print_exc
from binascii import b2a_hex
from clock import clock
from CurrentRateMeasure import Measure
from cStringIO import StringIO
from math import sqrt

try:
    True
except:
    True = 1
    False = 0
# Pre-2.3 Pythons lack the sum() builtin.
try:
    sum([1])
except:
    sum = lambda a: reduce(lambda x,y: x+y, a, 0)

DEBUG = False

MAX_RATE_PERIOD = 20.0
MAX_RATE = 10e10
# Auto-rate-adjustment tuning constants (see RateLimiter.ping):
PING_BOUNDARY = 1.2     # ping delay (s) above which a sample counts as "slow"
PING_SAMPLES = 7        # samples considered per measurement cycle
PING_DISCARDS = 1       # leading samples dropped from each cycle
PING_THRESHHOLD = 5     # slow samples per cycle that mean "flooded"
PING_DELAY = 5  # cycles 'til first upward adjustment
PING_DELAY_NEXT = 2  # 'til next
ADJUST_UP = 1.05
ADJUST_DOWN = 0.95
UP_DELAY_FIRST = 5
UP_DELAY_NEXT = 2
SLOTS_STARTING = 6
SLOTS_FACTOR = 1.66/1000

class RateLimiter:
    # Schedules uploads so total outbound bandwidth stays under a cap.
    # Queued connections form a circular singly-linked list threaded
    # through their next_upload attributes; self.last points at the tail,
    # so self.last.next_upload is the head.

    def __init__(self, sched, unitsize, slotsfunc = lambda x: None):
        self.sched = sched          # callable(func, delay) used to reschedule
        self.last = None            # tail of the circular upload queue
        self.unitsize = unitsize    # max bytes per send_partial() call
        self.slotsfunc = slotsfunc  # notified when auto-adjust changes slots
        self.measure = Measure(MAX_RATE_PERIOD)
        self.autoadjust = False
        self.upload_rate = MAX_RATE * 1000  # bytes/second
        self.slots = SLOTS_STARTING # garbage if not automatic

    def set_upload_rate(self, rate):
        # rate is in kB/s; negative requests automatic adjustment, zero
        # means unlimited.
        # rate = -1 # test automatic
        if rate < 0:
            if self.autoadjust:
                return
            self.autoadjust = True
            self.autoadjustup = 0
            self.pings = []
            rate = MAX_RATE
            self.slots = SLOTS_STARTING
            self.slotsfunc(self.slots)
        else:
            self.autoadjust = False
        if not rate:
            rate = MAX_RATE
        self.upload_rate = rate * 1000
        self.lasttime = clock()
        self.bytes_sent = 0

    def queue(self, conn):
        # Insert conn at the tail of the circular queue; kick off sending
        # immediately when the queue was empty.
        assert conn.next_upload is None
        if self.last is None:
            self.last = conn
            conn.next_upload = conn
            self.try_send(True)
        else:
            conn.next_upload = self.last.next_upload
            self.last.next_upload = conn
            self.last = conn

    def try_send(self, check_time = False):
        # Token-bucket style: bytes_sent drains at upload_rate; send while
        # credit is non-positive, then reschedule for when it runs out.
        t = clock()
        self.bytes_sent -= (t - self.lasttime) * self.upload_rate
        self.lasttime = t
        if check_time:
            self.bytes_sent = max(self.bytes_sent, 0)
        cur = self.last.next_upload
        while self.bytes_sent <= 0:
            bytes = cur.send_partial(self.unitsize)
            self.bytes_sent += bytes
            self.measure.update_rate(bytes)
            if bytes == 0 or cur.backlogged():
                # Connection had nothing to send (or can't accept more):
                # unlink it from the circular list.
                if self.last is cur:
                    self.last = None
                    cur.next_upload = None
                    break
                else:
                    self.last.next_upload = cur.next_upload
                    cur.next_upload = None
                    cur = self.last.next_upload
            else:
                self.last = cur
                cur = cur.next_upload
        else:
            # Credit exhausted with work remaining: come back when the
            # bucket drains to zero again.
            self.sched(self.try_send, self.bytes_sent / self.upload_rate)

    def adjust_sent(self, bytes):
        # Account for bytes sent outside try_send, capped so a long stall
        # cannot bank unlimited credit.
        self.bytes_sent = min(self.bytes_sent+bytes, self.upload_rate*3)
        self.measure.update_rate(bytes)


    def ping(self, delay):
        # Auto-adjust hook: classify each ping as slow/fast and, once per
        # full sample cycle, lower the cap when flooded or slowly raise it
        # otherwise.
        if DEBUG:
            print delay
        if not self.autoadjust:
            return
        self.pings.append(delay > PING_BOUNDARY)
        if len(self.pings) < PING_SAMPLES+PING_DISCARDS:
            return
        if DEBUG:
            print 'cycle'
        pings = sum(self.pings[PING_DISCARDS:])
        del self.pings[:]
        if pings >= PING_THRESHHOLD: # assume flooded
            if self.upload_rate == MAX_RATE:
                self.upload_rate = self.measure.get_rate()*ADJUST_DOWN
            else:
                self.upload_rate = min(self.upload_rate,
                                       self.measure.get_rate()*1.1)
            self.upload_rate = max(int(self.upload_rate*ADJUST_DOWN),2)
            self.slots = int(sqrt(self.upload_rate*SLOTS_FACTOR))
            self.slotsfunc(self.slots)
            if DEBUG:
                print 'adjust down to '+str(self.upload_rate)
            self.lasttime = clock()
            self.bytes_sent = 0
            self.autoadjustup = UP_DELAY_FIRST
        else: # not flooded
            if self.upload_rate == MAX_RATE:
                return
            self.autoadjustup -= 1
            if self.autoadjustup:
                return
            self.upload_rate = int(self.upload_rate*ADJUST_UP)
            self.slots = int(sqrt(self.upload_rate*SLOTS_FACTOR))
            self.slotsfunc(self.slots)
            if DEBUG:
                print 'adjust up to '+str(self.upload_rate)
            self.lasttime = clock()
            self.bytes_sent = 0
            self.autoadjustup = UP_DELAY_NEXT
Written by Bram Cohen 2 | # see LICENSE.txt for license information 3 | 4 | from clock import clock 5 | try: 6 | True 7 | except: 8 | True = 1 9 | False = 0 10 | 11 | FACTOR = 0.999 12 | 13 | class RateMeasure: 14 | def __init__(self): 15 | self.last = None 16 | self.time = 1.0 17 | self.got = 0.0 18 | self.remaining = None 19 | self.broke = False 20 | self.got_anything = False 21 | self.last_checked = None 22 | self.rate = 0 23 | self.lastten = False 24 | 25 | def data_came_in(self, amount): 26 | if not self.got_anything: 27 | self.got_anything = True 28 | self.last = clock() 29 | return 30 | self.update(amount) 31 | 32 | def data_rejected(self, amount): 33 | pass 34 | 35 | def get_time_left(self, left): 36 | t = clock() 37 | if not self.got_anything: 38 | return None 39 | if t - self.last > 15: 40 | self.update(0) 41 | try: 42 | remaining = left/self.rate 43 | if not self.lastten and remaining <= 10: 44 | self.lastten = True 45 | if self.lastten: 46 | return remaining 47 | delta = max(remaining/20,2) 48 | if self.remaining is None: 49 | self.remaining = remaining 50 | elif abs(self.remaining-remaining) > delta: 51 | self.remaining = remaining 52 | else: 53 | self.remaining -= t - self.last_checked 54 | except ZeroDivisionError: 55 | self.remaining = None 56 | if self.remaining is not None and self.remaining < 0.1: 57 | self.remaining = 0.1 58 | self.last_checked = t 59 | return self.remaining 60 | 61 | def update(self, amount): 62 | t = clock() 63 | t1 = int(t) 64 | l1 = int(self.last) 65 | for i in xrange(l1,t1): 66 | self.time *= FACTOR 67 | self.got *= FACTOR 68 | self.got += amount 69 | if t - self.last < 20: 70 | self.time += t - self.last 71 | self.last = t 72 | try: 73 | self.rate = self.got / self.time 74 | except ZeroDivisionError: 75 | pass 76 | -------------------------------------------------------------------------------- /src/BitTornado/RawServer.py: -------------------------------------------------------------------------------- 1 | # Written by 
# Written by Bram Cohen
# see LICENSE.txt for license information

from bisect import insort
from SocketHandler import SocketHandler, UPnP_ERROR
import socket
from cStringIO import StringIO
from traceback import print_exc
from select import error
from threading import Thread, Event
from time import sleep
from clock import clock
import sys
try:
    True
except:
    True = 1
    False = 0


def autodetect_ipv6():
    # Returns 1 when this Python/OS combination can create IPv6 sockets.
    try:
        assert sys.version_info >= (2,3)
        assert socket.has_ipv6
        socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    except:
        return 0
    return 1

def autodetect_socket_style():
    # 0 = one dual-stack socket, 1 = separate v4/v6 sockets.  Non-Linux
    # platforms get separate sockets; Linux consults bindv6only.
    if sys.platform.find('linux') < 0:
        return 1
    else:
        try:
            f = open('/proc/sys/net/ipv6/bindv6only','r')
            dual_socket_style = int(f.read())
            f.close()
            return int(not dual_socket_style)
        except:
            return 0


READSIZE = 100000   # bytes read per socket event

class RawServer:
    # Single-threaded event loop: polls sockets through SocketHandler and
    # runs scheduled tasks kept in a time-ordered list (bisect.insort).

    def __init__(self, doneflag, timeout_check_interval, timeout, noisy = True,
            ipv6_enable = True, failfunc = lambda x: None, errorfunc = None,
            sockethandler = None, excflag = Event()):
        # doneflag:  set by the owner to stop listen_forever.
        # failfunc:  called with a message on fatal SystemError/MemoryError.
        # errorfunc: sink for formatted tracebacks; None means print them.
        self.timeout_check_interval = timeout_check_interval
        self.timeout = timeout
        self.servers = {}
        self.single_sockets = {}
        self.dead_from_write = []
        self.doneflag = doneflag
        self.noisy = noisy
        self.failfunc = failfunc
        self.errorfunc = errorfunc
        self.exccount = 0
        self.funcs = []             # sorted list of (when, func, id) tasks
        self.externally_added = []  # tasks queued from other threads
        self.finished = Event()
        self.tasks_to_kill = []
        self.excflag = excflag

        if sockethandler is None:
            sockethandler = SocketHandler(timeout, ipv6_enable, READSIZE)
        self.sockethandler = sockethandler
        self.add_task(self.scan_for_timeouts, timeout_check_interval)

    def get_exception_flag(self):
        return self.excflag

    def _add_task(self, func, delay, id = None):
        # Internal: only safe to call on the event-loop thread.
        assert float(delay) >= 0
        insort(self.funcs, (clock() + delay, func, id))

    def add_task(self, func, delay = 0, id = None):
        # Thread-safe variant: buffered, then moved into funcs by
        # pop_external() on the loop thread.
        assert float(delay) >= 0
        self.externally_added.append((func, delay, id))

    def scan_for_timeouts(self):
        # Self-rescheduling periodic task.
        self.add_task(self.scan_for_timeouts, self.timeout_check_interval)
        self.sockethandler.scan_for_timeouts()

    def bind(self, port, bind = '', reuse = False,
            ipv6_socket_style = 1, upnp = False):
        self.sockethandler.bind(port, bind, reuse, ipv6_socket_style, upnp)

    def find_and_bind(self, minport, maxport, bind = '', reuse = False,
            ipv6_socket_style = 1, upnp = 0, randomizer = False):
        return self.sockethandler.find_and_bind(minport, maxport, bind, reuse,
            ipv6_socket_style, upnp, randomizer)

    def start_connection_raw(self, dns, socktype, handler = None):
        return self.sockethandler.start_connection_raw(dns, socktype, handler)

    def start_connection(self, dns, handler = None, randomize = False):
        return self.sockethandler.start_connection(dns, handler, randomize)

    def get_stats(self):
        return self.sockethandler.get_stats()

    def pop_external(self):
        # Drain cross-thread task additions into the sorted queue.
        while self.externally_added:
            (a, b, c) = self.externally_added.pop(0)
            self._add_task(a, b, c)


    def listen_forever(self, handler):
        # Run the event loop until doneflag is set or a fatal error occurs.
        self.sockethandler.set_handler(handler)
        try:
            while not self.doneflag.isSet():
                try:
                    self.pop_external()
                    self._kill_tasks()
                    # Poll until the next scheduled task is due (or "forever").
                    if self.funcs:
                        period = self.funcs[0][0] + 0.001 - clock()
                    else:
                        period = 2 ** 30
                    if period < 0:
                        period = 0
                    events = self.sockethandler.do_poll(period)
                    if self.doneflag.isSet():
                        return
                    # Run every task whose time has come.
                    while self.funcs and self.funcs[0][0] <= clock():
                        garbage1, func, id = self.funcs.pop(0)
                        if id in self.tasks_to_kill:
                            # NOTE(review): this `pass` falls through, so a
                            # task killed during this same batch still runs;
                            # a `continue` looks intended — confirm upstream
                            # before changing.
                            pass
                        try:
#                            print func.func_name
                            func()
                        except (SystemError, MemoryError), e:
                            self.failfunc(str(e))
                            return
                        except KeyboardInterrupt:
#                            self.exception(True)
                            return
                        except:
                            if self.noisy:
                                self.exception()
                    self.sockethandler.close_dead()
                    self.sockethandler.handle_events(events)
                    if self.doneflag.isSet():
                        return
                    self.sockethandler.close_dead()
                except (SystemError, MemoryError), e:
                    self.failfunc(str(e))
                    return
                except error:
                    # select.error: benign when we're shutting down.
                    if self.doneflag.isSet():
                        return
                except KeyboardInterrupt:
#                    self.exception(True)
                    return
                except:
                    self.exception()
                    # Bail out if exceptions keep coming.
                    if self.exccount > 10:
                        return
        finally:
#            self.sockethandler.shutdown()
            self.finished.set()

    def is_finished(self):
        return self.finished.isSet()

    def wait_until_finished(self):
        self.finished.wait()

    def _kill_tasks(self):
        # Remove queued tasks whose id was handed to kill_tasks().
        if self.tasks_to_kill:
            new_funcs = []
            for (t, func, id) in self.funcs:
                if id not in self.tasks_to_kill:
                    new_funcs.append((t, func, id))
            self.funcs = new_funcs
            self.tasks_to_kill = []

    def kill_tasks(self, id):
        self.tasks_to_kill.append(id)

    def exception(self, kbint = False):
        # Record/report a traceback raised inside the loop; kbint suppresses
        # reporting for keyboard interrupts.
        if not kbint:
            self.excflag.set()
        self.exccount += 1
        if self.errorfunc is None:
            print_exc()
        else:
            data = StringIO()
            print_exc(file = data)
#            print data.getvalue()   # report exception here too
            if not kbint:           # don't report here if it's a keyboard interrupt
                self.errorfunc(data.getvalue())

    def shutdown(self):
        self.sockethandler.shutdown()
#from RawServer import RawServer
try:
    True
except:
    True = 1
    False = 0

from BT1.Encrypter import protocol_name

# Sentinel object meaning "caller did not supply a task id" (distinct
# from an explicit id=None).
default_task_id = []

class SingleRawServer:
    # Per-torrent facade over the shared RawServer owned by MultiHandler.
    # Tasks are tagged with this torrent's info_hash so they can all be
    # cancelled together at shutdown.
    def __init__(self, info_hash, multihandler, doneflag, protocol):
        self.info_hash = info_hash
        self.doneflag = doneflag
        self.protocol = protocol
        self.multihandler = multihandler
        self.rawserver = multihandler.rawserver
        self.finished = False
        self.running = False
        self.handler = None
        self.taskqueue = []

    def shutdown(self):
        if not self.finished:
            self.multihandler.shutdown_torrent(self.info_hash)

    def _shutdown(self):
        # Called by MultiHandler: cancel our tasks and close connections.
        if not self.finished:
            self.finished = True
            self.running = False
            self.rawserver.kill_tasks(self.info_hash)
            if self.handler:
                self.handler.close_all()

    def _external_connection_made(self, c, options, already_read):
        # Hand an already-handshaked connection to this torrent's handler.
        if self.running:
            c.set_handler(self.handler)
            self.handler.externally_handshaked_connection_made(
                c, options, already_read)

    ### RawServer functions ###

    def add_task(self, func, delay=0, id = default_task_id):
        # Default-tag tasks with our info_hash so _shutdown can kill them.
        if id is default_task_id:
            id = self.info_hash
        if not self.finished:
            self.rawserver.add_task(func, delay, id)

#    def bind(self, port, bind = '', reuse = False):
#        pass    # not handled here

    def start_connection(self, dns, handler = None):
        if not handler:
            handler = self.handler
        c = self.rawserver.start_connection(dns, handler)
        return c

#    def listen_forever(self, handler):
#        pass    # don't call with this

    def start_listening(self, handler):
        self.handler = handler
        self.running = True
        return self.shutdown    # obviously, doesn't listen forever

    def is_finished(self):
        return self.finished

    def get_exception_flag(self):
        return self.rawserver.get_exception_flag()


class NewSocketHandler:     # hand a new socket off where it belongs
    # Reads just enough of the BitTorrent handshake to learn the peer's
    # info_hash, then transfers the connection to the matching torrent's
    # SingleRawServer.
    def __init__(self, multihandler, connection):
        self.multihandler = multihandler
        self.connection = connection
        connection.set_handler(self)
        self.closed = False
        self.buffer = StringIO()
        self.complete = False
        self.next_len, self.next_func = 1, self.read_header_len
        # Give the peer 15 seconds to complete the handshake.
        self.multihandler.rawserver.add_task(self._auto_close, 15)

    def _auto_close(self):
        if not self.complete:
            self.close()

    def close(self):
        if not self.closed:
            self.connection.close()
            self.closed = True


# header format:
#        connection.write(chr(len(protocol_name)) + protocol_name +
#            (chr(0) * 8) + self.encrypter.download_id + self.encrypter.my_id)

# copied from Encrypter and modified

    def read_header_len(self, s):
        l = ord(s)
        return l, self.read_header

    def read_header(self, s):
        self.protocol = s
        return 8, self.read_reserved

    def read_reserved(self, s):
        self.options = s
        return 20, self.read_download_id

    def read_download_id(self, s):
        # s is the info_hash: accept only when a matching torrent is
        # running under the same wire protocol.
        if self.multihandler.singlerawservers.has_key(s):
            if self.multihandler.singlerawservers[s].protocol == self.protocol:
                return True
        return None

    def read_dead(self, s):
        return None

    def data_came_in(self, garbage, s):
        # Pump bytes through the (next_len, next_func) state machine until
        # the handshake either completes or fails.
        while True:
            if self.closed:
                return
            i = self.next_len - self.buffer.tell()
            if i > len(s):
                self.buffer.write(s)
                return
            self.buffer.write(s[:i])
            s = s[i:]
            m = self.buffer.getvalue()
            self.buffer.reset()
            self.buffer.truncate()
            try:
                x = self.next_func(m)
            except:
                self.next_len, self.next_func = 1, self.read_dead
                raise
            if x is None:
                self.close()
                return
            if x == True:       # ready to process
                # m is the matched info_hash; hand over the connection plus
                # any bytes already read beyond the handshake.
                self.multihandler.singlerawservers[m]._external_connection_made(
                    self.connection, self.options, s)
                self.complete = True
                return
            self.next_len, self.next_func = x

    def connection_flushed(self, ss):
        pass

    def connection_lost(self, ss):
        self.closed = True

class MultiHandler:
    # Multiplexes one RawServer among many torrents, keyed by info_hash.
    def __init__(self, rawserver, doneflag):
        self.rawserver = rawserver
        self.masterdoneflag = doneflag
        self.singlerawservers = {}
        self.connections = {}
        self.taskqueues = {}

    def newRawServer(self, info_hash, doneflag, protocol=protocol_name):
        new = SingleRawServer(info_hash, self, doneflag, protocol)
        self.singlerawservers[info_hash] = new
        return new

    def shutdown_torrent(self, info_hash):
        self.singlerawservers[info_hash]._shutdown()
        del self.singlerawservers[info_hash]

    def listen_forever(self):
        self.rawserver.listen_forever(self)
        # Loop exited: mark every torrent finished and wake its owner.
        for srs in self.singlerawservers.values():
            srs.finished = True
            srs.running = False
            srs.doneflag.set()

    ### RawServer handler functions ###
    # be wary of name collisions

    def external_connection_made(self, ss):
        NewSocketHandler(self, ss)
# -----------------------------------------------------------------------
# /src/BitTornado/__init__.py
product_name = 'BitTornado'
version_short = 'T-0.3.17'

version = version_short+' ('+product_name+')'
report_email = version_short+'@degreez.net'

from types import StringType
from sha import sha
from time import time, clock
try:
    from os import getpid
except ImportError:
    def getpid():
        return 1

# 64-character alphabet used to pack 6-bit values into peer-id bytes.
mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'

# Peer-id prefix: the version letter plus encoded version numbers, padded
# with '-' out to six characters.
_idprefix = version_short[0]
for subver in version_short[2:].split('.'):
    try:
        subver = int(subver)
    except:
        subver = 0
    _idprefix += mapbase64[subver]
_idprefix += ('-' * (6-len(_idprefix)))
_idrandom = [None]

def resetPeerIDs():
    # Build a fresh random peer-id suffix from /dev/urandom when available,
    # mixed with several timing/pid sources as extra entropy.
    try:
        f = open('/dev/urandom','rb')
        x = f.read(20)
        f.close()
    except:
        x = ''

    # Busy-wait counters act as cheap jitter-based entropy.
    l1 = 0
    t = clock()
    while t == clock():
        l1 += 1
    l2 = 0
    t = long(time()*100)
    while t == long(time()*100):
        l2 += 1
    l3 = 0
    if l2 < 1000:
        t = long(time()*10)
        while t == long(clock()*10):
            l3 += 1
    x += ( repr(time()) + '/' + str(time()) + '/'
           + str(l1) + '/' + str(l2) + '/' + str(l3) + '/'
           + str(getpid()) )

    s = ''
    # Map the low 6 bits of each of the last 11 digest bytes to the alphabet.
    for i in sha(x).digest()[-11:]:
        s += mapbase64[ord(i) & 0x3F]
    _idrandom[0] = s

resetPeerIDs()

def createPeerID(ins = '---'):
    # 20-byte peer id: 6-char version prefix + 3-char caller tag + 11
    # random characters.
    assert type(ins) is StringType
    assert len(ins) == 3
    return _idprefix + ins + _idrandom[0]
# -----------------------------------------------------------------------
# /src/BitTornado/bencode.py
# Written by Petru Paler, Uoti Urpala, Ross Cohen and John Hoffman
# see LICENSE.txt for license information

from types import IntType, LongType, StringType, ListType, TupleType, DictType
try:
    from types import BooleanType
except ImportError:
    BooleanType = None
try:
    from types import UnicodeType
except ImportError:
    UnicodeType = None
from cStringIO import StringIO

def decode_int(x, f):
    # Decode 'i<number>e' at offset f -> (value, offset past the 'e').
    f += 1
    newf = x.index('e', f)
    try:
        n = int(x[f:newf])
    except:
        n = long(x[f:newf])
    if x[f] == '-':
        # Reject non-canonical '-0'.
        if x[f + 1] == '0':
            raise ValueError
    elif x[f] == '0' and newf != f+1:
        # Reject leading zeros.
        raise ValueError
    return (n, newf+1)
def decode_dict(x, f):
    """Decode a bencoded dictionary 'd...e' starting at offset f.

    Keys must be strings and must appear in strictly increasing order;
    a repeated or out-of-order key raises ValueError.  Returns
    (dict, offset-after-the-'e').
    """
    r, f = {}, f+1
    lastkey = None
    while x[f] != 'e':
        k, f = decode_string(x, f)
        # Original relied on Py2's 'None < any string'; the explicit
        # None guard has identical behavior and ports to Py3.
        if lastkey is not None and lastkey >= k:
            raise ValueError
        lastkey = k
        r[k], f = decode_func[x[f]](x, f)
    return (r, f + 1)
-10L 108 | try: 109 | bdecode('i-0e') 110 | assert 0 111 | except ValueError: 112 | pass 113 | try: 114 | bdecode('i123') 115 | assert 0 116 | except ValueError: 117 | pass 118 | try: 119 | bdecode('') 120 | assert 0 121 | except ValueError: 122 | pass 123 | try: 124 | bdecode('i6easd') 125 | assert 0 126 | except ValueError: 127 | pass 128 | try: 129 | bdecode('35208734823ljdahflajhdf') 130 | assert 0 131 | except ValueError: 132 | pass 133 | try: 134 | bdecode('2:abfdjslhfld') 135 | assert 0 136 | except ValueError: 137 | pass 138 | assert bdecode('0:') == '' 139 | assert bdecode('3:abc') == 'abc' 140 | assert bdecode('10:1234567890') == '1234567890' 141 | try: 142 | bdecode('02:xy') 143 | assert 0 144 | except ValueError: 145 | pass 146 | try: 147 | bdecode('l') 148 | assert 0 149 | except ValueError: 150 | pass 151 | assert bdecode('le') == [] 152 | try: 153 | bdecode('leanfdldjfh') 154 | assert 0 155 | except ValueError: 156 | pass 157 | assert bdecode('l0:0:0:e') == ['', '', ''] 158 | try: 159 | bdecode('relwjhrlewjh') 160 | assert 0 161 | except ValueError: 162 | pass 163 | assert bdecode('li1ei2ei3ee') == [1, 2, 3] 164 | assert bdecode('l3:asd2:xye') == ['asd', 'xy'] 165 | assert bdecode('ll5:Alice3:Bobeli2ei3eee') == [['Alice', 'Bob'], [2, 3]] 166 | try: 167 | bdecode('d') 168 | assert 0 169 | except ValueError: 170 | pass 171 | try: 172 | bdecode('defoobar') 173 | assert 0 174 | except ValueError: 175 | pass 176 | assert bdecode('de') == {} 177 | assert bdecode('d3:agei25e4:eyes4:bluee') == {'age': 25, 'eyes': 'blue'} 178 | assert bdecode('d8:spam.mp3d6:author5:Alice6:lengthi100000eee') == {'spam.mp3': {'author': 'Alice', 'length': 100000}} 179 | try: 180 | bdecode('d3:fooe') 181 | assert 0 182 | except ValueError: 183 | pass 184 | try: 185 | bdecode('di1e0:e') 186 | assert 0 187 | except ValueError: 188 | pass 189 | try: 190 | bdecode('d1:b0:1:a0:e') 191 | assert 0 192 | except ValueError: 193 | pass 194 | try: 195 | bdecode('d1:a0:1:a0:e') 196 | assert 
def encode_int(x, r):
    """Append the bencoding of integer x, 'i<x>e', to output list r."""
    r.append('i')
    r.append(str(x))
    r.append('e')

def encode_bool(x, r):
    """Bencode a boolean as the integer 0 or 1."""
    encode_int(int(x), r)

def encode_string(x, r):
    """Append the bencoding of string x, '<len>:<bytes>', to list r."""
    r.append(str(len(x)))
    r.append(':')
    r.append(x)

def encode_unicode(x, r):
    """Bencode a unicode string as its UTF-8 encoded byte string."""
    encode_string(x.encode('UTF-8'), r)
| 284 | def bencode(x): 285 | r = [] 286 | try: 287 | encode_func[type(x)](x, r) 288 | except: 289 | print "*** error *** could not encode type %s (value: %s)" % (type(x), x) 290 | assert 0 291 | return ''.join(r) 292 | 293 | def test_bencode(): 294 | assert bencode(4) == 'i4e' 295 | assert bencode(0) == 'i0e' 296 | assert bencode(-10) == 'i-10e' 297 | assert bencode(12345678901234567890L) == 'i12345678901234567890e' 298 | assert bencode('') == '0:' 299 | assert bencode('abc') == '3:abc' 300 | assert bencode('1234567890') == '10:1234567890' 301 | assert bencode([]) == 'le' 302 | assert bencode([1, 2, 3]) == 'li1ei2ei3ee' 303 | assert bencode([['Alice', 'Bob'], [2, 3]]) == 'll5:Alice3:Bobeli2ei3eee' 304 | assert bencode({}) == 'de' 305 | assert bencode({'age': 25, 'eyes': 'blue'}) == 'd3:agei25e4:eyes4:bluee' 306 | assert bencode({'spam.mp3': {'author': 'Alice', 'length': 100000}}) == 'd8:spam.mp3d6:author5:Alice6:lengthi100000eee' 307 | try: 308 | bencode({1: 'foo'}) 309 | assert 0 310 | except AssertionError: 311 | pass 312 | 313 | 314 | try: 315 | import psyco 316 | psyco.bind(bdecode) 317 | psyco.bind(bencode) 318 | except ImportError: 319 | pass 320 | -------------------------------------------------------------------------------- /src/BitTornado/bitfield.py: -------------------------------------------------------------------------------- 1 | # Written by Bram Cohen, Uoti Urpala, and John Hoffman 2 | # see LICENSE.txt for license information 3 | 4 | try: 5 | True 6 | except: 7 | True = 1 8 | False = 0 9 | bool = lambda x: not not x 10 | 11 | try: 12 | sum([1]) 13 | negsum = lambda a: len(a)-sum(a) 14 | except: 15 | negsum = lambda a: reduce(lambda x,y: x+(not y), a, 0) 16 | 17 | def _int_to_booleans(x): 18 | r = [] 19 | for i in range(8): 20 | r.append(bool(x & 0x80)) 21 | x <<= 1 22 | return tuple(r) 23 | 24 | lookup_table = [] 25 | reverse_lookup_table = {} 26 | for i in xrange(256): 27 | x = _int_to_booleans(i) 28 | lookup_table.append(x) 29 | 
class Bitfield:
    """Compact piece-availability bitmap.

    Backed by a plain list of booleans plus a running count of unset
    bits (numfalse) so complete() is O(1).  Construct from a length
    (all bits clear), from a packed bitstring, or by copying another
    Bitfield.
    """

    def __init__(self, length = None, bitstring = None, copyfrom = None):
        if copyfrom is not None:
            self.length = copyfrom.length
            self.array = copyfrom.array[:]
            self.numfalse = copyfrom.numfalse
            return
        if length is None:
            # Modernized from 'raise ValueError, "..."' — identical
            # behavior, valid in both Python 2 and 3.
            raise ValueError("length must be provided unless copying from another array")
        self.length = length
        if bitstring is not None:
            # The packed string must be exactly ceil(length/8) bytes...
            extra = len(bitstring) * 8 - length
            if extra < 0 or extra >= 8:
                raise ValueError
            t = lookup_table
            r = []
            for c in bitstring:
                r.extend(t[ord(c)])
            # ...and any trailing pad bits must be zero.
            if extra > 0:
                if r[-extra:] != [0] * extra:
                    raise ValueError
                del r[-extra:]
            self.array = r
            self.numfalse = negsum(r)
        else:
            self.array = [False] * length
            self.numfalse = length

    def __setitem__(self, index, val):
        val = bool(val)
        # Adjust the unset-bit count by the delta before storing.
        self.numfalse += self.array[index]-val
        self.array[index] = val

    def __getitem__(self, index):
        return self.array[index]

    def __len__(self):
        return self.length

    def tostring(self):
        """Pack the bits into a byte string, MSB first, zero-padded."""
        booleans = self.array
        t = reverse_lookup_table
        s = len(booleans) % 8
        r = [ t[tuple(booleans[x:x+8])] for x in xrange(0, len(booleans)-s, 8) ]
        if s:
            # NOTE(review): '+=' on a 1-char string extends the list with
            # that single character — works only because table values are
            # single chars; left unchanged.
            r += t[tuple(booleans[-s:] + ([0] * (8-s)))]
        return ''.join(r)

    def complete(self):
        """True when every bit is set."""
        return not self.numfalse
class RelativeTime:
    """A smoothed clock that tolerates system-clock jumps.

    get_time() never goes backwards and never leaps more than
    _MAXFORWARD seconds in one call; on a detected jump it creeps
    forward by _FUDGE and re-anchors its offset.
    """

    def __init__(self):
        self.time = time()
        self.offset = 0

    def get_time(self):
        now = time() + self.offset
        if self.time <= now <= self.time + _MAXFORWARD:
            # Normal case: clock moved forward a sane amount.
            self.time = now
            return now
        # System clock jumped (backwards or too far forward):
        # advance a little and absorb the jump into the offset.
        self.time += _FUDGE
        self.offset += self.time - now
        return self.time
def ini_write(f, d, comment=''):
    """Write dict d to the Windows-style INI file named f.

    Non-dict top-level values go into the unnamed leading section; dict
    values become named [sections].  Strings are written quoted,
    booleans as integers.  An optional comment is emitted as '# ' lines
    at the top.  Returns True on success, False on any error (nothing
    is raised).
    """
    try:
        a = {'':{}}
        for k,v in d.items():
            assert type(k) == StringType
            k = k.lower()
            if type(v) == DictType:
                if DEBUG:
                    print('new section:' + k)
                if k:
                    assert k not in a
                    a[k] = {}
                aa = a[k]
                # BUG FIX: iterating a dict directly yields only its
                # keys; .items() is required to unpack (key, value)
                # pairs (the type checks on kk/vv below prove intent).
                for kk,vv in v.items():
                    assert type(kk) == StringType
                    kk = kk.lower()
                    assert kk not in aa
                    if type(vv) == BooleanType:
                        vv = int(vv)
                    if type(vv) == StringType:
                        vv = '"'+vv+'"'
                    aa[kk] = str(vv)
                    if DEBUG:
                        print('a['+k+']['+kk+'] = '+str(vv))
            else:
                aa = a['']
                assert k not in aa
                if type(v) == BooleanType:
                    v = int(v)
                if type(v) == StringType:
                    v = '"'+v+'"'
                aa[k] = str(v)
                if DEBUG:
                    print('a[\'\']['+k+'] = '+str(v))
        r = open(f,'w')
        if comment:
            for c in comment.split('\n'):
                r.write('# '+c+'\n')
            r.write('\n')
        l = a.keys()
        l.sort()
        for k in l:
            if k:
                r.write('\n['+k+']\n')
            aa = a[k]
            ll = aa.keys()
            ll.sort()
            for kk in ll:
                r.write(kk+' = '+aa[kk]+'\n')
        success = True
    except:
        if DEBUG:
            print_exc()
        success = False
    # Best-effort close; r may never have been opened if building 'a'
    # failed, hence the guard.
    try:
        r.close()
    except:
        pass
    return success
def to_long_ipv4(ip):
    """Convert a dotted-quad IPv4 string 'a.b.c.d' to one integer.

    Raises ValueError if there are not exactly four dot-separated
    parts or a part is not an integer.  (Octets are not range-checked,
    matching the original behavior.)
    """
    ip = ip.split('.')
    if len(ip) != 4:
        # Modernized from 'raise ValueError, "..."' — same exception.
        raise ValueError("bad address")
    # The 0L literal was unnecessary: Py2 ints auto-promote to long,
    # and plain 0 also works under Py3.
    b = 0
    for n in ip:
        b *= 256
        b += int(n)
    return b
    def __init__(self):
        # Parallel structures per address family: a sorted list of
        # range-start addresses (as long ints) plus a dict mapping each
        # start address to its inclusive range end.  IPv4-mapped IPv6
        # addresses are folded into the v4 structures by append().
        self.ipv4list = [] # starts of ranges
        self.ipv4dict = {} # start: end of ranges
        self.ipv6list = [] # same, for IPv6 addresses
        self.ipv6dict = {} # same, for IPv6 addresses
def is_ipv4(ip):
    """True when ip looks like an IPv4 address (contains no colon)."""
    return ':' not in ip

def is_valid_ip(ip):
    """Loose syntactic validity check for an IPv4 or IPv6 string.

    Returns True/False; never raises (all parse errors map to False).
    """
    try:
        if not is_ipv4(ip):
            # IPv6 form: valid iff to_long_ipv6 parses it.
            to_long_ipv6(ip)
            return True
        octets = ip.split('.')
        assert len(octets) == 4
        for part in octets:
            # chr() rejects out-of-range octets (0-255 under Py2).
            chr(int(part))
        return True
    except:
        return False
    def open(self, ip, p):
        # Ask the Windows UPnP static-mapping collection to forward
        # external TCP port p to ip:p, tagged with this client's ID.
        # Returns True on success, False on any COM failure.
        # NOTE(review): 'map' shadows the builtin; left unchanged.
        map = self._get_map()
        try:
            map.Add(p,'TCP',p,ip,True,ID)
            if DEBUG:
                print 'port opened: '+ip+':'+str(p)
            success = True
        except:
            if DEBUG:
                print "COULDN'T OPEN "+str(p)
                print_exc()
            success = False
        return success
mapping.ExternalPort 97 | prot = str(mapping.Protocol).lower() 98 | desc = str(mapping.Description).lower() 99 | except: 100 | port = None 101 | if port and prot == 'tcp' and desc[:3] == 'bt-': 102 | ports_in_use.append(port) 103 | success = True 104 | for port in ports_in_use: 105 | try: 106 | map.Remove(port,'TCP') 107 | except: 108 | success = False 109 | if not success and not retry: 110 | self.clean(retry = True) 111 | except: 112 | pass 113 | 114 | 115 | class _UPnP2: # derived from Yejun Yang's code 116 | # apparently does a direct search for UPnP hardware 117 | # may work in some cases where _UPnP1 won't, but is slow 118 | # still need to implement "clean" method 119 | 120 | def __init__(self): 121 | self.services = None 122 | self.last_got_services = -10e10 123 | 124 | def _get_services(self): 125 | if not self.services or self.last_got_services + EXPIRE_CACHE < clock(): 126 | self.services = [] 127 | try: 128 | f=win32com.client.Dispatch("UPnP.UPnPDeviceFinder") 129 | for t in ( "urn:schemas-upnp-org:service:WANIPConnection:1", 130 | "urn:schemas-upnp-org:service:WANPPPConnection:1" ): 131 | try: 132 | conns = f.FindByType(t,0) 133 | for c in xrange(len(conns)): 134 | try: 135 | svcs = conns[c].Services 136 | for s in xrange(len(svcs)): 137 | try: 138 | self.services.append(svcs[s]) 139 | except: 140 | pass 141 | except: 142 | pass 143 | except: 144 | pass 145 | except: 146 | pass 147 | self.last_got_services = clock() 148 | return self.services 149 | 150 | def test(self): 151 | try: 152 | assert self._get_services() # make sure some services can be found 153 | success = True 154 | except: 155 | success = False 156 | return success 157 | 158 | 159 | def open(self, ip, p): 160 | svcs = self._get_services() 161 | success = False 162 | for s in svcs: 163 | try: 164 | s.InvokeAction('AddPortMapping',['',p,'TCP',p,ip,True,ID,0],'') 165 | success = True 166 | except: 167 | pass 168 | if DEBUG and not success: 169 | print "COULDN'T OPEN "+str(p) 170 | 
print_exc() 171 | return success 172 | 173 | 174 | def close(self, p): 175 | svcs = self._get_services() 176 | success = False 177 | for s in svcs: 178 | try: 179 | s.InvokeAction('DeletePortMapping', ['',p,'TCP'], '') 180 | success = True 181 | except: 182 | pass 183 | if DEBUG and not success: 184 | print "COULDN'T OPEN "+str(p) 185 | print_exc() 186 | return success 187 | 188 | 189 | class _UPnP: # master holding class 190 | def __init__(self): 191 | self.upnp1 = _UPnP1() 192 | self.upnp2 = _UPnP2() 193 | self.upnplist = (None, self.upnp1, self.upnp2) 194 | self.upnp = None 195 | self.local_ip = None 196 | self.last_got_ip = -10e10 197 | 198 | def get_ip(self): 199 | if self.last_got_ip + EXPIRE_CACHE < clock(): 200 | local_ips = IP_List() 201 | local_ips.set_intranet_addresses() 202 | try: 203 | for info in socket.getaddrinfo(socket.gethostname(),0,socket.AF_INET): 204 | # exception if socket library isn't recent 205 | self.local_ip = info[4][0] 206 | if local_ips.includes(self.local_ip): 207 | self.last_got_ip = clock() 208 | if DEBUG: 209 | print 'Local IP found: '+self.local_ip 210 | break 211 | else: 212 | raise ValueError('couldn\'t find intranet IP') 213 | except: 214 | self.local_ip = None 215 | if DEBUG: 216 | print 'Error finding local IP' 217 | print_exc() 218 | return self.local_ip 219 | 220 | def test(self, upnp_type): 221 | if DEBUG: 222 | print 'testing UPnP type '+str(upnp_type) 223 | if not upnp_type or not _supported or self.get_ip() is None: 224 | if DEBUG: 225 | print 'not supported' 226 | return 0 227 | pythoncom.CoInitialize() # leave initialized 228 | self.upnp = self.upnplist[upnp_type] # cache this 229 | if self.upnp.test(): 230 | if DEBUG: 231 | print 'ok' 232 | return upnp_type 233 | if DEBUG: 234 | print 'tested bad' 235 | return 0 236 | 237 | def open(self, p): 238 | assert self.upnp, "must run UPnP_test() with the desired UPnP access type first" 239 | return self.upnp.open(self.get_ip(), p) 240 | 241 | def close(self, p): 242 | 
assert self.upnp, "must run UPnP_test() with the desired UPnP access type first" 243 | return self.upnp.close(p) 244 | 245 | def clean(self): 246 | return self.upnp1.clean() 247 | 248 | _upnp_ = _UPnP() 249 | 250 | UPnP_test = _upnp_.test 251 | UPnP_open_port = _upnp_.open 252 | UPnP_close_port = _upnp_.close 253 | UPnP_reset = _upnp_.clean 254 | 255 | -------------------------------------------------------------------------------- /src/BitTornado/parseargs.py: -------------------------------------------------------------------------------- 1 | # Written by Bill Bumgarner and Bram Cohen 2 | # see LICENSE.txt for license information 3 | 4 | from types import * 5 | from cStringIO import StringIO 6 | 7 | 8 | def splitLine(line, COLS=80, indent=10): 9 | indent = " " * indent 10 | width = COLS - (len(indent) + 1) 11 | if indent and width < 15: 12 | width = COLS - 2 13 | indent = " " 14 | s = StringIO() 15 | i = 0 16 | for word in line.split(): 17 | if i == 0: 18 | s.write(indent+word) 19 | i = len(word) 20 | continue 21 | if i + len(word) >= width: 22 | s.write('\n'+indent+word) 23 | i = len(word) 24 | continue 25 | s.write(' '+word) 26 | i += len(word) + 1 27 | return s.getvalue() 28 | 29 | def formatDefinitions(options, COLS, presets = {}): 30 | s = StringIO() 31 | for (longname, default, doc) in options: 32 | s.write('--' + longname + ' \n') 33 | default = presets.get(longname, default) 34 | if type(default) in (IntType, LongType): 35 | try: 36 | default = int(default) 37 | except: 38 | pass 39 | if default is not None: 40 | doc += ' (defaults to ' + repr(default) + ')' 41 | s.write(splitLine(doc,COLS,10)) 42 | s.write('\n\n') 43 | return s.getvalue() 44 | 45 | 46 | def usage(str): 47 | raise ValueError(str) 48 | 49 | 50 | def defaultargs(options): 51 | l = {} 52 | for (longname, default, doc) in options: 53 | if default is not None: 54 | l[longname] = default 55 | return l 56 | 57 | 58 | def parseargs(argv, options, minargs = None, maxargs = None, presets = {}): 59 
| config = {} 60 | longkeyed = {} 61 | for option in options: 62 | longname, default, doc = option 63 | longkeyed[longname] = option 64 | config[longname] = default 65 | for longname in presets.keys(): # presets after defaults but before arguments 66 | config[longname] = presets[longname] 67 | options = [] 68 | args = [] 69 | pos = 0 70 | while pos < len(argv): 71 | if argv[pos][:2] != '--': 72 | args.append(argv[pos]) 73 | pos += 1 74 | else: 75 | if pos == len(argv) - 1: 76 | usage('parameter passed in at end with no value') 77 | key, value = argv[pos][2:], argv[pos+1] 78 | pos += 2 79 | if not longkeyed.has_key(key): 80 | usage('unknown key --' + key) 81 | longname, default, doc = longkeyed[key] 82 | try: 83 | t = type(config[longname]) 84 | if t is NoneType or t is StringType: 85 | config[longname] = value 86 | elif t in (IntType, LongType): 87 | config[longname] = long(value) 88 | elif t is FloatType: 89 | config[longname] = float(value) 90 | else: 91 | assert 0 92 | except ValueError, e: 93 | usage('wrong format of --%s - %s' % (key, str(e))) 94 | for key, value in config.items(): 95 | if value is None: 96 | usage("Option --%s is required." % key) 97 | if minargs is not None and len(args) < minargs: 98 | usage("Must supply at least %d args." % minargs) 99 | if maxargs is not None and len(args) > maxargs: 100 | usage("Too many args - %d max." 
% maxargs) 101 | return (config, args) 102 | 103 | def test_parseargs(): 104 | assert parseargs(('d', '--a', 'pq', 'e', '--b', '3', '--c', '4.5', 'f'), (('a', 'x', ''), ('b', 1, ''), ('c', 2.3, ''))) == ({'a': 'pq', 'b': 3, 'c': 4.5}, ['d', 'e', 'f']) 105 | assert parseargs([], [('a', 'x', '')]) == ({'a': 'x'}, []) 106 | assert parseargs(['--a', 'x', '--a', 'y'], [('a', '', '')]) == ({'a': 'y'}, []) 107 | try: 108 | parseargs([], [('a', 'x', '')]) 109 | except ValueError: 110 | pass 111 | try: 112 | parseargs(['--a', 'x'], []) 113 | except ValueError: 114 | pass 115 | try: 116 | parseargs(['--a'], [('a', 'x', '')]) 117 | except ValueError: 118 | pass 119 | try: 120 | parseargs([], [], 1, 2) 121 | except ValueError: 122 | pass 123 | assert parseargs(['x'], [], 1, 2) == ({}, ['x']) 124 | assert parseargs(['x', 'y'], [], 1, 2) == ({}, ['x', 'y']) 125 | try: 126 | parseargs(['x', 'y', 'z'], [], 1, 2) 127 | except ValueError: 128 | pass 129 | try: 130 | parseargs(['--a', '2.0'], [('a', 3, '')]) 131 | except ValueError: 132 | pass 133 | try: 134 | parseargs(['--a', 'z'], [('a', 2.1, '')]) 135 | except ValueError: 136 | pass 137 | 138 | -------------------------------------------------------------------------------- /src/BitTornado/parsedir.py: -------------------------------------------------------------------------------- 1 | # Written by John Hoffman and Uoti Urpala 2 | # see LICENSE.txt for license information 3 | from bencode import bencode, bdecode 4 | from BT1.btformats import check_info 5 | from os.path import exists, isfile 6 | from sha import sha 7 | import sys, os 8 | 9 | try: 10 | True 11 | except: 12 | True = 1 13 | False = 0 14 | 15 | NOISY = False 16 | 17 | def _errfunc(x): 18 | print ":: "+x 19 | 20 | def parsedir(directory, parsed, files, blocked, 21 | exts = ['.torrent'], return_metainfo = False, errfunc = _errfunc): 22 | if NOISY: 23 | errfunc('checking dir') 24 | dirs_to_check = [directory] 25 | new_files = {} 26 | new_blocked = {} 27 | torrent_type = 
                   {}
    while dirs_to_check:    # first, recurse directories and gather torrents
        directory = dirs_to_check.pop()
        newtorrents = False
        for f in os.listdir(directory):
            newtorrent = None
            for ext in exts:
                if f.endswith(ext):
                    newtorrent = ext[1:]    # extension without the dot
                    break
            if newtorrent:
                newtorrents = True
                p = os.path.join(directory, f)
                new_files[p] = [(os.path.getmtime(p), os.path.getsize(p)), 0]
                torrent_type[p] = newtorrent
        # Only descend into subdirectories when this level had no torrents.
        if not newtorrents:
            for f in os.listdir(directory):
                p = os.path.join(directory, f)
                if os.path.isdir(p):
                    dirs_to_check.append(p)

    new_parsed = {}
    to_add = []
    added = {}
    removed = {}
    # files[path] = [(modification_time, size), hash], hash is 0 if the file
    # has not been successfully parsed
    for p,v in new_files.items():   # re-add old items and check for changes
        oldval = files.get(p)
        if not oldval:      # new file
            to_add.append(p)
            continue
        h = oldval[1]
        if oldval[0] == v[0]:   # file is unchanged from last parse
            if h:
                if blocked.has_key(p):  # parseable + blocked means duplicate
                    to_add.append(p)    # other duplicate may have gone away
                else:
                    new_parsed[h] = parsed[h]
                new_files[p] = oldval   # keep the previously-computed hash
            else:
                new_blocked[p] = 1      # same broken unparseable file
            continue
        # File changed: drop the old entry (if any) and re-parse below.
        if parsed.has_key(h) and not blocked.has_key(p):
            if NOISY:
                errfunc('removing '+p+' (will re-add)')
            removed[h] = parsed[h]
        to_add.append(p)

    to_add.sort()
    for p in to_add:        # then, parse new and changed torrents
        new_file = new_files[p]
        v,h = new_file
        if new_parsed.has_key(h):   # duplicate
            if not blocked.has_key(p) or files[p][0] != v:
                errfunc('**warning** '+
                    p +' is a duplicate torrent for '+new_parsed[h]['path'])
            new_blocked[p] = 1
            continue

        if NOISY:
            errfunc('adding '+p)
        try:
            ff = open(p, 'rb')
            d = bdecode(ff.read())
            check_info(d['info'])
            h = sha(bencode(d['info'])).digest()
            new_file[1] = h
            if new_parsed.has_key(h):   # duplicate discovered after hashing
                errfunc('**warning** '+
                    p +' is a duplicate torrent for '+new_parsed[h]['path'])
                new_blocked[p] = 1
                continue

            # Build the summary record handed back to callers.
            a = {}
            a['path'] = p
            f = os.path.basename(p)
            a['file'] = f
            a['type'] = torrent_type[p]
            i = d['info']
            l = 0       # total payload length
            nf = 0      # number of files in the torrent
            if i.has_key('length'):
                l = i.get('length',0)
                nf = 1
            elif i.has_key('files'):
                for li in i['files']:
                    nf += 1
                    if li.has_key('length'):
                        l += li['length']
            a['numfiles'] = nf
            a['length'] = l
            a['name'] = i.get('name', f)
            def setkey(k, d = d, a = a):
                # Copy optional top-level metainfo keys when present.
                if d.has_key(k):
                    a[k] = d[k]
            setkey('failure reason')
            setkey('warning message')
            setkey('announce-list')
            if return_metainfo:
                a['metainfo'] = d
        except:
            # Deliberate catch-all: any parse problem just blocks the file.
            errfunc('**warning** '+p+' has errors')
            new_blocked[p] = 1
            continue
        try:
            ff.close()
        except:
            pass
        if NOISY:
            errfunc('...
                    successful')
        new_parsed[h] = a
        added[h] = a

    for p,v in files.items():       # and finally, mark removed torrents
        if not new_files.has_key(p) and not blocked.has_key(p):
            if NOISY:
                errfunc('removing '+p)
            removed[v[1]] = parsed[v[1]]

    if NOISY:
        errfunc('done checking')
    return (new_parsed, new_files, new_blocked, added, removed)

# --- src/BitTornado/piecebuffer.py ----------------------------------------
# Written by John Hoffman
# see LICENSE.txt for license information

from array import array
from threading import Lock
# import inspect
try:
    True
except:
    True = 1
    False = 0

DEBUG = False

class SingleBuffer:
    # Growable byte buffer backed by array('c'); recycled through BufferPool
    # so piece data avoids repeated allocation.
    def __init__(self, pool):
        self.pool = pool
        self.buf = array('c')

    def init(self):
        # Reset logical length; the underlying array keeps its storage so a
        # reused buffer does not reallocate.
        if DEBUG:
            print self.count
        '''
        for x in xrange(6,1,-1):
            try:
                f = inspect.currentframe(x).f_code
                print (f.co_filename,f.co_firstlineno,f.co_name)
                del f
            except:
                pass
        print ''
        '''
        self.length = 0

    def append(self, s):
        l = self.length+len(s)
        self.buf[self.length:l] = array('c',s)
        self.length = l

    def __len__(self):
        return self.length

    def __getslice__(self, a, b):
        if b > self.length:
            b = self.length
        if b < 0:
            b += self.length
        if a == 0 and b == self.length and len(self.buf) == b:
            return self.buf # optimization
        return self.buf[a:b]

    def getarray(self):
        return self.buf[:self.length]

    def release(self):
        if DEBUG:
            print -self.count
        self.pool.release(self)


class BufferPool:
    # Free-list of SingleBuffers shared across the download engine.
    def __init__(self):
        self.pool = []
        self.lock = Lock()
        if DEBUG:
            self.count = 0

    def new(self):
        self.lock.acquire()
        if self.pool:
            x = self.pool.pop()
        else:
            x = SingleBuffer(self)
            if DEBUG:
                self.count += 1
                x.count = self.count
        x.init()
        self.lock.release()
        return x

    def release(self, x):
        # NOTE(review): appends without taking self.lock -- appears to rely
        # on list.append being atomic under the GIL; confirm before porting.
        self.pool.append(x)


_pool = BufferPool()
PieceBuffer = _pool.new     # module-level factory used by callers

# --- src/BitTornado/selectpoll.py -----------------------------------------
# Written by Bram Cohen
# see LICENSE.txt for license information

from select import select, error
from time import sleep
from types import IntType
from bisect import bisect
POLLIN = 1
POLLOUT = 2
POLLERR = 8
POLLHUP = 16

class poll:
    # Emulates the poll() object API on top of select(), for platforms
    # without native poll support.
    def __init__(self):
        self.rlist = []     # sorted fds registered for read
        self.wlist = []     # sorted fds registered for write

    def register(self, f, t):
        if type(f) != IntType:
            f = f.fileno()
        if (t & POLLIN):
            insert(self.rlist, f)
        else:
            remove(self.rlist, f)
        if (t & POLLOUT):
            insert(self.wlist, f)
        else:
            remove(self.wlist, f)

    def unregister(self, f):
        if type(f) != IntType:
            f = f.fileno()
        remove(self.rlist, f)
        remove(self.wlist, f)

    def poll(self, timeout = None):
        # NOTE(review): with no registered fds and timeout=None this calls
        # sleep(None), which raises -- callers appear to always pass a value.
        if self.rlist or self.wlist:
            try:
                r, w, e = select(self.rlist, self.wlist, [], timeout)
            except ValueError:
                return None
        else:
            sleep(timeout)
            return []
        result = []
        for s in r:
            result.append((s, POLLIN))
        for s in w:
            result.append((s, POLLOUT))
        return result

def remove(list, item):
    # Delete item from a sorted list if present (binary search).
    i = bisect(list, item)
    if i > 0 and list[i-1] == item:
        del list[i-1]

def insert(list, item):
    # Insert item into a sorted list, keeping it sorted and duplicate-free.
    i = bisect(list, item)
    if i == 0 or list[i-1] != item:
        list.insert(i, item)

def test_remove():
    x = [2, 4, 6]
    remove(x, 2)
    assert x == [4, 6]
    x =
[2, 4, 6] 67 | remove(x, 4) 68 | assert x == [2, 6] 69 | x = [2, 4, 6] 70 | remove(x, 6) 71 | assert x == [2, 4] 72 | x = [2, 4, 6] 73 | remove(x, 5) 74 | assert x == [2, 4, 6] 75 | x = [2, 4, 6] 76 | remove(x, 1) 77 | assert x == [2, 4, 6] 78 | x = [2, 4, 6] 79 | remove(x, 7) 80 | assert x == [2, 4, 6] 81 | x = [2, 4, 6] 82 | remove(x, 5) 83 | assert x == [2, 4, 6] 84 | x = [] 85 | remove(x, 3) 86 | assert x == [] 87 | 88 | def test_insert(): 89 | x = [2, 4] 90 | insert(x, 1) 91 | assert x == [1, 2, 4] 92 | x = [2, 4] 93 | insert(x, 3) 94 | assert x == [2, 3, 4] 95 | x = [2, 4] 96 | insert(x, 5) 97 | assert x == [2, 4, 5] 98 | x = [2, 4] 99 | insert(x, 2) 100 | assert x == [2, 4] 101 | x = [2, 4] 102 | insert(x, 4) 103 | assert x == [2, 4] 104 | x = [2, 3, 4] 105 | insert(x, 3) 106 | assert x == [2, 3, 4] 107 | x = [] 108 | insert(x, 3) 109 | assert x == [3] 110 | -------------------------------------------------------------------------------- /src/BitTornado/subnetparse.py: -------------------------------------------------------------------------------- 1 | # Written by John Hoffman 2 | # see LICENSE.txt for license information 3 | 4 | from bisect import bisect, insort 5 | 6 | try: 7 | True 8 | except: 9 | True = 1 10 | False = 0 11 | bool = lambda x: not not x 12 | 13 | hexbinmap = { 14 | '0': '0000', 15 | '1': '0001', 16 | '2': '0010', 17 | '3': '0011', 18 | '4': '0100', 19 | '5': '0101', 20 | '6': '0110', 21 | '7': '0111', 22 | '8': '1000', 23 | '9': '1001', 24 | 'a': '1010', 25 | 'b': '1011', 26 | 'c': '1100', 27 | 'd': '1101', 28 | 'e': '1110', 29 | 'f': '1111', 30 | 'x': '0000', 31 | } 32 | 33 | chrbinmap = {} 34 | for n in xrange(256): 35 | b = [] 36 | nn = n 37 | for i in xrange(8): 38 | if nn & 0x80: 39 | b.append('1') 40 | else: 41 | b.append('0') 42 | nn <<= 1 43 | chrbinmap[n] = ''.join(b) 44 | 45 | 46 | def to_bitfield_ipv4(ip): 47 | ip = ip.split('.') 48 | if len(ip) != 4: 49 | raise ValueError, "bad address" 50 | b = [] 51 | for i in ip: 52 | 
        b.append(chrbinmap[int(i)])
    return ''.join(b)

def to_bitfield_ipv6(ip):
    # Convert an IPv6 string (optionally with '::' and an embedded IPv4
    # tail) into a 128-char '0'/'1' bitstring.
    b = ''
    doublecolon = False

    if ip == '':
        raise ValueError, "bad address"
    if ip == '::':      # boundary handling
        ip = ''
    elif ip[:2] == '::':
        ip = ip[1:]
    elif ip[0] == ':':
        raise ValueError, "bad address"
    elif ip[-2:] == '::':
        ip = ip[:-1]
    elif ip[-1] == ':':
        raise ValueError, "bad address"
    for n in ip.split(':'):
        if n == '':     # double-colon: mark with ':' and zero-fill later
            if doublecolon:
                raise ValueError, "bad address"
            doublecolon = True
            b += ':'
            continue
        if n.find('.') >= 0:    # IPv4 tail
            n = to_bitfield_ipv4(n)
            b += n + '0'*(32-len(n))
            continue
        n = ('x'*(4-len(n))) + n    # left-pad short groups
        for i in n:
            b += hexbinmap[i]
    if doublecolon:
        # Replace the ':' marker with however many zero bits are missing.
        pos = b.find(':')
        b = b[:pos]+('0'*(129-len(b)))+b[pos+1:]
    if len(b) != 128:   # always check size
        raise ValueError, "bad address"
    return b

# 96-bit prefix of the v4-mapped IPv6 space (::ffff:0:0/96).
ipv4addrmask = to_bitfield_ipv6('::ffff:0:0')[:96]

class IP_List:
    """Sorted lists of bitstring prefixes; an address is included when it
    starts with any stored prefix (CIDR-style matching)."""
    def __init__(self):
        self.ipv4list = []
        self.ipv6list = []

    def __nonzero__(self):
        return bool(self.ipv4list or self.ipv6list)


    def append(self, ip, depth = 256):
        # Store ip truncated to `depth` bits; v4-mapped IPv6 addresses are
        # folded into the IPv4 list.
        if ip.find(':') < 0:        # IPv4
            insort(self.ipv4list,to_bitfield_ipv4(ip)[:depth])
        else:
            b = to_bitfield_ipv6(ip)
            if b.startswith(ipv4addrmask):
                insort(self.ipv4list,b[96:][:depth-96])
            else:
                insort(self.ipv6list,b[:depth])


    def includes(self, ip):
        if not (self.ipv4list or self.ipv6list):
            return False
        if ip.find(':') < 0:        # IPv4
            b = to_bitfield_ipv4(ip)
        else:
            b = to_bitfield_ipv6(ip)
            if b.startswith(ipv4addrmask):
                b = b[96:]
        if len(b) > 32:
            l = self.ipv6list
        else:
            l = self.ipv4list
        # Scan from the nearest prefix <= b; stop at the first prefix > b.
        # NOTE(review): `map` shadows the builtin here.
        for map in l[bisect(l,b)-1:]:
            if b.startswith(map):
                return True
            if map > b:
                return False
        return False


    def read_fieldlist(self, file):   # reads a list from a file in the format 'ip/len <whatever>'
        f = open(file, 'r')
        while True:
            line = f.readline()
            if not line:
                break
            line = line.strip().expandtabs()
            if not line or line[0] == '#':
                continue
            # Drop trailing junk after the first space or '#'.
            try:
                line, garbage = line.split(' ',1)
            except:
                pass
            try:
                line, garbage = line.split('#',1)
            except:
                pass
            try:
                ip, depth = line.split('/')
            except:
                ip = line
                depth = None
            try:
                if depth is not None:
                    depth = int(depth)
                # NOTE(review): a bare v4-mapped IPv6 line (no '/len') passes
                # depth=None, which TypeErrors in append() and is swallowed
                # below as a parse warning -- TODO confirm intended.
                self.append(ip,depth)
            except:
                print '*** WARNING *** could not parse IP range: '+line
        f.close()


    def set_intranet_addresses(self):
        # RFC1918/link-local/loopback ranges.
        self.append('127.0.0.1',8)
        self.append('10.0.0.0',8)
        self.append('172.16.0.0',12)
        self.append('192.168.0.0',16)
        self.append('169.254.0.0',16)
        self.append('::1')
        self.append('fe80::',16)
        self.append('fec0::',16)

    def set_ipv4_addresses(self):
        self.append('::ffff:0:0',96)

def ipv6_to_ipv4(ip):
    # Convert a v4-mapped IPv6 address back to dotted-quad notation.
    ip = to_bitfield_ipv6(ip)
    if not ip.startswith(ipv4addrmask):
        raise ValueError, "not convertible to IPv4"
    ip = ip[-32:]
    x = ''
    for i in range(4):
        x += str(int(ip[:8],2))
        if i < 3:
            x += '.'
189 | ip = ip[8:] 190 | return x 191 | 192 | def to_ipv4(ip): 193 | if is_ipv4(ip): 194 | _valid_ipv4(ip) 195 | return ip 196 | return ipv6_to_ipv4(ip) 197 | 198 | def is_ipv4(ip): 199 | return ip.find(':') < 0 200 | 201 | def _valid_ipv4(ip): 202 | ip = ip.split('.') 203 | if len(ip) != 4: 204 | raise ValueError 205 | for i in ip: 206 | chr(int(i)) 207 | 208 | def is_valid_ip(ip): 209 | try: 210 | if not ip: 211 | return False 212 | if is_ipv4(ip): 213 | _valid_ipv4(ip) 214 | return True 215 | to_bitfield_ipv6(ip) 216 | return True 217 | except: 218 | return False 219 | -------------------------------------------------------------------------------- /src/BitTornado/torrentlistparse.py: -------------------------------------------------------------------------------- 1 | # Written by John Hoffman 2 | # see LICENSE.txt for license information 3 | 4 | from binascii import unhexlify 5 | 6 | try: 7 | True 8 | except: 9 | True = 1 10 | False = 0 11 | 12 | 13 | # parses a list of torrent hashes, in the format of one hash per line in hex format 14 | 15 | def parsetorrentlist(filename, parsed): 16 | new_parsed = {} 17 | added = {} 18 | removed = parsed 19 | f = open(filename, 'r') 20 | while True: 21 | l = f.readline() 22 | if not l: 23 | break 24 | l = l.strip() 25 | try: 26 | if len(l) != 40: 27 | raise ValueError, 'bad line' 28 | h = unhexlify(l) 29 | except: 30 | print '*** WARNING *** could not parse line in torrent list: '+l 31 | if parsed.has_key(h): 32 | del removed[h] 33 | else: 34 | added[h] = True 35 | new_parsed[h] = True 36 | f.close() 37 | return (new_parsed, added, removed) 38 | 39 | -------------------------------------------------------------------------------- /src/BitTornado/zurllib.py: -------------------------------------------------------------------------------- 1 | # Written by John Hoffman 2 | # see LICENSE.txt for license information 3 | 4 | from httplib import HTTPConnection, HTTPSConnection, HTTPException 5 | from urlparse import urlparse 6 | 
from bencode import bdecode 7 | import socket 8 | from gzip import GzipFile 9 | from StringIO import StringIO 10 | from urllib import quote, unquote 11 | from __init__ import product_name, version_short 12 | 13 | VERSION = product_name+'/'+version_short 14 | MAX_REDIRECTS = 10 15 | 16 | 17 | class btHTTPcon(HTTPConnection): # attempt to add automatic connection timeout 18 | def connect(self): 19 | HTTPConnection.connect(self) 20 | try: 21 | self.sock.settimeout(30) 22 | except: 23 | pass 24 | 25 | class btHTTPScon(HTTPSConnection): # attempt to add automatic connection timeout 26 | def connect(self): 27 | HTTPSConnection.connect(self) 28 | try: 29 | self.sock.settimeout(30) 30 | except: 31 | pass 32 | 33 | class urlopen: 34 | def __init__(self, url): 35 | self.tries = 0 36 | self._open(url.strip()) 37 | self.error_return = None 38 | 39 | def _open(self, url): 40 | self.tries += 1 41 | if self.tries > MAX_REDIRECTS: 42 | raise IOError, ('http error', 500, 43 | "Internal Server Error: Redirect Recursion") 44 | (scheme, netloc, path, pars, query, fragment) = urlparse(url) 45 | if scheme != 'http' and scheme != 'https': 46 | raise IOError, ('url error', 'unknown url type', scheme, url) 47 | url = path 48 | if pars: 49 | url += ';'+pars 50 | if query: 51 | url += '?'+query 52 | # if fragment: 53 | try: 54 | if scheme == 'http': 55 | self.connection = btHTTPcon(netloc) 56 | else: 57 | self.connection = btHTTPScon(netloc) 58 | self.connection.request('GET', url, None, 59 | { 'User-Agent': VERSION, 60 | 'Accept-Encoding': 'gzip' } ) 61 | self.response = self.connection.getresponse() 62 | except HTTPException, e: 63 | raise IOError, ('http error', str(e)) 64 | status = self.response.status 65 | if status in (301,302): 66 | try: 67 | self.connection.close() 68 | except: 69 | pass 70 | self._open(self.response.getheader('Location')) 71 | return 72 | if status != 200: 73 | try: 74 | data = self._read() 75 | d = bdecode(data) 76 | if d.has_key('failure reason'): 77 | 
                    self.error_return = data
                    return
            except:
                pass
            raise IOError, ('http error', status, self.response.reason)

    def read(self):
        # If the server returned a bencoded failure message, hand that back
        # instead of re-reading the (already consumed) response.
        if self.error_return:
            return self.error_return
        return self._read()

    def _read(self):
        data = self.response.read()
        # Transparently decompress gzip-encoded responses.
        if self.response.getheader('Content-Encoding','').find('gzip') >= 0:
            try:
                compressed = StringIO(data)
                f = GzipFile(fileobj = compressed)
                data = f.read()
            except:
                raise IOError, ('http error', 'got corrupt response')
        return data

    def close(self):
        self.connection.close()

# --- src/murder_client.py -------------------------------------------------
# Copyright 2010 Twitter, Inc.
# Copyright 2010 Larry Gadea
# Copyright 2010 Matt Freels
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Usage: python murder_client.py peer/seed out.torrent OUT.OUT 127.0.0.1
# last parameter is the local ip address, normally 10.x.x.x

import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)

# Optional psyco JIT acceleration, controlled by the PSYCO flag module.
from BitTornado import PSYCO
if PSYCO.psyco:
    try:
        import psyco
        assert psyco.__version__ >= 0x010100f0
        psyco.full()
    except:
        pass

from BitTornado.download_bt1 import BT1Download, defaults, parse_params, get_usage, get_response
from BitTornado.RawServer import RawServer, UPnP_ERROR
from random import seed
from socket import error as socketerror
from BitTornado.bencode import bencode
from BitTornado.natpunch import UPnP_test
from threading import Event
from os.path import abspath
from sys import argv, stdout
import sys
import os
import threading
from sha import sha
from time import strftime
from BitTornado.clock import clock
from BitTornado import createPeerID, version
from BitTornado.ConfigDir import ConfigDir

assert sys.version >= '2', "Install Python 2.0 or greater"
try:
    True
except:
    True = 1
    False = 0

doneFlag = None     # Event created in run(); set to request shutdown
isPeer = False      # True when launched as "peer", False when seeding

def ok_close_now():
    # Timer callback: signal the main loop to exit.
    doneFlag.set()

def hours(n):
    """Format a duration of n seconds as a human-readable string."""
    if n == 0:
        return 'complete!'
    try:
        n = int(n)
        assert n >= 0 and n < 5184000  # 60 days
    except:
        return ''
    m, s = divmod(n, 60)
    h, m = divmod(m, 60)
    if h > 0:
        return '%d hour %02d min %02d sec' % (h, m, s)
    else:
        return '%d min %02d sec' % (m, s)

class HeadlessDisplayer:
    # Receives status callbacks from BT1Download and records/prints progress.
    def __init__(self):
        self.done = False
        self.file = ''
        self.percentDone = ''
        self.timeEst = ''
        self.downloadTo = ''
        self.downRate = ''
        self.upRate = ''
        self.shareRating = ''
        self.seedStatus = ''
        self.peerStatus = ''
        self.errors = []
        self.last_update_time = -1

    def finished(self):
        global doneFlag

        self.done = True
        self.percentDone = '100'
        self.timeEst = 'Download Succeeded!'
        self.downRate = ''
        #self.display()

        global isPeer

        print "done and done"

        if isPeer:
            # Daemonize via double fork so the peer keeps seeding in the
            # background after the invoking shell returns; the surviving
            # child seeds for 30 more seconds, then shuts down.
            if os.fork():
                os._exit(0)
                return  # not reached: os._exit() does not return

            os.setsid()
            if os.fork():
                os._exit(0)
                return  # not reached

            # Detach from the controlling terminal.
            os.close(0)
            os.close(1)
            os.close(2)

            t = threading.Timer(30.0, ok_close_now)
            t.start()

    def failed(self):
        self.done = True
        self.percentDone = '0'
        self.timeEst = 'Download Failed!'
        self.downRate = ''
        global doneFlag
        doneFlag.set()
        #self.display()

    def error(self, errormsg):
        #self.errors.append(errormsg)
        self.display()
        global doneFlag
        print errormsg
        doneFlag.set()

    # NOTE(review): dpflag = Event() is a mutable default shared across
    # calls; callers that pass their own dpflag are unaffected.
    def display(self, dpflag = Event(), fractionDone = None, timeEst = None,
            downRate = None, upRate = None, activity = None,
            statistics = None,  **kws):
        # Throttle routine updates to 10/sec, but always show start/finish.
        if self.last_update_time + 0.1 > clock() and fractionDone not in (0.0, 1.0) and activity is not None:
            return
        self.last_update_time = clock()
        if fractionDone is not None:
            self.percentDone = str(float(int(fractionDone * 1000)) / 10)
        if timeEst is not None:
            self.timeEst = hours(timeEst)
        if activity is not None and not self.done:
            self.timeEst = activity
        if downRate is not None:
            self.downRate = '%.1f kB/s' % (float(downRate) / (1 << 10))
        if upRate is not None:
            self.upRate = '%.1f kB/s' % (float(upRate) / (1 << 10))
        if statistics is not None:
            if (statistics.shareRating < 0) or (statistics.shareRating > 100):
                self.shareRating = 'oo  (%.1f MB up / %.1f MB down)' % (float(statistics.upTotal) / (1<<20), float(statistics.downTotal) / (1<<20))
            else:
                self.shareRating = '%.3f  (%.1f MB up / %.1f MB down)' % (statistics.shareRating, float(statistics.upTotal) / (1<<20), float(statistics.downTotal) / (1<<20))
            if not self.done:
                self.seedStatus = '%d seen now, plus %.3f distributed copies' % (statistics.numSeeds,0.001*int(1000*statistics.numCopies))
            else:
                self.seedStatus = '%d seen recently, plus %.3f distributed copies' % (statistics.numOldSeeds,0.001*int(1000*statistics.numCopies))
            self.peerStatus = '%d seen now, %.1f%% done at %.1f kB/s' % (statistics.numPeers,statistics.percentDone,float(statistics.torrentRate) / (1 << 10))
        #print '\n\n\n\n'
        for err in self.errors:
            print 'ERROR:\n' + err + '\n'
        #print 'saving: ', self.file
        #print 'percent done: ', self.percentDone
        #print 'time left: ', self.timeEst
        #print 'download to: ', self.downloadTo
        #print 'download rate: ', self.downRate
        #print 'upload rate: ', self.upRate
        #print 'share rating: ', self.shareRating
        #print 'seed status: ', self.seedStatus
        #print 'peer status: ', self.peerStatus
        #stdout.flush()
        dpflag.set()

    def chooseFile(self, default, size, saveas, dir):
        # Record what is being downloaded and where; honor --saveas.
        self.file = '%s (%.1f MB)' % (default, float(size) / (1 << 20))
        if saveas != '':
            default = saveas
        self.downloadTo = abspath(default)
        return default

    def newpath(self, path):
        self.downloadTo = path

def run(params):
    """Main client loop: parse config, bind a port, download/seed until the
    torrent completes or doneFlag is set."""
    cols = 80

    h = HeadlessDisplayer()
    while 1:    # single pass; `break` is used as structured early-exit
        configdir = ConfigDir('downloadheadless')
        defaultsToIgnore = ['responsefile', 'url', 'priority']
        configdir.setDefaults(defaults,defaultsToIgnore)
        configdefaults = configdir.loadConfig()
        defaults.append(('save_options',0,
            "whether to save the current options as the new default configuration " +
            "(only for btdownloadheadless.py)"))
        try:
            config = parse_params(params, configdefaults)
        except ValueError, e:
            print 'error: ' + str(e) + '\nrun with no args for parameter explanations'
            break
        if not config:
            print get_usage(defaults, 80, configdefaults)
            break
        if config['save_options']:
            configdir.saveConfig(config)
        configdir.deleteOldCacheData(config['expire_cache_data'])

        myid = createPeerID()
        seed(myid)

        global doneFlag
        doneFlag = Event()
        def disp_exception(text):
            print text
        rawserver = RawServer(doneFlag, config['timeout_check_interval'],
                              config['timeout'], ipv6_enable = config['ipv6_enabled'],
                              failfunc = h.failed, errorfunc = disp_exception)
        upnp_type =
                    UPnP_test(config['upnp_nat_access'])
        while True:
            try:
                listen_port = rawserver.find_and_bind(config['minport'], config['maxport'],
                                config['bind'], ipv6_socket_style = config['ipv6_binds_v4'],
                                upnp = upnp_type, randomizer = config['random_port'])
                break
            except socketerror, e:
                # If UPnP forwarding failed, retry once without it.
                if upnp_type and e == UPnP_ERROR:
                    print 'WARNING: COULD NOT FORWARD VIA UPnP'
                    upnp_type = 0
                    continue
                print "error: Couldn't listen - " + str(e)
                h.failed()
                return

        response = get_response(config['responsefile'], config['url'], h.error)
        if not response:
            break

        infohash = sha(bencode(response['info'])).digest()

        dow = BT1Download(h.display, h.finished, h.error, disp_exception, doneFlag,
                        config, response, infohash, myid, rawserver, listen_port,
                        configdir)

        if not dow.saveAs(h.chooseFile, h.newpath):
            break

        if not dow.initFiles(old_style = True):
            break
        if not dow.startEngine():
            dow.shutdown()
            break
        dow.startRerequester()
        dow.autoStats()

        if not dow.am_I_finished():
            h.display(activity = 'connecting to peers')
        # Blocks here until doneFlag is set (download finished or error).
        rawserver.listen_forever(dow.getPortHandler())
        h.display(activity = 'shutting down')
        dow.shutdown()
        break
    try:
        rawserver.shutdown()
    except:
        pass
    if not h.done:
        h.failed()

if __name__ == '__main__':

    if len(argv) != 5:
        print "Incorrect number of arguments"
        print
        print """Usage:
python murder_client.py peer/seed out.torrent OUT.OUT 127.0.0.1

The last parameter is the local ip address, normally 10.x.x.x
"""
        sys.exit(1)

    argv = ["--responsefile", sys.argv[2],
            "--saveas", sys.argv[3],
            "--ip", sys.argv[4]]

    isPeer = sys.argv[1] == "peer"

    # NOTE(review): argv[1:] drops the leading "--responsefile" flag, so the
    # torrent path is passed as a bare first argument -- this appears to rely
    # on parse_params treating a bare leading argument as the responsefile;
    # confirm against BitTornado.download_bt1.parse_params before changing.
    run(argv[1:])
# --- src/murder_make_torrent.py -------------------------------------------
# Copyright 2010 Twitter, Inc.
# Copyright 2010 Larry Gadea
# Copyright 2010 Matt Freels
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Usage: python murder_make_torrent.py <file> <tracker-host:port> <out.torrent>
# Usage: python murder_make_torrent.py deploy.tar.gz tracker.twitter.com:8998 deploy.torrent
# NOTE(review): the angle-bracket placeholders in the usage comments/strings
# appear to have been stripped from this copy of the file.

import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)

from sys import argv, version, exit
from os.path import split
assert version >= '2', "Install Python 2.0 or greater"
from BitTornado.BT1.makemetafile import make_meta_file

if __name__ == '__main__':

    if len(argv) != 4:
        print "Incorrect number of arguments"
        print
        print """Usage:
python murder_make_torrent.py

For example:
python murder_make_torrent.py deploy.tar.gz tracker.twitter.com:8998 deploy.torrent
"""
        exit(1)

    try:
        params = {}
        params["target"] = argv[3]  # output .torrent path
        # Announce URL is built from the tracker host:port argument.
        make_meta_file(argv[1], "http://" + argv[2] + "/announce", params)
    except ValueError, e:
        print str(e)
        exit(1)

# --- src/murder_tracker.py ------------------------------------------------
# Copyright 2010 Twitter, Inc.
# Copyright 2010 Larry Gadea
# Copyright 2010 Matt Freels
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.  You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Usage: python murder_tracker.py
# Usage: python murder_tracker.py

import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)

from BitTornado.BT1.track import track
from sys import argv

if __name__ == '__main__':
    # Run the BitTornado tracker on port 8998 with state in ./data;
    # extra command-line arguments override/extend these defaults.
    args = ["--dfile", "data",
            "--port", "8998"] + argv[1:]
    track(args)