├── .gitignore ├── autonomotorrent ├── __init__.py ├── factory.py ├── __main__.py ├── BTManager.py ├── tools.py ├── upload.py ├── TrackerClient.py ├── bencode.py ├── PieceManager.py ├── BTApp.py ├── download.py ├── bitfield.py ├── ClientIdentifier.py ├── BTProtocol.py ├── MetaInfo.py ├── FileManager.py └── DHTProtocol.py ├── setup.cfg ├── tests ├── unit │ ├── damn_small_linux.torrent │ ├── testBTApp.py │ └── testBTConfig.py ├── complexity │ ├── pygenie.py │ └── cc.py └── pre-commit ├── setup.py ├── README.markdown ├── distribute_setup.py └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | -------------------------------------------------------------------------------- /autonomotorrent/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "0.5.2" 2 | 3 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [build_sphinx] 2 | source-dir = docs/ 3 | build-dir = docs/_build 4 | all_files = 1 5 | -------------------------------------------------------------------------------- /autonomotorrent/factory.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/AutonomoTorrent/master/autonomotorrent/factory.py -------------------------------------------------------------------------------- /tests/unit/damn_small_linux.torrent: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Todo/AutonomoTorrent/master/tests/unit/damn_small_linux.torrent -------------------------------------------------------------------------------- /tests/unit/testBTApp.py: -------------------------------------------------------------------------------- 1 | """Tests cases for BTApp which is the main control point for using 2 | 
AutonomoTorrent 3 | """ 4 | import unittest 5 | 6 | from twisted.internet import reactor 7 | 8 | from autonomotorrent.BTApp import BTApp, BTConfig 9 | 10 | class testBTApp(unittest.TestCase): 11 | """Tests the BTApp which is the main control point for using AutonomoTorrent 12 | """ 13 | def setUp(self): 14 | self.bt_app = BTApp() 15 | 16 | def test_using_file(self): 17 | """Tests the BT App using a known good torrent meta file from disk. 18 | """ 19 | config = BTConfig(torrent_path="tests/unit/damn_small_linux.torrent") 20 | self.bt_app.add_torrent(config) 21 | reactor.callLater(2.5, reactor.stop) 22 | self.bt_app.start_reactor() 23 | 24 | if __name__ == "__main__": 25 | unittest.main() 26 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from distribute_setup import use_setuptools 2 | use_setuptools() 3 | import os 4 | import shutil 5 | import autonomotorrent 6 | from setuptools import setup, find_packages 7 | 8 | 9 | # Start with a clean slate to prevent side-effects 10 | if os.path.exists('dist/'): shutil.rmtree('dist/') 11 | if os.path.exists('build/'): shutil.rmtree('build/') 12 | 13 | 14 | setup( 15 | name = "AutonomoTorrent", 16 | version = autonomotorrent.__version__, 17 | author = "Josh S. Ziegler", 18 | author_email = "josh.s.ziegler@gmail.com", 19 | description = "AutonomoTorrent %s" % autonomotorrent.__version__, 20 | long_description = """A minimal, pure-python BitTorrent client. 
21 | 22 | Supports: 23 | - DHT 24 | - Multi-trackers 25 | """, 26 | license = "GPLv3", 27 | keywords = "bittorrent client", 28 | url = "http://github.com/joshsziegler/AutonomoTorrent", 29 | classifiers = [ 30 | 'Topic :: Internet', 31 | 'Intended Audience :: Developers', 32 | 'Intended Audience :: End Users/Desktop', 33 | 'Development Status :: 3 - Alpha', 34 | 'License :: OSI Approved :: GNU General Public License (GPL)', 35 | ], 36 | # End Meta-Data 37 | packages = find_packages(), 38 | scripts = [], 39 | entry_points = { 40 | 'console_scripts': [ 41 | 'autonomo = autonomotorrent.__main__:console', 42 | ], 43 | 'gui_scripts':[ 44 | ] 45 | }, 46 | install_requires = [ 47 | 'Twisted == 10.2', 48 | ], 49 | package_data = { 50 | '': ['*.markdown'], 51 | }, 52 | exclude_package_data = { 53 | '': [], 54 | }, 55 | zip_safe = True, 56 | ) 57 | -------------------------------------------------------------------------------- /autonomotorrent/__main__.py: -------------------------------------------------------------------------------- 1 | """ 2 | """ 3 | import os 4 | from twisted.python import log 5 | from autonomotorrent.BTApp import BTApp, BTConfig 6 | from autonomotorrent import __version__ as VERSION 7 | 8 | def main(opt, btfiles): 9 | app = BTApp(save_dir=opt.save_dir, 10 | listen_port=opt.listen_port, 11 | enable_DHT=opt.enable_dht, 12 | remote_debugging=opt.remote_debugging) 13 | for torrent_file in btfiles: 14 | try: 15 | log.msg('Adding: {0}'.format(torrent_file)) 16 | config = BTConfig(torrent_file) 17 | config.downloadList = None 18 | app.add_torrent(config) 19 | 20 | except: 21 | log.err() 22 | log.err("Failed to add {0}".format(torrent_file)) 23 | 24 | app.start_reactor() 25 | 26 | def console(): 27 | print("AutonomoTorrent v{0}".format(VERSION)) 28 | from optparse import OptionParser 29 | 30 | usage = 'usage: %prog [options] torrent1 torrent2 ...' 
31 | parser = OptionParser(usage=usage) 32 | parser.add_option('-o', '--output_dir', action='store', type='string', 33 | dest='save_dir', default='.', 34 | help='save download file to which directory') 35 | parser.add_option('-l', '--listen-port', action='store', type='int', 36 | dest='listen_port', default=6881, 37 | help='Bittorrent listen port') 38 | parser.add_option("-d", "--enable_dht", action="store_true", 39 | dest="enable_dht", help="enable the DHT extension") 40 | parser.add_option("--remote_debug", action="store_true", 41 | dest="remote_debugging", 42 | help="enable remote debugging through twisted's manhole" + \ 43 | " telnet service on port 9999 (username & password: admin)") 44 | 45 | options, args = parser.parse_args() 46 | if(len(args) > 0): 47 | main(options, args) 48 | else: 49 | print "Error: No torrent files given." 50 | print usage 51 | 52 | if __name__ == '__main__': 53 | console() 54 | -------------------------------------------------------------------------------- /tests/complexity/pygenie.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import sys 5 | from glob import glob 6 | from optparse import OptionParser 7 | 8 | import cc 9 | 10 | 11 | COMMANDS = ['all', 'complexity', ] 12 | USAGE = 'usage: pygenie command [directories|files|packages]' 13 | 14 | 15 | class CommandParser(object): 16 | 17 | def __init__ (self, optparser, commands): 18 | self.commands = commands or [] 19 | self.optparser = optparser 20 | 21 | def parse_args(self, args=None, values=None): 22 | args = args or sys.argv[1:] 23 | if len(args) < 1: 24 | self.optparser.error('please provide a valid command') 25 | 26 | command = args[0] 27 | if command not in self.commands: 28 | self.optparser.error("'%s' is not a valid command" % command) 29 | 30 | options, values = self.optparser.parse_args(args[1:], values) 31 | return command, options, values 32 | 33 | 34 | def find_module(fqn): 35 | join = 
os.path.join 36 | exists = os.path.exists 37 | partial_path = fqn.replace('.', os.path.sep) 38 | for p in sys.path: 39 | path = join(p, partial_path, '__init__.py') 40 | if exists(path): 41 | return path 42 | path = join(p, partial_path + '.py') 43 | if exists(path): 44 | return path 45 | raise Exception('invalid module') 46 | 47 | 48 | def main(): 49 | from optparse import OptionParser 50 | 51 | parser = OptionParser(usage='./cc.py command [options] *.py') 52 | parser.add_option('-v', '--verbose', 53 | dest='verbose', action='store_true', default=False, 54 | help='print detailed statistics to stdout') 55 | parser = CommandParser(parser, COMMANDS) 56 | command, options, args = parser.parse_args() 57 | 58 | items = set() 59 | for arg in args: 60 | if os.path.isdir(arg): 61 | for f in glob(os.path.join(arg, '*.py')): 62 | if os.path.isfile(f): 63 | items.add(os.path.abspath(f)) 64 | elif os.path.isfile(arg): 65 | items.add(os.path.abspath(arg)) 66 | else: 67 | # this should be a package' 68 | items.add(find_module(arg)) 69 | 70 | for item in items: 71 | code = open(item).read() 72 | if command in ('all', 'complexity'): 73 | stats = cc.measure_complexity(code, item) 74 | pp = cc.PrettyPrinter(sys.stdout, verbose=options.verbose) 75 | pp.pprint(item, stats) 76 | 77 | 78 | if __name__ == '__main__': 79 | main() 80 | 81 | -------------------------------------------------------------------------------- /autonomotorrent/BTManager.py: -------------------------------------------------------------------------------- 1 | # 2 | # -*-encoding:gb2312-*- 3 | 4 | from twisted.internet import defer 5 | from PieceManager import BTPieceManager 6 | from tools import SpeedMonitor, generate_peer_id, sleep 7 | from factory import ConnectionManager 8 | from TrackerClient import BTTrackerClient 9 | 10 | class BTManager (object): 11 | def __init__(self, app, config): 12 | self.app = app 13 | self.config = config 14 | self.metainfo = config.metainfo 15 | self.info_hash = 
class BTManager (object):
    """Coordinates one torrent: piece bookkeeping, peer connections,
    tracker announces and transfer-rate accounting.

    @param app: owning BTApp instance
    @param config: BTConfig carrying the parsed meta-info
    """
    def __init__(self, app, config):
        self.app = app
        self.config = config
        self.metainfo = config.metainfo
        self.info_hash = self.metainfo.info_hash
        # Both monitors sample over a 5-second window.
        self.downloadSpeedMonitor = SpeedMonitor(5)
        self.uploadSpeedMonitor = SpeedMonitor(5)
        self.my_peer_id = generate_peer_id()
        self.connectionManager = ConnectionManager(self)
        self.pieceManager = BTPieceManager(self)
        if len(self.metainfo.announce_list) > 0:
            self.bttrackerclient = BTTrackerClient(self)
        else:
            raise Exception("Torrent needs at least one tracker")
        self.status = None

    def startDownload(self):
        """Start every subsystem and mark the torrent as running."""
        self.pieceManager.start()

        self.connectionManager.start()

        self.downloadSpeedMonitor.start()
        self.uploadSpeedMonitor.start()

        self.bttrackerclient.start()

        self.status = 'running'

    def stopDownload(self):
        """Stop every subsystem and mark the torrent as stopped."""
        self.pieceManager.stop()

        self.connectionManager.stop()

        self.downloadSpeedMonitor.stop()
        self.uploadSpeedMonitor.stop()

        self.bttrackerclient.stop()

        self.status = 'stopped'

    def get_speed(self):
        """Returns the speed in kibibit per second (Kibit/s).
        """
        return {
            "down": self.downloadSpeedMonitor.get_speed(),
            "up": self.uploadSpeedMonitor.get_speed() }

    def get_num_connections(self):
        """Return counts of active outgoing ('client') and incoming
        ('server') peer connections."""
        return {
            "client": len(self.connectionManager.clientFactory.active_connection),
            "server": len(self.connectionManager.serverFactory.active_connection)}

    def add_peers(self, peers):
        """Adds peers to the torrent for downloading pieces.

        @param peers list of tuples e.g. [('173.248.194.166', 12005),
            ('192.166.145.8', 13915)]
        """
        self.connectionManager.clientFactory.updateTrackerPeers(peers)

    def exit(self):
        """Stop the torrent (if running) and release every attribute."""
        if self.status == 'running' :
            self.stopDownload()

        # Bug fix: the original deleted keys while iterating over
        # self.__dict__, which raises "RuntimeError: dictionary changed
        # size during iteration". clear() drops everything in one step.
        self.__dict__.clear()
33 | """ 34 | def __init__(self, period=None): 35 | self.bytes = 0 36 | self.start_time = None 37 | self.period = period 38 | self.bytes_record = 0 39 | self.time_record = None 40 | self.speed = 0 41 | self.observer = None 42 | 43 | def registerObserver(self, observer): 44 | self.observer = observer 45 | 46 | @defer.inlineCallbacks 47 | def start(self): 48 | self.bytes = 0 49 | self.start_time = time.time() 50 | self.status = 'started' 51 | 52 | while self.status == 'started': 53 | if not self.period: 54 | break 55 | self.bytes_record = self.bytes 56 | self.time_record = time.time() 57 | yield sleep(self.period) 58 | self.speedCalc() 59 | 60 | def stop(self): 61 | if self.observer: 62 | self.observer = None 63 | 64 | self.status = 'stopped' 65 | 66 | def addBytes(self, bytes): 67 | self.bytes += bytes 68 | if self.observer: 69 | self.observer.addBytes(bytes) 70 | 71 | def speedCalc(self): 72 | curTime = time.time() 73 | dq = self.bytes - self.bytes_record 74 | dt = curTime - self.time_record 75 | self.speed = float(dq) / dt 76 | self.time_record = curTime 77 | self.bytes_record = self.bytes 78 | 79 | def get_speed(self): 80 | """Returns the speed in kibibit per second (Kibit/s) no matter what the 81 | period was. Returns None is period is None. 82 | 83 | """ 84 | if self.speed and self.period: 85 | return self.speed / 1024 86 | else: 87 | return 0 88 | 89 | def generate_peer_id(): 90 | myid = 'M' + '7-2-0' + '--' 91 | myid += hashlib.sha1(str(time.time())+ ' ' + str(os.getpid())).hexdigest()[-12:] 92 | assert len(myid) == 20 93 | return myid 94 | 95 | -------------------------------------------------------------------------------- /README.markdown: -------------------------------------------------------------------------------- 1 | ##About 2 | This is a fork of [ABTorrent](http://code.google.com/p/abtorrent/) which is a 3 | pure Python implementation of a minimal BitTorrent client. The name 4 | AutonomoTorrent is short for Autonomous Torrent. 
Autonomous in this context 5 | [means](http://www.merriam-webster.com/dictionary/autonomous) "existing or 6 | capable of existing independently," which refers to its pure Python nature. 7 | 8 | Autonomo came about because I wanted to use BitTorrent in another Python 9 | project of mine ([AdroitGM](https://github.com/joshsziegler/AdroitGM)), but 10 | found that most of the well-known Python clients relied upon 11 | [Rasterbar's libtorrent](http://www.rasterbar.com/products/libtorrent/) which 12 | is in C. This worked well for dedicated clients, but all I wanted was a 13 | no-frills, good-enough client to integrate into another application for 14 | distributed file sharing. I looked at several pure-Python projects before 15 | settling on ABTorrent due to its minimal featureset and relatively 16 | up-to-date codebase (have a look at the old "Mainline" client code to get an 17 | idea of what I was comparing it against). 18 | 19 | Please keep in mind that this will remain a minimal, pure python client. I 20 | have put it on GitHub for easier forking for those with more grandiose 21 | desires. I *will* happily accept pull requests or patches for bugs however. 22 | 23 | ##License 24 | As the original ABTorrent, this is released under the 25 | [GPLv3](http://www.gnu.org/licenses/gpl.html). 26 | 27 | ##Install 28 | 29 | ``` 30 | git clone git://github.com/joshsziegler/AutonomoTorrent.git 31 | cd AutonomoTorrent 32 | sudo python setup.py install 33 | autonomo ~/torrents/damn_small_linux.torrent 34 | ``` 35 | 36 | If you have issues with Twisted while running setup, first make sure you have 37 | the python dev stuff installed (`sudo apt-get install python-dev build-essential` 38 | on Ubuntu). If that doesn't work, you might be better off simply installing it 39 | manually. 40 | 41 | - Ubuntu: `sudo apt-get install python-twisted` 42 | - Windows: Get the [installer here](http://twistedmatrix.com/trac/wiki/Downloads#Windows). 
43 | 44 | ##Development 45 | Develpoment is slow to non-existent at the moment as real life has caught up 46 | with me. If you want to help though, the biggest need is to refactor 47 | the code base as it is mostly unchanged from the original ABTorrent, and is 48 | really ugly in places. The next biggest issue would be to write unit tests for 49 | everything. 50 | 51 | Either way, when submitting patches or pull requests, please at least run your 52 | code though pylint and the cylomatic complexity tests bundled with the 53 | test/pre-commit script. You don't have to fix everything, but at least attempt 54 | to stick to PEP 8 and keep your CC under 8. 55 | 56 | ``` 57 | cd AutonomoTorrent 58 | tests/pre-commit 59 | ``` 60 | 61 | -------------------------------------------------------------------------------- /tests/unit/testBTConfig.py: -------------------------------------------------------------------------------- 1 | """ 2 | """ 3 | import unittest 4 | from autonomotorrent.BTApp import BTApp, BTConfig 5 | 6 | class testBTConfig(unittest.TestCase): 7 | """Tests the BT Config which holds information for a single torrent. 8 | """ 9 | def test_bt_file(self): 10 | """Tests the config using a BT meta file on disk. 11 | """ 12 | config = BTConfig(torrent_path="tests/unit/damn_small_linux.torrent") 13 | self.assertNotEqual(config, None) 14 | config.check() 15 | 16 | def test_full_meta_info(self): 17 | """Tests the config using a hardcoded meta info dicionarity with most 18 | of the values. 
"""Unit tests for BTConfig, the per-torrent configuration object."""
import unittest
from autonomotorrent.BTApp import BTApp, BTConfig

# Concatenated 20-byte SHA-1 piece digests for the test fixture.
# Fix: this 240-byte opaque literal was duplicated verbatim in two tests;
# it is now defined once and shared.
PIECES = "\xbbBOX\n\xb5P#\xb5\x11q\x8e\xd3\x7fI\x12\xa6\xc3v\xc9\xeav\x18\xd3$\xce\xb6HB\xc3\x1a\xb0\x1eW#\xa8<\x11\xb4w\xe2Y\xd3\x95\x93f\x90]\x19\x87\xda\xb3\xf6\xf9\x9e\xed\xad\x1a\xf6\x0b\x04m7\xbf\x82pj\xe0W\xb23\x92&l\x9a\xbe\xff\xb7\xce&!\x88\xc4\xdbK\xc9\xad{\xc1\xd4\xf5o\xd8\\47\xeem\xaeVSe\xce\xcaH\xbf0\x1bfZ\xc77V\xec\x9c{\xd35\xef'S!\xe5\xd4>\x03\xa9\x1b\x90\x0et\r\xd4\xb9\x0cdh\x0fMJ\x86\x9b\xf6Gc+e\xe2\xd8\xc4\xf7\xaa\xa4\x14\x80\xd1\x9f&4v\x13\xff+\xe7\xcf\xeen\xbd\xad\x96a\xa9\x00\xc7\x02\xdcU\xda\xe7\xb40\x14E\x96C\x8d)\xd7\x8d\xa8\x8b\xbboe\xf7u\x18!\xe7\x8e\x1e\x02\x17\x8ab\xa3u\xb0\n!\xf2\x01Q\x8eK\x94\xd3\xee\x1c\xf7\xde\xdb\x19\x9e\xe3\xfd+\xc6\x07|\x8c\xec{\xeau.\xf8\x92\x9a/\xf3W\x85\xf5\x8f\xfeC\xc1\xabj\xbc\xe5,7\xcdh\xfe\xa6\r"

class testBTConfig(unittest.TestCase):
    """Tests the BT Config which holds information for a single torrent.
    """
    def test_bt_file(self):
        """Tests the config using a BT meta file on disk.
        """
        config = BTConfig(torrent_path="tests/unit/damn_small_linux.torrent")
        self.assertNotEqual(config, None)
        config.check()

    def test_full_meta_info(self):
        """Tests the config using a hardcoded meta info dictionary with most
        of the values.
        """
        full_torrent = {'creation date': 1316733309,
            'announce': 'http://107.10.137.161:8082/announce',
            'info': {'length': 26039,
                'piece length': 2048,
                'name': '378ae2e61395da95c3cddf7d2acfc491.png',
                'private': 0,
                'pieces': PIECES},
            'encoding': 'UTF-8'}

        config = BTConfig(meta_info=full_torrent)
        self.assertNotEqual(config, None)
        config.check()

    def test_minimal_meta_info(self):
        """Tests the config using a hardcoded meta info dictionary with the
        minimal set of values/keys.
        """
        min_torrent = {'info': {'length': 26039,
            'piece length': 2048,
            'name': '378ae2e61395da95c3cddf7d2acfc491.png',
            'pieces': PIECES}}

        config = BTConfig(meta_info=min_torrent)
        self.assertNotEqual(config, None)
        config.check()

if __name__ == '__main__':
    unittest.main()
self.uploadSpeedMonitor.start() 36 | self.uploadSpeedMonitor.registerObserver(self.protocol.factory.uploadSpeedMonitor) 37 | 38 | self.choke(False) 39 | 40 | self.protocol.transport.registerProducer(self, False) 41 | 42 | self.status = 'started' 43 | 44 | def stop(self): 45 | if self.status == 'stopped': 46 | return 47 | 48 | self.uploadSpeedMonitor.stop() 49 | 50 | self.protocol.transport.unregisterProducer() 51 | 52 | del self.protocol 53 | del self.btm 54 | del self.pieceManager 55 | 56 | self.status = 'stopped' 57 | 58 | def pause(self): 59 | pass 60 | 61 | def resume(self): 62 | pass 63 | 64 | 65 | def _interested(self, val): 66 | self.peer_interested = bool(val) 67 | 68 | def _request(self, idx, begin, length): 69 | if not self.pieceManager.doIHave(idx): # I don't have 70 | return 71 | 72 | self.upload_todo.append((idx, (begin, length))) 73 | 74 | # data = self.pieceManager.getPieceData(idx, begin, length) 75 | # if data : 76 | # self.protocol.send_piece(idx, begin, data) 77 | 78 | if self.status == 'idle' : 79 | self.resumeProducing() 80 | 81 | def _cancel(self, idx, begin, length): 82 | task = idx, (begin, length) 83 | if task in self.upload_todo : 84 | self.upload_todo.remove(task) 85 | 86 | def choke(self, val): 87 | am_choke = bool(val) 88 | if self.am_choke is am_choke : 89 | return 90 | 91 | if am_choke : 92 | self.protocol.send_choke() 93 | else : 94 | self.protocol.send_unchoke() 95 | 96 | self.am_choke = am_choke 97 | 98 | def _uploadMonitor(self, _type, data): 99 | self.uploadSpeedMonitor.addBytes(len(data)) 100 | 101 | # called by transport and do write 102 | def resumeProducing(self): 103 | for i in range(len(self.upload_todo)) : 104 | idx, (begin, length) = self.upload_todo[i] 105 | data = self.pieceManager.getPieceData(idx, begin, length) 106 | if data : 107 | self.protocol.send_piece(idx, begin, data) 108 | self.status = 'uploading' 109 | del self.upload_todo[i] 110 | break 111 | else: 112 | self.status = 'idle' 113 | 114 | def 
#
# -*-encoding:gb2312-*-

from twisted.internet import reactor

from bencode import bencode, bdecode, BTError

from twisted.python import log
from twisted.internet import defer
from twisted.web.client import getPage

from tools import sleep

from urllib import urlencode
import hashlib
import socket
import struct


class BTTrackerClient (object):
    """Announces this torrent to every tracker in the announce-list and
    feeds the peers each tracker returns into the connection manager.
    """
    def __init__(self, btm):
        self.btm = btm
        # NOTE(review): 'reciever'/'timmer' look like typos for
        # receiver/timer; left unchanged since other modules may reference
        # these attribute names.
        self.reciever = btm.connectionManager.clientFactory
        self.timmer = {}
        # Fallback re-announce interval (seconds) when the tracker
        # response carries no 'interval' key: 15 minutes.
        self.interval = 15 * 60

    @defer.inlineCallbacks
    def start(self):
        """Send the initial 'started' announce to every known tracker,
        staggered one second apart."""
        self.status = 'started'

        info_hash = self.btm.metainfo.info_hash
        peer_id = self.btm.my_peer_id
        port = self.btm.app.btServer.listen_port
        request = {
            'info_hash' : info_hash,
            'peer_id' : peer_id,
            'port' : port,
            'compact' : 1,
            #'key' : 'abcd', # This is optional anyways
            'uploaded' : 0,
            'downloaded' : 0,
            'left' : 100,
            'event' : 'started'
            }
        request_encode = urlencode(request)

        for url in self.btm.metainfo.announce_list :
            self.getPeerList(url, request_encode)
            yield sleep(1)

    def stop(self):
        self.status = 'stopped'

    @defer.inlineCallbacks
    def getPeerList(self, url, data):
        """TODO: This is in serious need of refactoring...

        Fetch the announce URL; on failure retry after self.interval; on
        success parse the peer list (compact binary format first, then the
        dictionary format), hand it to the BTManager, and schedule the next
        announce after the tracker-suggested interval.
        """
        if self.status == 'stopped':
            return

        try:
            page = yield getPage(url + '?' + data)

        except Exception as error:
            log.err('Failed to connect to tracker: {0}'.format(url))

            # Retry the same announce after the fallback interval.
            yield sleep(self.interval)
            self.getPeerList(url, data)

        else:
            try:
                res = bdecode(page)
            except BTError:
                log.err("Received an invalid peer list from the tracker: " +\
                    "{0}".format(url))
            else:
                if len(res) == 1:
                    log.msg('Tracker: {0}'.format(res)) # TODO: What is this?
                    return

                peers = res['peers']
                peers_list = []
                try: # Try parsing in binary format first
                    # Compact format: 6 bytes per peer -- 4-byte IPv4
                    # address + 2-byte big-endian port.
                    while peers:
                        addr = socket.inet_ntoa(peers[:4])
                        port = struct.unpack('!H', peers[4:6])[0]
                        peers_list.append((addr, port))
                        peers = peers[6:]
                except: # Now try parsing in dictionary format
                    try:
                        for p in peers:
                            peers_list.append((p["ip"], p["port"]))
                    except:
                        log.err("Received an invalid peer list from the " +\
                            "tracker: {0}".format(url))

                log.msg('Received {0} peers from tracker: {1}'.format(
                    len(peers_list), url))
                self.btm.add_peers(peers_list)
                # Prefer the tracker-provided interval when present.
                interval = res.get('interval', self.interval)
                yield sleep(interval)
                self.getPeerList(url, data)
10 | 11 | # Written by Petru Paler 12 | 13 | class BTError(Exception): 14 | pass 15 | 16 | def decode_int(x, f): 17 | f += 1 18 | newf = x.index('e', f) 19 | n = int(x[f:newf]) 20 | if x[f] == '-': 21 | if x[f + 1] == '0': 22 | raise ValueError 23 | elif x[f] == '0' and newf != f+1: 24 | raise ValueError 25 | return (n, newf+1) 26 | 27 | def decode_string(x, f): 28 | colon = x.index(':', f) 29 | n = int(x[f:colon]) 30 | if x[f] == '0' and colon != f+1: 31 | raise ValueError 32 | colon += 1 33 | return (x[colon:colon+n], colon+n) 34 | 35 | def decode_list(x, f): 36 | r, f = [], f+1 37 | while x[f] != 'e': 38 | v, f = decode_func[x[f]](x, f) 39 | r.append(v) 40 | return (r, f + 1) 41 | 42 | def decode_dict(x, f): 43 | r, f = {}, f+1 44 | while x[f] != 'e': 45 | k, f = decode_string(x, f) 46 | r[k], f = decode_func[x[f]](x, f) 47 | return (r, f + 1) 48 | 49 | decode_func = {} 50 | decode_func['l'] = decode_list 51 | decode_func['d'] = decode_dict 52 | decode_func['i'] = decode_int 53 | decode_func['0'] = decode_string 54 | decode_func['1'] = decode_string 55 | decode_func['2'] = decode_string 56 | decode_func['3'] = decode_string 57 | decode_func['4'] = decode_string 58 | decode_func['5'] = decode_string 59 | decode_func['6'] = decode_string 60 | decode_func['7'] = decode_string 61 | decode_func['8'] = decode_string 62 | decode_func['9'] = decode_string 63 | 64 | def bdecode(x): 65 | try: 66 | r, l = decode_func[x[0]](x, 0) 67 | except (IndexError, KeyError, ValueError): 68 | raise BTError("not a valid bencoded string") 69 | if l != len(x): 70 | #raise BTFailure("invalid bencoded value (data after valid prefix)") 71 | #print x[l:] 72 | pass 73 | return r 74 | 75 | from types import StringType, IntType, LongType, DictType, ListType, TupleType 76 | 77 | 78 | class Bencached(object): 79 | 80 | __slots__ = ['bencoded'] 81 | 82 | def __init__(self, s): 83 | self.bencoded = s 84 | 85 | def encode_bencached(x,r): 86 | r.append(x.bencoded) 87 | 88 | def encode_int(x, r): 89 | 
r.extend(('i', str(x), 'e')) 90 | 91 | def encode_bool(x, r): 92 | if x: 93 | encode_int(1, r) 94 | else: 95 | encode_int(0, r) 96 | 97 | def encode_string(x, r): 98 | r.extend((str(len(x)), ':', x)) 99 | 100 | def encode_list(x, r): 101 | r.append('l') 102 | for i in x: 103 | encode_func[type(i)](i, r) 104 | r.append('e') 105 | 106 | def encode_dict(x,r): 107 | r.append('d') 108 | ilist = x.items() 109 | ilist.sort() 110 | for k, v in ilist: 111 | r.extend((str(len(k)), ':', k)) 112 | encode_func[type(v)](v, r) 113 | r.append('e') 114 | 115 | encode_func = {} 116 | encode_func[Bencached] = encode_bencached 117 | encode_func[IntType] = encode_int 118 | encode_func[LongType] = encode_int 119 | encode_func[StringType] = encode_string 120 | encode_func[ListType] = encode_list 121 | encode_func[TupleType] = encode_list 122 | encode_func[DictType] = encode_dict 123 | 124 | try: 125 | from types import BooleanType 126 | encode_func[BooleanType] = encode_bool 127 | except ImportError: 128 | pass 129 | 130 | def bencode(x): 131 | r = [] 132 | encode_func[type(x)](x, r) 133 | return ''.join(r) 134 | -------------------------------------------------------------------------------- /autonomotorrent/PieceManager.py: -------------------------------------------------------------------------------- 1 | """ 2 | """ 3 | import hashlib 4 | 5 | from twisted.python import log 6 | from bitfield import Bitfield 7 | from FileManager import BTFileManager, BTFileError, BTHashTestError 8 | 9 | class BTPieceManager: 10 | 11 | slice_size = 2**14 12 | 13 | def __init__(self, btm): 14 | self.btm = btm 15 | self.metainfo = btm.metainfo 16 | self.connectionManager = btm.connectionManager 17 | 18 | self.btfiles = BTFileManager(btm) 19 | 20 | self.bitfield = self.btfiles.bitfieldHave 21 | 22 | metainfo = self.metainfo 23 | self.piece_length = metainfo.piece_length 24 | self.pieces_size = metainfo.pieces_size 25 | self.pieces_hash = metainfo.pieces_hash 26 | 27 | self.buffer = {} 28 | 29 | 
self.bfNeed = self.btfiles.bitfieldNeed 30 | 31 | self.pieceDownload = {} # [idx]: [todo], [doing], [done] 32 | self.pieceTodo = {} 33 | self.pieceDoing = {} 34 | self.pieceDone = {} 35 | 36 | def start(self) : 37 | self.btfiles.start() 38 | 39 | def stop(self) : 40 | self.btfiles.stop() 41 | 42 | def do_slice(self, beg, end): 43 | slice_list = [] 44 | r = range(beg, end, self.slice_size) 45 | for beg in r[:-1] : 46 | slice_list.append((beg, self.slice_size)) 47 | slice_list.append((r[-1], end-r[-1])) 48 | return slice_list 49 | 50 | def __getPieceSlice(self, idx): 51 | if idx == self.pieces_size-1: 52 | return self.do_slice(0, self.metainfo.last_piece_length) 53 | else: 54 | return self.do_slice(0, self.piece_length) 55 | 56 | def amInterested(self, idx): 57 | if type(idx) is Bitfield: 58 | try: 59 | for i in (self.bfNeed & idx): 60 | return True 61 | else: 62 | return False 63 | except TypeError: 64 | return False 65 | else: 66 | return idx in self.bfNeed 67 | 68 | def doIHave(self, index): 69 | return self.bitfield[index] 70 | 71 | def getMorePieceTask(self, peer_bf, num_task=5): 72 | if num_task == 0: 73 | return None 74 | tasks = [] 75 | for idx in (peer_bf & self.bfNeed) : 76 | while True: 77 | task = self.getPieceTask(idx) 78 | if not task : 79 | break 80 | tasks.append(task) 81 | if len(tasks) == num_task: 82 | return tasks 83 | 84 | def getPieceTask(self, idx): 85 | assert idx in self.bfNeed 86 | if idx not in self.pieceDownload: 87 | slice_list = self.__getPieceSlice(idx) 88 | self.pieceDownload[idx] = [slice_list, [], []] 89 | 90 | task_to_do, task_doing, task_done = self.pieceDownload[idx] 91 | if not task_to_do: 92 | return None 93 | my_task = task_to_do[0] 94 | del task_to_do[0] 95 | task_doing.append(my_task) 96 | return idx, my_task 97 | 98 | def failedPieceTask(self, idx, task): 99 | task_to_do, task_doing, task_done = self.pieceDownload[idx] 100 | assert task in task_doing 101 | task_doing.remove(task) 102 | task_to_do.append(task) 103 | 104 | def 
def finishPieceTask(self, idx, task, data):
    """Record a completed slice; once every slice of the piece is in,
    reassemble it in offset order, write it (hash-checked), and broadcast
    HAVE on success.  (Method of PieceManager.)"""
    task_to_do, task_doing, task_done = self.pieceDownload[idx]
    assert task in task_doing
    task_doing.remove(task)
    task_done.append((task, data))

    if not task_to_do and not task_doing:
        # Reassemble the piece: sort completed slices by their offset.
        task_done.sort(key=lambda entry: entry[0][0])
        data = ''.join(block for _t, block in task_done)
        try:
            self.btfiles.writePiece(idx, data)  # raises on SHA1 mismatch
            self.bitfield[idx] = 1
            self.bfNeed[idx] = 0
        except BTHashTestError:
            # Corrupt piece: drop all progress so it gets re-downloaded.
            del self.pieceDownload[idx]
            if idx == self.pieces_size - 1:
                # NOTE(review): do_slice_tail is not defined anywhere in the
                # visible code -- confirm it exists, else this path raises
                # AttributeError.
                self.do_slice_tail()
        else:
            self.connectionManager.broadcastHave(idx)

def getPieceData(self, index, beg, length):
    """Return `length` bytes of piece `index` starting at `beg`, or None
    if we do not have the piece or it cannot be read."""
    if not self.doIHave(index):
        return None
    piece = self.btfiles.readPiece(index)
    if piece:
        return piece[beg:(beg + length)]
    return None


# --- autonomotorrent/BTApp.py --------------------------------------------
"""
"""
import sys
import os  # FIX: 'os' was imported twice; the duplicate import is removed.

from twisted.python import log
from twisted.internet import reactor
from twisted.internet import task

from autonomotorrent.BTManager import BTManager
from autonomotorrent.factory import BTServerFactories
from autonomotorrent.MetaInfo import BTMetaInfo
from autonomotorrent.DHTProtocol import DHTProtocol


class BTConfig(object):
    """Per-torrent configuration: wraps a BTMetaInfo built either from a
    .torrent file on disk or from an in-memory meta_info dict."""

    def __init__(self, torrent_path=None, meta_info=None):
        if torrent_path:
            self.metainfo = BTMetaInfo(path=torrent_path)
        elif meta_info:
            self.metainfo = BTMetaInfo(meta_info=meta_info)
        else:
            raise Exception("Must provide either a torrent path or meta_info.")
        self.info_hash = self.metainfo.info_hash
        self.downloadList = None  # indices of files to fetch; None == all

    def check(self):
        """Default downloadList to every file and log what will be fetched."""
        if self.downloadList is None:
            self.downloadList = range(len(self.metainfo.files))
        for i in self.downloadList:
            f = self.metainfo.files[i]
            size = f['length']
            name = f['path']
            log.msg("File: {0} Size: {1}".format(name, size)) # TODO: Do we really need this?
class BTApp:
    """Top-level application object: owns the TCP/UDP listeners, the
    optional DHT, and one BTManager per active torrent (keyed by the
    torrent's info_hash)."""

    def __init__(self, save_dir=".",
                 listen_port=6881,
                 enable_DHT=False,
                 remote_debugging=False):
        """
        @param remote_debugging enables telnet login via port 9999 with a
        username and password of 'admin'
        """
        log.startLogging(sys.stdout)  # Start logging to stdout
        self.save_dir = save_dir
        self.listen_port = listen_port
        self.enable_DHT = enable_DHT
        self.tasks = {}  # info_hash -> BTManager
        self.btServer = BTServerFactories(self.listen_port)
        reactor.listenTCP(self.listen_port, self.btServer)

        if enable_DHT:
            log.msg("Turning DHT on.")
            self.dht = DHTProtocol()
            reactor.listenUDP(self.listen_port, self.dht)

        if remote_debugging:
            log.msg("Turning remote debugging on. You may login via telnet " +
                    "on port 9999 username & password are 'admin'")
            import twisted.manhole.telnet
            dbg = twisted.manhole.telnet.ShellFactory()
            dbg.username = "admin"
            dbg.password = "admin"
            dbg.namespace['app'] = self
            reactor.listenTCP(9999, dbg)

    def add_torrent(self, config):
        """Register and start a torrent; returns its info_hash."""
        config.check()
        info_hash = config.info_hash
        if info_hash in self.tasks:
            log.msg('Torrent {0} already in download list'.format(
                config.metainfo.pretty_info_hash))
        else:
            btm = BTManager(self, config)
            self.tasks[info_hash] = btm
            btm.startDownload()
        return info_hash

    def stop_torrent(self, key):
        """Stop (but keep) the torrent registered under `key`."""
        btm = self.tasks.get(key)
        if btm is not None:
            btm.stopDownload()

    def remove_torrent(self, key):
        """Tear down the torrent registered under `key`."""
        btm = self.tasks.get(key)
        if btm is not None:
            btm.exit()

    def stop_all_torrents(self):
        """Stop every registered torrent."""
        for btm in self.tasks.itervalues():
            btm.stopDownload()

    def get_status(self):
        """Returns a dictionary of stats on every torrent and total speed.
        """
        status = {}
        total_up = 0
        total_down = 0
        for torrent_hash, bt_manager in self.tasks.iteritems():
            pretty_hash = bt_manager.metainfo.pretty_info_hash
            speed = bt_manager.get_speed()
            num_connections = bt_manager.get_num_connections()
            status[pretty_hash] = {
                "state": bt_manager.status,
                "speed_up": speed["up"],
                "speed_down": speed["down"],
                "num_seeds": num_connections["server"],
                "num_peers": num_connections["client"],
            }
            total_up += speed["up"]
            total_down += speed["down"]
        # "all" only exists when at least one torrent is registered,
        # matching the original's lazy creation of the aggregate entry.
        if status:
            status["all"] = {
                "speed_up": total_up,
                "speed_down": total_down,
            }
        return status

    def start_reactor(self):
        """Hand control to the twisted reactor (blocks)."""
        reactor.run()
97 | """ 98 | status = {} 99 | for torrent_hash, bt_manager in self.tasks.iteritems(): 100 | pretty_hash = bt_manager.metainfo.pretty_info_hash 101 | speed = bt_manager.get_speed() 102 | num_connections = bt_manager.get_num_connections() 103 | 104 | status[pretty_hash] = { 105 | "state": bt_manager.status, 106 | "speed_up": speed["up"], 107 | "speed_down": speed["down"], 108 | "num_seeds": num_connections["server"], 109 | "num_peers": num_connections["client"], 110 | } 111 | try: 112 | status["all"]["speed_up"] += status[pretty_hash]["speed_up"] 113 | status["all"]["speed_down"] += status[pretty_hash]["speed_down"] 114 | except KeyError: 115 | status["all"] = { 116 | "speed_up": status[pretty_hash]["speed_up"], 117 | "speed_down": status[pretty_hash]["speed_down"] 118 | } 119 | 120 | 121 | return status 122 | 123 | def start_reactor(self): 124 | reactor.run() 125 | -------------------------------------------------------------------------------- /autonomotorrent/download.py: -------------------------------------------------------------------------------- 1 | """ 2 | """ 3 | from twisted.internet import reactor, defer 4 | 5 | from tools import SpeedMonitor, sleep 6 | from bitfield import Bitfield 7 | 8 | class BTDownload(object) : 9 | 10 | task_max_size = 5 11 | 12 | def __init__(self, protocol): 13 | self.protocol = protocol 14 | 15 | self.piece_doing = [] 16 | self.piece_done = [] 17 | 18 | self.peer_choke = None 19 | self.am_interested = None 20 | 21 | self.downloadSpeedMonitor = SpeedMonitor() 22 | 23 | self.task_max_size = 5 24 | 25 | def start(self): 26 | if not self.protocol: 27 | return 28 | 29 | self.status = 'running' 30 | self.btm = self.protocol.factory.btm 31 | self.pieceManager = self.btm.pieceManager 32 | pm = self.pieceManager 33 | self.peer_bitfield = Bitfield(pm.pieces_size) 34 | self.downloadSpeedMonitor.start() 35 | self.downloadSpeedMonitor.registerObserver(self.protocol.factory.downloadSpeedMonitor) 36 | 37 | def stop(self): 38 | for task in 
self.piece_doing: 39 | self.pieceManager.failedPieceTask(*task) 40 | 41 | del self.piece_doing[:] 42 | 43 | self.downloadSpeedMonitor.stop() 44 | 45 | del self.protocol 46 | del self.btm 47 | del self.pieceManager 48 | 49 | self.status = 'stopped' 50 | 51 | def _choke(self, val): 52 | self.peer_choke = bool(val) 53 | 54 | if val: 55 | pass 56 | else: 57 | self.__pieceRequest() 58 | 59 | def interested(self, val): 60 | am_interested = bool(val) 61 | if self.am_interested is am_interested : 62 | return 63 | 64 | if am_interested : 65 | self.protocol.send_interested() 66 | else : 67 | self.protocol.send_not_interested() 68 | 69 | self.am_interested = am_interested 70 | 71 | def cancel(self, task): 72 | idx, (beg, length) = task 73 | self.protocol.send_cancel(idx, beg, length) 74 | 75 | def _downloadMonitor(self, data): 76 | self.downloadSpeedMonitor.addBytes(len(data)) 77 | 78 | def __pieceRequest(self): 79 | if self.am_interested==True and self.peer_choke==False: 80 | if self.piece_doing : 81 | return 82 | new_task = self.__getTask() 83 | if new_task : 84 | self.__sendTaskRequest(new_task) 85 | 86 | def __getTask(self, size=None): 87 | if size is None : 88 | size = self.task_max_size 89 | pm = self.pieceManager 90 | new_task = pm.getMorePieceTask(self.peer_bitfield, size) 91 | return new_task 92 | 93 | @defer.inlineCallbacks 94 | def __sendTaskRequest(self, new_task, timeout=None): 95 | if not new_task: 96 | return 97 | 98 | if timeout is None: 99 | timeout = len(new_task) * 60 100 | 101 | for task in new_task : 102 | i, (beg, size) = task 103 | self.protocol.send_request(i, beg, size) 104 | self.piece_doing.append(task) 105 | 106 | yield sleep(timeout) 107 | self.__checkTimeout(new_task) 108 | 109 | def __checkTimeout(self, task_plan): 110 | if self.status == 'stopped' : 111 | return 112 | 113 | set_plan = set(task_plan) 114 | set_ing = set(self.piece_doing) 115 | set_undo = set_plan & set_ing 116 | set_new = set_ing - set_plan 117 | 118 | task_size = 
def __checkTimeout(self, task_plan):
    """After a request batch times out, adapt the pipeline depth and
    re-queue any slices that never arrived.  (Method of BTDownload.)"""
    if self.status == 'stopped':
        return

    set_plan = set(task_plan)
    set_ing = set(self.piece_doing)
    set_undo = set_plan & set_ing  # requested in this batch, never received
    set_new = set_ing - set_plan   # requests issued after this batch

    # Shrink the window by the number of stragglers; grow it slightly when
    # newer requests are already in flight.  Clamp to [1, class default].
    task_size = self.task_max_size - len(set_undo)
    if set_new:
        task_size += 1
    if task_size < 1:
        task_size = 1
    elif task_size > BTDownload.task_max_size:
        task_size = BTDownload.task_max_size
    self.task_max_size = task_size

    if not set_undo:
        return

    new_task = self.__getTask(self.task_max_size)

    for task in set_undo:
        self.cancel(task)
        self.piece_doing.remove(task)
        self.pieceManager.failedPieceTask(*task)

    if new_task:
        self.__sendTaskRequest(new_task)

def _piece(self, index, beg, piece):
    """Handle an incoming PIECE message: hand the slice to the piece
    manager and request more once the pipeline drains.
    (Method of BTDownload.)"""
    task = index, (beg, len(piece))
    if task not in self.piece_doing:
        return  # unsolicited or already-cancelled slice
    self.pieceManager.finishPieceTask(index, (beg, len(piece)), piece)
    self.piece_doing.remove(task)
    if len(self.piece_doing) == 0:
        self.__pieceRequest()

def _bitfield(self, data):
    """Handle the peer's BITFIELD message and update our interest.
    (Method of BTDownload.)"""
    bf = Bitfield(self.pieceManager.pieces_size, data)
    self.peer_bitfield = bf
    if self.pieceManager.amInterested(bf):
        self.interested(True)
        self.__pieceRequest()
    else:
        self.interested(False)

def _have(self, index):
    """Handle a HAVE message: mark the peer's bit, maybe get interested.
    (Method of BTDownload.)"""
    self.peer_bitfield[index] = 1
    if self.pieceManager.amInterested(index):
        self.interested(True)
        self.__pieceRequest()


# --- autonomotorrent/bitfield.py -----------------------------------------
from array import array

# counts[i] == chr(popcount of byte i); used with str.translate to count
# set bits one byte at a time.  (xrange -> range: identical behaviour in
# Python 2, and the module also imports cleanly on Python 3.)
counts = ''.join(chr(sum((i >> j) & 1 for j in range(8)))
                 for i in range(256))


class BitOp(object):
    """Mixin giving bit containers `in` and `&` support."""

    def __contains__(self, idx):
        # FIX: this was misspelled "__contians__", so the method was dead
        # and `idx in bf` silently fell back to iterating all set bits.
        # For valid indices the result is the same, but now it is an O(1)
        # indexed lookup.
        return self[idx]

    def __and__(self, bf):
        return BitfieldOperatorProxy(self, bf, lambda x, y: x & y)
class Bitfield(BitOp):
    """Fixed-length bit vector (Python 2: bytes handled as chr/ord strings).

    Two cached invariants are maintained next to the raw bytes:
      * numzeros  -- number of clear bits
      * idxFirst1 -- index of the lowest set bit (None when all clear)
    """

    def __init__(self, length, bitstring=None):
        self.length = length
        full_bytes, spare_bits = divmod(length, 8)
        if bitstring is None:
            self.numzeros = length
            nbytes = full_bytes + 1 if spare_bits else full_bytes
            self.bits = array('B', chr(0) * nbytes)
        else:
            if spare_bits:
                if len(bitstring) != full_bytes + 1:
                    raise ValueError
                # the trailing pad bits must all be zero
                if (ord(bitstring[-1]) << spare_bits) & 0xFF != 0:
                    raise ValueError
            else:
                if len(bitstring) != full_bytes:
                    raise ValueError
            # popcount via the module-level `counts` translation table
            self.numzeros = length - sum(array('B',
                                               bitstring.translate(counts)))
            self.bits = array('B', bitstring)
        self.__updateIndex(0)

    def any(self):
        """True when at least one bit is set."""
        return self.numzeros != self.length

    def allOne(self):
        return self.numzeros == 0

    def allZero(self):
        return self.numzeros == self.length

    def __updateIndex(self, start=0):
        """Recompute idxFirst1, scanning from the byte containing `start`."""
        if self.allOne():
            self.idxFirst1 = 0
        elif self.allZero():
            self.idxFirst1 = None
        else:
            byte_pos = start / 8
            for i in xrange(byte_pos, len(self.bits)):
                byte = self.bits[i]
                if byte:
                    for bit in xrange(8):
                        if (128 >> bit) & byte:
                            break
                    self.idxFirst1 = i * 8 + bit
                    break
            else:
                self.idxFirst1 = None

    def set1(self, index):
        """Set bit `index`, keeping the cached invariants consistent."""
        pos = index >> 3
        mask = 128 >> (index & 7)
        if self.bits[pos] & mask:
            return  # already set
        self.bits[pos] |= mask
        self.numzeros -= 1
        assert self.numzeros >= 0
        if self.idxFirst1 is None or self.idxFirst1 > index:
            self.idxFirst1 = index

    def set0(self, index):
        """Clear bit `index`, keeping the cached invariants consistent."""
        pos = index >> 3
        mask = 128 >> (index & 7)
        if not self.bits[pos] & mask:
            return  # already clear
        self.bits[pos] &= ~mask
        self.numzeros += 1
        assert self.numzeros <= self.length
        if index == self.idxFirst1:
            self.__updateIndex(self.idxFirst1)

    def __setitem__(self, index, val):
        if val == 0:
            self.set0(index)
        elif val == 1:
            self.set1(index)
        else:
            raise ValueError('val is 0 or 1')

    def __getitem__(self, index):
        # `>>` binds tighter than `&`, so this is bits[i] & (128 >> (i & 7))
        return self.bits[index >> 3] & 128 >> (index & 7)

    def __len__(self):
        return self.length

    def __repr__(self):
        return self.bits.__repr__()

    def __iter__(self):
        """Yield the indices of all set bits, lowest first."""
        if self.any():
            for i in xrange(self.idxFirst1 / 8, len(self.bits)):
                byte = self.bits[i]
                if byte:
                    for bit in xrange(8):
                        if (128 >> bit) & byte:
                            yield i * 8 + bit

    def tostring(self):
        if self.bits is None:
            # NOTE(review): self.bits is never None in the visible code;
            # presumably some "complete/seed" mode elsewhere clears it --
            # confirm before removing this branch.
            rlen, extra = divmod(self.length, 8)
            r = chr(0xFF) * rlen
            if extra:
                r += chr((0xFF << (8 - extra)) & 0xFF)
            return r
        return self.bits.tostring()


class BitfieldOperatorProxy(object):
    """Lazy element-wise combination of two equal-length bitfields."""

    def __init__(self, bf1, bf2, op):
        assert len(bf1) == len(bf2)
        self.bf1 = bf1
        self.bf2 = bf2
        self.op = op

    def __iter__(self):
        bf1, bf2 = self.bf1, self.bf2
        if self.any:  # NOTE(review): tests the bound method -- always true
            # When either side is all-zero, idxFirst1 is None and the
            # division below raises TypeError; PieceManager.amInterested
            # deliberately catches that as "no pieces in common".
            pos = max(bf1.idxFirst1, bf2.idxFirst1) / 8
            bits1, bits2 = bf1.bits, bf2.bits
            for i in xrange(pos, len(bits1)):
                combined = self.op(bits1[i], bits2[i])
                if combined:
                    for bit in xrange(8):
                        if (128 >> bit) & combined:
                            yield i * 8 + bit

    def __len__(self):
        return len(self.bf1)

    def __getitem__(self, idx):
        return self.op(self.bf1[idx], self.bf2[idx])

    def any(self):
        return self.op(self.bf1.any(), self.bf2.any())

    def allOne(self):
        return self.op(self.bf1.allOne(), self.bf2.allOne())

    def allZero(self):
        return self.op(self.bf1.allZero(), self.bf2.allZero())
class Stats(object):
    """Complexity record for one code suite (a module by default)."""

    def __init__(self, name):
        self.name = name
        self.classes = []    # nested ClassStats
        self.functions = []  # nested DefStats
        self.complexity = 1  # every suite starts with one execution path

    def __str__(self):
        return 'Stats: name=%r, classes=%r, functions=%r, complexity=%r' \
            % (self.name, self.classes, self.functions, self.complexity)

    __repr__ = __str__


class ClassStats(Stats):
    """Complexity record for a class suite."""

    def __str__(self):
        return 'Stats: name=%r, methods=%r, complexity=%r, inner_class=%r' \
            % (self.name, self.functions, self.complexity, self.classes)

    __repr__ = __str__


class DefStats(Stats):
    """Complexity record for a function or lambda suite."""

    def __str__(self):
        return 'DefStats: name=%r, complexity=%r' \
            % (self.name, self.complexity)

    __repr__ = __str__


class CCVisitor(ASTVisitor):
    """Encapsulates the cyclomatic complexity counting."""

    def __init__(self, ast, stats=None, description=None):
        ASTVisitor.__init__(self)
        if isinstance(ast, basestring):
            ast = compiler.parse(ast)
        self.stats = stats or Stats(description or '')
        for child in ast.getChildNodes():
            compiler.walk(child, self, walker=self)

    def dispatchChildren(self, node):
        for child in node.getChildNodes():
            self.dispatch(child)

    def visitFunction(self, node):
        if not hasattr(node, 'name'):  # lambdas carry no name attribute
            node.name = ''
        self.stats.functions.append(
            CCVisitor(node, DefStats(node.name)).stats)

    visitLambda = visitFunction

    def visitClass(self, node):
        self.stats.classes.append(
            CCVisitor(node, ClassStats(node.name)).stats)

    def visitIf(self, node):
        # every test in an if/elif chain is its own decision point
        self.stats.complexity += len(node.tests)
        self.dispatchChildren(node)

    def __processDecisionPoint(self, node):
        self.stats.complexity += 1
        self.dispatchChildren(node)

    visitFor = visitGenExprFor = visitGenExprIf \
        = visitListCompFor = visitListCompIf \
        = visitWhile = _visitWith = __processDecisionPoint

    def visitAnd(self, node):
        self.dispatchChildren(node)
        self.stats.complexity += 1

    def visitOr(self, node):
        self.dispatchChildren(node)
        self.stats.complexity += 1


def measure_complexity(ast, module_name=None):
    """Walk `ast` (source string or compiler AST) and return its Stats."""
    return CCVisitor(ast, description=module_name).stats


class Table(object):
    """Simple fixed-width text table; precomputes per-column widths."""

    def __init__(self, headings, rows):
        self.headings = headings
        self.rows = rows
        widths = [len(h) for h in headings]
        for row in rows:
            for i, col in enumerate(row):
                widths[i] = max(widths[i], len(str(col)))
        self.max_col_sizes = widths

    def __iter__(self):
        return iter(self.rows)

    def __nonzero__(self):
        return len(self.rows)
class PrettyPrinter(object):
    """Renders a Stats tree as a plain-text complexity report."""

    def __init__(self, out, verbose=False):
        self.out = out          # file-like sink for the report
        self.verbose = verbose  # when False, low-complexity rows are hidden

    def pprint(self, filename, stats):
        """Write the (optionally filtered) complexity table for one file."""
        self.out.write('File: %s\n' % filename)

        rows = self.flatten_stats(stats)
        if not self.verbose:
            # filter out suites with low complexity numbers
            rows = (row for row in rows if row[-1] > 7)
        rows = sorted(rows, lambda a, b: cmp(b[2], a[2]))

        table = Table(['Type', 'Name', 'Complexity'], rows)
        if table:
            self.pprint_table(table)
        else:
            self.out.write('This code looks all good!\n')
        self.out.write('\n')

    def pprint_table(self, table):
        """Write `table` with space-padded, width-aligned columns."""
        for n, col in enumerate(table.headings):
            self.out.write(str(col).ljust(table.max_col_sizes[n] + 1))
        self.out.write('\n')
        self.out.write('-' * (sum(table.max_col_sizes) +
                              len(table.headings) - 1) + '\n')
        for row in table:
            for n, col in enumerate(row):
                self.out.write(str(col).ljust(table.max_col_sizes[n] + 1))
            self.out.write('\n')

    def flatten_stats(self, stats):
        """Flatten one level of a Stats tree into rows of
        (type_code, dotted_name, complexity): X=module, C=class,
        M=method, F=function."""
        def flatten(stats, ns=None):
            if not ns:
                yield 'X', stats.name, stats.complexity
            for cls in stats.classes:
                cls_name = '.'.join(filter(None, [ns, cls.name]))
                yield 'C', cls_name, cls.complexity
                for meth in cls.functions:
                    yield 'M', '.'.join([cls_name, meth.name]), meth.complexity
            for fn in stats.functions:
                yield 'F', '.'.join(filter(None, [ns, fn.name])), fn.complexity

        return [row for row in flatten(stats)]
#
# Written by Matt Chisholm
# Client list updated by Ed Savage-Jones - May 28th 2005

import re

v64p = '[\da-zA-Z.-]{3}'

# FIX: every named group below had been garbled to "(?P..." (the "<name>"
# part was stripped during extraction), making all of the patterns invalid
# regular expressions.  The group names are reconstructed from their use in
# identify_client(): 'version', 'bcver', 'rsver', 'strver' and 'rc'.
# NOTE(review): assignments of bcver/rsver vs. version per client follow the
# upstream BitTorrent ClientIdentifier -- confirm against that source.
matches = (
    ('-AZ(?P<version>\d+)-+.+$'                  , "Azureus"              ),
    ('M(?P<version>\d-\d-\d)--.+$'               , "BitTorrent"           ),
    ('T(?P<version>%s)0?-+.+$' % v64p            , "BitTornado"           ),
    ('-UT(?P<version>[\dA-F]+)-+.+$'             , u"microTorrent"        ),
    ('-TS(?P<version>\d+)-+.+$'                  , "TorrentStorm"         ),
    ('exbc(?P<bcver>.+)LORD.+$'                  , "BitLord"              ),
    ('exbc(?P<bcver>[^-][^-]+)(?!---).+$'        , "BitComet"             ),
    ('-BC0(?P<version>\d+)-.+$'                  , "BitComet"             ),
    ('FUTB(?P<bcver>.+).+$'                      , "BitComet Mod1"        ),
    ('xUTB(?P<bcver>.+).+$'                      , "BitComet Mod2"        ),
    ('A(?P<version>%s)-+.+$' % v64p              , "ABC"                  ),
    ('S(?P<version>%s)-+.+$' % v64p              , "Shadow's"             ),
    (chr(0)*12 + 'aa.+$'                         , "Experimental 3.2.1b2" ),
    (chr(0)*12 + '.+$'                           , "BitTorrent (obsolete)"),
    ('-G3.+$'                                    , "G3Torrent"            ),
    ('-[Ll][Tt](?P<version>\d+)-+.+$'            , "libtorrent"           ),
    ('Mbrst(?P<version>\d-\d-\d).+$'             , "burst!"               ),
    ('eX.+$'                                     , "eXeem"                ),
    ('\x00\x02BS.+(?P<strver>UDP0|HTTPBT)$'      , "BitSpirit v2"         ),
    ('\x00[\x02|\x00]BS.+$'                      , "BitSpirit v2"         ),
    ('.*(?P<strver>UDP0|HTTPBT)$'                , "BitSpirit"            ),
    ('-BOWP?(?P<version>[\dA-F]+)-.+$'           , "Bits on Wheels"       ),
    ('(?P<rsver>.+)RSAnonymous.+$'               , "Rufus Anonymous"      ),
    ('(?P<rsver>.+)RS.+$'                        , "Rufus"                ),
    ('-ML(?P<version>(\d\.)+\d)(?:\.(?P<strver>CVS))?-+.+$', "MLDonkey"   ),
    ('346------.+$'                              , "TorrentTopia 1.70"    ),
    ('OP(?P<version>\d{4}).+$'                   , "Opera"                ),
    ('-KT(?P<version>\d+)(?P<rc>R\d+)-+.+$'      , "KTorrent"             ),
    # Unknown but seen in peer lists:
    ('-S(?P<version>10059)-+.+$'                 , "Shareaza"             ),
    ('-TR(?P<version>\d+)-+.+$'                  , "Transmission"         ),
    ('S\x05\x07\x06\x00{7}.+'                    , "S 576 (unknown)"      ),
    # Clients I've never actually seen in a peer list:
    ('exbc..---.+$'                              , "BitVampire 1.3.1"     ),
    ('-BB(?P<version>\d+)-+.+$'                  , "BitBuddy"             ),
    ('-CT(?P<version>\d+)-+.+$'                  , "CTorrent"             ),
    ('-MT(?P<version>\d+)-+.+$'                  , "MoonlightTorrent"     ),
    ('-BX(?P<version>\d+)-+.+$'                  , "BitTorrent X"         ),
    ('-TN(?P<version>\d+)-+.+$'                  , "TorrentDotNET"        ),
    ('-SS(?P<version>\d+)-+.+$'                  , "SwarmScope"           ),
    ('-XT(?P<version>\d+)-+.+$'                  , "XanTorrent"           ),
    ('U(?P<version>\d+)-+.+$'                    , "UPnP NAT Bit Torrent" ),
    ('-AR(?P<version>\d+)-+.+$'                  , "Arctic"               ),
    ('(?P<rsver>.+)BM.+$'                        , "BitMagnet"            ),
    ('BG(?P<version>\d+).+$'                     , "BTGetit"              ),
    ('-eX(?P<version>[\dA-Fa-f]+)-.+$'           , "eXeem beta"           ),
    ('Plus12(?P<version>[\dR]+)-.+$'             , "Plus! II"             ),
    ('XBT(?P<version>\d+)[d-]-.+$'               , "XBT"                  ),
    ('-ZT(?P<version>\d+)-+.+$'                  , "ZipTorrent"           ),
    ('-BitE\?(?P<version>\d+)-.+$'               , "BitEruct"             ),
    ('O(?P<version>%s)-+.+$' % v64p              , "Osprey Permaseed"     ),
    # Guesses based on Rufus source code, never seen in the wild:
    ('-BS(?P<version>\d+)-+.+$'                  , "BTSlave"              ),
    ('-SB(?P<version>\d+)-+.+$'                  , "SwiftBit"             ),
    ('-SN(?P<version>\d+)-+.+$'                  , "ShareNET"             ),
    ('-bk(?P<version>\d+)-+.+$'                  , "BitKitten"            ),
    ('-SZ(?P<version>\d+)-+.+$'                  , "Shareaza"             ),
    ('-MP(?P<version>\d+)-+.+$'                  , "MooPolice"            ),
    ('Deadman Walking-.+$'                       , "Deadman"              ),
    ('270------.+$'                              , "GreedBT 2.7.0"        ),
    ('XTORR302.+$'                               , "TorrenTres 0.0.2"     ),
    ('turbobt(?P<version>\d\.\d).+$'             , "TurboBT"              ),
    ('DansClient.+$'                             , "XanTorrent"           ),
    ('-PO(?P<version>\d+)-+.+$'                  , "PO (unknown)"         ),
    ('-UR(?P<version>\d+)-+.+$'                  , "UR (unknown)"         ),
    # Patterns that should be executed last
    ('.*Azureus.*'                               , "Azureus 2.0.3.2"      ),
    )

matches = [(re.compile(pattern, re.DOTALL), name) for pattern, name in matches]

unknown_clients = {}
def identify_client(peerid, client_log=None):
    """Best-effort identification of a BitTorrent client from its 20-byte
    peer id string.

    Returns a (client_name, version_string) tuple; unidentified peer ids
    are optionally logged (once each) to `client_log`.
    (Deprecated dict.has_key() replaced with `in`, and `/` with `//` for
    explicit integer division -- behaviour is unchanged.)
    """
    client = 'unknown'
    version = ''
    for pat, name in matches:
        m = pat.match(peerid)
        if not m:
            continue
        client = name
        d = m.groupdict()
        if 'version' in d:
            version = d['version']
            version = version.replace('-', '.')
            if version.find('.') >= 0:
                version = ''.join(version.split('.'))
            # Map the base-64-ish version alphabet to dotted numbers.
            version = list(version)
            for i, c in enumerate(version):
                if '0' <= c <= '9':
                    version[i] = c
                elif 'A' <= c <= 'Z':
                    version[i] = str(ord(c) - 55)
                elif 'a' <= c <= 'z':
                    version[i] = str(ord(c) - 61)
                elif c == '.':
                    version[i] = '62'
                elif c == '-':
                    version[i] = '63'
                else:
                    break
            version = '.'.join(version)
        elif 'bcver' in d:
            bcver = d['bcver']
            version += str(ord(bcver[0])) + '.'
            if len(bcver) > 1:
                version += str(ord(bcver[1]) // 10)
                version += str(ord(bcver[1]) % 10)
        elif 'rsver' in d:
            rsver = d['rsver']
            version += str(ord(rsver[0])) + '.'
            if len(rsver) > 1:
                version += str(ord(rsver[1]) // 10) + '.'
                version += str(ord(rsver[1]) % 10)
        if 'strver' in d:
            if d['strver'] is not None:
                version += d['strver']
        if 'rc' in d:
            rc = 'RC ' + d['rc'][1:]
            if version:
                version += ' '
            version += rc
        break

    if client == 'unknown':
        # identify Shareaza 2.0 - 2.1: bytes 16-19 are the XOR of mirrored
        # earlier bytes of the peer id.
        if len(peerid) == 20 and chr(0) not in peerid[:15]:
            for i in range(16, 20):
                if ord(peerid[i]) != (ord(peerid[i - 16]) ^ ord(peerid[31 - i])):
                    break
            else:
                client = "Shareaza"

    if client_log is not None and 'unknown' in client:
        if peerid not in unknown_clients:
            unknown_clients[peerid] = True
            client_log.write('%s\n' % peerid)
            client_log.write('------------------------------\n')
    return client, version
import os
import re
import sys

from subprocess import Popen, PIPE

# Threshold for code to pass the Pylint test.  10 is the highest score
# Pylint will give to any piece of code.
PYLINT_PASS_THRESHOLD = 7
PYGENIE = os.path.join("tests", "complexity", "pygenie.py")
DOCTEST_FAIL = re.compile(r"\*\*\*Test Failed\*\*\*")
PYLINT_SCORE = re.compile(r"Your code has been rated at (-?[\d\.]+)/10")
PYLINT_REGEX = re.compile(r"""([A-Z]: |  # Error lines
                              \*\*\*\* ) # File separators
                          """, re.VERBOSE)
COMPLEXITY_OK = re.compile(r"This code looks all good!")
# FIX: the two named groups had been garbled to "(?P..." (invalid regex);
# names restored from their use in run_complexity_test().
COMPLEXITY_SCORE = re.compile(
    r"\w (?P<unit>\w+(\.\w+)*) (?P<score>\d{1,4})")
SPHINX_ERROR = re.compile(r"""(ERROR|WARNING)""", re.IGNORECASE)
SPHINX_OK = re.compile(r"""build succeeded""")

# A long '*' banner (xrange -> range: identical in Python 2).
SEPARATOR = "*".join('*' for i in range(30))


def is_py_script(filename):
    """Returns True if a file is a python executable (by its shebang)."""
    if not os.access(filename, os.X_OK):
        return False
    try:
        first_line = open(filename, "r").next().strip()
        return "#!" in first_line and "python" in first_line
    except StopIteration:  # empty file
        return False


def __get_py_files_changed():
    """Returns all file paths of python files which have changed with this
    commit (by extension or shebang), skipping deleted files.
    """
    # Ask git for every file modified since the last commit.
    sub = Popen("git diff --staged --name-only HEAD".split(), stdout=PIPE)
    sub.wait()

    py_files_changed = []
    for file_name in [f.strip() for f in sub.stdout.readlines()]:
        if (file_name.endswith(".py") and os.path.exists(file_name)) \
                or is_py_script(file_name):
            py_files_changed.append(file_name)
    return py_files_changed


def __get_all_py_files(base_dir=".."):
    """Returns absolute paths of all *.py files under base_dir."""
    py_files = []
    for root, dirs, files in os.walk(base_dir):
        for file_path in files:
            if file_path[-3:] == ".py":
                py_files.append(os.path.abspath(os.path.join(root, file_path)))
    return py_files


def print_header(section_name):
    """Prints a large header to make the section stand out."""
    print('\n\n')
    print(SEPARATOR)
    print(" *** " + section_name + " ***")
    print(SEPARATOR)


def print_failures(failures):
    """Prints one [ FAIL ] line per file, with its score when known."""
    for (file_name, score) in failures.iteritems():
        if score:
            print("[ FAIL ] %s : %s" % (file_name, score))
        else:
            print("[ FAIL ] %s" % (file_name, ))
(y/[n]): ") 101 | except: 102 | answer = 'n' 103 | 104 | if answer == "y": 105 | default_path = os.path.abspath("") 106 | new_path = raw_input("Directory to scan for python files: [" + \ 107 | default_path + "]: ") 108 | if new_path == "": 109 | new_path = default_path 110 | files_to_test = __get_all_py_files(new_path) 111 | 112 | else: 113 | sys.exit(0) 114 | 115 | return files_to_test 116 | 117 | def main(): 118 | """Checks your git commit with Pylint!""" 119 | failed = False 120 | tests_failing = [] 121 | files_changed = __get_py_files_changed() 122 | if not files_changed: 123 | files_changed = query_user_for_files_to_test() 124 | 125 | print_header("Running Sphinx Doc Build Test") 126 | sphinx_failures = run_sphinx_test() 127 | print_header("Running Doc Tests") 128 | doctest_failures = run_doc_test(files_changed) 129 | print_header("Running Pylint Tests") 130 | pylint_failures = run_pylint_test(files_changed) 131 | print_header("Running Cyclomatic Complexity Tests") 132 | complexity_failures = run_complexity_test(files_changed) 133 | 134 | # If any of the files failed the tests, stop the commit from continuing. 
135 | # and list which are failing 136 | print_header("Composite Test Results") 137 | if len(sphinx_failures) > 0: 138 | print "Sphinx Doc Build Test Failures:" 139 | failed = True 140 | tests_failing.append("sphinx") 141 | print_failures(sphinx_failures) 142 | 143 | if len(doctest_failures) > 0: 144 | print "Doc Test Failures:" 145 | failed = True 146 | tests_failing.append("doctest") 147 | print_failures(doctest_failures) 148 | 149 | if len(pylint_failures) > 0: 150 | print "Pylint Test Failures: (must be above 7)" 151 | failed = True 152 | tests_failing.append("plylint") 153 | print_failures(pylint_failures) 154 | 155 | if len(complexity_failures) > 0: 156 | print "Complexity Test Failures (must be under 8)" 157 | failed = True 158 | tests_failing.append("complexity") 159 | print_failures(complexity_failures) 160 | 161 | if failed: 162 | print "\ngit: fatal: commit failed, " + ', '.join(tests_failing) \ 163 | + " tests failing." 164 | sys.exit(1) 165 | else: 166 | print "All Tests Passed!" 167 | 168 | def run_doc_test(files_changed): 169 | """Runs the standard docstring tests, which looks for documentation that 170 | appears to be intereactive sessions and makes sure they work as written. 171 | """ 172 | failures = {} # filename: score (if applicable) 173 | for f_name in files_changed: 174 | doctest = Popen(("python -m doctest %s" % f_name).split(), stdout=PIPE) 175 | doctest.wait() 176 | 177 | for line in doctest.stdout: 178 | print line.rstrip('\n') 179 | if DOCTEST_FAIL.match(line): 180 | failures[f_name] = None 181 | 182 | return failures 183 | 184 | def run_sphinx_test(): 185 | """Builds the project documentation using Sphinx. If it succeeds with 186 | fewer than 10 warnings, this test will pass. 
187 | """ 188 | failures = {} # filename : score (if applicable) 189 | test = Popen(("python setup.py build_sphinx").split(), stdout=PIPE) 190 | test.wait() 191 | 192 | failed = False # Assume success by default 193 | for line in test.stdout: 194 | if SPHINX_ERROR.search(line): 195 | print line.rstrip('\n') 196 | failed = True 197 | 198 | if failed: 199 | failures['All Docs'] = None 200 | 201 | return failures 202 | 203 | def run_pylint_test(files_changed): 204 | """Run Pylint on each file, collect the results, and display them for the 205 | user. 206 | """ 207 | failures = {} # filename: score (if applicable) 208 | for f_name in files_changed: 209 | doctest = Popen(("pylint -f text %s" % f_name).split(), stdout=PIPE, 210 | stderr=PIPE) 211 | doctest.wait() 212 | 213 | for line in doctest.stdout: 214 | if PYLINT_SCORE.match(line): 215 | score = float(PYLINT_SCORE.match(line).group(1)) 216 | 217 | if score < PYLINT_PASS_THRESHOLD: 218 | failures[f_name] = score 219 | 220 | if PYLINT_REGEX.match(line): 221 | pretty_line = line.rstrip('\n').lstrip('* ') 222 | if pretty_line[1] == ":": 223 | print " ", pretty_line 224 | else: 225 | print pretty_line 226 | 227 | return failures 228 | 229 | def run_complexity_test(files_changed): 230 | """Run cyclomatic complexity test on each file, collect the results, and 231 | display them for the user. 
232 | """ 233 | failures = {} # filename: score (if applicable) 234 | for f_name in files_changed: 235 | doctest = Popen((PYGENIE + " complexity " + f_name).split(), 236 | stdout=PIPE) 237 | doctest.wait() 238 | 239 | for line in doctest.stdout: 240 | if COMPLEXITY_SCORE.match(line): 241 | match = COMPLEXITY_SCORE.match(line) 242 | score = match.group('score') 243 | unit = match.group('unit') 244 | failures[f_name] = score 245 | print f_name, ":", unit, "[Score:", score + "]" 246 | 247 | return failures 248 | 249 | if __name__ == "__main__": 250 | main() 251 | -------------------------------------------------------------------------------- /autonomotorrent/BTProtocol.py: -------------------------------------------------------------------------------- 1 | # 2 | # -*-encoding:gb2312-*- 3 | 4 | import hashlib 5 | import struct 6 | import socket 7 | import time 8 | 9 | from twisted.internet import reactor, defer 10 | from twisted.internet import protocol 11 | from twisted.python import log 12 | 13 | from ClientIdentifier import identify_client 14 | from bitfield import Bitfield 15 | from tools import SpeedMonitor, sleep 16 | from upload import BTUpload 17 | from download import BTDownload 18 | 19 | class BTProtocol(protocol.Protocol): 20 | 21 | msg_choke = '\x00' 22 | msg_unchoke = '\x01' 23 | msg_interested = '\x02' 24 | msg_not_interested = '\x03' 25 | msg_have = '\x04' 26 | msg_bitfield = '\x05' 27 | msg_request = '\x06' 28 | msg_piece = '\x07' 29 | msg_cancel = '\x08' 30 | msg_port = '\x09' 31 | 32 | msg_type = {'\x00' : 'choke', 33 | '\x01' : 'unchoke', 34 | '\x02' : 'interested', 35 | '\x03' : 'not_interested', 36 | '\x04' : 'have', 37 | '\x05' : 'bitfield', 38 | '\x06' : 'request', 39 | '\x07' : 'piece', 40 | '\x08' : 'cancel', 41 | '\x09' : 'port'} 42 | 43 | def __init__(self): 44 | self.peer_id = None 45 | self.status = None 46 | 47 | def connectionMade(self): 48 | self.status = 'handshake' 49 | 50 | self.data = '' 51 | self._handle_data = self.handle_data() 
52 | self._next_data_len = self._handle_data.next() 53 | 54 | self.preHandshake() 55 | 56 | def finishHandshake(self): 57 | self.btm = self.factory.btm 58 | 59 | self.bitfield = Bitfield(self.btm.metainfo.pieces_size) 60 | 61 | self.upload = BTUpload(self) 62 | self.download = BTDownload(self) 63 | self.upload.start() 64 | self.__uploadMonitor = self.upload._uploadMonitor 65 | self.download.start() 66 | self.__downloadMonitor = self.download._downloadMonitor 67 | 68 | self.send_bitfield(self.btm.pieceManager.bitfield) 69 | 70 | self.send_keep_alive() 71 | 72 | if self.btm.connectionManager.isAlreadyConnected(self.peer_id) : 73 | # Already connected, dropping the connection 74 | reactor.callLater(0, self.transport.loseConnection) 75 | else: 76 | self.factory.addActiveConnection(self.peer_id, self) 77 | 78 | self.status = 'started' 79 | 80 | def connectionLost(self, reason=None): 81 | if self.status == 'started': 82 | self.upload.stop() 83 | self.download.stop() 84 | 85 | del self.__uploadMonitor 86 | del self.__downloadMonitor 87 | del self.upload 88 | del self.download 89 | del self.btm 90 | 91 | self.factory.removeActiveConnection(self) 92 | 93 | self.status = 'stopped' 94 | 95 | def stopConnection(self): 96 | if self.connected: 97 | self.transport.loseConnection() 98 | 99 | def send_data(self, data): 100 | if not self.connected: 101 | return 102 | 103 | prefix = struct.pack('!I', len(data)) 104 | self.transport.write(prefix + data) 105 | 106 | def send_message(self, _type, data): 107 | self.send_data(_type + data) 108 | 109 | self.__uploadMonitor(_type, data) 110 | 111 | def __uploadMonitor(self, _type, data): 112 | pass 113 | 114 | def send_handshake(self): 115 | info_hash = self.factory.btm.metainfo.info_hash 116 | my_id = self.factory.btm.my_peer_id 117 | reserved = '\x00'*7 + '\x01' 118 | data = '\x13' + 'BitTorrent protocol' + reserved + info_hash + my_id 119 | self.transport.write(data) 120 | 121 | @defer.inlineCallbacks 122 | def send_keep_alive(self): 123 
| yield sleep(60.0) 124 | while self.connected: 125 | self.send_data('') 126 | yield sleep(60.0) 127 | 128 | def send_choke(self): 129 | self.am_choke = True 130 | self.send_data(self.msg_choke) 131 | 132 | def send_unchoke(self): 133 | self.am_choke = False 134 | self.send_data(self.msg_unchoke) 135 | 136 | def send_interested(self): 137 | self.am_interested = True 138 | self.send_data(self.msg_interested) 139 | 140 | def send_not_interested(self): 141 | self.am_interested = False 142 | self.send_data(self.msg_not_interested) 143 | 144 | def send_have(self, index): 145 | data = struct.pack('!I', index) 146 | self.send_message(self.msg_have, data) 147 | 148 | def send_bitfield(self, bitfield): 149 | if type(bitfield) is str : 150 | data = bitfield 151 | elif type(bitfield) is Bitfield : 152 | data = bitfield.tostring() 153 | else : 154 | raise TypeError('bitfield should be str or Bitfield') 155 | 156 | self.send_message(self.msg_bitfield, data) 157 | 158 | def send_request(self, index, begin, length): 159 | data = struct.pack('!III', index, begin, length) 160 | self.send_message(self.msg_request, data) 161 | 162 | def send_piece(self, index, begin, piece): 163 | data = struct.pack('!II', index, begin) + piece 164 | self.send_message(self.msg_piece, data) 165 | 166 | def send_cancel(self, idx, begin, length): 167 | data = struct.pack('!III', idx, begin, length) 168 | self.send_message(self.msg_cancel, data) 169 | 170 | def send_port(self, port): 171 | data = struct.pack('!I', port) 172 | self.send_message(self.msg_port, data) 173 | 174 | def __downloadMonitor(self, data): 175 | pass 176 | 177 | def dataReceived(self, data): 178 | self.__downloadMonitor(data) 179 | 180 | data = self.data + data 181 | nd_len = self._next_data_len 182 | 183 | while len(data) >= nd_len: 184 | data_send, data = data[:nd_len], data[nd_len:] 185 | nd_len = self._handle_data.send(data_send) 186 | 187 | self.data = data 188 | self._next_data_len = nd_len 189 | 190 | def handle_data(self): 
191 | protocol = yield ord((yield 1)) 192 | reserved = yield 8 193 | info_hash = yield 20 194 | peer_id = yield 20 195 | 196 | self.handle_handshake(protocol, reserved, info_hash, peer_id) 197 | 198 | self.postHandshake() 199 | 200 | self.finishHandshake() 201 | 202 | while True: 203 | size, = struct.unpack('!I', (yield 4)) 204 | if size == 0 : 205 | self.handle_keep_alive() 206 | else: 207 | _type = yield 1 208 | self.cur_msg_type = _type 209 | 210 | data = yield (size - 1) 211 | 212 | method_name = 'handle_'+self.msg_type[_type] 213 | method = getattr(self, method_name, None) 214 | if method: 215 | method(data) 216 | else: 217 | raise NotImplementedError(method_name) 218 | 219 | def handle_handshake(self, protocol, reserved, info_hash, peer_id): 220 | log.msg('Connected to client ID: {0} v{1}'.format(*identify_client(peer_id))) 221 | self.peer_protocol = protocol 222 | self.peer_reserved = reserved 223 | self.peer_info_hash = info_hash 224 | self.peer_id = peer_id 225 | 226 | def handle_keep_alive(self): 227 | pass 228 | 229 | def handle_choke(self, data): 230 | self.download._choke(True) 231 | 232 | def handle_unchoke(self, data): 233 | self.download._choke(False) 234 | 235 | def handle_interested(self, data): 236 | self.upload._interested(True) 237 | 238 | def handle_not_interested(self, data): 239 | self.upload._interested(False) 240 | 241 | def handle_have(self, data): 242 | assert len(data) == 4 243 | index, = struct.unpack('!I', data) 244 | self.download._have(index) 245 | 246 | def handle_bitfield(self, data): 247 | self.download._bitfield(data) 248 | 249 | def handle_request(self, data): 250 | index, begin, length = struct.unpack('!III', data) 251 | self.upload._request(index, begin, length) 252 | 253 | def handle_piece(self, data): 254 | index, begin = struct.unpack('!II', data[:8]) 255 | piece = data[8:] 256 | self.download._piece(index, begin, piece) 257 | 258 | def handle_cancel(self, data): 259 | index, begin, length = struct.unpack('!III', data) 260 
| self.upload._cancel(index, begin, length) 261 | 262 | def handle_port(self, data): 263 | if self.btm.app.enable_DHT: 264 | port, = struct.unpack('!H', data) 265 | self.dht_port = port 266 | addr = self.transport.getPeer().host 267 | self.btm.connectionManager.handle_port(addr, port) 268 | 269 | ############################################################ 270 | class BTClientProtocol (BTProtocol): 271 | def preHandshake(self): 272 | self.send_handshake() 273 | 274 | def postHandshake(self): 275 | if self.peer_info_hash == self.factory.btm.metainfo.info_hash : 276 | pass 277 | else: 278 | reactor.callLater(0, self.transport.loseConnection) 279 | 280 | class BTServerProtocol (BTProtocol): 281 | def preHandshake(self): 282 | pass 283 | 284 | def postHandshake(self): 285 | factory = self.factory.resetFactory(self, self.peer_info_hash) 286 | if factory : 287 | self.send_handshake() 288 | else: 289 | reactor.callLater(0, self.transport.loseConnection) 290 | 291 | ############################################################ 292 | -------------------------------------------------------------------------------- /autonomotorrent/MetaInfo.py: -------------------------------------------------------------------------------- 1 | # 2 | # -*-encoding:gb2312-*- 3 | 4 | import sys 5 | import zlib 6 | 7 | from os.path import getsize, split, join, abspath, isdir, normpath 8 | from copy import copy 9 | from string import strip 10 | from time import time 11 | try: 12 | from sys import getfilesystemencoding 13 | ENCODING = getfilesystemencoding() 14 | except: 15 | from sys import getdefaultencoding 16 | ENCODING = getdefaultencoding() 17 | 18 | ### original imports 19 | import hashlib 20 | import os 21 | 22 | from bencode import bencode, bdecode 23 | 24 | from twisted.python import log 25 | 26 | class BTMetaInfo: 27 | """ 28 | """ 29 | encoding = 'utf-8' 30 | def __init__(self, path=None, meta_info=None): 31 | if path: 32 | ct = open(path, 'rb').read() 33 | metainfo = bdecode(ct) 34 | 
elif meta_info: 35 | metainfo = meta_info 36 | else: 37 | raise Exception("Must pass either a BT meta file path or the " +\ 38 | "meta info itself!") 39 | self.metainfo = metainfo 40 | 41 | if 'announce' in metainfo: 42 | self.announce_list = [metainfo['announce']] 43 | if 'announce-list' in metainfo: 44 | self.announce_list += reduce(lambda x,y: x+y, metainfo['announce-list']) 45 | else: # Trackerless torrent? 46 | self.announce_list = [] 47 | 48 | if 'encoding' in metainfo: 49 | self.encoding = metainfo['encoding'] 50 | 51 | info = metainfo['info'] 52 | temp = hashlib.sha1(bencode(info)) 53 | self.info_hash = temp.digest() 54 | self.pretty_info_hash = temp.hexdigest() 55 | 56 | self.piece_length = info['piece length'] 57 | 58 | hashes = info['pieces'] 59 | self.pieces_hash = [hashes[i:i+20] for i in range(0, len(hashes), 20)] 60 | self.pieces_size = len(self.pieces_hash) 61 | 62 | self.files = [] 63 | 64 | self.topDir = '.' 65 | name = info['name'].decode(self.encoding) 66 | if 'files' in info: 67 | cur_size = 0 68 | for fd in info['files']: 69 | _d = fd.copy() 70 | _path = [name] + [p.decode(self.encoding) for p in _d['path']] 71 | _path = os.path.join(*_path) 72 | _d['path'] = _path 73 | _start = cur_size 74 | _stop = cur_size + _d['length'] 75 | cur_size = _stop 76 | _d['pos_range'] = _start, _stop 77 | self.files.append(_d) 78 | 79 | self.total_length = cur_size 80 | self.topDir = name 81 | 82 | else: 83 | _d = {} 84 | _d['path'] = name 85 | _d['length'] = info['length'] 86 | _d['pos_range'] = 0, info['length'] # TODO: Is this right? 
87 | self.files.append(_d) 88 | self.total_length = info['length'] 89 | 90 | last_piece_length = self.total_length % self.piece_length 91 | if last_piece_length == 0 : 92 | last_piece_length = self.piece_length 93 | self.last_piece_length = last_piece_length 94 | 95 | def __getitem__(self, key): 96 | if type(key) is type(0): 97 | return self.files[key] 98 | return self.metainfo[key] 99 | 100 | def __iter__(self): 101 | for f in self.files: 102 | yield f 103 | 104 | def _calcsize(file_to_torrent): 105 | if not isdir(file_to_torrent): 106 | return getsize(file_to_torrent) 107 | total = 0 108 | for s in _subfiles(abspath(file_to_torrent)): 109 | total += getsize(s[1]) 110 | return total 111 | 112 | def calculate_piece_length(file_to_torrent, piece_len_exp=None): 113 | """Calculates the piece length according to the piece length exponent. If 114 | one is not provided, it calculates it for you. 115 | """ 116 | if not piece_len_exp: # then calculate it automatically 117 | size = _calcsize(file_to_torrent) 118 | if size > 2*1024*1024*1024: # > 2 gig = 119 | piece_len_exp = 20 # 1 meg pieces 120 | elif size > 512*1024*1024: # > 512M = 121 | piece_len_exp = 19 # 512K pieces 122 | elif size > 64*1024*1024: # > 64M = 123 | piece_len_exp = 18 # 256K pieces 124 | elif size > 16*1024*1024: # > 16M = 125 | piece_len_exp = 17 # 128K pieces 126 | elif size > 4*1024*1024: # > 4M = 127 | piece_len_exp = 16 # 64K pieces 128 | else: 129 | piece_len_exp = 15 # 32K pieces 130 | piece_length = 2 ** piece_len_exp 131 | return piece_length 132 | 133 | def _get_fs_encoding(): 134 | """Attempts to figure out and return the local filesystem encoding. 135 | """ 136 | fs_encoding = ENCODING 137 | if not fs_encoding: 138 | fs_encoding = 'ascii' 139 | return fs_encoding 140 | 141 | def save_meta_info(path, meta_info): 142 | """Bencodes and saves the meta_info dictioary to the path provided. 143 | 144 | Warning: This does not verify the meta_info for correctness. 
145 | """ 146 | # TODO: Check/verify meta_info? 147 | target_file = open(path, 'wb') 148 | target_file.write(bencode(meta_info)) 149 | target_file.close() 150 | 151 | def create_meta_info(file_to_torrent, url, target=None, save_to_disk=True, 152 | comment=None, created_by=None, announce_list=None, httpseeds=None, 153 | piece_len_exp=None, get_hash=None): 154 | """Creates and returns the meta info dictionary for the file_to_torrent 155 | passed. 156 | 157 | @param comment string 158 | @param created_by string 159 | @param announce_list list of lists with each list within the list being a 160 | tier and each string in that list being an announce url (e.g. 161 | [["sometracker.org:80/announce","trckr.net:80"],["t.com:80/announce"]] 162 | @param httpseeds list of urls (strings) 163 | """ 164 | info = make_info(file_to_torrent, piece_len_exp=piece_len_exp, get_hash=get_hash) 165 | #check_info(info) # FIXME: from BitTornado.BT1.btformats import check_info 166 | data = {'info': info, 'announce': strip(url), 'creation date': long(time())} 167 | 168 | if comment: 169 | data['comment'] = comment 170 | if created_by: 171 | data['created by'] = created_by 172 | if announce_list: 173 | data['announce-list'] = announce_list 174 | if httpseeds: 175 | data['httpseeds'] = httpseeds 176 | 177 | if save_to_disk: 178 | if target: 179 | target_path = join(target, split(normpath(file_to_torrent))[1] + '.torrent') 180 | else: 181 | a, b = split(file_to_torrent) 182 | if b == '': 183 | target_path = a + '.torrent' 184 | else: 185 | target_path = join(a, b + '.torrent') 186 | save_meta_info(target_path, data) 187 | 188 | return data 189 | 190 | def _uniconvertl(l, e): 191 | r = [] 192 | try: 193 | for s in l: 194 | r.append(_uniconvert(s, e)) 195 | except UnicodeError: 196 | raise UnicodeError('bad filename: '+join(l)) 197 | return r 198 | def _uniconvert(s, e): 199 | try: 200 | s = unicode(s,e) 201 | except UnicodeError: 202 | raise UnicodeError('bad filename: '+s) 203 | return 
s.encode('utf-8') 204 | def make_info(file_to_torrent, piece_len_exp=None, get_hash=None): 205 | """Creates and returns the meta info dictionary. 206 | 207 | @param piece_len_exp integer 2^piece_len_exp used to calculate the piece 208 | length. If piece_len_exp is not given, it (and thus the piece length) will 209 | be calculated for you. 210 | """ 211 | piece_length = calculate_piece_length(file_to_torrent, piece_len_exp) 212 | if get_hash is None: 213 | get_hash = {} 214 | 215 | if not 'md5' in get_hash: 216 | get_hash['md5'] = False 217 | if not 'crc32' in get_hash: 218 | get_hash['crc32'] = False 219 | if not 'sha1' in get_hash: 220 | get_hash['sha1'] = False 221 | 222 | fs_encoding = _get_fs_encoding() 223 | file_to_torrent = abspath(file_to_torrent) 224 | pieces = [] 225 | if isdir(file_to_torrent): #Multiple files 226 | subs = _subfiles(file_to_torrent) 227 | subs.sort() 228 | sh = hashlib.sha1() 229 | done = 0 230 | fs = [] 231 | totalsize = 0.0 232 | totalhashed = 0 233 | for p, f in subs: 234 | totalsize += getsize(f) 235 | 236 | for p, f in subs: 237 | pos = 0 238 | size = getsize(f) 239 | h = open(f, 'rb') 240 | 241 | if get_hash['md5']: 242 | hash_md5 = hashlib.md5() 243 | if get_hash['sha1']: 244 | hash_sha1 = hashlib.sha1() 245 | if get_hash['crc32']: 246 | hash_crc32 = zlib.crc32('') 247 | 248 | while pos < size: 249 | a = min(size-pos, piece_length-done) 250 | readpiece = h.read(a) 251 | sh.update(readpiece) 252 | if get_hash['md5']: 253 | hash_md5.update(readpiece) 254 | if get_hash['crc32']: 255 | hash_crc32 = zlib.crc32(readpiece, hash_crc32) 256 | if get_hash['sha1']: 257 | hash_sha1.update(readpiece) 258 | 259 | done += a 260 | pos += a 261 | totalhashed += a 262 | 263 | if done == piece_length: 264 | pieces.append(sh.digest()) 265 | done = 0 266 | sh = hashlib.sha1() 267 | 268 | newdict = {'length': size, 269 | 'path': _uniconvertl(p, fs_encoding) } 270 | if get_hash['md5']: 271 | newdict['md5sum'] = hash_md5.hexdigest() 272 | if 
get_hash['crc32']: 273 | newdict['crc32'] = "%08X" % hash_crc32 274 | if get_hash['sha1']: 275 | newdict['sha1'] = hash_sha1.digest() 276 | fs.append(newdict) 277 | h.close() 278 | if done > 0: 279 | pieces.append(sh.digest()) 280 | return {'pieces': ''.join(pieces), 281 | 'piece length': piece_length, 282 | 'files': fs, 283 | 'name': _uniconvert(split(file_to_torrent)[1], fs_encoding) 284 | } 285 | else: # Single file 286 | size = getsize(file_to_torrent) 287 | p = 0 288 | h = open(file_to_torrent, 'rb') 289 | 290 | if get_hash['md5']: 291 | hash_md5 = hashlib.md5() 292 | if get_hash['crc32']: 293 | hash_crc32 = zlib.crc32('') 294 | if get_hash['sha1']: 295 | hash_sha1 = hashlib.sha1() 296 | 297 | while p < size: 298 | x = h.read(min(piece_length, size - p)) 299 | if get_hash['md5']: 300 | # Update MD5 301 | hash_md5.update(x) 302 | if get_hash['crc32']: 303 | # Update CRC32 304 | hash_crc32 = zlib.crc32(x, hash_crc32) 305 | if get_hash['sha1']: 306 | # Update SHA-1 307 | hash_sha1.update(x) 308 | 309 | pieces.append(hashlib.sha1(x).digest()) 310 | p += piece_length 311 | if p > size: 312 | p = size 313 | h.close() 314 | newdict = {'pieces': ''.join(pieces), 315 | 'piece length': piece_length, 316 | 'length': size, 317 | 'name': _uniconvert(split(file_to_torrent)[1], fs_encoding), 318 | } 319 | if get_hash['md5']: 320 | newdict['md5sum'] = hash_md5.hexdigest() 321 | if get_hash['crc32']: 322 | newdict['crc32'] = "%08X" % hash_crc32 323 | if get_hash['sha1']: 324 | newdict['sha1'] = hash_sha1.digest() 325 | 326 | return newdict 327 | def _subfiles(d): 328 | r = [] 329 | stack = [([], d)] 330 | while stack: 331 | p, n = stack.pop() 332 | if isdir(n): 333 | for s in os.listdir(n): 334 | if s[:1] != '.': 335 | stack.append((copy.copy(p) + [s], join(n, s))) 336 | else: 337 | r.append((p, n)) 338 | return r 339 | -------------------------------------------------------------------------------- /autonomotorrent/FileManager.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | """ 3 | import os 4 | import hashlib 5 | 6 | from twisted.python import log 7 | from twisted.internet import reactor, defer 8 | 9 | from bitfield import Bitfield 10 | 11 | from tools import sleep 12 | 13 | class BTFileError (Exception) : 14 | pass 15 | 16 | class BTHashTestError (Exception): 17 | pass 18 | 19 | class BTFile: 20 | def __init__(self, metainfo, index, saveDir): 21 | fileinfo = metainfo.files[index] 22 | piece_len = metainfo.piece_length 23 | self.fileInfo = fileinfo 24 | self.path = os.path.join(saveDir, fileinfo['path']) 25 | self.length = fileinfo['length'] 26 | self.piece_len = piece_len 27 | self.abs_pos0, self.abs_pos1 = fileinfo['pos_range'] 28 | self.fd = None 29 | idx0, ext = divmod(self.abs_pos0, self.piece_len) 30 | self.idx0_piece = idx0 31 | idx1, ext = divmod(self.abs_pos1, self.piece_len) 32 | self.idx1_piece = idx1+1 if ext else idx1 33 | h, t = os.path.split(self.path) 34 | if not os.path.exists(h): 35 | os.makedirs(h) 36 | 37 | def __str__(self): 38 | return u'piece=[{},{}) size={:,d} "{}" '.format(self.idx0_piece, self.idx1_piece, self.length, os.path.split(self.path)[1]).encode('gb2312') 39 | 40 | def __getIntersection(self, index, beg, data_len): 41 | # p0,p1,f0,f1 absolute position in files 42 | p0 = index * self.piece_len + beg 43 | p1 = p0 + data_len 44 | 45 | f0, f1 = self.abs_pos0, self.abs_pos1 46 | 47 | # intersect sub piece 48 | pf0 = max(p0, f0) 49 | pf1 = min(p1, f1) 50 | 51 | # pb,pe relative positioin in piece 52 | pb = pf0 - p0 53 | pe = pf1 - p0 54 | 55 | # fb,fe relative position in current file 56 | fb = pf0 - f0 57 | fe = pf1 - f0 58 | 59 | return (pb, pe), (fb, fe) 60 | 61 | 62 | def write(self, index, beg, data): 63 | (pb,pe), (fb,fe) = self.__getIntersection(index, beg, len(data)) 64 | if pb >= pe : 65 | raise BTFileError("index isn't in this file") 66 | my_data = data[pb:pe] 67 | if self.fd is None : 68 | if 
os.path.exists(self.path) : 69 | length = os.path.getsize(self.path) 70 | if length != self.length: 71 | raise BTFileError(u'old file size is error: {}'.format(self.path)) 72 | fd = open(self.path, 'rb+') 73 | else : 74 | fd = open(self.path, 'wb+') 75 | fd.truncate(self.length) 76 | self.fd = fd 77 | self.fd.seek(fb) 78 | self.fd.write(my_data) 79 | return pb, len(my_data) 80 | 81 | def read(self, index, beg, data_len): 82 | (pb,pe), (fb,fe) = self.__getIntersection(index, beg, data_len) 83 | if pb >= pe : 84 | raise BTFileError("index isn't in this file") 85 | 86 | if self.fd is None: 87 | try: 88 | self.fd = open(self.path, 'rb+') 89 | except IOError as error: 90 | raise BTFileError(str(error)) 91 | 92 | self.fd.seek(fb) 93 | data = self.fd.read(fe-fb) 94 | return pb, data 95 | 96 | def close(self): 97 | if self.fd : 98 | self.fd.close() 99 | 100 | def __getitem__(self, idx): 101 | return self.read(idx, 0, self.piece_len) 102 | 103 | def __setitem__(self, idx, data): 104 | self.write(idx, 0, data) 105 | 106 | def __iter__(self) : 107 | for idx in xrange(self.idx0_piece, self.idx1_piece) : 108 | yield idx, self[idx] 109 | 110 | def __len__(self) : 111 | return self.idx1_piece - self.idx0_piece 112 | 113 | def __contains__(self, idx) : 114 | return self.idx0_piece <= idx < self.idx1_piece 115 | 116 | 117 | class BTFiles : 118 | def __init__(self, metainfo, saveDir, selectedFileIndex=None): 119 | if selectedFileIndex is None : 120 | selectedFileIndex = range(len(metainfo.files)) 121 | selectedFileIndex.sort() 122 | 123 | self.metainfo = metainfo 124 | self.saveDir = saveDir 125 | self.totalSize = metainfo.total_length 126 | self.pieceNum = metainfo.pieces_size 127 | self.pieceLength = metainfo.piece_length 128 | self.hashArray = metainfo.pieces_hash 129 | 130 | self.files = [] 131 | for i in selectedFileIndex : 132 | self.files.append(BTFile(metainfo, i, saveDir)) 133 | 134 | def doHashTest(self, idx, data): 135 | return hashlib.sha1(data).digest() == 
self.hashArray[idx] 136 | 137 | def getBitfield(self) : 138 | bfNeed = Bitfield(self.pieceNum) 139 | for f in self.files : 140 | for i in xrange(f.idx0_piece, f.idx1_piece) : 141 | bfNeed[i] = 1 142 | 143 | bfHave = Bitfield(self.pieceNum) 144 | for i in xrange(self.pieceNum): 145 | try : 146 | ds = self[i] 147 | if len(ds) == 1: 148 | beg, dat = ds[0] 149 | if self.doHashTest(i, dat): 150 | bfHave[i] = 1 151 | bfNeed[i] = 0 152 | except BTFileError as error : 153 | pass 154 | 155 | return bfHave, bfNeed 156 | 157 | def write(self, idx, data) : 158 | ds = [f.write(idx,0,data) for f in self.files if idx in f] 159 | if len(ds) <= 1 : 160 | return ds 161 | else : 162 | _ds = ds[0:1] 163 | for d in ds[1:] : 164 | beg0, len0 = _ds[-1] 165 | beg1, len1 = d 166 | assert beg0+len0 <= beg1 167 | if beg0+len0==beg1: 168 | _ds[-1] = beg0, len0+len1 169 | else: 170 | _ds.append(d) 171 | return _ds 172 | 173 | def __getitem__(self, idx) : 174 | ds = [] 175 | for f in self.files: 176 | if idx in f: 177 | try: 178 | ds.append(f[idx]) 179 | except BTFileError as error: 180 | pass 181 | 182 | if len(ds) <=1 : 183 | return ds 184 | else : 185 | _ds = ds[0:1] 186 | for d in ds[1:] : 187 | beg0, dat0 = _ds[-1] 188 | beg1, dat1 = d 189 | assert beg0+len(dat0) <= beg1 190 | if beg0+len(dat0)==beg1: 191 | _ds[-1] = beg0, dat0+dat1 192 | else: 193 | _ds.append(d) 194 | return _ds 195 | 196 | def __setitem__(self, idx, data) : 197 | for f in self.files: 198 | if idx in f : 199 | f[idx] = data 200 | 201 | def __iter__(self): 202 | for idx in xrange(len(self)) : 203 | yield idx, self[idx] 204 | 205 | def __contains__(self, idx) : 206 | return any(idx in f for f in self.files) 207 | 208 | def __len__(self): 209 | return self.pieceNum 210 | 211 | def __str__(self): 212 | return '\n'.join(str(f) for f in self.files) 213 | 214 | class BTFileManager : 215 | ''' 216 | ''' 217 | 218 | slice_size = 2**14 219 | 220 | def __init__(self, btm): 221 | self.btm = btm 222 | self.config = btm.config 223 | 
224 | metainfo = self.config.metainfo 225 | self.download_list = self.config.downloadList 226 | 227 | self.metainfo = metainfo 228 | self.piece_length = metainfo.piece_length 229 | self.pieceNum = metainfo.pieces_size 230 | 231 | self.btfiles = BTFiles(metainfo, self.btm.app.save_dir, self.config.downloadList) 232 | self.bitfieldHave, self.bitfieldNeed = self.btfiles.getBitfield() 233 | log.msg("Saving to: {0}".format(self.btm.app.save_dir)) 234 | self.buffer_reserved = {} 235 | self.buffer_max_size = 100 * 2**20 / self.piece_length 236 | 237 | def start(self) : 238 | self.status = 'started' 239 | 240 | self.buffer = {} 241 | self.buffer_record = [] 242 | self.buffer_dirty = {} 243 | 244 | reactor.callLater(10, self.deamon_write) 245 | reactor.callLater(10, self.deamon_read) 246 | 247 | def stop(self) : 248 | for idx, data in self.buffer_dirty.iteritems(): 249 | self.write(idx, data) 250 | 251 | self.buffer_dirty.clear() 252 | 253 | self.buffer.clear() 254 | 255 | del self.buffer_record[:] 256 | 257 | self.status = 'stopped' 258 | 259 | @defer.inlineCallbacks 260 | def deamon_write(self): 261 | while self.status == 'started': 262 | self.__thread_write() 263 | yield sleep(10) 264 | 265 | def __thread_write(self): 266 | if not hasattr(self, '__thread_write_status') : 267 | self.__thread_write_status = 'stopped' 268 | 269 | if self.__thread_write_status == 'running' : 270 | return 271 | 272 | if not self.buffer_dirty : 273 | return 274 | 275 | bfd = self.buffer_dirty.copy() 276 | 277 | def call_in_thread(): 278 | # Writing to disk 279 | for idx in sorted(bfd.keys()) : 280 | data = bfd[idx] 281 | self.write(idx, data) 282 | reactor.callFromThread(call_from_thread) 283 | 284 | def call_from_thread(): 285 | self.__thread_write_status = 'stopped' 286 | for idx, data in bfd.iteritems() : 287 | if data is self.buffer_dirty[idx] : 288 | del self.buffer_dirty[idx] 289 | 290 | if self.__thread_write_status == 'stopped' : 291 | self.__thread_write_status = 'running' 292 | 
reactor.callInThread(call_in_thread) 293 | 294 | @defer.inlineCallbacks 295 | def deamon_read(self): 296 | while self.status == 'started': 297 | size = len(self.buffer) 298 | if size > self.buffer_max_size : 299 | remove_count = size - self.buffer_max_size 300 | remove_count += self.buffer_max_size / 5 301 | for idx in self.buffer_record[:remove_count] : 302 | del self.buffer[idx] 303 | del self.buffer_record[:remove_count] 304 | 305 | yield sleep(10) 306 | 307 | def readPiece(self, index) : 308 | if not (0 <= index < self.pieceNum) : 309 | raise BTFileError('index is out of range') 310 | if not self.bitfieldHave[index] : 311 | raise BTFileError('index is not downloaded') 312 | 313 | if index in self.buffer : 314 | data = self.buffer[index] 315 | self.buffer_record.remove(index) 316 | self.buffer_record.append(index) 317 | return data 318 | 319 | else: 320 | for idx in [index-1, index, index+1] : 321 | if 0 <= idx < self.pieceNum and idx not in self.buffer : 322 | data = self.read(idx) 323 | assert data 324 | self.buffer[idx] = data 325 | self.buffer_record.append(idx) 326 | 327 | data = self.readPiece(index) 328 | 329 | return data 330 | 331 | def writePiece(self, index, piece) : 332 | if not (0 <= index < self.pieceNum) : 333 | raise BTFileError('index is out of range') 334 | if not self.bitfieldNeed[index] : 335 | raise BTFileError('index is not need') 336 | 337 | if not self.btfiles.doHashTest(index, piece): 338 | raise BTHashTestError() 339 | 340 | else: 341 | self.bitfieldHave[index] = 1 342 | self.bitfieldNeed[index] = 0 343 | if index in self.buffer : 344 | self.buffer[index] = piece 345 | 346 | self.buffer_dirty[index] = piece 347 | return True 348 | 349 | def read(self, index): 350 | if index in self.buffer_dirty: 351 | return self.buffer_dirty[index] 352 | elif index in self.buffer_reserved : 353 | return self.buffer_reserved[index] 354 | 355 | data_list = self.btfiles[index] 356 | 357 | if len(data_list) == 1 : 358 | assert data_list[0][0] == 0 359 | 
return data_list[0][1] 360 | else: 361 | assert False 362 | return data_list 363 | 364 | def write(self, index, data) : 365 | ds = self.btfiles.write(index, data) 366 | if len(ds) > 1 : 367 | self.buffer_reserved[index] = data 368 | elif not ds : 369 | assert False 370 | 371 | def __iter__(self): 372 | return self.btfiles.__iter__() 373 | -------------------------------------------------------------------------------- /distribute_setup.py: -------------------------------------------------------------------------------- 1 | #!python 2 | """Bootstrap distribute installation 3 | 4 | If you want to use setuptools in your package's setup.py, just include this 5 | file in the same directory with it, and add this to the top of your setup.py:: 6 | 7 | from distribute_setup import use_setuptools 8 | use_setuptools() 9 | 10 | If you want to require a specific version of setuptools, set a download 11 | mirror, or use an alternate download directory, you can do so by supplying 12 | the appropriate options to ``use_setuptools()``. 13 | 14 | This file can also be run as a script to install or upgrade setuptools. 
15 | """ 16 | import os 17 | import shutil 18 | import sys 19 | import time 20 | import fnmatch 21 | import tempfile 22 | import tarfile 23 | import optparse 24 | 25 | from distutils import log 26 | 27 | try: 28 | from site import USER_SITE 29 | except ImportError: 30 | USER_SITE = None 31 | 32 | try: 33 | import subprocess 34 | 35 | def _python_cmd(*args): 36 | args = (sys.executable,) + args 37 | return subprocess.call(args) == 0 38 | 39 | except ImportError: 40 | # will be used for python 2.3 41 | def _python_cmd(*args): 42 | args = (sys.executable,) + args 43 | # quoting arguments if windows 44 | if sys.platform == 'win32': 45 | def quote(arg): 46 | if ' ' in arg: 47 | return '"%s"' % arg 48 | return arg 49 | args = [quote(arg) for arg in args] 50 | return os.spawnl(os.P_WAIT, sys.executable, *args) == 0 51 | 52 | DEFAULT_VERSION = "0.6.30" 53 | DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/" 54 | SETUPTOOLS_FAKED_VERSION = "0.6c11" 55 | 56 | SETUPTOOLS_PKG_INFO = """\ 57 | Metadata-Version: 1.0 58 | Name: setuptools 59 | Version: %s 60 | Summary: xxxx 61 | Home-page: xxx 62 | Author: xxx 63 | Author-email: xxx 64 | License: xxx 65 | Description: xxx 66 | """ % SETUPTOOLS_FAKED_VERSION 67 | 68 | 69 | def _install(tarball, install_args=()): 70 | # extracting the tarball 71 | tmpdir = tempfile.mkdtemp() 72 | log.warn('Extracting in %s', tmpdir) 73 | old_wd = os.getcwd() 74 | try: 75 | os.chdir(tmpdir) 76 | tar = tarfile.open(tarball) 77 | _extractall(tar) 78 | tar.close() 79 | 80 | # going in the directory 81 | subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) 82 | os.chdir(subdir) 83 | log.warn('Now working in %s', subdir) 84 | 85 | # installing 86 | log.warn('Installing Distribute') 87 | if not _python_cmd('setup.py', 'install', *install_args): 88 | log.warn('Something went wrong during the installation.') 89 | log.warn('See the error message above.') 90 | # exitcode will be 2 91 | return 2 92 | finally: 93 | os.chdir(old_wd) 94 | 
shutil.rmtree(tmpdir) 95 | 96 | 97 | def _build_egg(egg, tarball, to_dir): 98 | # extracting the tarball 99 | tmpdir = tempfile.mkdtemp() 100 | log.warn('Extracting in %s', tmpdir) 101 | old_wd = os.getcwd() 102 | try: 103 | os.chdir(tmpdir) 104 | tar = tarfile.open(tarball) 105 | _extractall(tar) 106 | tar.close() 107 | 108 | # going in the directory 109 | subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) 110 | os.chdir(subdir) 111 | log.warn('Now working in %s', subdir) 112 | 113 | # building an egg 114 | log.warn('Building a Distribute egg in %s', to_dir) 115 | _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) 116 | 117 | finally: 118 | os.chdir(old_wd) 119 | shutil.rmtree(tmpdir) 120 | # returning the result 121 | log.warn(egg) 122 | if not os.path.exists(egg): 123 | raise IOError('Could not build the egg.') 124 | 125 | 126 | def _do_download(version, download_base, to_dir, download_delay): 127 | egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg' 128 | % (version, sys.version_info[0], sys.version_info[1])) 129 | if not os.path.exists(egg): 130 | tarball = download_setuptools(version, download_base, 131 | to_dir, download_delay) 132 | _build_egg(egg, tarball, to_dir) 133 | sys.path.insert(0, egg) 134 | import setuptools 135 | setuptools.bootstrap_install_from = egg 136 | 137 | 138 | def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, 139 | to_dir=os.curdir, download_delay=15, no_fake=True): 140 | # making sure we use the absolute path 141 | to_dir = os.path.abspath(to_dir) 142 | was_imported = 'pkg_resources' in sys.modules or \ 143 | 'setuptools' in sys.modules 144 | try: 145 | try: 146 | import pkg_resources 147 | if not hasattr(pkg_resources, '_distribute'): 148 | if not no_fake: 149 | _fake_setuptools() 150 | raise ImportError 151 | except ImportError: 152 | return _do_download(version, download_base, to_dir, download_delay) 153 | try: 154 | pkg_resources.require("distribute>=" + version) 155 | return 156 | except 
pkg_resources.VersionConflict: 157 | e = sys.exc_info()[1] 158 | if was_imported: 159 | sys.stderr.write( 160 | "The required version of distribute (>=%s) is not available,\n" 161 | "and can't be installed while this script is running. Please\n" 162 | "install a more recent version first, using\n" 163 | "'easy_install -U distribute'." 164 | "\n\n(Currently using %r)\n" % (version, e.args[0])) 165 | sys.exit(2) 166 | else: 167 | del pkg_resources, sys.modules['pkg_resources'] # reload ok 168 | return _do_download(version, download_base, to_dir, 169 | download_delay) 170 | except pkg_resources.DistributionNotFound: 171 | return _do_download(version, download_base, to_dir, 172 | download_delay) 173 | finally: 174 | if not no_fake: 175 | _create_fake_setuptools_pkg_info(to_dir) 176 | 177 | 178 | def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, 179 | to_dir=os.curdir, delay=15): 180 | """Download distribute from a specified location and return its filename 181 | 182 | `version` should be a valid distribute version number that is available 183 | as an egg for download under the `download_base` URL (which should end 184 | with a '/'). `to_dir` is the directory where the egg will be downloaded. 185 | `delay` is the number of seconds to pause before an actual download 186 | attempt. 187 | """ 188 | # making sure we use the absolute path 189 | to_dir = os.path.abspath(to_dir) 190 | try: 191 | from urllib.request import urlopen 192 | except ImportError: 193 | from urllib2 import urlopen 194 | tgz_name = "distribute-%s.tar.gz" % version 195 | url = download_base + tgz_name 196 | saveto = os.path.join(to_dir, tgz_name) 197 | src = dst = None 198 | if not os.path.exists(saveto): # Avoid repeated downloads 199 | try: 200 | log.warn("Downloading %s", url) 201 | src = urlopen(url) 202 | # Read/write all in one block, so we don't create a corrupt file 203 | # if the download is interrupted. 
204 | data = src.read() 205 | dst = open(saveto, "wb") 206 | dst.write(data) 207 | finally: 208 | if src: 209 | src.close() 210 | if dst: 211 | dst.close() 212 | return os.path.realpath(saveto) 213 | 214 | 215 | def _no_sandbox(function): 216 | def __no_sandbox(*args, **kw): 217 | try: 218 | from setuptools.sandbox import DirectorySandbox 219 | if not hasattr(DirectorySandbox, '_old'): 220 | def violation(*args): 221 | pass 222 | DirectorySandbox._old = DirectorySandbox._violation 223 | DirectorySandbox._violation = violation 224 | patched = True 225 | else: 226 | patched = False 227 | except ImportError: 228 | patched = False 229 | 230 | try: 231 | return function(*args, **kw) 232 | finally: 233 | if patched: 234 | DirectorySandbox._violation = DirectorySandbox._old 235 | del DirectorySandbox._old 236 | 237 | return __no_sandbox 238 | 239 | 240 | def _patch_file(path, content): 241 | """Will backup the file then patch it""" 242 | existing_content = open(path).read() 243 | if existing_content == content: 244 | # already patched 245 | log.warn('Already patched.') 246 | return False 247 | log.warn('Patching...') 248 | _rename_path(path) 249 | f = open(path, 'w') 250 | try: 251 | f.write(content) 252 | finally: 253 | f.close() 254 | return True 255 | 256 | _patch_file = _no_sandbox(_patch_file) 257 | 258 | 259 | def _same_content(path, content): 260 | return open(path).read() == content 261 | 262 | 263 | def _rename_path(path): 264 | new_name = path + '.OLD.%s' % time.time() 265 | log.warn('Renaming %s to %s', path, new_name) 266 | os.rename(path, new_name) 267 | return new_name 268 | 269 | 270 | def _remove_flat_installation(placeholder): 271 | if not os.path.isdir(placeholder): 272 | log.warn('Unkown installation at %s', placeholder) 273 | return False 274 | found = False 275 | for file in os.listdir(placeholder): 276 | if fnmatch.fnmatch(file, 'setuptools*.egg-info'): 277 | found = True 278 | break 279 | if not found: 280 | log.warn('Could not locate 
setuptools*.egg-info') 281 | return 282 | 283 | log.warn('Moving elements out of the way...') 284 | pkg_info = os.path.join(placeholder, file) 285 | if os.path.isdir(pkg_info): 286 | patched = _patch_egg_dir(pkg_info) 287 | else: 288 | patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO) 289 | 290 | if not patched: 291 | log.warn('%s already patched.', pkg_info) 292 | return False 293 | # now let's move the files out of the way 294 | for element in ('setuptools', 'pkg_resources.py', 'site.py'): 295 | element = os.path.join(placeholder, element) 296 | if os.path.exists(element): 297 | _rename_path(element) 298 | else: 299 | log.warn('Could not find the %s element of the ' 300 | 'Setuptools distribution', element) 301 | return True 302 | 303 | _remove_flat_installation = _no_sandbox(_remove_flat_installation) 304 | 305 | 306 | def _after_install(dist): 307 | log.warn('After install bootstrap.') 308 | placeholder = dist.get_command_obj('install').install_purelib 309 | _create_fake_setuptools_pkg_info(placeholder) 310 | 311 | 312 | def _create_fake_setuptools_pkg_info(placeholder): 313 | if not placeholder or not os.path.exists(placeholder): 314 | log.warn('Could not find the install location') 315 | return 316 | pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1]) 317 | setuptools_file = 'setuptools-%s-py%s.egg-info' % \ 318 | (SETUPTOOLS_FAKED_VERSION, pyver) 319 | pkg_info = os.path.join(placeholder, setuptools_file) 320 | if os.path.exists(pkg_info): 321 | log.warn('%s already exists', pkg_info) 322 | return 323 | 324 | log.warn('Creating %s', pkg_info) 325 | try: 326 | f = open(pkg_info, 'w') 327 | except EnvironmentError: 328 | log.warn("Don't have permissions to write %s, skipping", pkg_info) 329 | return 330 | try: 331 | f.write(SETUPTOOLS_PKG_INFO) 332 | finally: 333 | f.close() 334 | 335 | pth_file = os.path.join(placeholder, 'setuptools.pth') 336 | log.warn('Creating %s', pth_file) 337 | f = open(pth_file, 'w') 338 | try: 339 | 
f.write(os.path.join(os.curdir, setuptools_file)) 340 | finally: 341 | f.close() 342 | 343 | _create_fake_setuptools_pkg_info = _no_sandbox( 344 | _create_fake_setuptools_pkg_info 345 | ) 346 | 347 | 348 | def _patch_egg_dir(path): 349 | # let's check if it's already patched 350 | pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO') 351 | if os.path.exists(pkg_info): 352 | if _same_content(pkg_info, SETUPTOOLS_PKG_INFO): 353 | log.warn('%s already patched.', pkg_info) 354 | return False 355 | _rename_path(path) 356 | os.mkdir(path) 357 | os.mkdir(os.path.join(path, 'EGG-INFO')) 358 | pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO') 359 | f = open(pkg_info, 'w') 360 | try: 361 | f.write(SETUPTOOLS_PKG_INFO) 362 | finally: 363 | f.close() 364 | return True 365 | 366 | _patch_egg_dir = _no_sandbox(_patch_egg_dir) 367 | 368 | 369 | def _before_install(): 370 | log.warn('Before install bootstrap.') 371 | _fake_setuptools() 372 | 373 | 374 | def _under_prefix(location): 375 | if 'install' not in sys.argv: 376 | return True 377 | args = sys.argv[sys.argv.index('install') + 1:] 378 | for index, arg in enumerate(args): 379 | for option in ('--root', '--prefix'): 380 | if arg.startswith('%s=' % option): 381 | top_dir = arg.split('root=')[-1] 382 | return location.startswith(top_dir) 383 | elif arg == option: 384 | if len(args) > index: 385 | top_dir = args[index + 1] 386 | return location.startswith(top_dir) 387 | if arg == '--user' and USER_SITE is not None: 388 | return location.startswith(USER_SITE) 389 | return True 390 | 391 | 392 | def _fake_setuptools(): 393 | log.warn('Scanning installed packages') 394 | try: 395 | import pkg_resources 396 | except ImportError: 397 | # we're cool 398 | log.warn('Setuptools or Distribute does not seem to be installed.') 399 | return 400 | ws = pkg_resources.working_set 401 | try: 402 | setuptools_dist = ws.find( 403 | pkg_resources.Requirement.parse('setuptools', replacement=False) 404 | ) 405 | except TypeError: 406 | # old 
distribute API 407 | setuptools_dist = ws.find( 408 | pkg_resources.Requirement.parse('setuptools') 409 | ) 410 | 411 | if setuptools_dist is None: 412 | log.warn('No setuptools distribution found') 413 | return 414 | # detecting if it was already faked 415 | setuptools_location = setuptools_dist.location 416 | log.warn('Setuptools installation detected at %s', setuptools_location) 417 | 418 | # if --root or --preix was provided, and if 419 | # setuptools is not located in them, we don't patch it 420 | if not _under_prefix(setuptools_location): 421 | log.warn('Not patching, --root or --prefix is installing Distribute' 422 | ' in another location') 423 | return 424 | 425 | # let's see if its an egg 426 | if not setuptools_location.endswith('.egg'): 427 | log.warn('Non-egg installation') 428 | res = _remove_flat_installation(setuptools_location) 429 | if not res: 430 | return 431 | else: 432 | log.warn('Egg installation') 433 | pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO') 434 | if (os.path.exists(pkg_info) and 435 | _same_content(pkg_info, SETUPTOOLS_PKG_INFO)): 436 | log.warn('Already patched.') 437 | return 438 | log.warn('Patching...') 439 | # let's create a fake egg replacing setuptools one 440 | res = _patch_egg_dir(setuptools_location) 441 | if not res: 442 | return 443 | log.warn('Patching complete.') 444 | _relaunch() 445 | 446 | 447 | def _relaunch(): 448 | log.warn('Relaunching...') 449 | # we have to relaunch the process 450 | # pip marker to avoid a relaunch bug 451 | _cmd1 = ['-c', 'install', '--single-version-externally-managed'] 452 | _cmd2 = ['-c', 'install', '--record'] 453 | if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2: 454 | sys.argv[0] = 'setup.py' 455 | args = [sys.executable] + sys.argv 456 | sys.exit(subprocess.call(args)) 457 | 458 | 459 | def _extractall(self, path=".", members=None): 460 | """Extract all members from the archive to the current working 461 | directory and set owner, modification time and 
permissions on 462 | directories afterwards. `path' specifies a different directory 463 | to extract to. `members' is optional and must be a subset of the 464 | list returned by getmembers(). 465 | """ 466 | import copy 467 | import operator 468 | from tarfile import ExtractError 469 | directories = [] 470 | 471 | if members is None: 472 | members = self 473 | 474 | for tarinfo in members: 475 | if tarinfo.isdir(): 476 | # Extract directories with a safe mode. 477 | directories.append(tarinfo) 478 | tarinfo = copy.copy(tarinfo) 479 | tarinfo.mode = 448 # decimal for oct 0700 480 | self.extract(tarinfo, path) 481 | 482 | # Reverse sort directories. 483 | if sys.version_info < (2, 4): 484 | def sorter(dir1, dir2): 485 | return cmp(dir1.name, dir2.name) 486 | directories.sort(sorter) 487 | directories.reverse() 488 | else: 489 | directories.sort(key=operator.attrgetter('name'), reverse=True) 490 | 491 | # Set correct owner, mtime and filemode on directories. 492 | for tarinfo in directories: 493 | dirpath = os.path.join(path, tarinfo.name) 494 | try: 495 | self.chown(tarinfo, dirpath) 496 | self.utime(tarinfo, dirpath) 497 | self.chmod(tarinfo, dirpath) 498 | except ExtractError: 499 | e = sys.exc_info()[1] 500 | if self.errorlevel > 1: 501 | raise 502 | else: 503 | self._dbg(1, "tarfile: %s" % e) 504 | 505 | 506 | def _build_install_args(options): 507 | """ 508 | Build the arguments to 'python setup.py install' on the distribute package 509 | """ 510 | install_args = [] 511 | if options.user_install: 512 | if sys.version_info < (2, 6): 513 | log.warn("--user requires Python 2.6 or later") 514 | raise SystemExit(1) 515 | install_args.append('--user') 516 | return install_args 517 | 518 | def _parse_args(): 519 | """ 520 | Parse the command line for options 521 | """ 522 | parser = optparse.OptionParser() 523 | parser.add_option( 524 | '--user', dest='user_install', action='store_true', default=False, 525 | help='install in user site package (requires Python 2.6 or 
later)') 526 | parser.add_option( 527 | '--download-base', dest='download_base', metavar="URL", 528 | default=DEFAULT_URL, 529 | help='alternative URL from where to download the distribute package') 530 | options, args = parser.parse_args() 531 | # positional arguments are ignored 532 | return options 533 | 534 | def main(version=DEFAULT_VERSION): 535 | """Install or upgrade setuptools and EasyInstall""" 536 | options = _parse_args() 537 | tarball = download_setuptools(download_base=options.download_base) 538 | return _install(tarball, _build_install_args(options)) 539 | 540 | if __name__ == '__main__': 541 | sys.exit(main()) 542 | -------------------------------------------------------------------------------- /autonomotorrent/DHTProtocol.py: -------------------------------------------------------------------------------- 1 | """Provides basic DHT functionality using Twisted 2 | """ 3 | import hashlib 4 | import struct 5 | import socket 6 | import time 7 | import os 8 | import re 9 | import pickle 10 | import bz2 11 | 12 | from twisted.internet import reactor 13 | from twisted.internet import protocol, defer 14 | from twisted.python import log 15 | 16 | from bencode import bencode, bdecode, BTError 17 | 18 | def sleep(timeout): 19 | df = defer.Deferred() 20 | reactor.callLater(timeout, df.callback, None) 21 | return df 22 | 23 | @defer.inlineCallbacks 24 | def dns_resolve(addr): 25 | ip, port = addr 26 | if re.match(r'^(\d+\.){3}\d+$', ip): 27 | defer.returnValue(addr) 28 | else: 29 | try: 30 | ip = yield reactor.resolve(ip) 31 | addr = ip, port 32 | defer.returnValue(addr) 33 | except Exception as err : 34 | raise DHTError(err) 35 | 36 | def decodeIPAddr(addr) : 37 | if type(addr) is not str: 38 | raise TypeError('addr should be a string') 39 | 40 | if len(addr) != 6: 41 | raise ValueError('len(addr) == 6') 42 | 43 | ip = socket.inet_ntoa(addr[0:4]) 44 | port, = struct.unpack('!H', addr[4:6]) 45 | return ip, port 46 | 47 | def encodeIPAddr(addr): 48 | ''' 49 | 
@addr : (ip, port) 50 | ''' 51 | ip, port = addr 52 | s_ip = socket.inet_aton(ip) 53 | s_port = struct.pack('!H', port) 54 | return s_ip + s_port 55 | 56 | def decodeCompactNodes(compNodes): 57 | if type(compNodes) is not str: 58 | raise TypeError('compNodes should be a string') 59 | 60 | if len(compNodes) % 26 != 0 : 61 | raise ValueError('len(compNodes) % 26 != 0') 62 | 63 | nodes = [] 64 | for i in xrange(0, len(compNodes), 26) : 65 | dat = compNodes[i:(i+26)] 66 | _id = dat[0:20] 67 | ip, port = decodeIPAddr(dat[20:26]) 68 | nodes.append((_id, (ip, port))) 69 | return nodes 70 | 71 | def encodeCompactNodes(nodes): 72 | ''' 73 | @nodes: [(id, (ip, port)), (id, (ip, port)), ...] 74 | ''' 75 | return ''.join(_id + encodeIPAddr(addr) for _id, addr in nodes) 76 | 77 | def decodeCompactPeers(compPeers): 78 | return [decodeIPAddr(addr) for addr in compPeers] 79 | 80 | def encodeCompactPeers(peers) : 81 | ''' 82 | @peers: [(ip, port), (ip, port), ...] 83 | ''' 84 | return [encodeIPAddr(addr) for addr in peers] 85 | 86 | class RoutingTable : 87 | timeout = 15 * 60 # 15 min 88 | 89 | def __init__(self): 90 | self.my_node_id = hashlib.sha1(os.urandom(160)).digest() 91 | self.nodes_dict = {} 92 | self.bucket = [] 93 | self.k_value = 8 94 | 95 | def doStart(self, dht_protocol): 96 | self.dht = dht_protocol 97 | dht = self.dht 98 | nodes_dict = self.nodes_dict 99 | self.nodes_dict = {} 100 | del self.bucket[:] 101 | 102 | [self.addNode(addr) for addr in nodes_dict.itervalues()] 103 | 104 | self.autoFillRoutingTable() 105 | 106 | def doStop(self): 107 | self.dht = None 108 | 109 | @defer.inlineCallbacks 110 | def autoFillRoutingTable(self): 111 | if len(self.nodes_dict) > 160 * 6: 112 | return 113 | 114 | query_history = set() 115 | while len(self.nodes_dict) < 160*6: 116 | for _id, addr in self.nodes_dict.iteritems(): 117 | if _id not in query_history: 118 | query_history.add(_id) 119 | break 120 | else: 121 | break 122 | 123 | try: 124 | _id, nodes = yield 
self.dht.find_node(addr, _id) 125 | except DHTError: 126 | pass 127 | else: 128 | [(yield fd) for fd in 129 | [self.addNode(addr) for _id, addr in nodes]] 130 | 131 | @defer.inlineCallbacks 132 | def addNode(self, addr): 133 | try: 134 | _id = yield self.dht.ping(addr) 135 | except DHTError as err: 136 | defer.returnValue(False) 137 | else: 138 | self.addGoodNode(_id, addr) 139 | defer.returnValue(True) 140 | 141 | if _id in self.nodes_dict : 142 | self.updateNode(_id) 143 | defer.returnValue(True) 144 | 145 | @defer.inlineCallbacks 146 | def addGoodNode(self, node_id, node_addr): 147 | if node_id in self.nodes_dict : 148 | self.updateNode(node_id) 149 | return 150 | 151 | if len(self.nodes_dict) > 160 * 6: # too many nodes in the table 152 | return 153 | 154 | self.nodes_dict[node_id] = node_addr 155 | self.__addToBucket(node_id) 156 | 157 | yield sleep(15*60) 158 | while (node_id in self.nodes_dict and self.dht) : 159 | try: 160 | _id = yield self.dht.ping(self.nodes_dict[node_id]) 161 | assert node_id == _id 162 | except DHTError as err: 163 | self.removeNode(node_id) 164 | break 165 | else: 166 | self.updateNode(node_id) 167 | yield sleep(15*60) 168 | 169 | def updateNode(self, node_id): 170 | if node_id not in self.nodes_dict: 171 | return 172 | 173 | self.__addToBucket(node_id) 174 | 175 | def __addToBucket(self, node_id): 176 | ''' 177 | 0 1 2 3 178 | [[159],[158],[157], [156~0]] 179 | ''' 180 | if len(self.bucket) == 0: 181 | self.bucket.append([node_id]) 182 | 183 | idx = 159 - self.__distance(node_id) 184 | 185 | if idx == 160: 186 | return 187 | 188 | b_size = len(self.bucket) 189 | 190 | if (b_size-1) <= idx < 159: 191 | buk = self.bucket[-1] 192 | try: 193 | buk.remove(node_id) 194 | except ValueError: 195 | pass 196 | buk.append(node_id) 197 | 198 | if len(buk) > self.k_value: 199 | _buk, buk_ = [], [] 200 | for node in buk: 201 | _idx = 159 - self.__distance(node) 202 | if _idx == (b_size-1): 203 | _buk.append(node) 204 | else: 205 | 
buk_.append(node) 206 | self.bucket[-1:] = [_buk, buk_] 207 | else: 208 | buk = self.bucket[idx] 209 | try: 210 | buk.remove(node_id) 211 | except ValueError: 212 | pass 213 | buk.append(node_id) 214 | 215 | if len(buk) > self.k_value: 216 | del self.nodes_dict[buk[0]] 217 | del buk[0] 218 | 219 | def __distance(self, node_id): 220 | for i in range(20): 221 | val = ord(self.my_node_id[i]) ^ ord(node_id[i]) 222 | if val: 223 | for j in range(8): 224 | if val & (0x80>>j): 225 | return 159 - (i*8 + j) 226 | else: 227 | return -1 228 | 229 | def __removeFromBucket(self, node_id): 230 | if len(self.bucket) ==0 : 231 | return 232 | 233 | idx = 159 - self.__distance(node_id) 234 | 235 | if idx >= len(self.bucket): 236 | idx = -1 237 | 238 | try: 239 | self.bucket[idx].remove(node_id) 240 | except ValueError: 241 | pass 242 | 243 | def __findFromBucket(self, node_id): 244 | if len(self.bucket) == 0: 245 | return [] 246 | 247 | b_size = len(self.bucket) 248 | 249 | idx = 159 - self.__distance(node_id) 250 | if idx >= b_size: 251 | idx = b_size - 1 252 | 253 | result = self.bucket[idx][:] 254 | 255 | if len(result) >= self.k_value: 256 | return result 257 | 258 | for i in range(1, 160): 259 | idx_ = idx + 1 260 | if idx_ < b_size: 261 | size_need = self.k_value - len(result) 262 | result += self.bucket[idx_][:size_need] 263 | if len(result) >= self.k_value: 264 | break 265 | 266 | _idx = idx - 1 267 | if _idx >= 0 : 268 | size_need = self.k_value - len(result) 269 | result += self.bucket[_idx][:size_need] 270 | if len(result) >= self.k_value: 271 | break 272 | 273 | return result 274 | 275 | def removeNode(self, node_id): 276 | ''' 277 | node is bad node and node_id is already in the table 278 | ''' 279 | if node_id in self.nodes_dict: 280 | del self.nodes_dict[node_id] 281 | self.__removeFromBucket(node_id) 282 | 283 | def queryNode(self, node_id): 284 | ''' 285 | find node of node_id 286 | if node_id in the table, return it 287 | otherwise, return the closest nodes 288 | 
return: [(id,(ip, port)), ...] 289 | ''' 290 | return [(_id, self.nodes_dict[_id]) for _id in self.__findFromBucket(node_id)] 291 | 292 | def __contains__(self, node_id): 293 | return node_id in self.nodes_dict 294 | 295 | def __getitem__(self, node_id): 296 | return self.nodes_dict[node_id] 297 | 298 | class DHTError (Exception): 299 | pass 300 | 301 | class DHTProtocolBase (protocol.DatagramProtocol) : 302 | timeout = 15 # seconds 303 | 304 | def __init__(self): 305 | self.my_node_id = os.urandom(20) 306 | 307 | self.transaction = {} 308 | self.recieved_tokens = {} 309 | self.sent_tokens = {} 310 | 311 | def startProtocol(self): 312 | pass 313 | 314 | def stopProtocol(self): 315 | pass 316 | 317 | @defer.inlineCallbacks 318 | def ping(self, node_addr, timeout=None): 319 | args = {'id' : self.my_node_id} 320 | data = yield self.__KRPC_do_query(node_addr, 'ping', args, timeout) 321 | node_id = data['id'] 322 | defer.returnValue(node_id) 323 | 324 | @defer.inlineCallbacks 325 | def find_node(self, node_addr, target_id, timeout=None): 326 | args = {'id' : self.my_node_id, 327 | 'target' : target_id} 328 | data = yield self.__KRPC_do_query(node_addr, 'find_node', args, timeout) 329 | node_id = data['id'] 330 | nodes = decodeCompactNodes(data['nodes']) 331 | defer.returnValue((node_id, nodes)) 332 | 333 | @defer.inlineCallbacks 334 | def get_peers(self, node_addr, info_hash, timeout=None): 335 | args = {'id' : self.my_node_id, 336 | 'info_hash' : info_hash} 337 | data = yield self.__KRPC_do_query(node_addr, 'get_peers', args, timeout) 338 | node_id = data['id'] 339 | 340 | if 'token' in data: 341 | self.recieved_tokens[node_addr] = data['token'] 342 | 343 | def token_timeout(): 344 | if node_addr in self.recieved_tokens: 345 | del self.recieved_tokens[node_addr] 346 | reactor.callLater(600, token_timeout) # 10 min life time 347 | 348 | if 'values' in data : 349 | try: 350 | peers = decodeCompactPeers(data['values']) 351 | except (TypeError, ValueError) as error: 352 | 
raise DHTError(error) 353 | else: 354 | defer.returnValue((node_id, 'values', peers)) 355 | 356 | elif 'nodes' in data : 357 | try: 358 | nodes = decodeCompactNodes(data['nodes']) 359 | except (TypeError, ValueError) as error: 360 | raise DHTError(error) 361 | else: 362 | defer.returnValue((node_id, 'nodes', nodes)) 363 | else: 364 | assert False 365 | 366 | @defer.inlineCallbacks 367 | def announce_peer(self, node_addr, info_hash, port, timeout=None): 368 | if node_addr not in self.recieved_tokens: 369 | node_id, _type, peers = yield self.get_peers(node_addr, info_hash) 370 | 371 | token = self.recieved_tokens.get(node_addr, '') 372 | 373 | args = {'id' : self.my_node_id, 374 | 'info_hash' : info_hash, 375 | 'port' : port, 376 | 'token' : token} 377 | 378 | data = yield self.__KRPC_do_query(node_addr, 'announce_peer', args, timeout) 379 | node_id = data['id'] 380 | 381 | defer.returnValue(node_id) 382 | 383 | @defer.inlineCallbacks 384 | def __KRPC_do_query(self, node_addr, qtype, args, timeout=None) : 385 | t_id = os.urandom(20) 386 | self._KRPC_send_query(node_addr, t_id, qtype, args) 387 | 388 | df = defer.Deferred() 389 | self.transaction[t_id] = df 390 | 391 | @defer.inlineCallbacks 392 | def timeout_check(timeout): 393 | if timeout is None: timeout = self.timeout 394 | yield sleep(timeout) 395 | if t_id in self.transaction: 396 | df.errback(DHTError((0, 'timeout: "{}" to {}'.format(qtype, node_addr)))) 397 | 398 | timeout_check(timeout) 399 | 400 | try: 401 | data = yield df 402 | finally: 403 | del self.transaction[t_id] 404 | 405 | defer.returnValue(data) 406 | 407 | def __KRPC_fire_response(self, t_id, data, node_addr): 408 | if t_id in self.transaction: 409 | df = self.transaction[t_id] 410 | df.callback(data) 411 | else: # timeout 412 | pass 413 | 414 | def __KRPC_fire_error(self, t_id, error, node_addr): 415 | if t_id in self.transaction: 416 | df = self.transaction[t_id] 417 | df.errback(DHTError(*error)) 418 | else: # timeout 419 | pass 420 | 421 | 
def _KRPC_send_query(self, node_addr, t_id, qtype, args): 422 | data = {'t' : t_id, 423 | 'y' : 'q', 424 | 'q' : qtype, 425 | 'a' : args } 426 | self.writeDatagram(bencode(data), node_addr) 427 | 428 | def _KRPC_send_response(self, node_addr, t_id, args): 429 | response = {'t' : t_id, 430 | 'y' : 'r', 431 | 'r' : args} 432 | self.writeDatagram(bencode(response), node_addr) 433 | 434 | def _KRPC_send_error(self, node_addr, t_id, error): 435 | data = {'t' : t_id, 436 | 'y' : 'e', 437 | 'e' : error} 438 | self.writeDatagram(bencode(data), node_addr) 439 | 440 | def _KRPC_recieve_response(self, data, node_addr): 441 | assert data['y'] == 'r' 442 | t_id = data['t'] 443 | args = data['r'] 444 | node_id = args['id'] 445 | 446 | self.__KRPC_fire_response(t_id, data['r'], node_addr) 447 | 448 | def _KRPC_recieve_error(self, data, node_addr): 449 | assert data['y'] == 'e' 450 | t_id = data['t'] 451 | self.__KRPC_fire_error(t_id, data['e'], node_addr) 452 | 453 | def _KRPC_recieve_Query(self, data, node_addr): 454 | assert data['y'] == 'q' 455 | t_id = data['t'] 456 | args = data['a'] 457 | node_id = args['id'] 458 | 459 | name = 'handle_' + data['q'] 460 | if hasattr(self, name): 461 | getattr(self, name)(t_id, data['a'], node_addr) 462 | 463 | self._handle_query(node_id, node_addr) 464 | 465 | @defer.inlineCallbacks 466 | def writeDatagram(self, data, node_addr): 467 | node_addr = yield dns_resolve(node_addr) 468 | for i in range(10): 469 | try: 470 | self.transport.write(data, node_addr) 471 | except: 472 | log.err() 473 | 474 | def datagramReceived(self, datagram, node_addr): 475 | try: 476 | data = bdecode(datagram) 477 | except BTError: 478 | return 479 | if data['y'] == 'q': 480 | self._KRPC_recieve_Query(data, node_addr) 481 | elif data['y'] == 'r': 482 | self._KRPC_recieve_response(data, node_addr) 483 | elif data['y'] == 'e': 484 | self._KRPC_recieve_error(data, node_addr) 485 | else: 486 | assert False 487 | 488 | 489 | def handle_ping(self, t_id, data, node_addr): 
490 | node_id = data['id'] 491 | args = {'id' : self.my_node_id} 492 | self._KRPC_send_response(node_addr, t_id, args) 493 | 494 | def handle_find_node(self, t_id, data, node_addr): 495 | node_id = data['id'] 496 | target_id = data['target'] 497 | nodes = self._handle_find_node(target_id) 498 | args = {'id' : self.my_node_id, 499 | 'nodes' : encodeCompactNodes(nodes)} 500 | self._KRPC_send_response(node_addr, t_id, args) 501 | 502 | def handle_get_peers(self, t_id, data, node_addr): 503 | node_id = data['id'] 504 | info_hash = data['info_hash'] 505 | 506 | token = hashlib.sha1(node_addr[0]+os.urandom(20)).digest() 507 | self.sent_tokens[token] = node_addr[0] 508 | def token_invalid(): 509 | del self.sent_tokens[token] 510 | reactor.callLater(600, token_invalid) 511 | 512 | _type, peers_or_nodes = self._handle_get_peers(info_hash) 513 | if _type == 'values': 514 | values = encodeCompactPeers(peers_or_nodes) 515 | args = {'id' : self.my_node_id, 516 | 'values' : values, 517 | 'token' : token} 518 | 519 | elif _type == 'nodes' : 520 | values = encodeCompactNodes(peers_or_nodes) 521 | args = {'id' : self.my_node_id, 522 | 'nodes' : values, 523 | 'token' : token} 524 | 525 | self._KRPC_send_response(node_addr, t_id, args) 526 | 527 | def handle_announce_peer(self, t_id, data, node_addr): 528 | node_id = data['id'] 529 | info_hash = data['info_hash'] 530 | port = data['port'] 531 | token = data['token'] 532 | ip = node_addr[0] 533 | 534 | if token not in self.sent_tokens: 535 | error = [203, 'protocol error: invalid token'] 536 | self._KRPC_send_error(node_addr, t_id, error) 537 | return 538 | 539 | self._handle_announce_peer(info_hash, (ip, port)) 540 | 541 | args = {'id' : self.my_node_id} 542 | self._KRPC_send_response(node_addr, t_id, args) 543 | 544 | def _handle_get_peers(self, info_hash): 545 | raise NotImplemented() 546 | 547 | def _handle_find_node(self, target_id): 548 | raise NotImplemented() 549 | 550 | def _handle_announce_peer(self, info_hash, peer_addr): 
551 | raise NotImplemented() 552 | 553 | def _handle_query(self, node_id, node_addr): 554 | raise NotImplemented() 555 | 556 | class DHTProtocol (DHTProtocolBase) : 557 | 558 | def __init__(self): 559 | DHTProtocolBase.__init__(self) 560 | 561 | self.torrent = {} 562 | 563 | def startProtocol(self): 564 | self.routingTable = RoutingTable() 565 | self.my_node_id = self.routingTable.my_node_id 566 | self.routingTable.doStart(self) 567 | 568 | def stopProtocol(self): 569 | self.routingTable.doStop() 570 | self.torrent = {} 571 | 572 | @defer.inlineCallbacks 573 | def addNode(self, addr): 574 | ip, port = addr 575 | yield self.routingTable.addNode(addr) 576 | self.routingTable.autoFillRoutingTable() 577 | 578 | @defer.inlineCallbacks 579 | def searchPeers(self, node_addr, info_hash): 580 | try: 581 | node_id, _type, values = (yield self.get_peers(node_addr, info_hash)) 582 | except DHTError as err: 583 | log.err() 584 | return 585 | else: 586 | node_id, nodes = yield self.find_node(node_addr, node_id) 587 | 588 | @defer.inlineCallbacks 589 | def register_torrent(self, info_hash, my_peer_port, callback): 590 | assert callable(callback) 591 | 592 | if info_hash in self.torrent: 593 | yield self.__updatePeers(info_hash) 594 | return 595 | 596 | args = {'callback':callback, 597 | 'port' : my_peer_port, 598 | 'result' : set(), 599 | 'status' : 'idle'} 600 | 601 | self.torrent[info_hash] = args 602 | 603 | while info_hash in self.torrent: 604 | yield self.__updatePeers(info_hash) 605 | size = len(args['result']) 606 | if 100 < size: 607 | yield sleep(15*60) 608 | elif 50 < size <= 100: 609 | yield sleep(10*60) 610 | else: 611 | yield sleep(5*60) 612 | 613 | @defer.inlineCallbacks 614 | def __updatePeers(self, info_hash): 615 | args = self.torrent[info_hash] 616 | 617 | if args['status'] == 'running': 618 | return 619 | 620 | args['status'] = 'running' 621 | nodes = self.routingTable.queryNode(info_hash) 622 | query_history = set() 623 | 624 | dfs = [] 625 | for node in 
nodes: 626 | df = self._getPeers(node, info_hash, query_history) 627 | dfs.append(df) 628 | yield sleep(0) 629 | 630 | [(yield df) for df in dfs] 631 | 632 | args['status'] = 'idle' 633 | 634 | def unregsiter_torrent(self, info_hash): 635 | if info_hash in self.torrent: 636 | del self.torrent[info_hash] 637 | 638 | @defer.inlineCallbacks 639 | def _getPeers(self, node, info_hash, query_history=set()): 640 | if info_hash not in self.torrent: 641 | return 642 | 643 | node_id, node_addr = node 644 | ip, port = node_addr 645 | 646 | if node_addr in query_history: 647 | return 648 | else: 649 | query_history.add(node_addr) 650 | 651 | args = self.torrent[info_hash] 652 | peers_callback = args['callback'] 653 | my_peer_port = args['port'] 654 | peers_result = args['result'] 655 | 656 | try: 657 | node_id, _type, values = (yield self.get_peers(node_addr, info_hash)) 658 | except DHTError as err: 659 | self.routingTable.removeNode(node_id) 660 | return 661 | 662 | self.routingTable.addGoodNode(node_id, node_addr) 663 | 664 | if _type == 'values': 665 | peers_result |= set(values) 666 | peers_callback(values) 667 | if my_peer_port : 668 | try: 669 | _id = yield self.announce_peer(node_addr, info_hash, my_peer_port) 670 | except DHTError as err: 671 | pass 672 | 673 | elif _type == 'nodes': 674 | dfs = [] 675 | for node in values: 676 | df = self._getPeers(node, info_hash, query_history) 677 | dfs.append(df) 678 | yield sleep(0) 679 | 680 | [(yield df) for df in dfs] # wait for all children finishing 681 | 682 | else: 683 | assert False 684 | 685 | @defer.inlineCallbacks 686 | def _handle_query(self, node_id, node_addr): 687 | if node_id in self.routingTable: 688 | self.routingTable.updateNode(node_id) 689 | else: 690 | try: 691 | _id = yield self.ping(node_addr) 692 | assert _id == node_id 693 | except DHTError as err: 694 | pass 695 | else: 696 | self.routingTable.addGoodNode(_id, node_addr) 697 | 698 | def _handle_find_node(self, target_id): 699 | return 
self.routingTable.queryNode(target_id) 700 | 701 | def _handle_get_peers(self, info_hash): 702 | if info_hash in self.torrent: 703 | peers = list(self.torrent[info_hash])[:10] 704 | return 'values', peers 705 | else: 706 | nodes = self.routingTable.queryNode(info_hash) 707 | return 'nodes', nodes 708 | 709 | def _handle_announce_peer(self, info_hash, peer_addr): 710 | pass 711 | 712 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 
28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 
62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 
102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 
133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. 
You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 
196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 
229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 
287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 
317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. 
If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 
386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 
486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. 
If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 
613 | 614 |   If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 |                      END OF TERMS AND CONDITIONS 622 | 623 |             How to Apply These Terms to Your New Programs 624 | 625 |   If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 |   To do so, attach the following notices to the program.  It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 |     &lt;one line to give the program's name and a brief idea of what it does.&gt; 635 |     Copyright (C) &lt;year&gt;  &lt;name of author&gt; 636 | 637 |     This program is free software: you can redistribute it and/or modify 638 |     it under the terms of the GNU General Public License as published by 639 |     the Free Software Foundation, either version 3 of the License, or 640 |     (at your option) any later version. 641 | 642 |     This program is distributed in the hope that it will be useful, 643 |     but WITHOUT ANY WARRANTY; without even the implied warranty of 644 |     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the 645 |     GNU General Public License for more details. 646 | 647 |     You should have received a copy of the GNU General Public License 648 |     along with this program.  If not, see &lt;https://www.gnu.org/licenses/&gt;. 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 |   If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 |     &lt;program&gt;  Copyright (C) &lt;year&gt;  &lt;name of author&gt; 656 |     This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 |     This is free software, and you are welcome to redistribute it 658 |     under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License.  Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 |   You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | &lt;https://www.gnu.org/licenses/&gt;. 668 | 669 |   The GNU General Public License does not permit incorporating your program 670 | into proprietary programs.  If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library.  If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License.  But first, please read 674 | &lt;https://www.gnu.org/philosophy/why-not-lgpl.html&gt;. 675 | --------------------------------------------------------------------------------