├── src ├── __init__.py ├── keys │ └── gen.sh ├── old │ ├── sender.py │ ├── run_nodes.py │ ├── server.py │ ├── Makefile │ ├── chord_node.py │ ├── test.py │ └── tcp_sock.cc ├── logger.py ├── upload_protocol.py ├── metadata_request_protocol.py ├── test_helpers.py ├── upload_request_protocol.py ├── helpers.py ├── share_node.py ├── index_master_protocol.py ├── file_system.py ├── file_database.py └── file_sharing_service.py ├── helper-scripts ├── __init__.py ├── exports ├── host.sh ├── benchmark.sh ├── connect.sh ├── connect-new.sh ├── connect-mount.sh ├── bootstrap-nfs.sh ├── server-setup.py ├── andrew.sh ├── server_line.py ├── run_fab.py ├── fix-server.sh ├── concurrent.py ├── bootstrap-p2pfs.sh ├── multi-server-run.py ├── sysctl.conf └── fabfile.py ├── README.md ├── lxc-scripts ├── destroy_all.sh ├── README ├── create.py ├── generate.py └── run.py ├── .gitignore └── COPYING /src/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /helper-scripts/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /helper-scripts/exports: -------------------------------------------------------------------------------- 1 | /home/ubuntu/tmp/test 129.215.5.255(rw,sync,no_subtree_check,no_root_squash) 2 | -------------------------------------------------------------------------------- /src/keys/gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | for i in {0..100} 4 | do 5 | ssh-keygen -q -N "" -f key$i 6 | done 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | p2pfs 2 | ===== 3 | 4 | This is a peer-to-peer distributed file system using Python Twisted, Entangled (Kademlia) and FUSE. 5 | -------------------------------------------------------------------------------- /lxc-scripts/destroy_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | nodes=`lxc-ls` 3 | for node in $nodes 4 | do 5 | echo destroying $node 6 | sudo lxc-stop -n $node 7 | sudo lxc-destroy -n $node 8 | done 9 | -------------------------------------------------------------------------------- /lxc-scripts/README: -------------------------------------------------------------------------------- 1 | Put this in your interfaces: 2 | 3 | auto eth0 4 | iface eth0 inet static 5 | address 0.0.0.0 6 | 7 | auto br0 8 | iface br0 inet dhcp 9 | bridge_ports eth0 10 | -------------------------------------------------------------------------------- /helper-scripts/host.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | /usr/bin/pypy /home/ubuntu/p2pfs/src/share_node.py \ 3 | --port 2000 \ 4 | --key /home/ubuntu/p2pfs/src/keys/key0 \ 5 | --db /home/ubuntu/p2pfs/work/dbhost \ 6 | --log /home/ubuntu/p2pfs/log-host \ 7 | --dir /home/ubuntu/p2pfs/work/res \ 8 | --newdb 9 | -------------------------------------------------------------------------------- /helper-scripts/benchmark.sh: -------------------------------------------------------------------------------- 1 | cd testfs 2 | echo Copy 3 | time -f "\t%E real,\t%U user,\t%S sys" cp -r ~/tmp/ed-1.9 . 
>> /dev/null 4 | echo List 5 | time -f "\t%E real,\t%U user,\t%S sys" ls -Rla ed-1.9 >> /dev/null 6 | echo Search 7 | time -f "\t%E real,\t%U user,\t%S sys" grep -R "next" ed-1.9 >> /dev/null 8 | -------------------------------------------------------------------------------- /helper-scripts/connect.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | /usr/bin/pypy /home/ubuntu/p2pfs/src/share_node.py \ 3 | --port 2000 \ 4 | --connect 109.231.124.122:2000 \ 5 | --key /home/ubuntu/p2pfs/src/keys/key1 \ 6 | --db /home/ubuntu/p2pfs/work/dball \ 7 | --log /home/ubuntu/p2pfs/log-node \ 8 | --dir /home/ubuntu/p2pfs/work/res 9 | -------------------------------------------------------------------------------- /helper-scripts/connect-new.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | /usr/bin/pypy /home/ubuntu/p2pfs/src/share_node.py \ 3 | --port 2000 \ 4 | --connect 109.231.124.122:2000 \ 5 | --key /home/ubuntu/p2pfs/src/keys/key1 \ 6 | --db /home/ubuntu/p2pfs/work/dball \ 7 | --log /home/ubuntu/p2pfs/log-node \ 8 | --dir /home/ubuntu/p2pfs/work/res \ 9 | --newdb 10 | -------------------------------------------------------------------------------- /helper-scripts/connect-mount.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | /usr/bin/pypy /home/ubuntu/p2pfs/src/share_node.py \ 3 | --port 2000 \ 4 | --connect 109.231.124.122:2000 \ 5 | --key /home/ubuntu/p2pfs/src/keys/key1 \ 6 | --db /home/ubuntu/p2pfs/work/dball \ 7 | --log /home/ubuntu/p2pfs/log-node \ 8 | --dir /home/ubuntu/p2pfs/work/res \ 9 | --fs testfs 10 | -------------------------------------------------------------------------------- /helper-scripts/bootstrap-nfs.sh: -------------------------------------------------------------------------------- 1 | sudo cp sysctl.conf /etc/sysctl.conf 2 | sudo sysctl -p 3 | sudo apt-get update 4 | sudo apt-get install subversion build-essential autoconf automake flex bison rpcbind 5 | svn checkout svn://svn.code.sf.net/p/unfs3/code/trunk unfs3-code 6 | cd unfs3-code 7 | autoheader 8 | autoconf 9 | ./configure 10 | make 11 | sudo make install 12 | mkdir -p ~/tmp/test 13 | -------------------------------------------------------------------------------- /helper-scripts/server-setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import subprocess 4 | 5 | output = subprocess.check_output("blkid").strip().split() 6 | assert(output[1][:4] == 'UUID') 7 | uuid = output[1].split("=")[1].strip("\"") 8 | 9 | f = open("/boot/grub/load.cfg", 'w') 10 | f.write("""\ 11 | search.fs_uuid {} root 12 | set prefix=($root)/grub 13 | """.format(uuid)) 14 | f.close() 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | 3 | # C extensions 4 | *.so 5 | 6 | # Packages 7 | *.egg 8 | *.egg-info 9 | dist 10 | build 11 | eggs 12 | parts 13 | bin 14 | var 15 | sdist 16 | develop-eggs 17 | .installed.cfg 18 | lib 19 | lib64 20 | 21 | # Installer logs 22 | pip-log.txt 23 | 24 | # Unit test / coverage reports 25 | .coverage 26 | .tox 27 | nosetests.xml 28 | 29 | # Translations 30 | *.mo 31 | 32 | # Mr Developer 33 | .mr.developer.cfg 34 | .project 35 | .pydevproject 36 | 37 | src/keys/* 38 | 
-------------------------------------------------------------------------------- /src/old/sender.py: -------------------------------------------------------------------------------- 1 | import socket 2 | def main(): 3 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 4 | sock.connect(('localhost', 6000)) 5 | print(sock.send("3:lol")) 6 | print(sock.send("3:lol")) 7 | print(sock.send("8:whatever")) 8 | sock.close() 9 | #totalsent = 0 10 | #while totalsent < len(netstring): 11 | # sent = self.sock.send(netstring[totalsent:]) 12 | # if sent == 0: 13 | # raise RuntimeError("connection broken") 14 | # totalsent += sent 15 | 16 | if __name__ == "__main__": 17 | main() 18 | -------------------------------------------------------------------------------- /helper-scripts/andrew.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cd testfs 3 | echo Mkdir 4 | time -f "\t%E real,\t%U user,\t%S sys" sh -c 'mkdir ed-1.9;mkdir ed-1.9/doc;mkdir ed-1.9/testsuite' >> /dev/null 5 | echo Copy 6 | time -f "\t%E real,\t%U user,\t%S sys" cp -r ~/tmp/ed-1.9 . >> /dev/null 7 | echo List 8 | time -f "\t%E real,\t%U user,\t%S sys" ls -Rla ed-1.9 >> /dev/null 9 | echo Search 10 | time -f "\t%E real,\t%U user,\t%S sys" grep -R "next" ed-1.9 >> /dev/null 11 | cd ed-1.9 12 | echo Compile 13 | time -f "\t%E real,\t%U user,\t%S sys" sh -c './configure; make' >> /dev/null 14 | -------------------------------------------------------------------------------- /lxc-scripts/create.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import subprocess 3 | import sys 4 | 5 | prec = "cont" 6 | directory = "lxc_config" 7 | 8 | def main(): 9 | if len(sys.argv) != 2: 10 | print("Please specify container count") 11 | sys.exit(1) 12 | total = int(sys.argv[1]) 13 | for i in xrange(0, total): 14 | name = "{}{:03d}".format(prec, i) 15 | run_script = "sudo lxc-create -n {0} -f lxc_config/{0}.conf".format(name) 16 | subprocess.call(run_script, shell=True) 17 | 18 | if __name__ == "__main__": 19 | main() 20 | -------------------------------------------------------------------------------- /helper-scripts/server_line.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import sys 3 | import os 4 | 5 | def make_server_line(filename): 6 | f = open(filename) 7 | hosts = [] 8 | for l in f.readlines(): 9 | l = l.strip() 10 | if l.startswith('#'): 11 | continue 12 | host = l.split()[0] 13 | hosts.append(host) 14 | f.close() 15 | return ','.join(hosts) 16 | 17 | def main(): 18 | if len(sys.argv) != 2: 19 | print 'Usage: server-line.py FILENAME' 20 | sys.exit(1) 21 | filename = sys.argv[1] 22 | print make_server_line(filename) 23 | 24 | if __name__ == '__main__': 25 | main() 26 | -------------------------------------------------------------------------------- /src/logger.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import sys 3 | 4 | class Logger(object): 5 | def __init__(self, where=None): 6 | self.set_output(where) 7 | self.DISABLED = False 8 | 9 | def set_output(self, where): 10 | if not where: 11 | self.out = sys.stdout 12 | else: 13 | self.out = where 14 | 15 | def log(self, *args): 16 | if self.DISABLED: 17 | return 18 | if len(args) == 1: 19 | cl = 'NULL' 20 | message = args[0] 21 | elif len(args) == 2: 22 | cl = args[0] 23 | message = args[1] 24 | tm = str(datetime.datetime.now()) 25 | 
self.out.write('[{}] [{}] {}\n'.format(tm, cl, message)) 26 | self.out.flush() 27 | 28 | -------------------------------------------------------------------------------- /helper-scripts/run_fab.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import sys 3 | import subprocess 4 | import os 5 | import server_line 6 | 7 | def main(): 8 | if len(sys.argv) < 3: 9 | print 'Usage: run_fab.py FILENAME COMMAND [REST]' 10 | sys.exit(1) 11 | filename = sys.argv[1] 12 | command = sys.argv[2] 13 | 14 | fab_command = 'fab' 15 | if len(sys.argv) > 3: 16 | rest = ' '.join(sys.argv[3:]) 17 | fab_command += ' {}'.format(rest) 18 | 19 | hosts = server_line.make_server_line(filename) 20 | 21 | fab_command += ' -H {} {}'.format(hosts, command) 22 | 23 | print fab_command 24 | subprocess.call(fab_command, shell=True, cwd='.') 25 | print 'done' 26 | 27 | if __name__ == '__main__': 28 | main() 29 | -------------------------------------------------------------------------------- /helper-scripts/fix-server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | HOST=$1 3 | scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-setup.py ubuntu@${HOST}:~/ 4 | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -t ubuntu@${HOST} bash -c "' 5 | mkdir -p ~/.ssh 6 | echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCzYVatpTuKyUB0aQlUXxAW0rIVbAh/yluy0ljWFevba0Otz4kMWIfAcEpMmchEfp9+PgqT2E3JRVy+pDsmNp/Xf0JQQnHImHEeWjAx1UqEGLYIp75cOb2p2/wmA0M8bixgQQ86FUNnxVLWyQf4w42RAzROl/G53tj+ieMbpa+MSgGCgildnvv+brfpIYANFWPD0FAAqdkSUn0rldScEqInSs+/lC+UQSuJUTeL/eRTOT1uo0R3BDtksy3S9NfhNQPJpn57fIRKdif/wgOo3hwrsey1JR7CxUhCLd0pF8fiRawIdURQ86t+QOSzaheB1FISS1xZ2oJVZKz4dNsn+2Tz ubuntu@ubuntu-VirtualBox > ~/.ssh/authorized_keys 7 | chmod 664 ~/.ssh/authorized_keys 8 | sudo python server-setup.py 9 | sudo grub-install /dev/vda 10 | sudo update-grub 11 | '" 12 | -------------------------------------------------------------------------------- /helper-scripts/concurrent.py: -------------------------------------------------------------------------------- 1 | import os 2 | import timeit 3 | import subprocess 4 | import time 5 | import sys 6 | 7 | location_dir = 'testfs' 8 | SLEEP_TIME = 3 9 | output_filename = 'output.txt' 10 | 11 | def main(): 12 | FNULL = open(os.devnull, 'w') 13 | 14 | output_file = open(output_filename, 'w') 15 | 16 | os.chdir(os.path.join('.', location_dir)) 17 | 18 | if len(sys.argv) < 2: 19 | print 'USAGE: concurrent.py FILENAME' 20 | sys.exit(1) 21 | 22 | filename = sys.argv[1] 23 | while True: 24 | cmd = lambda: subprocess.call('/bin/cp /home/ubuntu/tmp/1MB ./{}'.format(filename), stdout=FNULL, stderr=subprocess.STDOUT, shell=True) 25 | t = timeit.Timer(cmd) 26 | miliseconds = t.timeit(number=1) 27 | miliseconds = int(round(miliseconds*1000)) 28 | print miliseconds 29 | output_file.write('{0}\n'.format(miliseconds)) 30 | output_file.flush() 31 | time.sleep(SLEEP_TIME) 32 | 33 | output_file.close() 34 | 35 | if __name__ == '__main__': 36 | main() 37 | -------------------------------------------------------------------------------- /src/old/run_nodes.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import time 4 | import subprocess 5 | 6 | port_range = (4000, 4008) 7 | 8 | retriever = 5 9 | 10 | data = {12:100, 17:200} 11 | 12 | def main(): 13 | # start first node 14 | procs = [] 15 | command = ['python', 'share_node.py', 
'--port', str(port_range[0])] 16 | print(' '.join(command)) 17 | procs.append(subprocess.Popen(command)) 18 | 19 | data_items = data.items() 20 | for port in range(port_range[0]+1, port_range[1]): 21 | command = ['python', 'share_node.py', '--port', str(port)] 22 | command += ['--connect', 'localhost:{}'.format(str(port_range[0]))] 23 | if len(data_items) > 0: 24 | item = data_items.pop() 25 | command.append('--store') 26 | command.append('{}:{}'.format(item[0], item[1])) 27 | print(' '.join(command)) 28 | procs.append(subprocess.Popen(command)) 29 | time.sleep(10) 30 | for p in procs: 31 | p.terminate() 32 | 33 | # call the rest of servers 34 | 35 | if __name__ == '__main__': 36 | main() 37 | -------------------------------------------------------------------------------- /src/upload_protocol.py: -------------------------------------------------------------------------------- 1 | from twisted.protocols.basic import LineReceiver 2 | from helpers import * 3 | from tempfile import NamedTemporaryFile 4 | import binascii 5 | import json 6 | 7 | class UploadProtocol(LineReceiver): 8 | def __init__(self, logger): 9 | self.l = logger 10 | 11 | def connectionMade(self): 12 | self.l.log('Connection was made (UploadProtocol)') 13 | 14 | def upload_file(self, path, file_path, key, hash, mtime): 15 | self.l.log("uploadFile protocol working, mtime: {}".format(mtime)) 16 | 17 | contents = json.dumps({'command' : 'store', 'path' : path, 'key' : key, 'hash' : binascii.hexlify(hash), 'time' : str(mtime)}) 18 | self.sendLine(contents) 19 | 20 | d = upload_file_with_encryption(file_path, self.transport) 21 | d.addCallback(self.transferCompleted) 22 | 23 | self.l.log('started uploading') 24 | 25 | def transferCompleted(self, lastsent): 26 | self.l.log('finished uploading') 27 | self.transport.loseConnection() 28 | self.l.log('connection closed') 29 | 30 | -------------------------------------------------------------------------------- /src/metadata_request_protocol.py: -------------------------------------------------------------------------------- 1 | from twisted.internet import defer 2 | from twisted.protocols.basic import LineReceiver 3 | from helpers import * 4 | import binascii 5 | import json 6 | 7 | class MetadataRequestProtocol(LineReceiver): 8 | def __init__(self, logger): 9 | self.l = logger 10 | self.buffer = '' 11 | 12 | def connectionMade(self): 13 | ip = self.transport.getPeer().host 14 | self.l.log('Connection was made (MetadataRequestProtocol) to {}'.format(ip)) 15 | 16 | def lineReceived(self, line): 17 | self.buffer = line 18 | 19 | def request_metadata(self, filename, key, hash): 20 | contents = json.dumps({'command' : 'tell_metadata', 'path' : filename, 'key' : key, 'hash' : binascii.hexlify(hash)}) 21 | self.sendLine(contents) 22 | 23 | self.l.log('metadata request finished') 24 | self.df = defer.Deferred() 25 | return self.df 26 | 27 | def connectionLost(self, reason): 28 | if len(self.buffer) == 0: 29 | self.l.log("Metadata request failed! 
Got nothing.\n") 30 | return 31 | self.df.callback(int(self.buffer)) 32 | 33 | -------------------------------------------------------------------------------- /src/test_helpers.py: -------------------------------------------------------------------------------- 1 | from helpers import encrypt_file, decrypt_file 2 | import hashlib 3 | import os 4 | import unittest 5 | import random 6 | 7 | class TestEncryptionFunctions(unittest.TestCase): 8 | def setUp(self): 9 | self.orig_filename = '.testEncryption' 10 | self.enc_filename = self.orig_filename + '.enc' 11 | self.dec_filename = self.orig_filename + '.dec' 12 | 13 | self.contents = ''.join([str(random.randint(0, 10000)) for x in xrange(1000)]) 14 | with open(self.orig_filename, 'wb') as f: 15 | f.write(self.contents) 16 | 17 | def testEncryption(self): 18 | key = hashlib.sha256('test123').digest() 19 | encrypt_file(open(self.orig_filename, 'rb'), open(self.enc_filename, 'wb'), key) 20 | decrypt_file(open(self.enc_filename, 'rb'), open(self.dec_filename, 'wb'), key) 21 | 22 | decrypted_contents = open(self.dec_filename, 'rb').read() 23 | self.assertTrue(self.contents == decrypted_contents) 24 | 25 | def tearDown(self): 26 | for filename in [self.orig_filename, self.enc_filename, self.dec_filename]: 27 | if os.path.exists(filename): 28 | os.remove(filename) 29 | -------------------------------------------------------------------------------- /src/old/server.py: -------------------------------------------------------------------------------- 1 | import socket 2 | import sys 3 | import os 4 | import struct 5 | import SocketServer 6 | 7 | 8 | class NodeHandler(SocketServer.BaseRequestHandler): 9 | 10 | netstring_max_header_size = 5 11 | 12 | def handle(self): 13 | data = self.request.recv(NodeHandler.netstring_max_header_size) 14 | if data == '': 15 | raise RuntimeError("connection broken") 16 | 17 | try: 18 | header, data = data.split(':') 19 | except ValueError: 20 | raise RuntimeError("incorrect header received") 21 | 22 | try: 23 | expected_size = int(header) 24 | except ValueError: 25 | raise("failed to determine expected packet length") 26 | 27 | expected_size -= len(data) 28 | received = False 29 | while not received: 30 | data_new = self.request.recv(expected_size) 31 | expected_size -= len(data_new) 32 | data = "{}{}".format(data, data_new) 33 | if (expected_size == 0): 34 | received = True 35 | print(data) 36 | 37 | class NodeServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer): 38 | daemon_threads = True 39 | allow_reused_address = True 40 | 41 | def __init__(self, server_address, RequestHandlerClass): 42 | SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass) 43 | 44 | def main(): 45 | server = NodeServer(('localhost', 6000), NodeHandler) 46 | try: 47 | server.serve_forever() 48 | except KeyboardInterrupt: 49 | sys.exit(0) 50 | 51 | if __name__ == "__main__": 52 | main() 53 | -------------------------------------------------------------------------------- /lxc-scripts/generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import netaddr 3 | import os 4 | import virtinst.util 5 | import copy 6 | 7 | default_config = [ 8 | ("lxc.utsname", "vps0"), 9 | ("lxc.network.type", "veth"), 10 | ("lxc.network.flags", "up"), 11 | ("lxc.network.link", "lxcbr0"), 12 | ("lxc.network.hwaddr", "00:30:6E:08:EC:80"), 13 | ("lxc.network.ipv4", "192.168.1.10"), 14 | ("lxc.network.name", "eth0") 15 | ] 16 | 17 | default_options = {} 18 | for key, value in 
default_config: 19 | default_options[key] = value 20 | 21 | #starting_address = netaddr.IPAddress("192.168.1.10") 22 | starting_address = netaddr.IPAddress("10.0.3.2") 23 | directory = "lxc_config" 24 | total = 100 25 | 26 | def write_config(f, options): 27 | for key in (pair[0] for pair in default_config): 28 | f.write("{} = {}\n".format(key, options[key])) 29 | 30 | def main(): 31 | current_address = copy.copy(starting_address) 32 | prec = "cont" 33 | if not os.path.exists(directory): 34 | os.makedirs(directory) 35 | for i in xrange(0, total): 36 | name = "{}{:03d}".format(prec, i) 37 | options = default_options.copy() 38 | options["lxc.utsname"] = name 39 | options["lxc.network.hwaddr"] = virtinst.util.randomMAC().upper() 40 | options["lxc.network.ipv4"] = str(current_address) + '/24' 41 | current_address += 1 42 | f = open(os.path.join(directory, "{}.conf".format(name)), "w") 43 | write_config(f, options) 44 | f.close() 45 | 46 | if __name__ == "__main__": 47 | main() 48 | -------------------------------------------------------------------------------- /helper-scripts/bootstrap-p2pfs.sh: -------------------------------------------------------------------------------- 1 | sudo cp sysctl.conf /etc/sysctl.conf 2 | sudo sysctl -p 3 | sudo apt-get update 4 | sudo apt-get install git subversion build-essential autoconf automake flex bison rpcbind pypy libsqlite3-dev python-dev pypy-dev unzip python-crypto nmap -y 5 | 6 | wget https://pypi.python.org/packages/source/z/zope.interface/zope.interface-4.0.5.zip 7 | unzip zope.interface-4.0.5.zip 8 | cd zope.interface-4.0.5/ 9 | sudo pypy setup.py install 10 | cd .. 11 | 12 | wget http://pypi.python.org/packages/source/d/distribute/distribute-0.6.49.tar.gz 13 | tar xvzf distribute-0.6.49.tar.gz 14 | cd distribute-0.6.49/ 15 | sudo pypy setup.py install 16 | cd .. 17 | 18 | # fix for too many open files error when installing twisted 19 | ulimit -n 2048 20 | 21 | wget http://twistedmatrix.com/Releases/Twisted/13.2/Twisted-13.2.0.tar.bz2 22 | tar xvjf Twisted-13.2.0.tar.bz2 23 | cd Twisted-13.2.0 24 | sudo pypy setup.py install 25 | cd .. 26 | 27 | git clone https://github.com/terencehonles/fusepy.git fusepy 28 | cd fusepy 29 | sudo pypy setup.py install 30 | cd .. 31 | 32 | svn checkout svn://svn.code.sf.net/p/entangled/code/ entangled-code 33 | cd entangled-code/entangled 34 | sudo pypy setup.py install 35 | cd ../.. 36 | 37 | wget https://ftp.dlitz.net/pub/dlitz/crypto/pycrypto/pycrypto-2.6.1.tar.gz 38 | tar xvzf pycrypto-2.6.1.tar.gz 39 | cd pycrypto-2.6.1 40 | sudo pypy setup.py install 41 | cd .. 42 | 43 | git clone https://darka@github.com/darka/p2pfs.git 44 | sudo chown ubuntu:ubuntu -R p2pfs 45 | 46 | sudo adduser ubuntu fuse 47 | 48 | mkdir tmp 49 | cd tmp/ 50 | wget http://www.irssi.org/files/irssi-0.8.16-rc1.tar.gz 51 | tar xvzf irssi-0.8.16-rc1.tar.gz 52 | cd .. 
53 | -------------------------------------------------------------------------------- /src/upload_request_protocol.py: -------------------------------------------------------------------------------- 1 | from twisted.internet import defer 2 | from twisted.protocols.basic import LineReceiver 3 | from helpers import * 4 | import binascii 5 | import json 6 | 7 | class UploadRequestProtocol(LineReceiver): 8 | def __init__(self, logger): 9 | self.l = logger 10 | self.outfile_size = 0 11 | 12 | def connectionMade(self): 13 | ip = self.transport.getPeer().host 14 | self.l.log('Connection was made (UploadRequestProtocol) to {}'.format(ip)) 15 | 16 | def rawDataReceived(self, data): 17 | self.tmp_destination_file.write(data) 18 | self.outfile_size += len(data) 19 | 20 | def request_file(self, path, file_path, key, hash): 21 | self.destination = file_path 22 | hexhash = binascii.hexlify(hash) 23 | self.l.log("upload request protocol working ({}, {}, {}, {})".format(path, file_path, key, hexhash)) 24 | 25 | contents = json.dumps({'command' : 'upload', 'path' : path, 'key' : key, 'hash' : hexhash}) 26 | 27 | dirs = os.path.dirname(self.destination) 28 | if dirs and not os.path.exists(dirs): 29 | os.makedirs(dirs) 30 | 31 | self.tmp_destination_file = NamedTemporaryFile(delete=False) 32 | self.outfile_size = 0 33 | self.sendLine(contents) 34 | self.setRawMode() 35 | self.df = defer.Deferred() 36 | return self.df 37 | 38 | def connectionLost(self, reason): 39 | if self.outfile_size == 0: 40 | self.l.log("Upload request failed! Downloaded nothing.") 41 | return 42 | self.l.log('Saved download to {}'.format(self.destination)) 43 | self.tmp_destination_file.close() 44 | 45 | d = threads.deferToThread( 46 | decrypt_file, 47 | open(self.tmp_destination_file.name, 'rb'), 48 | open(self.destination, 'wb'), 49 | ENCRYPT_KEY) 50 | d.chainDeferred(self.df) 51 | 52 | -------------------------------------------------------------------------------- /src/old/Makefile: -------------------------------------------------------------------------------- 1 | CC=g++ 2 | CFLAGS=-O3 -g -pthread -std=c++11 -Wall 3 | INCLUDES=-I/usr/local/include/ns3.16 -I/usr/include/gtk-2.0 -I/usr/lib/i386-linux-gnu/gtk-2.0/include -I/usr/include/atk-1.0 -I/usr/include/cairo -I/usr/include/gdk-pixbuf-2.0 -I/usr/include/pango-1.0 -I/usr/include/gio-unix-2.0 -I/usr/include/glib-2.0 -I/usr/lib/i386-linux-gnu/glib-2.0/include -I/usr/include/pixman-1 -I/usr/include/freetype2 -I/usr/include/libpng12 -I/usr/include/libxml2 -I/usr/include/python2.7 4 | SETTINGS=-DNS3_ASSERT_ENABLE -DNS3_LOG_ENABLE -DHAVE_PACKET_H=1 -DHAVE_DL=1 -DSQLITE3=1 -DHAVE_IF_TUN_H=1 -DNDEBUG -D_FORTIFY_SOURCE=2 5 | LDFLAGS=-L/usr/lib -L/usr/local/lib -lns3.16-test-debug -lns3.16-csma-layout-debug -lns3.16-point-to-point-layout-debug -lns3.16-netanim-debug -lns3.16-lte-debug -lns3.16-spectrum-debug -lns3.16-antenna-debug -lns3.16-aodv-debug -lns3.16-dsdv-debug -lns3.16-dsr-debug -lns3.16-mesh-debug -lns3.16-olsr-debug -lns3.16-csma-debug -lns3.16-wimax-debug -lns3.16-applications-debug -lns3.16-virtual-net-device-debug -lns3.16-uan-debug -lns3.16-energy-debug -lns3.16-flow-monitor-debug -lns3.16-nix-vector-routing-debug -lns3.16-tap-bridge-debug -lns3.16-visualizer-debug -lns3.16-internet-debug -lns3.16-bridge-debug -lns3.16-point-to-point-debug -lns3.16-mpi-debug -lns3.16-wifi-debug -lns3.16-buildings-debug -lns3.16-propagation-debug -lns3.16-mobility-debug -lns3.16-config-store-debug -lns3.16-tools-debug -lns3.16-stats-debug -lns3.16-emu-debug -lns3.16-topology-read-debug 
-lns3.16-network-debug -lns3.16-core-debug -lrt -lgtk-x11-2.0 -lgdk-x11-2.0 -latk-1.0 -lgio-2.0 -lpangoft2-1.0 -lpangocairo-1.0 -lgdk_pixbuf-2.0 -lcairo -lpango-1.0 -lfreetype -lfontconfig -lgobject-2.0 -lglib-2.0 -lxml2 -lsqlite3 -lpython2.7 6 | SOURCES=tcp_sock.cc 7 | OBJECTS=$(SOURCES:.cc=.o) 8 | EXECUTABLE=tcp_sock 9 | 10 | all: $(SOURCES) $(EXECUTABLE) 11 | 12 | $(EXECUTABLE): $(OBJECTS) 13 | $(CC) -o $@ $(OBJECTS) $(LDFLAGS) 14 | 15 | .cc.o: 16 | $(CC) $(INCLUDES) $(SETTINGS) -c $< -o $@ $(CFLAGS) 17 | 18 | clean: 19 | rm -rf $(EXECUTABLE) $(OBJECTS) 20 | 21 | 22 | -------------------------------------------------------------------------------- /helper-scripts/multi-server-run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import threading 3 | import paramiko 4 | import sys 5 | import os 6 | import argparse 7 | 8 | logs_folder = 'logs' 9 | logs_err_folder = os.path.join(logs_folder, 'err') 10 | 11 | def perform_host(host, filenames, commands): 12 | ssh = paramiko.SSHClient() 13 | ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) 14 | ssh.connect(host) 15 | 16 | out = open(os.path.join(logs_folder, host + '.log'), 'w') 17 | out_err = open(os.path.join(logs_err_folder, host + '.logerr'), 'w') 18 | 19 | if filenames: 20 | sftp = ssh.open_sftp() 21 | 22 | for filename in filenames: 23 | sftp.put(os.path.join(os.getcwd(), filename), os.path.join('.', filename)) 24 | out.write('>> Uploaded: {0}\n'.format(filename)) 25 | 26 | for command in commands: 27 | command = command.strip() 28 | if command.startswith('#'): 29 | continue 30 | stdin, stdout, stderr = ssh.exec_command(command) 31 | 32 | out.write(stdout.read()) 33 | out_err.write(stderr.read()) 34 | 35 | out.flush() 36 | out_err.flush() 37 | 38 | out_err.close() 39 | out.close() 40 | 41 | def main(): 42 | parser = argparse.ArgumentParser() 43 | parser.add_argument('--hosts', required=True) 44 | parser.add_argument('--commands', required=False) 45 | parser.add_argument('--files', required=False, nargs='*') 46 | args = parser.parse_args() 47 | 48 | if not os.path.exists(logs_folder): 49 | os.makedirs(logs_folder) 50 | 51 | if not os.path.exists(logs_err_folder): 52 | os.makedirs(logs_err_folder) 53 | 54 | hosts = [] 55 | f = open(args.hosts, 'r') 56 | for host in f.readlines(): 57 | hosts.append(host.strip()) 58 | f.close() 59 | 60 | threads = [] 61 | commands = open(args.commands, 'r').readlines() if args.commands else [] 62 | 63 | for host in hosts: 64 | print host 65 | t = threading.Thread(target=perform_host, args=(host, args.files, commands)) 66 | t.start() 67 | threads.append(t) 68 | 69 | for t in threads: 70 | t.join() 71 | 72 | if __name__ == "__main__": 73 | main() 74 | 75 | -------------------------------------------------------------------------------- /helper-scripts/sysctl.conf: -------------------------------------------------------------------------------- 1 | # 2 | # /etc/sysctl.conf - Configuration file for setting system variables 3 | # See /etc/sysctl.d/ for additional system variables 4 | # See sysctl.conf (5) for information. 
5 | # 6 | 7 | #kernel.domainname = example.com 8 | 9 | # Uncomment the following to stop low-level messages on console 10 | #kernel.printk = 3 4 1 3 11 | 12 | ##############################################################3 13 | # Functions previously found in netbase 14 | # 15 | 16 | # Uncomment the next two lines to enable Spoof protection (reverse-path filter) 17 | # Turn on Source Address Verification in all interfaces to 18 | # prevent some spoofing attacks 19 | #net.ipv4.conf.default.rp_filter=1 20 | #net.ipv4.conf.all.rp_filter=1 21 | 22 | # Uncomment the next line to enable TCP/IP SYN cookies 23 | # See http://lwn.net/Articles/277146/ 24 | # Note: This may impact IPv6 TCP sessions too 25 | #net.ipv4.tcp_syncookies=1 26 | 27 | # Uncomment the next line to enable packet forwarding for IPv4 28 | #net.ipv4.ip_forward=1 29 | 30 | # Uncomment the next line to enable packet forwarding for IPv6 31 | # Enabling this option disables Stateless Address Autoconfiguration 32 | # based on Router Advertisements for this host 33 | #net.ipv6.conf.all.forwarding=1 34 | 35 | 36 | ################################################################### 37 | # Additional settings - these settings can improve the network 38 | # security of the host and prevent against some network attacks 39 | # including spoofing attacks and man in the middle attacks through 40 | # redirection. Some network environments, however, require that these 41 | # settings are disabled so review and enable them as needed. 42 | # 43 | # Do not accept ICMP redirects (prevent MITM attacks) 44 | #net.ipv4.conf.all.accept_redirects = 0 45 | #net.ipv6.conf.all.accept_redirects = 0 46 | # _or_ 47 | # Accept ICMP redirects only for gateways listed in our default 48 | # gateway list (enabled by default) 49 | # net.ipv4.conf.all.secure_redirects = 1 50 | # 51 | # Do not send ICMP redirects (we are not a router) 52 | #net.ipv4.conf.all.send_redirects = 0 53 | # 54 | # Do not accept IP source route packets (we are not a router) 55 | #net.ipv4.conf.all.accept_source_route = 0 56 | #net.ipv6.conf.all.accept_source_route = 0 57 | # 58 | # Log Martian Packets 59 | #net.ipv4.conf.all.log_martians = 1 60 | # 61 | net.ipv6.conf.all.disable_ipv6 = 1 62 | net.ipv6.conf.default.disable_ipv6 = 1 63 | net.ipv6.conf.lo.disable_ipv6 = 1 64 | -------------------------------------------------------------------------------- /src/helpers.py: -------------------------------------------------------------------------------- 1 | from twisted.protocols.basic import FileSender 2 | from tempfile import NamedTemporaryFile 3 | from twisted.internet import threads 4 | from Crypto.Cipher import AES 5 | import struct 6 | import random 7 | import os 8 | import hashlib 9 | 10 | # For testing purposes, all users of the file system share the same AES key. 11 | # This is good enough for performance measurement. 
12 | ENCRYPT_KEY = 'testtesttesttest' 13 | 14 | def sha_hash(name): 15 | h = hashlib.sha1() 16 | h.update(name) 17 | return h.digest() 18 | 19 | def upload_file_with_encryption(filename, transport): 20 | infile = open(filename, 'r') 21 | tmp_file = NamedTemporaryFile(delete=False) 22 | d = threads.deferToThread(encrypt_file, infile, tmp_file, ENCRYPT_KEY) 23 | d.addCallback(lambda tmp_file: open(tmp_file, 'rb')) 24 | return d.addCallback(upload_file, transport) 25 | 26 | def upload_file(file, transport): 27 | sender = FileSender() 28 | sender.CHUNK_SIZE = 2 ** 16 29 | return sender.beginFileTransfer(file, transport) 30 | 31 | # Encryption/decryption based on: 32 | # http://eli.thegreenplace.net/2010/06/25/aes-encryption-of-files-in-python-with-pycrypto/ 33 | 34 | def encrypt_file(file_in, file_out, key): 35 | chunk_size = 24*1024 36 | 37 | iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16)) 38 | encryptor = AES.new(key, AES.MODE_CBC, iv) 39 | 40 | file_size = os.path.getsize(file_in.name) 41 | 42 | file_out.write(struct.pack('<Q', file_size)) 43 | file_out.write(iv) 44 | 45 | chunk = file_in.read(chunk_size) 46 | 47 | while len(chunk) > 0: 48 | if len(chunk) % 16 != 0: 49 | chunk += ' ' * (16 - len(chunk) % 16) 50 | file_out.write(encryptor.encrypt(chunk)) 51 | chunk = file_in.read(chunk_size) 52 | 53 | file_in.close() 54 | file_out.close() 55 | 56 | print("Encrypted: {}".format(file_out.name)) 57 | return file_out.name 58 | 59 | def decrypt_file(file_in, file_out, key): 60 | chunk_size = 64*1024 61 | 62 | orig_size = struct.unpack('<Q', file_in.read(struct.calcsize('Q')))[0] 63 | 64 | iv = file_in.read(16) 65 | decryptor = AES.new(key, AES.MODE_CBC, iv) 66 | 67 | chunk = file_in.read(chunk_size) 68 | 69 | while len(chunk) > 0: 70 | file_out.write(decryptor.decrypt(chunk)) 71 | chunk = file_in.read(chunk_size) 72 | 73 | file_out.truncate(orig_size) 74 | file_in.close() 75 | file_out.close() 76 | print("Decrypted: {}".format(file_out.name)) 77 | return file_out.name 78 | 79 | def save_buffer(buffer, destination): 80 | real_file_path = os.path.dirname(destination) 81 | if not os.path.exists(real_file_path): 82 | os.makedirs(real_file_path) 83 | f = open(destination, 'w') 84 | f.write(buffer) 85 | f.close() 86 | 87 | -------------------------------------------------------------------------------- /helper-scripts/fabfile.py: -------------------------------------------------------------------------------- 1 | from fabric.api import * 2 | import pickle 3 | import os 4 | import random 5 | 6 | passwords_filename = 'passwords.pickle' 7 | if os.path.isfile(passwords_filename): 8 | passwords = pickle.load(open(passwords_filename)) 9 | env.passwords = {'ubuntu@{}:22'.format(ip) : password for ip, password in passwords.iteritems()} 10 | 11 | def runbg(cmd): 12 | run('screen -d -m {}'.format(cmd), pty=False) 13 | 14 | def ready_general(): 15 | bootstrap() 16 | prepare_dirs() 17 | upload_keys() 18 | upload_scripts() 19 | get_ed() 20 | 21 | def ready(): 22 | fix_sudoers() 23 | fix_boot() 24 | ready_general() 25 | 26 | def fix_boot(): 27 | local('./fix-server.sh {0}'.format(env.host)) 28 | 29 | def lsres(): 30 | run('ls p2pfs/work/res') 31 | 32 | def sudols(): 33 | sudo('ls') 34 | 35 | def ls(): 36 | run('ls') 37 | 38 | def upload_keys(): 39 | path = 'keys/' 40 | key_filenames = [ f for f in os.listdir(path) if os.path.isfile(os.path.join(path,f)) and f.startswith('key')] 41 | for filename in key_filenames: 42 | full_path = os.path.join(path, filename) 43 | put(full_path, '~/p2pfs/src/keys/') 44 | 45 | def pull(): 46 | with cd('~/p2pfs/'): 47 | run('git pull') 48 | 49 | def fix_sudoers(): 50 | n = random.randint(1, 100) 51 | put('sudoers', '~/.tmp-sudoers-{0}'.format(n), mode=0440) 52 | sudo('chown root:root ~/.tmp-sudoers-{0} && mv ~/.tmp-sudoers-{0} /etc/sudoers'.format(n)) 53 | 54 | def
bootstrap(): 55 | put('bootstrap-p2pfs.sh', '~/') 56 | put('sysctl.conf', '~/') 57 | sudo('sh bootstrap-p2pfs.sh') 58 | 59 | def prepare_dirs(): 60 | run('mkdir -p ~/p2pfs/work/res') 61 | run('mkdir -p ~/p2pfs/testfs') 62 | run('mkdir -p ~/tmp') 63 | sudo('chown ubuntu:ubuntu /home/ubuntu/tmp', warn_only=True) 64 | 65 | def upload_scripts(): 66 | put('host.sh', '~/p2pfs/', mirror_local_mode=True) 67 | put('connect.sh', '~/p2pfs/', mirror_local_mode=True) 68 | #put('connect-all.sh', '~/p2pfs/') 69 | #put('connect-new.sh', '~/p2pfs/') 70 | put('connect-mount.sh', '~/p2pfs/', mirror_local_mode=True) 71 | put('connect-new.sh', '~/p2pfs/', mirror_local_mode=True) 72 | put('connect-full-node.sh', '~/p2pfs/', mirror_local_mode=True) 73 | put('connect-full-node-mount.sh', '~/p2pfs/', mirror_local_mode=True) 74 | 75 | def get_ed(): 76 | with cd('~/tmp/'): 77 | run('wget http://ftp.unicamp.br/pub/gnu/ed/ed-1.9.tar.gz') 78 | run('tar xvzf ed-1.9.tar.gz') 79 | 80 | def new_node_mount(): 81 | with cd('~/p2pfs/'): 82 | runbg('./connect-full-node-mount.sh') 83 | 84 | def new_node(): 85 | with cd('~/p2pfs/'): 86 | runbg('./connect-full-node.sh') 87 | 88 | def connect_mount(): 89 | with cd('~/p2pfs/'): 90 | runbg('./connect-mount.sh') 91 | 92 | def connect_new(): 93 | with cd('~/p2pfs/'): 94 | runbg('./connect-new.sh') 95 | 96 | def connect(): 97 | with cd('~/p2pfs/'): 98 | runbg('./connect.sh') 99 | 100 | def host(): 101 | with cd('~/p2pfs/'): 102 | runbg('./host.sh') 103 | 104 | def unmount(): 105 | sudo('umount /home/ubuntu/p2pfs/testfs', warn_only=True) 106 | 107 | def kill_pypy(): 108 | run('killall pypy', warn_only=True) 109 | -------------------------------------------------------------------------------- /lxc-scripts/run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import os 4 | import time 5 | import random 6 | import sys 7 | import netaddr 8 | import subprocess 9 | import argparse 10 | 11 | prec = "cont" 12 | directory = "lxc_config" 13 | location = "/home/ubuntu/p2pfs" 14 | keys_location = "/home/ubuntu/p2pfs/src/keys/" 15 | dbs_location = "/home/ubuntu/p2pfs/src/dbs/" 16 | logs_location = "/home/ubuntu/p2pfs/src/logs/" 17 | resources_location = "/home/ubuntu/p2pfs/src/res/" 18 | node_location = os.path.join(location, 'src', 'share_node.py') 19 | base_command = node_location + " --port 2000" 20 | 21 | #python_command = sys.executable 22 | python_command = "/usr/bin/pypy" 23 | 24 | parser = argparse.ArgumentParser() 25 | parser.add_argument('--count', '-c', type=int, dest='container_count', required=True) 26 | parser.add_argument('--simulate', default=False, action='store_true') 27 | args = parser.parse_args() 28 | 29 | total = args.container_count 30 | 31 | #starting_address = netaddr.IPAddress("192.168.1.10") 32 | starting_address = netaddr.IPAddress("10.0.3.2") 33 | 34 | new_address = starting_address 35 | addresses = [('{}000'.format(prec), starting_address)] 36 | 37 | for i in xrange(0, total-1): 38 | new_address = new_address + 1 39 | name = "{}{:03d}".format(prec, i+1) 40 | addresses.append( (name, new_address) ) 41 | 42 | print [a for a in addresses] 43 | 44 | current = 0 45 | shares = { 1 : os.path.join('/home/ubuntu/test_pics') } 46 | 47 | def run_subprocess(address, command, fake=False): 48 | time.sleep(0.5) 49 | command_parts = ['lxc-execute', '-n', address[0], '--', python_command] + command.split() 50 | print(' '.join(command_parts)) 51 | if not fake: 52 | subprocess.Popen(command_parts, stdout=subprocess.PIPE, 
stderr=subprocess.STDOUT, cwd=location) 53 | 54 | def with_default_args(command, current): 55 | ret = command 56 | key_location = os.path.join(keys_location, 'key' + str(current)) 57 | db_location = os.path.join(dbs_location, 'db' + str(current)) 58 | log_location = os.path.join(logs_location, 'log' + str(current)) 59 | resource_location = os.path.join(resources_location, 'res' + str(current)) 60 | try: 61 | os.makedirs(resource_location) 62 | except: 63 | pass 64 | ret += " --key {}".format(key_location) 65 | ret += " --db {}".format(db_location) 66 | ret += " --log {}".format(log_location) 67 | ret += " --dir {}".format(resource_location) 68 | ret += " --newdb" 69 | return ret 70 | 71 | def run_nodes(addresses, simulate=False): 72 | 73 | current = 0 74 | # run the first container 75 | a = addresses.pop(random.randint(0, len(addresses) - 1)) 76 | 77 | command = with_default_args(base_command, current) 78 | 79 | # add share if needed 80 | if current in shares: 81 | command = '{} {} {}'.format(command, '--share', shares[current]) 82 | run_subprocess(a, command, simulate) 83 | 84 | # run the rest of containers: 85 | 86 | current += 1 87 | while len(addresses) > 0: 88 | if len(addresses) == 0: 89 | break 90 | b = addresses.pop(random.randint(0, len(addresses) - 1)) 91 | 92 | command = "{} --connect {}:2000".format(base_command, a[1]) 93 | 94 | command = with_default_args(command, current) 95 | 96 | if current in shares: 97 | command = '{} {} {}'.format(command, '--share', shares[current]) 98 | 99 | run_subprocess(b, command, simulate) 100 | current += 1 101 | 102 | def main(): 103 | run_nodes(addresses, args.simulate) 104 | 105 | if __name__=='__main__': 106 | main() 107 | -------------------------------------------------------------------------------- /src/share_node.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python 2 | ## 3 | ## This library is free software, distributed under the terms of 4 | ## the GNU Lesser General Public License Version 3, or any later version. 
5 | ## See the COPYING file included in this archive 6 | ## 7 | 8 | import argparse 9 | import cProfile 10 | import os 11 | import sys 12 | import shutil 13 | import time 14 | 15 | from threading import Lock 16 | 17 | from file_database import * 18 | from file_system import * 19 | from file_sharing_service import * 20 | from helpers import * 21 | from logger import * 22 | 23 | from twisted.internet import task 24 | from twisted.internet import defer 25 | 26 | import entangled.node 27 | 28 | from entangled.kademlia.datastore import SQLiteDataStore 29 | 30 | from Crypto.Cipher import PKCS1_OAEP 31 | from Crypto.PublicKey import RSA 32 | 33 | def main(): 34 | l = Logger() 35 | parser = argparse.ArgumentParser() 36 | parser.add_argument('--key', required=True) 37 | parser.add_argument('--port', required=True, type=int) 38 | parser.add_argument('--connect', dest='address', default=None) 39 | parser.add_argument('--share', dest='shared', default=[], nargs='*') 40 | parser.add_argument('--dir', dest='content_directory', required=True) 41 | parser.add_argument('--db', dest='db_filename', required=True) 42 | parser.add_argument('--newdb', default=False, action='store_true') 43 | parser.add_argument('--log', dest='log_filename', default=None) 44 | parser.add_argument('--fs', default=None) 45 | args = parser.parse_args() 46 | 47 | l.set_output(open(args.log_filename, 'w')) 48 | print('> logging to: {}'.format(args.log_filename)) 49 | 50 | if args.address: 51 | ip, port = args.address.split(':') 52 | port = int(port) 53 | knownNodes = [(ip, port)] 54 | # elif len(sys.argv) == 3: 55 | # knownNodes = [] 56 | # f = open(sys.argv[2], 'r') 57 | # lines = f.readlines() 58 | # f.close() 59 | # for line in lines: 60 | # ipAddress, udpPort = line.split() 61 | # knownNodes.append((ipAddress, int(udpPort))) 62 | else: 63 | knownNodes = None 64 | 65 | try: 66 | os.makedirs(os.path.expanduser('~')+'/.entangled') 67 | except OSError: 68 | pass 69 | dataStore = None#SQLiteDataStore(os.path.expanduser('~')+'/.entangled/fileshare.sqlite') 70 | 71 | ##key = RSA.importKey(open(args.key + '.pub').read()) 72 | 73 | print('> reading key') 74 | sha = hashlib.sha1() 75 | public_key = open(args.key + '.pub').read().strip() 76 | sha.update(public_key) 77 | node_id = sha.digest() 78 | 79 | node = entangled.node.EntangledNode(id=node_id, udpPort=args.port, dataStore=dataStore) 80 | node.invalidKeywords.extend(('mp3', 'png', 'jpg', 'txt', 'ogg')) 81 | node.keywordSplitters.extend(('-', '!')) 82 | 83 | print('> joining network') 84 | node.joinNetwork(knownNodes) 85 | 86 | def prepare(): 87 | file_db = FileDatabase(l, public_key, args.db_filename, args.newdb) 88 | file_service = FileSharingService(l, node, args.port, public_key, file_db, args.content_directory) 89 | 90 | for directory in args.shared: 91 | reactor.callLater(6, file_service.publish_directory, public_key, directory) 92 | 93 | 94 | # just for informational purposes 95 | if args.newdb: 96 | print('> adding \'/\'') 97 | 98 | l.log('Main', 'Node running.') 99 | 100 | def fuse_call(): 101 | time.sleep(20) 102 | print('> filesystem running') 103 | debug = True 104 | fsobj = FileSystem(l, public_key, file_db, file_service, args.content_directory) 105 | fuse = FUSE(fsobj, args.fs, foreground=True, debug=debug) 106 | 107 | if args.fs: 108 | reactor.callInThread(fuse_call) 109 | 110 | print('> reactor running') 111 | reactor.callLater(2, prepare) 112 | 113 | reactor.run() 114 | 115 | if __name__ == '__main__': 116 | #cProfile.run('main()') 117 | main() 118 | 
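share_node.py above is the node entry point; combining its command-line flags with the keys produced by src/keys/gen.sh and the launchers under helper-scripts/, a minimal local two-node run might look like the sketch below. This is illustrative only: the ports, directories and bootstrap address are assumptions rather than values taken from the repository, and the helper scripts invoke the nodes via /usr/bin/pypy, with fabfile.py wrapping each launch in screen so it keeps running in the background.

# generate node identities (gen.sh creates key0..key100 plus matching .pub files)
(cd src/keys && ./gen.sh)
mkdir -p work/res0 work/res1 testfs

# bootstrap node: fresh metadata database, serves files from work/res0
# (run each node in its own terminal; both block on the Twisted reactor)
pypy src/share_node.py --port 2000 --key src/keys/key0 \
    --db work/db0 --log log0 --dir work/res0 --newdb

# second node: joins via the bootstrap node and mounts the FUSE view at ./testfs
# add --share <directory> to publish an existing directory's contents on startup
pypy src/share_node.py --port 2001 --connect 127.0.0.1:2000 --key src/keys/key1 \
    --db work/db1 --log log1 --dir work/res1 --newdb --fs testfs
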
-------------------------------------------------------------------------------- /src/old/chord_node.py: -------------------------------------------------------------------------------- 1 | from twisted.protocols.basic import LineReceiver 2 | from twisted.internet import reactor, protocol 3 | from twisted.internet.address import IPv4Address 4 | from twisted.internet.defer import Deferred, succeed 5 | from twisted.internet.protocol import Protocol, ClientFactory, ServerFactory 6 | from twisted.protocols.basic import NetstringReceiver 7 | from twisted.spread import pb 8 | from twisted.protocols import amp 9 | import argparse 10 | import md5 11 | import collections 12 | import math 13 | 14 | 15 | M = 10 16 | 17 | def Echo(s): 18 | print(s) 19 | 20 | def Hash(s): 21 | return int(long(md5.new(s).hexdigest(), 16) % M) 22 | 23 | 24 | class ChordServerProtocol(NetstringReceiver): 25 | 26 | def connectionMade(self): 27 | print("Someone connected.") 28 | 29 | def stringReceived(self, request): 30 | print("Received a request: {}.".format(request)) 31 | 32 | if '.' not in request: # bad request 33 | self.transport.loseConnection() 34 | return 35 | 36 | req, arg = request.split('.', 1) 37 | 38 | d = self.factory.HandleRequest(req) 39 | d.addCallback(lambda ret: self.sendString(str(ret))) 40 | d.addCallback(lambda _: self.transport.loseConnection) 41 | d.callback(int(arg)) 42 | 43 | 44 | class ChordServerFactory(ServerFactory): 45 | 46 | protocol = ChordServerProtocol 47 | 48 | def __init__(self, service): 49 | self.service = service 50 | 51 | def HandleRequest(self, req): 52 | if req == 'retrieve_value': 53 | d = Deferred() 54 | d.addCallback(self.service.GetValue) 55 | return d 56 | 57 | 58 | class ChordClientProtocol(amp.AMP): 59 | pass 60 | 61 | class ChordClientFactory(ClientFactory): 62 | 63 | protocol = ChordClientProtocol 64 | 65 | def __init__(self, key, deferred): 66 | self.key = key 67 | self.deferred = deferred 68 | 69 | 70 | def HashAddress(address): 71 | return Hash(str(address.host) + str(address.port)) 72 | 73 | 74 | class ChordService(pb.Root): 75 | 76 | def __init__(self): 77 | self.me = None 78 | self.data = {} 79 | self.routing_table = {} 80 | 81 | def AddToRoutingTable(self, address): 82 | h = HashAddress(address) 83 | self.routing_table[h] = address 84 | print("Added {} to routing table (hash: {}).".format(address, h)) 85 | 86 | def StoreValue(self, key, value): 87 | self.data[int(key)] = value 88 | print("Stored key: {}, value: {}.".format(key, value)) 89 | 90 | def remote_GetValue(self, key): 91 | return self.GetValue(int(key)) 92 | 93 | def GetValue(self, key): 94 | print('Retrieving value with key: {}.'.format(key)) 95 | # check if value is among the values you hold 96 | if key in self.data: 97 | return succeed(self.data[key]) 98 | 99 | # if it is not, look at your routing table 100 | deferred = Deferred() 101 | #factory = ChordClientFactory(key, deferred) 102 | factory = pb.PBClientFactory() 103 | #address_hash = int(math.floor(math.log(int(key), 2))) 104 | #print("address hash: {}".format(address_hash) 105 | #address = self.routing_table[address_hash] 106 | address = self.routing_table[int(key)] 107 | reactor.connectTCP(address.host, address.port, factory) 108 | d = factory.getRootObject() 109 | d.addCallback(lambda object: object.callRemote("GetValue", key)) 110 | return d 111 | 112 | 113 | def main(): 114 | parser = argparse.ArgumentParser() 115 | parser.add_argument('--port') 116 | parser.add_argument('--store') 117 | parser.add_argument('--retrieve') 118 | 
parser.add_argument('--connect', default=None) 119 | 120 | args = parser.parse_args() 121 | port = int(args.port) 122 | 123 | service = ChordService() 124 | 125 | if (args.connect): 126 | dst = args.connect.split(':') 127 | service.AddToRoutingTable(IPv4Address('TCP', dst[0], int(dst[1]))) 128 | 129 | if (args.store): 130 | key, value = args.store.split(':') 131 | service.StoreValue(key, value) 132 | 133 | if (args.retrieve): 134 | def EchoValue(value): 135 | print('Retrieved value: {}.'.format(value)) 136 | d = service.GetValue(args.retrieve) 137 | d.addCallback(EchoValue) 138 | 139 | f = pb.PBServerFactory(service) 140 | reactor.listenTCP(port, f) 141 | 142 | reactor.run() 143 | 144 | if __name__ == '__main__': 145 | main() 146 | 147 | -------------------------------------------------------------------------------- /src/index_master_protocol.py: -------------------------------------------------------------------------------- 1 | from twisted.protocols.basic import LineReceiver 2 | from tempfile import NamedTemporaryFile 3 | from helpers import * 4 | import os 5 | import json 6 | import binascii 7 | 8 | class IndexMasterProtocol(LineReceiver): 9 | def log(self, message): 10 | self.factory.l.log('IndexMaster', message) 11 | 12 | def connectionMade(self): 13 | self.setLineMode() 14 | ip = self.transport.getPeer().host 15 | self.log('New Connection from {}'.format(ip)) 16 | 17 | def lineReceived(self, data): 18 | data = json.loads(data) 19 | self.command_name = data['command'] 20 | self.log('Received: {}'.format(self.command_name)) 21 | 22 | if self.command_name == 'store': 23 | self.log("Index Master received: {} ({})".format(data['path'], data['hash'])) 24 | self.filename = data['path'] 25 | self.key = data['key'] 26 | self.hash = binascii.unhexlify(data['hash']) 27 | self.mtime = data['time'] 28 | # hack 29 | if self.filename[0] == '/': 30 | self.destination = os.path.join(self.factory.file_dir, self.filename[1:]) 31 | else: 32 | self.destination = os.path.join(self.factory.file_dir, self.filename) 33 | 34 | dirs = os.path.dirname(self.destination) 35 | if dirs and not os.path.exists(dirs): 36 | os.makedirs(dirs) 37 | 38 | self.tmp_destination_file = NamedTemporaryFile(delete=False) 39 | self.outfile_size = 0 40 | self.setRawMode() 41 | 42 | elif self.command_name == 'tell_metadata': 43 | path = data['path'] 44 | self.hash = binascii.unhexlify(data['hash']) 45 | 46 | if self.factory.file_service.storage.has_key(self.hash): 47 | #print self.factory.file_service.storage 48 | self.sendLine(str(self.factory.file_service.storage[self.hash]['mtime'])) 49 | self.transport.loseConnection() 50 | self.log('Metadata sent and transport connection terminated') 51 | else: 52 | self.log('Cannot send metadata: no such key') 53 | 54 | elif self.command_name == 'upload': 55 | self.log('upload: {}'.format(data['hash'])) 56 | self.hash = binascii.unhexlify(data['hash']) 57 | 58 | if self.factory.file_service.storage.has_key(self.hash): 59 | self.setRawMode() 60 | # hack 61 | if data['path'][0] == '/': 62 | file_path = os.path.join(self.factory.file_dir, data['path'][1:]) 63 | else: 64 | file_path = os.path.join(self.factory.file_dir, data['path']) 65 | self.log('Uploading: {}'.format(file_path)) 66 | d = upload_file_with_encryption(file_path, self.transport) 67 | d.addCallback(self.transferCompleted) 68 | else: 69 | self.log('Cannot upload: no such key') 70 | else: 71 | self.log('Unrecognised command: {}'.format(self.command_name)) 72 | 73 | def transferCompleted(self, last_sent): 74 | self.log('finished 
uploading') 75 | self.transport.loseConnection() 76 | 77 | def rawDataReceived(self, data): 78 | self.log('raw data received ({})'.format(len(data))) 79 | self.tmp_destination_file.write(data) 80 | self.outfile_size += len(data) 81 | 82 | def add_storage(self, hash, key, filename, mtime): 83 | #self.factory.file_service.storage[self.hash] = {'key':self.key, 'filename':self.filename, 'mtime':int(self.mtime)} 84 | print('stored {}'.format(filename)) 85 | self.factory.file_service.storage[hash] = { 86 | 'key': key, 87 | 'filename': filename, 88 | 'mtime': int(mtime) 89 | } 90 | 91 | def connectionLost(self, reason): 92 | self.log('Index master lost connection.') 93 | if self.command_name == 'store': 94 | self.setLineMode() 95 | if self.outfile_size == 0: 96 | self.log("Error! Connection lost :(\n") 97 | return 98 | else: 99 | self.tmp_destination_file.close() 100 | d = threads.deferToThread( 101 | decrypt_file, 102 | open(self.tmp_destination_file.name, 'rb'), 103 | open(self.destination, 'wb'), 104 | ENCRYPT_KEY) 105 | d.addCallback(lambda _ : self.add_storage(self.hash, self.key, self.filename, self.mtime)) 106 | self.log('Stored: {} ({} bytes)'.format(self.filename, self.outfile_size)) 107 | 108 | elif self.command_name == 'tell_metadata': 109 | self.log('Metadata sent') 110 | 111 | elif self.command_name == 'upload': 112 | self.setLineMode() 113 | self.log('Upload finished') 114 | 115 | -------------------------------------------------------------------------------- /src/old/test.py: -------------------------------------------------------------------------------- 1 | # /* 2 | # * This program is free software; you can redistribute it and/or modify 3 | # * it under the terms of the GNU General Public License version 2 as 4 | # * published by the Free Software Foundation; 5 | # * 6 | # * This program is distributed in the hope that it will be useful, 7 | # * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 | # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 | # * GNU General Public License for more details. 
10 | # * 11 | # * You should have received a copy of the GNU General Public License 12 | # * along with this program; if not, write to the Free Software 13 | # * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 14 | # */ 15 | 16 | import ns.applications 17 | import ns.core 18 | import ns.internet 19 | import ns.network 20 | import ns.point_to_point 21 | import ns.csma 22 | import visualizer 23 | 24 | class ChordApp(ns.network.Application): 25 | def Setup(self, peer_address): 26 | self.peer_address = peer_address 27 | 28 | def StartApplication(self): 29 | self.tid = ns.core.TypeId.LookupByName('ns3::TcpSocketFactory') 30 | self.address = ns.network.InetSocketAddress(ns.network.Ipv4Address.GetAny(), 9) 31 | self.mySocket = ns.network.Socket.CreateSocket(self.GetNode(), self.tid) 32 | 33 | self.mySocket.Bind(self.address) 34 | self.mySocket.Listen() 35 | self.mySocket.SetRecvCallback(self.handleRead) 36 | 37 | print("chord node started") 38 | if self.peer_address != None: 39 | self.peerSocket = ns.network.Socket.CreateSocket(self.GetNode(), self.tid) 40 | self.peerSocket.Bind() 41 | print "connecting to {}".format(self.peer_address.Get()) 42 | self.peerSocket.Connect(self.peer_address) 43 | print "sending" 44 | self.peerSocket.Send(ns.network.Packet()) 45 | 46 | def StopApplication(self): 47 | print("chord node stopped") 48 | # TODO: close all sockets 49 | # def __init__(self, addr, socket): 50 | # self.addr = addr 51 | # self.socket = socket 52 | 53 | def handleRead(self, socket): 54 | print("received message") 55 | 56 | node_count = 2 57 | 58 | #ns.core.LogComponentEnable("UdpEchoClientApplication", ns.core.LOG_LEVEL_INFO) 59 | #ns.core.LogComponentEnable("UdpEchoServerApplication", ns.core.LOG_LEVEL_INFO) 60 | 61 | nodes = ns.network.NodeContainer() 62 | nodes.Create(node_count) 63 | 64 | internet = ns.internet.InternetStackHelper() 65 | internet.Install(nodes) 66 | 67 | #eth = ns.csma.CsmaHelper() 68 | #eth.SetChannelAttribute("DataRate", ns.core.StringValue("5Mbps")) 69 | #eth.SetChannelAttribute("Delay", ns.core.TimeValue(ns.core.MilliSeconds(2))) 70 | #eth.SetDeviceAttribute("Mtu", ns.core.UintegerValue(1400)) 71 | 72 | #devices = eth.Install(nodes) 73 | 74 | pointToPoint = ns.point_to_point.PointToPointHelper() 75 | pointToPoint.SetDeviceAttribute("DataRate", ns.core.StringValue("5Mbps")) 76 | pointToPoint.SetChannelAttribute("Delay", ns.core.StringValue("2ms")) 77 | 78 | devices = pointToPoint.Install(nodes) 79 | 80 | 81 | ipv4 = ns.internet.Ipv4AddressHelper() 82 | ipv4.SetBase(ns.network.Ipv4Address("10.1.1.0"), ns.network.Ipv4Mask("255.255.255.0")) 83 | ipf = ipv4.Assign (devices) 84 | 85 | addr = ns.network.InetSocketAddress(ipf.GetAddress(0), 6000) 86 | #capp = ChordApp(addr, None) 87 | 88 | last_app = None 89 | apps = ns.network.ApplicationContainer() 90 | for i in xrange(0, node_count): 91 | app = ChordApp() 92 | nodes.Get(i).AddApplication(app) 93 | if i > 0: 94 | prev_address = ipf.GetAddress(0) 95 | else: 96 | prev_address = None 97 | app.Setup(prev_address) 98 | apps.Add(app) 99 | last_app = app 100 | 101 | #capp = ns.applications.UdpEchoServer() 102 | #nodes.Get(0).AddApplication(capp); 103 | #nodes.Get(1).AddApplication(capp2); 104 | 105 | ############ 106 | #pointToPoint = ns.point_to_point.PointToPointHelper() 107 | #pointToPoint.SetDeviceAttribute("DataRate", ns.core.StringValue("5Mbps")) 108 | #pointToPoint.SetChannelAttribute("Delay", ns.core.StringValue("2ms")) 109 | # 110 | #devices = pointToPoint.Install(nodes) 111 | # 112 | #stack = 
ns.internet.InternetStackHelper() 113 | #stack.Install(nodes) 114 | # 115 | #echoServer = ns.applications.UdpEchoServerHelper(9) 116 | # 117 | #serverApps = echoServer.Install(nodes.Get(1)) 118 | #serverApps.Start(ns.core.Seconds(1.0)) 119 | #serverApps.Stop(ns.core.Seconds(10.0)) 120 | # 121 | #echoClient = ns.applications.UdpEchoClientHelper(interfaces.GetAddress(1), 9) 122 | #echoClient.SetAttribute("MaxPackets", ns.core.UintegerValue(1)) 123 | #echoClient.SetAttribute("Interval", ns.core.TimeValue(ns.core.Seconds (1.0))) 124 | #echoClient.SetAttribute("PacketSize", ns.core.UintegerValue(1024)) 125 | # 126 | #clientApps = echoClient.Install(nodes.Get(0)) 127 | apps.Start(ns.core.Seconds(2.0)) 128 | apps.Stop(ns.core.Seconds(10.0)) 129 | 130 | #visualizer.start() 131 | ns.core.Simulator.Run() 132 | ns.core.Simulator.Destroy() 133 | 134 | -------------------------------------------------------------------------------- /src/file_system.py: -------------------------------------------------------------------------------- 1 | import os 2 | from twisted.internet import reactor 3 | from twisted.internet import threads 4 | from fuse import FUSE, FuseOSError, Operations, LoggingMixIn, fuse_get_context 5 | from errno import ENOENT 6 | from stat import S_IFDIR, S_IFLNK, S_IFREG 7 | 8 | 9 | class FileSystem(LoggingMixIn, Operations): 10 | def __init__(self, logger, key, file_db, file_service, file_dir): 11 | self.file_db = file_db 12 | self.file_dir = file_dir 13 | self.key = key 14 | self.l = logger 15 | self.file_service = file_service 16 | self.updateables = set([]) 17 | 18 | def log(self, message): 19 | self.l.log('FileSystem', message) 20 | 21 | def __call__(self, op, *args): 22 | #self.log('-> {} {}'.format(op, (' '.join(str(arg) for arg in args) if args else ''))) 23 | #self.log('-> {} ...'.format(op)) 24 | return getattr(self, op)(*args) 25 | 26 | def chown(self, path, uid, gid): 27 | threads.blockingCallFromThread(reactor, self.file_db.chown, self.key, path, uid, gid) 28 | 29 | def chmod(self, path, mode): 30 | threads.blockingCallFromThread(reactor, self.file_db.chmod, self.key, path, mode) 31 | return 0 32 | 33 | def getattr(self, path, fh=None): 34 | result = threads.blockingCallFromThread( 35 | reactor, self.file_db.getattr, self.key, path) 36 | if result: 37 | return result 38 | else: 39 | raise FuseOSError(ENOENT) 40 | 41 | getxattr = None 42 | listxattr = None 43 | 44 | def readdir(self, path, fh): 45 | contents = threads.blockingCallFromThread(reactor, self.file_db.list_directory, self.key, path) 46 | ret = ['.', '..'] 47 | if contents: 48 | ret += contents 49 | return ret 50 | 51 | def unlink(self, path): 52 | threads.blockingCallFromThread(reactor, self.file_db.delete_file, self.key, path) 53 | real_path = os.path.join(self.file_dir, path[1:]) 54 | os.unlink(real_path) 55 | 56 | def create(self, path, mode): 57 | threads.blockingCallFromThread(reactor, self.file_db.add_file, self.key, path, mode, 0) 58 | real_path = os.path.join(self.file_dir, path[1:]) 59 | dir_path = os.path.dirname(real_path) 60 | if not os.path.exists(dir_path): 61 | self.log('create dir: {}'.format(dir_path)) 62 | os.makedirs(dir_path) 63 | self.log('create file: {}'.format(real_path)) 64 | return os.open(real_path, os.O_WRONLY | os.O_CREAT, mode) 65 | 66 | def mkdir(self, path, mode): 67 | threads.blockingCallFromThread(reactor, self.file_db.add_directory, self.key, path, mode) 68 | 69 | def access(self, path, mode): 70 | real_path = os.path.join(self.file_dir, path[1:]) 71 | if os.path.exists(real_path) and 
not os.access(real_path, mode): 72 | raise FuseOSError(EACCES) 73 | 74 | opendir = None 75 | #release = None 76 | releasedir = None 77 | 78 | def file_is_up_to_date(self, file_path_on_disk, path): 79 | self.log('Is file up to date? {}'.format(file_path_on_disk)) 80 | if not os.path.isfile(file_path_on_disk): 81 | return False 82 | if os.stat(file_path_on_disk).st_mtime < threads.blockingCallFromThread(reactor, self.file_db.get_file_mtime, self.key, path): 83 | return False 84 | return True 85 | 86 | def open(self, path, flags): 87 | if threads.blockingCallFromThread(reactor, self.file_db.file_exists, self.key, path): 88 | file_path = os.path.join(self.file_dir, path[1:]) 89 | if not self.file_is_up_to_date(file_path, path): 90 | # we need to find this file on the dht 91 | threads.blockingCallFromThread(reactor, self.file_service.download, path, file_path, self.key, True) 92 | 93 | return os.open(os.path.join(self.file_dir, path[1:]), flags) 94 | 95 | def read(self, path, size, offset, fh): 96 | file_path = os.path.join(self.file_dir, path[1:]) 97 | #if not self.file_is_up_to_date(file_path, path): 98 | # # we need to find this file on the dht 99 | # threads.blockingCallFromThread(reactor, self.file_service.download, path, file_path, self.key, True) 100 | os.lseek(fh, offset, 0) 101 | return os.read(fh, size) 102 | 103 | #def symlink(self, target, source): 104 | # print 'symlink' 105 | def flush(self, path, fh): 106 | os.fsync(fh) 107 | if fh in self.updateables: 108 | full_file_path = os.path.join(self.file_dir, path[1:]) 109 | mtime = threads.blockingCallFromThread(reactor, self.file_db.update_file_mtime, self.key, path) 110 | threads.blockingCallFromThread(reactor, self.file_db.update_size, self.key, path, os.path.getsize(full_file_path)) 111 | reactor.callFromThread(self.file_service.publish_file, self.key, path, full_file_path, mtime) 112 | self.updateables.remove(fh) 113 | return 0 114 | 115 | def release(self, path, fh): 116 | return os.close(fh) 117 | 118 | def fsync(self, path, datasync, fh): 119 | os.fsync(fh) 120 | return 0 121 | 122 | def utimens(self, path, times=None): 123 | atime, mtime = times if times else (now, now) 124 | threads.blockingCallFromThread(reactor, self.file_db.update_time, self.key, path, atime, mtime) 125 | 126 | #def readlink(self, path): 127 | # print 'readlink' 128 | 129 | def rename(self, old, new): 130 | threads.blockingCallFromThread(reactor, self.file_db.rename, self.key, old, new) 131 | 132 | def rmdir(self, path): 133 | threads.blockingCallFromThread(reactor, self.file_db.delete_directory, self.key, path) 134 | 135 | #def unlink(self, path): 136 | # print 'unlink' 137 | 138 | def truncate(self, path, length, fh=None): 139 | with open(os.path.join(self.file_dir, path[1:]), 'r+') as f: 140 | f.truncate(length) 141 | 142 | def statfs(self, path): 143 | return dict(f_bsize=512, f_blocks=4096, f_bavail=2048) 144 | 145 | symlink = None 146 | 147 | def write(self, path, data, offset, fh): 148 | self.log('handle {}'.format(str(fh))) 149 | #f = open(full_file_path, 'w') 150 | self.log('writing {}'.format(path)) 151 | os.lseek(fh, offset, 0) 152 | self.updateables.add(fh) 153 | return os.write(fh, data) 154 | 155 | -------------------------------------------------------------------------------- /src/file_database.py: -------------------------------------------------------------------------------- 1 | from stat import S_IFDIR, S_IFLNK, S_IFREG 2 | import pickle 3 | import time 4 | import os 5 | import sqlite3 6 | 7 | class FileObject(object): 8 | attr_fields = 
'st_atime, st_ctime, st_mode, st_mtime, st_nlink, st_size'.split(', ') 9 | def __init__(self): 10 | self.attrs = {} 11 | self.contents = {} 12 | 13 | class FileDatabase(object): 14 | def __init__(self, logger, key, filename, new=False): 15 | self.key = key 16 | self.db_filename = filename 17 | self.new = new 18 | self.l = logger 19 | self.data = {'current_time' : 0} 20 | 21 | def ready(self): 22 | if self.new or not os.path.exists(self.db_filename): 23 | self.update_db_time() 24 | else: 25 | self.load_data(self.db_filename) 26 | self.l.log('DB', 'READY') 27 | 28 | def save_data(self): 29 | self.l.log('DB', 'SAVING DB') 30 | f = open(self.db_filename, 'w') 31 | pickle.dump(self.data, f) 32 | f.close() 33 | 34 | def publish(self): 35 | self.save_data() 36 | m_time = self.get_db_mtime(self.key) 37 | self.file_service.publish_file(self.key, os.path.basename(self.db_filename), self.db_filename, m_time) 38 | 39 | def load_data(self, filename): 40 | self.data = pickle.load(open(filename)) 41 | 42 | def get_file_object(self, public_key, path, create_parents=False): 43 | path = path.split('/') 44 | result = self.data.get(public_key) 45 | 46 | if create_parents and not result: 47 | result = FileObject() 48 | current_time = int(time.time()) 49 | result.attrs['st_mode'] = S_IFDIR | 0755 50 | result.attrs['st_nlink'] = 2 51 | result.attrs['st_atime'] = current_time 52 | result.attrs['st_mtime'] = current_time 53 | result.attrs['st_ctime'] = current_time 54 | result.attrs['st_size'] = 0 55 | self.data[public_key] = result 56 | 57 | for dir_name in path: 58 | if dir_name: 59 | new_result = result.contents.get(dir_name) 60 | if new_result: 61 | result = new_result 62 | else: 63 | return None 64 | return result 65 | 66 | def chmod(self, public_key, path, mode): 67 | fobj = self.get_file_object(public_key, path) 68 | fobj.attrs['st_mode'] &= 0770000 69 | fobj.attrs['st_mode'] |= mode 70 | self.update_db_time() 71 | self.publish() 72 | 73 | def get_file_mtime(self, public_key, path): 74 | #self.l.log('DB', 'Retrieving mtime for: {}'.format(filename)) 75 | fobj = self.get_file_object(public_key, path) 76 | if not fobj: 77 | return 0 78 | else: 79 | return fobj.attrs['st_mtime'] 80 | 81 | def get_db_mtime(self, public_key): 82 | return self.data['current_time'] 83 | 84 | def update_time(self, public_key, path, atime, mtime): 85 | fobj = self.get_file_object(public_key, path) 86 | fobj.attrs['st_atime'] = atime 87 | fobj.attrs['st_mtime'] = mtime 88 | self.update_db_time() 89 | self.publish() 90 | 91 | def update_file_mtime(self, public_key, path): 92 | fobj = self.get_file_object(public_key, path) 93 | current_time = int(time.time()) 94 | fobj.attrs['st_mtime'] = current_time 95 | self.update_db_time() 96 | self.publish() 97 | return current_time 98 | 99 | def update_size(self, public_key, path, size): 100 | fobj = self.get_file_object(public_key, path) 101 | fobj.attrs['st_size'] = size 102 | self.update_db_time() 103 | self.publish() 104 | 105 | def chown(self, public_key, path, uid, gid): 106 | fobj = self.get_file_object(public_key, path) 107 | fobj.attrs['st_uid'] = uid 108 | fobj.attrs['st_gid'] = gid 109 | self.update_db_time() 110 | self.publish() 111 | 112 | def getattr(self, public_key, path): 113 | fobj = self.get_file_object(public_key, path) 114 | return fobj.attrs if fobj else None 115 | 116 | def rename(self, public_key, old_path, new_path): 117 | old_dirname, old_filename = os.path.split(old_path) 118 | new_dirname, new_filename = os.path.split(new_path) 119 | old_location = 
self.get_file_object(public_key, old_dirname) 120 | destination = self.get_file_object(public_key, new_dirname) 121 | destination.contents[new_filename] = old_location.contents[old_filename] 122 | del old_location.contents[old_filename] 123 | self.update_db_time() 124 | self.publish() 125 | 126 | def add_file(self, public_key, path, mode, size): 127 | self.l.log("Adding file: {}".format(path)) 128 | new_file = FileObject() 129 | current_time = int(time.time()) 130 | new_file.attrs['st_mode'] = S_IFREG | mode 131 | new_file.attrs['st_atime'] = current_time 132 | new_file.attrs['st_mtime'] = current_time 133 | new_file.attrs['st_ctime'] = current_time 134 | new_file.attrs['st_size'] = size 135 | new_file.attrs['st_nlink'] = 1 136 | dirname, filename = os.path.split(path) 137 | fobj = self.get_file_object(public_key, dirname, True) 138 | fobj.contents[filename] = new_file 139 | self.update_db_time() 140 | self.publish() 141 | 142 | def delete_file(self, public_key, path): 143 | dirname, filename = os.path.split(path) 144 | fobj = self.get_file_object(public_key, dirname) 145 | del fobj.contents[filename] 146 | self.update_db_time() 147 | self.publish() 148 | 149 | def delete_directory(self, public_key, path): 150 | delete_file(public_key, path) 151 | 152 | def update_db_time(self): 153 | self.data['current_time'] = int(time.time()) 154 | 155 | def add_directory(self, public_key, path, mode): 156 | # hack to create top folder, this shouldn't be done at all ever! 157 | self.l.log('adding directory {}'.format(path)) 158 | if path == '/': 159 | self.get_file_object(public_key, '/', True) 160 | self.update_db_time() 161 | self.publish() 162 | return 163 | current_time = int(time.time()) 164 | dirname, filename = os.path.split(path) 165 | new_dir = FileObject() 166 | new_dir.attrs['st_mode'] = S_IFDIR | mode 167 | new_dir.attrs['st_nlink'] = 2 168 | new_dir.attrs['st_size'] = 0 169 | new_dir.attrs['st_atime'] = current_time 170 | new_dir.attrs['st_mtime'] = current_time 171 | new_dir.attrs['st_ctime'] = current_time 172 | fobj = self.get_file_object(public_key, dirname, True) 173 | fobj.contents[filename] = new_dir 174 | 175 | self.update_db_time() 176 | self.publish() 177 | 178 | def list_directory(self, public_key, path): 179 | fobj = self.get_file_object(public_key, path) 180 | return fobj.contents.keys() 181 | 182 | def file_exists(self, public_key, path): 183 | dirname, filename = os.path.split(path) 184 | fobj = self.get_file_object(public_key, dirname) 185 | return fobj.contents.has_key(filename) 186 | 187 | -------------------------------------------------------------------------------- /src/file_sharing_service.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | from twisted.internet import defer 4 | from twisted.internet import reactor 5 | from twisted.internet.protocol import Protocol, ServerFactory, ClientCreator 6 | from index_master_protocol import * 7 | from upload_protocol import * 8 | from metadata_request_protocol import * 9 | from upload_request_protocol import * 10 | 11 | class FileSharingService(): 12 | def __init__(self, logger, node, listen_port, key, file_db, file_dir): 13 | self.node = node 14 | self.listen_port = listen_port 15 | self.file_dir = file_dir 16 | self.l = logger 17 | 18 | self.storage = {} 19 | self.key = key 20 | 21 | self.file_db = file_db 22 | self.file_db.file_service = self 23 | 24 | self._setup_tcp() 25 | 26 | if self.file_db.new: 27 | self.file_db.ready() 28 | 
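FileDatabase above keeps the whole per-key namespace in memory as a tree of FileObject nodes, each holding a stat-style attrs dict plus a contents dict of children; paths are resolved by walking path.split('/'), and every mutation re-pickles the entire tree before it is republished. A condensed standalone sketch of that layout (Node, lookup and the sample paths are illustrative names, not the project's API):

# Sketch of the nested-dict namespace used by FileDatabase: lookup walks one
# path component at a time through each node's contents dict, and the whole
# tree is serialised in a single pickle, as save_data()/load_data() do.
import pickle
import time
from stat import S_IFDIR, S_IFREG

class Node(object):
    def __init__(self, mode):
        now = int(time.time())
        self.attrs = {'st_mode': mode, 'st_mtime': now, 'st_size': 0}
        self.contents = {}

def lookup(root, path):
    node = root
    for part in path.split('/'):
        if part:
            node = node.contents.get(part)
            if node is None:
                return None
    return node

root = Node(S_IFDIR | 0o755)
root.contents['docs'] = Node(S_IFDIR | 0o755)
root.contents['docs'].contents['readme.txt'] = Node(S_IFREG | 0o644)

assert lookup(root, '/docs/readme.txt') is not None
assert lookup(root, '/missing') is None

restored = pickle.loads(pickle.dumps(root))          # whole-tree persistence
assert lookup(restored, '/docs/readme.txt').attrs['st_size'] == 0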
self.file_db.add_directory(self.key, '/', 0755) 29 | self.file_db.publish() 30 | reactor.callLater(17, self.query_and_update_db_by_metadata) 31 | else: 32 | # download the database 33 | db_path = self.file_db.db_filename 34 | def prepare_database(_): 35 | self.file_db.ready() 36 | df = self.download(os.path.basename(self.file_db.db_filename), db_path, self.key) 37 | df.addCallback(prepare_database) 38 | reactor.callLater(30, self.query_and_update_db_by_metadata) 39 | 40 | def log(self, message): 41 | self.l.log('FileService', message) 42 | 43 | def query_and_update_db_by_metadata(self): 44 | """Continuously queries the network for a new version of the user's file database.""" 45 | df = self.get_metadata(self.file_db.db_filename, self.key) 46 | def handle_metadata(metadata): 47 | mtime = self.file_db.get_db_mtime(self.key) 48 | self.log('my: {}, their: {}'.format(mtime, metadata)) 49 | if mtime < metadata: 50 | self.log('will redownload: {} ({} < {})'.format(self.file_db.db_filename, mtime, metadata)) 51 | db_path = self.file_db.db_filename 52 | self.download(os.path.basename(self.file_db.db_filename), db_path, self.key) 53 | self.file_db.load_data(self.file_db.db_filename) 54 | else: 55 | self.log('{}: {} >= {}'.format(self.file_db.db_filename, mtime, metadata)) 56 | df.addCallback(handle_metadata) 57 | if df.called: 58 | self.log("already called.") 59 | reactor.callLater(5, self.query_and_update_db_by_metadata) 60 | 61 | def _setup_tcp(self): 62 | self.factory = ServerFactory() 63 | self.factory.protocol = IndexMasterProtocol 64 | self.factory.file_service = self 65 | self.factory.file_dir = self.file_dir 66 | self.factory.file_db = self.file_db 67 | self.factory.key = self.key 68 | self.factory.l = self.l 69 | reactor.listenTCP(self.listen_port, self.factory) 70 | 71 | def search(self, keyword): 72 | return self.node.searchForKeywords(keyword) 73 | 74 | def publish_file_with_upload(self, path, local_file_path, m_time): 75 | key = sha_hash(path) 76 | self.log('publishing file {} ({})'.format(path, local_file_path)) 77 | 78 | def upload_file(protocol): 79 | if protocol != None: 80 | self.log("upload file {} {}".format(path, local_file_path)) 81 | protocol.upload_file(path, local_file_path, self.key, key, m_time) 82 | 83 | def upload_file_to_peers(contacts): 84 | outerDf = defer.Deferred() 85 | if not contacts: 86 | self.log("Could not reach any peers. 
({})".format(str(contacts))) 87 | else: 88 | for contact in contacts: 89 | c = ClientCreator(reactor, UploadProtocol, self.l) 90 | df = c.connectTCP(contact.address, contact.port) 91 | df.addCallback(upload_file) 92 | self.log("Will upload '{}' to: {}".format(local_file_path, contact)) 93 | outerDf.chainDeferred(df) 94 | return outerDf 95 | 96 | 97 | df = self.node.iterativeFindNode(key) 98 | df.addCallback(upload_file_to_peers) 99 | return df 100 | 101 | def publish_directory(self, key, path): 102 | def cut_path_off(starting_path, current_path): 103 | for i, j in enumerate(starting_path): 104 | if current_path[i] != j: 105 | return current_path[i:] 106 | return current_path[(i+1):] 107 | 108 | files = [] 109 | paths = set() 110 | 111 | outerDf = defer.Deferred() 112 | 113 | self.factory.sharePath = path 114 | self.factory.l = self.l 115 | 116 | for entry in os.walk(path): 117 | for file in entry[2]: 118 | if file not in files and file not in ('.directory'): 119 | fs_path = cut_path_off(path, entry[0]) 120 | files.append((file, fs_path)) 121 | paths.add(fs_path) 122 | files.sort() 123 | 124 | self.log('files: {}'.format(len(files))) 125 | 126 | for path in sorted(paths): 127 | if path != '': 128 | self.file_db.add_directory(key, path, 0775) 129 | 130 | for filename, path in files: 131 | # this is the path to file on the hard drive 132 | full_file_path = os.path.join(self.file_dir, path[1:], filename) 133 | orig_path = os.path.join(self.factory.sharePath, path[1:], filename) 134 | 135 | directory_name = os.path.dirname(full_file_path) 136 | if not os.path.exists(directory_name): 137 | os.makedirs(directory_name) 138 | 139 | shutil.copyfile(orig_path, full_file_path) 140 | 141 | size = os.path.getsize(full_file_path) 142 | 143 | # hack, need to look at the code beforehand to fix this 144 | if path == '': 145 | path = '/' 146 | 147 | file_path = os.path.join(path, filename) # 'virtual' path inside database 148 | self.file_db.add_file(key, file_path, 0777, size) 149 | m_time = self.file_db.get_file_mtime(self.key, file_path) 150 | self.publish_file(key, file_path, full_file_path, m_time) 151 | 152 | def publish_file(self, key, path, full_file_path, m_time, add_to_database=False): 153 | self.log('--> {}'.format(path)) 154 | hash = sha_hash(path) 155 | self.storage[hash] = {'key':key, 'filename':path, 'mtime':int(m_time)} 156 | df = self.publish_file_with_upload(path, full_file_path, m_time) 157 | return df 158 | 159 | def debug_contacts(self, contacts): 160 | return [str(contact.address) for contact in contacts] 161 | 162 | def get_metadata(self, path, key): 163 | filename = os.path.basename(path) 164 | hash = sha_hash(filename) 165 | self.log('Getting metadata for: {}'.format(filename)) 166 | 167 | def get_target_node(result): 168 | #print self.debug_contacts(result) 169 | return result.pop() 170 | 171 | def get_file(protocol): 172 | if protocol != None: 173 | return protocol.request_metadata(filename, key, hash) 174 | 175 | def connect_to_peer(contact): 176 | if contact == None: 177 | self.log("The host that published this file is no longer on-line.\n") 178 | else: 179 | c = ClientCreator(reactor, MetadataRequestProtocol, self.l) 180 | df = c.connectTCP(contact.address, contact.port) 181 | return df 182 | 183 | df = self.node.iterativeFindValue(hash) 184 | df.addCallback(get_target_node) 185 | df.addCallback(connect_to_peer) 186 | df.addCallback(get_file) 187 | return df 188 | 189 | def download(self, path, destination, key, should_update_time=False): 190 | hash = sha_hash(path) 191 | 
self.log('Downloading: {}({})'.format(path, should_update_time)) 192 | 193 | def get_target_node(result): 194 | #print self.debug_contacts(result) 195 | self.log("Target node: {}".format(str(result))) 196 | return result.pop() 197 | 198 | def get_file(protocol): 199 | self.log('Requesting file.') 200 | if protocol != None: 201 | return protocol.request_file(path, destination, key, hash) 202 | 203 | def connect_to_peer(contact): 204 | if contact == None: 205 | self.log("File could not be retrieved.\nThe host that published this file is no longer on-line.\n") 206 | else: 207 | c = ClientCreator(reactor, UploadRequestProtocol, self.l) 208 | df = c.connectTCP(contact.address, contact.port) 209 | return df 210 | 211 | def update_time(full_file_path): 212 | self.log('will update time for: {}'.format(full_file_path)) 213 | update_time = self.file_db.get_file_mtime(key, path) 214 | self.log('update_time: {}'.format(update_time)) 215 | if update_time == 0: 216 | return 217 | os.utime(full_file_path, (update_time, update_time)) 218 | self.log('changed {} mtime to {}'.format(full_file_path, update_time)) 219 | 220 | def log_message(_): 221 | self.log('Downloaded {}.'.format(path)) 222 | 223 | df = self.node.iterativeFindValue(hash) 224 | df.addCallback(get_target_node) 225 | df.addCallback(connect_to_peer) 226 | df.addCallback(get_file) 227 | if should_update_time: 228 | df.addCallback(update_time) 229 | df.addCallback(log_message) 230 | return df 231 | 232 | -------------------------------------------------------------------------------- /src/old/tcp_sock.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "ns3/core-module.h" 6 | #include "ns3/network-module.h" 7 | #include "ns3/internet-module.h" 8 | #include "ns3/point-to-point-module.h" 9 | #include "ns3/applications-module.h" 10 | #include "ns3/csma-star-helper.h" 11 | 12 | using namespace ns3; 13 | 14 | uint16_t FS_PORT = 8080; 15 | 16 | // you stole this from the web, replace with something that makes more sense 17 | uint32_t hash(uint32_t key) 18 | { 19 | key += ~(key << 15); 20 | key ^= (key >> 10); 21 | key += (key << 3); 22 | key ^= (key >> 6); 23 | key += ~(key << 11); 24 | key ^= (key >> 16); 25 | return key; 26 | } 27 | 28 | enum Command 29 | { 30 | ASK_FOR_SUCCESSOR = 0, 31 | RECEIVE_SUCCESSOR = 1, 32 | I_AM_SUCCESSOR = 2, 33 | ASK_FOR_MY_HASH = 3, 34 | RECEIVE_MY_HASH = 4, 35 | LOOKUP_VALUE = 5, 36 | RECEIVE_VALUE = 6, 37 | STORE_VALUE = 7, 38 | RELOOKUP_VALUE = 8 39 | }; 40 | 41 | int h1 = 0; 42 | class MyApp : public Application 43 | { 44 | public: 45 | MyApp (Ptr node) 46 | : inSocket(0) 47 | , predecessor(0) 48 | , successor(0) 49 | , myNode(node) 50 | , isOwnSuccessor(false) 51 | , hasHash(false) 52 | , myHash(h1) 53 | { 54 | h1++; 55 | } 56 | 57 | void StartApplication() 58 | { 59 | inSocket = Socket::CreateSocket(myNode, TcpSocketFactory::GetTypeId ()); 60 | inSocket->Bind(InetSocketAddress(Ipv4Address::GetAny(), 8080)); 61 | 62 | inSocket->Listen(); 63 | inSocket->SetAcceptCallback(MakeNullCallback, const Address &> (), MakeCallback(&MyApp::HandleAccept, this)); 64 | std::cout << myHash << " <- started inSocket\n"; 65 | } 66 | 67 | void HandleAccept(Ptr s, const Address& from) 68 | { 69 | std::cout << myHash << " Someone connected from "; 70 | InetSocketAddress::ConvertFrom(from).GetIpv4().Print(std::cout); 71 | std::cout << ' ' << hash(InetSocketAddress::ConvertFrom(from).GetIpv4().Get()); 72 | std::cout << '\n'; 73 | 74 | 
uint32_t ip = InetSocketAddress::ConvertFrom(from).GetIpv4().Get(); 75 | socketAddress[s] = ip; 76 | addressSocket[ip] = s; 77 | ipToAddress[ip] = from; 78 | s->SetRecvCallback(MakeCallback(&MyApp::HandleReceive, this)); 79 | } 80 | 81 | void CreateRing() 82 | { 83 | isOwnSuccessor = true; 84 | } 85 | 86 | void Join(Address address) 87 | { 88 | GetHash(address); 89 | GetSuccessor(address); 90 | } 91 | 92 | void LookupKey(uint32_t key) 93 | { 94 | SendMessageAskForValue(GetSocket(successor), key); 95 | } 96 | 97 | void HandleReceive(Ptr s) 98 | { 99 | Ptr packet = s->Recv(); 100 | if (packet == 0) 101 | { 102 | std::cout << "0 packet received\n"; 103 | } 104 | else 105 | { 106 | uint8_t buffer[17]; 107 | packet->CopyData(buffer, sizeof(buffer)); 108 | Command command = (Command)buffer[0]; 109 | switch (command) 110 | { 111 | case I_AM_SUCCESSOR: 112 | successorHash = byteArrayToInt(&buffer[1]); 113 | std::cout << myHash << " successor " << successorHash << '\n'; 114 | successor = socketAddress[s]; 115 | break; 116 | case RECEIVE_VALUE: 117 | { 118 | uint32_t value = byteArrayToInt(&buffer[1]); 119 | std::cout << myHash << " received value: " << value << '\n'; 120 | } 121 | break; 122 | case RELOOKUP_VALUE: 123 | { 124 | uint32_t ip = byteArrayToInt(&buffer[1]); 125 | uint32_t key = byteArrayToInt(&buffer[5]); 126 | if (myHash < key && key <= successorHash) 127 | { 128 | SendMessageReceiveValue(GetSocket(ip), key); 129 | } 130 | else 131 | { 132 | SendMessageReaskForValue(GetSocket(successor), ip, key); 133 | } 134 | } 135 | break; 136 | case LOOKUP_VALUE: 137 | { 138 | uint32_t key = byteArrayToInt(&buffer[1]); 139 | if (myHash < key && key <= successorHash) 140 | { 141 | SendMessageReceiveValue(s, key); 142 | } 143 | else 144 | { 145 | SendMessageReaskForValue(GetSocket(successor), socketAddress[s], key); 146 | } 147 | } 148 | break; 149 | case STORE_VALUE: 150 | { 151 | uint32_t key = byteArrayToInt(&buffer[1]); 152 | uint32_t value = byteArrayToInt(&buffer[5]); 153 | if (myHash < key && key <= successorHash) 154 | { 155 | lookupData[key] = value; 156 | std::cout << "stored value " << value << " (key " << key << ") at " << myHash << '\n'; 157 | } 158 | else 159 | { 160 | SendMessageStoreValue(GetSocket(successor), key, value); 161 | } 162 | } 163 | break; 164 | case ASK_FOR_SUCCESSOR: 165 | { 166 | uint32_t askedHash = byteArrayToInt(&buffer[1]); 167 | if (isOwnSuccessor || (myHash < askedHash && askedHash <= successorHash)) 168 | { 169 | // this node has the same successor as the node which asked 170 | SendMessageReceiveSuccessor(s, successorHash, successor); 171 | } 172 | else if (askedHash < myHash) 173 | { 174 | SendMessageReceiveMeAsSuccessor(s); 175 | } 176 | else 177 | { 178 | // continue search via the ring 179 | onGoingSearches[askedHash] = socketAddress[s]; 180 | GetSuccessorForHash(ipToAddress[successor], askedHash); 181 | } 182 | } 183 | break; 184 | case RECEIVE_SUCCESSOR: 185 | { 186 | std::cout << myHash << " RECEIVED SUCCESSOR!!\n"; 187 | // 4 bytes - id 188 | // 4 bytes - ip of successor of id 189 | //if (questionHash != myHash) 190 | //{ 191 | // uint32_t sendBackIp = onGoingSearches[questionHash]; 192 | // packet = Create< Packet >(buffer, sizeof(buffer)); 193 | // addressSocket[sendBackIp]->Send(packet); 194 | //} 195 | //else 196 | //{ 197 | successorHash = byteArrayToInt(&buffer[1]); 198 | successor = byteArrayToInt(&buffer[5]); 199 | std::cout << myHash << " successor is " << successorHash << '\n'; 200 | //} 201 | //uint32_t successorIp = byteArrayToInt(&buffer[5]); 
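Every message in this ns-3 Chord prototype is framed as a single command byte followed by one or two 32-bit integers, written big-endian by the intToByteArray/byteArrayToInt helpers further down. For reference, the same framing expressed in Python with the struct module (an illustration only, not code from the project):

# Sketch: pack/unpack the 1-byte-command + big-endian uint32 framing used by
# the Chord prototype; '>' selects big-endian (network) byte order, matching
# intToByteArray's manual shifting.
import struct

LOOKUP_VALUE = 5          # values taken from the Command enum above
STORE_VALUE = 7

def pack_lookup(key):
    return struct.pack('>BI', LOOKUP_VALUE, key)

def pack_store(key, value):
    return struct.pack('>BII', STORE_VALUE, key, value)

def unpack_command(data):
    return struct.unpack('>B', data[:1])[0]

msg = pack_store(3278655985, 12)
assert unpack_command(msg) == STORE_VALUE
assert struct.unpack('>BII', msg) == (STORE_VALUE, 3278655985, 12)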
202 | //if (id != myHash) 203 | //{ 204 | 205 | //} 206 | } 207 | break; 208 | case ASK_FOR_MY_HASH: 209 | SendMessageReceiveHash(s); 210 | if (!myHash) // Ask back for my own hash 211 | GetHash(ipToAddress[socketAddress[s]]); 212 | break; 213 | case RECEIVE_MY_HASH: 214 | { 215 | uint32_t newHash; 216 | newHash = byteArrayToInt(&buffer[1]); 217 | std::cout << myHash << " new hash is: " << newHash << '\n'; 218 | hasHash = true; 219 | myHash = newHash; 220 | } 221 | break; 222 | default: 223 | break; 224 | } 225 | } 226 | } 227 | 228 | ~MyApp() 229 | { 230 | if (inSocket) 231 | inSocket->Close(); 232 | for (std::map< Ptr, uint32_t >::iterator i = socketAddress.begin(); i != socketAddress.end(); ++i) 233 | { 234 | i->first->Close(); 235 | } 236 | } 237 | 238 | Ptr GetSocket(uint32_t ip) 239 | { 240 | std::map< uint32_t, Ptr >::iterator result = addressSocket.find(ip); 241 | if (result != addressSocket.end()) 242 | { 243 | //std::cout << "found socket\n"; 244 | return result->second; 245 | } 246 | else 247 | { 248 | Ptr outSocket = Socket::CreateSocket(myNode, TcpSocketFactory::GetTypeId()); 249 | outSocket->Bind(); 250 | 251 | Ipv4Address addressIpv4(ip); 252 | InetSocketAddress address(addressIpv4, FS_PORT); 253 | outSocket->Connect(address); 254 | 255 | outSocket->SetRecvCallback(MakeCallback(&MyApp::HandleReceive, this)); 256 | socketAddress[outSocket] = ip; 257 | addressSocket[ip] = outSocket; 258 | ipToAddress[ip] = address; 259 | return outSocket; 260 | } 261 | } 262 | 263 | Ptr GetSocket(Address address) 264 | { 265 | uint32_t ip = InetSocketAddress::ConvertFrom(address).GetIpv4().Get(); 266 | std::map< uint32_t, Ptr >::iterator result = addressSocket.find(ip); 267 | if (result != addressSocket.end()) 268 | { 269 | //std::cout << "found socket\n"; 270 | return result->second; 271 | } 272 | else 273 | { 274 | Ptr outSocket = Socket::CreateSocket(myNode, TcpSocketFactory::GetTypeId()); 275 | outSocket->Bind(); 276 | outSocket->Connect(address); 277 | outSocket->SetRecvCallback(MakeCallback(&MyApp::HandleReceive, this)); 278 | socketAddress[outSocket] = ip; 279 | addressSocket[ip] = outSocket; 280 | ipToAddress[ip] = address; 281 | return outSocket; 282 | } 283 | } 284 | 285 | void GetHash(Address address) 286 | { 287 | Ptr outSocket = GetSocket(address); 288 | SendMessageAskForMyHash(outSocket); 289 | } 290 | 291 | void GetSuccessor(Address address) 292 | { 293 | GetSuccessorForHash(address, myHash); 294 | } 295 | 296 | void GetSuccessorForHash(Address address, uint32_t hash) 297 | { 298 | Ptr outSocket = GetSocket(address); 299 | SendMessageAskForSuccessor(outSocket, hash); 300 | } 301 | 302 | //void LookupValue(Address address, uint32_t key) 303 | //{ 304 | // Ptr outSocket = GetSocket(address); 305 | // SendMessageAskForSuccessor(outSocket, hash); 306 | //} 307 | 308 | void SendMessageReaskForValue(Ptr socket, uint32_t ip, uint32_t hash) 309 | { 310 | uint8_t buffer[9]; 311 | buffer[0] = (uint8_t)RELOOKUP_VALUE; 312 | intToByteArray(ip, &buffer[1]); 313 | intToByteArray(hash, &buffer[5]); 314 | Ptr packet; 315 | packet = Create(buffer, sizeof(buffer)); 316 | socket->Send(packet); 317 | std::cout << myHash << " REASKED FOR VALUE\n"; 318 | } 319 | 320 | void SendMessageAskForValue(Ptr socket, uint32_t hash) 321 | { 322 | uint8_t buffer[9]; 323 | buffer[0] = (uint8_t)LOOKUP_VALUE; 324 | intToByteArray(hash, &buffer[1]); 325 | Ptr packet; 326 | packet = Create(buffer, sizeof(buffer)); 327 | socket->Send(packet); 328 | std::cout << myHash << " ASKED FOR VALUE\n"; 329 | } 330 | 331 | void 
SendMessageAskForSuccessor(Ptr socket, uint32_t hash) 332 | { 333 | uint8_t buffer[5]; 334 | buffer[0] = (uint8_t)ASK_FOR_SUCCESSOR; 335 | intToByteArray(myHash, &buffer[1]); 336 | Ptr packet; 337 | packet = Create(buffer, sizeof(buffer)); 338 | socket->Send(packet); 339 | std::cout << myHash << " ASKED FOR SUCCESSOR\n"; 340 | } 341 | 342 | void StoreValue(uint32_t key, uint32_t value) 343 | { 344 | SendMessageStoreValue(GetSocket(successor), key, value); 345 | } 346 | 347 | void SendMessageStoreValue(Ptr socket, uint32_t value) 348 | { 349 | SendMessageStoreValue(socket, hash(value), value); 350 | } 351 | 352 | void SendMessageStoreValue(Ptr socket, uint32_t key, uint32_t value) 353 | { 354 | uint8_t buffer[9]; 355 | buffer[0] = (uint8_t)STORE_VALUE; 356 | intToByteArray(key, &buffer[1]); 357 | intToByteArray(value, &buffer[5]); 358 | Ptr packet; 359 | packet = Create(buffer, sizeof(buffer)); 360 | socket->Send(packet); 361 | std::cout << myHash << " LOOKING WHERE TO STORE VALUE " << value << " with key(" << key << ")\n"; 362 | } 363 | 364 | void SendMessageReceiveValue(Ptr socket, uint32_t key) 365 | { 366 | uint8_t buffer[5]; 367 | buffer[0] = (uint8_t)RECEIVE_VALUE; 368 | intToByteArray(lookupData[key], &buffer[1]); 369 | Ptr packet = Create (buffer, sizeof(buffer)); 370 | socket->Send(packet); 371 | std::cout << myHash << " RECEIVE VALUE -> SENT\n"; 372 | } 373 | 374 | void SendMessageAskForMyHash(Ptr socket) 375 | { 376 | uint8_t buffer[1]; 377 | buffer[0] = (uint8_t)ASK_FOR_MY_HASH; 378 | Ptr packet; 379 | packet = Create< Packet >(buffer, sizeof(buffer)); 380 | socket->Send(packet); 381 | std::cout << myHash << " ASKED FOR HASH\n"; 382 | } 383 | 384 | void SendMessageReceiveHash(Ptr socket) 385 | { 386 | uint8_t buffer[5]; 387 | buffer[0] = (uint8_t)RECEIVE_MY_HASH; 388 | uint32_t h = hash(socketAddress[socket]); 389 | intToByteArray(h, &buffer[1]); 390 | Ptr packet = Create (buffer, sizeof(buffer)); 391 | socket->Send(packet); 392 | std::cout << myHash << " RECEIVE HASH -> SENT\n"; 393 | } 394 | 395 | void SendMessageReceiveMeAsSuccessor(Ptr socket) 396 | { 397 | uint8_t buffer[5]; 398 | buffer[0] = (uint8_t)I_AM_SUCCESSOR; 399 | intToByteArray(myHash, &buffer[1]); 400 | Ptr packet = Create (buffer, sizeof(buffer)); 401 | socket->Send(packet); 402 | std::cout << myHash << " RECEIVE SUCCESSOR -> SENT (I AM SUCCESSOR)\n"; 403 | } 404 | 405 | void SendMessageReceiveSuccessor(Ptr socket, uint32_t successorHash, uint32_t successor) 406 | { 407 | if (isOwnSuccessor) 408 | { 409 | uint8_t buffer[5]; 410 | buffer[0] = (uint8_t)I_AM_SUCCESSOR; 411 | intToByteArray(myHash, &buffer[1]); 412 | Ptr packet = Create (buffer, sizeof(buffer)); 413 | socket->Send(packet); 414 | std::cout << myHash << " RECEIVE SUCCESSOR -> SENT (I AM SUCCESSOR)\n"; 415 | } 416 | else 417 | { 418 | uint8_t buffer[13]; 419 | buffer[0] = (uint8_t)RECEIVE_SUCCESSOR; 420 | intToByteArray(successorHash, &buffer[1]); 421 | intToByteArray(successor, &buffer[5]); 422 | Ptr packet = Create (buffer, sizeof(buffer)); 423 | socket->Send(packet); 424 | std::cout << myHash << " RECEIVE SUCCESSOR -> SENT\n"; 425 | } 426 | } 427 | 428 | void intToByteArray(uint32_t n, uint8_t* arr) 429 | { 430 | arr[0] = n >> 24 & 0xFF; 431 | arr[1] = n >> 16 & 0xFF; 432 | arr[2] = n >> 8 & 0xFF; 433 | arr[3] = n & 0xFF; 434 | } 435 | 436 | uint32_t byteArrayToInt(uint8_t* arr) 437 | { 438 | uint32_t ret = 0; 439 | ret |= (((uint32_t)arr[0]) << 24); 440 | ret |= (((uint32_t)arr[1]) << 16); 441 | ret |= (((uint32_t)arr[2]) << 8); 442 | ret |= 
(uint32_t)arr[3]; 443 | return ret; 444 | } 445 | 446 | Ptr inSocket; 447 | uint32_t predecessor; 448 | uint32_t predecessorHash; 449 | uint32_t successor; 450 | uint32_t successorHash; 451 | Ptr myNode; 452 | 453 | std::map< Ptr, uint32_t > socketAddress; 454 | std::map< uint32_t, Ptr > addressSocket; 455 | 456 | std::map< uint32_t, Address > ipToAddress; 457 | 458 | std::map items; 459 | bool isOwnSuccessor; 460 | bool hasHash; 461 | uint32_t myHash; 462 | std::map< uint32_t, uint32_t > onGoingSearches; // hash -> address (see Joining Logic) 463 | std::map< uint32_t, uint32_t > lookupData; // key -> value data store 464 | }; 465 | 466 | 467 | int 468 | main (int argc, char *argv[]) 469 | { 470 | CommandLine cmd; 471 | cmd.Parse (argc, argv); 472 | NodeContainer nodes; 473 | nodes.Create (4); 474 | 475 | CsmaHelper eth; 476 | eth.SetChannelAttribute("DataRate", DataRateValue(DataRate(5000000))); 477 | 478 | eth.SetChannelAttribute("Delay", TimeValue(MilliSeconds(2))); 479 | eth.SetDeviceAttribute("Mtu", UintegerValue(1400)); 480 | 481 | NetDeviceContainer devices = eth.Install(nodes); 482 | 483 | //PointToPointHelper pointToPoint; 484 | //NetDeviceContainer devices; 485 | //devices = pointToPoint.Install (nodes); 486 | 487 | InternetStackHelper stack; 488 | stack.Install (nodes); 489 | Ipv4AddressHelper address; 490 | address.SetBase ("10.1.1.0", "255.255.255.0"); 491 | Ipv4InterfaceContainer interfaces = address.Assign (devices); 492 | 493 | Address creatorAddress (InetSocketAddress (interfaces.GetAddress (2), FS_PORT)); 494 | Address nodeAddressA (InetSocketAddress (interfaces.GetAddress (1), FS_PORT)); 495 | Address nodeAddressB (InetSocketAddress (interfaces.GetAddress (0), FS_PORT)); 496 | Address nodeAddressC (InetSocketAddress (interfaces.GetAddress (3), FS_PORT)); 497 | 498 | Ptr creator = CreateObject (nodes.Get(2)); 499 | nodes.Get(2)->AddApplication(creator); 500 | 501 | Ptr nodeA = CreateObject (nodes.Get (1)); 502 | nodes.Get (1)->AddApplication(nodeA); 503 | Ptr nodeB = CreateObject (nodes.Get (0)); 504 | nodes.Get (0)->AddApplication(nodeB); 505 | Ptr nodeC = CreateObject (nodes.Get (3)); 506 | nodes.Get (3)->AddApplication(nodeC); 507 | 508 | Simulator::Schedule( Seconds(3), &MyApp::CreateRing, creator); 509 | 510 | // create a ring of 4 nodes 511 | Simulator::Schedule( Seconds(9), &MyApp::GetHash, nodeB, creatorAddress); 512 | Simulator::Schedule( Seconds(12), &MyApp::GetSuccessor, nodeB, creatorAddress); 513 | Simulator::Schedule( Seconds(15), &MyApp::GetHash, nodeA, nodeAddressB ); 514 | Simulator::Schedule( Seconds(18), &MyApp::GetSuccessor, nodeA, nodeAddressB ); 515 | Simulator::Schedule( Seconds(15), &MyApp::GetHash, nodeC, nodeAddressA ); 516 | Simulator::Schedule( Seconds(18), &MyApp::GetSuccessor, nodeC, nodeAddressA ); 517 | 518 | // key lookup test 519 | Simulator::Schedule( Seconds(32), &MyApp::StoreValue, nodeA, 3278655985, 12 ); 520 | Simulator::Schedule( Seconds(48), &MyApp::LookupKey, nodeC, 3278655985 ); 521 | 522 | Simulator::Stop (); 523 | Simulator::Run (); 524 | Simulator::Destroy (); 525 | 526 | return 0; 527 | } 528 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 
7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 
67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 
133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 
196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 
309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | <one line to give the program's name and a brief idea of what it does.> 635 | Copyright (C) <year> <name of author> 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see <http://www.gnu.org/licenses/>. 649 | 650 | Also add information on how to contact you by electronic and paper mail.
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | <program> Copyright (C) <year> <name of author> 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | <http://www.gnu.org/licenses/>. 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | <http://www.gnu.org/philosophy/why-not-lgpl.html>. 675 | --------------------------------------------------------------------------------