├── SuperNode ├── .gitignore ├── requirements.txt ├── build.sh ├── db.py ├── proto │ ├── fileService.proto │ ├── fileService_pb2_grpc.py │ └── fileService_pb2.py ├── ClusterStatus.py └── superNode.py ├── iptable.txt ├── files ├── flix.gif └── wow2.jpg ├── Project Report.pdf ├── images ├── TechStack.png └── ArchitectureDiagram.png ├── requirements.txt ├── __pycache__ ├── db.cpython-36.pyc ├── db.cpython-37.pyc ├── database.cpython-36.pyc ├── database.cpython-37.pyc ├── FluffyServer.cpython-36.pyc ├── fluffy_pb2.cpython-36.pyc ├── fluffy_pb2.cpython-37.pyc ├── heartbeat_pb2.cpython-36.pyc ├── HeartbeatService.cpython-36.pyc ├── fluffy_pb2_grpc.cpython-36.pyc ├── fluffy_pb2_grpc.cpython-37.pyc └── heartbeat_pb2_grpc.cpython-36.pyc ├── utils ├── __pycache__ │ ├── db.cpython-36.pyc │ ├── db.cpython-37.pyc │ ├── Raft.cpython-36.pyc │ ├── RaftHelper.cpython-36.pyc │ ├── DeleteHelper.cpython-36.pyc │ ├── DownloadHelper.cpython-36.pyc │ ├── ShardingHandler.cpython-36.pyc │ ├── ShardingHandler.cpython-37.pyc │ ├── ActiveNodesChecker.cpython-36.pyc │ └── ActiveNodesChecker.cpython-37.pyc ├── Raft.py ├── db.py ├── ShardingHandler.py ├── DeleteHelper.py ├── DownloadHelper.py ├── ActiveNodesChecker.py └── RaftHelper.py ├── .gitignore ├── service ├── __pycache__ │ ├── FileServer.cpython-36.pyc │ ├── FileServer.cpython-37.pyc │ ├── HeartbeatService.cpython-36.pyc │ └── HeartbeatService.cpython-37.pyc ├── HeartbeatService.py └── FileServer.py ├── generated ├── __pycache__ │ ├── fluffy_pb2.cpython-36.pyc │ ├── heartbeat_pb2.cpython-36.pyc │ ├── heartbeat_pb2.cpython-37.pyc │ ├── fileService_pb2.cpython-37.pyc │ ├── fluffy_pb2_grpc.cpython-36.pyc │ ├── heartbeat_pb2_grpc.cpython-36.pyc │ ├── heartbeat_pb2_grpc.cpython-37.pyc │ └── fileService_pb2_grpc.cpython-37.pyc ├── heartbeat_pb2_grpc.py └── heartbeat_pb2.py ├── proto ├── __pycache__ │ ├── fileService_pb2.cpython-36.pyc │ ├── fileService_pb2.cpython-37.pyc │ ├── fileService_pb2_grpc.cpython-36.pyc │ └── 
fileService_pb2_grpc.cpython-37.pyc ├── heartbeat.proto ├── fluffy.proto ├── fileService.proto ├── fileService_pb2_grpc.py └── fileService_pb2.py ├── config.yaml ├── README.md ├── server.py └── client.py /SuperNode/.gitignore: -------------------------------------------------------------------------------- 1 | dump.rdb 2 | __pycache__ 3 | -------------------------------------------------------------------------------- /SuperNode/requirements.txt: -------------------------------------------------------------------------------- 1 | grpcio 2 | grpcio-tools 3 | redis 4 | ast 5 | -------------------------------------------------------------------------------- /iptable.txt: -------------------------------------------------------------------------------- 1 | 192.168.0.2:3000 2 | 192.168.0.5:3000 3 | 192.168.0.6:3000 -------------------------------------------------------------------------------- /files/flix.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/files/flix.gif -------------------------------------------------------------------------------- /files/wow2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/files/wow2.jpg -------------------------------------------------------------------------------- /Project Report.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/Project Report.pdf -------------------------------------------------------------------------------- /images/TechStack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/images/TechStack.png 
-------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | grpcio 2 | grpcio-tools 3 | redis 4 | ast 5 | yaml 6 | hashlib 7 | lru-dict 8 | psutil 9 | pysyncobj 10 | -------------------------------------------------------------------------------- /SuperNode/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | python3 -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. ./proto/fileService.proto 4 | 5 | -------------------------------------------------------------------------------- /__pycache__/db.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/__pycache__/db.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/db.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/__pycache__/db.cpython-37.pyc -------------------------------------------------------------------------------- /images/ArchitectureDiagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/images/ArchitectureDiagram.png -------------------------------------------------------------------------------- /__pycache__/database.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/__pycache__/database.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/database.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/__pycache__/database.cpython-37.pyc -------------------------------------------------------------------------------- /utils/__pycache__/db.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/utils/__pycache__/db.cpython-36.pyc -------------------------------------------------------------------------------- /utils/__pycache__/db.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/utils/__pycache__/db.cpython-37.pyc -------------------------------------------------------------------------------- /__pycache__/FluffyServer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/__pycache__/FluffyServer.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/fluffy_pb2.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/__pycache__/fluffy_pb2.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/fluffy_pb2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/__pycache__/fluffy_pb2.cpython-37.pyc -------------------------------------------------------------------------------- /utils/__pycache__/Raft.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/utils/__pycache__/Raft.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/heartbeat_pb2.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/__pycache__/heartbeat_pb2.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/HeartbeatService.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/__pycache__/HeartbeatService.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/fluffy_pb2_grpc.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/__pycache__/fluffy_pb2_grpc.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/fluffy_pb2_grpc.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/__pycache__/fluffy_pb2_grpc.cpython-37.pyc -------------------------------------------------------------------------------- /utils/__pycache__/RaftHelper.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/utils/__pycache__/RaftHelper.cpython-36.pyc -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | dump.rdb 2 | downloads 3 | files 4 | cache 5 | __pycache__ 6 | utils/__pycache__ 7 | proto/__pycache__ 8 | service/__pycache__ 9 | generated/__pycache__ 10 | -------------------------------------------------------------------------------- /__pycache__/heartbeat_pb2_grpc.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/__pycache__/heartbeat_pb2_grpc.cpython-36.pyc -------------------------------------------------------------------------------- /service/__pycache__/FileServer.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/service/__pycache__/FileServer.cpython-36.pyc -------------------------------------------------------------------------------- /service/__pycache__/FileServer.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/service/__pycache__/FileServer.cpython-37.pyc -------------------------------------------------------------------------------- /utils/__pycache__/DeleteHelper.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/utils/__pycache__/DeleteHelper.cpython-36.pyc -------------------------------------------------------------------------------- /generated/__pycache__/fluffy_pb2.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/generated/__pycache__/fluffy_pb2.cpython-36.pyc 
-------------------------------------------------------------------------------- /proto/__pycache__/fileService_pb2.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/proto/__pycache__/fileService_pb2.cpython-36.pyc -------------------------------------------------------------------------------- /proto/__pycache__/fileService_pb2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/proto/__pycache__/fileService_pb2.cpython-37.pyc -------------------------------------------------------------------------------- /utils/__pycache__/DownloadHelper.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/utils/__pycache__/DownloadHelper.cpython-36.pyc -------------------------------------------------------------------------------- /utils/__pycache__/ShardingHandler.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/utils/__pycache__/ShardingHandler.cpython-36.pyc -------------------------------------------------------------------------------- /utils/__pycache__/ShardingHandler.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/utils/__pycache__/ShardingHandler.cpython-37.pyc -------------------------------------------------------------------------------- /generated/__pycache__/heartbeat_pb2.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/generated/__pycache__/heartbeat_pb2.cpython-36.pyc -------------------------------------------------------------------------------- /generated/__pycache__/heartbeat_pb2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/generated/__pycache__/heartbeat_pb2.cpython-37.pyc -------------------------------------------------------------------------------- /service/__pycache__/HeartbeatService.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/service/__pycache__/HeartbeatService.cpython-36.pyc -------------------------------------------------------------------------------- /service/__pycache__/HeartbeatService.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/service/__pycache__/HeartbeatService.cpython-37.pyc -------------------------------------------------------------------------------- /utils/__pycache__/ActiveNodesChecker.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/utils/__pycache__/ActiveNodesChecker.cpython-36.pyc -------------------------------------------------------------------------------- /utils/__pycache__/ActiveNodesChecker.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/utils/__pycache__/ActiveNodesChecker.cpython-37.pyc 
-------------------------------------------------------------------------------- /generated/__pycache__/fileService_pb2.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/generated/__pycache__/fileService_pb2.cpython-37.pyc -------------------------------------------------------------------------------- /generated/__pycache__/fluffy_pb2_grpc.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/generated/__pycache__/fluffy_pb2_grpc.cpython-36.pyc -------------------------------------------------------------------------------- /proto/__pycache__/fileService_pb2_grpc.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/proto/__pycache__/fileService_pb2_grpc.cpython-36.pyc -------------------------------------------------------------------------------- /proto/__pycache__/fileService_pb2_grpc.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/proto/__pycache__/fileService_pb2_grpc.cpython-37.pyc -------------------------------------------------------------------------------- /generated/__pycache__/heartbeat_pb2_grpc.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/generated/__pycache__/heartbeat_pb2_grpc.cpython-36.pyc -------------------------------------------------------------------------------- /generated/__pycache__/heartbeat_pb2_grpc.cpython-37.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/generated/__pycache__/heartbeat_pb2_grpc.cpython-37.pyc -------------------------------------------------------------------------------- /generated/__pycache__/fileService_pb2_grpc.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shubhamsawantsjsu/Distributed-File-Storage-System/HEAD/generated/__pycache__/fileService_pb2_grpc.cpython-37.pyc -------------------------------------------------------------------------------- /proto/heartbeat.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | // The heartbeat service definition. 4 | service HearBeat { 5 | // Sends status 6 | rpc isAlive (NodeInfo) returns (Stats) {} 7 | } 8 | 9 | // The request message containing ip address and leader status 10 | message NodeInfo { 11 | string ip = 1; 12 | string port = 2; 13 | } 14 | 15 | // The response message containing the status of the server 16 | message Stats { 17 | 18 | string cpu_usage = 1; 19 | string disk_space = 2; 20 | string used_mem = 3; 21 | } -------------------------------------------------------------------------------- /utils/Raft.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import sys 4 | import time 5 | from functools import partial 6 | from pysyncobj import SyncObj, replicated 7 | 8 | # 9 | # *** Raft Service: This class overrides few methods as per our need. 
*** 10 | # 11 | class Raft(SyncObj): 12 | 13 | def __init__(self, selfNodeAddr, otherNodeAddrs): 14 | super(Raft, self).__init__(selfNodeAddr, otherNodeAddrs) 15 | self.__counter = 0 16 | 17 | @replicated 18 | def incCounter(self): 19 | self.__counter += 1 20 | return self.__counter 21 | 22 | @replicated 23 | def addValue(self, value, cn): 24 | self.__counter += value 25 | return self.__counter, cn 26 | 27 | def getCounter(self): 28 | return self.__counter -------------------------------------------------------------------------------- /SuperNode/db.py: -------------------------------------------------------------------------------- 1 | import redis 2 | import ast 3 | 4 | _redis_port = 6379 5 | 6 | r = redis.StrictRedis(host='localhost', port=_redis_port, db=0) 7 | 8 | #metadata = {"username_filename" : [clusterName, clusterReplica]} 9 | def saveMetaData(username, filename, clusterName, clusterReplica): 10 | key = username + "_" + filename 11 | r.set(key,str([clusterName,clusterReplica])) 12 | 13 | def parseMetaData(username, filename): 14 | key = username + "_" + filename 15 | return ast.literal_eval(r.get(key).decode('utf-8')) 16 | 17 | def keyExists(key): 18 | return r.exists(key) 19 | 20 | def deleteEntry(key): 21 | r.delete(key) 22 | 23 | def getUserFiles(username): 24 | return r.get(username).decode('utf-8') 25 | 26 | def saveUserFile(username, filename): 27 | key = username + "_" + filename 28 | if(keyExists(key)): 29 | l=ast.literal_eval(r.get(key).decode('utf-8')) 30 | l.append(filename) 31 | r.set(key,str(l)) 32 | 33 | 34 | -------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | # Server groups 2 | # Config Variables 3 | 4 | one: 5 | hostname: localhost 6 | server_port : 3000 7 | primary : 1 8 | mongodb_url: "mongodb://localhost:27017" 9 | raft_port: 3001 10 | 11 | two: 12 | hostname: localhost 13 | server_port : 4000 14 | primary : 
0 15 | mongodb_url: "mongodb://localhost:27017" 16 | raft_port: 4001 17 | 18 | 19 | three: 20 | hostname: localhost 21 | server_port : 5000 22 | primary : 0 23 | mongodb_url: "mongodb://localhost:27017" 24 | raft_port: 5001 25 | 26 | four: 27 | hostname: 192.168.0.2 28 | server_port : 3000 29 | raft_port : 3001 30 | #connection_port : [4001, 5001] 31 | primary : 1 32 | mongodb_url: "mongodb://localhost:27017" 33 | 34 | 35 | five: 36 | hostname: 192.168.0.5 37 | server_port : 3000 38 | primary : 0 39 | mongodb_url: "mongodb://localhost:27017" 40 | raft_port: 3001 41 | 42 | 43 | six: 44 | hostname: 192.168.0.6 45 | server_port : 3000 46 | primary : 0 47 | mongodb_url: "mongodb://localhost:27017" 48 | raft_port: 3001 49 | 50 | LRUCapacity: 5 51 | 52 | UPLOAD_SHARD_SIZE: 52428800 53 | 54 | super_node_address: "192.168.0.9:9000" -------------------------------------------------------------------------------- /proto/fluffy.proto: -------------------------------------------------------------------------------- 1 | //Author: Neil Shah 2 | 3 | syntax = "proto3"; 4 | 5 | option java_multiple_files = true; 6 | option optimize_for = SPEED; 7 | 8 | package fluffy; 9 | 10 | // a service interface (contract) 11 | 12 | service DataTransferService { 13 | rpc UploadFile (stream FileData) returns (FileInfo); 14 | rpc DownloadFile (FileInfo) returns (stream FileData); 15 | rpc ReplicateFile(stream FileData) returns (FileInfo); 16 | rpc ListFiles (RequestFileList) returns (FileList); 17 | } 18 | 19 | message FileData { 20 | string fileName = 1; 21 | bytes data = 2; 22 | } 23 | 24 | message FileInfo { 25 | string fileName = 1; // Mongo fileID ? (unique) 26 | } 27 | 28 | message RequestFileList { 29 | bool isClient=1; // true then client else other team 30 | } 31 | 32 | message FileList { 33 | repeated string lstFileNames = 1; 34 | } 35 | 36 | // basic message correspondence between two points. Additional metadata 37 | // and routing options can be added. 
38 | 39 | // message Route { 40 | // int64 id = 1; 41 | // int64 origin = 2; 42 | // int64 destination = 3; 43 | // string path = 4; 44 | // bytes payload = 5; 45 | // } -------------------------------------------------------------------------------- /utils/db.py: -------------------------------------------------------------------------------- 1 | import redis 2 | import ast 3 | 4 | _redis_port = 6379 5 | 6 | r = redis.StrictRedis(host='localhost', port=_redis_port, db=0) 7 | 8 | def setData(key, value): 9 | r.set(key,value) 10 | 11 | def getData(key): 12 | return (r.get(key)).decode('utf-8') 13 | 14 | def get(key): 15 | return (r.get(key)) 16 | 17 | def getFileData(key): 18 | return r.get(key) 19 | 20 | def keyExists(key): 21 | return r.exists(key) 22 | 23 | #metadata -> node, seq 24 | def saveMetaData(username, filename, metaData): 25 | key = username + "_" + filename 26 | print("Key from db", key) 27 | r.set(key,str(metaData).encode('utf-8')) 28 | 29 | def saveMetaDataOnOtherNodes(uniqueFileName, dataLocations): 30 | r.set(uniqueFileName,dataLocations) 31 | 32 | def parseMetaData(username, filename): 33 | key = username + "_" + filename 34 | return ast.literal_eval(r.get(key).decode('utf-8')) 35 | 36 | def deleteEntry(key): 37 | r.delete(key) 38 | 39 | def getUserFiles(username): 40 | return r.get(username).decode('utf-8') 41 | 42 | def saveUserFile(username, filename): 43 | key = username + "_" + filename 44 | if(keyExists(key)): 45 | l=ast.literal_eval(r.get(key).decode('utf-8')) 46 | l.append(filename) 47 | r.set(key,str(l)) 48 | 49 | 50 | -------------------------------------------------------------------------------- /SuperNode/proto/fileService.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package fileservice; 4 | 5 | service Fileservice { 6 | 7 | rpc UploadFile (stream FileData) returns (ack); 8 | rpc DownloadFile (FileInfo) returns (stream FileData); 9 | rpc FileSearch (FileInfo) 
returns (ack); 10 | rpc ReplicateFile (stream FileData) returns (ack); 11 | rpc FileList (UserInfo) returns (FileListResponse); 12 | rpc FileDelete (FileInfo) returns (ack); 13 | rpc UpdateFile (stream FileData) returns (ack); 14 | rpc getClusterStats (Empty) returns (ClusterStats); 15 | rpc getLeaderInfo (ClusterInfo) returns (ack); 16 | } 17 | 18 | message FileData { 19 | string username = 1; 20 | string filename =2; 21 | bytes data =3; 22 | } 23 | 24 | message ack { 25 | bool success =1; 26 | string message = 2; 27 | } 28 | 29 | message UserInfo { 30 | string username=1; 31 | } 32 | 33 | message FileInfo { 34 | string username = 1; 35 | string filename = 2; 36 | } 37 | 38 | message FileListResponse { 39 | string Filenames =1; 40 | } 41 | 42 | // The request message containing ip address and leader status 43 | message ClusterInfo { 44 | string ip = 1; 45 | string port = 2; 46 | string clusterName = 3; 47 | } 48 | 49 | // The response message containing the status of the server 50 | message ClusterStats { 51 | string cpu_usage = 1; 52 | string disk_space = 2; 53 | string used_mem = 3; 54 | } 55 | 56 | message Empty {} -------------------------------------------------------------------------------- /service/HeartbeatService.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import psutil 3 | import grpc 4 | import sys 5 | sys.path.append('../generated') 6 | sys.path.append('../utils') 7 | import heartbeat_pb2 8 | import heartbeat_pb2_grpc 9 | import db 10 | 11 | _ONE_DAY_IN_SECONDS = 60 * 60 * 24 12 | 13 | # 14 | # ***HeartBeat Service : HeartBeat service as per heartbeat.proto file.*** 15 | # 16 | class Heartbeat(heartbeat_pb2_grpc.HearBeatServicer): 17 | def __init__(self): 18 | self.primary = int(db.get("primaryStatus")) 19 | 20 | # 21 | # ***Returns the Machine stats only if the node is alive.*** 22 | # Each time request comes to leaderNode, it will ask all the other nodes in the 
cluster to send 23 | # response(CPU stats) through isAlive method. 24 | # 25 | def isAlive(self, request, context): 26 | cpu_usage = str(psutil.cpu_percent()) 27 | disk_space = str(psutil.virtual_memory()[2]) 28 | used_mem = str(psutil.disk_usage('/')[3]) 29 | stats = heartbeat_pb2.Stats(cpu_usage = cpu_usage, disk_space = disk_space, used_mem = used_mem) 30 | return stats 31 | 32 | def getCPUusage(self): 33 | print('CPU % used', psutil.cpu_percent()) 34 | print('physical memory % used:', psutil.virtual_memory()[2]) 35 | print('Secondary memory % used', psutil.disk_usage('/')[3]) 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /generated/heartbeat_pb2_grpc.py: -------------------------------------------------------------------------------- 1 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 2 | import grpc 3 | 4 | import heartbeat_pb2 as heartbeat__pb2 5 | 6 | 7 | class HearBeatStub(object): 8 | """The heartbeat service definition. 9 | """ 10 | 11 | def __init__(self, channel): 12 | """Constructor. 13 | 14 | Args: 15 | channel: A grpc.Channel. 16 | """ 17 | self.isAlive = channel.unary_unary( 18 | '/HearBeat/isAlive', 19 | request_serializer=heartbeat__pb2.NodeInfo.SerializeToString, 20 | response_deserializer=heartbeat__pb2.Stats.FromString, 21 | ) 22 | 23 | 24 | class HearBeatServicer(object): 25 | """The heartbeat service definition. 
26 | """ 27 | 28 | def isAlive(self, request, context): 29 | """Sends status 30 | """ 31 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 32 | context.set_details('Method not implemented!') 33 | raise NotImplementedError('Method not implemented!') 34 | 35 | 36 | def add_HearBeatServicer_to_server(servicer, server): 37 | rpc_method_handlers = { 38 | 'isAlive': grpc.unary_unary_rpc_method_handler( 39 | servicer.isAlive, 40 | request_deserializer=heartbeat__pb2.NodeInfo.FromString, 41 | response_serializer=heartbeat__pb2.Stats.SerializeToString, 42 | ), 43 | } 44 | generic_handler = grpc.method_handlers_generic_handler( 45 | 'HearBeat', rpc_method_handlers) 46 | server.add_generic_rpc_handlers((generic_handler,)) 47 | -------------------------------------------------------------------------------- /proto/fileService.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package fileservice; 4 | 5 | service Fileservice { 6 | 7 | rpc UploadFile (stream FileData) returns (ack); 8 | rpc DownloadFile (FileInfo) returns (stream FileData); 9 | rpc FileSearch (FileInfo) returns (ack); 10 | rpc ReplicateFile (stream FileData) returns (ack); 11 | rpc FileList (UserInfo) returns (FileListResponse); 12 | rpc FileDelete (FileInfo) returns (ack); 13 | rpc UpdateFile (stream FileData) returns (ack); 14 | rpc getClusterStats (Empty) returns (ClusterStats); 15 | rpc getLeaderInfo (ClusterInfo) returns (ack); 16 | rpc MetaDataInfo (MetaData) returns (ack); 17 | } 18 | 19 | message FileData { 20 | string username = 1; 21 | string filename =2; 22 | bytes data =3; 23 | int32 seqNo = 4; 24 | string replicaNode = 5; 25 | } 26 | 27 | message MetaData { 28 | string filename = 1; 29 | bytes seqValues = 2; 30 | } 31 | 32 | message ack { 33 | bool success =1; 34 | string message = 2; 35 | } 36 | 37 | message UserInfo { 38 | string username=1; 39 | } 40 | 41 | message FileInfo { 42 | string username = 1; 43 | string filename = 2; 44 
| int32 seqNo = 3; 45 | } 46 | 47 | message FileListResponse { 48 | string Filenames =1; 49 | } 50 | 51 | // The request message containing ip address and leader status 52 | message ClusterInfo { 53 | string ip = 1; 54 | string port = 2; 55 | string clusterName = 3; 56 | } 57 | 58 | message Empty {} 59 | 60 | // The response message containing the status of the server 61 | message ClusterStats { 62 | 63 | string cpu_usage = 1; 64 | string disk_space = 2; 65 | string used_mem = 3; 66 | } -------------------------------------------------------------------------------- /SuperNode/ClusterStatus.py: -------------------------------------------------------------------------------- 1 | from concurrent import futures 2 | import grpc 3 | import sys 4 | import db 5 | sys.path.append('./proto') 6 | import fileService_pb2_grpc 7 | import fileService_pb2 8 | import time 9 | import threading 10 | 11 | class ClusterStatus(): 12 | def leastUtilizedNode(self, clusterList): 13 | #print("In leastUtilizedNode") 14 | minVal, minVal2 = 301.00, 301.00 15 | leastLoadedNode, leastLoadedNode2 = "","" 16 | clusterName, clusterName2 = "", "" 17 | #print("Checking clusterList-{}", clusterList) 18 | for cluster in clusterList: 19 | channel = self.isChannelAlive(clusterList[cluster]) 20 | #print("After checking channel alive, channel=",channel) 21 | if(channel): 22 | stub = fileService_pb2_grpc.FileserviceStub(channel) 23 | stats = stub.getClusterStats(fileService_pb2.Empty()) 24 | #print("stats=",stats) 25 | total = 300.00 - (float(stats.cpu_usage) + float(stats.disk_space) + float(stats.used_mem)) 26 | if ((total/3)>") 76 | print("Enter one, two or three for server No.") 77 | exit() 78 | config_dict = config_dict_orig[str(sys.argv[1]).lower()] 79 | server_host = config_dict['hostname'] 80 | server_port = str(config_dict['server_port']) 81 | raft_port = str(config_dict['raft_port']) 82 | super_node_address = config_dict_orig['super_node_address'] 83 | 84 | #Initially set the primaryStatus to 0 
class DeleteHelper():
    """Deletes a file's data chunks and metadata from every node that holds them."""

    def __init__(self, hostname, server_port, activeNodesChecker):
        # Address of this node, so we can short-circuit local deletes.
        self.serverAddress = hostname + ":" + server_port
        self.activeNodesChecker = activeNodesChecker

    def deleteFileChunksAndMetaFromNodes(self, username, filename, metaData):
        """Delete every chunk and metadata entry of (username, filename).

        metaData is an iterable of (node, seqNo, replicaNode) triples; one
        worker task is fanned out per triple.
        """
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
            future_to_meta = {
                executor.submit(self.deleteDataAndMetaFromIndividualChunk, meta, username, filename): meta
                for meta in metaData
            }
            for future in concurrent.futures.as_completed(future_to_meta):
                try:
                    future.result()
                except Exception as err:  # was 'exec', which shadowed the builtin
                    print(err)

        print("All tasks are completed")

    def deleteDataAndMetaFromIndividualChunk(self, meta, username, filename):
        """Delete one chunk (and the file's metadata entry) locally and on the
        chunk's primary node and replica node.

        meta is a (node, seqNo, replicaNode) triple.
        """
        node, seqNo, replicaNode = str(meta[0]), meta[1], str(meta[2])
        metaDataKey = username + "_" + filename
        dataChunkKey = username + "_" + filename + "_" + str(seqNo)

        if db.keyExists(metaDataKey):
            print("deleteDataAndMetaFromIndividualChunk: Deleting the metadataEntry from local db :", node)
            db.deleteEntry(metaDataKey)

        if db.keyExists(dataChunkKey):
            print("deleteDataAndMetaFromIndividualChunk: Deleting the data chunk from local db:", node)
            db.deleteEntry(dataChunkKey)

        active_ip_channel_dict = self.activeNodesChecker.getActiveChannels()

        if node != self.serverAddress and node in active_ip_channel_dict:
            self._deleteChunkFromRemoteNode(node, active_ip_channel_dict[node], username, filename, seqNo)

        # The original logged the primary node's address here instead of the
        # replica's; routing both branches through one helper fixes that.
        if replicaNode != self.serverAddress and replicaNode in active_ip_channel_dict:
            self._deleteChunkFromRemoteNode(replicaNode, active_ip_channel_dict[replicaNode], username, filename, seqNo)

    def _deleteChunkFromRemoteNode(self, node, channel, username, filename, seqNo):
        """Issue a FileDelete RPC for one chunk on a remote node and log the outcome."""
        stub = fileService_pb2_grpc.FileserviceStub(channel)
        response = stub.FileDelete(fileService_pb2.FileInfo(username=username, filename=filename, seqNo=seqNo))

        if response.success:
            print("deleteDataAndMetaFromIndividualChunk : Successfully deleted chunk from node : ", node)
        else:
            print("deleteDataAndMetaFromIndividualChunk : Chunk could not be deleted from node :", node)
class DownloadHelper():
    """Fetches a file's chunks in parallel from the nodes that hold them
    (falling back to replicas) and stitches them back together."""

    def __init__(self, hostname, server_port, activeNodesChecker):
        self.active_ip_channel_dict = activeNodesChecker.getActiveChannels()
        self.serverAddress = hostname + ":" + server_port
        # seqNo -> chunk bytes; written concurrently by the worker threads.
        self.seqDataMap = {}

    def getDataFromNodes(self, username, filename, metaData):
        """Download every chunk of (username, filename) and return the
        reassembled file contents as bytes.

        metaData is an iterable of (node, seqNo, replicaNode) triples.
        """
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
            future_to_meta = {
                executor.submit(self.getDataFromIndividualNode, meta, username, filename): meta
                for meta in metaData
            }
            for future in concurrent.futures.as_completed(future_to_meta):
                try:
                    future.result()
                except Exception as err:  # was 'exec', which shadowed the builtin
                    print(err)

        print("All tasks are completed")

        return self.buildTheDataFromMap()

    def getDataFromIndividualNode(self, meta, username, filename):
        """Fetch one chunk, preferring its primary node, falling back to the
        replica, and reading the local db when this node holds it.

        Called from multiple worker threads; the result lands in seqDataMap.
        """
        print("Inside getDataFromIndividualNode")
        print("Task Executed {}".format(threading.current_thread()))
        node, seqNo, replicaNode = str(meta[0]), meta[1], str(meta[2])

        data = b""

        if node == str(self.serverAddress):
            key = username + "_" + filename + "_" + str(seqNo)
            data = db.getFileData(key)
        else:
            if node in self.active_ip_channel_dict:
                channel = self.active_ip_channel_dict[node]
                print("Fetching Data from Node {}".format(node))
            elif replicaNode in self.active_ip_channel_dict:
                channel = self.active_ip_channel_dict[replicaNode]
                print("Fetching Data from Node {}".format(replicaNode))
            else:
                print("Both original and replica nodes are down!")
                return

            stub = fileService_pb2_grpc.FileserviceStub(channel)
            responses = stub.DownloadFile(fileService_pb2.FileInfo(username=username, filename=filename, seqNo=seqNo))
            for response in responses:
                data += response.data

        self.seqDataMap[seqNo] = data
        print("returning from the getDataFromIndividualNode")

    def buildTheDataFromMap(self):
        """Concatenate the fetched chunks in sequence order.

        The original indexed 1..len(map) and crashed with a TypeError when a
        chunk was missing (both its nodes down); joining the present keys in
        sorted order is identical when all chunks arrived.
        """
        return b"".join(self.seqDataMap[seq] for seq in sorted(self.seqDataMap))
class ActiveNodesChecker():
    """Keeps track of which data nodes are alive.

    A background thread runs readAvailableIPAddresses(), which watches
    iptable.txt for node additions/removals and refreshes the map of
    active {ip: channel} entries via heartbeat checks.
    """

    def __init__(self):
        # channel -> "ip:port" for every address listed in iptable.txt
        self.channel_ip_map = {}
        # "ip:port" -> channel, only for nodes that answered the readiness probe
        self.active_ip_channel_dict = {}

    def readAvailableIPAddresses(self):
        """Poll iptable.txt forever, recreating channels when the node list
        changes and refreshing the active-channel map each iteration."""
        print("Inside readAvailableIPAddresses")

        ip_addresses = self.getAllAvailableIPAddresses()
        self.createChannelListForAvailableIPs(ip_addresses)
        db.setData("ip_addresses", self.getStringFromIPAddressesList(ip_addresses))

        while True:
            time.sleep(0.5)

            try:
                ip_addresses_old = self.getIPAddressListFromString(db.getData("ip_addresses"))
            except Exception:
                # Key unreadable/absent: reset it and treat the old list as
                # empty. (The original left ip_addresses_old unbound here,
                # which raised NameError at the comparison below.)
                db.setData("ip_addresses", "")
                ip_addresses_old = []

            ip_addresses = self.getAllAvailableIPAddresses()
            db.setData("ip_addresses", self.getStringFromIPAddressesList(ip_addresses))

            # Node added or removed: rebuild the {channel: ip} map.
            if ip_addresses != ip_addresses_old:
                self.createChannelListForAvailableIPs(ip_addresses)

            # Refresh the active {ip, channel} map.
            self.heartBeatChecker()

    def getAllAvailableIPAddresses(self):
        """Return the ip:port entries listed in iptable.txt (blank lines skipped;
        the original raised IndexError on a blank line)."""
        ip_addresses = []
        with open('iptable.txt') as f:
            for line in f:
                fields = line.split()
                if fields:
                    ip_addresses.append(fields[0])
        return ip_addresses

    def getIPAddressListFromString(self, ipAddresses):
        """Inverse of getStringFromIPAddressesList; '' maps to []."""
        if ipAddresses == "":
            return []
        return ipAddresses.split(',')

    def getStringFromIPAddressesList(self, ipAddressList):
        """Serialize a list of addresses to a comma-separated string."""
        return ",".join(ipAddressList)

    def createChannelListForAvailableIPs(self, ip_addresses):
        """Rebuild the {channel: ip} map with a fresh insecure channel per address."""
        self.channel_ip_map = {}
        for ip_address in ip_addresses:
            channel = grpc.insecure_channel('{}'.format(ip_address))
            self.channel_ip_map[channel] = ip_address

    def heartBeatChecker(self):
        """Add channels that answer the readiness probe to the active map and
        evict the ones that stopped answering."""
        for channel, ip in self.channel_ip_map.items():
            if self.isChannelAlive(channel):
                if ip not in self.active_ip_channel_dict:
                    self.active_ip_channel_dict[ip] = channel
            elif ip in self.active_ip_channel_dict:
                del self.active_ip_channel_dict[ip]

    def isChannelAlive(self, channel):
        """True if the channel becomes ready within 1 second."""
        try:
            grpc.channel_ready_future(channel).result(timeout=1)
        except grpc.FutureTimeoutError:
            return False
        return True

    def getActiveChannels(self):
        """Return the live {ip: channel} map (shared, mutated in place)."""
        return self.active_ip_channel_dict
class RaftHelper():
    """Starts and monitors the Raft consensus service for this cluster.

    A dedicated thread runs startRaftServer(), which keeps the local
    "primaryStatus" db flag in sync with Raft leadership and notifies the
    SuperNode whenever this node becomes the leader.
    """

    def __init__(self, hostname, server_port, raft_port, activeNodesChecker, superNodeAddress):
        self.activeNodesChecker = activeNodesChecker
        self.serverAddress = hostname + ":" + raft_port
        self.raft_port = raft_port
        self.superNodeAddress = superNodeAddress
        self.hostname = hostname
        self.serverPort = server_port

    def startRaftServer(self):
        """Join the Raft cluster and loop forever, reacting to leadership changes."""
        time.sleep(4)  # give the other node services a head start
        print("------------------------------Starting Raft Server-------------------------------------")
        otherNodes = self.getListOfOtherNodes(self.activeNodesChecker.getAllAvailableIPAddresses())

        for node in otherNodes:
            print(node)

        otherNodes.remove(self.serverAddress)

        raftInstance = Raft(self.serverAddress, otherNodes)
        print("Raft utility has been started")

        n = 0
        old_value = -1
        isLeaderUpdated = False

        while True:
            time.sleep(0.5)
            if raftInstance.getCounter() != old_value:
                old_value = raftInstance.getCounter()
            if raftInstance._getLeader() is None:
                # No leader elected. If this is the only live node, promote
                # self exactly once and tell the SuperNode.
                if not isLeaderUpdated and len(self.activeNodesChecker.getActiveChannels()) == 1:
                    print("Since the leader is None, hence declaring myself the leader:", self.serverAddress)
                    db.setData("primaryStatus", 1)
                    self.sendLeaderInfoToSuperNode()
                    isLeaderUpdated = True
                continue
            n += 1
            # NOTE(review): assumed the status refresh shares the every-20th-
            # iteration (~10s) cadence of the log output; the dead `if True:`
            # wrapper was removed. Confirm against the original indentation.
            if n % 20 == 0:
                print("===================================")
                print("Am I the leader?", raftInstance._isLeader())
                print("Current Leader running at address:", raftInstance._getLeader())
                self.updatePrimaryStatus(raftInstance._isLeader(), raftInstance)

    def getListOfOtherNodes(self, allAvailableIPAddresses):
        """Map each cluster address (ip:server_port) to its Raft address
        (ip:raft_port).

        The original ignored its parameter and refetched the same list from
        activeNodesChecker; using the argument is equivalent for the caller.
        """
        raftNodes = []
        for address in allAvailableIPAddresses:
            ip, _port = address.split(":")
            raftNodes.append(ip + ":" + self.raft_port)
        return raftNodes

    def updatePrimaryStatus(self, isLeader, raftInstance):
        """Sync the 'primaryStatus' db flag with Raft leadership and notify the
        SuperNode on promotion."""
        # Was db.get(); every other module in this repo uses db.getData().
        isPrimary = int(db.getData("primaryStatus"))

        if isPrimary == 1:
            # Refresh the SuperNode's view of the current leader.
            self.sendLeaderInfoToSuperNode()

        if raftInstance._getLeader() is None:
            db.setData("primaryStatus", 1)
            self.sendLeaderInfoToSuperNode()
        elif isLeader and isPrimary == 0:
            db.setData("primaryStatus", 1)
            self.sendLeaderInfoToSuperNode()
        elif not isLeader and isPrimary == 1:
            db.setData("primaryStatus", 0)

    def sendLeaderInfoToSuperNode(self):
        """Tell the SuperNode that this node is the cluster leader (best effort)."""
        try:
            channel = grpc.insecure_channel('{}'.format(self.superNodeAddress))
            stub = fileService_pb2_grpc.FileserviceStub(channel)
            response = stub.getLeaderInfo(fileService_pb2.ClusterInfo(ip=self.hostname, port=self.serverPort, clusterName="team1"))
            print(response.message)
        except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt/SystemExit
            print("Not able to connect to supernode")
2 | # source: heartbeat.proto 3 | 4 | import sys 5 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import message as _message 8 | from google.protobuf import reflection as _reflection 9 | from google.protobuf import symbol_database as _symbol_database 10 | # @@protoc_insertion_point(imports) 11 | 12 | _sym_db = _symbol_database.Default() 13 | 14 | 15 | 16 | 17 | DESCRIPTOR = _descriptor.FileDescriptor( 18 | name='heartbeat.proto', 19 | package='', 20 | syntax='proto3', 21 | serialized_options=None, 22 | serialized_pb=_b('\n\x0fheartbeat.proto\"$\n\x08NodeInfo\x12\n\n\x02ip\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\t\"@\n\x05Stats\x12\x11\n\tcpu_usage\x18\x01 \x01(\t\x12\x12\n\ndisk_space\x18\x02 \x01(\t\x12\x10\n\x08used_mem\x18\x03 \x01(\t2*\n\x08HearBeat\x12\x1e\n\x07isAlive\x12\t.NodeInfo\x1a\x06.Stats\"\x00\x62\x06proto3') 23 | ) 24 | 25 | 26 | 27 | 28 | _NODEINFO = _descriptor.Descriptor( 29 | name='NodeInfo', 30 | full_name='NodeInfo', 31 | filename=None, 32 | file=DESCRIPTOR, 33 | containing_type=None, 34 | fields=[ 35 | _descriptor.FieldDescriptor( 36 | name='ip', full_name='NodeInfo.ip', index=0, 37 | number=1, type=9, cpp_type=9, label=1, 38 | has_default_value=False, default_value=_b("").decode('utf-8'), 39 | message_type=None, enum_type=None, containing_type=None, 40 | is_extension=False, extension_scope=None, 41 | serialized_options=None, file=DESCRIPTOR), 42 | _descriptor.FieldDescriptor( 43 | name='port', full_name='NodeInfo.port', index=1, 44 | number=2, type=9, cpp_type=9, label=1, 45 | has_default_value=False, default_value=_b("").decode('utf-8'), 46 | message_type=None, enum_type=None, containing_type=None, 47 | is_extension=False, extension_scope=None, 48 | serialized_options=None, file=DESCRIPTOR), 49 | ], 50 | extensions=[ 51 | ], 52 | nested_types=[], 53 | enum_types=[ 54 | ], 55 | serialized_options=None, 56 | 
is_extendable=False, 57 | syntax='proto3', 58 | extension_ranges=[], 59 | oneofs=[ 60 | ], 61 | serialized_start=19, 62 | serialized_end=55, 63 | ) 64 | 65 | 66 | _STATS = _descriptor.Descriptor( 67 | name='Stats', 68 | full_name='Stats', 69 | filename=None, 70 | file=DESCRIPTOR, 71 | containing_type=None, 72 | fields=[ 73 | _descriptor.FieldDescriptor( 74 | name='cpu_usage', full_name='Stats.cpu_usage', index=0, 75 | number=1, type=9, cpp_type=9, label=1, 76 | has_default_value=False, default_value=_b("").decode('utf-8'), 77 | message_type=None, enum_type=None, containing_type=None, 78 | is_extension=False, extension_scope=None, 79 | serialized_options=None, file=DESCRIPTOR), 80 | _descriptor.FieldDescriptor( 81 | name='disk_space', full_name='Stats.disk_space', index=1, 82 | number=2, type=9, cpp_type=9, label=1, 83 | has_default_value=False, default_value=_b("").decode('utf-8'), 84 | message_type=None, enum_type=None, containing_type=None, 85 | is_extension=False, extension_scope=None, 86 | serialized_options=None, file=DESCRIPTOR), 87 | _descriptor.FieldDescriptor( 88 | name='used_mem', full_name='Stats.used_mem', index=2, 89 | number=3, type=9, cpp_type=9, label=1, 90 | has_default_value=False, default_value=_b("").decode('utf-8'), 91 | message_type=None, enum_type=None, containing_type=None, 92 | is_extension=False, extension_scope=None, 93 | serialized_options=None, file=DESCRIPTOR), 94 | ], 95 | extensions=[ 96 | ], 97 | nested_types=[], 98 | enum_types=[ 99 | ], 100 | serialized_options=None, 101 | is_extendable=False, 102 | syntax='proto3', 103 | extension_ranges=[], 104 | oneofs=[ 105 | ], 106 | serialized_start=57, 107 | serialized_end=121, 108 | ) 109 | 110 | DESCRIPTOR.message_types_by_name['NodeInfo'] = _NODEINFO 111 | DESCRIPTOR.message_types_by_name['Stats'] = _STATS 112 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 113 | 114 | NodeInfo = _reflection.GeneratedProtocolMessageType('NodeInfo', (_message.Message,), dict( 115 | DESCRIPTOR = _NODEINFO, 
def getFileData():
    """Prompt for a filename under ./files and return its contents wrapped in a
    FileData message."""
    fileName = input("Enter filename:")
    outfile = os.path.join('files', fileName)
    # Context manager closes the handle; the original leaked it.
    with open(outfile, 'rb') as infile:
        file_data = infile.read()
    # The proto field is 'filename' (lowercase); the original's 'fileName='
    # keyword raised a ValueError at runtime.
    return fileService_pb2.FileData(filename=fileName, data=file_data)
def downloadTheFile(stub):
    """Download a user's file and save it under ./downloads."""
    userName = input("Enter Username: ")
    fileName = input("Enter file name: ")
    data = b""
    sTime = time.time()
    responses = stub.DownloadFile(fileService_pb2.FileInfo(username=userName, filename=fileName))
    for response in responses:
        fileName = response.filename
        data += response.data

    print("Time for Download = ", time.time() - sTime)
    filePath = os.path.join('downloads', fileName)
    # Context manager flushes and closes the handle; the original leaked it
    # between open() and an explicit close().
    with open(filePath, 'wb') as saveFile:
        saveFile.write(data)

    print("File Downloaded - ", fileName)


def uploadTheFileChunks(stub):
    """Stream the chunks produced by getFileChunks() to the server."""
    response = stub.UploadFile(getFileChunks())
    if response.success:
        print("File successfully Uploaded")
    else:
        print("Failed to upload. Message - ", response.message)


def deleteTheFile(stub):
    """Delete a user's file from the cluster."""
    userName = input("Enter Username: ")
    fileName = input("Enter file name: ")
    response = stub.FileDelete(fileService_pb2.FileInfo(username=userName, filename=fileName))
    print(response.message)


def isFilePresent(stub):
    """Check whether a user's file exists on the cluster."""
    userName = input("Enter Username: ")
    fileName = input("Enter file name: ")
    response = stub.FileSearch(fileService_pb2.FileInfo(username=userName, filename=fileName))
    # The server's message already states the result, so both branches of the
    # original printed the same thing.
    print(response.message)


def sendFileInChunks(username, filename, i):
    """Yield FileData chunks of ./files/<filename> for upload attempt number i.

    The username is suffixed with the attempt number so each upload is stored
    under a distinct user key.
    """
    CHUNK_SIZE = 4000000  # maximum chunk size that can be sent

    # The original referenced 'fileName', which is not a parameter of this
    # function -> NameError on every call.
    outfile = os.path.join('files', filename)

    with open(outfile, 'rb') as infile:
        while True:
            chunk = infile.read(CHUNK_SIZE)
            if not chunk:
                break
            yield fileService_pb2.FileData(username=username + "_" + str(i), filename=filename, data=chunk, seqNo=1)


def sendFileMultipleTimes(stub):
    """Upload the same file N times (load/replication testing)."""
    userName = input("Enter Username: ")
    fileName = input("Enter file name: ")
    # input() returns a str; the original passed it straight to range(),
    # raising TypeError.
    numberOfTimes = int(input("How many times you want to send this file?"))

    for i in range(1, numberOfTimes + 1):
        response = stub.UploadFile(sendFileInChunks(userName, fileName, i))
        if response.success:
            print("File successfully Uploaded for sequence : ", str(i))
        else:
            print("Failed to upload for sequence : ", str(i))


def updateFile(stub):
    """Replace an existing file with freshly-read chunks."""
    response = stub.UpdateFile(getFileChunks())
    if response.success:
        print("File successfully updated")
    else:
        print("Failed to update the file")


def getListOfAllTheFilesForTheUser(stub):
    """Print the names of every file stored for a user."""
    userName = input("Enter Username: ")
    fileListResponse = stub.FileList(fileService_pb2.UserInfo(username=userName))
    print(fileListResponse.Filenames)
def handleUserInputs(stub):
    """Show the menu once and dispatch the chosen action to its handler."""
    print("===================================")
    print("1. Upload a file")
    print("2. Download a file.")
    print("3. Delete a file")
    print("4. Check if a file is present")
    print("5. Update a file.")
    print("6. Get a list of all the files for an user")
    # Label said "100 times", but option 7 asks for the count at runtime.
    print("7. Send a file multiple times")
    print("===================================")
    option = input("Please choose an option.")

    # Dispatch table instead of an if/elif ladder; unknown options are
    # silently ignored, matching the original behavior.
    actions = {
        '1': uploadTheFileChunks,
        '2': downloadTheFile,
        '3': deleteTheFile,
        '4': isFilePresent,
        '5': updateFile,
        '6': getListOfAllTheFilesForTheUser,
        '7': sendFileMultipleTimes,
    }
    action = actions.get(option)
    if action is not None:
        action(stub)


def run_client(serverAddress):
    """Connect to the given server (ip:port) and hand control to the menu."""
    with grpc.insecure_channel(serverAddress) as channel:
        try:
            grpc.channel_ready_future(channel).result(timeout=1)
        except grpc.FutureTimeoutError:
            print("Connection timeout. Unable to connect to port ")
        else:
            print("Connected")
            stub = fileService_pb2_grpc.FileserviceStub(channel)
            handleUserInputs(stub)


if __name__ == '__main__':
    # Allow the server address to be passed on the command line; the default
    # preserves the original hard-coded endpoint.
    run_client(sys.argv[1] if len(sys.argv) > 1 else '192.168.0.9:9000')
16 | """ 17 | self.UploadFile = channel.stream_unary( 18 | '/fileservice.Fileservice/UploadFile', 19 | request_serializer=proto_dot_fileService__pb2.FileData.SerializeToString, 20 | response_deserializer=proto_dot_fileService__pb2.ack.FromString, 21 | ) 22 | self.DownloadFile = channel.unary_stream( 23 | '/fileservice.Fileservice/DownloadFile', 24 | request_serializer=proto_dot_fileService__pb2.FileInfo.SerializeToString, 25 | response_deserializer=proto_dot_fileService__pb2.FileData.FromString, 26 | ) 27 | self.FileSearch = channel.unary_unary( 28 | '/fileservice.Fileservice/FileSearch', 29 | request_serializer=proto_dot_fileService__pb2.FileInfo.SerializeToString, 30 | response_deserializer=proto_dot_fileService__pb2.ack.FromString, 31 | ) 32 | self.ReplicateFile = channel.stream_unary( 33 | '/fileservice.Fileservice/ReplicateFile', 34 | request_serializer=proto_dot_fileService__pb2.FileData.SerializeToString, 35 | response_deserializer=proto_dot_fileService__pb2.ack.FromString, 36 | ) 37 | self.FileList = channel.unary_unary( 38 | '/fileservice.Fileservice/FileList', 39 | request_serializer=proto_dot_fileService__pb2.UserInfo.SerializeToString, 40 | response_deserializer=proto_dot_fileService__pb2.FileListResponse.FromString, 41 | ) 42 | self.FileDelete = channel.unary_unary( 43 | '/fileservice.Fileservice/FileDelete', 44 | request_serializer=proto_dot_fileService__pb2.FileInfo.SerializeToString, 45 | response_deserializer=proto_dot_fileService__pb2.ack.FromString, 46 | ) 47 | self.UpdateFile = channel.stream_unary( 48 | '/fileservice.Fileservice/UpdateFile', 49 | request_serializer=proto_dot_fileService__pb2.FileData.SerializeToString, 50 | response_deserializer=proto_dot_fileService__pb2.ack.FromString, 51 | ) 52 | self.getClusterStats = channel.unary_unary( 53 | '/fileservice.Fileservice/getClusterStats', 54 | request_serializer=proto_dot_fileService__pb2.Empty.SerializeToString, 55 | response_deserializer=proto_dot_fileService__pb2.ClusterStats.FromString, 
56 | ) 57 | self.getLeaderInfo = channel.unary_unary( 58 | '/fileservice.Fileservice/getLeaderInfo', 59 | request_serializer=proto_dot_fileService__pb2.ClusterInfo.SerializeToString, 60 | response_deserializer=proto_dot_fileService__pb2.ack.FromString, 61 | ) 62 | 63 | 64 | class FileserviceServicer(object): 65 | # missing associated documentation comment in .proto file 66 | pass 67 | 68 | def UploadFile(self, request_iterator, context): 69 | # missing associated documentation comment in .proto file 70 | pass 71 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 72 | context.set_details('Method not implemented!') 73 | raise NotImplementedError('Method not implemented!') 74 | 75 | def DownloadFile(self, request, context): 76 | # missing associated documentation comment in .proto file 77 | pass 78 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 79 | context.set_details('Method not implemented!') 80 | raise NotImplementedError('Method not implemented!') 81 | 82 | def FileSearch(self, request, context): 83 | # missing associated documentation comment in .proto file 84 | pass 85 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 86 | context.set_details('Method not implemented!') 87 | raise NotImplementedError('Method not implemented!') 88 | 89 | def ReplicateFile(self, request_iterator, context): 90 | # missing associated documentation comment in .proto file 91 | pass 92 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 93 | context.set_details('Method not implemented!') 94 | raise NotImplementedError('Method not implemented!') 95 | 96 | def FileList(self, request, context): 97 | # missing associated documentation comment in .proto file 98 | pass 99 | context.set_code(grpc.StatusCode.UNIMPLEMENTED) 100 | context.set_details('Method not implemented!') 101 | raise NotImplementedError('Method not implemented!') 102 | 103 | def FileDelete(self, request, context): 104 | # missing associated documentation comment in .proto file 105 | pass 106 | 
        # NOTE(review): machine-generated gRPC bindings (grpc_tools protoc plugin
        # output for fileService.proto).  Do not hand-edit; regenerate from the
        # .proto instead.  The three lines below close a servicer method whose
        # `def` line lies above this chunk of the file.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def UpdateFile(self, request_iterator, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getClusterStats(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getLeaderInfo(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


# Registers the concrete servicer's handlers with the gRPC server under the
# 'fileservice.Fileservice' service name.  NOTE(review): this copy exposes
# 9 RPCs; the generated module later in this dump also has MetaDataInfo.
def add_FileserviceServicer_to_server(servicer, server):
    rpc_method_handlers = {
        'UploadFile': grpc.stream_unary_rpc_method_handler(
            servicer.UploadFile,
            request_deserializer=proto_dot_fileService__pb2.FileData.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
        'DownloadFile': grpc.unary_stream_rpc_method_handler(
            servicer.DownloadFile,
            request_deserializer=proto_dot_fileService__pb2.FileInfo.FromString,
            response_serializer=proto_dot_fileService__pb2.FileData.SerializeToString,
        ),
        'FileSearch': grpc.unary_unary_rpc_method_handler(
            servicer.FileSearch,
            request_deserializer=proto_dot_fileService__pb2.FileInfo.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
        'ReplicateFile': grpc.stream_unary_rpc_method_handler(
            servicer.ReplicateFile,
            request_deserializer=proto_dot_fileService__pb2.FileData.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
        'FileList': grpc.unary_unary_rpc_method_handler(
            servicer.FileList,
            request_deserializer=proto_dot_fileService__pb2.UserInfo.FromString,
            response_serializer=proto_dot_fileService__pb2.FileListResponse.SerializeToString,
        ),
        'FileDelete': grpc.unary_unary_rpc_method_handler(
            servicer.FileDelete,
            request_deserializer=proto_dot_fileService__pb2.FileInfo.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
        'UpdateFile': grpc.stream_unary_rpc_method_handler(
            servicer.UpdateFile,
            request_deserializer=proto_dot_fileService__pb2.FileData.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
        'getClusterStats': grpc.unary_unary_rpc_method_handler(
            servicer.getClusterStats,
            request_deserializer=proto_dot_fileService__pb2.Empty.FromString,
            response_serializer=proto_dot_fileService__pb2.ClusterStats.SerializeToString,
        ),
        'getLeaderInfo': grpc.unary_unary_rpc_method_handler(
            servicer.getLeaderInfo,
            request_deserializer=proto_dot_fileService__pb2.ClusterInfo.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'fileservice.Fileservice', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
--------------------------------------------------------------------------------
/proto/fileService_pb2_grpc.py:
--------------------------------------------------------------------------------
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
# NOTE(review): machine-generated gRPC bindings (grpc_tools protoc plugin
# output for proto/fileService.proto).  Do not hand-edit: change the .proto
# and regenerate.  Unlike the copy under SuperNode/proto/, this version also
# exposes the MetaDataInfo RPC.
import grpc

from proto import fileService_pb2 as proto_dot_fileService__pb2


class FileserviceStub(object):
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        self.UploadFile = channel.stream_unary(
            '/fileservice.Fileservice/UploadFile',
            request_serializer=proto_dot_fileService__pb2.FileData.SerializeToString,
            response_deserializer=proto_dot_fileService__pb2.ack.FromString,
        )
        self.DownloadFile = channel.unary_stream(
            '/fileservice.Fileservice/DownloadFile',
            request_serializer=proto_dot_fileService__pb2.FileInfo.SerializeToString,
            response_deserializer=proto_dot_fileService__pb2.FileData.FromString,
        )
        self.FileSearch = channel.unary_unary(
            '/fileservice.Fileservice/FileSearch',
            request_serializer=proto_dot_fileService__pb2.FileInfo.SerializeToString,
            response_deserializer=proto_dot_fileService__pb2.ack.FromString,
        )
        self.ReplicateFile = channel.stream_unary(
            '/fileservice.Fileservice/ReplicateFile',
            request_serializer=proto_dot_fileService__pb2.FileData.SerializeToString,
            response_deserializer=proto_dot_fileService__pb2.ack.FromString,
        )
        self.FileList = channel.unary_unary(
            '/fileservice.Fileservice/FileList',
            request_serializer=proto_dot_fileService__pb2.UserInfo.SerializeToString,
            response_deserializer=proto_dot_fileService__pb2.FileListResponse.FromString,
        )
        self.FileDelete = channel.unary_unary(
            '/fileservice.Fileservice/FileDelete',
            request_serializer=proto_dot_fileService__pb2.FileInfo.SerializeToString,
            response_deserializer=proto_dot_fileService__pb2.ack.FromString,
        )
        self.UpdateFile = channel.stream_unary(
            '/fileservice.Fileservice/UpdateFile',
            request_serializer=proto_dot_fileService__pb2.FileData.SerializeToString,
            response_deserializer=proto_dot_fileService__pb2.ack.FromString,
        )
        self.getClusterStats = channel.unary_unary(
            '/fileservice.Fileservice/getClusterStats',
            request_serializer=proto_dot_fileService__pb2.Empty.SerializeToString,
            response_deserializer=proto_dot_fileService__pb2.ClusterStats.FromString,
        )
        self.getLeaderInfo = channel.unary_unary(
            '/fileservice.Fileservice/getLeaderInfo',
            request_serializer=proto_dot_fileService__pb2.ClusterInfo.SerializeToString,
            response_deserializer=proto_dot_fileService__pb2.ack.FromString,
        )
        self.MetaDataInfo = channel.unary_unary(
            '/fileservice.Fileservice/MetaDataInfo',
            request_serializer=proto_dot_fileService__pb2.MetaData.SerializeToString,
            response_deserializer=proto_dot_fileService__pb2.ack.FromString,
        )


class FileserviceServicer(object):
    # missing associated documentation comment in .proto file
    pass

    def UploadFile(self, request_iterator, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DownloadFile(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def FileSearch(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ReplicateFile(self, request_iterator, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def FileList(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def FileDelete(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def UpdateFile(self, request_iterator, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getClusterStats(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getLeaderInfo(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def MetaDataInfo(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_FileserviceServicer_to_server(servicer, server):
    rpc_method_handlers = {
        'UploadFile': grpc.stream_unary_rpc_method_handler(
            servicer.UploadFile,
            request_deserializer=proto_dot_fileService__pb2.FileData.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
        'DownloadFile': grpc.unary_stream_rpc_method_handler(
            servicer.DownloadFile,
            request_deserializer=proto_dot_fileService__pb2.FileInfo.FromString,
            response_serializer=proto_dot_fileService__pb2.FileData.SerializeToString,
        ),
        'FileSearch': grpc.unary_unary_rpc_method_handler(
            servicer.FileSearch,
            request_deserializer=proto_dot_fileService__pb2.FileInfo.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
        'ReplicateFile': grpc.stream_unary_rpc_method_handler(
            servicer.ReplicateFile,
            request_deserializer=proto_dot_fileService__pb2.FileData.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
        'FileList': grpc.unary_unary_rpc_method_handler(
            servicer.FileList,
            request_deserializer=proto_dot_fileService__pb2.UserInfo.FromString,
            response_serializer=proto_dot_fileService__pb2.FileListResponse.SerializeToString,
        ),
        'FileDelete': grpc.unary_unary_rpc_method_handler(
            servicer.FileDelete,
            request_deserializer=proto_dot_fileService__pb2.FileInfo.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
        'UpdateFile': grpc.stream_unary_rpc_method_handler(
            servicer.UpdateFile,
            request_deserializer=proto_dot_fileService__pb2.FileData.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
        'getClusterStats': grpc.unary_unary_rpc_method_handler(
            servicer.getClusterStats,
            request_deserializer=proto_dot_fileService__pb2.Empty.FromString,
            response_serializer=proto_dot_fileService__pb2.ClusterStats.SerializeToString,
        ),
        'getLeaderInfo': grpc.unary_unary_rpc_method_handler(
            servicer.getLeaderInfo,
            request_deserializer=proto_dot_fileService__pb2.ClusterInfo.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
        'MetaDataInfo': grpc.unary_unary_rpc_method_handler(
            servicer.MetaDataInfo,
            request_deserializer=proto_dot_fileService__pb2.MetaData.FromString,
            response_serializer=proto_dot_fileService__pb2.ack.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'fileservice.Fileservice', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
--------------------------------------------------------------------------------
/SuperNode/superNode.py:
--------------------------------------------------------------------------------
import sys
sys.path.append('./proto')
from concurrent import futures
from threading import Thread
import grpc

import db
import fileService_pb2_grpc
import fileService_pb2
import time
import threading
from ClusterStatus import ClusterStatus


# Sleep quantum for the server's keep-alive loop in run_server().
_ONE_DAY_IN_SECONDS = 60 * 60 * 24

#
# *** FileServer Service : FileServer service as per fileService.proto file. ***
# *** This class implements all the required methods to serve the user requests.
*** 20 | # 21 | class FileServer(fileService_pb2_grpc.FileserviceServicer): 22 | def __init__(self, hostIP, port): 23 | self.serverAddress = hostIP+":"+port 24 | self.clusterLeaders = {} 25 | self.clusterStatus = ClusterStatus() 26 | self.ip_channel_dict = {} 27 | 28 | # 29 | # This service gets invoked when each cluster's leader informs the supernode 30 | # about who the current cluster leader is 31 | # 32 | def getLeaderInfo(self, request, context): 33 | print("getLeaderInfo Called") 34 | address = request.ip + ":" + request.port 35 | self.clusterLeaders[request.clusterName] = address 36 | print("ClusterLeaders: ",self.clusterLeaders) 37 | channel = grpc.insecure_channel('{}'.format(address)) 38 | self.ip_channel_dict[address] = channel 39 | return fileService_pb2.ack(success=True, message="Leader Updated.") 40 | 41 | # 42 | # This service gets invoked when client uploads a new file. 43 | # 44 | def UploadFile(self, request_iterator, context): 45 | print("Inside Server method ---------- UploadFile") 46 | 47 | # Get the two clusters that have the most resources based on cluster stats 48 | node, node_replica, clusterName, clusterReplica = self.clusterStatus.leastUtilizedNode(self.clusterLeaders) 49 | 50 | if(node==-1): 51 | return fileService_pb2.ack(success=False, message="No Active Clusters.") 52 | 53 | print("Node found is:{}, replica is:{}".format(node, node_replica)) 54 | 55 | channel1 = self.ip_channel_dict[node] 56 | stub1 = fileService_pb2_grpc.FileserviceStub(channel1) 57 | if(node_replica!="" and node_replica in self.ip_channel_dict): 58 | channel2 = self.ip_channel_dict[node_replica] 59 | stub2 = fileService_pb2_grpc.FileserviceStub(channel2) 60 | else: stub2 = None 61 | 62 | filename, username = "","" 63 | data = bytes("",'utf-8') 64 | 65 | for request in request_iterator: 66 | filename, username = request.filename, request.username 67 | data=request.data 68 | break 69 | 70 | if(self.fileExists(username, filename)): 71 | return 
fileService_pb2.ack(success=False, message="File already exists for this user. Please rename or delete file first.") 72 | 73 | 74 | def sendDataStreaming(username, filename, data): 75 | yield fileService_pb2.FileData(username=username, filename=filename, data=data) 76 | for request in request_iterator: 77 | data+=request.data 78 | yield fileService_pb2.FileData(username=request.username, filename=request.filename, data=request.data) 79 | 80 | resp1 = stub1.UploadFile(sendDataStreaming(username, filename, request.data)) 81 | 82 | # Replicate current file to alternate cluster 83 | if(stub2 is not None): 84 | t1 = Thread(target=self.replicateData, args=(stub2,username,filename,data,)) 85 | t1.start() 86 | 87 | # save Meta Map of username+filename->clusterName that its stored on 88 | if(resp1.success): 89 | db.saveMetaData(username, filename, clusterName, clusterReplica) 90 | db.saveUserFile(username, filename) 91 | 92 | return resp1 93 | 94 | 95 | # 96 | # This service gets invoked when user requests an uploaded file. 
97 | # 98 | def DownloadFile(self, request, context): 99 | 100 | # Check if file exists 101 | if(self.fileExists(request.username, request.filename)==False): 102 | return fileService_pb2.FileData(username=request.username, filename=request.filename, data=bytes("",'utf-8')) 103 | 104 | fileMeta = db.parseMetaData(request.username, request.filename) 105 | 106 | primaryIP, replicaIP = -1,-1 107 | channel1, channel2 = -1,-1 108 | if(fileMeta[0] in self.clusterLeaders): 109 | primaryIP = self.clusterLeaders[fileMeta[0]] 110 | channel1 = self.clusterStatus.isChannelAlive(primaryIP) 111 | 112 | if(fileMeta[1] in self.clusterLeaders): 113 | replicaIP = self.clusterLeaders[fileMeta[1]] 114 | channel2 = self.clusterStatus.isChannelAlive(replicaIP) 115 | 116 | if(channel1): 117 | stub = fileService_pb2_grpc.FileserviceStub(channel1) 118 | responses = stub.DownloadFile(fileService_pb2.FileInfo(username = request.username, filename = request.filename)) 119 | for response in responses: 120 | yield response 121 | elif(channel2): 122 | stub = fileService_pb2_grpc.FileserviceStub(channel2) 123 | responses = stub.DownloadFile(fileService_pb2.FileInfo(username = request.username, filename = request.filename)) 124 | for response in responses: 125 | yield response 126 | else: 127 | return fileService_pb2.FileData(username=request.username, filename=request.filename, data=bytes("",'utf-8')) 128 | 129 | # 130 | # Function to check if file exists in db (redis) 131 | # 132 | def fileExists(self,username, filename): 133 | return db.keyExists(username + "_" + filename) 134 | 135 | # 136 | # Function that takes care of file replication on alternate cluster 137 | # 138 | def replicateData(self,stub, username, filename, data): 139 | 140 | def streamData(username, filename, data): 141 | chunk_size = 4000000 142 | start, end = 0, chunk_size 143 | while(True): 144 | chunk = data[start:end] 145 | if(len(chunk)==0): break 146 | start=end 147 | end += chunk_size 148 | yield 
fileService_pb2.FileData(username=username, filename=filename, data=chunk) 149 | 150 | resp = stub.UploadFile(streamData(username,filename,data)) 151 | 152 | 153 | # 154 | # This services is invoked when user wants to delete a file 155 | # 156 | def FileDelete(self, request, context): 157 | print("In FileDelete") 158 | 159 | if(self.fileExists(request.username, request.filename)==False): 160 | return fileService_pb2.ack(success=False, message="File {} does not exist.".format(request.filename)) 161 | 162 | fileMeta = db.parseMetaData(request.username, request.filename) 163 | print("FileMeta = ", fileMeta) 164 | 165 | primaryIP, replicaIP = -1,-1 166 | channel1, channel2 = -1,-1 167 | if(fileMeta[0] in self.clusterLeaders): 168 | primaryIP = self.clusterLeaders[fileMeta[0]] 169 | channel1 = self.clusterStatus.isChannelAlive(primaryIP) 170 | 171 | if(fileMeta[1] in self.clusterLeaders): 172 | replicaIP = self.clusterLeaders[fileMeta[1]] 173 | channel2 = self.clusterStatus.isChannelAlive(replicaIP) 174 | 175 | print("PrimarIP={}, replicaIP={}".format(primaryIP,replicaIP)) 176 | 177 | if(channel1!=-1): 178 | stub = fileService_pb2_grpc.FileserviceStub(channel1) 179 | response = stub.FileDelete(fileService_pb2.FileInfo(username = request.username, filename = request.filename)) 180 | 181 | if(channel2!=-1): 182 | stub = fileService_pb2_grpc.FileserviceStub(channel2) 183 | response = stub.FileDelete(fileService_pb2.FileInfo(username = request.username, filename = request.filename)) 184 | 185 | if(response.success==True): 186 | db.deleteEntry(request.username + "_" + request.filename) 187 | return fileService_pb2.ack(success=True, message="File successfully deleted from cluster : " + fileMeta[0]) 188 | else: 189 | return fileService_pb2.ack(success=False, message="Internal error") 190 | 191 | # 192 | # This services is invoked when user wants to check if a file exists 193 | # 194 | def FileSearch(self, request, context): 195 | 196 | if(self.fileExists(request.username, 
request.filename)==False): 197 | return fileService_pb2.ack(success=False, message="File {} does not exist.".format(request.filename)) 198 | 199 | fileMeta = db.parseMetaData(request.username, request.filename) 200 | 201 | primaryIP, replicaIP = -1,-1 202 | channel1, channel2 = -1,-1 203 | 204 | if(fileMeta[0] in self.clusterLeaders): 205 | primaryIP = self.clusterLeaders[fileMeta[0]] 206 | channel1 = self.clusterStatus.isChannelAlive(primaryIP) 207 | 208 | if(fileMeta[1] in self.clusterLeaders): 209 | replicaIP = self.clusterLeaders[fileMeta[1]] 210 | channel2 = self.clusterStatus.isChannelAlive(replicaIP) 211 | 212 | if(channel1 != -1): 213 | stub = fileService_pb2_grpc.FileserviceStub(channel1) 214 | response = stub.FileSearch(fileService_pb2.FileInfo(username = request.username, filename = request.filename)) 215 | 216 | if(channel2 != -1): 217 | stub = fileService_pb2_grpc.FileserviceStub(channel2) 218 | response = stub.FileSearch(fileService_pb2.FileInfo(username = request.username, filename = request.filename)) 219 | 220 | if(response.success==True): 221 | return fileService_pb2.ack(success=True, message="File exists! 
") 222 | else: 223 | return fileService_pb2.ack(success=False, message="File does not exist in any cluster.") 224 | 225 | # 226 | # This services lists all files under a user 227 | # 228 | def FileList(self, request, context): 229 | userFiles = db.getUserFiles(request.username) 230 | return fileService_pb2.FileListResponse(Filenames=str(userFiles)) 231 | 232 | 233 | def run_server(hostIP, port): 234 | print('Supernode started on {}:{}'.format(hostIP, port)) 235 | 236 | #GRPC 237 | server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) 238 | fileService_pb2_grpc.add_FileserviceServicer_to_server(FileServer(hostIP, port), server) 239 | server.add_insecure_port('[::]:{}'.format(port)) 240 | server.start() 241 | 242 | try: 243 | while True: 244 | time.sleep(_ONE_DAY_IN_SECONDS) 245 | except KeyboardInterrupt: 246 | server.stop(0) 247 | 248 | # ----------------------Main-------------------- # 249 | if __name__ == '__main__': 250 | hostIP = "192.168.0.9" 251 | port = "9000" 252 | run_server(hostIP, port) -------------------------------------------------------------------------------- /SuperNode/proto/fileService_pb2.py: -------------------------------------------------------------------------------- 1 | # Generated by the protocol buffer compiler. DO NOT EDIT! 
2 | # source: proto/fileService.proto 3 | 4 | import sys 5 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import message as _message 8 | from google.protobuf import reflection as _reflection 9 | from google.protobuf import symbol_database as _symbol_database 10 | from google.protobuf import descriptor_pb2 11 | # @@protoc_insertion_point(imports) 12 | 13 | _sym_db = _symbol_database.Default() 14 | 15 | 16 | 17 | 18 | DESCRIPTOR = _descriptor.FileDescriptor( 19 | name='proto/fileService.proto', 20 | package='fileservice', 21 | syntax='proto3', 22 | serialized_pb=_b('\n\x17proto/fileService.proto\x12\x0b\x66ileservice\"<\n\x08\x46ileData\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x10\n\x08\x66ilename\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\"\'\n\x03\x61\x63k\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"\x1c\n\x08UserInfo\x12\x10\n\x08username\x18\x01 \x01(\t\".\n\x08\x46ileInfo\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x10\n\x08\x66ilename\x18\x02 \x01(\t\"%\n\x10\x46ileListResponse\x12\x11\n\tFilenames\x18\x01 \x01(\t\"<\n\x0b\x43lusterInfo\x12\n\n\x02ip\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\t\x12\x13\n\x0b\x63lusterName\x18\x03 \x01(\t\"G\n\x0c\x43lusterStats\x12\x11\n\tcpu_usage\x18\x01 \x01(\t\x12\x12\n\ndisk_space\x18\x02 \x01(\t\x12\x10\n\x08used_mem\x18\x03 
\x01(\t\"\x07\n\x05\x45mpty2\xaa\x04\n\x0b\x46ileservice\x12\x37\n\nUploadFile\x12\x15.fileservice.FileData\x1a\x10.fileservice.ack(\x01\x12>\n\x0c\x44ownloadFile\x12\x15.fileservice.FileInfo\x1a\x15.fileservice.FileData0\x01\x12\x35\n\nFileSearch\x12\x15.fileservice.FileInfo\x1a\x10.fileservice.ack\x12:\n\rReplicateFile\x12\x15.fileservice.FileData\x1a\x10.fileservice.ack(\x01\x12@\n\x08\x46ileList\x12\x15.fileservice.UserInfo\x1a\x1d.fileservice.FileListResponse\x12\x35\n\nFileDelete\x12\x15.fileservice.FileInfo\x1a\x10.fileservice.ack\x12\x37\n\nUpdateFile\x12\x15.fileservice.FileData\x1a\x10.fileservice.ack(\x01\x12@\n\x0fgetClusterStats\x12\x12.fileservice.Empty\x1a\x19.fileservice.ClusterStats\x12;\n\rgetLeaderInfo\x12\x18.fileservice.ClusterInfo\x1a\x10.fileservice.ackb\x06proto3') 23 | ) 24 | 25 | 26 | 27 | 28 | _FILEDATA = _descriptor.Descriptor( 29 | name='FileData', 30 | full_name='fileservice.FileData', 31 | filename=None, 32 | file=DESCRIPTOR, 33 | containing_type=None, 34 | fields=[ 35 | _descriptor.FieldDescriptor( 36 | name='username', full_name='fileservice.FileData.username', index=0, 37 | number=1, type=9, cpp_type=9, label=1, 38 | has_default_value=False, default_value=_b("").decode('utf-8'), 39 | message_type=None, enum_type=None, containing_type=None, 40 | is_extension=False, extension_scope=None, 41 | options=None, file=DESCRIPTOR), 42 | _descriptor.FieldDescriptor( 43 | name='filename', full_name='fileservice.FileData.filename', index=1, 44 | number=2, type=9, cpp_type=9, label=1, 45 | has_default_value=False, default_value=_b("").decode('utf-8'), 46 | message_type=None, enum_type=None, containing_type=None, 47 | is_extension=False, extension_scope=None, 48 | options=None, file=DESCRIPTOR), 49 | _descriptor.FieldDescriptor( 50 | name='data', full_name='fileservice.FileData.data', index=2, 51 | number=3, type=12, cpp_type=9, label=1, 52 | has_default_value=False, default_value=_b(""), 53 | message_type=None, enum_type=None, 
containing_type=None, 54 | is_extension=False, extension_scope=None, 55 | options=None, file=DESCRIPTOR), 56 | ], 57 | extensions=[ 58 | ], 59 | nested_types=[], 60 | enum_types=[ 61 | ], 62 | options=None, 63 | is_extendable=False, 64 | syntax='proto3', 65 | extension_ranges=[], 66 | oneofs=[ 67 | ], 68 | serialized_start=40, 69 | serialized_end=100, 70 | ) 71 | 72 | 73 | _ACK = _descriptor.Descriptor( 74 | name='ack', 75 | full_name='fileservice.ack', 76 | filename=None, 77 | file=DESCRIPTOR, 78 | containing_type=None, 79 | fields=[ 80 | _descriptor.FieldDescriptor( 81 | name='success', full_name='fileservice.ack.success', index=0, 82 | number=1, type=8, cpp_type=7, label=1, 83 | has_default_value=False, default_value=False, 84 | message_type=None, enum_type=None, containing_type=None, 85 | is_extension=False, extension_scope=None, 86 | options=None, file=DESCRIPTOR), 87 | _descriptor.FieldDescriptor( 88 | name='message', full_name='fileservice.ack.message', index=1, 89 | number=2, type=9, cpp_type=9, label=1, 90 | has_default_value=False, default_value=_b("").decode('utf-8'), 91 | message_type=None, enum_type=None, containing_type=None, 92 | is_extension=False, extension_scope=None, 93 | options=None, file=DESCRIPTOR), 94 | ], 95 | extensions=[ 96 | ], 97 | nested_types=[], 98 | enum_types=[ 99 | ], 100 | options=None, 101 | is_extendable=False, 102 | syntax='proto3', 103 | extension_ranges=[], 104 | oneofs=[ 105 | ], 106 | serialized_start=102, 107 | serialized_end=141, 108 | ) 109 | 110 | 111 | _USERINFO = _descriptor.Descriptor( 112 | name='UserInfo', 113 | full_name='fileservice.UserInfo', 114 | filename=None, 115 | file=DESCRIPTOR, 116 | containing_type=None, 117 | fields=[ 118 | _descriptor.FieldDescriptor( 119 | name='username', full_name='fileservice.UserInfo.username', index=0, 120 | number=1, type=9, cpp_type=9, label=1, 121 | has_default_value=False, default_value=_b("").decode('utf-8'), 122 | message_type=None, enum_type=None, containing_type=None, 
123 | is_extension=False, extension_scope=None, 124 | options=None, file=DESCRIPTOR), 125 | ], 126 | extensions=[ 127 | ], 128 | nested_types=[], 129 | enum_types=[ 130 | ], 131 | options=None, 132 | is_extendable=False, 133 | syntax='proto3', 134 | extension_ranges=[], 135 | oneofs=[ 136 | ], 137 | serialized_start=143, 138 | serialized_end=171, 139 | ) 140 | 141 | 142 | _FILEINFO = _descriptor.Descriptor( 143 | name='FileInfo', 144 | full_name='fileservice.FileInfo', 145 | filename=None, 146 | file=DESCRIPTOR, 147 | containing_type=None, 148 | fields=[ 149 | _descriptor.FieldDescriptor( 150 | name='username', full_name='fileservice.FileInfo.username', index=0, 151 | number=1, type=9, cpp_type=9, label=1, 152 | has_default_value=False, default_value=_b("").decode('utf-8'), 153 | message_type=None, enum_type=None, containing_type=None, 154 | is_extension=False, extension_scope=None, 155 | options=None, file=DESCRIPTOR), 156 | _descriptor.FieldDescriptor( 157 | name='filename', full_name='fileservice.FileInfo.filename', index=1, 158 | number=2, type=9, cpp_type=9, label=1, 159 | has_default_value=False, default_value=_b("").decode('utf-8'), 160 | message_type=None, enum_type=None, containing_type=None, 161 | is_extension=False, extension_scope=None, 162 | options=None, file=DESCRIPTOR), 163 | ], 164 | extensions=[ 165 | ], 166 | nested_types=[], 167 | enum_types=[ 168 | ], 169 | options=None, 170 | is_extendable=False, 171 | syntax='proto3', 172 | extension_ranges=[], 173 | oneofs=[ 174 | ], 175 | serialized_start=173, 176 | serialized_end=219, 177 | ) 178 | 179 | 180 | _FILELISTRESPONSE = _descriptor.Descriptor( 181 | name='FileListResponse', 182 | full_name='fileservice.FileListResponse', 183 | filename=None, 184 | file=DESCRIPTOR, 185 | containing_type=None, 186 | fields=[ 187 | _descriptor.FieldDescriptor( 188 | name='Filenames', full_name='fileservice.FileListResponse.Filenames', index=0, 189 | number=1, type=9, cpp_type=9, label=1, 190 | 
has_default_value=False, default_value=_b("").decode('utf-8'), 191 | message_type=None, enum_type=None, containing_type=None, 192 | is_extension=False, extension_scope=None, 193 | options=None, file=DESCRIPTOR), 194 | ], 195 | extensions=[ 196 | ], 197 | nested_types=[], 198 | enum_types=[ 199 | ], 200 | options=None, 201 | is_extendable=False, 202 | syntax='proto3', 203 | extension_ranges=[], 204 | oneofs=[ 205 | ], 206 | serialized_start=221, 207 | serialized_end=258, 208 | ) 209 | 210 | 211 | _CLUSTERINFO = _descriptor.Descriptor( 212 | name='ClusterInfo', 213 | full_name='fileservice.ClusterInfo', 214 | filename=None, 215 | file=DESCRIPTOR, 216 | containing_type=None, 217 | fields=[ 218 | _descriptor.FieldDescriptor( 219 | name='ip', full_name='fileservice.ClusterInfo.ip', index=0, 220 | number=1, type=9, cpp_type=9, label=1, 221 | has_default_value=False, default_value=_b("").decode('utf-8'), 222 | message_type=None, enum_type=None, containing_type=None, 223 | is_extension=False, extension_scope=None, 224 | options=None, file=DESCRIPTOR), 225 | _descriptor.FieldDescriptor( 226 | name='port', full_name='fileservice.ClusterInfo.port', index=1, 227 | number=2, type=9, cpp_type=9, label=1, 228 | has_default_value=False, default_value=_b("").decode('utf-8'), 229 | message_type=None, enum_type=None, containing_type=None, 230 | is_extension=False, extension_scope=None, 231 | options=None, file=DESCRIPTOR), 232 | _descriptor.FieldDescriptor( 233 | name='clusterName', full_name='fileservice.ClusterInfo.clusterName', index=2, 234 | number=3, type=9, cpp_type=9, label=1, 235 | has_default_value=False, default_value=_b("").decode('utf-8'), 236 | message_type=None, enum_type=None, containing_type=None, 237 | is_extension=False, extension_scope=None, 238 | options=None, file=DESCRIPTOR), 239 | ], 240 | extensions=[ 241 | ], 242 | nested_types=[], 243 | enum_types=[ 244 | ], 245 | options=None, 246 | is_extendable=False, 247 | syntax='proto3', 248 | extension_ranges=[], 249 
| oneofs=[ 250 | ], 251 | serialized_start=260, 252 | serialized_end=320, 253 | ) 254 | 255 | 256 | _CLUSTERSTATS = _descriptor.Descriptor( 257 | name='ClusterStats', 258 | full_name='fileservice.ClusterStats', 259 | filename=None, 260 | file=DESCRIPTOR, 261 | containing_type=None, 262 | fields=[ 263 | _descriptor.FieldDescriptor( 264 | name='cpu_usage', full_name='fileservice.ClusterStats.cpu_usage', index=0, 265 | number=1, type=9, cpp_type=9, label=1, 266 | has_default_value=False, default_value=_b("").decode('utf-8'), 267 | message_type=None, enum_type=None, containing_type=None, 268 | is_extension=False, extension_scope=None, 269 | options=None, file=DESCRIPTOR), 270 | _descriptor.FieldDescriptor( 271 | name='disk_space', full_name='fileservice.ClusterStats.disk_space', index=1, 272 | number=2, type=9, cpp_type=9, label=1, 273 | has_default_value=False, default_value=_b("").decode('utf-8'), 274 | message_type=None, enum_type=None, containing_type=None, 275 | is_extension=False, extension_scope=None, 276 | options=None, file=DESCRIPTOR), 277 | _descriptor.FieldDescriptor( 278 | name='used_mem', full_name='fileservice.ClusterStats.used_mem', index=2, 279 | number=3, type=9, cpp_type=9, label=1, 280 | has_default_value=False, default_value=_b("").decode('utf-8'), 281 | message_type=None, enum_type=None, containing_type=None, 282 | is_extension=False, extension_scope=None, 283 | options=None, file=DESCRIPTOR), 284 | ], 285 | extensions=[ 286 | ], 287 | nested_types=[], 288 | enum_types=[ 289 | ], 290 | options=None, 291 | is_extendable=False, 292 | syntax='proto3', 293 | extension_ranges=[], 294 | oneofs=[ 295 | ], 296 | serialized_start=322, 297 | serialized_end=393, 298 | ) 299 | 300 | 301 | _EMPTY = _descriptor.Descriptor( 302 | name='Empty', 303 | full_name='fileservice.Empty', 304 | filename=None, 305 | file=DESCRIPTOR, 306 | containing_type=None, 307 | fields=[ 308 | ], 309 | extensions=[ 310 | ], 311 | nested_types=[], 312 | enum_types=[ 313 | ], 314 | 
options=None, 315 | is_extendable=False, 316 | syntax='proto3', 317 | extension_ranges=[], 318 | oneofs=[ 319 | ], 320 | serialized_start=395, 321 | serialized_end=402, 322 | ) 323 | 324 | DESCRIPTOR.message_types_by_name['FileData'] = _FILEDATA 325 | DESCRIPTOR.message_types_by_name['ack'] = _ACK 326 | DESCRIPTOR.message_types_by_name['UserInfo'] = _USERINFO 327 | DESCRIPTOR.message_types_by_name['FileInfo'] = _FILEINFO 328 | DESCRIPTOR.message_types_by_name['FileListResponse'] = _FILELISTRESPONSE 329 | DESCRIPTOR.message_types_by_name['ClusterInfo'] = _CLUSTERINFO 330 | DESCRIPTOR.message_types_by_name['ClusterStats'] = _CLUSTERSTATS 331 | DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY 332 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 333 | 334 | FileData = _reflection.GeneratedProtocolMessageType('FileData', (_message.Message,), dict( 335 | DESCRIPTOR = _FILEDATA, 336 | __module__ = 'proto.fileService_pb2' 337 | # @@protoc_insertion_point(class_scope:fileservice.FileData) 338 | )) 339 | _sym_db.RegisterMessage(FileData) 340 | 341 | ack = _reflection.GeneratedProtocolMessageType('ack', (_message.Message,), dict( 342 | DESCRIPTOR = _ACK, 343 | __module__ = 'proto.fileService_pb2' 344 | # @@protoc_insertion_point(class_scope:fileservice.ack) 345 | )) 346 | _sym_db.RegisterMessage(ack) 347 | 348 | UserInfo = _reflection.GeneratedProtocolMessageType('UserInfo', (_message.Message,), dict( 349 | DESCRIPTOR = _USERINFO, 350 | __module__ = 'proto.fileService_pb2' 351 | # @@protoc_insertion_point(class_scope:fileservice.UserInfo) 352 | )) 353 | _sym_db.RegisterMessage(UserInfo) 354 | 355 | FileInfo = _reflection.GeneratedProtocolMessageType('FileInfo', (_message.Message,), dict( 356 | DESCRIPTOR = _FILEINFO, 357 | __module__ = 'proto.fileService_pb2' 358 | # @@protoc_insertion_point(class_scope:fileservice.FileInfo) 359 | )) 360 | _sym_db.RegisterMessage(FileInfo) 361 | 362 | FileListResponse = _reflection.GeneratedProtocolMessageType('FileListResponse', 
(_message.Message,), dict( 363 | DESCRIPTOR = _FILELISTRESPONSE, 364 | __module__ = 'proto.fileService_pb2' 365 | # @@protoc_insertion_point(class_scope:fileservice.FileListResponse) 366 | )) 367 | _sym_db.RegisterMessage(FileListResponse) 368 | 369 | ClusterInfo = _reflection.GeneratedProtocolMessageType('ClusterInfo', (_message.Message,), dict( 370 | DESCRIPTOR = _CLUSTERINFO, 371 | __module__ = 'proto.fileService_pb2' 372 | # @@protoc_insertion_point(class_scope:fileservice.ClusterInfo) 373 | )) 374 | _sym_db.RegisterMessage(ClusterInfo) 375 | 376 | ClusterStats = _reflection.GeneratedProtocolMessageType('ClusterStats', (_message.Message,), dict( 377 | DESCRIPTOR = _CLUSTERSTATS, 378 | __module__ = 'proto.fileService_pb2' 379 | # @@protoc_insertion_point(class_scope:fileservice.ClusterStats) 380 | )) 381 | _sym_db.RegisterMessage(ClusterStats) 382 | 383 | Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict( 384 | DESCRIPTOR = _EMPTY, 385 | __module__ = 'proto.fileService_pb2' 386 | # @@protoc_insertion_point(class_scope:fileservice.Empty) 387 | )) 388 | _sym_db.RegisterMessage(Empty) 389 | 390 | 391 | 392 | _FILESERVICE = _descriptor.ServiceDescriptor( 393 | name='Fileservice', 394 | full_name='fileservice.Fileservice', 395 | file=DESCRIPTOR, 396 | index=0, 397 | options=None, 398 | serialized_start=405, 399 | serialized_end=959, 400 | methods=[ 401 | _descriptor.MethodDescriptor( 402 | name='UploadFile', 403 | full_name='fileservice.Fileservice.UploadFile', 404 | index=0, 405 | containing_service=None, 406 | input_type=_FILEDATA, 407 | output_type=_ACK, 408 | options=None, 409 | ), 410 | _descriptor.MethodDescriptor( 411 | name='DownloadFile', 412 | full_name='fileservice.Fileservice.DownloadFile', 413 | index=1, 414 | containing_service=None, 415 | input_type=_FILEINFO, 416 | output_type=_FILEDATA, 417 | options=None, 418 | ), 419 | _descriptor.MethodDescriptor( 420 | name='FileSearch', 421 | 
full_name='fileservice.Fileservice.FileSearch', 422 | index=2, 423 | containing_service=None, 424 | input_type=_FILEINFO, 425 | output_type=_ACK, 426 | options=None, 427 | ), 428 | _descriptor.MethodDescriptor( 429 | name='ReplicateFile', 430 | full_name='fileservice.Fileservice.ReplicateFile', 431 | index=3, 432 | containing_service=None, 433 | input_type=_FILEDATA, 434 | output_type=_ACK, 435 | options=None, 436 | ), 437 | _descriptor.MethodDescriptor( 438 | name='FileList', 439 | full_name='fileservice.Fileservice.FileList', 440 | index=4, 441 | containing_service=None, 442 | input_type=_USERINFO, 443 | output_type=_FILELISTRESPONSE, 444 | options=None, 445 | ), 446 | _descriptor.MethodDescriptor( 447 | name='FileDelete', 448 | full_name='fileservice.Fileservice.FileDelete', 449 | index=5, 450 | containing_service=None, 451 | input_type=_FILEINFO, 452 | output_type=_ACK, 453 | options=None, 454 | ), 455 | _descriptor.MethodDescriptor( 456 | name='UpdateFile', 457 | full_name='fileservice.Fileservice.UpdateFile', 458 | index=6, 459 | containing_service=None, 460 | input_type=_FILEDATA, 461 | output_type=_ACK, 462 | options=None, 463 | ), 464 | _descriptor.MethodDescriptor( 465 | name='getClusterStats', 466 | full_name='fileservice.Fileservice.getClusterStats', 467 | index=7, 468 | containing_service=None, 469 | input_type=_EMPTY, 470 | output_type=_CLUSTERSTATS, 471 | options=None, 472 | ), 473 | _descriptor.MethodDescriptor( 474 | name='getLeaderInfo', 475 | full_name='fileservice.Fileservice.getLeaderInfo', 476 | index=8, 477 | containing_service=None, 478 | input_type=_CLUSTERINFO, 479 | output_type=_ACK, 480 | options=None, 481 | ), 482 | ]) 483 | _sym_db.RegisterServiceDescriptor(_FILESERVICE) 484 | 485 | DESCRIPTOR.services_by_name['Fileservice'] = _FILESERVICE 486 | 487 | # @@protoc_insertion_point(module_scope) 488 | -------------------------------------------------------------------------------- /service/FileServer.py: 
from concurrent import futures

from threading import Thread
import os
import grpc
import sys
sys.path.append('../generated')
sys.path.append('../utils')
sys.path.append('../proto')
import db
import fileService_pb2_grpc
import fileService_pb2
import heartbeat_pb2_grpc
import heartbeat_pb2
import time
import yaml
import threading
import hashlib
from ShardingHandler import ShardingHandler
from DownloadHelper import DownloadHelper
from DeleteHelper import DeleteHelper
from lru import LRU

# Maximum size (in bytes) of a single shard; an upload larger than this is
# split across the least-loaded nodes of the cluster.
UPLOAD_SHARD_SIZE = 50*1024*1024

#
# *** FileServer Service : FileServer service as per fileService.proto file. ***
# *** This class implements all the required methods to serve the user requests. ***
#
class FileServer(fileService_pb2_grpc.FileserviceServicer):
    def __init__(self, hostname, server_port, activeNodesChecker, shardingHandler, superNodeAddress):
        """Hold per-node state: addresses, cluster helpers and a small LRU cache.

        hostname/server_port identify this node; activeNodesChecker tracks live
        gRPC channels; shardingHandler picks least-loaded nodes;
        superNodeAddress is where leader announcements go.
        """
        self.serverPort = server_port
        self.serverAddress = hostname+":"+server_port
        self.activeNodesChecker = activeNodesChecker
        self.shardingHandler = shardingHandler
        self.hostname = hostname
        self.lru = LRU(5)  # caches at most 5 downloaded files on local disk
        self.superNodeAddress = superNodeAddress

    #
    # This service gets invoked when user uploads a new file.
    #
    def UploadFile(self, request_iterator, context):
        """Receive a client-streamed upload.

        Leader path: rejects duplicates, splits the stream into shards of at
        most UPLOAD_SHARD_SIZE bytes, pushes each shard (plus a replica) to the
        least-loaded nodes, then persists and replicates the metadata.
        Non-leader path: stores the received chunk locally and asynchronously
        replicates it to the replica node named in the request.
        Returns an ack message describing success/failure.
        """
        print("Inside Server method ---------- UploadFile")
        username, filename = "", ""
        active_ip_channel_dict = self.activeNodesChecker.getActiveChannels()

        # list to store the info related to file location.
        metaData = []

        # If the node is the leader of the cluster.
        if(int(db.get("primaryStatus"))==1):
            print("Inside primary upload")
            currDataSize = 0
            currDataBytes = bytes("",'utf-8')
            seqNo = 1
            response = None  # ack of the last shard that was sent out

            # Step 1:
            # Get 2 least loaded nodes based on the CPU stats.
            # 'node' is where the actual data goes and 'node_replica' is where the replica goes.
            node, node_replica = self.getLeastLoadedNode()
            if(node==-1):
                return fileService_pb2.ack(success=False, message="Error Saving File. No active nodes.")

            # Step 2:
            # Read the first message to learn the file identity and reject
            # duplicates.
            # BUGFIX: the original dereferenced `request` after this loop,
            # which raises NameError when the client stream is empty.
            firstRequest = None
            for request in request_iterator:
                firstRequest = request
                username, filename = request.username, request.filename
                print("Key is-----------------", username+"_"+filename)
                if(self.fileExists(username, filename)==1):
                    print("sending neg ack")
                    return fileService_pb2.ack(success=False, message="File already exists for this user. Please rename or delete file first.")
                break
            if(firstRequest is None):
                return fileService_pb2.ack(success=False, message="Error Saving File. Empty upload stream.")

            # Step 3:
            # Accumulate chunks and flush a shard to the least utilized node
            # (through gRPC streaming) whenever UPLOAD_SHARD_SIZE is exceeded.
            currDataSize += sys.getsizeof(firstRequest.data)
            currDataBytes += firstRequest.data

            for request in request_iterator:
                if((currDataSize + sys.getsizeof(request.data)) > UPLOAD_SHARD_SIZE):
                    response = self.sendDataToDestination(currDataBytes, node, node_replica, username, filename, seqNo, active_ip_channel_dict[node])
                    metaData.append([node, seqNo, node_replica])
                    currDataBytes = request.data
                    currDataSize = sys.getsizeof(request.data)
                    seqNo += 1
                    node, node_replica = self.getLeastLoadedNode()
                else:
                    currDataSize += sys.getsizeof(request.data)
                    currDataBytes += request.data

            # Flush the trailing partial shard.
            if(currDataSize > 0):
                response = self.sendDataToDestination(currDataBytes, node, node_replica, username, filename, seqNo, active_ip_channel_dict[node])
                metaData.append([node, seqNo, node_replica])

            # Step 4:
            # Save the metadata on the primary node after the completion of sharding.
            # BUGFIX: guard against `response` never having been assigned.
            if(response is not None and response.success):
                db.saveMetaData(username, filename, metaData)
                db.saveUserFile(username, filename)

                # Step 5:
                # Make a gRPC call to replicate the metadata on all the other nodes.
                self.saveMetadataOnAllNodes(username, filename, metaData)

            return fileService_pb2.ack(success=True, message="Saved")

        # If the node is not the leader: persist the chunk locally.
        else:
            print("Saving the data on my local db")
            sequenceNumberOfChunk = 0
            dataToBeSaved = bytes("",'utf-8')

            # Gather all the data from the gRPC stream.
            # BUGFIX: remember the last message instead of reading `request`
            # after the loop (NameError on an empty stream).
            lastRequest = None
            for request in request_iterator:
                lastRequest = request
                username, filename, sequenceNumberOfChunk = request.username, request.filename, request.seqNo
                dataToBeSaved += request.data
            if(lastRequest is None):
                return fileService_pb2.ack(success=False, message="Error Saving File. Empty upload stream.")

            # Save the data in local DB.
            key = username + "_" + filename + "_" + str(sequenceNumberOfChunk)
            db.setData(key, dataToBeSaved)

            # After saving the chunk in the local DB, make a gRPC call (on a
            # background thread) to save a replica of the chunk on a different
            # node, but only if a replicaNode is present.
            if(lastRequest.replicaNode!=""):
                print("Sending replication to ", lastRequest.replicaNode)
                replica_channel = active_ip_channel_dict[lastRequest.replicaNode]
                t1 = Thread(target=self.replicateChunkData, args=(replica_channel, dataToBeSaved, username, filename, sequenceNumberOfChunk,))
                t1.start()

            return fileService_pb2.ack(success=True, message="Saved")

    def replicateChunkData(self, replica_channel, dataToBeSaved, username, filename, sequenceNumberOfChunk):
        """Stream one chunk to its replica node (runs on a worker thread)."""
        stub = fileService_pb2_grpc.FileserviceStub(replica_channel)
        response = stub.UploadFile(self.sendDataInStream(dataToBeSaved, username, filename, sequenceNumberOfChunk, ""))

    # This helper method is responsible for sending the data to destination node through gRPC stream.
144 | def sendDataToDestination(self, currDataBytes, node, nodeReplica, username, filename, seqNo, channel): 145 | if(node==self.serverAddress): 146 | key = username + "_" + filename + "_" + str(seqNo) 147 | db.setData(key, currDataBytes) 148 | if(nodeReplica!=""): 149 | print("Sending replication to ", nodeReplica) 150 | active_ip_channel_dict = self.activeNodesChecker.getActiveChannels() 151 | replica_channel = active_ip_channel_dict[nodeReplica] 152 | stub = fileService_pb2_grpc.FileserviceStub(replica_channel) 153 | response = stub.UploadFile(self.sendDataInStream(currDataBytes, username, filename, seqNo, "")) 154 | return response 155 | else: 156 | print("Sending the UPLOAD_SHARD_SIZE to node :", node) 157 | stub = fileService_pb2_grpc.FileserviceStub(channel) 158 | response = stub.UploadFile(self.sendDataInStream(currDataBytes, username, filename, seqNo, nodeReplica)) 159 | print("Response from uploadFile: ", response.message) 160 | return response 161 | 162 | # This helper method actually makes chunks of less than 4MB and streams them through gRPC. 163 | # 4 MB is the max data packet size in gRPC while sending. That's why it is necessary. 164 | def sendDataInStream(self, dataBytes, username, filename, seqNo, replicaNode): 165 | chunk_size = 4000000 166 | start, end = 0, chunk_size 167 | while(True): 168 | chunk = dataBytes[start:end] 169 | if(len(chunk)==0): break 170 | start=end 171 | end += chunk_size 172 | yield fileService_pb2.FileData(username=username, filename=filename, data=chunk, seqNo=seqNo, replicaNode=replicaNode) 173 | 174 | # 175 | # This service gets invoked when user requests an uploaded file. 176 | # 177 | def DownloadFile(self, request, context): 178 | 179 | print("Inside Download") 180 | 181 | # If the node is the leader of the cluster. 
182 | if(int(db.get("primaryStatus"))==1): 183 | 184 | print("Inside primary download") 185 | 186 | # Check if file exists 187 | if(self.fileExists(request.username, request.filename)==0): 188 | print("File does not exist") 189 | yield fileService_pb2.FileData(username = request.username, filename = request.filename, data=bytes("",'utf-8'), seqNo = 0) 190 | return 191 | 192 | # If the file is present in cache then just fetch it and return. No need to go to individual node. 193 | if(self.lru.has_key(request.username + "_" + request.filename)): 194 | print("Fetching data from Cache") 195 | CHUNK_SIZE=4000000 196 | fileName = request.username + "_" + request.filename 197 | filePath = self.lru[fileName] 198 | outfile = os.path.join(filePath, fileName) 199 | 200 | with open(outfile, 'rb') as infile: 201 | while True: 202 | chunk = infile.read(CHUNK_SIZE) 203 | if not chunk: break 204 | yield fileService_pb2.FileData(username=request.username, filename=request.filename, data=chunk, seqNo=1) 205 | 206 | # If the file is not present in the cache, then fetch it from the individual node. 207 | else: 208 | print("Fetching the metadata") 209 | 210 | # Step 1: get metadata i.e. the location of chunks. 211 | metaData = db.parseMetaData(request.username, request.filename) 212 | 213 | print(metaData) 214 | 215 | #Step 2: make gRPC calls and get the fileData from all the nodes. 216 | downloadHelper = DownloadHelper(self.hostname, self.serverPort, self.activeNodesChecker) 217 | data = downloadHelper.getDataFromNodes(request.username, request.filename, metaData) 218 | print("Sending the data to client") 219 | 220 | #Step 3: send the file to supernode using gRPC streaming. 
221 | chunk_size = 4000000 222 | start, end = 0, chunk_size 223 | while(True): 224 | chunk = data[start:end] 225 | if(len(chunk)==0): break 226 | start=end 227 | end += chunk_size 228 | yield fileService_pb2.FileData(username = request.username, filename = request.filename, data=chunk, seqNo = request.seqNo) 229 | 230 | # Step 4: update the cache based on LRU(least recently used) algorithm. 231 | self.saveInCache(request.username, request.filename, data) 232 | 233 | # If the node is not the leader, then just fetch the fileChunk from the local db and stream it back to leader. 234 | else: 235 | key = request.username + "_" + request.filename + "_" + str(request.seqNo) 236 | print(key) 237 | data = db.getFileData(key) 238 | chunk_size = 4000000 239 | start, end = 0, chunk_size 240 | while(True): 241 | chunk = data[start:end] 242 | if(len(chunk)==0): break 243 | start=end 244 | end += chunk_size 245 | yield fileService_pb2.FileData(username = request.username, filename = request.filename, data=chunk, seqNo = request.seqNo) 246 | 247 | # This service is responsible fetching all the files. 248 | def FileList(self, request, context): 249 | print("File List Called") 250 | userFiles = db.getUserFiles(request.username) 251 | return fileService_pb2.FileListResponse(Filenames=str(userFiles)) 252 | 253 | # This helper method checks whether the file is present in db or not. 254 | def fileExists(self, username, filename): 255 | print("isFile Present", db.keyExists(username + "_" + filename)) 256 | return db.keyExists(username + "_" + filename) 257 | 258 | # This helper method returns 2 least loaded nodes from the cluster. 259 | def getLeastLoadedNode(self): 260 | print("Ready to enter sharding handler") 261 | node, node_replica = self.shardingHandler.leastUtilizedNode() 262 | print("Least loaded node is :", node) 263 | print("Replica node - ", node_replica) 264 | return node, node_replica 265 | 266 | # This helper method replicates the metadata on all nodes. 
267 | def saveMetadataOnAllNodes(self, username, filename, metadata): 268 | print("saveMetadataOnAllNodes") 269 | active_ip_channel_dict = self.activeNodesChecker.getActiveChannels() 270 | uniqueFileName = username + "_" + filename 271 | for ip, channel in active_ip_channel_dict.items(): 272 | if(self.isChannelAlive(channel)): 273 | stub = fileService_pb2_grpc.FileserviceStub(channel) 274 | response = stub.MetaDataInfo(fileService_pb2.MetaData(filename=uniqueFileName, seqValues=str(metadata).encode('utf-8'))) 275 | print(response.message) 276 | 277 | # This service is responsible for saving the metadata on local db. 278 | def MetaDataInfo(self, request, context): 279 | print("Inside Metadatainfo") 280 | fileName = request.filename 281 | seqValues = request.seqValues 282 | db.saveMetaDataOnOtherNodes(fileName, seqValues) 283 | ack_message = "Successfully saved the metadata on " + self.serverAddress 284 | return fileService_pb2.ack(success=True, message=ack_message) 285 | 286 | # This helper method checks whethere created channel is alive or not 287 | def isChannelAlive(self, channel): 288 | try: 289 | grpc.channel_ready_future(channel).result(timeout=1) 290 | except grpc.FutureTimeoutError: 291 | #print("Connection timeout. Unable to connect to port ") 292 | return False 293 | return True 294 | 295 | # This helper method is responsible for updating the cache for faster lookup. 
296 | def saveInCache(self, username, filename, data): 297 | if(len(self.lru.items())>=self.lru.get_size()): 298 | fileToDel, path = self.lru.peek_last_item() 299 | os.remove(path+"/"+fileToDel) 300 | 301 | self.lru[username+"_"+filename]="cache" 302 | filePath=os.path.join('cache', username+"_"+filename) 303 | saveFile = open(filePath, 'wb') 304 | saveFile.write(data) 305 | saveFile.close() 306 | 307 | # This service is responsible for sending the whole cluster stats to superNode 308 | def getClusterStats(self, request, context): 309 | print("Inside getClusterStats") 310 | active_ip_channel_dict = self.activeNodesChecker.getActiveChannels() 311 | total_cpu_usage, total_disk_space, total_used_mem = 0.0,0.0,0.0 312 | total_nodes = 0 313 | for ip, channel in active_ip_channel_dict.items(): 314 | if(self.isChannelAlive(channel)): 315 | stub = heartbeat_pb2_grpc.HearBeatStub(channel) 316 | stats = stub.isAlive(heartbeat_pb2.NodeInfo(ip="", port="")) 317 | total_cpu_usage = float(stats.cpu_usage) 318 | total_disk_space = float(stats.disk_space) 319 | total_used_mem = float(stats.used_mem) 320 | total_nodes+=1 321 | 322 | if(total_nodes==0): 323 | return fileService_pb2.ClusterStats(cpu_usage = str(100.00), disk_space = str(100.00), used_mem = str(100.00)) 324 | 325 | return fileService_pb2.ClusterStats(cpu_usage = str(total_cpu_usage/total_nodes), disk_space = str(total_disk_space/total_nodes), used_mem = str(total_used_mem/total_nodes)) 326 | 327 | # This service is responsible for sending the leader info to superNode as soon as leader changes. 328 | def getLeaderInfo(self, request, context): 329 | channel = grpc.insecure_channel('{}'.format(self.superNodeAddress)) 330 | stub = fileService_pb2_grpc.FileserviceStub(channel) 331 | response = stub.getLeaderInfo(fileService_pb2.ClusterInfo(ip = self.hostname, port= self.serverPort, clusterName="team1")) 332 | print(response.message) 333 | 334 | # 335 | # This service gets invoked when user deletes a file. 
336 | # 337 | def FileDelete(self, request, data): 338 | username = request.username 339 | filename = request.filename 340 | 341 | if(int(db.get("primaryStatus"))==1): 342 | 343 | if(self.fileExists(username, filename)==0): 344 | print("File does not exist") 345 | return fileService_pb2.ack(success=False, message="File does not exist") 346 | 347 | print("Fetching metadata from leader") 348 | metadata = db.parseMetaData(request.username, request.filename) 349 | print("Successfully retrieved metadata from leader") 350 | 351 | deleteHelper = DeleteHelper(self.hostname, self.serverPort, self.activeNodesChecker) 352 | deleteHelper.deleteFileChunksAndMetaFromNodes(username, filename, metadata) 353 | 354 | return fileService_pb2.ack(success=True, message="Successfully deleted file from the cluster") 355 | 356 | else: 357 | seqNo = -1 358 | 359 | try: 360 | seqNo = request.seqNo 361 | except: 362 | return fileService_pb2.ack(success=False, message="Internal Error") 363 | 364 | metaDataKey = username+"_"+filename 365 | dataChunkKey = username+"_"+filename+"_"+str(seqNo) 366 | 367 | if(db.keyExists(metaDataKey)==1): 368 | print("FileDelete: Deleting the metadataEntry from local db :") 369 | db.deleteEntry(metaDataKey) 370 | if(db.keyExists(dataChunkKey)): 371 | print("FileDelete: Deleting the data chunk from local db: ") 372 | db.deleteEntry(dataChunkKey) 373 | 374 | return fileService_pb2.ack(success=True, message="Successfully deleted file from the cluster") 375 | 376 | # 377 | # This service gets invoked when user wants to check if the file is present. 
378 | # 379 | def FileSearch(self, request, data): 380 | username, filename = request.username, request.filename 381 | 382 | if(self.fileExists(username, filename)==1): 383 | return fileService_pb2.ack(success=True, message="File exists in the cluster.") 384 | else: 385 | return fileService_pb2.ack(success=False, message="File does not exist in the cluster.") 386 | 387 | # 388 | # This service gets invoked when user wants to update a file. 389 | # 390 | def UpdateFile(self, request_iterator, context): 391 | 392 | username, filename = "", "" 393 | fileData = bytes("",'utf-8') 394 | 395 | for request in request_iterator: 396 | fileData+=request.data 397 | username, filename = request.username, request.filename 398 | 399 | def getFileChunks(fileData): 400 | # Maximum chunk size that can be sent 401 | CHUNK_SIZE=4000000 402 | 403 | outfile = os.path.join('files', fileName) 404 | 405 | sTime=time.time() 406 | 407 | while True: 408 | chunk = fileData.read(CHUNK_SIZE) 409 | if not chunk: break 410 | 411 | yield fileService_pb2.FileData(username=username, filename=fileName, data=chunk, seqNo=1) 412 | print("Time for upload= ", time.time()-sTime) 413 | 414 | if(int(db.get("primaryStatus"))==1): 415 | channel = grpc.insecure_channel('{}'.format(self.serverAddress)) 416 | stub = fileService_pb2_grpc.FileserviceStub(channel) 417 | 418 | response1 = stub.FileDelete(fileService_pb2.FileInfo(username=userName, filename=fileName)) 419 | 420 | if(response1.success): 421 | response2 = stub.UploadFile(getFileChunks(fileData)) 422 | if(response2.success): 423 | return fileService_pb2.ack(success=True, message="File suceessfully updated.") 424 | else: 425 | return fileService_pb2.ack(success=False, message="Internal error.") 426 | else: 427 | return fileService_pb2.ack(success=False, message="Internal error.") 428 | 429 | 430 | 431 | 432 | 433 | 434 | 435 | 436 | 437 | 438 | 439 | 440 | 441 | 442 | 443 | 444 | 445 | 446 | 447 | 448 | 449 | 450 | 451 | 452 | 453 | 454 | 455 | 456 | 457 
| -------------------------------------------------------------------------------- /proto/fileService_pb2.py: -------------------------------------------------------------------------------- 1 | # Generated by the protocol buffer compiler. DO NOT EDIT! 2 | # source: proto/fileService.proto 3 | 4 | import sys 5 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) 6 | from google.protobuf import descriptor as _descriptor 7 | from google.protobuf import message as _message 8 | from google.protobuf import reflection as _reflection 9 | from google.protobuf import symbol_database as _symbol_database 10 | # @@protoc_insertion_point(imports) 11 | 12 | _sym_db = _symbol_database.Default() 13 | 14 | 15 | 16 | 17 | DESCRIPTOR = _descriptor.FileDescriptor( 18 | name='proto/fileService.proto', 19 | package='fileservice', 20 | syntax='proto3', 21 | serialized_options=None, 22 | serialized_pb=_b('\n\x17proto/fileService.proto\x12\x0b\x66ileservice\"`\n\x08\x46ileData\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x10\n\x08\x66ilename\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\x12\r\n\x05seqNo\x18\x04 \x01(\x05\x12\x13\n\x0breplicaNode\x18\x05 \x01(\t\"/\n\x08MetaData\x12\x10\n\x08\x66ilename\x18\x01 \x01(\t\x12\x11\n\tseqValues\x18\x02 \x01(\x0c\"\'\n\x03\x61\x63k\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x0f\n\x07message\x18\x02 \x01(\t\"\x1c\n\x08UserInfo\x12\x10\n\x08username\x18\x01 \x01(\t\"=\n\x08\x46ileInfo\x12\x10\n\x08username\x18\x01 \x01(\t\x12\x10\n\x08\x66ilename\x18\x02 \x01(\t\x12\r\n\x05seqNo\x18\x03 \x01(\x05\"%\n\x10\x46ileListResponse\x12\x11\n\tFilenames\x18\x01 \x01(\t\"<\n\x0b\x43lusterInfo\x12\n\n\x02ip\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\t\x12\x13\n\x0b\x63lusterName\x18\x03 \x01(\t\"\x07\n\x05\x45mpty\"G\n\x0c\x43lusterStats\x12\x11\n\tcpu_usage\x18\x01 \x01(\t\x12\x12\n\ndisk_space\x18\x02 \x01(\t\x12\x10\n\x08used_mem\x18\x03 
\x01(\t2\xe3\x04\n\x0b\x46ileservice\x12\x37\n\nUploadFile\x12\x15.fileservice.FileData\x1a\x10.fileservice.ack(\x01\x12>\n\x0c\x44ownloadFile\x12\x15.fileservice.FileInfo\x1a\x15.fileservice.FileData0\x01\x12\x35\n\nFileSearch\x12\x15.fileservice.FileInfo\x1a\x10.fileservice.ack\x12:\n\rReplicateFile\x12\x15.fileservice.FileData\x1a\x10.fileservice.ack(\x01\x12@\n\x08\x46ileList\x12\x15.fileservice.UserInfo\x1a\x1d.fileservice.FileListResponse\x12\x35\n\nFileDelete\x12\x15.fileservice.FileInfo\x1a\x10.fileservice.ack\x12\x37\n\nUpdateFile\x12\x15.fileservice.FileData\x1a\x10.fileservice.ack(\x01\x12@\n\x0fgetClusterStats\x12\x12.fileservice.Empty\x1a\x19.fileservice.ClusterStats\x12;\n\rgetLeaderInfo\x12\x18.fileservice.ClusterInfo\x1a\x10.fileservice.ack\x12\x37\n\x0cMetaDataInfo\x12\x15.fileservice.MetaData\x1a\x10.fileservice.ackb\x06proto3') 23 | ) 24 | 25 | 26 | 27 | 28 | _FILEDATA = _descriptor.Descriptor( 29 | name='FileData', 30 | full_name='fileservice.FileData', 31 | filename=None, 32 | file=DESCRIPTOR, 33 | containing_type=None, 34 | fields=[ 35 | _descriptor.FieldDescriptor( 36 | name='username', full_name='fileservice.FileData.username', index=0, 37 | number=1, type=9, cpp_type=9, label=1, 38 | has_default_value=False, default_value=_b("").decode('utf-8'), 39 | message_type=None, enum_type=None, containing_type=None, 40 | is_extension=False, extension_scope=None, 41 | serialized_options=None, file=DESCRIPTOR), 42 | _descriptor.FieldDescriptor( 43 | name='filename', full_name='fileservice.FileData.filename', index=1, 44 | number=2, type=9, cpp_type=9, label=1, 45 | has_default_value=False, default_value=_b("").decode('utf-8'), 46 | message_type=None, enum_type=None, containing_type=None, 47 | is_extension=False, extension_scope=None, 48 | serialized_options=None, file=DESCRIPTOR), 49 | _descriptor.FieldDescriptor( 50 | name='data', full_name='fileservice.FileData.data', index=2, 51 | number=3, type=12, cpp_type=9, label=1, 52 | has_default_value=False, 
default_value=_b(""), 53 | message_type=None, enum_type=None, containing_type=None, 54 | is_extension=False, extension_scope=None, 55 | serialized_options=None, file=DESCRIPTOR), 56 | _descriptor.FieldDescriptor( 57 | name='seqNo', full_name='fileservice.FileData.seqNo', index=3, 58 | number=4, type=5, cpp_type=1, label=1, 59 | has_default_value=False, default_value=0, 60 | message_type=None, enum_type=None, containing_type=None, 61 | is_extension=False, extension_scope=None, 62 | serialized_options=None, file=DESCRIPTOR), 63 | _descriptor.FieldDescriptor( 64 | name='replicaNode', full_name='fileservice.FileData.replicaNode', index=4, 65 | number=5, type=9, cpp_type=9, label=1, 66 | has_default_value=False, default_value=_b("").decode('utf-8'), 67 | message_type=None, enum_type=None, containing_type=None, 68 | is_extension=False, extension_scope=None, 69 | serialized_options=None, file=DESCRIPTOR), 70 | ], 71 | extensions=[ 72 | ], 73 | nested_types=[], 74 | enum_types=[ 75 | ], 76 | serialized_options=None, 77 | is_extendable=False, 78 | syntax='proto3', 79 | extension_ranges=[], 80 | oneofs=[ 81 | ], 82 | serialized_start=40, 83 | serialized_end=136, 84 | ) 85 | 86 | 87 | _METADATA = _descriptor.Descriptor( 88 | name='MetaData', 89 | full_name='fileservice.MetaData', 90 | filename=None, 91 | file=DESCRIPTOR, 92 | containing_type=None, 93 | fields=[ 94 | _descriptor.FieldDescriptor( 95 | name='filename', full_name='fileservice.MetaData.filename', index=0, 96 | number=1, type=9, cpp_type=9, label=1, 97 | has_default_value=False, default_value=_b("").decode('utf-8'), 98 | message_type=None, enum_type=None, containing_type=None, 99 | is_extension=False, extension_scope=None, 100 | serialized_options=None, file=DESCRIPTOR), 101 | _descriptor.FieldDescriptor( 102 | name='seqValues', full_name='fileservice.MetaData.seqValues', index=1, 103 | number=2, type=12, cpp_type=9, label=1, 104 | has_default_value=False, default_value=_b(""), 105 | message_type=None, 
enum_type=None, containing_type=None, 106 | is_extension=False, extension_scope=None, 107 | serialized_options=None, file=DESCRIPTOR), 108 | ], 109 | extensions=[ 110 | ], 111 | nested_types=[], 112 | enum_types=[ 113 | ], 114 | serialized_options=None, 115 | is_extendable=False, 116 | syntax='proto3', 117 | extension_ranges=[], 118 | oneofs=[ 119 | ], 120 | serialized_start=138, 121 | serialized_end=185, 122 | ) 123 | 124 | 125 | _ACK = _descriptor.Descriptor( 126 | name='ack', 127 | full_name='fileservice.ack', 128 | filename=None, 129 | file=DESCRIPTOR, 130 | containing_type=None, 131 | fields=[ 132 | _descriptor.FieldDescriptor( 133 | name='success', full_name='fileservice.ack.success', index=0, 134 | number=1, type=8, cpp_type=7, label=1, 135 | has_default_value=False, default_value=False, 136 | message_type=None, enum_type=None, containing_type=None, 137 | is_extension=False, extension_scope=None, 138 | serialized_options=None, file=DESCRIPTOR), 139 | _descriptor.FieldDescriptor( 140 | name='message', full_name='fileservice.ack.message', index=1, 141 | number=2, type=9, cpp_type=9, label=1, 142 | has_default_value=False, default_value=_b("").decode('utf-8'), 143 | message_type=None, enum_type=None, containing_type=None, 144 | is_extension=False, extension_scope=None, 145 | serialized_options=None, file=DESCRIPTOR), 146 | ], 147 | extensions=[ 148 | ], 149 | nested_types=[], 150 | enum_types=[ 151 | ], 152 | serialized_options=None, 153 | is_extendable=False, 154 | syntax='proto3', 155 | extension_ranges=[], 156 | oneofs=[ 157 | ], 158 | serialized_start=187, 159 | serialized_end=226, 160 | ) 161 | 162 | 163 | _USERINFO = _descriptor.Descriptor( 164 | name='UserInfo', 165 | full_name='fileservice.UserInfo', 166 | filename=None, 167 | file=DESCRIPTOR, 168 | containing_type=None, 169 | fields=[ 170 | _descriptor.FieldDescriptor( 171 | name='username', full_name='fileservice.UserInfo.username', index=0, 172 | number=1, type=9, cpp_type=9, label=1, 173 | 
has_default_value=False, default_value=_b("").decode('utf-8'), 174 | message_type=None, enum_type=None, containing_type=None, 175 | is_extension=False, extension_scope=None, 176 | serialized_options=None, file=DESCRIPTOR), 177 | ], 178 | extensions=[ 179 | ], 180 | nested_types=[], 181 | enum_types=[ 182 | ], 183 | serialized_options=None, 184 | is_extendable=False, 185 | syntax='proto3', 186 | extension_ranges=[], 187 | oneofs=[ 188 | ], 189 | serialized_start=228, 190 | serialized_end=256, 191 | ) 192 | 193 | 194 | _FILEINFO = _descriptor.Descriptor( 195 | name='FileInfo', 196 | full_name='fileservice.FileInfo', 197 | filename=None, 198 | file=DESCRIPTOR, 199 | containing_type=None, 200 | fields=[ 201 | _descriptor.FieldDescriptor( 202 | name='username', full_name='fileservice.FileInfo.username', index=0, 203 | number=1, type=9, cpp_type=9, label=1, 204 | has_default_value=False, default_value=_b("").decode('utf-8'), 205 | message_type=None, enum_type=None, containing_type=None, 206 | is_extension=False, extension_scope=None, 207 | serialized_options=None, file=DESCRIPTOR), 208 | _descriptor.FieldDescriptor( 209 | name='filename', full_name='fileservice.FileInfo.filename', index=1, 210 | number=2, type=9, cpp_type=9, label=1, 211 | has_default_value=False, default_value=_b("").decode('utf-8'), 212 | message_type=None, enum_type=None, containing_type=None, 213 | is_extension=False, extension_scope=None, 214 | serialized_options=None, file=DESCRIPTOR), 215 | _descriptor.FieldDescriptor( 216 | name='seqNo', full_name='fileservice.FileInfo.seqNo', index=2, 217 | number=3, type=5, cpp_type=1, label=1, 218 | has_default_value=False, default_value=0, 219 | message_type=None, enum_type=None, containing_type=None, 220 | is_extension=False, extension_scope=None, 221 | serialized_options=None, file=DESCRIPTOR), 222 | ], 223 | extensions=[ 224 | ], 225 | nested_types=[], 226 | enum_types=[ 227 | ], 228 | serialized_options=None, 229 | is_extendable=False, 230 | 
syntax='proto3', 231 | extension_ranges=[], 232 | oneofs=[ 233 | ], 234 | serialized_start=258, 235 | serialized_end=319, 236 | ) 237 | 238 | 239 | _FILELISTRESPONSE = _descriptor.Descriptor( 240 | name='FileListResponse', 241 | full_name='fileservice.FileListResponse', 242 | filename=None, 243 | file=DESCRIPTOR, 244 | containing_type=None, 245 | fields=[ 246 | _descriptor.FieldDescriptor( 247 | name='Filenames', full_name='fileservice.FileListResponse.Filenames', index=0, 248 | number=1, type=9, cpp_type=9, label=1, 249 | has_default_value=False, default_value=_b("").decode('utf-8'), 250 | message_type=None, enum_type=None, containing_type=None, 251 | is_extension=False, extension_scope=None, 252 | serialized_options=None, file=DESCRIPTOR), 253 | ], 254 | extensions=[ 255 | ], 256 | nested_types=[], 257 | enum_types=[ 258 | ], 259 | serialized_options=None, 260 | is_extendable=False, 261 | syntax='proto3', 262 | extension_ranges=[], 263 | oneofs=[ 264 | ], 265 | serialized_start=321, 266 | serialized_end=358, 267 | ) 268 | 269 | 270 | _CLUSTERINFO = _descriptor.Descriptor( 271 | name='ClusterInfo', 272 | full_name='fileservice.ClusterInfo', 273 | filename=None, 274 | file=DESCRIPTOR, 275 | containing_type=None, 276 | fields=[ 277 | _descriptor.FieldDescriptor( 278 | name='ip', full_name='fileservice.ClusterInfo.ip', index=0, 279 | number=1, type=9, cpp_type=9, label=1, 280 | has_default_value=False, default_value=_b("").decode('utf-8'), 281 | message_type=None, enum_type=None, containing_type=None, 282 | is_extension=False, extension_scope=None, 283 | serialized_options=None, file=DESCRIPTOR), 284 | _descriptor.FieldDescriptor( 285 | name='port', full_name='fileservice.ClusterInfo.port', index=1, 286 | number=2, type=9, cpp_type=9, label=1, 287 | has_default_value=False, default_value=_b("").decode('utf-8'), 288 | message_type=None, enum_type=None, containing_type=None, 289 | is_extension=False, extension_scope=None, 290 | serialized_options=None, 
file=DESCRIPTOR), 291 | _descriptor.FieldDescriptor( 292 | name='clusterName', full_name='fileservice.ClusterInfo.clusterName', index=2, 293 | number=3, type=9, cpp_type=9, label=1, 294 | has_default_value=False, default_value=_b("").decode('utf-8'), 295 | message_type=None, enum_type=None, containing_type=None, 296 | is_extension=False, extension_scope=None, 297 | serialized_options=None, file=DESCRIPTOR), 298 | ], 299 | extensions=[ 300 | ], 301 | nested_types=[], 302 | enum_types=[ 303 | ], 304 | serialized_options=None, 305 | is_extendable=False, 306 | syntax='proto3', 307 | extension_ranges=[], 308 | oneofs=[ 309 | ], 310 | serialized_start=360, 311 | serialized_end=420, 312 | ) 313 | 314 | 315 | _EMPTY = _descriptor.Descriptor( 316 | name='Empty', 317 | full_name='fileservice.Empty', 318 | filename=None, 319 | file=DESCRIPTOR, 320 | containing_type=None, 321 | fields=[ 322 | ], 323 | extensions=[ 324 | ], 325 | nested_types=[], 326 | enum_types=[ 327 | ], 328 | serialized_options=None, 329 | is_extendable=False, 330 | syntax='proto3', 331 | extension_ranges=[], 332 | oneofs=[ 333 | ], 334 | serialized_start=422, 335 | serialized_end=429, 336 | ) 337 | 338 | 339 | _CLUSTERSTATS = _descriptor.Descriptor( 340 | name='ClusterStats', 341 | full_name='fileservice.ClusterStats', 342 | filename=None, 343 | file=DESCRIPTOR, 344 | containing_type=None, 345 | fields=[ 346 | _descriptor.FieldDescriptor( 347 | name='cpu_usage', full_name='fileservice.ClusterStats.cpu_usage', index=0, 348 | number=1, type=9, cpp_type=9, label=1, 349 | has_default_value=False, default_value=_b("").decode('utf-8'), 350 | message_type=None, enum_type=None, containing_type=None, 351 | is_extension=False, extension_scope=None, 352 | serialized_options=None, file=DESCRIPTOR), 353 | _descriptor.FieldDescriptor( 354 | name='disk_space', full_name='fileservice.ClusterStats.disk_space', index=1, 355 | number=2, type=9, cpp_type=9, label=1, 356 | has_default_value=False, 
default_value=_b("").decode('utf-8'), 357 | message_type=None, enum_type=None, containing_type=None, 358 | is_extension=False, extension_scope=None, 359 | serialized_options=None, file=DESCRIPTOR), 360 | _descriptor.FieldDescriptor( 361 | name='used_mem', full_name='fileservice.ClusterStats.used_mem', index=2, 362 | number=3, type=9, cpp_type=9, label=1, 363 | has_default_value=False, default_value=_b("").decode('utf-8'), 364 | message_type=None, enum_type=None, containing_type=None, 365 | is_extension=False, extension_scope=None, 366 | serialized_options=None, file=DESCRIPTOR), 367 | ], 368 | extensions=[ 369 | ], 370 | nested_types=[], 371 | enum_types=[ 372 | ], 373 | serialized_options=None, 374 | is_extendable=False, 375 | syntax='proto3', 376 | extension_ranges=[], 377 | oneofs=[ 378 | ], 379 | serialized_start=431, 380 | serialized_end=502, 381 | ) 382 | 383 | DESCRIPTOR.message_types_by_name['FileData'] = _FILEDATA 384 | DESCRIPTOR.message_types_by_name['MetaData'] = _METADATA 385 | DESCRIPTOR.message_types_by_name['ack'] = _ACK 386 | DESCRIPTOR.message_types_by_name['UserInfo'] = _USERINFO 387 | DESCRIPTOR.message_types_by_name['FileInfo'] = _FILEINFO 388 | DESCRIPTOR.message_types_by_name['FileListResponse'] = _FILELISTRESPONSE 389 | DESCRIPTOR.message_types_by_name['ClusterInfo'] = _CLUSTERINFO 390 | DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY 391 | DESCRIPTOR.message_types_by_name['ClusterStats'] = _CLUSTERSTATS 392 | _sym_db.RegisterFileDescriptor(DESCRIPTOR) 393 | 394 | FileData = _reflection.GeneratedProtocolMessageType('FileData', (_message.Message,), dict( 395 | DESCRIPTOR = _FILEDATA, 396 | __module__ = 'proto.fileService_pb2' 397 | # @@protoc_insertion_point(class_scope:fileservice.FileData) 398 | )) 399 | _sym_db.RegisterMessage(FileData) 400 | 401 | MetaData = _reflection.GeneratedProtocolMessageType('MetaData', (_message.Message,), dict( 402 | DESCRIPTOR = _METADATA, 403 | __module__ = 'proto.fileService_pb2' 404 | # 
@@protoc_insertion_point(class_scope:fileservice.MetaData) 405 | )) 406 | _sym_db.RegisterMessage(MetaData) 407 | 408 | ack = _reflection.GeneratedProtocolMessageType('ack', (_message.Message,), dict( 409 | DESCRIPTOR = _ACK, 410 | __module__ = 'proto.fileService_pb2' 411 | # @@protoc_insertion_point(class_scope:fileservice.ack) 412 | )) 413 | _sym_db.RegisterMessage(ack) 414 | 415 | UserInfo = _reflection.GeneratedProtocolMessageType('UserInfo', (_message.Message,), dict( 416 | DESCRIPTOR = _USERINFO, 417 | __module__ = 'proto.fileService_pb2' 418 | # @@protoc_insertion_point(class_scope:fileservice.UserInfo) 419 | )) 420 | _sym_db.RegisterMessage(UserInfo) 421 | 422 | FileInfo = _reflection.GeneratedProtocolMessageType('FileInfo', (_message.Message,), dict( 423 | DESCRIPTOR = _FILEINFO, 424 | __module__ = 'proto.fileService_pb2' 425 | # @@protoc_insertion_point(class_scope:fileservice.FileInfo) 426 | )) 427 | _sym_db.RegisterMessage(FileInfo) 428 | 429 | FileListResponse = _reflection.GeneratedProtocolMessageType('FileListResponse', (_message.Message,), dict( 430 | DESCRIPTOR = _FILELISTRESPONSE, 431 | __module__ = 'proto.fileService_pb2' 432 | # @@protoc_insertion_point(class_scope:fileservice.FileListResponse) 433 | )) 434 | _sym_db.RegisterMessage(FileListResponse) 435 | 436 | ClusterInfo = _reflection.GeneratedProtocolMessageType('ClusterInfo', (_message.Message,), dict( 437 | DESCRIPTOR = _CLUSTERINFO, 438 | __module__ = 'proto.fileService_pb2' 439 | # @@protoc_insertion_point(class_scope:fileservice.ClusterInfo) 440 | )) 441 | _sym_db.RegisterMessage(ClusterInfo) 442 | 443 | Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict( 444 | DESCRIPTOR = _EMPTY, 445 | __module__ = 'proto.fileService_pb2' 446 | # @@protoc_insertion_point(class_scope:fileservice.Empty) 447 | )) 448 | _sym_db.RegisterMessage(Empty) 449 | 450 | ClusterStats = _reflection.GeneratedProtocolMessageType('ClusterStats', (_message.Message,), dict( 451 | 
DESCRIPTOR = _CLUSTERSTATS, 452 | __module__ = 'proto.fileService_pb2' 453 | # @@protoc_insertion_point(class_scope:fileservice.ClusterStats) 454 | )) 455 | _sym_db.RegisterMessage(ClusterStats) 456 | 457 | 458 | 459 | _FILESERVICE = _descriptor.ServiceDescriptor( 460 | name='Fileservice', 461 | full_name='fileservice.Fileservice', 462 | file=DESCRIPTOR, 463 | index=0, 464 | serialized_options=None, 465 | serialized_start=505, 466 | serialized_end=1116, 467 | methods=[ 468 | _descriptor.MethodDescriptor( 469 | name='UploadFile', 470 | full_name='fileservice.Fileservice.UploadFile', 471 | index=0, 472 | containing_service=None, 473 | input_type=_FILEDATA, 474 | output_type=_ACK, 475 | serialized_options=None, 476 | ), 477 | _descriptor.MethodDescriptor( 478 | name='DownloadFile', 479 | full_name='fileservice.Fileservice.DownloadFile', 480 | index=1, 481 | containing_service=None, 482 | input_type=_FILEINFO, 483 | output_type=_FILEDATA, 484 | serialized_options=None, 485 | ), 486 | _descriptor.MethodDescriptor( 487 | name='FileSearch', 488 | full_name='fileservice.Fileservice.FileSearch', 489 | index=2, 490 | containing_service=None, 491 | input_type=_FILEINFO, 492 | output_type=_ACK, 493 | serialized_options=None, 494 | ), 495 | _descriptor.MethodDescriptor( 496 | name='ReplicateFile', 497 | full_name='fileservice.Fileservice.ReplicateFile', 498 | index=3, 499 | containing_service=None, 500 | input_type=_FILEDATA, 501 | output_type=_ACK, 502 | serialized_options=None, 503 | ), 504 | _descriptor.MethodDescriptor( 505 | name='FileList', 506 | full_name='fileservice.Fileservice.FileList', 507 | index=4, 508 | containing_service=None, 509 | input_type=_USERINFO, 510 | output_type=_FILELISTRESPONSE, 511 | serialized_options=None, 512 | ), 513 | _descriptor.MethodDescriptor( 514 | name='FileDelete', 515 | full_name='fileservice.Fileservice.FileDelete', 516 | index=5, 517 | containing_service=None, 518 | input_type=_FILEINFO, 519 | output_type=_ACK, 520 | 
serialized_options=None, 521 | ), 522 | _descriptor.MethodDescriptor( 523 | name='UpdateFile', 524 | full_name='fileservice.Fileservice.UpdateFile', 525 | index=6, 526 | containing_service=None, 527 | input_type=_FILEDATA, 528 | output_type=_ACK, 529 | serialized_options=None, 530 | ), 531 | _descriptor.MethodDescriptor( 532 | name='getClusterStats', 533 | full_name='fileservice.Fileservice.getClusterStats', 534 | index=7, 535 | containing_service=None, 536 | input_type=_EMPTY, 537 | output_type=_CLUSTERSTATS, 538 | serialized_options=None, 539 | ), 540 | _descriptor.MethodDescriptor( 541 | name='getLeaderInfo', 542 | full_name='fileservice.Fileservice.getLeaderInfo', 543 | index=8, 544 | containing_service=None, 545 | input_type=_CLUSTERINFO, 546 | output_type=_ACK, 547 | serialized_options=None, 548 | ), 549 | _descriptor.MethodDescriptor( 550 | name='MetaDataInfo', 551 | full_name='fileservice.Fileservice.MetaDataInfo', 552 | index=9, 553 | containing_service=None, 554 | input_type=_METADATA, 555 | output_type=_ACK, 556 | serialized_options=None, 557 | ), 558 | ]) 559 | _sym_db.RegisterServiceDescriptor(_FILESERVICE) 560 | 561 | DESCRIPTOR.services_by_name['Fileservice'] = _FILESERVICE 562 | 563 | # @@protoc_insertion_point(module_scope) 564 | --------------------------------------------------------------------------------