├── .gitignore ├── Client ├── ClientUtil │ ├── comm.go │ ├── types.go │ └── work.go └── Launcher │ └── main.go ├── Distributed-Database ├── client │ └── client.go ├── common │ └── common.go ├── slave │ └── slave.go └── tracker │ └── tracker.go ├── Distributed-File-System ├── DataNode │ ├── DNLauncher │ │ └── main.go │ ├── Launcher │ │ └── main.go │ └── Utils │ │ ├── comm.go │ │ ├── heartbeats.go │ │ ├── types.go │ │ └── work.go ├── README.md ├── TrackerNode │ ├── Launcher │ │ └── main.go │ ├── TRLauncher │ │ └── main.go │ └── Utils │ │ ├── comm.go │ │ ├── dbscripts.go │ │ ├── dbtypes.go │ │ ├── dbwork.go │ │ ├── heartbeats.go │ │ ├── types.go │ │ └── work.go └── Utils │ ├── Comm │ └── comm.go │ ├── Constants │ └── constants.go │ ├── Database │ ├── setup.go │ └── types.go │ ├── File │ └── fileutils.go │ ├── Log │ └── logger.go │ └── Request │ ├── completion.go │ ├── download.go │ ├── replication.go │ ├── types.go │ └── upload.go └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | *.env 14 | .vscode 15 | *.mkv 16 | Distributed-File-System/DataNode/Launcher/Client* 17 | *.mp4 -------------------------------------------------------------------------------- /Client/ClientUtil/comm.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | comm "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Comm" 5 | fileutils "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/File" 6 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 7 | request "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Request" 8 | "fmt" 9 | "strconv" 10 | "time" 11 | 12 | "github.com/pebbe/zmq4" 13 | ) 14 | 15 | // EstablishConnection A function to establish communication with all Tracker ports 16 | func (clientObj *client) EstablishConnection() { 17 | socket, ok := comm.Init(zmq4.REQ, "") 18 | logger.LogFail(ok, LogSign, clientObj.id, "EstablishConnection(): Failed to acquire request Socket") 19 | 20 | clientObj.socket = socket 21 | 22 | var connectionString []string 23 | 24 | for _, port := range clientObj.trackerPorts { 25 | connectionString = append(connectionString, comm.GetConnectionString(clientObj.trackerIP, port)) 26 | } 27 | 28 | comm.Connect(socket, connectionString) 29 | logger.LogMsg(LogSign, clientObj.id, "Successfully connected to tracker ports") 30 | } 31 | 32 | // SendRequest A function to send a request to Tracker [Timeout after 30 secs] 33 | func (clientObj *client) SendRequest(serializedRequest string) bool { 34 | logger.LogMsg(LogSign, clientObj.id, "Sending Request to tracker") 35 | var sendStatus = false 36 | 37 | sendChan := make(chan bool, 1) 38 | go func() { 39 | sendStatus = comm.SendString(clientObj.socket, serializedRequest) 40 | sendChan <- sendStatus 41 | }() 42 | select { 43 | case <-sendChan: 44 | case <-time.After(30 * time.Second): 45 | logger.LogMsg(LogSign, clientObj.id, "Sending request to tracker timedout after 30 secs") 46 | return false 47 | } 48 | 49 | logger.LogFail(sendStatus, LogSign, clientObj.id, "SendRequest(): Failed to send request to tracker") 50 | 51 | return sendStatus 52 | } 53 | 54 | // ReceiveResponse A function to receive the 
response sent by the Tracker [Timeout after 30 secs] 55 | func (clientObj *client) ReceiveResponse() (string, bool) { 56 | socket, ok := comm.Init(zmq4.REP, "") 57 | defer socket.Close() 58 | logger.LogFail(ok, LogSign, clientObj.id, "ReceiveResponse(): Failed to acquire response Socket") 59 | 60 | var connectionString = []string{comm.GetConnectionString(clientObj.ip, clientObj.port)} 61 | comm.Bind(socket, connectionString) 62 | 63 | var response string 64 | var status = false 65 | 66 | recvChan := make(chan bool, 1) 67 | go func() { 68 | response, status = comm.RecvString(socket) 69 | recvChan <- status 70 | }() 71 | select { 72 | case <-recvChan: 73 | case <-time.After(30 * time.Second): 74 | logger.LogMsg(LogSign, clientObj.id, "Receving response from tracker timedout after 30 secs") 75 | return "", false 76 | } 77 | 78 | logger.LogFail(status, LogSign, clientObj.id, "ReceiveResponse(): Failed to receive tracker response") 79 | 80 | return response, status 81 | } 82 | 83 | // RSendRequestToDN A function to resend a request to DataNode after receiving its data from Tracker [Timeout after 30 secs] 84 | func (clientObj *client) RSendRequestToDN(dnIP string, dnReqPort string, serializedRequest string) bool { 85 | socket, ok := comm.Init(zmq4.REQ, "") 86 | defer socket.Close() 87 | logger.LogFail(ok, LogSign, clientObj.id, "RSendRequestToDN(): Failed to acquire request Socket") 88 | 89 | var connectionString = []string{comm.GetConnectionString(dnIP, dnReqPort)} 90 | comm.Connect(socket, connectionString) 91 | 92 | logger.LogMsg(LogSign, clientObj.id, "Sending Request to DataNode") 93 | 94 | var sendStatus = false 95 | 96 | sendChan := make(chan bool, 1) 97 | go func() { 98 | sendStatus = comm.SendString(socket, serializedRequest) 99 | sendChan <- sendStatus 100 | }() 101 | select { 102 | case <-sendChan: 103 | case <-time.After(30 * time.Second): 104 | logger.LogMsg(LogSign, clientObj.id, "Sending request to DataNode timedout after 30 secs") 105 | return false 106 | } 107 | 108 | logger.LogFail(sendStatus, LogSign, clientObj.id, "RSendRequestToDN(): Failed to send request to DataNode") 109 | 110 | return sendStatus 111 | } 112 | 113 | // sendChunkCount A function to send the chunk count of a file [Timeout after 30 secs] 114 | func (clientObj *client) sendChunkCount(socket *zmq4.Socket, chunksCount int) bool { 115 | logger.LogMsg(LogSign, clientObj.id, "Sending chunk count to DataNode") 116 | 117 | var status = false 118 | 119 | sendChan := make(chan bool, 1) 120 | go func() { 121 | status = comm.SendString(socket, strconv.Itoa(chunksCount)) 122 | sendChan <- status 123 | }() 124 | select { 125 | case <-sendChan: 126 | case <-time.After(30 * time.Second): 127 | logger.LogMsg(LogSign, clientObj.id, "Sending chunk count timedout after 30 secs") 128 | return false 129 | } 130 | 131 | logger.LogFail(status, LogSign, clientObj.id, "sendChunkCount(): Failed to send chunk count to DataNode") 132 | 133 | return status 134 | } 135 | 136 | // sendChunk A function to send a chunk of data [Timeout after 1 min] 137 | func (clientObj *client) sendDataChunk(socket *zmq4.Socket, data []byte, chunkID int) bool { 138 | logger.LogMsg(LogSign, clientObj.id, fmt.Sprintf("Sending chunk #%d to DataNode", chunkID)) 139 | 140 | var status = false 141 | 142 | sendChan := make(chan bool, 1) 143 | go func() { 144 | status = comm.SendBytes(socket, data) 145 | sendChan <- status 146 | }() 147 | select { 148 | case <-sendChan: 149 | case <-time.After(time.Minute): 150 | logger.LogMsg(LogSign, clientObj.id, "Sending chunk 
timedout after 1 min") 151 | return false 152 | } 153 | 154 | logger.LogFail(status, LogSign, clientObj.id, "sendChunk(): Failed to send chunk to DataNode") 155 | 156 | return status 157 | } 158 | 159 | // SendData A function to send data to DataNode chunk by chunk 160 | func (clientObj *client) SendData(req request.UploadRequest, dnIP string, dnDataPort string) bool { 161 | socket, ok := comm.Init(zmq4.REQ, "") 162 | defer socket.Close() 163 | logger.LogFail(ok, LogSign, clientObj.id, "SendData(): Failed to acquire request Socket") 164 | 165 | var connectionString = []string{comm.GetConnectionString(clientObj.ip, clientObj.port)} 166 | comm.Bind(socket, connectionString) 167 | 168 | file := fileutils.OpenFile(req.FileName) 169 | defer file.Close() 170 | 171 | chunksCount := fileutils.GetChunksCount(req.FileName) 172 | 173 | //Send the chunksCount to the DataNode 174 | sendChunkCountStatus := clientObj.sendChunkCount(socket, chunksCount) 175 | logger.LogSuccess(sendChunkCountStatus, LogSign, clientObj.id, "Successfully sent chunk count to DataNode") 176 | if sendChunkCountStatus == false { 177 | return false 178 | } 179 | 180 | //Send the actual chunks of data 181 | for i := 0; i < chunksCount; i++ { 182 | chunk, size, done := fileutils.ReadChunk(file) 183 | 184 | if done == true { 185 | break 186 | } 187 | 188 | sendChunkStatus := clientObj.sendDataChunk(socket, chunk[:size], i+1) 189 | logger.LogSuccess(sendChunkStatus, LogSign, clientObj.id, fmt.Sprintf("Successfully sent chunk #%d", i+1)) 190 | if sendChunkStatus == false { 191 | return false 192 | } 193 | } 194 | 195 | return true 196 | } 197 | 198 | // receiveChunk A function to receive a chunk of data [Timeout after 1 min] 199 | func (clientObj *client) receiveChunk(socket *zmq4.Socket, chunkID int) ([]byte, bool) { 200 | var chunk []byte 201 | var status = false 202 | 203 | recvChan := make(chan bool, 1) 204 | go func() { 205 | chunk, status = comm.RecvBytes(socket) 206 | recvChan <- status 207 | }() 208 | select { 209 | case <-recvChan: 210 | case <-time.After(time.Minute): 211 | logger.LogMsg(LogSign, clientObj.id, "Receiving chunk from DataNode timedout after 1 min") 212 | return []byte{}, false 213 | } 214 | 215 | logger.LogFail(status, LogSign, clientObj.id, fmt.Sprintf("receiveChunk(): Error receiving chunk #%d", chunkID)) 216 | 217 | return chunk, status 218 | } 219 | 220 | // RecvPieces A function to receive file pieces from DataNode 221 | func (clientObj *client) RecvPieces(req request.UploadRequest, start string, chunkCount int, done chan bool) bool { 222 | socket, ok := comm.Init(zmq4.REP, "") 223 | defer socket.Close() 224 | logger.LogFail(ok, LogSign, clientObj.id, "RecvPieces(): Failed to acquire response Socket") 225 | 226 | var connectionString = []string{comm.GetConnectionString(clientObj.ip, req.ClientPort)} 227 | comm.Bind(socket, connectionString) 228 | 229 | file := fileutils.CreateFile(req.FileName[:len(req.FileName)-4] + "#" + start + req.FileName[len(req.FileName)-4:]) 230 | defer file.Close() 231 | 232 | for i := 0; i < chunkCount; i++ { 233 | chunk, chunkStatus := clientObj.receiveChunk(socket, i+1) 234 | logger.LogSuccess(chunkStatus, LogSign, clientObj.id, fmt.Sprintf("Successfully received chunk #%d", i+1)) 235 | if chunkStatus == false { 236 | done <- true 237 | return false 238 | } 239 | //TODO: Clean up 240 | fileutils.WriteChunk(file, chunk) 241 | } 242 | 243 | logger.LogMsg(LogSign, clientObj.id, fmt.Sprintf("Received Block #%s", start)) 244 | 245 | done <- true 246 | return true 247 | } 248 | 249 
| // ReceiveResponse A function to receive the response sent by the Tracker 250 | func (clientObj *client) ReceiveNotification() (string, bool) { 251 | socket, ok := comm.Init(zmq4.REP, "") 252 | defer socket.Close() 253 | logger.LogFail(ok, LogSign, clientObj.id, "ReceiveNotification(): Failed to acquire response Socket") 254 | 255 | var connectionString = []string{comm.GetConnectionString(clientObj.ip, clientObj.notifyPort)} 256 | comm.Bind(socket, connectionString) 257 | 258 | var response string 259 | var sendStatus bool 260 | 261 | recieveNotificationChan := make(chan bool, 1) 262 | go func() { 263 | response, sendStatus = comm.RecvString(socket) 264 | recieveNotificationChan <- sendStatus 265 | }() 266 | select { 267 | case <-recieveNotificationChan: 268 | case <-time.After(30 * time.Second): 269 | logger.LogMsg(LogSign, clientObj.id, "ReceiveNotification(): Connection timed out after 30 secs") 270 | return "", false 271 | } 272 | 273 | logger.LogFail(sendStatus, LogSign, clientObj.id, "ReceiveNotification(): Failed to receive notification from tracker") 274 | 275 | return response, sendStatus 276 | } 277 | 278 | // CloseConnection A function to shut down the request port socket 279 | func (clientObj *client) CloseConnection() { 280 | clientObj.socket.Close() 281 | } 282 | -------------------------------------------------------------------------------- /Client/ClientUtil/types.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import "github.com/pebbe/zmq4" 4 | 5 | // LogSign Used for logging client messages 6 | const LogSign string = "Client" 7 | 8 | // client A struct to represent the client structure 9 | type client struct { 10 | id int 11 | ip string 12 | port string 13 | notifyPort string 14 | trackerIP string 15 | trackerPorts []string 16 | socket *zmq4.Socket 17 | } 18 | 19 | // NewClient A constructor function for the client type 20 | func NewClient(_id int, _ip string, _port string, _notifyPort string, _trackerIP string, _trackerPorts []string) client { 21 | clientObj := client{ 22 | id: _id, 23 | ip: _ip, 24 | port: _port, 25 | notifyPort: _notifyPort, 26 | trackerIP: _trackerIP, 27 | trackerPorts: _trackerPorts, 28 | } 29 | 30 | return clientObj 31 | } 32 | -------------------------------------------------------------------------------- /Client/ClientUtil/work.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | fileutils "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/File" 5 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 6 | request "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Request" 7 | "bufio" 8 | "log" 9 | "os" 10 | "strconv" 11 | "strings" 12 | "time" 13 | 14 | "fmt" 15 | 16 | "github.com/pebbe/zmq4" 17 | ) 18 | 19 | // UploadHandler A function to handle a upload request 20 | func (clientObj *client) UploadHandler(req request.UploadRequest) { 21 | exists := fileutils.IsThere(req.FileName) 22 | if exists == false { 23 | logger.LogMsg(LogSign, clientObj.id, fmt.Sprintf("%s :no such file or directory", req.FileName)) 24 | return 25 | } 26 | 27 | serializeRequest := request.SerializeUpload(req) 28 | 29 | sendRequestStatus := clientObj.SendRequest(serializeRequest) 30 | logger.LogSuccess(sendRequestStatus, LogSign, clientObj.id, "Successfully sent request to tracker") 31 | if sendRequestStatus == false { 32 | return 33 | } 34 | 35 | response, 
receiveResponseStatus := clientObj.ReceiveResponse() 36 | logger.LogSuccess(receiveResponseStatus, LogSign, clientObj.id, fmt.Sprintf("Tracker response: %s", response)) 37 | if receiveResponseStatus == false { 38 | return 39 | } 40 | 41 | if response == "All data nodes are offline" { 42 | return 43 | } 44 | 45 | arr := strings.Fields(response) 46 | 47 | rsendRequestStatus := clientObj.RSendRequestToDN(arr[0], arr[1], serializeRequest) 48 | logger.LogSuccess(rsendRequestStatus, LogSign, clientObj.id, "Successfully sent request to DataNode") 49 | if rsendRequestStatus == false { 50 | return 51 | } 52 | 53 | sendDataStatus := clientObj.SendData(req, arr[0], arr[1]) 54 | logger.LogSuccess(sendDataStatus, LogSign, clientObj.id, "Successfully sent file to DataNode") 55 | if sendDataStatus == false { 56 | return 57 | } 58 | 59 | notification, notificationStatus := clientObj.ReceiveNotification() 60 | logger.LogSuccess(notificationStatus, LogSign, clientObj.id, fmt.Sprintf("Tracker confirmation: %s", notification)) 61 | if notificationStatus == false { 62 | return 63 | } 64 | } 65 | 66 | // DownloadHandler A function to handle a download request 67 | func (clientObj *client) DownloadHandler(reqObj request.UploadRequest) { 68 | serializeRequest := request.SerializeUpload(reqObj) 69 | 70 | sendRequestStatus := clientObj.SendRequest(serializeRequest) 71 | logger.LogSuccess(sendRequestStatus, LogSign, clientObj.id, "Successfully sent request to tracker") 72 | if sendRequestStatus == false { 73 | return 74 | } 75 | 76 | response, receiveResponseStatus := clientObj.ReceiveResponse() 77 | logger.LogSuccess(receiveResponseStatus, LogSign, clientObj.id, fmt.Sprintf("Tracker response: %s", response)) 78 | if receiveResponseStatus == false { 79 | return 80 | } 81 | 82 | if response == "404: File not found" { 83 | return 84 | } 85 | 86 | if response == "All source datanodes are offline, try again later" { 87 | return 88 | } 89 | 90 | arr := strings.Fields(response) 91 | chunkCount, _ := strconv.Atoi(arr[0]) 92 | dataNodeCount := (len(arr) - 1) / 2 93 | chunkEach := chunkCount / dataNodeCount 94 | start := 0 95 | blockID := 1 96 | currPort := 1 97 | 98 | done := make(chan bool) 99 | 100 | for i := 1; i < len(arr)-1; i += 2 { 101 | if i == len(arr)-2 { 102 | chunkEach += (chunkCount % dataNodeCount) 103 | } 104 | 105 | reqObj.ClientPort = reqObj.ClientPort[:3] + strconv.Itoa(i) 106 | serializeRequest := request.SerializeUpload(reqObj) 107 | 108 | req := serializeRequest + " " + strconv.Itoa(start) + " " + strconv.Itoa(chunkEach) 109 | 110 | start += chunkEach 111 | 112 | rsendRequestStatus := clientObj.RSendRequestToDN(arr[i], arr[i+1]+"1", req) 113 | logger.LogSuccess(rsendRequestStatus, LogSign, clientObj.id, "Successfully sent request to DataNode") 114 | if rsendRequestStatus == false { 115 | return 116 | } 117 | 118 | go clientObj.RecvPieces(reqObj, strconv.Itoa(blockID), chunkEach, done) 119 | 120 | blockID++ 121 | currPort++ 122 | } 123 | 124 | for i := 0; i < dataNodeCount; i++ { 125 | logger.LogMsg(LogSign, clientObj.id, fmt.Sprintf("Thread #%d finished %t", i+1, <-done)) 126 | } 127 | 128 | fileutils.AssembleFile(reqObj.FileName, reqObj.FileName[:len(reqObj.FileName)-4], reqObj.FileName[len(reqObj.FileName)-4:], dataNodeCount) 129 | } 130 | 131 | // DisplayHandler A function to handle the display request 132 | func (clientObj *client) DisplayHandler(req request.UploadRequest) { 133 | serializeRequest := request.SerializeUpload(req) 134 | 135 | sendRequestStatus := 
clientObj.SendRequest(serializeRequest) 136 | logger.LogSuccess(sendRequestStatus, LogSign, clientObj.id, "Successfully sent request to tracker") 137 | if sendRequestStatus == false { 138 | return 139 | } 140 | 141 | response, receiveResponseStatus := clientObj.ReceiveResponse() 142 | if receiveResponseStatus == false { 143 | return 144 | } 145 | 146 | if response == "No Files" { 147 | logger.LogMsg(LogSign, clientObj.id, "You have no files") 148 | return 149 | } 150 | 151 | clientFiles := strings.Fields(response) 152 | 153 | logger.LogMsg(LogSign, clientObj.id, "Your files:") 154 | 155 | for i := 0; i < len(clientFiles); i += 2 { 156 | fmt.Printf("%s %s MB\n", clientFiles[i], clientFiles[i+1]) 157 | } 158 | } 159 | 160 | func initPublisher(addr string) *zmq4.Socket { 161 | publisher, err := zmq4.NewSocket(zmq4.PUB) 162 | if err != nil { 163 | fmt.Print(err) 164 | return nil 165 | } 166 | publisher.SetLinger(0) 167 | publisher.Bind(addr) 168 | return publisher 169 | } 170 | 171 | func initSubscriber(addr string) *zmq4.Socket { 172 | subscriber, _ := zmq4.NewSocket(zmq4.SUB) 173 | subscriber.SetLinger(0) 174 | 175 | subscriber.Connect(addr) 176 | subscriber.SetSubscribe("") 177 | return subscriber 178 | } 179 | 180 | //AssignedSlaveListner : 181 | func AssignedSlaveListner(command *string, clientID *string, trackerIP string) { 182 | subscriber, _ := zmq4.NewSocket(zmq4.SUB) 183 | subscriber.SetLinger(0) 184 | defer subscriber.Close() 185 | slavelist := make([]*zmq4.Socket, 3) 186 | idSubs := make([]*zmq4.Socket, 3) 187 | //for i := range slavelist { 188 | // slavelist[i] = initPublisher("tcp://127.0.0.1:600" + strconv.Itoa(i+1)) 189 | //} 190 | slavelist[0] = initPublisher("tcp://127.0.0.1:600" + strconv.Itoa(0+1)) 191 | slavelist[1] = initPublisher("tcp://127.0.0.1:600" + strconv.Itoa(1+1)) 192 | slavelist[2] = initPublisher("tcp://127.0.0.1:600" + strconv.Itoa(2+1)) 193 | 194 | idSubs[0] = initSubscriber("tcp://127.0.0.1:8093") 195 | idSubs[1] = initSubscriber("tcp://127.0.0.1:8093") 196 | idSubs[2] = initSubscriber("tcp://127.0.0.1:8093") 197 | 198 | subscriber.Connect(trackerIP + "8092") 199 | subscriber.SetSubscribe("") 200 | 201 | //idSub := initSubscriber("tcp://127.0.0.1:8093") 202 | 203 | for { 204 | if strings.Split(*command, ":")[0] != "LOGIN" { 205 | continue 206 | } 207 | s, err := subscriber.Recv(0) 208 | if err != nil { 209 | log.Println(err) 210 | continue 211 | } 212 | fmt.Println("[AssignedSlaveListner] Recieved Slave info : " + s) 213 | 214 | fmt.Println("[AssignedSlaveListner] Sending Query to Assigned Slave : " + *command) 215 | 216 | sID, _ := strconv.ParseInt(s, 10, 64) 217 | 218 | slavelist[sID-1].Send(*command, 0) 219 | 220 | *clientID, err = idSubs[sID-1].Recv(0) 221 | if err == nil { 222 | fmt.Println("[AssignedSlaveListner] Recieved ID = " + *clientID) 223 | } 224 | //*command = "" 225 | 226 | } 227 | } 228 | 229 | //GetClientID : 230 | func GetClientID() string { 231 | 232 | trackerIP := "tcp://127.0.0.1:" 233 | command := "" 234 | clientID := "" 235 | publisher := initPublisher(trackerIP + "9092") 236 | 237 | defer publisher.Close() 238 | 239 | go AssignedSlaveListner(&command, &clientID, trackerIP) 240 | reader := bufio.NewReader(os.Stdin) 241 | 242 | for { 243 | fmt.Println("Your ID : " + clientID) 244 | if clientID != "" && clientID != "-15" { 245 | return clientID 246 | } 247 | 248 | fmt.Print("LOGIN/REGISTER?(L/R)") 249 | command, _ = reader.ReadString('\n') 250 | if strings.Compare(command, "R\n") == 0 { 251 | fmt.Println("ENTER REGISTER USER 
INFORMATION") 252 | fmt.Print("name :") 253 | name, _ := reader.ReadString('\n') 254 | fmt.Print("email :") 255 | email, _ := reader.ReadString('\n') 256 | fmt.Print("password :") 257 | password, _ := reader.ReadString('\n') 258 | 259 | name = strings.Replace(name, "\n", "", -1) 260 | email = strings.Replace(email, "\n", "", -1) 261 | password = strings.Replace(password, "\n", "", -1) 262 | command = "REGISTER:" + name + ";" + email + ";" + password 263 | publisher.Send("REGISTER:"+name+";"+email+";"+password, 0) 264 | fmt.Println("[MainThread]", "REGISTER:"+name+";"+email+";"+password) 265 | 266 | } else { 267 | fmt.Println("ENTER LOGIN USER INFORMATION") 268 | fmt.Print("email :") 269 | email, _ := reader.ReadString('\n') 270 | fmt.Print("password :") 271 | password, _ := reader.ReadString('\n') 272 | 273 | email = strings.Replace(email, "\n", "", -1) 274 | password = strings.Replace(password, "\n", "", -1) 275 | command = "LOGIN:" + email + ";" + password 276 | publisher.Send("LOGIN:"+email+";"+password, 0) 277 | fmt.Println("[MainThread]", "LOGIN:"+email+";"+password) 278 | time.Sleep(1 * time.Second) 279 | 280 | } 281 | 282 | } 283 | 284 | } 285 | -------------------------------------------------------------------------------- /Client/Launcher/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | client "Distributed-Video-Processing-Cluster/Client/ClientUtil" 5 | constants "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Constants" 6 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 7 | request "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Request" 8 | "fmt" 9 | "os" 10 | "strconv" 11 | ) 12 | 13 | func main() { 14 | //Tracker data 15 | trackerIP := constants.TrackerIP 16 | trackerPorts := constants.TrackerReqPorts 17 | 18 | //Client data 19 | clientIP := constants.ClientIP 20 | clientID, _ := strconv.Atoi(client.GetClientID()) //strconv.Atoi(os.Args[1]) 21 | clientPort := os.Args[2] 22 | clientNotifyPort := os.Args[2] + "7" 23 | 24 | clientObj := client.NewClient(clientID, clientIP, clientPort+"0", clientNotifyPort, trackerIP, trackerPorts) 25 | clientObj.EstablishConnection() 26 | 27 | logger.LogMsg(client.LogSign, clientID, "Successfully launched") 28 | 29 | //Request data 30 | requestID := 1 31 | fileName := "" 32 | requestType := "" 33 | work := 1 34 | 35 | for { 36 | logger.LogMsg(client.LogSign, clientID, "Press 0 to quit, 1 to continue") 37 | fmt.Scanf("%d", &work) 38 | 39 | if work == 0 { 40 | break 41 | } 42 | 43 | logger.LogMsg(client.LogSign, clientID, "Request type = ") 44 | fmt.Scanf("%s", &requestType) 45 | 46 | if requestType == "up" { 47 | logger.LogMsg(client.LogSign, clientID, "File name = ") 48 | fmt.Scanf("%s", &fileName) 49 | 50 | requestObj := request.UploadRequest{ 51 | ID: requestID, 52 | Type: request.Upload, 53 | ClientID: clientID, 54 | ClientIP: clientIP, 55 | ClientPort: clientPort + "0", 56 | FileName: fileName, 57 | } 58 | 59 | clientObj.UploadHandler(requestObj) 60 | 61 | } else if requestType == "dwn" { 62 | logger.LogMsg(client.LogSign, clientID, "File name = ") 63 | fmt.Scanf("%s", &fileName) 64 | 65 | requestObj := request.UploadRequest{ 66 | ID: requestID, 67 | Type: request.Download, 68 | ClientID: clientID, 69 | ClientIP: clientIP, 70 | ClientPort: clientPort + "0", 71 | FileName: fileName, 72 | } 73 | 74 | clientObj.DownloadHandler(requestObj) 75 | 76 | } else if requestType == "ls" { 77 | requestObj 
:= request.UploadRequest{ 78 | ID: requestID, 79 | Type: request.Display, 80 | ClientID: clientID, 81 | ClientIP: clientIP, 82 | ClientPort: clientPort + "0", 83 | FileName: "dummy", 84 | } 85 | 86 | clientObj.DisplayHandler(requestObj) 87 | } 88 | 89 | requestID++ 90 | } 91 | 92 | clientObj.CloseConnection() 93 | } 94 | -------------------------------------------------------------------------------- /Distributed-Database/client/client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "log" 7 | "os" 8 | "strconv" 9 | "strings" 10 | "time" 11 | 12 | "github.com/pebbe/zmq4" 13 | ) 14 | 15 | func initPublisher(addr string) *zmq4.Socket { 16 | publisher, err := zmq4.NewSocket(zmq4.PUB) 17 | if err != nil { 18 | fmt.Print(err) 19 | return nil 20 | } 21 | publisher.SetLinger(0) 22 | publisher.Bind(addr) 23 | return publisher 24 | } 25 | 26 | func initSubscriber(addr string) *zmq4.Socket { 27 | subscriber, _ := zmq4.NewSocket(zmq4.SUB) 28 | subscriber.SetLinger(0) 29 | 30 | subscriber.Connect(addr) 31 | subscriber.SetSubscribe("") 32 | return subscriber 33 | } 34 | 35 | //AssignedSlaveListner : 36 | func AssignedSlaveListner(command *string, clientID *string, trackerIP string) { 37 | subscriber, _ := zmq4.NewSocket(zmq4.SUB) 38 | subscriber.SetLinger(0) 39 | defer subscriber.Close() 40 | slavelist := make([]*zmq4.Socket, 3) 41 | idSubs := make([]*zmq4.Socket, 3) 42 | //for i := range slavelist { 43 | // slavelist[i] = initPublisher("tcp://127.0.0.1:600" + strconv.Itoa(i+1)) 44 | //} 45 | slavelist[0] = initPublisher("tcp://127.0.0.1:600" + strconv.Itoa(0+1)) 46 | slavelist[1] = initPublisher("tcp://127.0.0.1:600" + strconv.Itoa(1+1)) 47 | slavelist[2] = initPublisher("tcp://127.0.0.1:600" + strconv.Itoa(2+1)) 48 | 49 | idSubs[0] = initSubscriber("tcp://127.0.0.1:8093") 50 | idSubs[1] = initSubscriber("tcp://127.0.0.1:8093") 51 | idSubs[2] = initSubscriber("tcp://127.0.0.1:8093") 52 | 53 | subscriber.Connect(trackerIP + "8092") 54 | subscriber.SetSubscribe("") 55 | 56 | //idSub := initSubscriber("tcp://127.0.0.1:8093") 57 | 58 | for { 59 | if strings.Split(*command, ":")[0] != "LOGIN" { 60 | continue 61 | } 62 | s, err := subscriber.Recv(0) 63 | if err != nil { 64 | log.Println(err) 65 | continue 66 | } 67 | fmt.Println("[AssignedSlaveListner] Recieved Slave info : " + s) 68 | 69 | fmt.Println("[AssignedSlaveListner] Sending Query to Assigned Slave : " + *command) 70 | 71 | sID, _ := strconv.ParseInt(s, 10, 64) 72 | 73 | slavelist[sID-1].Send(*command, 0) 74 | 75 | *clientID, err = idSubs[sID-1].Recv(0) 76 | if err == nil { 77 | fmt.Println("[AssignedSlaveListner] Recieved ID = " + *clientID) 78 | } 79 | //*command = "" 80 | 81 | } 82 | } 83 | 84 | func getClientID() string { 85 | 86 | trackerIP := "tcp://127.0.0.1:" 87 | command := "" 88 | clientID := "" 89 | publisher := initPublisher(trackerIP + "9092") 90 | 91 | defer publisher.Close() 92 | 93 | go AssignedSlaveListner(&command, &clientID, trackerIP) 94 | reader := bufio.NewReader(os.Stdin) 95 | 96 | for { 97 | fmt.Println("Your ID : " + clientID) 98 | if clientID != "" && clientID != "-15" { 99 | return clientID 100 | } 101 | 102 | fmt.Print("LOGIN/REGISTER?(L/R)") 103 | command, _ = reader.ReadString('\n') 104 | if strings.Compare(command, "R\n") == 0 { 105 | fmt.Println("ENTER REGISTER USER INFORMATION") 106 | fmt.Print("name :") 107 | name, _ := reader.ReadString('\n') 108 | fmt.Print("email :") 109 | email, _ := reader.ReadString('\n') 110 | 
fmt.Print("password :") 111 | password, _ := reader.ReadString('\n') 112 | 113 | name = strings.Replace(name, "\n", "", -1) 114 | email = strings.Replace(email, "\n", "", -1) 115 | password = strings.Replace(password, "\n", "", -1) 116 | command = "REGISTER:" + name + ";" + email + ";" + password 117 | publisher.Send("REGISTER:"+name+";"+email+";"+password, 0) 118 | fmt.Println("[MainThread]", "REGISTER:"+name+";"+email+";"+password) 119 | 120 | } else { 121 | fmt.Println("ENTER LOGIN USER INFORMATION") 122 | fmt.Print("email :") 123 | email, _ := reader.ReadString('\n') 124 | fmt.Print("password :") 125 | password, _ := reader.ReadString('\n') 126 | 127 | email = strings.Replace(email, "\n", "", -1) 128 | password = strings.Replace(password, "\n", "", -1) 129 | command = "LOGIN:" + email + ";" + password 130 | publisher.Send("LOGIN:"+email+";"+password, 0) 131 | fmt.Println("[MainThread]", "LOGIN:"+email+";"+password) 132 | time.Sleep(1 * time.Second) 133 | 134 | } 135 | 136 | } 137 | 138 | } 139 | 140 | func main() { 141 | getClientID() 142 | } 143 | -------------------------------------------------------------------------------- /Distributed-Database/common/common.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/pebbe/zmq4" 8 | ) 9 | 10 | func initSubscriber(addr string) *zmq4.Socket { 11 | subscriber, _ := zmq4.NewSocket(zmq4.SUB) 12 | subscriber.SetLinger(0) 13 | 14 | subscriber.Connect(addr) 15 | subscriber.SetSubscribe("") 16 | return subscriber 17 | } 18 | 19 | func initPublisher(addr string) *zmq4.Socket { 20 | publisher, err := zmq4.NewSocket(zmq4.PUB) 21 | if err != nil { 22 | fmt.Print(err) 23 | return nil 24 | } 25 | publisher.SetLinger(0) 26 | publisher.Bind(addr) 27 | return publisher 28 | } 29 | func commandDeseralizer(s string) (string, string) { 30 | fields := strings.Split(s, ":") 31 | if len(fields) < 2 { 32 | return "", "" 33 | } 34 | return fields[0], fields[1] 35 | } 36 | func commandDataDeseralizer(s string) (string, string, string) { 37 | fields := strings.Split(s, ";") 38 | if len(fields) < 3 { 39 | if len(fields) < 2 { 40 | return fields[0], "", "" 41 | } 42 | return fields[0], fields[1], "" 43 | } 44 | return fields[0], fields[1], fields[2] 45 | } 46 | func registerUser(name string, email string, password string) bool { 47 | fmt.Println("[RegisterUser] Saving user data ..") 48 | fmt.Println("[RegisterUser] Success") 49 | return true 50 | } 51 | -------------------------------------------------------------------------------- /Distributed-Database/slave/slave.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "log" 7 | "os" 8 | "strconv" 9 | "strings" 10 | "time" 11 | 12 | "github.com/joho/godotenv" 13 | _ "github.com/lib/pq" // here 14 | "github.com/pebbe/zmq4" 15 | ) 16 | 17 | //======================= Common Functions ================== 18 | 19 | func initSubscriber(addr string) *zmq4.Socket { 20 | subscriber, _ := zmq4.NewSocket(zmq4.SUB) 21 | subscriber.SetLinger(0) 22 | 23 | subscriber.Connect(addr) 24 | subscriber.SetSubscribe("") 25 | return subscriber 26 | } 27 | 28 | func initPublisher(addr string) *zmq4.Socket { 29 | publisher, err := zmq4.NewSocket(zmq4.PUB) 30 | if err != nil { 31 | fmt.Print(err) 32 | return nil 33 | } 34 | publisher.SetLinger(0) 35 | publisher.Bind(addr) 36 | return publisher 37 | } 38 | 39 | func 
commandDataDeseralizer(s string) (string, string, string) { 40 | fields := strings.Split(s, ";") 41 | if len(fields) < 3 { 42 | if len(fields) < 2 { 43 | return fields[0], "", "" 44 | } 45 | 46 | return fields[0], fields[1], "" 47 | } 48 | return fields[0], fields[1], fields[2] 49 | } 50 | func registerUser(name string, email string, password string, db *sql.DB) bool { 51 | sqlStatement := `INSERT INTO clients (name, email, password) VALUES ($1,$2,$3);` 52 | fmt.Println("[RegisterUser] Saving user data ..") 53 | _, err := db.Exec(sqlStatement, name, email, password) 54 | if err != nil { 55 | log.Println(err) 56 | return false 57 | } else { 58 | fmt.Println("[RegisterUser] Success") 59 | } 60 | 61 | return true 62 | } 63 | func loginUser(email string, password string, db *sql.DB) int { 64 | sqlStatement := `SELECT * FROM clients WHERE email=$1 and password=$2;` 65 | 66 | var clientID int 67 | var clientName, clientEmail, clientPassword string 68 | 69 | row := db.QueryRow(sqlStatement, email, password) 70 | switch err := row.Scan(&clientID, &clientName, &clientEmail, &clientPassword); err { 71 | case sql.ErrNoRows: 72 | return -1 73 | case nil: 74 | return clientID 75 | default: 76 | fmt.Println(err) 77 | return -1 78 | 79 | } 80 | } 81 | 82 | func connectDB() *sql.DB { 83 | err := godotenv.Load() 84 | if err != nil { 85 | log.Fatal("[DB]Error loading .env file") 86 | } 87 | 88 | host := os.Getenv("HOST") 89 | port := os.Getenv("PORT") 90 | user := os.Getenv("USER_NAME") 91 | password := os.Getenv("PASSWORD") 92 | dbname := os.Getenv("DB_NAME") 93 | 94 | psqlInfo := fmt.Sprintf("host=%s port=%s user=%s "+ 95 | "password=%s dbname=%s sslmode=disable", 96 | host, port, user, password, dbname) 97 | db, err := sql.Open("postgres", psqlInfo) 98 | if err != nil { 99 | panic(err) 100 | } 101 | 102 | err = db.Ping() 103 | if err != nil { 104 | panic(err) 105 | } 106 | 107 | fmt.Println("[DB] Successfully connected!") 108 | return db 109 | } 110 | 111 | //======================= Common Functions ================== 112 | 113 | //ReadQueryListner : 114 | func ReadQueryListner(status *string, db *sql.DB, id int, clientIP string) { 115 | 116 | subscriber := initSubscriber(clientIP + "600" + strconv.Itoa(id)) 117 | idPub := initPublisher(clientIP + "8093") 118 | defer subscriber.Close() 119 | 120 | for { 121 | s, err := subscriber.Recv(0) 122 | if err != nil { 123 | log.Println(err) 124 | continue 125 | } 126 | fmt.Println("[ReadQueryListner] recieved", s) 127 | email, password, _ := commandDataDeseralizer(strings.Split(s, ":")[1]) 128 | fmt.Println("[ReadQueryListner] " + email) 129 | fmt.Println("[ReadQueryListner] " + password) 130 | 131 | id := loginUser(email, password, db) 132 | if id > 0 { 133 | idPub.Send(strconv.Itoa(id), 0) 134 | fmt.Println("[ReadQueryListner] access granted ") 135 | } else { 136 | idPub.Send("-15", 0) 137 | fmt.Println("[ReadQueryListner] access denied") 138 | } 139 | 140 | } 141 | } 142 | 143 | //TrackerUpdateListner : 144 | func TrackerUpdateListner(status *string, db *sql.DB, id int, trackerIP string) { 145 | subscriber := initSubscriber(trackerIP + "500" + strconv.Itoa(id)) 146 | defer subscriber.Close() 147 | 148 | for { 149 | s, err := subscriber.Recv(0) 150 | if err != nil { 151 | log.Println(err) 152 | continue 153 | } 154 | fmt.Println("[TrackerUpdateListner] rec", s) 155 | name, email, password := commandDataDeseralizer(s) 156 | registerUser(name, email, password, db) 157 | 158 | } 159 | } 160 | 161 | //HeartBeatPublisher : 162 | func HeartBeatPublisher(id int, 
trackerIP string) { 163 | publisher := initPublisher(trackerIP + "300" + strconv.Itoa(id)) 164 | 165 | defer publisher.Close() 166 | 167 | publisher.Bind(trackerIP + "300" + strconv.Itoa(id)) 168 | 169 | for range time.Tick(time.Second * 2) { 170 | publisher.Send("Heartbeat", 0) 171 | log.Println("send", "Heartbeat:") 172 | } 173 | } 174 | 175 | func main() { 176 | clientIP := "tcp://127.0.0.1:" 177 | trackerIP := "tcp://127.0.0.1:" 178 | 179 | db := connectDB() 180 | defer db.Close() 181 | status := "Avaliable" 182 | id := 2 183 | go HeartBeatPublisher(id+1, trackerIP) 184 | go TrackerUpdateListner(&status, db, id+1, trackerIP) 185 | go ReadQueryListner(&status, db, id+1, clientIP) 186 | for { 187 | 188 | } 189 | } 190 | -------------------------------------------------------------------------------- /Distributed-Database/tracker/tracker.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "log" 7 | "math/rand" 8 | "os" 9 | "strconv" 10 | "strings" 11 | "time" 12 | 13 | _ "github.com/lib/pq" // here 14 | 15 | "github.com/joho/godotenv" 16 | "github.com/pebbe/zmq4" 17 | ) 18 | 19 | //======================= Common Functions ================== 20 | 21 | func initSubscriber(addr string) *zmq4.Socket { 22 | subscriber, _ := zmq4.NewSocket(zmq4.SUB) 23 | subscriber.SetLinger(0) 24 | 25 | subscriber.Connect(addr) 26 | subscriber.SetSubscribe("") 27 | return subscriber 28 | } 29 | 30 | func initPublisher(addr string) *zmq4.Socket { 31 | publisher, err := zmq4.NewSocket(zmq4.PUB) 32 | if err != nil { 33 | fmt.Print(err) 34 | return nil 35 | } 36 | publisher.SetLinger(0) 37 | publisher.Bind(addr) 38 | return publisher 39 | } 40 | func commandDeseralizer(s string) (string, string) { 41 | fields := strings.Split(s, ":") 42 | if len(fields) < 2 { 43 | return "", "" 44 | } 45 | return fields[0], fields[1] 46 | } 47 | func commandDataDeseralizer(s string) (string, string, string) { 48 | fields := strings.Split(s, ";") 49 | if len(fields) < 3 { 50 | if len(fields) < 2 { 51 | return fields[0], "", "" 52 | } 53 | return fields[0], fields[1], "" 54 | } 55 | return fields[0], fields[1], fields[2] 56 | } 57 | func registerUser(name string, email string, password string, db *sql.DB) int { 58 | sqlStatement := `INSERT INTO clients (name, email, password) VALUES ($1,$2,$3);` 59 | fmt.Println("[RegisterUser] Saving user data ..") 60 | _, err := db.Exec(sqlStatement, name, email, password) 61 | if err != nil { 62 | log.Println(err) 63 | return -1 64 | } else { 65 | fmt.Println("[RegisterUser] Success") 66 | return loginUser(name, password, db) 67 | } 68 | 69 | } 70 | func loginUser(name string, password string, db *sql.DB) int { 71 | sqlStatement := `SELECT * FROM clients WHERE email=$1 and password=$2;` 72 | 73 | var clientID int 74 | var clientName, clientEmail, clientPassword string 75 | 76 | row := db.QueryRow(sqlStatement, name, password) 77 | switch err := row.Scan(&clientID, &clientName, &clientEmail, &clientPassword); err { 78 | case sql.ErrNoRows: 79 | return -1 80 | case nil: 81 | return clientID 82 | default: 83 | fmt.Println(err) 84 | return -1 85 | 86 | } 87 | } 88 | func getOnlineSlaves(BeatStamps []time.Time) []int { 89 | onlineSlaves := make([]int, 0) 90 | 91 | for i := range BeatStamps { 92 | delay := time.Now().Sub(BeatStamps[i]) / 1000000000 93 | if delay > 4 { 94 | fmt.Println("[OnlineSlavesFetcher] Slave[" + strconv.Itoa(i) + "] Disqualified for been away(+4 seconds)") 95 | } else { 96 | 
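// Last heartbeat arrived within the 4-second liveness window (slaves publish every
// 2 seconds, see HeartBeatPublisher in slave.go), so this slave stays in the online set.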
onlineSlaves = append(onlineSlaves, i) 97 | } 98 | 99 | } 100 | return onlineSlaves 101 | } 102 | 103 | func connectDB() *sql.DB { 104 | err := godotenv.Load() 105 | if err != nil { 106 | log.Fatal("[DB]Error loading .env file") 107 | } 108 | 109 | host := os.Getenv("HOST") 110 | port := os.Getenv("PORT") 111 | user := os.Getenv("USER_NAME") 112 | password := os.Getenv("PASSWORD") 113 | dbname := os.Getenv("DB_NAME") 114 | /* 115 | 116 | db, err := sql.Open("mysql", "root:.1@/os_db") 117 | */ 118 | psqlInfo := fmt.Sprintf("host=%s port=%s user=%s "+ 119 | "password=%s dbname=%s sslmode=disable", 120 | host, port, user, password, dbname) 121 | db, err := sql.Open("postgres", psqlInfo) 122 | if err != nil { 123 | panic(err) 124 | } 125 | 126 | err = db.Ping() 127 | if err != nil { 128 | panic(err) 129 | } 130 | 131 | fmt.Println("[DB] Successfully connected!") 132 | return db 133 | } 134 | 135 | //======================= Common Functions ================== 136 | 137 | // ExecuteQuery A function to execute queries that don't return any rows 138 | func ExecuteQuery(db *sql.DB, sqlStatement string) bool { 139 | _, err := db.Exec(sqlStatement) 140 | 141 | return (err == nil) 142 | } 143 | 144 | // Migrate A function to perform the DB migration 145 | func Migrate(db *sql.DB) { 146 | migrationStatement := ` 147 | DROP TABLE clients; 148 | CREATE TABLE clients ( 149 | id SERIAL PRIMARY KEY, 150 | email varchar(60) UNIQUE, 151 | password varchar(60) NOT NULL, 152 | name varchar(60) NOT NULL 153 | ); 154 | 155 | ` 156 | ExecuteQuery(db, migrationStatement) 157 | } 158 | 159 | //ListenToClientReq : 160 | func ListenToClientReq(InsertionsStack [][]string, BeatStamp []time.Time, slaveIPs []string, trackerIP string) { 161 | db := connectDB() 162 | defer db.Close() 163 | Migrate(db) 164 | clientSubscriber := initSubscriber(trackerIP + "9092") 165 | 166 | defer clientSubscriber.Close() 167 | 168 | clientPublisher := initPublisher(trackerIP + "8092") 169 | 170 | defer clientPublisher.Close() 171 | 172 | for { 173 | s, err := clientSubscriber.Recv(0) 174 | if err != nil { 175 | log.Println(err) 176 | continue 177 | } 178 | 179 | commandType, commandData := commandDeseralizer(s) 180 | fmt.Println("[ClientSubscriber] rec", commandType) 181 | 182 | if commandType == "" { 183 | fmt.Println("[ClientSubscriber] Dropping Message as invalid :" + s) 184 | continue 185 | } 186 | 187 | onlineSlaves := getOnlineSlaves(BeatStamp) 188 | rand.Seed(time.Now().Unix()) 189 | chosenSlave := -1 190 | 191 | if len(onlineSlaves) > 0 { 192 | chosenSlave = onlineSlaves[rand.Intn(len(onlineSlaves))] 193 | } 194 | 195 | if strings.Compare(commandType, "REGISTER") == 0 { 196 | fmt.Println("[ClientSubscriber] Sending Command Data to DB Execution Layer") 197 | name, email, password := commandDataDeseralizer(commandData) 198 | registerUser(name, email, password, db) 199 | 200 | fmt.Println("[ClientSubscriber] Adding InsertionQuery to all slaves :", commandData) 201 | 202 | for i := range InsertionsStack { 203 | InsertionsStack[i] = append(InsertionsStack[i], commandData) 204 | } 205 | 206 | } else if strings.Compare(commandType, "LOGIN") == 0 { 207 | if chosenSlave != -1 { 208 | fmt.Println("[ClientSubscriber] Assigning ReadQuery to slave ["+strconv.Itoa(chosenSlave)+"] :", s) 209 | clientPublisher.Send(strconv.Itoa(1+chosenSlave), 0) 210 | } 211 | 212 | } 213 | 214 | } 215 | } 216 | 217 | //ListenToHeartBeat : 218 | func ListenToHeartBeat(InsertionsStack [][]string, id int, BeatStamp []time.Time, slaveIP string) { 219 | 220 | 
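// Per-slave heartbeat loop: every received beat refreshes BeatStamp[id], and any
// INSERT commands queued in InsertionsStack[id] while the slave was away are replayed
// to it over its update publisher (port "500" + id+1) before the stack is cleared.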
slaveSubscriber := initSubscriber(slaveIP + "300" + strconv.Itoa(id+1)) 221 | defer slaveSubscriber.Close() 222 | slavePublisher := initPublisher(slaveIP + "500" + strconv.Itoa(id+1)) 223 | 224 | defer slavePublisher.Close() 225 | 226 | for { 227 | s, err := slaveSubscriber.Recv(0) 228 | if err != nil { 229 | log.Println(err) 230 | continue 231 | } 232 | fmt.Println("[SlaveSubscriber] rec", s) 233 | BeatStamp[id] = time.Now() 234 | stackSize := len(InsertionsStack[id]) 235 | 236 | if stackSize > 0 { 237 | fmt.Println("[HeartBeatSubscriber] Updating slave[" + strconv.Itoa(id) + "] : ") 238 | 239 | for i := range InsertionsStack[id] { 240 | fmt.Println("[HeartBeatSubscriber] Sending Query[" + strconv.Itoa(id) + "] : " + InsertionsStack[id][i]) 241 | fmt.Println("[DEBUG]" + slaveIP + "500" + strconv.Itoa(id+1)) 242 | slavePublisher.Send(InsertionsStack[id][i], 0) 243 | } 244 | InsertionsStack[id] = InsertionsStack[id][:0] 245 | } else { 246 | fmt.Println("[HeartBeatSubscriber] Slave[" + strconv.Itoa(id) + "] is up to date") 247 | } 248 | 249 | } 250 | } 251 | 252 | func main() { 253 | InsertionsStack := make([][]string, 3) 254 | BeatStamp := make([]time.Time, 3) 255 | slaves := make([]string, 3) 256 | //clientIP := "tcp://127.0.0.1:" 257 | trackerIP := "tcp://127.0.0.1:" 258 | fmt.Println() 259 | 260 | slaves[0] = "tcp://127.0.0.1:" 261 | slaves[1] = "tcp://127.0.0.1:" 262 | slaves[2] = "tcp://127.0.0.1:" 263 | 264 | for i := range InsertionsStack { 265 | InsertionsStack[i] = make([]string, 0) 266 | } 267 | go ListenToClientReq(InsertionsStack, BeatStamp, slaves, trackerIP) 268 | for i := range slaves { 269 | go ListenToHeartBeat(InsertionsStack, i, BeatStamp, slaves[i]) 270 | } 271 | 272 | for { 273 | 274 | } 275 | } 276 | -------------------------------------------------------------------------------- /Distributed-File-System/DataNode/DNLauncher/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | datanode "Distributed-Video-Processing-Cluster/Distributed-File-System/DataNode/Utils" 5 | "fmt" 6 | "log" 7 | "os" 8 | "strconv" 9 | ) 10 | 11 | var trackerIP = "" 12 | var trackerDNPorts = []string{"", ""} 13 | 14 | func main() { 15 | //Receive command line params 16 | args := os.Args 17 | fmt.Println(args[1:]) 18 | 19 | //Tracker Data 20 | trackerIP = args[1] 21 | trackerDNPorts[0] = args[2] 22 | trackerDNPorts[1] = args[3] 23 | 24 | //Data Node Data 25 | ip := args[4] 26 | id, _ := strconv.Atoi(args[5]) 27 | reqPort := args[6] 28 | upPort := args[7] 29 | downPort := args[8] 30 | repUpPort := args[9] 31 | repDownPort := args[10] 32 | 33 | dataNodeObj := datanode.NewDataNode(id, ip, reqPort, upPort, downPort, 34 | repUpPort, repDownPort, trackerIP, trackerDNPorts) 35 | 36 | log.Println(datanode.LogSignDN, "#", id, "Successfully launched") 37 | 38 | dataNodeObj.ListenToClients() 39 | } 40 | -------------------------------------------------------------------------------- /Distributed-File-System/DataNode/Launcher/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | datanode "Distributed-Video-Processing-Cluster/Distributed-File-System/DataNode/Utils" 5 | constants "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Constants" 6 | "log" 7 | "os" 8 | "os/exec" 9 | "strconv" 10 | "time" 11 | ) 12 | 13 | //DataNode Launcher Data 14 | var dataNodeLauncherIP = constants.DataNodeLauncherIP 15 | var heartbeatInterval = time.Second 16 | 17 | 
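// Port layout used by launchDataNodes below: for a launcher port prefix P, data node #1
// listens on P11-P15 and data node #2 on P21-P25 (request, upload, download,
// replication-out, replication-in), while the launcher publishes heartbeats on P00.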
//Tracker Data 18 | var trackerIP = constants.TrackerIP 19 | 20 | var trackerIPsPort = constants.TrackerIPsPort 21 | 22 | var trackerDNPorts = constants.TrackerReqPorts 23 | 24 | func getTrackerParams() string { 25 | trackerParams := trackerIP + " " + trackerDNPorts[0] + " " + trackerDNPorts[1] 26 | 27 | return trackerParams 28 | } 29 | 30 | func launchDataNodes(launcherID string, launcherPort string) { 31 | log.Println(datanode.LogSignL, "Launching Data Nodes Processes") 32 | 33 | reqPorts := []string{launcherPort + "11", launcherPort + "21"} 34 | upPorts := []string{launcherPort + "12", launcherPort + "22"} 35 | downPorts := []string{launcherPort + "13", launcherPort + "23"} 36 | repUpPorts := []string{launcherPort + "14", launcherPort + "24"} 37 | repDownPorts := []string{launcherPort + "15", launcherPort + "25"} 38 | 39 | path := "../DNLauncher/main.go" 40 | 41 | for i := 0; i < 2; i++ { 42 | params := getTrackerParams() + " " + dataNodeLauncherIP + " " + launcherID + " " + 43 | reqPorts[i] + " " + upPorts[i] + " " + downPorts[i] + " " + repUpPorts[i] + " " + repDownPorts[i] 44 | command := "go run " + path + " " + params 45 | 46 | cmd := exec.Command("gnome-terminal", "--title=DataNode"+launcherID, "-e", command) 47 | err := cmd.Start() 48 | 49 | if err != nil { 50 | log.Println(datanode.LogSignL, "Error starting Data Node Process#", i+1) 51 | return 52 | } 53 | 54 | log.Println(datanode.LogSignL, "Launched Data Node Process#", i+1) 55 | } 56 | 57 | log.Println(datanode.LogSignL, "is all set!") 58 | } 59 | 60 | func getHandshake(launcherID string, launcherPort string) string { 61 | handshake := dataNodeLauncherIP + " " + launcherID + " " + launcherPort 62 | 63 | return handshake 64 | } 65 | 66 | func main() { 67 | //Receive command line params 68 | args := os.Args 69 | 70 | //DataNode Launcher Params 71 | dataNodeLauncherID, _ := strconv.Atoi(args[1]) 72 | dataNodeLauncherPort := args[2] //Sent to the tracker as handshake 73 | 74 | dataNodeLauncherObj := datanode.NewDataNodeLauncher(dataNodeLauncherID, dataNodeLauncherIP, trackerIP, 75 | heartbeatInterval, dataNodeLauncherPort+"00", trackerIPsPort) 76 | 77 | log.Println(datanode.LogSignL, "#", dataNodeLauncherID, "Successfully launched") 78 | 79 | launchDataNodes(args[1], dataNodeLauncherPort) 80 | 81 | dataNodeLauncherObj.SendHandshake(getHandshake(args[1], dataNodeLauncherPort)) 82 | 83 | dataNodeLauncherObj.SendHeartBeat() 84 | } 85 | -------------------------------------------------------------------------------- /Distributed-File-System/DataNode/Utils/comm.go: -------------------------------------------------------------------------------- 1 | package datanode 2 | 3 | import ( 4 | comm "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Comm" 5 | fileutils "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/File" 6 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 7 | request "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Request" 8 | "fmt" 9 | "path/filepath" 10 | "strconv" 11 | "time" 12 | 13 | "github.com/pebbe/zmq4" 14 | ) 15 | 16 | // establishPublisherConnection A function to establish a TCP connection for publishing heartbeats 17 | func (dataNodeLauncherObj *dataNodeLauncher) establishPublisherConnection() { 18 | publisher, ok := comm.Init(zmq4.PUB, "") 19 | dataNodeLauncherObj.publisherSocket = publisher 20 | logger.LogFail(ok, LogSignL, dataNodeLauncherObj.id, "establishPublisherConnection(): Failed to acquire Publisher Socket") 21 | 22 
| var connectionString = []string{comm.GetConnectionString(dataNodeLauncherObj.ip, dataNodeLauncherObj.heartbeatPort)} 23 | comm.Bind(dataNodeLauncherObj.publisherSocket, connectionString) 24 | } 25 | 26 | // SendHandshake A function the datanode launcher uses to send the IPs and the ID of all 3 processes (HB and normal DNs (client ports)) 27 | func (dataNodeLauncherObj dataNodeLauncher) SendHandshake(handshake string) { 28 | socket, ok := comm.Init(zmq4.REQ, "") 29 | defer socket.Close() 30 | logger.LogFail(ok, LogSignL, dataNodeLauncherObj.id, "SendHandshake(): Failed to acquire request Socket") 31 | 32 | var connectionString = []string{comm.GetConnectionString(dataNodeLauncherObj.trackerIP, dataNodeLauncherObj.trackerIPsPort)} 33 | comm.Connect(socket, connectionString) 34 | 35 | sendStatus := false 36 | 37 | for sendStatus != true { 38 | logger.LogMsg(LogSignL, dataNodeLauncherObj.id, "Sending handshake") 39 | 40 | sendStatus = comm.SendString(socket, handshake) 41 | logger.LogFail(sendStatus, LogSignL, dataNodeLauncherObj.id, "SendHandshake(): Failed to connect to Tracker ... Trying again") 42 | } 43 | 44 | logger.LogMsg(LogSignL, dataNodeLauncherObj.id, "Successfully connected to Tracker") 45 | } 46 | 47 | // sendReplicationRequest A function to send replication request to target machine [Timeout after 30 secs] 48 | func (datanodeObj *dataNode) sendReplicationRequest(req request.ReplicationRequest) bool { 49 | socket, ok := comm.Init(zmq4.REQ, "") 50 | defer socket.Close() 51 | logger.LogFail(ok, LogSignDN, datanodeObj.id, "sendReplicationRequest(): Failed to acquire request Socket") 52 | 53 | var connectionString = []string{comm.GetConnectionString(req.TargetNodeIP, req.TargetNodeBasePort+"21")} 54 | comm.Connect(socket, connectionString) 55 | 56 | var status = false 57 | 58 | sendChan := make(chan bool, 1) 59 | go func() { 60 | status = comm.SendString(socket, request.SerializeReplication(req)) 61 | sendChan <- status 62 | }() 63 | select { 64 | case <-sendChan: 65 | case <-time.After(30 * time.Second): 66 | logger.LogMsg(LogSignDN, datanodeObj.id, "Sending replication request timedout after 30 secs") 67 | return false 68 | } 69 | 70 | logger.LogFail(status, LogSignDN, datanodeObj.id, "sendReplicationRequest(): Failed to send RPQ to target") 71 | logger.LogSuccess(status, LogSignDN, datanodeObj.id, "Successfully sent RPQ to target") 72 | 73 | return status 74 | } 75 | 76 | // receiveChunkCount A function to recieve the chunk count of a file 77 | func (datanodeObj *dataNode) receiveChunkCount(socket *zmq4.Socket) (int, bool) { 78 | var chunkCount string 79 | var ok = false 80 | 81 | recvChan := make(chan bool, 1) 82 | go func() { 83 | chunkCount, ok = comm.RecvString(socket) 84 | recvChan <- ok 85 | }() 86 | select { 87 | case <-recvChan: 88 | case <-time.After(30 * time.Second): 89 | logger.LogMsg(LogSignDN, datanodeObj.id, "Receiving chunk count timedout after 30 secs") 90 | return 0, false 91 | } 92 | 93 | logger.LogFail(ok, LogSignDN, datanodeObj.id, "receiveChunkCount(): Error receiving chunk count") 94 | 95 | ret, convErr := strconv.Atoi(chunkCount) 96 | logger.LogErr(convErr, LogSignDN, datanodeObj.id, "receiveChunkCount(): Error converting chunk count from string to int") 97 | 98 | logger.LogSuccess(ok, LogSignDN, datanodeObj.id, "Received chunk count") 99 | 100 | return ret, (ok && (convErr == nil)) 101 | } 102 | 103 | // sendChunkCount A function to send the chunk count of a file 104 | func (datanodeObj *dataNode) sendChunkCount(socket *zmq4.Socket, chunksCount int) bool { 
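// Transfer protocol used throughout this file: the sender first sends the chunk count
// as a decimal string, then streams each chunk as a raw byte frame; receiveChunkCount
// and receiveChunk mirror these two steps on the receiving side.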
105 | logger.LogMsg(LogSignDN, datanodeObj.id, "Sending chunk count to target") 106 | 107 | var status = false 108 | 109 | sendChan := make(chan bool, 1) 110 | go func() { 111 | status = comm.SendString(socket, strconv.Itoa(chunksCount)) 112 | sendChan <- status 113 | }() 114 | select { 115 | case <-sendChan: 116 | case <-time.After(30 * time.Second): 117 | logger.LogMsg(LogSignDN, datanodeObj.id, "Sending RPQ chunk count timedout after 30 secs") 118 | return false 119 | } 120 | 121 | logger.LogFail(status, LogSignDN, datanodeObj.id, "sendChunkCount(): Failed to RPQ send chunk count to target") 122 | logger.LogSuccess(status, LogSignDN, datanodeObj.id, "Successfully sent RPQ chunk count to target") 123 | 124 | return status 125 | } 126 | 127 | // receiveChunk A function to recieve a chunk of data 128 | func (datanodeObj *dataNode) receiveChunk(socket *zmq4.Socket, chunkID int) ([]byte, bool) { 129 | var chunk []byte 130 | var ok = false 131 | 132 | recvChan := make(chan bool, 1) 133 | go func() { 134 | chunk, ok = comm.RecvBytes(socket) 135 | recvChan <- ok 136 | }() 137 | select { 138 | case <-recvChan: 139 | case <-time.After(time.Minute): 140 | logger.LogMsg(LogSignDN, datanodeObj.id, "Receiving data chunk timedout after 1 min") 141 | return []byte{}, false 142 | } 143 | 144 | logger.LogFail(ok, LogSignDN, datanodeObj.id, "receiveChunk(): Error receiving chunk") 145 | 146 | logger.LogSuccess(ok, LogSignDN, datanodeObj.id, fmt.Sprintf("Received chunk %d", chunkID)) 147 | 148 | return chunk, ok 149 | } 150 | 151 | // sendChunk A function to send a chunk of data 152 | func (datanodeObj *dataNode) sendDataChunk(socket *zmq4.Socket, data []byte, chunkID int) bool { 153 | logger.LogMsg(LogSignDN, datanodeObj.id, fmt.Sprintf("Sending chunk #%d to target", chunkID)) 154 | 155 | var status = false 156 | 157 | sendChan := make(chan bool, 1) 158 | go func() { 159 | status = comm.SendBytes(socket, data) 160 | sendChan <- status 161 | }() 162 | select { 163 | case <-sendChan: 164 | case <-time.After(time.Minute): 165 | logger.LogMsg(LogSignDN, datanodeObj.id, "Sending data chunk timedout after 1 min") 166 | return false 167 | } 168 | 169 | logger.LogFail(status, LogSignDN, datanodeObj.id, "sendChunk(): Failed to send chunk to target") 170 | logger.LogSuccess(status, LogSignDN, datanodeObj.id, fmt.Sprintf("Successfully sent chunk #%d to target", chunkID)) 171 | 172 | return status 173 | } 174 | 175 | func (datanodeObj *dataNode) receiveData(fileName string, ip string, port string, clientID int, dir int) int { 176 | socket, ok := comm.Init(zmq4.REP, "") 177 | defer socket.Close() 178 | logger.LogFail(ok, LogSignDN, datanodeObj.id, "receiveDataFromClient(): Failed to acquire response Socket") 179 | 180 | var connectionString = []string{comm.GetConnectionString(ip, port)} 181 | if dir == 1 { 182 | comm.Connect(socket, connectionString) 183 | } else { 184 | comm.Bind(socket, connectionString) 185 | } 186 | 187 | directory := "Client" + strconv.Itoa(clientID) 188 | fileutils.CreateDirectory(directory) 189 | 190 | path := filepath.Join(directory, fileName) 191 | file := fileutils.CreateFile(path) 192 | defer file.Close() 193 | 194 | chunkCount, chunkCountStatus := datanodeObj.receiveChunkCount(socket) 195 | 196 | if chunkCountStatus == false { 197 | logger.LogMsg(LogSignDN, datanodeObj.id, "receiveDataFromClient(): chunkCount Abort!") 198 | return 0 199 | } 200 | 201 | for i := 0; i < chunkCount; i++ { 202 | chunk, chunkStatus := datanodeObj.receiveChunk(socket, i+1) 203 | 204 | if chunkStatus == false { 
205 | logger.LogMsg(LogSignDN, datanodeObj.id, "receiveDataFromClient(): Data chunk Abort!") 206 | return 0 207 | } 208 | 209 | fileutils.WriteChunk(file, chunk) 210 | } 211 | 212 | logger.LogMsg(LogSignDN, datanodeObj.id, "File received") 213 | 214 | return chunkCount 215 | } 216 | 217 | // sendData A function to send Data to the target machine 218 | func (datanodeObj *dataNode) sendData(fileName string, targetID int, targetIP string, targetPort string, clientID int) bool { 219 | socket, ok := comm.Init(zmq4.REQ, "") 220 | defer socket.Close() 221 | logger.LogFail(ok, LogSignDN, datanodeObj.id, "sendData(): Failed to acquire request Socket") 222 | 223 | var connectionString = []string{comm.GetConnectionString(targetIP, targetPort)} 224 | comm.Connect(socket, connectionString) 225 | 226 | directory := "Client" + strconv.Itoa(clientID) 227 | path := filepath.Join(directory, fileName) 228 | file := fileutils.OpenFile(path) 229 | defer file.Close() 230 | 231 | chunksCount := fileutils.GetChunksCount(path) 232 | 233 | //Send the chunksCount to the DataNode 234 | chunkCountStatus := datanodeObj.sendChunkCount(socket, chunksCount) 235 | 236 | if chunkCountStatus == false { 237 | logger.LogMsg(LogSignDN, datanodeObj.id, "sendData(): Abort!") 238 | return false 239 | } 240 | 241 | //Send the actual chunks of data 242 | for i := 0; i < chunksCount; i++ { 243 | chunk, size, done := fileutils.ReadChunk(file) 244 | 245 | if done == true { 246 | break 247 | } 248 | 249 | chunkStatus := datanodeObj.sendDataChunk(socket, chunk[:size], i+1) 250 | 251 | if chunkStatus == false { 252 | logger.LogMsg(LogSignDN, datanodeObj.id, "sendData(): Abort!") 253 | return false 254 | } 255 | } 256 | 257 | logger.LogMsg(LogSignDN, datanodeObj.id, fmt.Sprintf("Successfully replicated file to data node #%d", targetID)) 258 | return true 259 | } 260 | 261 | // sendPieces A function to send a group of pieces to clients 262 | func (datanodeObj *dataNode) sendPieces(req request.UploadRequest, start int, chunksCount int, clientID int) bool { 263 | socket, ok := comm.Init(zmq4.REQ, "") 264 | defer socket.Close() 265 | logger.LogFail(ok, LogSignDN, datanodeObj.id, "sendPieces(): Failed to acquire request Socket") 266 | 267 | var connectionString = []string{comm.GetConnectionString(req.ClientIP, req.ClientPort)} 268 | comm.Connect(socket, connectionString) 269 | 270 | directory := "Client" + strconv.Itoa(clientID) 271 | path := filepath.Join(directory, req.FileName) 272 | 273 | file := fileutils.OpenSeekFile(path, start) 274 | defer file.Close() 275 | 276 | for i := 0; i < chunksCount; i++ { 277 | chunk, size, done := fileutils.ReadChunk(file) 278 | 279 | if done == true { 280 | break 281 | } 282 | 283 | chunkStatus := datanodeObj.sendDataChunk(socket, chunk[:size], start+i) 284 | 285 | if chunkStatus == false { 286 | logger.LogMsg(LogSignDN, datanodeObj.id, "sendPieces(): Abort!") 287 | return false 288 | } 289 | } 290 | 291 | logger.LogMsg(LogSignDN, datanodeObj.id, fmt.Sprintf("Successfully sent pieces to client #%d", req.ClientID)) 292 | return true 293 | } 294 | 295 | // sendCompletionNotifcation A function to notify the tracker of an action completion 296 | func (datanodeObj *dataNode) sendCompletionNotifcation(req request.CompletionRequest) bool { 297 | socket, ok := comm.Init(zmq4.REQ, "") 298 | defer socket.Close() 299 | logger.LogFail(ok, LogSignDN, datanodeObj.id, "sendCompletionNotifcation(): Failed to acquire request Socket") 300 | 301 | var connectionString = []string{comm.GetConnectionString(datanodeObj.trackerIP, 
datanodeObj.trackerPorts[1])} 302 | comm.Connect(socket, connectionString) 303 | 304 | var status = false 305 | 306 | sendChan := make(chan bool, 1) 307 | go func() { 308 | status = comm.SendString(socket, request.SerializeCompletion(req)) 309 | sendChan <- status 310 | }() 311 | select { 312 | case <-sendChan: 313 | case <-time.After(30 * time.Second): 314 | logger.LogMsg(LogSignDN, datanodeObj.id, "Sending Completion Notifcation timedout after 30 secs") 315 | return false 316 | } 317 | 318 | logger.LogFail(status, LogSignDN, datanodeObj.id, "sendCompletionNotifcation(): Failed to notify tracker of completion") 319 | logger.LogSuccess(status, LogSignDN, datanodeObj.id, "Successfully notified tracker of completion") 320 | 321 | return status 322 | } 323 | 324 | // notifyReplicationCompletion A function to notify tracker of replication completion 325 | func (datanodeObj *dataNode) notifyReplicationCompletion(port string, msg string) bool { 326 | socket, ok := comm.Init(zmq4.REQ, "") 327 | defer socket.Close() 328 | logger.LogFail(ok, LogSignDN, datanodeObj.id, "notifyReplicationCompletion(): Failed to acquire request Socket") 329 | 330 | var connectionString = []string{comm.GetConnectionString(datanodeObj.trackerIP, port)} 331 | comm.Connect(socket, connectionString) 332 | 333 | var status = false 334 | 335 | sendChan := make(chan bool, 1) 336 | go func() { 337 | status = comm.SendString(socket, msg) 338 | sendChan <- status 339 | }() 340 | select { 341 | case <-sendChan: 342 | case <-time.After(30 * time.Second): 343 | logger.LogMsg(LogSignDN, datanodeObj.id, "Sending Replication completion notifcation timedout after 30 secs") 344 | return false 345 | } 346 | 347 | logger.LogFail(status, LogSignDN, datanodeObj.id, "notifyReplicationCompletion(): Failed to notify tracker of completion") 348 | logger.LogSuccess(status, LogSignDN, datanodeObj.id, "Successfully notified tracker of replication completion") 349 | 350 | return status 351 | } 352 | -------------------------------------------------------------------------------- /Distributed-File-System/DataNode/Utils/heartbeats.go: -------------------------------------------------------------------------------- 1 | package datanode 2 | 3 | import ( 4 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 5 | "fmt" 6 | "time" 7 | ) 8 | 9 | // SendHeartBeat A function to publish heartbeat signals 10 | func (dataNodeLauncherObj *dataNodeLauncher) SendHeartBeat() { 11 | defer dataNodeLauncherObj.publisherSocket.Close() 12 | 13 | dataNodeLauncherObj.establishPublisherConnection() 14 | 15 | for range time.Tick(dataNodeLauncherObj.heartbeatInterval) { 16 | heartbeat := fmt.Sprintf("Heartbeat %d", dataNodeLauncherObj.id) 17 | 18 | dataNodeLauncherObj.publisherSocket.Send(heartbeat, 0) 19 | 20 | logger.LogMsg(LogSignL, dataNodeLauncherObj.id, "Sent Heartbeat") 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /Distributed-File-System/DataNode/Utils/types.go: -------------------------------------------------------------------------------- 1 | package datanode 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/pebbe/zmq4" 7 | ) 8 | 9 | // LogSignL Used for logging Launcher messages 10 | const LogSignL string = "[Data Node Launcher]" 11 | 12 | // LogSignDN Used for logging Data Node messages 13 | const LogSignDN string = "[Data Node]" 14 | 15 | // dataNode A struct to represent the basic structure of a Data Node 16 | type dataNode struct { 17 | id int //A unique ID for the current 
machine 18 | ip string //The IP of the current machine 19 | reqPort string //Request port 20 | upPort string //Port that handles upload requests 21 | downPort string //Port that handles download requests 22 | repUpPort string //Port that handles outgoing replication requests 23 | repDownPort string //Port that handles incoming replication requests 24 | trackerIP string //The IP of the tracker machine 25 | trackerPorts []string //The ports of the tracker machine (processes) 26 | } 27 | 28 | // dtHeartbeatNode A struct to represent a data node that sends heartbeat signals 29 | // This struct extends the dataNode struct for added functionality 30 | type dataNodeLauncher struct { 31 | id int //Unique ID 32 | ip string //Launcher IP 33 | trackerIP string //Tracker IP 34 | publisherSocket *zmq4.Socket //A publisher socket to which the machine publishes heartbeats 35 | heartbeatInterval time.Duration //Defines the frequency at which the launcer publishes heartbeats 36 | trackerIPsPort string //The port on which the tracker machine receives IPs 37 | heartbeatPort string //The port on which the launcher publishes heartbeats 38 | } 39 | 40 | // NewDataNode A constructor function for the dataNode type 41 | func NewDataNode(_id int, _ip string, _reqPort string, _upPort string, _downPort string, _repUpPort string, 42 | _repDownPort string, _trackerIP string, _trackerPorts []string) dataNode { 43 | dataNodeObj := dataNode{ 44 | id: _id, 45 | ip: _ip, 46 | reqPort: _reqPort, 47 | upPort: _upPort, 48 | downPort: _downPort, 49 | repUpPort: _repUpPort, 50 | repDownPort: _repDownPort, 51 | trackerIP: _trackerIP, 52 | trackerPorts: _trackerPorts, 53 | } 54 | 55 | return dataNodeObj 56 | } 57 | 58 | // NewDataNodeLauncher A constructor function for the DataNode Launcher type 59 | func NewDataNodeLauncher(_id int, _ip string, _trackerIP string, _heartbeatInterval time.Duration, 60 | _heartbeatPort string, _trackerIPsPort string) dataNodeLauncher { 61 | dataNodeLauncher := dataNodeLauncher{ 62 | id: _id, 63 | ip: _ip, 64 | trackerIP: _trackerIP, 65 | heartbeatInterval: _heartbeatInterval, 66 | heartbeatPort: _heartbeatPort, 67 | trackerIPsPort: _trackerIPsPort, 68 | } 69 | 70 | return dataNodeLauncher 71 | } 72 | -------------------------------------------------------------------------------- /Distributed-File-System/DataNode/Utils/work.go: -------------------------------------------------------------------------------- 1 | package datanode 2 | 3 | import ( 4 | comm "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Comm" 5 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 6 | request "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Request" 7 | "strconv" 8 | "strings" 9 | "time" 10 | 11 | "github.com/pebbe/zmq4" 12 | ) 13 | 14 | // ListenToClients A function to listen to requests from clients 15 | func (datanodeObj *dataNode) ListenToClients() { 16 | socket, ok := comm.Init(zmq4.REP, "") 17 | defer socket.Close() 18 | logger.LogFail(ok, LogSignDN, datanodeObj.id, "ListenToClients(): Failed to acquire response socket") 19 | 20 | connectionString := []string{comm.GetConnectionString(datanodeObj.ip, datanodeObj.reqPort)} 21 | comm.Bind(socket, connectionString) 22 | 23 | for { 24 | serializedRequest, recvStatus := comm.RecvString(socket) 25 | 26 | if recvStatus == true { 27 | go datanodeObj.handleRequest(serializedRequest) 28 | } 29 | } 30 | } 31 | 32 | func (datanodeObj *dataNode) handleRequest(serializedRequest string) { 33 | reqType := 
request.GetType(serializedRequest) 34 | 35 | if reqType == request.Upload { 36 | req := request.DeserializeUpload(serializedRequest) 37 | datanodeObj.uploadRequestHandler(req) 38 | 39 | } else if reqType == request.Download { 40 | req := request.DeserializeUpload(serializedRequest) 41 | arr := strings.Fields(serializedRequest) 42 | start, _ := strconv.Atoi(arr[6]) 43 | chunkCount, _ := strconv.Atoi(arr[7]) 44 | datanodeObj.downloadRequestHandler(req, start, chunkCount) 45 | 46 | } else if reqType == request.Replicate { 47 | req := request.DeserializeReplication(serializedRequest) 48 | datanodeObj.replicationRequestHandler(req) 49 | 50 | } else if reqType == request.Invalid { 51 | logger.LogMsg(LogSignDN, datanodeObj.id, "Invalid Request") 52 | return 53 | } 54 | } 55 | 56 | func (datanodeObj *dataNode) uploadRequestHandler(req request.UploadRequest) { 57 | logger.LogMsg(LogSignDN, datanodeObj.id, "Upload Request Handler Started") 58 | 59 | fileSize := datanodeObj.receiveData(req.FileName, req.ClientIP, req.ClientPort, req.ClientID, 1) 60 | if fileSize == 0 { 61 | return 62 | } 63 | 64 | location := strconv.Itoa(datanodeObj.id) 65 | 66 | compReq := request.CompletionRequest{ 67 | Type: request.Completion, 68 | ClientID: req.ClientID, 69 | ClientIP: req.ClientIP, 70 | ClientPort: req.ClientPort, 71 | FileName: req.FileName, 72 | FileSize: fileSize, 73 | Location: location, 74 | } 75 | 76 | status := datanodeObj.sendCompletionNotifcation(compReq) 77 | if status == false { 78 | return 79 | } 80 | } 81 | 82 | func (datanodeObj *dataNode) downloadRequestHandler(req request.UploadRequest, start int, chunksCount int) { 83 | logger.LogMsg(LogSignDN, datanodeObj.id, "Download Request Handler Started") 84 | 85 | sendStatus := datanodeObj.sendPieces(req, start, chunksCount, req.ClientID) 86 | if sendStatus == false { 87 | return 88 | } 89 | } 90 | 91 | func (datanodeObj *dataNode) replicationRequestHandler(req request.ReplicationRequest) { 92 | logger.LogMsg(LogSignDN, datanodeObj.id, "Replication Request Handler Started") 93 | 94 | if req.SourceID == datanodeObj.id { 95 | logger.LogMsg(LogSignDN, datanodeObj.id, "Replication Source") 96 | 97 | sendRPQStatus := datanodeObj.sendReplicationRequest(req) 98 | if sendRPQStatus == false { 99 | return 100 | } 101 | 102 | sendDataStatus := datanodeObj.sendData(req.FileName, req.TargetNodeID, req.TargetNodeIP, req.TargetNodeBasePort+"24", req.ClientID) 103 | if sendDataStatus == false { 104 | logger.LogMsg(LogSignDN, datanodeObj.id, "Replication Failed") 105 | 106 | notifyStatus := datanodeObj.notifyReplicationCompletion(req.TrackerPort, "Replication Failed") 107 | time.Sleep(5 * time.Second) 108 | if notifyStatus == false { 109 | return 110 | } 111 | 112 | return 113 | } 114 | 115 | notifyStatus := datanodeObj.notifyReplicationCompletion(req.TrackerPort, "Replication Finished") 116 | if notifyStatus == false { 117 | return 118 | } 119 | 120 | } else if req.TargetNodeID == datanodeObj.id { 121 | logger.LogMsg(LogSignDN, datanodeObj.id, "Replication Destination") 122 | 123 | recvStatus := datanodeObj.receiveData(req.FileName, datanodeObj.ip, datanodeObj.repUpPort, req.ClientID, 2) 124 | if recvStatus == 0 { 125 | return 126 | } 127 | 128 | } else { 129 | logger.LogMsg(LogSignDN, datanodeObj.id, "Malformed replication request") 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /Distributed-File-System/README.md: -------------------------------------------------------------------------------- 1 | # Distributed 
File System Port Map 2 | This details the port map used by the different submodules within the Distributed File System module. 3 | 4 | ## Tracker Ports 5 | The launcher process in the Tracker module spawns 2 types of processes: 6 | - **The Master Tracker** 7 | 8 | ``` 9 | - Receiving the IPs for all Data Nodes in the system 10 | - Receiving the heartbeat signals from the launcher module of each data node 11 | - Updating the Alive Data Nodes in the Database. 12 | ``` 13 | **Ports:** 14 | - Port 9000 for the Master Tracker 15 | - **The Side Tracker Processes** 16 | 17 | These processes are responsible for the Tracker job: 18 | ``` 19 | - Listening to Client requests. 20 | - Communicating with Data Nodes. 21 | - Updating the meta file Database. 22 | ``` 23 | - Ports 9001 and 9002 for the side Tracker processes to communicate with Data Nodes. 24 | - Ports 8001 and 8002 for the side Tracker processes to communicate with Clients. 25 | 26 | ## Data Node Ports 27 | The launcher process in the Data Node module spawns 2 types of processes: 28 | - **The Heartbeat Node** 29 | 30 | This process is responsible for: 31 | ``` 32 | - Sending the Heartbeat Port to the Tracker. 33 | - Sending the Data Node IPs. 34 | - Sending the Data Node machine ID. 35 | - Sending the heartbeat signals. 36 | ``` 37 | Here, we use a base port, and from it we derive the other ports as follows: 38 | If the base port of a Data Node machine is 70, for example, then: 39 | - Port 7000 for the launcher Data Node (Heartbeat). 40 | - Ports 7011, 7021 for the request ports. 41 | - Ports 7012, 7022 for the upload ports. 42 | - Ports 7013, 7023 for the download ports. 43 | - Ports 7014, 7024 for the replication upload ports. 44 | - Ports 7015, 7025 for the replication download ports. 45 | 46 | # Testing Data Set 47 | The testing data set can be found [here](https://drive.google.com/drive/folders/1pEVD85lamr6kkhFnDCPDCZFO5JPk7Ajd?usp=sharing) -------------------------------------------------------------------------------- /Distributed-File-System/TrackerNode/Launcher/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | trackernode "Distributed-Video-Processing-Cluster/Distributed-File-System/TrackerNode/Utils" 5 | constants "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Constants" 6 | dbwrapper "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Database" 7 | 8 | "log" 9 | "os/exec" 10 | "sync" 11 | ) 12 | 13 | // Master Tracker data 14 | var masterTrackerIP = constants.TrackerIP 15 | var ipListenerPort = constants.TrackerIPsPort 16 | var masterTrackerID = constants.MasterTrackerID 17 | 18 | func launchTrackers() { 19 | log.Println(trackernode.LogSignL, "Launching Tracker Processes") 20 | 21 | sideTrackerDNIDs := []string{"1", "2"} 22 | sideTrackerDNPorts := constants.TrackerDNPorts 23 | sideTrackerReqPorts := constants.TrackerReqPorts 24 | path := "../TRLauncher/main.go" 25 | 26 | for i := 0; i < 2; i++ { 27 | params := masterTrackerIP + " " + sideTrackerDNIDs[i] + " " + sideTrackerDNPorts[i] + " " + sideTrackerReqPorts[i] 28 | command := "go run " + path + " " + params 29 | 30 | cmd := exec.Command("gnome-terminal", "--title=Tracker"+sideTrackerDNIDs[i], "-e", command) 31 | err := cmd.Start() 32 | 33 | if err != nil { 34 | log.Println(trackernode.LogSignL, "Error starting Tracker Process #", sideTrackerDNIDs[i]) 35 | return 36 | } 37 | 38 | log.Println(trackernode.LogSignL, "Launched Tracker Process#", sideTrackerDNIDs[i]) 39 | } 40 | 41 | 
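// The params string above follows the argument order expected by TRLauncher/main.go:
// <trackerIP> <trackerID> <trackerDNPort> <trackerReqPort> (read there as args[1] through args[4]).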
log.Println(trackernode.LogSignL, "is all set!") 42 | } 43 | 44 | func main() { 45 | disconnectionThreshold := constants.DisconnectionThreshold 46 | 47 | db := dbwrapper.ConnectDB() 48 | defer db.Close() 49 | 50 | dbwrapper.CleanUP(db, trackernode.SQLDropDataNodesTable) 51 | dbwrapper.Migrate(db, trackernode.SQLCreateDataNodesTable) 52 | 53 | var portsMutex sync.Mutex 54 | var ipMutex sync.Mutex 55 | var timeStampsMutex sync.Mutex 56 | var dbMutex sync.Mutex 57 | 58 | trackerNodeLauncherObj := trackernode.NewTrackerNodeLauncher(masterTrackerID, masterTrackerIP, disconnectionThreshold, 59 | ipListenerPort, db, &timeStampsMutex, &ipMutex, &portsMutex, &dbMutex) 60 | 61 | log.Println(trackernode.LogSignL, "Successfully launched") 62 | 63 | launchTrackers() 64 | 65 | go trackerNodeLauncherObj.ReceiveHandshake() 66 | 67 | go trackerNodeLauncherObj.UpdateDataNodeAliveStatus() 68 | 69 | trackerNodeLauncherObj.ListenToHeartbeats() 70 | } 71 | -------------------------------------------------------------------------------- /Distributed-File-System/TrackerNode/TRLauncher/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | trackernode "Distributed-Video-Processing-Cluster/Distributed-File-System/TrackerNode/Utils" 5 | dbwrapper "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Database" 6 | "fmt" 7 | "log" 8 | "os" 9 | "strconv" 10 | "sync" 11 | ) 12 | 13 | func main() { 14 | args := os.Args 15 | fmt.Println(args[1:]) 16 | 17 | //Tracker parameters 18 | ip := args[1] 19 | id, _ := strconv.Atoi(args[2]) 20 | dnPort := args[3] 21 | reqPort := args[4] 22 | 23 | db := dbwrapper.ConnectDB() 24 | defer db.Close() 25 | 26 | var dbMutex sync.Mutex 27 | trackerNodeObj := trackernode.NewTrackerNode(id, ip, reqPort, dnPort, db, &dbMutex) 28 | 29 | log.Println(trackernode.LogSignTR, args[2], "Successfully launched") 30 | 31 | if id == 1 { 32 | go trackerNodeObj.Replicate() 33 | } 34 | 35 | trackerNodeObj.ListenToClientRequests() 36 | } 37 | -------------------------------------------------------------------------------- /Distributed-File-System/TrackerNode/Utils/comm.go: -------------------------------------------------------------------------------- 1 | package trackernode 2 | 3 | import ( 4 | comm "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Comm" 5 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 6 | request "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Request" 7 | "fmt" 8 | "strconv" 9 | "strings" 10 | "time" 11 | 12 | "github.com/pebbe/zmq4" 13 | ) 14 | 15 | // establishSubscriberConnection A function to establish a TCP connection for subscribing to heartbeats 16 | func (trackerNodeLauncherObj *trackerNodeLauncher) establishSubscriberConnection() { 17 | subscriber, ok := comm.Init(zmq4.SUB, "Heartbeat") 18 | trackerNodeLauncherObj.subscriberSocket = subscriber 19 | logger.LogFail(ok, LogSignL, trackerNodeLauncherObj.id, "establishPublisherConnection(): Failed to acquire subscriber Socket") 20 | } 21 | 22 | // serializeIPsMaps A function to serialize IP maps 23 | func (trackerNodeLauncherObj *trackerNodeLauncher) getHBConnections() []string { 24 | var connectionStrings []string 25 | 26 | trackerNodeLauncherObj.ipsMutex.Lock() 27 | trackerNodeLauncherObj.portsMutex.Lock() 28 | 29 | for id, ip := range trackerNodeLauncherObj.datanodeIPs { 30 | connection := comm.GetConnectionString(ip, trackerNodeLauncherObj.datanodeBasePorts[id]+"00") 31 | 
connectionStrings = append(connectionStrings, connection) 32 | } 33 | 34 | trackerNodeLauncherObj.portsMutex.Unlock() 35 | trackerNodeLauncherObj.ipsMutex.Unlock() 36 | 37 | return connectionStrings 38 | } 39 | 40 | // updateSubscriberConnection A function to update the heartbeat susbcription list 41 | func (trackerNodeLauncherObj *trackerNodeLauncher) updateSubscriberConnection() { 42 | comm.Connect(trackerNodeLauncherObj.subscriberSocket, trackerNodeLauncherObj.getHBConnections()) 43 | } 44 | 45 | // ReceiveHandshake A function to constantly check for incoming datanode handshakes 46 | func (trackerNodeLauncherObj *trackerNodeLauncher) ReceiveHandshake() { 47 | socket, ok := comm.Init(zmq4.REP, "") 48 | defer socket.Close() 49 | logger.LogFail(ok, LogSignL, trackerNodeLauncherObj.id, "ReceiveHandshake(): Failed to acquire response Socket") 50 | 51 | var connectionString = []string{comm.GetConnectionString(trackerNodeLauncherObj.ip, trackerNodeLauncherObj.trackerIPsPort)} 52 | comm.Bind(socket, connectionString) 53 | 54 | for { 55 | msg, status := comm.RecvString(socket) 56 | logger.LogFail(status, LogSignL, trackerNodeLauncherObj.id, "ReceiveHandshake(): Failed to receive handshake") 57 | 58 | if status == true { 59 | fields := strings.Fields(msg) 60 | incomingIP := fields[0] 61 | incomingBasePort := fields[2] 62 | incomingID, convErr := strconv.Atoi(fields[1]) 63 | logger.LogErr(convErr, LogSignL, trackerNodeLauncherObj.id, "ReceiveHandshake(): Failed to convert incoming ID") 64 | 65 | trackerNodeLauncherObj.ipsMutex.Lock() 66 | trackerNodeLauncherObj.datanodeIPs[incomingID] = incomingIP 67 | trackerNodeLauncherObj.ipsMutex.Unlock() 68 | 69 | trackerNodeLauncherObj.portsMutex.Lock() 70 | trackerNodeLauncherObj.datanodeBasePorts[incomingID] = incomingBasePort 71 | trackerNodeLauncherObj.portsMutex.Unlock() 72 | 73 | trackerNodeLauncherObj.timeStampMutex.Lock() 74 | trackerNodeLauncherObj.datanodeTimeStamps[incomingID] = time.Now() 75 | trackerNodeLauncherObj.timeStampMutex.Unlock() 76 | 77 | trackerNodeLauncherObj.dbMutex.Lock() 78 | insertDataNode(trackerNodeLauncherObj.db, incomingID, incomingIP, incomingBasePort) 79 | trackerNodeLauncherObj.dbMutex.Unlock() 80 | 81 | logMsg := fmt.Sprintf("Received IP = %s from data node#%d", incomingIP+":"+incomingBasePort+"00", incomingID) 82 | logger.LogMsg(LogSignL, 0, logMsg) 83 | } 84 | } 85 | } 86 | 87 | // sendDataNodePortsToClient A function send a data node connection string to client 88 | func (trackerNodeObj *trackerNode) sendDataNodePortsToClient(req request.UploadRequest, dataNodeConnectionString string) { 89 | socket, ok := comm.Init(zmq4.REQ, "") 90 | defer socket.Close() 91 | logger.LogFail(ok, LogSignTR, trackerNodeObj.id, "sendDataNodePortsToClient(): Failed acquire request socket") 92 | 93 | var connectionString = []string{comm.GetConnectionString(req.ClientIP, req.ClientPort)} 94 | comm.Connect(socket, connectionString) 95 | 96 | status := false 97 | 98 | for status != true { 99 | 100 | logger.LogMsg(LogSignTR, trackerNodeObj.id, fmt.Sprintf("Responding to request#%d, from client #%d", req.ID, req.ClientID)) 101 | 102 | status = comm.SendString(socket, dataNodeConnectionString) 103 | logger.LogFail(status, LogSignTR, trackerNodeObj.id, fmt.Sprintf("sendDataNodePortsToClient(): Failed to respond to request#%d, from client #%d, ... 
Trying again", 104 | req.ID, req.ClientID)) 105 | } 106 | 107 | logger.LogMsg(LogSignTR, trackerNodeObj.id, fmt.Sprintf("Responded to request#%d, from client #%d", req.ID, req.ClientID)) 108 | } 109 | 110 | // sendReplicationRequest A function to send a replication request 111 | func (trackerNodeObj *trackerNode) sendReplicationRequest(req request.ReplicationRequest, sourceIP string, sourcePort string) { 112 | socket, ok := comm.Init(zmq4.REQ, "") 113 | defer socket.Close() 114 | logger.LogFail(ok, LogSignTR, trackerNodeObj.id, "sendReplicationRequest(): Failed acquire request socket") 115 | 116 | var connectionString = []string{comm.GetConnectionString(sourceIP, sourcePort)} 117 | comm.Connect(socket, connectionString) 118 | 119 | logMsg := fmt.Sprintf("Sending RPQ {Src:%d, file:%s, client:%d, Dst:%d}", 120 | req.SourceID, req.FileName, req.ClientID, req.TargetNodeID) 121 | logger.LogMsg(LogSignTR, trackerNodeObj.id, logMsg) 122 | 123 | status := comm.SendString(socket, request.SerializeReplication(req)) 124 | logger.LogFail(status, LogSignTR, trackerNodeObj.id, "sendDataNodePortsToClient(): Failed to send RPQ") 125 | logger.LogSuccess(status, LogSignTR, trackerNodeObj.id, "Successfully sent RPQ") 126 | } 127 | 128 | // notifyClient A function to notify client of an action compeltion 129 | func (trackerNodeObj *trackerNode) notifyClient(ip string, port string, msg string, id int) { 130 | socket, ok := comm.Init(zmq4.REQ, "") 131 | defer socket.Close() 132 | logger.LogFail(ok, LogSignTR, trackerNodeObj.id, "notifyClient(): Failed acquire request socket") 133 | 134 | var connectionString = []string{comm.GetConnectionString(ip, port)} 135 | comm.Connect(socket, connectionString) 136 | 137 | logger.LogMsg(LogSignTR, trackerNodeObj.id, fmt.Sprintf("Sending notification to client %d", id)) 138 | 139 | status := comm.SendString(socket, msg) 140 | logger.LogFail(status, LogSignTR, trackerNodeObj.id, "notifyClient(): Failed to send notification") 141 | logger.LogSuccess(status, LogSignTR, trackerNodeObj.id, "Successfully sent notification") 142 | } 143 | 144 | func (trackerNodeObj *trackerNode) recieveReplicationCompletion() bool { 145 | socket, ok := comm.Init(zmq4.REP, "") 146 | defer socket.Close() 147 | logger.LogFail(ok, LogSignTR, trackerNodeObj.id, "recieveReplicationCompletion(): Failed to acquire response Socket") 148 | 149 | var connectionString = []string{comm.GetConnectionString(trackerNodeObj.ip, trackerNodeObj.datanodePort)} 150 | comm.Bind(socket, connectionString) 151 | 152 | msg, status := comm.RecvString(socket) 153 | logger.LogFail(status, LogSignTR, trackerNodeObj.id, "recieveReplicationCompletion(): Failed to receive replication completion") 154 | logger.LogSuccess(status, LogSignTR, trackerNodeObj.id, "Recieved "+msg) 155 | 156 | return (msg == "Replication Finished") 157 | } 158 | -------------------------------------------------------------------------------- /Distributed-File-System/TrackerNode/Utils/dbscripts.go: -------------------------------------------------------------------------------- 1 | package trackernode 2 | 3 | // SQLCreateDataNodesTable SQL to create the DataNodes table 4 | const SQLCreateDataNodesTable string = ` 5 | CREATE TABLE datanodes ( 6 | id SERIAL PRIMARY KEY, 7 | dataNodeID int UNIQUE NOT NULL, 8 | ip varchar(60) NOT NULL, 9 | basePort varchar(60) NOT NULL 10 | ); 11 | ` 12 | 13 | // SQLCreateMetaFile SQL to create the Meta Files table 14 | const SQLCreateMetaFile string = ` 15 | CREATE TABLE metafiles ( 16 | id SERIAL PRIMARY KEY, 17 | fileName 
varchar(60) NOT NULL, 18 | clientID int NOT NULL, 19 | fileSize int NOT NULL, 20 | location varchar(60) NOT NULL 21 | ); 22 | ALTER TABLE metafiles 23 | ADD CONSTRAINT unq_filename_clientid UNIQUE(fileName, clientID); 24 | ` 25 | 26 | // SQLDropDataNodesTable SQL to drop the DataNodes table 27 | const SQLDropDataNodesTable string = `DROP TABLE IF EXISTS datanodes;` 28 | 29 | // SQLDropMetaFileTable SQL to drop the Meta Files table 30 | const SQLDropMetaFileTable string = `DROP TABLE IF EXISTS metafiles;` 31 | 32 | // sqlInsertDataNode SQL to insert a data node in the DataNodes table 33 | const sqlInsertDataNode string = ` 34 | INSERT INTO datanodes (dataNodeID, ip, basePort) 35 | VALUES ($1, $2, $3) 36 | ` 37 | 38 | // sqlInsertFileEntry SQL to insert a a file entry into the Meta File table 39 | const sqlInsertFileEntry string = ` 40 | INSERT INTO metafiles (fileName, clientID, fileSize, location) 41 | VALUES ($1, $2, $3, $4) 42 | ` 43 | 44 | // sqlDeleteDataNode SQL to delete a data node from the DataNodes table 45 | const sqlDeleteDataNode string = `DELETE FROM datanodes WHERE dataNodeID=$1` 46 | 47 | // sqlSelectAllDataNodes SQL to select all datanodes 48 | const sqlSelectAllDataNodes string = `SELECT * FROM datanodes` 49 | 50 | // sqlSelectDataNode SQL to select a datanode indentified by its ID 51 | const sqlSelectDataNode string = ` 52 | SELECT * FROM datanodes WHERE dataNodeID=$1 53 | ` 54 | 55 | // sqlSelectAllMetaFiles SQL to select all entries in the metafile table 56 | const sqlSelectAllMetaFiles string = `SELECT * FROM metafiles` 57 | 58 | // sqlSelectMetaFile SQL to select a meta file entry 59 | const sqlSelectMetaFile string = ` 60 | SELECT * FROM metafiles WHERE fileName = $1 and clientID = $2 61 | ` 62 | 63 | // sqlUpdateMetaFile SQL to update a metafile entry 64 | const sqlUpdateMetaFile string = ` 65 | UPDATE metafiles 66 | SET location = $1 67 | WHERE fileName = $2 AND clientID = $3; 68 | ` 69 | 70 | // sqlSelectAllMetaFilesForClient SQL to select all metafiles for a client 71 | const sqlSelectAllMetaFilesForClient string = ` 72 | SELECT fileName, fileSize 73 | FROM metafiles 74 | WHERE clientID = $1 75 | ` 76 | -------------------------------------------------------------------------------- /Distributed-File-System/TrackerNode/Utils/dbtypes.go: -------------------------------------------------------------------------------- 1 | package trackernode 2 | 3 | // dataNodeRow Represents a data node row in the Database 4 | type dataNodeRow struct { 5 | id int 6 | ip string 7 | basePort string 8 | } 9 | 10 | // fileRow Represents a file entry row in the Database 11 | type fileRow struct { 12 | fileName string 13 | clientID int 14 | fileSize int 15 | location string 16 | } 17 | -------------------------------------------------------------------------------- /Distributed-File-System/TrackerNode/Utils/dbwork.go: -------------------------------------------------------------------------------- 1 | package trackernode 2 | 3 | import ( 4 | dbwrapper "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Database" 5 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 6 | "fmt" 7 | 8 | "database/sql" 9 | ) 10 | 11 | // insertDataNode A function to insert a data node in the DataNodes table 12 | func insertDataNode(db *sql.DB, dataNodeID int, ip string, basePort string) bool { 13 | sqlStatement := sqlInsertDataNode 14 | 15 | logMsgs := logger.LogInfo{ 16 | Success: fmt.Sprintf("DataNode #%d created Successfully", dataNodeID), 17 | Error: 
fmt.Sprintf("Failed to create DataNode #%d", dataNodeID), 18 | } 19 | 20 | ok := dbwrapper.ExecuteQuery(db, sqlStatement, logMsgs, false, dataNodeID, ip, basePort) 21 | 22 | return ok 23 | } 24 | 25 | // insertMetaFile A function to insert a meta file entry into the Database 26 | func insertMetaFile(db *sql.DB, fileName string, clientID int, filseSize int, location string) bool { 27 | sqlStatement := sqlInsertFileEntry 28 | 29 | logMsgs := logger.LogInfo{ 30 | Success: fmt.Sprintf("Successfully insert file: %s of client %d", fileName, clientID), 31 | Error: fmt.Sprintf("Failed to insert file: %s of client %d", fileName, clientID), 32 | } 33 | 34 | ok := dbwrapper.ExecuteQuery(db, sqlStatement, logMsgs, false, fileName, clientID, filseSize, location) 35 | 36 | return ok 37 | } 38 | 39 | // deleteDataNode A function to delete a data node from the DataNodes table 40 | func deleteDataNode(db *sql.DB, dataNodeID int) bool { 41 | sqlStatement := sqlDeleteDataNode 42 | 43 | logMsgs := logger.LogInfo{ 44 | Success: fmt.Sprintf("DataNode #%d deleted Successfully", dataNodeID), 45 | Error: fmt.Sprintf("Failed to delete DataNode #%d", dataNodeID), 46 | } 47 | 48 | ok := dbwrapper.ExecuteQuery(db, sqlStatement, logMsgs, false, dataNodeID) 49 | 50 | return ok 51 | } 52 | 53 | // selectDatanodes A function to select all datanodes 54 | func selectDatanodes(db *sql.DB) []dataNodeRow { 55 | sqlStatement := sqlSelectAllDataNodes 56 | 57 | logMsgs := logger.LogInfo{ 58 | Success: "Datanode list selected Successfully", 59 | Error: "Datanode list selection failed", 60 | } 61 | 62 | rows, ok := dbwrapper.ExecuteRowsQuery(db, sqlStatement, logMsgs, false) 63 | defer rows.Close() 64 | 65 | var datanodeList []dataNodeRow 66 | for rows.Next() { 67 | var serialID int 68 | var dataNodeID int 69 | var ip string 70 | var basePort string 71 | 72 | err := rows.Scan(&serialID, &dataNodeID, &ip, &basePort) 73 | logger.LogDBErr(err, dbwrapper.LogSign, "selectDatanodes(): Error while extracting results", false) 74 | 75 | res := dataNodeRow{ 76 | id: dataNodeID, 77 | ip: ip, 78 | basePort: basePort, 79 | } 80 | 81 | datanodeList = append(datanodeList, res) 82 | } 83 | 84 | err := rows.Err() 85 | logger.LogDBErr(err, dbwrapper.LogSign, "selectDatanodes(): Error while extracting results", false) 86 | logger.LogDBSuccess(err, dbwrapper.LogSign, "DataNode list extracted successfully") 87 | 88 | if ok == false { 89 | datanodeList = []dataNodeRow{} 90 | } 91 | 92 | return datanodeList 93 | } 94 | 95 | // selectDataNode A function to select a datanode 96 | func selectDataNode(db *sql.DB, id int) (dataNodeRow, bool) { 97 | sqlStatement := sqlSelectDataNode 98 | 99 | row := dbwrapper.ExecuteRowQuery(db, sqlStatement, id) 100 | 101 | var serialID int 102 | var res dataNodeRow 103 | 104 | err := row.Scan(&serialID, &res.id, &res.ip, &res.basePort) 105 | if err == sql.ErrNoRows { 106 | return dataNodeRow{}, false 107 | } 108 | 109 | return res, true 110 | } 111 | 112 | func selectMetaFiles(db *sql.DB) []fileRow { 113 | sqlStatement := sqlSelectAllMetaFiles 114 | 115 | logMsgs := logger.LogInfo{ 116 | Success: "File list selected Successfully", 117 | Error: "File list selection failed", 118 | } 119 | 120 | rows, ok := dbwrapper.ExecuteRowsQuery(db, sqlStatement, logMsgs, false) 121 | defer rows.Close() 122 | 123 | var fileList []fileRow 124 | for rows.Next() { 125 | var serialID int 126 | var fileName string 127 | var clientID int 128 | var fileSize int 129 | var location string 130 | 131 | err := rows.Scan(&serialID, &fileName, 
&clientID, &fileSize, &location) 132 | logger.LogDBErr(err, dbwrapper.LogSign, "selectMetaFiles(): Error while extracting results", false) 133 | 134 | res := fileRow{ 135 | fileName: fileName, 136 | clientID: clientID, 137 | fileSize: fileSize, 138 | location: location, 139 | } 140 | 141 | fileList = append(fileList, res) 142 | } 143 | 144 | err := rows.Err() 145 | logger.LogDBErr(err, dbwrapper.LogSign, "selectMetaFiles(): Error while extracting results", false) 146 | logger.LogDBSuccess(err, dbwrapper.LogSign, "File list extracted successfully") 147 | 148 | if ok == false { 149 | fileList = []fileRow{} 150 | } 151 | 152 | return fileList 153 | } 154 | 155 | // selectMetaFile A function to select a metafile entry 156 | func selectMetaFile(db *sql.DB, fileName string, clientID int) (fileRow, bool) { 157 | sqlStatement := sqlSelectMetaFile 158 | 159 | row := dbwrapper.ExecuteRowQuery(db, sqlStatement, fileName, clientID) 160 | 161 | var serialID int 162 | var res fileRow 163 | 164 | err := row.Scan(&serialID, &res.fileName, &res.clientID, &res.fileSize, &res.location) 165 | 166 | if err == sql.ErrNoRows { 167 | return fileRow{}, false 168 | } 169 | 170 | return res, true 171 | } 172 | 173 | // selectMetaFileForClient A function to select a metafile entry for a certain client 174 | func selectMetaFileForClient(db *sql.DB, clientID int) []fileRow { 175 | sqlStatement := sqlSelectAllMetaFilesForClient 176 | 177 | logMsgs := logger.LogInfo{ 178 | Success: fmt.Sprintf("File list for client #%d selected Successfully", clientID), 179 | Error: fmt.Sprintf("File list for client #%d selection failed", clientID), 180 | } 181 | 182 | rows, ok := dbwrapper.ExecuteRowsQuery(db, sqlStatement, logMsgs, false, clientID) 183 | defer rows.Close() 184 | 185 | var fileList []fileRow 186 | for rows.Next() { 187 | var fileName string 188 | var fileSize int 189 | 190 | err := rows.Scan(&fileName, &fileSize) 191 | logger.LogDBErr(err, dbwrapper.LogSign, "selectMetaFiles(): Error while extracting results", false) 192 | 193 | res := fileRow{ 194 | fileName: fileName, 195 | clientID: clientID, 196 | fileSize: fileSize, 197 | location: "", 198 | } 199 | 200 | fileList = append(fileList, res) 201 | } 202 | 203 | err := rows.Err() 204 | logger.LogDBErr(err, dbwrapper.LogSign, "selectMetaFileForClient(): Error while extracting results", false) 205 | 206 | if ok == false { 207 | fileList = []fileRow{} 208 | } 209 | 210 | return fileList 211 | } 212 | 213 | // UpdatePeerDownload A function to update the peer-download status 214 | func updateMetaFile(db *sql.DB, location string, fileName string, clientID int) bool { 215 | sqlStatement := sqlUpdateMetaFile 216 | 217 | logMsgs := logger.LogInfo{ 218 | Success: "Updated metafile Successfully", 219 | Error: "Metafile update failed", 220 | } 221 | 222 | ok := dbwrapper.ExecuteQuery(db, sqlStatement, logMsgs, false, location, fileName, clientID) 223 | 224 | return ok 225 | } 226 | -------------------------------------------------------------------------------- /Distributed-File-System/TrackerNode/Utils/heartbeats.go: -------------------------------------------------------------------------------- 1 | package trackernode 2 | 3 | import ( 4 | comm "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Comm" 5 | "log" 6 | "strconv" 7 | "strings" 8 | "time" 9 | 10 | "github.com/pebbe/zmq4" 11 | ) 12 | 13 | // ListenToHeartbeats A function to listen to incoming heartbeats 14 | func (trackerNodeLauncherObj *trackerNodeLauncher) ListenToHeartbeats() { 15 | 
trackerNodeLauncherObj.establishSubscriberConnection() 16 | 17 | defer trackerNodeLauncherObj.subscriberSocket.Close() 18 | 19 | for { 20 | trackerNodeLauncherObj.updateSubscriberConnection() 21 | 22 | heartbeat, _ := trackerNodeLauncherObj.subscriberSocket.Recv(zmq4.DONTWAIT) 23 | 24 | if heartbeat != "" { 25 | log.Println(LogSignL, "Received", heartbeat) 26 | 27 | trackerNodeLauncherObj.registerTimeStap(heartbeat) 28 | } 29 | } 30 | } 31 | 32 | // registerTimeStap A function to register the timestamp of the last received heartbeat 33 | func (trackerNodeLauncherObj *trackerNodeLauncher) registerTimeStap(heartbeat string) { 34 | id, _ := strconv.Atoi((strings.Fields(heartbeat))[1]) 35 | 36 | trackerNodeLauncherObj.timeStampMutex.Lock() 37 | trackerNodeLauncherObj.datanodeTimeStamps[id] = time.Now() 38 | trackerNodeLauncherObj.timeStampMutex.Unlock() 39 | } 40 | 41 | // updateDataNodeAliveStatus A function the update the status of the alive datanodes 42 | func (trackerNodeLauncherObj *trackerNodeLauncher) UpdateDataNodeAliveStatus() { 43 | for { 44 | trackerNodeLauncherObj.timeStampMutex.Lock() 45 | 46 | for id, timestamp := range trackerNodeLauncherObj.datanodeTimeStamps { 47 | diff := time.Now().Sub(timestamp) 48 | threshold := trackerNodeLauncherObj.disconnectionThreshold 49 | 50 | if diff > threshold { 51 | connection := []string{comm.GetConnectionString(trackerNodeLauncherObj.datanodeIPs[id], trackerNodeLauncherObj.datanodeBasePorts[id]+"00")} 52 | comm.Disconnect(trackerNodeLauncherObj.subscriberSocket, connection) 53 | 54 | trackerNodeLauncherObj.ipsMutex.Lock() 55 | delete(trackerNodeLauncherObj.datanodeIPs, id) 56 | trackerNodeLauncherObj.ipsMutex.Unlock() 57 | 58 | trackerNodeLauncherObj.portsMutex.Lock() 59 | delete(trackerNodeLauncherObj.datanodeBasePorts, id) 60 | trackerNodeLauncherObj.portsMutex.Unlock() 61 | 62 | delete(trackerNodeLauncherObj.datanodeTimeStamps, id) 63 | 64 | trackerNodeLauncherObj.dbMutex.Lock() 65 | deleteDataNode(trackerNodeLauncherObj.db, id) 66 | trackerNodeLauncherObj.dbMutex.Unlock() 67 | 68 | log.Println(LogSignL, "Node#", id, "has gone offline") 69 | } 70 | } 71 | 72 | trackerNodeLauncherObj.timeStampMutex.Unlock() 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /Distributed-File-System/TrackerNode/Utils/types.go: -------------------------------------------------------------------------------- 1 | package trackernode 2 | 3 | import ( 4 | "database/sql" 5 | "sync" 6 | "time" 7 | 8 | "github.com/pebbe/zmq4" 9 | ) 10 | 11 | // LogSignL Used for logging Launcher messages 12 | const LogSignL string = "[Tracker Launcher]" 13 | 14 | // LogSignTR Used for logging Tracker messages 15 | const LogSignTR string = "[Tracker]" 16 | 17 | // trackerNode A struct to represent the basic structure of a Tracker Node 18 | type trackerNode struct { 19 | id int //ID of the tracker process 20 | ip string //The IP of the Tracker machine 21 | requestsPort string //The requests port of the Tracker machine 22 | datanodePort string //The datanode port on the Tracker machine 23 | db *sql.DB //A handle on the DB 24 | dbMutex *sync.Mutex //To restrict access to the DB 25 | } 26 | 27 | // heartbeatTrackerNode A struct to represent a Tracker Node that listens to heartbeats 28 | //This struct extends the dataNode struct for added functionality 29 | type trackerNodeLauncher struct { 30 | id int //Unique ID 31 | ip string //Tracker IP 32 | trackerIPsPort string //A port on which the tracker listens for incoming IPs 33 | 
subscriberSocket *zmq4.Socket //A susbscriber socket 34 | disconnectionThreshold time.Duration //A threshold to disconnect a machine 35 | datanodeTimeStamps map[int]time.Time //Keep track of the timestamps 36 | timeStampMutex *sync.Mutex //To restrict access to the timestamp map 37 | datanodeIPs map[int]string //Keep tracker of datanode IPs 38 | ipsMutex *sync.Mutex //To restrict access to DN IPs map 39 | datanodeBasePorts map[int]string //Keep track of the datanode base ports 40 | portsMutex *sync.Mutex //To restrict access to the DN ports map 41 | db *sql.DB //A handle on the DB 42 | dbMutex *sync.Mutex //To restrict access to the DB 43 | } 44 | 45 | //NewTrackerNode A constructor function for the trackerNode type 46 | func NewTrackerNode(_id int, _ip string, _requestsPort string, _datanodePort string, _db *sql.DB, _dbMutex *sync.Mutex) trackerNode { 47 | trackerNodeObj := trackerNode{ 48 | id: _id, 49 | ip: _ip, 50 | requestsPort: _requestsPort, 51 | datanodePort: _datanodePort, 52 | db: _db, 53 | dbMutex: _dbMutex, 54 | } 55 | 56 | return trackerNodeObj 57 | } 58 | 59 | // NewTrackerNodeLauncher A constructor function for the trackerNodeLauncher type 60 | func NewTrackerNodeLauncher(_id int, _ip string, _disconnectionThreshold time.Duration, _trackerIPsPort string, _db *sql.DB, 61 | _timeStampMutex *sync.Mutex, _ipsMutex *sync.Mutex, _portsMutex *sync.Mutex, _dbMutex *sync.Mutex) trackerNodeLauncher { 62 | 63 | trackerNodeLauncherObj := trackerNodeLauncher{ 64 | id: _id, 65 | ip: _ip, 66 | trackerIPsPort: _trackerIPsPort, 67 | disconnectionThreshold: _disconnectionThreshold, 68 | db: _db, 69 | timeStampMutex: _timeStampMutex, 70 | ipsMutex: _ipsMutex, 71 | portsMutex: _portsMutex, 72 | dbMutex: _dbMutex, 73 | } 74 | 75 | trackerNodeLauncherObj.datanodeTimeStamps = make(map[int]time.Time) 76 | trackerNodeLauncherObj.datanodeBasePorts = make(map[int]string) 77 | trackerNodeLauncherObj.datanodeIPs = make(map[int]string) 78 | 79 | return trackerNodeLauncherObj 80 | } 81 | -------------------------------------------------------------------------------- /Distributed-File-System/TrackerNode/Utils/work.go: -------------------------------------------------------------------------------- 1 | package trackernode 2 | 3 | import ( 4 | comm "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Comm" 5 | constants "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Constants" 6 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 7 | request "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Request" 8 | "fmt" 9 | "log" 10 | "strconv" 11 | "strings" 12 | "time" 13 | 14 | "github.com/pebbe/zmq4" 15 | ) 16 | 17 | var lastPickedNode int 18 | var lastPickedProcess = 1 19 | 20 | // ListenToClientRequests A function to listen to client requests 21 | func (trackerNodeObj *trackerNode) ListenToClientRequests() { 22 | socket, ok := comm.Init(zmq4.REP, "") 23 | defer socket.Close() 24 | logger.LogFail(ok, LogSignTR, trackerNodeObj.id, "ListenToClientRequests(): Failed to acquire response Socket") 25 | 26 | var connectionString = []string{comm.GetConnectionString(trackerNodeObj.ip, trackerNodeObj.requestsPort)} 27 | comm.Bind(socket, connectionString) 28 | 29 | for { 30 | serializedRequest, recvStatus := comm.RecvString(socket) 31 | 32 | if recvStatus == true { 33 | go trackerNodeObj.handleRequest(serializedRequest) 34 | } 35 | } 36 | } 37 | 38 | // handleRequest A function to handle requests based on their types 39 | func 
(trackerNodeObj *trackerNode) handleRequest(serializedRequest string) { 40 | reqType := request.GetType(serializedRequest) 41 | 42 | if reqType == request.Upload { 43 | req := request.DeserializeUpload(serializedRequest) 44 | trackerNodeObj.uploadRequestHandler(req) 45 | 46 | } else if reqType == request.Download { 47 | req := request.DeserializeUpload(serializedRequest) 48 | trackerNodeObj.downloadRequestHandler(req) 49 | 50 | } else if reqType == request.Completion { 51 | req := request.DeserializeCompletion(serializedRequest) 52 | trackerNodeObj.completionRequestHandler(req) 53 | 54 | } else if reqType == request.Display { 55 | req := request.DeserializeUpload(serializedRequest) 56 | trackerNodeObj.displayRequestHandler(req) 57 | 58 | } else if reqType == request.Invalid { 59 | logger.LogMsg(LogSignTR, trackerNodeObj.id, "Invalid Request") 60 | return 61 | } 62 | } 63 | 64 | // pickUploadDataNode A function pick a datanode to handle an upload request 65 | func (trackerNodeObj *trackerNode) pickUploadDataNode() (dataNodeRow, int, bool) { 66 | trackerNodeObj.dbMutex.Lock() 67 | res := selectDatanodes(trackerNodeObj.db) 68 | 69 | if len(res) == 0 { 70 | return dataNodeRow{}, 0, false 71 | } 72 | 73 | if lastPickedNode >= len(res) { 74 | lastPickedNode = 0 75 | } 76 | 77 | pickedDN := lastPickedNode 78 | lastPickedNode++ 79 | 80 | if lastPickedProcess == 3 { 81 | lastPickedProcess = 1 82 | } 83 | 84 | pickedProcess := lastPickedProcess 85 | lastPickedProcess++ 86 | trackerNodeObj.dbMutex.Unlock() 87 | 88 | return res[pickedDN], pickedProcess, true 89 | } 90 | 91 | // uploadRequestHandler A function to handle a request of type Upload 92 | func (trackerNodeObj *trackerNode) uploadRequestHandler(req request.UploadRequest) { 93 | logMsg := fmt.Sprintf("Handling upload request #%d, from client #%d", req.ID, req.ClientID) 94 | logger.LogMsg(LogSignTR, trackerNodeObj.id, logMsg) 95 | 96 | pickedDN, pickedProcess, atleast := trackerNodeObj.pickUploadDataNode() 97 | 98 | if atleast == false { 99 | msg := "All data nodes are offline" 100 | trackerNodeObj.sendDataNodePortsToClient(req, msg) 101 | return 102 | } 103 | 104 | dataNodeConnectionString := pickedDN.ip + " " + pickedDN.basePort + strconv.Itoa(pickedProcess) + "1" 105 | trackerNodeObj.sendDataNodePortsToClient(req, dataNodeConnectionString) 106 | } 107 | 108 | // checkFileExistance Function to check whether a metafile entry exsists 109 | func (trackerNodeObj *trackerNode) checkFileExistance(fileName string, clientID int) (fileRow, bool) { 110 | trackerNodeObj.dbMutex.Lock() 111 | metafile, exists := selectMetaFile(trackerNodeObj.db, fileName, clientID) 112 | trackerNodeObj.dbMutex.Unlock() 113 | 114 | return metafile, exists 115 | } 116 | 117 | // getDownloadSrcs A function to obtain all possible download sources 118 | func (trackerNodeObj *trackerNode) getDownloadSrcs(location string) (string, bool) { 119 | locations := strings.Fields(location) 120 | 121 | downloadLocations := "" 122 | atleast := false 123 | 124 | for i := 0; i < len(locations); i++ { 125 | dnID, _ := strconv.Atoi(locations[i]) 126 | 127 | trackerNodeObj.dbMutex.Lock() 128 | src, srcAlive := selectDataNode(trackerNodeObj.db, dnID) 129 | trackerNodeObj.dbMutex.Unlock() 130 | 131 | if srcAlive == false { 132 | continue 133 | } 134 | 135 | downloadLocations += src.ip + " " + src.basePort + "1" + " " 136 | downloadLocations += src.ip + " " + src.basePort + "2" + " " 137 | atleast = true 138 | } 139 | 140 | return downloadLocations, atleast 141 | } 142 | 143 | // 
downloadRequestHandler A function handle requests of type download 144 | func (trackerNodeObj *trackerNode) downloadRequestHandler(req request.UploadRequest) { 145 | logMsg := fmt.Sprintf("Handling download request #%d, from client #%d", req.ID, req.ClientID) 146 | logger.LogMsg(LogSignTR, trackerNodeObj.id, logMsg) 147 | 148 | //Check if the file to download exists and where 149 | metafile, exists := trackerNodeObj.checkFileExistance(req.FileName, req.ClientID) 150 | response := "" 151 | if exists == false { //If the file doesn't exist, send a 404 152 | response = "404: File not found" 153 | trackerNodeObj.sendDataNodePortsToClient(req, response) 154 | return 155 | } 156 | 157 | //If the file exists, get all possible download srcs 158 | downloadSrcs, atleast := trackerNodeObj.getDownloadSrcs(metafile.location) 159 | if atleast == false { 160 | response = "All source datanodes are offline, try again later" 161 | trackerNodeObj.sendDataNodePortsToClient(req, response) 162 | return 163 | } 164 | 165 | response = strconv.Itoa(metafile.fileSize) + " " + downloadSrcs 166 | 167 | trackerNodeObj.sendDataNodePortsToClient(req, response) 168 | } 169 | 170 | // getReplicationSrc A function to obtain the replication source out of all source nodes 171 | func (trackerNodeObj *trackerNode) getReplicationSrc(locations []int) (dataNodeRow, bool) { 172 | for i := 0; i < len(locations); i++ { 173 | trackerNodeObj.dbMutex.Lock() 174 | src, srcAlive := selectDataNode(trackerNodeObj.db, locations[i]) 175 | trackerNodeObj.dbMutex.Unlock() 176 | 177 | if srcAlive == true { 178 | return src, true 179 | } 180 | } 181 | 182 | return dataNodeRow{}, false 183 | } 184 | 185 | // replicateFile A function to replicate a file to all avaiable datanodes to at most 3 186 | func (trackerNodeObj *trackerNode) replicateFile(metafile fileRow) { 187 | fields := strings.Fields(metafile.location) 188 | 189 | //The file is fully replicated 190 | if len(fields) == 3 { 191 | return 192 | } 193 | 194 | var locations []int 195 | for i := 0; i < len(fields); i++ { 196 | id, _ := strconv.Atoi(fields[i]) 197 | locations = append(locations, id) 198 | } 199 | 200 | newLocation := metafile.location 201 | 202 | //The file needs replication 203 | for dn := 1; dn <= 3; dn++ { 204 | found := false 205 | for i := 0; i < len(locations); i++ { 206 | if dn == locations[i] { 207 | found = true 208 | break 209 | } 210 | } 211 | //This dn has the file, no need to replicate 212 | if found == true { 213 | continue 214 | } 215 | 216 | //This dn doesn't have the file, we need to replicate 217 | 218 | //Get the destination node, and check if it's alive 219 | trackerNodeObj.dbMutex.Lock() 220 | dst, dstAlive := selectDataNode(trackerNodeObj.db, dn) 221 | trackerNodeObj.dbMutex.Unlock() 222 | 223 | if dstAlive == false { //If dst isn't alive, then ignore it 224 | log.Println("Dst is dead") 225 | continue 226 | } 227 | 228 | //Dst is alive, let's find a src 229 | src, srcAlive := trackerNodeObj.getReplicationSrc(locations) 230 | if srcAlive == false { //If there are no alive sources, then ignore it 231 | log.Println("Src is dead") 232 | continue 233 | } 234 | 235 | //We have an alive src and an alive dst 236 | //If any of them went down later, then no replication will take place 237 | //And the error handling will take care of that 238 | repReqObj := request.ReplicationRequest{ 239 | ID: 0, 240 | Type: request.Replicate, 241 | ClientID: metafile.clientID, 242 | FileName: metafile.fileName, 243 | SourceID: src.id, 244 | TargetNodeID: dst.id, 245 | TargetNodeIP: 
dst.ip, 246 | TargetNodeBasePort: dst.basePort, 247 | TrackerPort: trackerNodeObj.datanodePort, 248 | } 249 | 250 | trackerNodeObj.sendReplicationRequest(repReqObj, src.ip, src.basePort+"21") 251 | success := trackerNodeObj.recieveReplicationCompletion() 252 | 253 | if success == true { 254 | newLocation += " " + strconv.Itoa(dst.id) 255 | 256 | //Update the metafile entry 257 | trackerNodeObj.dbMutex.Lock() 258 | updateMetaFile(trackerNodeObj.db, newLocation, metafile.fileName, metafile.clientID) 259 | trackerNodeObj.dbMutex.Unlock() 260 | } 261 | } 262 | } 263 | 264 | // Replicate A function that implements the periodic Replication routine 265 | func (trackerNodeObj *trackerNode) Replicate() { 266 | for range time.Tick(constants.ReplicationRoutineFrequency) { 267 | logger.LogMsg(LogSignL, trackerNodeObj.id, "Replication Routine, running ...") 268 | 269 | trackerNodeObj.dbMutex.Lock() 270 | metaFiles := selectMetaFiles(trackerNodeObj.db) 271 | trackerNodeObj.dbMutex.Unlock() 272 | 273 | for _, metaFile := range metaFiles { 274 | trackerNodeObj.replicateFile(metaFile) 275 | } 276 | } 277 | } 278 | 279 | // completionRequestHandler A function to handle the completion notifications 280 | func (trackerNodeObj *trackerNode) completionRequestHandler(req request.CompletionRequest) { 281 | trackerNodeObj.dbMutex.Lock() 282 | insertMetaFile(trackerNodeObj.db, req.FileName, req.ClientID, req.FileSize, req.Location) 283 | trackerNodeObj.dbMutex.Unlock() 284 | 285 | msg := fmt.Sprintf("Successfully uploaded file %s of size %d", req.FileName, req.FileSize) 286 | trackerNodeObj.notifyClient(req.ClientIP, req.ClientPort[:3]+"7", msg, req.ClientID) 287 | } 288 | 289 | // displayRequestHandler A function to handle the display request 290 | func (trackerNodeObj *trackerNode) displayRequestHandler(req request.UploadRequest) { 291 | var fileList []fileRow 292 | 293 | trackerNodeObj.dbMutex.Lock() 294 | fileList = selectMetaFileForClient(trackerNodeObj.db, req.ClientID) 295 | trackerNodeObj.dbMutex.Unlock() 296 | 297 | if len(fileList) == 0 { 298 | response := "No Files" 299 | trackerNodeObj.sendDataNodePortsToClient(req, response) 300 | 301 | return 302 | } 303 | 304 | response := "" 305 | for i := 0; i < len(fileList); i++ { 306 | response += fileList[i].fileName + " " + strconv.Itoa(fileList[i].fileSize) + " " 307 | } 308 | 309 | fmt.Println("response = ", response) 310 | trackerNodeObj.sendDataNodePortsToClient(req, response) 311 | } 312 | -------------------------------------------------------------------------------- /Distributed-File-System/Utils/Comm/comm.go: -------------------------------------------------------------------------------- 1 | package comm 2 | 3 | import "github.com/pebbe/zmq4" 4 | 5 | // ACK The synchronous acknowledge message 6 | const ACK string = "ACK" 7 | 8 | // Init A function to initialize communication 9 | func Init(socketType zmq4.Type, topic string) (*zmq4.Socket, bool) { 10 | socket, err := zmq4.NewSocket(socketType) 11 | status := isOkay(err) 12 | 13 | socket.SetLinger(0) 14 | 15 | if socketType == zmq4.SUB { 16 | socket.SetSubscribe(topic) 17 | } 18 | 19 | return socket, status 20 | } 21 | 22 | // Connect A function that connects a socket to all provided connections 23 | func Connect(socket *zmq4.Socket, connections []string) { 24 | for _, connection := range connections { 25 | socket.Connect(connection) 26 | } 27 | } 28 | 29 | // Bind A function that binds a socket to all provided connections 30 | func Bind(socket *zmq4.Socket, connections []string) { 31 | for _, 
connection := range connections { 32 | socket.Bind(connection) 33 | } 34 | } 35 | 36 | // Disconnect A function that disconnects a socket from all provided connections 37 | func Disconnect(socket *zmq4.Socket, connections []string) { 38 | for _, connection := range connections { 39 | socket.Disconnect(connection) 40 | } 41 | } 42 | 43 | // SendString A function that synchronously sends a string on a socket 44 | func SendString(socket *zmq4.Socket, msg string) bool { 45 | _, sendErr := socket.Send(msg, 0) 46 | acknowledge, recvErr := socket.Recv(0) 47 | 48 | status := isOkay(sendErr) && isOkay(recvErr) && (acknowledge == ACK) 49 | 50 | return status 51 | } 52 | 53 | // SendBytes A function that synchronously sends an array of bytes on a socket 54 | func SendBytes(socket *zmq4.Socket, data []byte) bool { 55 | _, sendErr := socket.SendBytes(data, 0) 56 | acknowledge, recvErr := socket.Recv(0) 57 | 58 | status := isOkay(sendErr) && isOkay(recvErr) && (acknowledge == ACK) 59 | 60 | return status 61 | } 62 | 63 | // RecvString A function that synchronously receives a string from a socket 64 | func RecvString(socket *zmq4.Socket) (string, bool) { 65 | msg, recvErr := socket.Recv(0) 66 | status := isOkay(recvErr) && (msg != "") 67 | 68 | if status == true { 69 | socket.Send(ACK, 0) 70 | } 71 | 72 | return msg, status 73 | } 74 | 75 | // RecvBytes A function that synchronously sends an array of bytes from a socket 76 | func RecvBytes(socket *zmq4.Socket) ([]byte, bool) { 77 | data, recvErr := socket.RecvBytes(0) 78 | status := isOkay(recvErr) 79 | 80 | if status == true { 81 | socket.Send(ACK, 0) 82 | } 83 | 84 | return data, status 85 | } 86 | 87 | // GetConnectionString A function to formulate the connection string given IP and Port 88 | func GetConnectionString(ip string, port string) string { 89 | connectionString := "tcp://" + ip + ":" + port 90 | 91 | return connectionString 92 | } 93 | 94 | // isOkay A function to check if there is an error 95 | func isOkay(err error) bool { 96 | if err == nil { 97 | return true 98 | } 99 | return false 100 | } 101 | -------------------------------------------------------------------------------- /Distributed-File-System/Utils/Constants/constants.go: -------------------------------------------------------------------------------- 1 | package constants 2 | 3 | import "time" 4 | 5 | // TrackerIP Tracker machine IP 6 | var TrackerIP = "127.0.0.1" 7 | 8 | // TrackerReqPorts Tracker requests ports [used by clients] 9 | var TrackerReqPorts = []string{"8001", "8002"} 10 | 11 | // TrackerDNPorts Tracker data node ports 12 | var TrackerDNPorts = []string{"9001", "9002"} 13 | 14 | // TrackerIPsPort A port on which the tracker receives IP handshakes 15 | var TrackerIPsPort = "9000" 16 | 17 | // MasterTrackerID The process ID of the master tracker 18 | var MasterTrackerID = 0 19 | 20 | // DisconnectionThreshold The time after which we consider a data node offline 21 | var DisconnectionThreshold = time.Duration(2*time.Second + 1) 22 | 23 | // TrackerResponse A temporary tracker response 24 | var TrackerResponse = DataNodeLauncherIP + " " + "7011" 25 | 26 | // ReplicationRoutineFrequency The time after which the replication routine runs 27 | var ReplicationRoutineFrequency = time.Duration(1 * time.Minute) 28 | 29 | var DownloadIP1 = DataNodeLauncherIP 30 | var DownloadPort1 = "701" 31 | var DownloadIP2 = DataNodeLauncherIP 32 | var DownloadPort2 = "702" 33 | var DownloadIP3 = DataNodeLauncherIP 34 | var DownloadPort3 = "601" 35 | var DownloadIP4 = DataNodeLauncherIP 36 | 
var DownloadPort4 = "602" 37 | var DownloadIP5 = DataNodeLauncherIP 38 | var DownloadPort5 = "501" 39 | var DownloadIP6 = DataNodeLauncherIP 40 | var DownloadPort6 = "502" 41 | 42 | //---------------------------------------------------------------------- 43 | 44 | // DataNodeLauncherIP The IP of a single Data Node 45 | var DataNodeLauncherIP = "127.0.0.1" 46 | 47 | //---------------------------------------------------------------------- 48 | 49 | // ClientIP Client IP 50 | var ClientIP = "127.0.0.1" 51 | -------------------------------------------------------------------------------- /Distributed-File-System/Utils/Database/setup.go: -------------------------------------------------------------------------------- 1 | package dbwrapper 2 | 3 | import ( 4 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 5 | "database/sql" 6 | "fmt" 7 | "os" 8 | 9 | "github.com/joho/godotenv" 10 | _ "github.com/lib/pq" //Imports the postgres driver 11 | ) 12 | 13 | // loadEnvironmentVars A function to load DB environment vairables 14 | func loadEnvironmentVars() string { 15 | host := os.Getenv(Host) 16 | port := os.Getenv(Port) 17 | user := os.Getenv(UserName) 18 | password := os.Getenv(Password) 19 | dbName := os.Getenv(DBName) 20 | 21 | vars := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable", 22 | host, port, user, password, dbName) 23 | 24 | return vars 25 | } 26 | 27 | // ConnectDB A function to obtain a database connection 28 | func ConnectDB() *sql.DB { 29 | err := godotenv.Load() 30 | logger.LogDBErr(err, LogSign, "connetDB(): Error loading environment variables", true) 31 | 32 | enVars := loadEnvironmentVars() 33 | 34 | db, err := sql.Open(DriverName, enVars) 35 | logger.LogDBErr(err, LogSign, "connetDB(): Error opening Database", true) 36 | 37 | err = db.Ping() 38 | logger.LogDBErr(err, LogSign, "connetDB(): Database ping test failed", true) 39 | 40 | logger.LogDBSuccess(err, LogSign, "Successfully Connected") 41 | 42 | return db 43 | } 44 | 45 | // ExecuteQuery A function to execute queries that don't return any rows 46 | func ExecuteQuery(db *sql.DB, sqlStatement string, logMsgs logger.LogInfo, abort bool, params ...interface{}) bool { 47 | _, err := db.Exec(sqlStatement, params...) 48 | logger.LogDBErr(err, LogSign, "executeQuery():"+logMsgs.Error, abort) 49 | logger.LogDBSuccess(err, LogSign, logMsgs.Success) 50 | 51 | return (err == nil) 52 | } 53 | 54 | // ExecuteRowQuery A function to execute queries that are expected to return at most 1 row 55 | func ExecuteRowQuery(db *sql.DB, sqlStatement string, params ...interface{}) *sql.Row { 56 | row := db.QueryRow(sqlStatement, params...) 57 | 58 | return row 59 | } 60 | 61 | // ExecuteRowsQuery A function to execute queries that are expected to return many rows 62 | func ExecuteRowsQuery(db *sql.DB, sqlStatement string, logMsgs logger.LogInfo, abort bool, params ...interface{}) (*sql.Rows, bool) { 63 | rows, err := db.Query(sqlStatement, params...) 
64 | logger.LogDBErr(err, LogSign, logMsgs.Error, false) 65 | logger.LogDBSuccess(err, LogSign, logMsgs.Success) 66 | 67 | return rows, (err == nil) 68 | } 69 | 70 | // Migrate A function to perform the DB migration 71 | func Migrate(db *sql.DB, migrationStatement string) { 72 | logMsgs := logger.LogInfo{ 73 | Success: "Successfully migrated the Database", 74 | Error: "Database Migration failed", 75 | } 76 | 77 | ExecuteQuery(db, migrationStatement, logMsgs, true) 78 | } 79 | 80 | // CleanUP A function to perform DB clean up 81 | func CleanUP(db *sql.DB, cleanUpStatement string) { 82 | logMsgs := logger.LogInfo{ 83 | Success: "Successfully cleaned up the Database", 84 | Error: "Clean Up failed", 85 | } 86 | 87 | ExecuteQuery(db, cleanUpStatement, logMsgs, true) 88 | } 89 | -------------------------------------------------------------------------------- /Distributed-File-System/Utils/Database/types.go: -------------------------------------------------------------------------------- 1 | package dbwrapper 2 | 3 | const ( 4 | // LogSign Used for logging the DB messages 5 | LogSign string = "[DB]" 6 | 7 | // DriverName The DB driver name 8 | DriverName string = "postgres" 9 | 10 | // Host Host name 11 | Host string = "HOST" 12 | 13 | // Port Port number 14 | Port string = "PORT" 15 | 16 | // UserName User Name 17 | UserName string = "USER_NAME" 18 | 19 | // Password The DB Password 20 | Password string = "PASSWORD" 21 | 22 | // DBName The DB name 23 | DBName string = "DB_NAME" 24 | ) 25 | -------------------------------------------------------------------------------- /Distributed-File-System/Utils/File/fileutils.go: -------------------------------------------------------------------------------- 1 | package fileutils 2 | 3 | import ( 4 | "io" 5 | "log" 6 | "os" 7 | "strconv" 8 | ) 9 | 10 | // LogSign Used in logging FileUtil errors 11 | const LogSign string = "[File]" 12 | 13 | // ChunkSize The chunk size for reading and sending files 14 | const ChunkSize int64 = 1024 * 1024 15 | 16 | // logErr A function to log the error message 17 | func logErr(err error) { 18 | if err != nil { 19 | log.Fatal(LogSign, err) 20 | } 21 | } 22 | 23 | // isDone A function to determine when read a file is done 24 | func isDone(err error) bool { 25 | if err == io.EOF { 26 | return true 27 | } 28 | return false 29 | } 30 | 31 | // CreateFile A function to create a file specified by name 32 | func CreateFile(fileName string) *os.File { 33 | file, err := os.Create(fileName) 34 | logErr(err) 35 | 36 | return file 37 | } 38 | 39 | // OpenFile A function to open a file specified by name 40 | func OpenFile(fileName string) *os.File { 41 | file, err := os.Open(fileName) 42 | logErr(err) 43 | 44 | return file 45 | } 46 | 47 | // GetFileSize A function to return the file size in bytes 48 | func GetFileSize(fileName string) int64 { 49 | fileInfo, err := os.Stat(fileName) 50 | logErr(err) 51 | 52 | return fileInfo.Size() 53 | } 54 | 55 | // SeekPosition A function to seek a certian position in a file 56 | func SeekPosition(file *os.File, start int) *os.File { 57 | offset := int64(start) * ChunkSize 58 | whence := 0 59 | 60 | file.Seek(offset, whence) 61 | 62 | return file 63 | } 64 | 65 | // OpenSeekFile A function to open a file from a certain start 66 | func OpenSeekFile(fileName string, start int) *os.File { 67 | file := OpenFile(fileName) 68 | file = SeekPosition(file, start) 69 | 70 | return file 71 | } 72 | 73 | // GetChunksCount A function to get the number of chunks in a file 74 | func GetChunksCount(fileName string) 
int { 75 | fileSize := GetFileSize(fileName) 76 | chunksCount := (fileSize + ChunkSize - 1) / ChunkSize 77 | 78 | return int(chunksCount) 79 | } 80 | 81 | // ReadChunk A function to read chunkSize bytes of a file from the previous position 82 | func ReadChunk(file *os.File) ([]byte, int, bool) { 83 | buffer := make([]byte, ChunkSize) 84 | size, err := file.Read(buffer) 85 | 86 | done := isDone(err) 87 | 88 | return buffer, size, done 89 | } 90 | 91 | // WriteChunk A function to write chunkSize bytes to a file 92 | func WriteChunk(file *os.File, chunk []byte) int { 93 | size, err := file.Write(chunk) 94 | logErr(err) 95 | 96 | return size 97 | } 98 | 99 | // DeleteFile A function to delete a file 100 | func DeleteFile(fileName string) { 101 | err := os.Remove(fileName) 102 | logErr(err) 103 | } 104 | 105 | // IsThere A function to check of a file/folder exists 106 | func IsThere(fileName string) bool { 107 | _, err := os.Stat(fileName) 108 | if err != nil { 109 | if os.IsNotExist(err) { 110 | return false 111 | } 112 | } 113 | return true 114 | } 115 | 116 | // CreateDirectory A function to create a directory 117 | func CreateDirectory(fileName string) { 118 | if !IsThere(fileName) { 119 | os.Mkdir(fileName, os.ModePerm) 120 | } 121 | } 122 | 123 | // AssembleFile A function to assemble pieces into a single file 124 | func AssembleFile(outputFileName string, pieceName string, extension string, blockCount int) { 125 | outFile := CreateFile(outputFileName) 126 | defer outFile.Close() 127 | 128 | for i := 1; i <= blockCount; i++ { 129 | fileName := pieceName + "#" + strconv.Itoa(i) + extension 130 | count := GetChunksCount(fileName) 131 | file := OpenFile(fileName) 132 | 133 | for j := 1; j <= count; j++ { 134 | data, _, _ := ReadChunk(file) 135 | WriteChunk(outFile, data) 136 | log.Println("Assembled chunk #", j) 137 | } 138 | 139 | file.Close() 140 | DeleteFile(fileName) 141 | log.Println("Assembled Block #", i) 142 | } 143 | log.Println("Assembly Finished") 144 | } 145 | -------------------------------------------------------------------------------- /Distributed-File-System/Utils/Log/logger.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import "log" 4 | 5 | // LogInfo Used to customize logging info insider generic functions 6 | type LogInfo struct { 7 | Success string //The message in case of success 8 | Error string //The message in case of failure 9 | } 10 | 11 | // LogMsg A function to log messages 12 | func LogMsg(sign string, id int, msg string) { 13 | log.Println(sign, "#", id, msg) 14 | } 15 | 16 | // LogFail A function to log failure messages 17 | func LogFail(ok bool, sign string, id int, msg string) { 18 | if ok == false { 19 | LogMsg(sign, id, msg) 20 | } 21 | } 22 | 23 | // LogSuccess A function to log success messages 24 | func LogSuccess(ok bool, sign string, id int, msg string) { 25 | if ok == true { 26 | LogMsg(sign, id, msg) 27 | } 28 | } 29 | 30 | // LogErr A function to log error messages 31 | func LogErr(err error, sign string, id int, msg string) { 32 | if err != nil { 33 | LogMsg(sign, id, msg) 34 | } 35 | } 36 | 37 | // LogDBMsg A function to log messages 38 | func LogDBMsg(sign string, msg string) { 39 | log.Println(sign, "#", msg) 40 | } 41 | 42 | // LogDBSuccess A function to log success messages 43 | func LogDBSuccess(err error, sign string, msg string) { 44 | if err == nil { 45 | LogDBMsg(sign, msg) 46 | } 47 | } 48 | 49 | // LogDBErr A function to log error messages 50 | func LogDBErr(err error, 
sign string, msg string, abort bool) { 51 | if err != nil { 52 | LogDBMsg(sign, msg) 53 | if abort == true { 54 | panic(err) 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /Distributed-File-System/Utils/Request/completion.go: -------------------------------------------------------------------------------- 1 | package request 2 | 3 | import ( 4 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | ) 9 | 10 | // SerializeCompletion A function to serialize a completion request 11 | func SerializeCompletion(request CompletionRequest) string { 12 | serializedRequest := string(request.Type) + " " + 13 | strconv.Itoa(request.ClientID) + " " + 14 | request.ClientIP + " " + 15 | request.ClientPort + " " + 16 | request.FileName + " " + 17 | strconv.Itoa(request.FileSize) + " " + 18 | request.Location 19 | 20 | return serializedRequest 21 | } 22 | 23 | // DeserializeCompletion A function to deserialize a completion request 24 | func DeserializeCompletion(serializedRequest string) CompletionRequest { 25 | fields := strings.Fields(serializedRequest) 26 | clientID, err0 := strconv.Atoi(fields[1]) 27 | fileSize, err1 := strconv.Atoi(fields[5]) 28 | 29 | if (err0 != nil) || (err1 != nil) { 30 | logger.LogMsg(LogSign, 0, "Completion:Deserialize() Deserialization failed") 31 | return CompletionRequest{} 32 | } 33 | 34 | location := "" 35 | for i := 6; i < len(fields); i++ { 36 | location += fields[i] 37 | if i < len(fields)-1 { 38 | location += " " 39 | } 40 | } 41 | 42 | requestObj := CompletionRequest{ 43 | Type: Type(fields[0]), 44 | ClientID: clientID, 45 | ClientIP: fields[2], 46 | ClientPort: fields[3], 47 | FileName: fields[4], 48 | FileSize: fileSize, 49 | Location: location, 50 | } 51 | 52 | return requestObj 53 | } 54 | 55 | // PrintCompletion A function to print a completion request 56 | func PrintCompletion(request CompletionRequest) { 57 | fmt.Println("Request info:") 58 | fmt.Println(" Type = ", string(request.Type)) 59 | fmt.Println(" ClientID = ", request.ClientID) 60 | fmt.Println(" ClientIP = ", request.ClientIP) 61 | fmt.Println(" ClientPort = ", request.ClientPort) 62 | fmt.Println(" FileName = ", request.FileName) 63 | fmt.Println(" FileSize = ", request.FileSize) 64 | fmt.Println(" Location = ", request.Location) 65 | fmt.Println() 66 | } 67 | -------------------------------------------------------------------------------- /Distributed-File-System/Utils/Request/download.go: -------------------------------------------------------------------------------- 1 | package request 2 | 3 | import ( 4 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | ) 9 | 10 | // SerializeDownload A function to serialize an upload request 11 | func SerializeDownload(request DownloadRequest) string { 12 | serializedRequest := string(request.Type) + " " + 13 | strconv.Itoa(request.ID) + " " + 14 | strconv.Itoa(request.ClientID) + " " + 15 | request.ClientIP + " " + 16 | request.ClientPort + " " + 17 | request.FileName 18 | 19 | return serializedRequest 20 | } 21 | 22 | // DeserializeDownload A function to deserialize an upload request 23 | func DeserializeDownload(serializedRequest string) DownloadRequest { 24 | fields := strings.Fields(serializedRequest) 25 | requestID, err0 := strconv.Atoi(fields[1]) 26 | clientID, err1 := strconv.Atoi(fields[2]) 27 | 28 | if (err0 != nil) || (err1 != nil) { 29 | 
logger.LogMsg(LogSign, 0, "Upload:Deserialize() Deserialization failed") 30 | return DownloadRequest{} 31 | } 32 | 33 | requestObj := DownloadRequest{ 34 | ID: requestID, 35 | Type: Type(fields[0]), 36 | ClientID: clientID, 37 | ClientIP: fields[3], 38 | ClientPort: fields[4], 39 | FileName: fields[5], 40 | } 41 | 42 | return requestObj 43 | } 44 | 45 | // PrintDownload A function to print an upload request 46 | func PrintDownload(request DownloadRequest) { 47 | fmt.Println("Request info:") 48 | fmt.Println(" ID = ", request.ID) 49 | fmt.Println(" Type = ", string(request.Type)) 50 | fmt.Println(" ClientID = ", request.ClientID) 51 | fmt.Println(" ClientIP = ", request.ClientIP) 52 | fmt.Println(" ClientPort = ", request.ClientPort) 53 | fmt.Println(" FileName = ", request.FileName) 54 | fmt.Println() 55 | } 56 | -------------------------------------------------------------------------------- /Distributed-File-System/Utils/Request/replication.go: -------------------------------------------------------------------------------- 1 | package request 2 | 3 | import ( 4 | logger "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | ) 9 | 10 | // SerializeReplication A function to serialize replication requests 11 | func SerializeReplication(request ReplicationRequest) string { 12 | serializedRequest := string(request.Type) + " " + 13 | strconv.Itoa(request.ID) + " " + 14 | strconv.Itoa(request.ClientID) + " " + 15 | request.FileName + " " + 16 | strconv.Itoa(request.SourceID) + " " + 17 | strconv.Itoa(request.TargetNodeID) + " " + 18 | request.TargetNodeIP + " " + 19 | request.TargetNodeBasePort + " " + 20 | request.TrackerPort 21 | 22 | return serializedRequest 23 | } 24 | 25 | // DeserializeReplication A function to deserialize replication requests 26 | func DeserializeReplication(serializedRequest string) ReplicationRequest { 27 | fields := strings.Fields(serializedRequest) 28 | requestID, err0 := strconv.Atoi(fields[1]) 29 | clientID, err1 := strconv.Atoi(fields[2]) 30 | sourceID, err3 := strconv.Atoi(fields[4]) 31 | targetNodeID, err4 := strconv.Atoi(fields[5]) 32 | 33 | if (err0 != nil) || (err1 != nil) || (err3 != nil) || (err4 != nil) { 34 | logger.LogMsg(LogSign, 0, "Replicate:Deserialize() Deserialization failed") 35 | return ReplicationRequest{} 36 | } 37 | 38 | requestObj := ReplicationRequest{ 39 | ID: requestID, 40 | Type: Type(fields[0]), 41 | ClientID: clientID, 42 | FileName: fields[3], 43 | SourceID: sourceID, 44 | TargetNodeID: targetNodeID, 45 | TargetNodeIP: fields[6], 46 | TargetNodeBasePort: fields[7], 47 | TrackerPort: fields[8], 48 | } 49 | 50 | return requestObj 51 | } 52 | 53 | // PrintReplication A function to print replication requests 54 | func PrintReplication(request ReplicationRequest) { 55 | fmt.Println("Request info:") 56 | fmt.Println(" ID = ", request.ID) 57 | fmt.Println(" Type = ", string(request.Type)) 58 | fmt.Println(" ClientID = ", request.ClientID) 59 | fmt.Println(" FileName = ", request.FileName) 60 | fmt.Println(" SoruceID = ", request.SourceID) 61 | fmt.Println(" TargetNodeID = ", request.TargetNodeID) 62 | fmt.Println(" TargetNodeIP = ", request.TargetNodeIP) 63 | fmt.Println(" TargetNodeBasePort = ", request.TargetNodeBasePort) 64 | fmt.Println(" TrackerPort = ", request.TrackerPort) 65 | fmt.Println() 66 | } 67 | -------------------------------------------------------------------------------- /Distributed-File-System/Utils/Request/types.go: 
--------------------------------------------------------------------------------
  1 | package request
  2 | 
  3 | import "strings"
  4 | 
  5 | // LogSign Used for logging Request related messages
  6 | const LogSign string = "[Request]"
  7 | 
  8 | // Type An Enum to represent the different types of client requests
  9 | type Type string
 10 | 
 11 | const (
 12 | 	//Download A Download request (dwn)
 13 | 	Download Type = "dwn"
 14 | 
 15 | 	//Upload An Upload request (up)
 16 | 	Upload Type = "up"
 17 | 
 18 | 	//Display A Display request (ls)
 19 | 	Display Type = "ls"
 20 | 
 21 | 	//Replicate A replication request
 22 | 	Replicate Type = "rep"
 23 | 
 24 | 	//Completion An action completion notification
 25 | 	Completion Type = "comp"
 26 | 
 27 | 	//Invalid An error type
 28 | 	Invalid Type = "inv"
 29 | )
 30 | 
 31 | // UploadRequest Represents an upload request
 32 | type UploadRequest struct {
 33 | 	ID         int    //The ID of the request
 34 | 	Type       Type   //Represents the type of a request
 35 | 	ClientID   int    //The ID of the client who sent the request
 36 | 	ClientIP   string //The IP of the client who sent the request
 37 | 	ClientPort string //The port of the client who sent the request
 38 | 	FileName   string //The name of the file to be uploaded
 39 | }
 40 | 
 41 | // DownloadRequest Represents a download request
 42 | type DownloadRequest struct {
 43 | 	ID         int    //The ID of the request
 44 | 	Type       Type   //Represents the type of a request
 45 | 	ClientID   int    //The ID of the client who sent the request
 46 | 	ClientIP   string //The IP of the client who sent the request
 47 | 	ClientPort string //The port of the client who sent the request
 48 | 	FileName   string //The name of the file to be downloaded
 49 | }
 50 | 
 51 | // ReplicationRequest Represents a replication request
 52 | type ReplicationRequest struct {
 53 | 	ID                 int    //The ID of the replication request
 54 | 	Type               Type   //Represents the type of a request
 55 | 	ClientID           int    //The client ID associated with the replicated file
 56 | 	FileName           string //The file name to be replicated
 57 | 	SourceID           int    //The ID of the source Data Node
 58 | 	TargetNodeID       int    //The ID of the target machine
 59 | 	TargetNodeIP       string //The IP of the target machine (connect there)
 60 | 	TargetNodeBasePort string //The replication port of the target machine (connect there)
 61 | 	TrackerPort        string //Port of the tracker process that issued the request
 62 | }
 63 | 
 64 | // CompletionRequest Represents a notification of an action completion
 65 | type CompletionRequest struct {
 66 | 	Type       Type   //The request type
 67 | 	ClientID   int    //The ID for the client
 68 | 	ClientIP   string //The IP of the client
 69 | 	ClientPort string //The port of the client
 70 | 	FileName   string //The name of the file
 71 | 	FileSize   int    //The size of the file
 72 | 	Location   string //A concatenated string of all locations where the file exists
 73 | }
 74 | 
 75 | // GetType A function to get the type of a request
 76 | func GetType(req string) Type {
 77 | 	reqType := strings.Fields(req)[0]
 78 | 
 79 | 	if reqType == "dwn" {
 80 | 		return Download
 81 | 
 82 | 	} else if reqType == "up" {
 83 | 		return Upload
 84 | 
 85 | 	} else if reqType == "ls" {
 86 | 		return Display
 87 | 
 88 | 	} else if reqType == "rep" {
 89 | 		return Replicate
 90 | 
 91 | 	} else if reqType == "comp" {
 92 | 		return Completion
 93 | 
 94 | 	} else {
 95 | 		return Invalid
 96 | 	}
 97 | }
 98 | 
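// The snippet below is an illustrative usage sketch for the request package
// above; it is not a file in this repository. A receiver inspects the first
// field of the serialized string via GetType, then deserializes it with the
// matching helper. The example request string follows the field order used by
// SerializeUpload and SerializeDownload; the dispatch helper is hypothetical.
package main

import (
	request "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Request"
	"log"
)

// dispatch routes a serialized request to the matching deserializer.
func dispatch(serialized string) {
	switch request.GetType(serialized) {
	case request.Upload:
		request.PrintUpload(request.DeserializeUpload(serialized))
	case request.Download:
		request.PrintDownload(request.DeserializeDownload(serialized))
	default:
		log.Println("unsupported or invalid request:", serialized)
	}
}

func main() {
	// Format: "up <requestID> <clientID> <clientIP> <clientPort> <fileName>"
	dispatch("up 1 7 127.0.0.1 6000 sample.mp4")
}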
"Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Log" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | ) 9 | 10 | // SerializeUpload A function to serialize an upload request 11 | func SerializeUpload(request UploadRequest) string { 12 | serializedRequest := string(request.Type) + " " + 13 | strconv.Itoa(request.ID) + " " + 14 | strconv.Itoa(request.ClientID) + " " + 15 | request.ClientIP + " " + 16 | request.ClientPort + " " + 17 | request.FileName 18 | 19 | return serializedRequest 20 | } 21 | 22 | // DeserializeUpload A function to deserialize an upload request 23 | func DeserializeUpload(serializedRequest string) UploadRequest { 24 | fields := strings.Fields(serializedRequest) 25 | requestID, err0 := strconv.Atoi(fields[1]) 26 | clientID, err1 := strconv.Atoi(fields[2]) 27 | 28 | if (err0 != nil) || (err1 != nil) { 29 | logger.LogMsg(LogSign, 0, "Upload:Deserialize() Deserialization failed") 30 | return UploadRequest{} 31 | } 32 | 33 | requestObj := UploadRequest{ 34 | ID: requestID, 35 | Type: Type(fields[0]), 36 | ClientID: clientID, 37 | ClientIP: fields[3], 38 | ClientPort: fields[4], 39 | FileName: fields[5], 40 | } 41 | 42 | return requestObj 43 | } 44 | 45 | // PrintUpload A function to print an upload request 46 | func PrintUpload(request UploadRequest) { 47 | fmt.Println("Request info:") 48 | fmt.Println(" ID = ", request.ID) 49 | fmt.Println(" Type = ", string(request.Type)) 50 | fmt.Println(" ClientID = ", request.ClientID) 51 | fmt.Println(" ClientIP = ", request.ClientIP) 52 | fmt.Println(" ClientPort = ", request.ClientPort) 53 | fmt.Println(" FileName = ", request.FileName) 54 | fmt.Println() 55 | } 56 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # **Distributed File System** 2 | The main objective of the File System is to offer a platform for data storage, that's [**distributed**](#system-components) on multiple machines, [**highly available**](#data-replication-mechanism), [**reliable**](#data-replication-mechanism) and [**fault tolerant**](#fault-tolerance). 3 | 4 | ## **Table of Contents** 5 | - [**System Components**](#system-components) 6 | * [Tracker Node](#tracker-node) 7 | * [Data Keeper Node](#data-keeper-node) 8 | - [**Data Replication Mechanism**](#data-replication-mechanism) 9 | - [**Types of Requests**](#types-of-requests) 10 | - [**Request Handling**](#request-handling) 11 | * [Upload Request Handler](#upload-request-handler) 12 | * [Download Request Handler](#download-request-handler) 13 | * [Display Request Handler](#display-request-handler) 14 | - [**Fault Tolerance**](#fault-tolerance) 15 | - [**System Testing**](#system-testing) 16 | 17 | ## **System Components** 18 | The system consists of 2 types of **Nodes**, namely, the **Tracker Node** and the **Data Keeper Node**. 19 | 20 | - ### **Tracker Node** 21 | The Tracker Node is a multi-process node that works as the coordinator of the system. The following highlights its main functionalities: 22 | - Receives handshake signals from Data Keeper Nodes and starts to track them. 23 | - Keeps track of all currently alive Data Keeper Nodes through receiving a periodic (every 1 sec) heartbeat signals and updating its internal tracking structure accordingly. The criteria is that if any Data Keeper Node missed its heartbeat window (2 secs), it will be considered dead. 24 | - Receives user requests (upload/download/display) and handles them accordingly. 
 28 | - ### **Data Keeper Node**
 29 |   The Data Keeper Node is a multi-process node that works as the storage utility of the system. The following highlights its main functionalities:
 30 |   - Sends handshake signals to the Tracker Node to establish communication with it.
 31 |   - Sends periodic heartbeat signals to inform the Tracker Node of its status.
 32 |   - Receives files from users when handling an upload request and manages the internal users' directories.
 33 |   - Sends files to users when handling a download request.
 34 |   - Receives files from and sends files to other Data Keeper Nodes when handling a Replication command from the Tracker Node.
 35 | 
 36 | ## **Data Replication Mechanism**
 37 | The File System performs Data Replication to increase its reliability and availability. The system aims for a 3/n replication ratio: every file uploaded to the file system is replicated 3 times across its n Data Keeper Nodes. Replication also enables multi-source downloads, which speed up the download process and prevent a single server's capacity from becoming a bottleneck.
 38 | 
 39 | Replication is a periodic routine initiated by the Tracker Node once every 2 mins (a parameter that can be changed). The Replication routine goes as follows:
 40 | - The Tracker Node refers to its database and determines the files that need replication.
 41 | - The Tracker Node chooses a suitable source Data Keeper Node and a suitable destination Data Keeper Node for the replication.
 42 | - The Tracker Node sends a replication request to the source Data Keeper Node and provides the needed information about the destination Data Keeper Node.
 43 | - The source Data Keeper Node establishes communication with the destination Node and the file replication starts.
 44 | - Once the replication is done, the source Data Keeper Node sends a completion confirmation to the Tracker Node so it can update its database.
 45 | 
 46 | The Replication algorithm handles the following network errors:
 47 | - If a file has been partially replicated (to 2 machines) and either of the two sources goes offline, the other one is chosen as the source.
 48 | - If all possible destinations are offline, no replicas are made.
 49 | - If a replication is interrupted by a network failure on either the source or the destination Data Keeper Node, the system recovers after a timeout and the replication is restarted.
 50 | 
 51 | ## **Types of Requests**
 52 | The system supports 3 types of requests:
 53 | - An upload request.
 54 | - A download request.
 55 | - A display request (analogous to the `ls` command in UNIX).
 56 | 
 57 | 
 58 | ## **Request Handling**
 59 | The system handles the 3 aforementioned types of requests in the following fashion:
 60 | - ### **Upload Request Handler**
 61 | 
 62 |   The upload request handler works as follows (a receiving-side sketch appears after the notes below):
 63 |   - The authenticated user sends an upload request to the Tracker Node.
 64 |   - The Tracker Node refers to its Database and selects a Data Keeper Node for the file transfer.
 65 |   - The Tracker Node sends the selected IP:Port combination back to the client software so that it can reach the chosen Data Keeper Node.
 66 |   - The user re-sends the request to the designated Data Keeper Node and establishes a connection.
 67 |   - The Data Keeper Node starts receiving data from the user one chunk (1 MB) at a time and stores the data in the user's directory.
 68 |   - When the file transfer is finished, the Data Keeper Node sends a completion confirmation to the Tracker Node.
 69 |   - The Tracker Node updates its files database and sends a completion confirmation back to the user.
 70 | 
 71 |   Notes:
 72 |   - The process of upload request handling is completely multi-threaded. A single Data Keeper Node can handle a virtually unlimited number of upload requests at the same time (bounded only by the network capacity), although performance tends to degrade when the number of requests being served is high.
 73 |   - The data upload is done on a chunk basis, so if the connection is interrupted, receiving can resume from the point where it left off.
 74 | 
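The receiving side can be pictured with the sketch below, which reuses the repository's `Utils/Comm` and `Utils/File` helpers; the `receiveFile` function itself is a hypothetical, simplified stand-in for the DataNode's actual handler (it ignores acknowledgement details and resumption):

```go
package sketch

import (
	comm "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/Comm"
	fileutils "Distributed-Video-Processing-Cluster/Distributed-File-System/Utils/File"
	"strconv"

	"github.com/pebbe/zmq4"
)

// receiveFile reads the chunk count first, then reads that many 1 MB chunks
// from the socket and appends them to the target file.
func receiveFile(socket *zmq4.Socket, path string) bool {
	countStr, ok := comm.RecvString(socket)
	if !ok {
		return false
	}

	chunksCount, err := strconv.Atoi(countStr)
	if err != nil {
		return false
	}

	file := fileutils.CreateFile(path)
	defer file.Close()

	for i := 0; i < chunksCount; i++ {
		chunk, ok := comm.RecvBytes(socket)
		if !ok {
			return false
		}
		fileutils.WriteChunk(file, chunk)
	}

	return true
}
```
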
 75 | - ### **Download Request Handler**
 76 | 
 77 |   The download request handler works as follows (a block-splitting sketch appears after the notes below):
 78 |   - The authenticated user sends a download request to the Tracker Node.
 79 |   - The Tracker Node verifies that the user has access to the desired file and terminates the request if the user doesn't have access rights.
 80 |   - If the user has access rights, the Tracker Node refers to its database to determine all the locations of the file (every Data Keeper Node that has a copy of the file, whether through direct upload or replication).
 81 |   - The Tracker Node prepares the IP:Port combinations of all Data Keeper Nodes that are ready to provide the file and sends them back to the user along with the file size.
 82 |   - The user's software uses the file size to divide the file download across all the available Data Keeper Nodes.
 83 |   - The user's software dispatches a thread for each file block; each thread sends a request to its corresponding Data Keeper Node, receives its designated block of the file, and temporarily writes it to disk.
 84 |   - The user's software waits for all threads to terminate, then combines the pieces downloaded by all threads into a single file on the user's machine.
 85 | 
 86 |   Notes:
 87 |   - The process of download request handling is completely multi-threaded. A single Data Keeper Node can handle a virtually unlimited number of download requests at the same time (bounded only by the network capacity), although performance tends to degrade when the number of requests being served is high.
 88 |   - The data download process is done on a chunk basis, so if the connection is interrupted, the download can continue by requesting only the chunks that are not yet present on disk [Not implemented yet].
 89 |   - The file assembly process is done offline, so even if the user is disconnected, the assembly still works.
 90 |   - The download process is no longer limited by a single server's capacity, because the download takes place from multiple servers simultaneously; the only remaining bottleneck is the user's network capacity.
 91 | 
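The block-splitting step above can be illustrated with the short sketch below. The 1 MB chunk size mirrors `ChunkSize` in `Utils/File/fileutils.go`, while `splitDownload` and `blockRange` are hypothetical names used only for this example; once every block has been fetched, the pieces can be stitched together with a helper such as `fileutils.AssembleFile`.

```go
package main

import "fmt"

const chunkSize int64 = 1024 * 1024 // 1 MB, as in Utils/File/fileutils.go

// blockRange describes the part of the file assigned to one Data Keeper Node.
type blockRange struct {
	NodeIndex  int // which data node serves this block
	StartChunk int // index of the first 1 MB chunk of the block
	ChunkCount int // number of chunks in the block
}

// splitDownload divides a file of fileSize bytes into one block per node, so
// that each block can be fetched by its own thread.
func splitDownload(fileSize int64, nodeCount int) []blockRange {
	totalChunks := int((fileSize + chunkSize - 1) / chunkSize)
	base := totalChunks / nodeCount
	extra := totalChunks % nodeCount

	blocks := make([]blockRange, 0, nodeCount)
	start := 0
	for i := 0; i < nodeCount; i++ {
		count := base
		if i < extra {
			count++
		}
		blocks = append(blocks, blockRange{NodeIndex: i, StartChunk: start, ChunkCount: count})
		start += count
	}
	return blocks
}

func main() {
	// A 5 MB file split across 3 Data Keeper Nodes -> blocks of 2, 2 and 1 chunks.
	fmt.Println(splitDownload(5*chunkSize, 3))
}
```
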
 92 | - ### **Display Request Handler**
 93 | 
 94 |   The display request handler works as follows:
 95 |   - The authenticated user sends a display request to the Tracker Node.
 96 |   - The Tracker Node refers to its database and fetches all the files owned by the current user.
 97 |   - The Tracker Node serializes the response and sends it back to the user.
 98 | 
 99 | **Note**: Due to the multi-threaded, multi-processing nature of the system, a single Node can serve any number of requests of any type at the same time.
100 | 
101 | ## **Fault Tolerance**
102 | The system is designed with fault tolerance in mind. It is able to identify and handle the following types of faults:
103 | - User - Tracker Node connection drop.
104 | - User - Data Keeper Node connection drop.
105 | - Data Keeper Node - Data Keeper Node connection drop.
106 | - Tracker Node - Data Keeper Node connection drop.
107 | 
108 | The system identifies these faults by running blocking operations (send/receive) on separate threads and using channels to notify the main thread of completion. The main thread treats any operation that does not complete before a preset timeout as failed and handles it gracefully.
109 | 
110 | The system is also able to identify other types of errors, such as:
111 | - Wrong file names in upload requests.
112 | - Unauthorized access to files.
113 | 
114 | ## **System Testing**
115 | The system has been tested in a distributed configuration (1 Tracker Node and 3-4 Data Keeper Nodes) under all types of supported requests.
116 | 
--------------------------------------------------------------------------------