├── videooperator ├── __init__.py ├── splitmerge.py ├── nbsubprocess.py └── encoder.py ├── README.md ├── client.py ├── LICENSE ├── worker.py └── server.py /videooperator/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | distvidc 2 | ======== 3 | 4 | Distributed video encoding 5 | -------------------------------------------------------------------------------- /client.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python2 2 | import uuid 3 | import socket 4 | from socket import error as SocketException 5 | import json 6 | import sys 7 | 8 | print sys.argv 9 | #quit() 10 | 11 | 12 | def _send_message(socket_, message): 13 | encoded_message = json.dumps(message) 14 | try: 15 | socket_.sendall(encoded_message + '\n') 16 | except: 17 | return False 18 | else: 19 | return True 20 | 21 | 22 | def _close_socket(socket_): 23 | socket_.shutdown(socket.SHUT_RDWR) 24 | socket_.close() 25 | 26 | 27 | if __name__ == "__main__": 28 | 29 | # Generate worker id 30 | client_id = str(uuid.uuid4()) 31 | sock = socket.create_connection(('localhost', 8000)) 32 | try: 33 | e_a = sys.argv[2] 34 | except: 35 | e_a = '' 36 | _send_message(sock, {"worker_id": client_id, "action": "addjob", "file": sys.argv[1], "encoding_arguments": e_a}) 37 | _close_socket(sock) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 ValdikSS 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /videooperator/splitmerge.py: -------------------------------------------------------------------------------- 1 | import shlex 2 | import subprocess 3 | 4 | class SplitMerge(object): 5 | 6 | MKVMERGE = 'mkvmerge' 7 | SPLIT_TEMPLATE = MKVMERGE + ' -A -S -M -B -T --no-chapters --no-global-tags --split %(split)s -o %(output)s %(input)s' 8 | MERGE_TEMPLATE = MKVMERGE + ' -D %(original)s %(files_to_merge)s -o %(output)s' 9 | 10 | def split(self, input_file, output_files, split_time): 11 | command = SplitMerge.SPLIT_TEMPLATE % ({ 12 | "input": input_file, 13 | "output": output_files, 14 | "split": split_time 15 | }) 16 | 17 | returncode = subprocess.call(shlex.split(command)) 18 | return True if returncode == 0 else False 19 | 20 | def merge(self, original_file, files_to_merge, output_file): 21 | merge_files = '' 22 | for file in files_to_merge: 23 | merge_files += ' -T --no-global-tags %s +' % file 24 | if merge_files[-1] == '+': 25 | merge_files = merge_files[:-2:] 26 | 27 | command = SplitMerge.MERGE_TEMPLATE % ({ 28 | "original": original_file, 29 | "files_to_merge": merge_files, 30 | "output": output_file 31 | }) 32 | 33 | returncode = subprocess.call(shlex.split(command)) 34 | return True if returncode == 0 else False -------------------------------------------------------------------------------- /videooperator/nbsubprocess.py: -------------------------------------------------------------------------------- 1 | from subprocess import * 2 | import select 3 | import fcntl 4 | import os 5 | 6 | class Poll(object): 7 | ''' 8 | Poll class with len() which returns registered fd count. 9 | Can't interhit from select.poll directly. 10 | ''' 11 | 12 | def __init__(self): 13 | self.poller = select.poll() 14 | self.fds = list() 15 | 16 | def __len__(self): 17 | return len(self.fds) 18 | 19 | def _get_fd(self, fd): 20 | return fd.fileno() if hasattr(fd, 'fileno') else fd 21 | 22 | def poll(self): 23 | return self.poller.poll() 24 | 25 | def register(self, fd, flags): 26 | fdno = self._get_fd(fd) 27 | if fdno not in self.fds: 28 | self.fds.append(fdno) 29 | return self.poller.register(fd,flags) 30 | 31 | def unregister(self, fd): 32 | fdno = self._get_fd(fd) 33 | if fdno in self.fds: 34 | self.fds.remove(fdno) 35 | return self.poller.unregister(fd) 36 | 37 | 38 | class NBPopen(Popen): 39 | ''' 40 | Non-blocking subprocess class 41 | ''' 42 | 43 | def __init__(self, *args, **kwargs): 44 | result = super(self.__class__, self).__init__(*args, **kwargs) 45 | 46 | # Setting O_NONBLOCK on stdin, strout, stderr 47 | fcntl.fcntl(self.stdin, fcntl.F_SETFL, fcntl.fcntl(self.stdin, fcntl.F_GETFL) | os.O_NONBLOCK) 48 | fcntl.fcntl(self.stdout, fcntl.F_SETFL, fcntl.fcntl(self.stdout, fcntl.F_GETFL) | os.O_NONBLOCK) 49 | fcntl.fcntl(self.stderr, fcntl.F_SETFL, fcntl.fcntl(self.stderr, fcntl.F_GETFL) | os.O_NONBLOCK) 50 | 51 | # Using poll to get file status 52 | self.poller = Poll() # My own class with len() 53 | self.poller.register(self.stdin, select.POLLOUT | select.POLLERR | select.POLLHUP) 54 | self.poller.register(self.stdout, select.POLLIN | select.POLLPRI | select.POLLERR | select.POLLHUP) 55 | self.poller.register(self.stderr, select.POLLIN | select.POLLPRI | select.POLLERR | select.POLLHUP) 56 | 57 | return result 58 | 59 | #def __del__(self, *args, **kwargs): 60 | #super(self.__class__, self).__del__() 61 | #map(self.poller.unregister, (self.stdin, self.stdout, self.stderr)) -------------------------------------------------------------------------------- /videooperator/encoder.py: -------------------------------------------------------------------------------- 1 | import shlex 2 | import select 3 | import fcntl 4 | import os 5 | import threading 6 | import videooperator.nbsubprocess as subprocess 7 | 8 | class Encoder(object): 9 | ''' 10 | Video encoding class 11 | ''' 12 | 13 | def __init__(self): 14 | self._terminate = threading.Event() 15 | self._jobfinished = threading.Event() 16 | self._error = None 17 | self._returncode = None 18 | self._encoding_thread = None 19 | 20 | def _run_encoder(self, input, output, log, encoding_arguments, bufsize, ffmpeg): 21 | ''' 22 | Runs actual encoding process with FFmpeg. 23 | Should be called in separate thread. 24 | ''' 25 | 26 | ffmpeg_template = '%(ffmpeg)s -i - %(arguments)s -f matroska -avoid_negative_ts 0 -' 27 | ffmpeg_command = shlex.split(ffmpeg_template % ( 28 | { 29 | "arguments": encoding_arguments, 30 | "ffmpeg": ffmpeg 31 | } 32 | )) 33 | print "FFMPEG COMMAND" 34 | print ffmpeg_command 35 | # Running FFmpeg process 36 | process = subprocess.NBPopen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, 37 | stderr=subprocess.PIPE) 38 | if not process: 39 | message = 'Cannot start FFmpeg process!' 40 | self._error = message 41 | print message 42 | self._jobfinished.set() 43 | return 44 | 45 | # Dictionary of file descriptors and its' names 46 | fds = {'in': process.stdin.fileno(), 'out': process.stdout.fileno(), 'err': process.stderr.fileno()} 47 | fds_name = dict((v,k) for k, v in fds.iteritems()) 48 | 49 | try: 50 | while True: 51 | # Poll process state 52 | process.poll() 53 | returncode = process.returncode 54 | if returncode is not None: 55 | print "Returned ", str(returncode) 56 | self._returncode = returncode 57 | break 58 | 59 | if self._terminate.is_set(): 60 | # Terminating 61 | process.terminate() 62 | self._error = 'Terminated' 63 | self._jobfinished.set() 64 | break 65 | 66 | # Poll descriptors state 67 | # Poll will hang if poll() without registered descriptors was called 68 | if len(process.poller) < 1: 69 | continue 70 | 71 | pollout = process.poller.poll() 72 | for efd, eflag in pollout: 73 | if eflag & (select.POLLERR | select.POLLHUP): 74 | # If error or EOF happened 75 | message = 'error' if (eflag & select.POLLERR) else 'HUP' 76 | print fds_name[efd], message 77 | self._error = fds_name[efd], message 78 | process.poller.unregister(efd) 79 | if eflag & select.POLLERR: 80 | break 81 | 82 | elif efd == fds['in']: 83 | # Input event 84 | # Writing video to stdin 85 | readdata = input.read(bufsize) 86 | if not readdata: 87 | process.poller.unregister(fds['in']) 88 | process.stdin.close() 89 | else: 90 | process.stdin.write(readdata) 91 | 92 | elif efd == fds['out']: 93 | # Encoded video event 94 | # Grabbing encoded data 95 | output.write(process.stdout.read()) 96 | 97 | elif efd == fds['err']: 98 | # Log event 99 | log.write(process.stderr.read()) 100 | except Exception as e: 101 | print "Unknown exception in run encoder", repr(e) 102 | finally: 103 | # Set jobfinished flag 104 | self._jobfinished.set() 105 | # Closing stdin, stdout, stderr and input, output and log files 106 | try: 107 | map(lambda x: x.close(), [input, log, output, process.stdin, process.stdout, process.stderr]) 108 | except: 109 | pass 110 | 111 | 112 | def encode(self, input, output, log, encoding_arguments='', bufsize=4096, ffmpeg='ffmpeg'): 113 | ''' 114 | Start encoding process 115 | ''' 116 | 117 | # Reinint 118 | self.__init__() 119 | 120 | self._encoding_thread = threading.Thread(name='encodethread', target=self._run_encoder, 121 | args=(input, output, log, encoding_arguments, bufsize, ffmpeg)) 122 | self._encoding_thread.start() 123 | 124 | def join(self): 125 | ''' 126 | Wait intil encoding process is finished 127 | ''' 128 | 129 | if not self._encoding_thread: 130 | # Encoding was not started 131 | return 132 | return self._jobfinished.wait() 133 | 134 | def stop(self): 135 | ''' 136 | Terminate encoding process 137 | ''' 138 | 139 | self._terminate.set() 140 | return self.join() 141 | 142 | def get_result(self): 143 | return { 144 | "job_finished": self._jobfinished.is_set(), 145 | "error": self._error, 146 | "returncode": self._returncode 147 | } -------------------------------------------------------------------------------- /worker.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | import uuid 3 | import videooperator.encoder 4 | import socket 5 | from socket import error as SocketException 6 | import os 7 | import json 8 | import logging 9 | import time 10 | 11 | class logfile(object): 12 | ''' 13 | File-alike object to handle log information from FFmpeg 14 | ''' 15 | def write(self, message): 16 | print message 17 | #pass 18 | 19 | def close(self): 20 | pass 21 | 22 | 23 | class Worker(object): 24 | ''' 25 | Worker class 26 | ''' 27 | RECONNECTION_TIMEOUT = 10 28 | 29 | def __init__(self, addr, port): 30 | self._addr = addr 31 | self._port = port 32 | self._server = None 33 | self._server_file = None 34 | self._upload = None 35 | self._connect_to_server() 36 | 37 | def _send_message(self, socket_, message, raw=False): 38 | if not raw: 39 | encoded_message = json.dumps(message) + '\n' 40 | else: 41 | encoded_message = message 42 | socket_.sendall(encoded_message) 43 | 44 | def _unpack_message(self, message): 45 | try: 46 | # Unpack JSON 47 | command_json = json.loads(message) 48 | except: 49 | logger.error('Mailformed message received') 50 | logger.error(command) 51 | return False 52 | else: 53 | return command_json 54 | 55 | def _close_socket(self, socket_): 56 | try: 57 | socket_.shutdown(socket.SHUT_RDWR) 58 | except SocketException: 59 | result = False 60 | else: 61 | result = True 62 | socket_.close() 63 | return result 64 | 65 | def _connect_to_server(self): 66 | ''' 67 | Make connection to server 68 | ''' 69 | logger = logging.getLogger(self.__class__.__name__) 70 | self._server = self._server_file = None 71 | while not self._server: 72 | try: 73 | self._server = socket.create_connection((self._addr, self._port)) 74 | except SocketException as e: 75 | logger.error('Cannot connect to server') 76 | logger.error(repr(e)) 77 | time.sleep(Worker.RECONNECTION_TIMEOUT) 78 | 79 | self._server_file = self._server.makefile() 80 | try: 81 | self._send_message(self._server, {"action": "getjob", "worker_id": worker_id}) 82 | except SocketException: 83 | logger.error('Cannot send message to server') 84 | return False 85 | 86 | logger.info('Connected to server') 87 | 88 | return True 89 | 90 | def _update_job_status(self, job_id, status): 91 | sock = socket.create_connection((self._addr, self._port)) 92 | self._send_message(sock, {"action": "updatejob", "worker_id": worker_id, "job_id": job_id, "status": status}) 93 | self._close_socket(sock) 94 | 95 | def terminate(self): 96 | for thissocket in (self._server, self._upload): 97 | if thissocket is not None: 98 | self._close_socket(thissocket) 99 | 100 | def run(self): 101 | ''' 102 | Main worker loop 103 | ''' 104 | logger = logging.getLogger(self.__class__.__name__) 105 | while True: 106 | print 'cycle' 107 | if self._server is None: 108 | logger.info("Reconnecting to server") 109 | self._connect_to_server() 110 | 111 | command = self._server_file.readline().strip() 112 | if not command: 113 | # socket closed, reconnect 114 | logger.error('Connection lost') 115 | self._close_socket(self._server) 116 | self._server_file.close() 117 | self._server = self._server_file = None 118 | continue 119 | 120 | command_json = self._unpack_message(command) 121 | if not command_json: 122 | logger.error('Mailformed command received') 123 | logger.error(command) 124 | break 125 | 126 | action = command_json.get('action') 127 | job_id = command_json.get('job_id') 128 | encoding_arguments = command_json.get('encoding_arguments', '') 129 | 130 | if action == 'ping': 131 | # PING from server, do nothing 132 | logger.info('Got ping command') 133 | continue 134 | 135 | elif action == 'do': 136 | # Encode command from server 137 | logger.info('Got encode command') 138 | logger.info(command) 139 | 140 | try: 141 | # Creating upload socket 142 | self._upload = socket.create_connection((self._addr, self._port)) 143 | self._send_message(self._upload, { 144 | "action": "putjob", 145 | "worker_id": worker_id, 146 | "job_id": job_id 147 | }) 148 | except Exception as e: 149 | logger.error("Cannot create upload socket") 150 | logger.error(repr(e)) 151 | break 152 | 153 | logger.info('Starting encoder') 154 | encoder = videooperator.encoder.Encoder() 155 | encoder.encode(self._server_file, self._upload.makefile(), logfile(), 156 | encoding_arguments = encoding_arguments) 157 | logger.info('Encoder started') 158 | encoder.join() 159 | logger.info('Encoder joined') 160 | map(self._close_socket, (self._upload, self._server)) 161 | self._upload = self._server = None 162 | logger.info('Sockets closed') 163 | result = encoder.get_result() 164 | logger.info(result) 165 | sendresult = 'success' if result['returncode'] == 0 else 'fail' 166 | logger.info('Status upadting...') 167 | self._update_job_status(job_id, sendresult) 168 | print job_id 169 | 170 | 171 | if __name__ == "__main__": 172 | # Logging initialization 173 | logging.basicConfig( 174 | format='%(asctime)s %(levelname)s %(name)s: %(message)s', 175 | datefmt='%d.%m.%Y %H:%M:%S', 176 | level=logging.DEBUG) 177 | 178 | # Generate worker id 179 | worker_id = str(uuid.uuid4()) 180 | worker = Worker('localhost', 8000) 181 | try: 182 | worker.run() 183 | except KeyboardInterrupt: 184 | worker.terminate() -------------------------------------------------------------------------------- /server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | import SocketServer 3 | from socket import error as SocketException 4 | import time 5 | import Queue 6 | import json 7 | import threading 8 | import uuid 9 | import shutil 10 | import os 11 | import logging 12 | import videooperator.splitmerge 13 | 14 | class JobServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer): 15 | daemon_threads = True 16 | allow_reuse_address = True 17 | 18 | 19 | class JobStuff(object): 20 | SOURCE_DIR = 'source' 21 | ENCODED_DIR = 'encoded-segments' 22 | SEGMENT_DIR = 'segments' 23 | FINAL_DIR = 'final' 24 | 25 | timeout = 5 26 | 27 | job_queue = Queue.Queue() 28 | failed_jobs = list() 29 | successful_jobs = list() 30 | inprogress_jobs = dict() 31 | source_info = dict() 32 | 33 | PING = {"action": "ping"} 34 | IN_PROGRESS = 0 35 | ENCODING = 1 36 | DONE = 2 37 | ERROR = 3 38 | LOST = 4 39 | 40 | @staticmethod 41 | def add_file(file, encoding_arguments, split_time='15s'): 42 | if not os.access(file, os.R_OK): 43 | print "no file" 44 | return False 45 | split = videooperator.splitmerge.SplitMerge() 46 | filename = os.path.basename(file) 47 | source_id = str(uuid.uuid4()) 48 | segment_dir = JobStuff.SEGMENT_DIR + '/' + filename + '.' + source_id 49 | encoded_dir = JobStuff.ENCODED_DIR + '/' + filename + '.' + source_id 50 | os.mkdir(segment_dir) 51 | os.mkdir(encoded_dir) 52 | split.split(file, segment_dir + '/' + filename, split_time) 53 | JobStuff.source_info[source_id] = { 54 | "file": file, 55 | "encoding_arguments": encoding_arguments, 56 | "jobs": list() 57 | } 58 | for segment_name in os.listdir(segment_dir): 59 | job_id = str(uuid.uuid4()) 60 | JobStuff.job_queue.put({ 61 | "job_id": job_id, 62 | "filename": segment_name, 63 | "source_id": source_id, 64 | "worker_id": None, 65 | "status": None 66 | }) 67 | JobStuff.source_info[source_id]['jobs'].append(job_id) 68 | 69 | import pprint 70 | print "source_info" 71 | pprint.pprint(JobStuff.source_info) 72 | print "JOBS" 73 | pprint.pprint(JobStuff.job_queue.queue) 74 | return True 75 | 76 | @staticmethod 77 | def merge_file(original_file, segment_dir, output_file): 78 | merger = videooperator.splitmerge.SplitMerge() 79 | segments = os.listdir(segment_dir) 80 | segments = map(lambda x: segment_dir + x, segments) 81 | segments.sort() 82 | return merger.merge(original_file, segments, output_file) 83 | 84 | class JobHandler(SocketServer.BaseRequestHandler): 85 | def _check_connection(self, socket_): 86 | socket_.setblocking(0) 87 | try: 88 | if not socket_.recv(0): 89 | # No data has been read, connection is dead 90 | result = False 91 | except SocketException: 92 | # Connection is alive 93 | result = True 94 | socket_.setblocking(1) 95 | return result 96 | 97 | def _send_message(self, socket_, message, raw=False): 98 | if not raw: 99 | encoded_message = json.dumps(message) + '\n' 100 | else: 101 | encoded_message = message 102 | socket_.sendall(encoded_message) 103 | 104 | def _assign_job(self, job, worker_id): 105 | # Got job, set status 106 | job['status'] = JobStuff.IN_PROGRESS 107 | # Assigned worker for job 108 | job['worker'] = worker_id 109 | job_id = job.get('job_id') 110 | # add job into inprogress_jobs 111 | JobStuff.inprogress_jobs[job_id] = job 112 | 113 | def _job_success(self, job_id): 114 | # Getting job from inprogress_jobs 115 | job = JobStuff.inprogress_jobs.get(job_id) 116 | if not job: 117 | print "no job to mark as success" 118 | return False 119 | # Deleting job from inprogress_jobs 120 | del JobStuff.inprogress_jobs[job_id] 121 | # Deleting job status 122 | del job['status'] 123 | # Adding job to successful_jobs 124 | JobStuff.successful_jobs.append(job) 125 | # Deleting job_id from source_info 126 | JobStuff.source_info[job['source_id']]['jobs'].remove(job_id) 127 | return True 128 | 129 | def _job_fail(self, job_id): 130 | # Getting job from inprogress_jobs 131 | job = JobStuff.inprogress_jobs.get(job_id) 132 | if not job: 133 | print "no job to mark as fail" 134 | return False 135 | # Deleting job from inprogress_jobs 136 | del JobStuff.inprogress_jobs[job_id] 137 | # Deleting job status 138 | del job['status'] 139 | # Adding job to successful_jobs 140 | JobStuff.failed_jobs.append(job) 141 | return True 142 | 143 | def _get_source_info(self, source_id): 144 | source = JobStuff.source_info.get(source_id) 145 | if not source: 146 | print "no source found" 147 | return None 148 | result = source 149 | result['segment_dir'] = JobStuff.SEGMENT_DIR + '/' + os.path.basename(source['file']) + '.' + source_id + '/' 150 | result['encoded_dir'] = JobStuff.ENCODED_DIR + '/' + os.path.basename(source['file']) + '.' + source_id + '/' 151 | result['final'] = JobStuff.FINAL_DIR + '/' + os.path.basename(source['file']) + '.' + \ 152 | source_id + '/' + os.path.basename(source['file']) 153 | return result 154 | 155 | def _get_source_jobs(self, source_id): 156 | source = self._get_source_info(source_id) 157 | return source['jobs'] 158 | 159 | def _get_inprogress_job(self, job_id): 160 | return JobStuff.inprogress_jobs.get(job_id) 161 | 162 | def _set_inprogress_job_status(self, job_id, status): 163 | job = self._get_inprogress_job(job_id) 164 | if not job: 165 | return False 166 | JobStuff.inprogress_jobs[job_id]['status'] = status 167 | return True 168 | 169 | def getjob(self, worker_id): 170 | ''' 171 | Pushes a job to worker 172 | ''' 173 | logger = logging.getLogger(self.__class__.__name__) 174 | logger.info('New worker connected: %s', worker_id) 175 | 176 | while True: 177 | try: 178 | job = JobStuff.job_queue.get(timeout=JobStuff.timeout) 179 | break 180 | except Queue.Empty: 181 | # ping 182 | if not self._check_connection(self.request): 183 | print 'connection dropped' 184 | return 185 | self._send_message(self.request, JobStuff.PING) 186 | #print "sent message" 187 | 188 | self._assign_job(job, worker_id) 189 | job_id = job['job_id'] 190 | 191 | logger.info('Job %s assigned to %s' % (job['filename'], worker_id)) 192 | source = self._get_source_info(job['source_id']) 193 | 194 | self._send_message(self.request,{ 195 | "action": "do", 196 | "job_id": job_id, 197 | "encoding_arguments": source['encoding_arguments'], 198 | }) 199 | 200 | 201 | try: 202 | with open(source['segment_dir'] + job['filename'], 'rb') as vid: 203 | shutil.copyfileobj(vid, self.request_file) 204 | except Exception as e: 205 | # upload error 206 | self._job_fail(job_id) 207 | logger.error('Cannot upload video %s to worker %s' % (job['filename'], worker_id)) 208 | logger.error(repr(e)) 209 | return 210 | 211 | logger.info('Successfully uploaded video %s to worker %s!' % (job['filename'], worker_id)) 212 | return 213 | 214 | def putjob(self, worker_id, job_id): 215 | logger = logging.getLogger(self.__class__.__name__) 216 | job = self._get_inprogress_job(job_id) 217 | if not job: 218 | self.die_with_error('no job') 219 | return 220 | self._set_inprogress_job_status(job_id, JobStuff.ENCODING) 221 | source = self._get_source_info(job['source_id']) 222 | 223 | logger.info('Getting encoded data for file %s from worker %s' % (job['filename'], worker_id)) 224 | 225 | try: 226 | with open(source['encoded_dir'] + job['filename'], 'wb') as segment: 227 | shutil.copyfileobj(self.request_file, segment) 228 | except Exception as e: 229 | # receive error 230 | self._job_fail(job_id) 231 | logger.error('Cannot download video %s from worker %s ' % (job['filename'], worker_id)) 232 | logger.error(repr(e)) 233 | return 234 | 235 | logger.info('Successfully downloaded video %s from worker %s!' % (job['filename'], worker_id)) 236 | return 237 | 238 | 239 | def updatejob(self, job_id, status): 240 | logger = logging.getLogger(self.__class__.__name__) 241 | job = self._get_inprogress_job(job_id) 242 | source = self._get_source_info(job['source_id']) 243 | if not job: 244 | self.die_with_error('no job') 245 | return 246 | if status == 'success': 247 | self._job_success(job_id) 248 | logger.info('Job %s successful' % job['filename']) 249 | if len(source['jobs']) < 1: 250 | # Merge file and remove segments 251 | if JobStuff.merge_file(source['file'], source['encoded_dir'], source['final']): 252 | print "muxing successful!" 253 | shutil.rmtree(source['encoded_dir']) 254 | shutil.rmtree(source['segment_dir']) 255 | elif status == 'fail': 256 | self._job_fail(job_id) 257 | logger.info('Job %s failed' % job['filename']) 258 | 259 | def die_with_error(self, errormsg): 260 | print errormsg 261 | self._send_message(self.request,{ 262 | "action": "error", 263 | "message": errormsg 264 | }) 265 | 266 | def handle(self): 267 | logger = logging.getLogger(self.__class__.__name__) 268 | logger.info('New connection: %s' % (self.client_address,)) 269 | self.request_file = self.request.makefile() 270 | worker_data = self.request_file.readline() 271 | 272 | try: 273 | worker = json.loads(worker_data) 274 | except: 275 | logger.error("Cannot load worker data") 276 | return 277 | 278 | worker_id = worker.get('worker_id') 279 | req = worker.get('action') 280 | if not (worker_id and req): 281 | self.die_with_error("no worker id or req") 282 | return 283 | 284 | if req == 'getjob': 285 | self.getjob(worker_id) 286 | 287 | elif req == 'putjob': 288 | job_id = worker.get('job_id') 289 | if not job_id: 290 | self.die_with_error("no job id") 291 | return 292 | 293 | self.putjob(worker_id, job_id) 294 | 295 | 296 | elif req == 'updatejob': 297 | job_id = worker.get('job_id') 298 | status = worker.get('status') 299 | if not (job_id and status): 300 | self.die_with_error("no job_id or status") 301 | return 302 | self.updatejob(job_id, status) 303 | 304 | elif req == 'addjob': 305 | file = worker.get('file') 306 | encoding_arguments = worker.get('encoding_arguments', '') 307 | if not file: 308 | self.die_with_error('no file') 309 | 310 | JobStuff.add_file(file, encoding_arguments) 311 | 312 | 313 | logging.basicConfig( 314 | format='%(asctime)s %(levelname)s %(name)s: %(message)s', 315 | datefmt='%d.%m.%Y %H:%M:%S', 316 | level=logging.DEBUG) 317 | 318 | def run(server_class=JobServer, 319 | handler_class=JobHandler): 320 | server_address = ('', 8000) 321 | httpd = server_class(server_address, handler_class) 322 | httpd.allow_reuse_address = True 323 | httpd.serve_forever() 324 | 325 | run() 326 | #thread = threading.Thread(target=run) 327 | #thread.daemon = True 328 | #thread.start() --------------------------------------------------------------------------------