├── .gitignore ├── LICENSE ├── README.md ├── combo_creator ├── __init__.py ├── combo.py ├── logo.png └── ui.py ├── requirements.txt ├── screens ├── app.png ├── appworking.png ├── completion.png └── workingallframes.png └── vea ├── __init__.py ├── assets └── icon.png ├── controller.py ├── dialog.py ├── motion.py ├── play_video.py ├── tools ├── __init__.py ├── timestamps.py ├── video_get.py └── video_show.py └── ui.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | ./env 6 | .idea 7 | .gitignore 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | pip-wheel-metadata/ 26 | share/python-wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .nox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *.cover 52 | *.py,cover 53 | .hypothesis/ 54 | .pytest_cache/ 55 | 56 | # Translations 57 | *.mo 58 | *.pot 59 | 60 | # Django stuff: 61 | *.log 62 | local_settings.py 63 | db.sqlite3 64 | db.sqlite3-journal 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # IPython 83 | profile_default/ 84 | ipython_config.py 85 | 86 | # pyenv 87 | .python-version 88 | 89 | # pipenv 90 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 91 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 92 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 93 | # install all needed dependencies. 94 | #Pipfile.lock 95 | 96 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 97 | __pypackages__/ 98 | 99 | # Celery stuff 100 | celerybeat-schedule 101 | celerybeat.pid 102 | 103 | # SageMath parsed files 104 | *.sage.py 105 | 106 | # Environments 107 | .env 108 | .venv 109 | env/ 110 | venv/ 111 | ENV/ 112 | env.bak/ 113 | venv.bak/ 114 | 115 | # Spyder project settings 116 | .spyderproject 117 | .spyproject 118 | 119 | # Rope project settings 120 | .ropeproject 121 | 122 | # mkdocs documentation 123 | /site 124 | 125 | # mypy 126 | .mypy_cache/ 127 | .dmypy.json 128 | dmypy.json 129 | 130 | # Pyre type checker 131 | .pyre/ 132 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Atul Patare 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Video-Editing-Automation 2 | A Python-based Video Editing Automation with Motion Detection using OpenCV and FFMPEG (MoviePy) 3 | 4 | [Ref: PyImageSearch awesome blog post on Motion Detection](https://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/) 5 | 6 | - Working on different approach to automate Video Editing [check here](https://github.com/AP-Atul/Torpido) 7 | 8 | ## Directories 9 | 1. combo_creator: Creates a single video by combining multiple videos from a single folder 10 | 2. vea: main directory that performs video editing based on motion activity. 11 | 12 | ## Execution 13 | 1. Install ffmpeg, required for video trimming 14 | 2. Install all the requirements 15 | ``` $ pip3 install -r requirements.txt ``` 16 | 3. You are ready to roll. 17 | 18 | ## Basic working of the product 19 | The project starts by taking an input video file, reads it frame by frame and then resizes the frame to 500px width to reduce the processing, then making the image grayscale to add up more ease. 20 | 21 | Then using OpenCV's diff to calculate the difference between the frames and thresholding the frame with required value, 25 suggested. Then calculating whether the threshold value changes and then detecting motion. Once, motion is detected calculating the time at it occurred. 22 | timeOccurred = frameNumber / fps # this will give us the time. 23 | 24 | Store the startTime and endTime of the motion and then make cuts to the video starting from the startTime to the endTime using the FFMPEG tool. 25 | Storing the video to the file directory specified. Since only the processing of video is carried out, it is much faster. 26 | 27 | The video reading is done in the thread in the VideoGet file which improves reading by *2 the normal way. 
def createCombo(ComboWindow, inputFolder, outputFile):
    """
    Find all mp4 files in a folder and concatenate them into one video.

    params:
        ComboWindow : UI for the application, object of QWindow class
        inputFolder : input files folder path
        outputFile  : file name for the combined video (written inside inputFolder)

    output : Outputs a single video file with file name provided at the location given
    """
    # reading the path of input folder and output file name
    inputFolder = str(inputFolder)
    outputFile = str(outputFile)

    ComboWindow.setComboStatusTipText('Creating Video.......')  # setting status on the ui

    # retrieving file names, sorted case-insensitively so the combination
    # order matches the alphabetic file naming the UI asks the user for
    input_files = sorted(glob.glob(inputFolder + '/*.' + "mp4"), key=str.lower)
    lenInputFiles = len(input_files)

    # BUG FIX: an empty folder used to crash concatenate_videoclips([]);
    # report it on the status bar and bail out instead
    if lenInputFiles == 0:
        ComboWindow.setComboStatusTipText('No mp4 files found in the folder.')
        return

    clip = []   # loaded VideoFileClip objects, in combination order
    fps = None  # output fps, taken from the first input file

    for i, fileName in enumerate(input_files):
        per = float(i + 1) / float(lenInputFiles)
        ComboWindow.setComboProgress(round(per * 60))  # loading phase is ~60% of the bar
        clip.append(VideoFileClip(fileName))

        # BUG FIX: fps was re-read for every file (so only the LAST file's fps
        # was actually used) and the VideoCapture handles were never released;
        # read it once from the first file and release the capture immediately
        if fps is None:
            myClip = cv2.VideoCapture(fileName)
            fps = myClip.get(cv2.CAP_PROP_FPS)
            myClip.release()

    # creating a video and writing it to the directory
    final_clip = concatenate_videoclips(clip)
    try:
        final_clip.write_videofile(inputFolder + "/" + outputFile, fps)
    finally:
        # release the source readers so file handles are not leaked
        for c in clip:
            c.close()
    ComboWindow.setComboProgress(100)
QLabel(self) 25 | inputDetailsFileLabel.setText("Input Details ") 26 | inputDetailsFileLabel.setFont(QFont('Arial', 25, QFont.Bold)) 27 | inputDetailsFileLabel.resize(200, 27) 28 | inputDetailsFileLabel.move(20, 10) 29 | 30 | self.selectFileLabel = QLabel(self) 31 | self.selectFileLabel.setText("Select the input folder") 32 | self.selectFileLabel.resize(200, 27) 33 | self.selectFileLabel.move(20, 50) 34 | 35 | self.selectFolderTextbox = QLineEdit(self) 36 | self.selectFolderTextbox.move(20, 80) 37 | self.selectFolderTextbox.resize(380, 27) 38 | self.selectFolderTextbox.setPlaceholderText('Folder Path') 39 | 40 | btn = QPushButton("Browse", self) 41 | btn.setStatusTip('Select the folder to edit') 42 | btn.clicked.connect(self.browseFolder) 43 | btn.resize(btn.sizeHint()) 44 | btn.move(400, 80) 45 | 46 | tip1 = QLabel(self) 47 | tip1.setText("Tip : We will create a combination of clips from the contents. ") 48 | tip1.setFont(QFont('Courier', 10)) 49 | tip1.resize(tip1.sizeHint()) 50 | tip1.move(20, 120) 51 | 52 | # status components && variables 53 | outputDetailsFileLabel = QLabel(self) 54 | outputDetailsFileLabel.setText("Options & Status") 55 | outputDetailsFileLabel.setFont(QFont('Arial', 25, QFont.Bold)) 56 | outputDetailsFileLabel.resize(250, 27) 57 | outputDetailsFileLabel.move(20, 150) 58 | 59 | self.progress = QProgressBar(self) 60 | self.progress.setGeometry(20, 200, 460, 20) 61 | 62 | destinationFileLabel = QLabel(self) 63 | destinationFileLabel.setText("Enter a File Name") 64 | destinationFileLabel.resize(200, 27) 65 | destinationFileLabel.move(20, 220) 66 | 67 | self.outputFileName = QLineEdit(self) 68 | self.outputFileName.move(20, 250) 69 | self.outputFileName.resize(130, 27) 70 | self.outputFileName.setText('mycombo.mp4') 71 | 72 | btnCreate = QPushButton("Create", self) 73 | btnCreate.setStatusTip('Click to create your files') 74 | btnCreate.clicked.connect(self.callCombo) 75 | btnCreate.resize(310, 27) 76 | btnCreate.move(175, 250) 77 | 78 | 
self.statusBar = QStatusBar(self) 79 | self.setStatusBar(self.statusBar) 80 | 81 | # All Custom Methods 82 | # select folder path for input files 83 | def browseFolder(self): 84 | name = QFileDialog.getExistingDirectory(None, "Select Directory") 85 | self.selectFolderTextbox.setText(name) 86 | 87 | # set progress bar value 88 | def setComboProgress(self, value): 89 | self.progress.setValue(value) 90 | 91 | # set status text 92 | def setComboStatusTipText(self, value): 93 | self.statusBar.showMessage(value, 10) 94 | 95 | # call combination video creation video 96 | def callCombo(self): 97 | inputFolder = self.selectFolderTextbox.text() 98 | outputFile = self.outputFileName.text() 99 | 100 | if inputFolder and outputFile: 101 | combo.createCombo(self, inputFolder, outputFile) 102 | else: 103 | pass 104 | 105 | 106 | def main(): 107 | app = QApplication(sys.argv) 108 | window = ComboWindow() 109 | window.show() 110 | sys.exit(app.exec_()) 111 | 112 | 113 | if __name__ == '__main__': 114 | main() 115 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | imutils 3 | PyQt5 4 | moviepy 5 | git+https://github.com/AP-Atul/pympeg 6 | -------------------------------------------------------------------------------- /screens/app.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ap-atul/Video-Editing-Automation/cadf3e66f208737c1a273b958c4654a3d08f4f71/screens/app.png -------------------------------------------------------------------------------- /screens/appworking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ap-atul/Video-Editing-Automation/cadf3e66f208737c1a273b958c4654a3d08f4f71/screens/appworking.png -------------------------------------------------------------------------------- 
/screens/completion.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ap-atul/Video-Editing-Automation/cadf3e66f208737c1a273b958c4654a3d08f4f71/screens/completion.png -------------------------------------------------------------------------------- /screens/workingallframes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ap-atul/Video-Editing-Automation/cadf3e66f208737c1a273b958c4654a3d08f4f71/screens/workingallframes.png -------------------------------------------------------------------------------- /vea/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ap-atul/Video-Editing-Automation/cadf3e66f208737c1a273b958c4654a3d08f4f71/vea/__init__.py -------------------------------------------------------------------------------- /vea/assets/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ap-atul/Video-Editing-Automation/cadf3e66f208737c1a273b958c4654a3d08f4f71/vea/assets/icon.png -------------------------------------------------------------------------------- /vea/controller.py: -------------------------------------------------------------------------------- 1 | from PyQt5.QtCore import QThread, pyqtSignal 2 | 3 | from vea import play_video 4 | from vea.dialog import Complete 5 | from vea.motion import Motion 6 | 7 | 8 | class Controller(QThread): 9 | progress = pyqtSignal(int) 10 | frames = pyqtSignal(str) 11 | fps = pyqtSignal(str) 12 | 13 | def __init__(self): 14 | super().__init__() 15 | 16 | self._motion = Motion() 17 | 18 | self._videoFile = None 19 | self._outputFolder = None 20 | self._dialog = None 21 | 22 | def set_input_file(self, inputFile): 23 | self._videoFile = inputFile 24 | 25 | def set_output_fol(self, folder): 26 | self._outputFolder = folder 27 | 28 | def 
set_threshold(self, val): 29 | self._motion.setThreshold(val) 30 | 31 | def start_processing(self): 32 | self._motion.setHandler(self) 33 | self.start() 34 | 35 | def set_progress(self, val): 36 | self.progress.emit(int(val)) 37 | 38 | def set_total_frames(self, msg): 39 | self.frames.emit(str(msg)) 40 | 41 | def set_video_fps(self, msg): 42 | self.fps.emit(str(msg)) 43 | 44 | def run(self) -> None: 45 | self._motion.startProcessing(self._videoFile, self._outputFolder) 46 | 47 | def set_dialog(self): 48 | self._dialog = Complete() 49 | self._dialog.show() 50 | 51 | def start_display(self, threshold): 52 | play_video.display_contours(self._videoFile, threshold) 53 | -------------------------------------------------------------------------------- /vea/dialog.py: -------------------------------------------------------------------------------- 1 | """ 2 | adds a dialog to show the completion of the process 3 | if anything goes wrong no dialog is displayed 4 | """ 5 | from PyQt5.QtGui import QIcon, QFont 6 | from PyQt5.QtWidgets import QLabel, QPushButton 7 | 8 | from PyQt5.QtWidgets import QMainWindow 9 | 10 | 11 | class Complete(QMainWindow): 12 | def __init__(self): 13 | super().__init__() 14 | 15 | self.setGeometry(100, 100, 500, 100) 16 | self.setFixedSize(500, 100) 17 | self.setWindowTitle("Video Status") 18 | self.setWindowIcon(QIcon('./assets/icon.png')) # application window icon 19 | 20 | label = QLabel(self) 21 | label.setText("Your video is processed successfully!") 22 | label.setFont(QFont('Arial', 15, QFont.Black)) 23 | label.resize(label.sizeHint()) 24 | label.move(100, 10) 25 | 26 | button = QPushButton("Ok", self) 27 | button.move(180, 40) 28 | button.clicked.connect(self.closeDialog) 29 | 30 | def closeDialog(self): 31 | self.close() 32 | -------------------------------------------------------------------------------- /vea/motion.py: -------------------------------------------------------------------------------- 1 | import os 2 | from subprocess import 
class Motion:
    """
    Detect motion in a video, record which frames contain it and cut the
    interesting parts into sub clips with FFmpeg (through the pympeg wrapper).

    Typical flow (driven by the UI controller):
        setHandler() -> setThreshold() -> startProcessing()

    params (via setters / startProcessing):
        controller   : Controller object used to push progress/labels to the UI
        inputFile    : path of the input video file
        outputFolder : path of the folder that receives the sub clips
        threshold    : pixel-difference threshold for motion detection, 0 -> 255
    """

    def __init__(self):
        self.__stream = None             # cv2.VideoCapture owned by the reader
        self.__video_getter = None       # threaded VideoGet reader
        self.__controller = None         # UI handler (Controller)
        self._inputFile = None
        self._outputFolder = None
        self._output_video_file_name = None
        self._threshold = None           # int threshold value, 0 -> 255
        self.__fps = None
        self.__totalFrames = 0
        self._motion = None              # per-frame 0/1 motion flags

    def setHandler(self, app):
        """Register the controller that receives progress/label updates."""
        self.__controller = app

    def setThreshold(self, value):
        """Set the binary-threshold value applied to the frame delta (0 -> 255)."""
        self._threshold = int(value)

    def startProcessing(self, inputFile, outputFolder):
        """Read the whole video, rank motion per second and build the output clips."""
        self._inputFile = inputFile
        self._outputFolder = outputFolder

        self.__video_getter = VideoGet(str(self._inputFile)).start()
        self.__stream = self.__video_getter.getCapture()

        self.__fps = self.__stream.get(cv2.CAP_PROP_FPS)
        self.__totalFrames = self.__stream.get(cv2.CAP_PROP_FRAME_COUNT)
        print("TotalFrames ::", self.__totalFrames)
        print("Video FPS ::", self.__fps)

        # set labels on the application window
        self.__controller.set_total_frames(self.__totalFrames)
        self.__controller.set_video_fps(self.__fps)

        # give the reader thread a head start so its queue is not empty
        if not self.__video_getter.more():
            print("Waiting for the buffer to fill up.")
            sleep(0.3)

        self.readMotionFrames()
        self._frames_normalize()
        self.__video_getter.stop()

    def readMotionFrames(self):
        """
        Flag every frame with 1 (motion) or 0 (still) by thresholding its
        absolute difference against the first frame of the video.
        """
        firstFrame = None   # the first frame is assumed to contain no motion
        self._motion = []   # per-frame motion flags
        count = 0
        np.seterr(divide='ignore')

        while self.__video_getter.more():

            frame = self.__video_getter.read()
            # if the frame could not be grabbed, then we have reached the end
            # of the video
            if frame is None:
                break

            # resize the frame, convert it to grayscale, and blur it
            frame = imutils.resize(frame, width=500)
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (21, 21), 0)

            # if the first frame is None, initialize it
            if firstFrame is None:
                firstFrame = gray
                continue

            frameDelta = cv2.absdiff(firstFrame, gray)
            # self._threshold threshold value, 255 max value
            thresh = cv2.threshold(frameDelta, self._threshold, 255, cv2.THRESH_BINARY)[1]

            # any pixel over the threshold counts as motion for this frame
            if thresh.sum() > 0:
                self._motion.append(1)
            else:
                self._motion.append(0)

            # reading phase covers the first 90% of the progress bar
            self.__controller.set_progress((count / self.__totalFrames) * 90)
            count += 1

    def _frames_normalize(self):
        """Average the per-frame flags over one-second windows, then build the video."""
        motion_normalize = []

        for i in range(0, int(self.__totalFrames), int(self.__fps)):
            if len(self._motion) >= (i + int(self.__fps)):
                motion_normalize.append(np.mean(self._motion[i: i + int(self.__fps)]))
            else:
                # ignore a trailing window shorter than one second
                break

        print(f"Motion rank length {len(motion_normalize)} ")
        self.createVideo(motion_normalize)

    def createVideo(self, motion_normalize):
        """Generate the FFmpeg command, run it and show the completion dialog."""
        # BUG FIX: build_command() was previously called twice (the first result
        # was thrown away), duplicating work and the filter-graph state kept
        # inside pympeg; build the command exactly once and only then report 100%.
        command = self.build_command(motion_normalize)
        self.__controller.set_progress(100)

        print("Generated command for the output :: %s" % command)
        print("Making the output video .....")

        process = Popen(args=command,
                        shell=True,
                        stdout=PIPE,
                        stderr=STDOUT,
                        universal_newlines=True)

        out, err = process.communicate()
        code = process.poll()

        if code:
            # stderr is merged into stdout above, so `out` carries the error text
            raise Exception("FFmpeg ran into an error :: %s" % out)

        # display the completion dialog
        self.__controller.set_dialog()

    def build_command(self, motion_normalize):
        """
        Build the FFmpeg split/trim command string with the pympeg wrapper.
        https://github.com/AP-Atul/pympeg
        """
        timestamps = get_timestamps(motion_normalize)

        _, _extension = os.path.splitext(self._inputFile)
        in_file = pympeg.input(name=str(self._inputFile))

        args = "split=%s" % len(timestamps)
        outputs = list()

        # one labelled stream per detected segment
        for i in range(len(timestamps)):
            outputs.append("split_%s" % str(i))

        split = pympeg.arg(inputs=in_file, args=args, outputs=outputs)

        # trim each segment and reset its presentation timestamps
        # NOTE(review): get_timestamps() returns [start, end] pairs but they are
        # unpacked here as (start, duration) for the trim filter — verify the
        # intended semantics against vea/tools/timestamps.py
        for i, times in enumerate(timestamps):
            start, duration = times
            trim_filter = (
                split[i].filter(filter_name="trim",
                                params={"start": start, "duration": duration})
                .setpts()
            )

            output_file = os.path.join(self._outputFolder, str("output_%s_%s" % (str(i), _extension)))
            trim_filter.output(name=output_file)

        return pympeg.command()
by pressing 'q' 11 | 12 | def display_contours(inputFile, threshold): 13 | inputFile = str(inputFile) 14 | threshold = float(threshold) 15 | 16 | # initialize the first frame in the video __stream 17 | vs = cv2.VideoCapture(inputFile) 18 | firstFrame = None 19 | text = "Unoccupied" 20 | 21 | # loop over the frames of the video 22 | while True: 23 | # grab the current frame and initialize the occupied/unoccupied 24 | # text 25 | frame = vs.read()[1] 26 | # if the frame could not be grabbed, then we have reached the end 27 | # of the video 28 | if frame is None: 29 | break 30 | 31 | # resize the frame, convert it to grayscale, and blur it 32 | frame = imutils.resize(frame, width=500) 33 | gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) 34 | gray = cv2.GaussianBlur(gray, (21, 21), 0) 35 | 36 | # if the first frame is None, initialize it 37 | if firstFrame is None: 38 | firstFrame = gray 39 | continue 40 | else: 41 | firstFrame = gray 42 | 43 | # compute the absolute difference between the current frame and 44 | # first frame 45 | frameDelta = cv2.absdiff(firstFrame, gray) 46 | thresh = cv2.threshold(frameDelta, threshold, 255, cv2.THRESH_BINARY)[1] 47 | 48 | # dilate the threshold image to fill in holes, then find contours 49 | # on threshold image 50 | thresh = cv2.dilate(thresh, None, iterations=2) 51 | contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) 52 | contours = imutils.grab_contours(contours) 53 | 54 | # loop over the contours 55 | for c in contours: 56 | # if the contour is too small, ignore it 57 | if cv2.contourArea(c) < 500: 58 | continue 59 | 60 | # compute the bounding box for the contour, draw it on the frame, 61 | # and update the text 62 | (x, y, w, h) = cv2.boundingRect(c) 63 | cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) 64 | text = "Occupied" 65 | 66 | # draw the text and timestamp on the frame 67 | cv2.putText(frame, "Room Status: {}".format(text), (10, 20), 68 | cv2.FONT_HERSHEY_SIMPLEX, 0.5, 
def get_timestamps(frames):
    """
    Turn a per-window motion-rank list into [start, end] index pairs.

    A segment opens at the first positive value and closes on the first zero,
    using the count of positive values seen as its length; single-window
    segments are not emitted on their own (they merge into the next run).
    A segment still open when the input ends is closed at len(frames).
    """
    segments = []
    begin = None   # index where the current segment opened
    hits = 0       # positive windows counted since `begin`

    for idx, value in enumerate(frames):
        if value > 0:
            if begin is None:
                begin = idx
            hits += 1
        elif begin is not None:
            last = begin + hits - 1
            # a one-window segment is kept pending instead of emitted
            if last != begin:
                segments.append([begin, last])
                begin = None
                hits = 0

    # close any segment that was still open at the end of the input
    if begin is not None:
        segments.append([begin, len(frames)])

    return segments
class VideoShow:
    """
    Show frames continuously on a dedicated thread.

    Running the display off the main UI thread makes playback faster (and
    harder to analyze by eye). Not used by the editing automation itself,
    but kept for other purposes.
    """

    def __init__(self, frame=None):
        self.frame = frame      # frame currently being displayed
        self.stopped = False    # display loop exits once this flips to True

    def start(self):
        """Spawn the display thread and return self for chaining."""
        display_thread = Thread(target=self.show)
        display_thread.start()
        return self

    def show(self):
        """Display loop: render the current frame until stopped or 'q' pressed."""
        while True:
            if self.stopped:
                break
            cv2.imshow("Video", self.frame)
            key = cv2.waitKey(1)
            if key == ord("q"):
                self.stopped = True

    def stop(self):
        """Ask the display loop to terminate."""
        self.stopped = True
        # Input-file path textbox (path is filled in by browseFiles()).
        self.selectFileTextbox.resize(380, 27)
        self.selectFileTextbox.setPlaceholderText('File Path')

        # Label updated by setTotalFramesLabel() via the controller's
        # `frames` signal (connected below).
        self.totalFramesLabel = QLabel(self)
        self.totalFramesLabel.setStyleSheet('color: red')
        self.totalFramesLabel.move(20, 110)

        # Label updated by setVideoFpsLabel() via the controller's
        # `fps` signal (connected below).
        self.videoFps = QLabel(self)
        self.videoFps.setStyleSheet('color: red')
        self.videoFps.move(20, 125)

        # Browse button for the input video file.
        btn = QPushButton("Browse", self)
        btn.setStatusTip('Select the file to edit')
        btn.clicked.connect(self.browseFiles)
        btn.resize(btn.sizeHint())
        btn.move(400, 80)

        tip1 = QLabel(self)
        tip1.setText("Tip : Select a video of your favourite formats, we will \n make sure that we find best motion "
                     "content \n and provide you the output files. ")
        tip1.setFont(QtGui.QFont('Courier', 10))
        tip1.resize(tip1.sizeHint())
        tip1.move(20, 150)

        # destination file components
        outputDetailsFileLabel = QLabel(self)
        outputDetailsFileLabel.setText("Output Details ")
        outputDetailsFileLabel.setFont(QtGui.QFont('Arial', 20, QtGui.QFont.Bold))
        outputDetailsFileLabel.resize(200, 25)
        outputDetailsFileLabel.move(20, 210)

        self.destinationFileLabel = QLabel(self)
        self.destinationFileLabel.setText("Select the destination folder")
        self.destinationFileLabel.resize(200, 27)
        self.destinationFileLabel.move(20, 260)

        # Output-folder path textbox (path is filled in by browseFolders()).
        self.destinationFileTextbox = QLineEdit(self)
        self.destinationFileTextbox.move(20, 290)
        self.destinationFileTextbox.resize(380, 27)
        self.destinationFileTextbox.setPlaceholderText('Folder Path')

        # Label updated by setVideoPercentCuts().
        self.videoPercentCut = QLabel(self)
        self.videoPercentCut.setStyleSheet('color: red')
        self.videoPercentCut.move(20, 320)

        # Browse button for the destination folder.
        btnDestination = QPushButton("Browse", self)
        btnDestination.setStatusTip('Select the folder to store')
        btnDestination.clicked.connect(self.browseFolders)
        # NOTE(review): this sizes btnDestination from *btn*'s sizeHint();
        # both are "Browse" buttons so the result is the same, but this
        # looks like a copy-paste slip — probably meant
        # btnDestination.sizeHint(). Confirm before changing.
        btnDestination.resize(btn.sizeHint())
        btnDestination.move(400, 290)

        # `tip1` is deliberately reused for the second tip label; the first
        # label widget stays alive because it is parented to `self`.
        tip1 = QLabel(self)
        tip1.setText("Tip : We will create number of clips where, we find best \n motion "
                     "content and provide you the output files. ")
        tip1.setFont(QtGui.QFont('Courier', 10))
        tip1.resize(tip1.sizeHint())
        tip1.move(20, 340)

        # status components && variables
        outputDetailsFileLabel = QLabel(self)
        outputDetailsFileLabel.setText("Options & Status")
        outputDetailsFileLabel.setFont(QtGui.QFont('Arial', 20, QtGui.QFont.Bold))
        outputDetailsFileLabel.resize(250, 25)
        outputDetailsFileLabel.move(20, 410)

        # Progress bar driven by setProgress() via the controller's
        # `progress` signal (connected below).
        self.progress = QProgressBar(self)
        self.progress.setGeometry(20, 450, 460, 20)

        destinationFileLabel = QLabel(self)
        destinationFileLabel.setText("Enter a Threshold Value")
        destinationFileLabel.resize(200, 27)
        destinationFileLabel.move(20, 500)

        # Motion-detection threshold; read as a string by playContours()
        # and callMotionDetection().
        self.thresholdTextbox = QLineEdit(self)
        self.thresholdTextbox.move(20, 520)
        self.thresholdTextbox.resize(130, 27)
        self.thresholdTextbox.setPlaceholderText('ex. 25')

        # Live preview of the motion-contour algorithm.
        self.btnPlayContours = QPushButton("Play Live", self)
        self.btnPlayContours.setStatusTip('Click to play your files with Motion Changes')
        self.btnPlayContours.clicked.connect(self.playContours)
        self.btnPlayContours.resize(120, 27)
        self.btnPlayContours.move(200, 520)

        # Kick off the actual processing / output-file creation.
        self.btnCalculate = QPushButton("Create", self)
        self.btnCalculate.setStatusTip('Click to create your files')
        self.btnCalculate.clicked.connect(self.callMotionDetection)
        self.btnCalculate.resize(120, 27)
        self.btnCalculate.move(350, 520)

        # NOTE(review): `self.statusBar` shadows QMainWindow.statusBar();
        # works here because the attribute is only read directly, but any
        # later call to self.statusBar() would break — consider renaming.
        self.statusBar = QStatusBar(self)
        self.setStatusBar(self.statusBar)
        # Wire controller signals to the UI update slots defined below.
        self._controller.progress.connect(self.setProgress)
        self._controller.fps.connect(self.setVideoFpsLabel)
        self._controller.frames.connect(self.setTotalFramesLabel)

    # All Custom Methods
    # select a input file
    def browseFiles(self):
        """Open a file dialog and put the chosen video path in the input textbox."""
        # getOpenFileName returns (path, selected_filter); only the path is used.
        name = QFileDialog.getOpenFileName(None, "Open File", "~",
                                           "Video Files (*.mp4 *.flv *.avi *.mov *.mpg *.mxf)")
        self.selectFileTextbox.setText(str(name[0]))

    # select the output folder
    def browseFolders(self):
        """Open a directory dialog and put the chosen folder in the destination textbox."""
        name = QFileDialog.getExistingDirectory(None, "Select Directory")
        self.destinationFileTextbox.setText(name)

    # set progress to the progress bar
    def setProgress(self, value):
        """Slot: update the progress bar (connected to the controller's `progress` signal)."""
        self.progress.setValue(value)

    # set status to the status bar
    def setStatusTipText(self, value):
        """Show *value* in the status bar for 10 ms (QStatusBar timeout is in milliseconds)."""
        self.statusBar.showMessage(value, 10)

    # set total number of frames on the window
    def setTotalFramesLabel(self, value):
        """Slot: display the total frame count (connected to the controller's `frames` signal)."""
        self.totalFramesLabel.setText("Total Frames :- " + str(value))
        self.totalFramesLabel.resize(self.totalFramesLabel.sizeHint())

    # set video fps on the window
    def setVideoFpsLabel(self, value):
        """Slot: display the video FPS (connected to the controller's `fps` signal)."""
        self.videoFps.setText("Video FPS :- " + str(value))
        self.videoFps.resize(self.videoFps.sizeHint())
percentage of video output to the input on the window 168 | def setVideoPercentCuts(self, value): 169 | self.videoPercentCut.setText("Percentage of video cut out :- " + str(value) + "%") 170 | self.videoPercentCut.resize(self.videoPercentCut.sizeHint()) 171 | 172 | # play the video with motion algorithm applied 173 | def playContours(self): 174 | self.btnPlayContours.setEnabled(False) 175 | threshold = self.thresholdTextbox.text() 176 | inputFile = self.selectFileTextbox.text() 177 | outputFile = self.destinationFileTextbox.text() 178 | 179 | if threshold and inputFile and outputFile: 180 | self._controller.set_threshold(threshold) 181 | self._controller.set_input_file(inputFile) 182 | self._controller.set_output_fol(outputFile) 183 | self._controller.start_display(threshold) 184 | self.btnPlayContours.setEnabled(True) 185 | else: 186 | self.btnPlayContours.setEnabled(True) 187 | 188 | # process the video and create output files 189 | def callMotionDetection(self): 190 | self.btnCalculate.setEnabled(False) 191 | threshold = self.thresholdTextbox.text() 192 | inputFile = self.selectFileTextbox.text() 193 | outputFile = self.destinationFileTextbox.text() 194 | 195 | if threshold and inputFile and outputFile: 196 | self._controller.set_threshold(threshold) 197 | self._controller.set_input_file(inputFile) 198 | self._controller.set_output_fol(outputFile) 199 | self._controller.start_processing() 200 | self.btnCalculate.setEnabled(True) 201 | else: 202 | self.btnCalculate.setEnabled(True) 203 | 204 | 205 | def main(): 206 | app = QApplication(sys.argv) 207 | window = Window() 208 | window.show() 209 | sys.exit(app.exec_()) 210 | 211 | 212 | if __name__ == '__main__': 213 | main() 214 | --------------------------------------------------------------------------------