├── LICENSE
├── README.md
├── output_window.py
├── preview_snap_shot.JPG
└── view.py

/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 l-Al-l
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | view.py is a Python module for vapoursynth scripts that previews and compares VideoNodes (clips) within the script itself, just by running the script.
2 | It uses openCV to put vapoursynth's RGB output on screen.
3 |
4 |
5 |
Preview(list of clips or clip[, int[] frames=None, int delay=None, str img_dir=None, str matrix_in_s=None,
6 | str kernel='Point', int mod_x=2, int mod_y=2, bool ignore_subsampling = False,
7 | tuple position = (60,60), int preview_width = None, int preview_height = None,
8 | bool output_window=False, bool fullscreen=False, bool play=False, bool slider=False ])
9 | import vapoursynth as vs
13 | from view import Preview
14 | clip = vs.core.lsmas.LibavSMASHSource('source.mp4')
15 | Preview(clip)
16 |
17 |
18 | pip install numpy
python -m pip install opencv-contrib-python
sudo apt-get install libjpeg-dev libpng-dev libtiff-dev
35 | sudo apt-get install libavcodec-dev libavformat-dev libswscale-dev libv4l-dev
36 | sudo apt-get install libxvidcore-dev libx264-dev
37 | sudo apt-get install libgtk-3-dev
38 | sudo apt-get install libatlas-base-dev gfortran
39 | sudo apt-get install python3-dev
40 | sudo apt install python3-opencv
41 |
42 | Put view.py and output_window.py into Python's site-packages directory.
43 | Make a Vapoursynth script in your favourite Python editor (IDLE or any other) and give it a .py extension.
44 | Run your script; information is printed into a python console (output_window=False, which is the default) or into a tkinter pop-up window (output_window=True).
45 |
46 | In Vapoursynth Editor (vsedit) use output_window=True
to actually see print-outs, because a simple print('something')
does not work while running a script in vsedit (clip.set_output() can stay in the script as usual).
#all arguments
55 | import vapoursynth as vs
56 | from view import Preview
57 | clip = vs.core.ffms2.Source('source.mp4')
58 | clip_fixed = clip.std.Expr(['x 20 -','','']) #darken luma
59 | Preview(clips=[clip, clip_fixed], frames=[0,2000], delay=20, img_dir=r'F:\images', matrix_in_s = '709',
60 | kernel='Point', mod_x=4, mod_y=4, ignore_subsampling = False,
61 | position = (100,100), preview_width = 640, preview_height = 360,
62 | output_window=False, fullscreen=False, play=False, slider=False)
63 |
64 |
65 |
66 | #passing crop values back to script
67 | #(besides simply copy/pasting the printed cropping line, which can be done during any crop)
68 | import vapoursynth as vs
69 | from view import Preview
70 | clip = vs.core.lsmas.LibavSMASHSource('source.mp4')
71 | p = Preview(clip) #and cancel preview window while in desired crop selection
72 | cropped_clip = clip.std.CropAbs(width=p.width, height=p.height, left=p.left, top=p.top)
73 | Preview(cropped_clip)
74 |
75 |
76 | #comparing different arguments for functions
77 | from vapoursynth import core
78 | import vapoursynth as vs
79 | import havsfunc
80 | from view import Preview
81 | clip = core.ffms2.Source('NTSC_source.avi')
82 | clip = clip.resize.Point(format=vs.YUV420P8)
83 | presets = [
84 | dict(Preset='medium', TFF=False, EZDenoise=2.0, DenoiseMC=True , Denoiser ='fft3df'),
85 | dict(Preset='medium', TFF=False)
86 | ]
87 | Preview( [ havsfunc.QTGMC(clip, **preset) for preset in presets ] )
88 |
89 |
90 |
91 | #another example of comparing different arguments for functions
92 | from vapoursynth import core
93 | import vapoursynth as vs
94 | import havsfunc
95 | from view import Preview
96 | clip = core.ffms2.Source('NTSC_source.avi')
97 | clip = clip.resize.Bicubic(format=vs.YUV420P8)
98 | presets = ['placebo', 'very slow', 'slower', 'slow', 'medium', 'fast', 'faster', 'very fast', 'super fast', 'ultra fast', 'draft']
99 | Preview( [ havsfunc.QTGMC(clip, Preset=preset, TFF=False) for preset in presets ] )
100 |
101 |
102 |
103 | #comparing different attributes (not arguments)
104 | from vapoursynth import core
105 | import vapoursynth as vs
106 | import havsfunc
107 | from view import Preview
108 | clip = core.ffms2.Source('NTSC_source.avi') #ffms2 loads NTSC as 411 (be careful avisource converts it to 420 by default, which you might not want )
109 | kernels = ['Point', 'Bicubic', 'Spline36', 'Lanczos']
110 | Preview( [ getattr(clip.resize, kernel)(format=vs.YUV420P8) for kernel in kernels ] ) #converting to 420 (or 422) for QTGMC and comparing those kernels
111 |
112 |
113 | #saving cropped PNG's to disk with no subsampling restraints or mods:
114 | import vapoursynth as vs
115 | from view import Preview
116 | clip = vs.core.lsmas.LibavSMASHSource('source.mp4')
117 | Preview(clip, img_dir=r'F:\images', mod_x=1, mod_y=1, ignore_subsampling=True)
118 | #while in preview, press 'W' (1:1 pixels) or 'E' (exact window copy) to save an image to the 'images' directory
119 |
120 |
121 | #color range, full vs. limited:
122 | import vapoursynth as vs
123 | from view import Preview
124 | clip = vs.core.lsmas.LibavSMASHSource('source.mp4')
125 | clip = vs.core.std.SetFrameProp(clip, prop="_ColorRange", intval=1)
126 | clip1 = vs.core.std.SetFrameProp(clip, prop="_ColorRange", intval=0)
127 | Preview([clip, clip1])
128 | #switch clips by pressing keys '1' and '2' while playing, or zoom in a couple of times
129 | #and notice the same YUV values but different RGB values for those clips
130 |
131 |
132 | #Creating 10 clips with different shades of gray and passing them to Preview
133 | import vapoursynth as vs
134 | from view import Preview
135 |
136 | DEPTH = 10 #8 to 16
137 |
138 | format = vs.core.register_format(vs.ColorFamily.YUV, vs.INTEGER, DEPTH, 1, 1).id
139 | max = 2**DEPTH
140 | step = max//9
141 | color= (max-1, max//2, max//2)
142 | clip = vs.core.std.BlankClip(width=1280, height=720, format=format, color=color)
143 | shades_of_gray = [ clip.std.Expr([f'x {dimming} -','','']) for dimming in range(0, max, step) ]
144 | #in Preview window pressing keys '1' to '9' or '0' to select a clip
145 | #press "I" with mouse over window to read pixel values
146 | Preview(shades_of_gray)
147 |
148 | list clips
150 | list of clips (vapoursynth VideoNodes)
151 | if only one clip is passed, it can be passed as a simple clip (VideoNode) instead of a list.
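For example (clip, clip_a and clip_b stand for any VideoNodes already defined in the script):
Preview(clip)              #a single clip passed directly
Preview([clip_a, clip_b])  #or a list of clips to compare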
152 |
153 |
154 | list frames
155 | is a list with first frame and last frame+1,
156 | if not specified, default is all frames.
157 | The same range as you'd use when slicing in vapoursynth: clip=clip[some frame:other frame]
158 | Seeking a frame is source plugin dependent; it could take time to seek the n-th frame,
159 | it could take much longer than reading subsequent frames, or it could fail, or proper seeking may have to be specified
160 | as a source plugin argument.
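A minimal example (the frame numbers here are arbitrary):
Preview(clip, frames=[1000, 2000])   #previews frames 1000 to 1999, the same range as clip[1000:2000]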
161 |
162 | int delay
163 | is delay for playback in milliseconds between frames,
164 | if not specified, delay is taken from the first clip's fps, using fps_num and fps_den
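For example, the default for a 25 fps clip works out to 40 ms; a rough sketch of that default (assuming a constant frame rate clip, not the exact implementation):
delay = round(1000 * clip.fps_den / clip.fps_num)   #milliseconds per frame of the first clip
Preview(clip, delay=delay)                          #gives the same result as leaving delay=None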
165 |
166 | string img_dir
167 | is a string path for directory to save 8bit PNG images, if keys 'w' or 'e' are pressed, example:
168 | windows: img_dir = r'E:\images'
169 | linux: img_dir = '/home/username/Documents/images'
170 | if a path is not provided, png's will be written into the script's directory
171 |
172 | string matrix_in_s
173 | To override matrix_in_s for YUV to RGB conversion for preview, FOR ALL CLIPS.
174 | Only applies to YUV clips.
175 | The way YUV -> RGB conversion works within this script is that the proper matrix_in
176 | is pulled from Vapoursynth props: matrix_in = clip.get_frame(0).props["_Matrix"].
177 | If it is a usable value, matrix_in is used for the YUV to RGB conversion.
178 | If that value is not usable, like matrix_in = 2 ('unspec'), matrix_in is defaulted.
179 | For anything from 1024x576 up, matrix_in is 1 ('709'); below that it defaults to:
180 | if clip.width <= 1024 and clip.height <= 480: matrix_in, matrix_in_s = (6, '170m')
181 | elif clip.width <= 1024 and clip.height <= 576: matrix_in, matrix_in_s = (5, '470bg')
182 | But a default translates to: "It could be wrong!".
183 | So if you do not want to rely on defaults when the matrix is not set in vapoursynth props, you have some choices:
184 |
185 | -specify the color space with this argument, for example:
186 | Preview(clip, matrix_in_s='709') #but that would be applied to all clips if there are more
187 | -or register the '_Matrix' prop in Vapoursynth for the clip to make sure, before you call Preview():
188 | clip = core.std.SetFrameProp(clip, prop="_Matrix", intval=1)
189 | Preview(clip)
190 | or
191 | -or you just know, after some experience, that your Source plugin and the type of video you work with
192 | always generate the _Matrix value correctly, and you do not care
193 | or
194 | -you convert it in Vapoursynth to 8bit RGB24 ANY WAY YOU WANT before calling Preview(),
195 | then zimg just copies the RGB planes for preview on screen (it has to be RGB24 though)
196 |
197 | matrix_in : matrix_in_s
198 | 1 : '709'
199 | 4 : 'fcc'
200 | 5 : '470bg'
201 | 6 : '170m'
202 | 7 : '240m'
203 | 8 : 'ycgco'
204 | 9 : '2020ncl'
205 | 10 : '2020cl'
206 | 12 : 'chromancl'
207 | 13 : 'chromacl'
208 | 14 : 'ictcp'
209 |
210 | The bottom line is: there is no way to automatically get correct RGB colors when playing YUV video
211 | and be always correct at the same time.
212 | If there is no matrix flag in the video that the vapoursynth source plugin can pass into the script (into the _Matrix prop),
213 | then defaults are selected.
214 | This goes for any video player out there: if that color space info/flag is not present or is wrong,
215 | the video player can default to the wrong color space.
216 | If a video player does not know the proper color space, it defaults to something, and that could be wrong.
217 | That is why all the details above are explained, so colors are correct during preview.
218 | A good habit is to flag the color space while encoding, exactly for the reasons above,
219 | so video players AND the vapoursynth Source plugin can identify the color space later.
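For example, to take the conversion into your own hands (the last option listed above), convert to 8bit RGB24 in the script before calling Preview(); matrix_in_s='709' here is only an assumption about the source:
import vapoursynth as vs
from view import Preview
clip = vs.core.lsmas.LibavSMASHSource('source.mp4')
rgb = clip.resize.Bicubic(format=vs.RGB24, matrix_in_s='709')   #zimg then only copies the RGB planes for preview
Preview(rgb)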
220 |
221 | string kernel
222 | default: 'Point', same as Vapoursynth resize attributes: 'Point','Bicubic','Lanczos','Spline16','Spline36'
223 |
224 | int mod_x , int mod_y
225 | while cropping and zooming, width and height snap to lengths respecting those mods,
226 | if arguments are not provided, defaults are 2 and 2
227 | Values could be any integers, so if not defaulting, choose some sane values: 2, 4, 8 or 16
228 | tip:
229 | Preview(clip, ignore_subsampling=True, mod_x=1, mod_y=1, img_dir=r'F:\images')
230 | This way any pixel can be selected and cropping can be done to any lengths. This is handy when writing some cropped PNG's
231 | in a row for GIF creation etc. (holding key 'w' down while playing a clip writes PNG's to disk continuously)
232 |
233 | bool ignore_subsampling
234 | A False value guarantees that snapping while cropping is subsampling safe; it snaps to legal values.
235 | You want to leave it at the default False, so cropping respects subsampling
236 | and you do not get errors if using those crop lines in Vapoursynth later on.
237 | But if you set ignore_subsampling=True anyway,
238 | a warning will be printed next to the crop line about vapoursynth failing if you use it in your Vapoursynth script.
239 | Cropping lines will be red, not green.
240 | Reasons to ignore subsampling? Getting RGB images for gifs (maybe together with mod_x=1, mod_y=1 to also allow any width and height),
241 | or you do not care about having it legal in vapoursynth, because you just inspect the video on screen.
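A sketch of why an ignored-subsampling crop can fail later, assuming a YUV 4:2:0 clip:
import vapoursynth as vs
clip = vs.core.std.BlankClip(format=vs.YUV420P8, width=1280, height=720)
#odd width/left values are not legal for 4:2:0 chroma, so vapoursynth raises an error here:
cropped = clip.std.CropAbs(width=101, height=75, left=3, top=1)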
242 |
243 | bool output_window
244 | default is False,
245 | needs Python's tkinter module to be in PATH. Tkinter is used to create the output window.
246 | if True and output_window.py is in PATH, print is redirected to a tkinter window.
247 | If False, or if no output_window.py is present in PATH, logs are printed into the regular python console if there is one.
248 | If using Vapoursynth editor, you have to have output_window.py to see the output logs printed, so set it to True
249 | (or there are at least one-liner infos in the statusbar if opencv was compiled with the Qt library)
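For example, when running the script from Vapoursynth Editor:
from view import Preview
Preview(clip, output_window=True)   #print-outs go to a tkinter pop-up window instead of a console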
250 |
251 | bool fullscreen
252 | sets fullscreen when app starts
253 |
254 | bool play
255 | If True, it starts playing when app starts
256 |
257 | bool slider
258 | show slider or not
259 | Toggle anytime later with key 's' to show or destroy slider.
260 | With the slider on, you cannot manually resize the Preview window on your monitor screen,
261 | this is an opencv limitation.
262 | Also, seeking could lag or freeze; it depends on the video, the clip's source plugin and the source plugin arguments.
263 |
264 |
265 |
266 |
267 | Check global variables WINDOWS_KEYMAP and LINUX_KEYMAP.
269 | Not sure about correct keybindings on Mac at all; they are just set the same as for Linux,
270 | so they might need corrections. If things do not work, see the global DARWIN_KEYMAP and change the keybindings appropriately.
271 | The active (selected/focused) opencv window responds to these keys on the keyboard:
272 |
273 | Keys '1' to '9' and '0' switch between clips to compare them if loading more clips.
274 | So there is a max of 10 clips, but technically it could be more if the keybinding is adjusted (see the sketch after this list).
275 |
276 | MOUSE LEFT DOUBLECLICK zooms in 2x, centers on mouse position
277 | 'Z' - same as above, zooms in 2x as well, again centers on mouse position
278 | MOUSE LEFT CLICK and MOVE initiates crop mode, selecting a rectangle with the mouse; confirm the crop with the ENTER KEY
279 | or with a DOUBLECLICK inside of the selected area or with a RIGHT MOUSE CLICK
280 | while touching the first anchor point, it snaps to a pixel following subsampling,
281 | also while drawing a rectangle, it snaps to mods (2, 4, 8 or 16), whatever is passed as argument
282 | default mods: mod_x=2, mod_y=2
283 | key 'Z' - zooms 2x in as well, again using mouse to be a center if available
284 | key 'R' - RESETs preview to original 1:1 clip on screen
285 | key 'I' - prints YUV or RGB values for pixel under mouse pointer in preview window
286 | printing is in this format:
287 | clip number, _PictType (I, B or P), frame number, absolute pixel coordinates, original clip YUV or RGB values,
288 | preview RGB values
289 | key 'P' prints all available frame properties (_PictType, _Matrix, _Primaries ...etc.)
290 | key 'W' - saves a PNG image of what you are previewing; it is saved on the hard disk as an 8bit PNG at 1:1, ignoring any zoom that you have on screen
291 | key 'E' - saves a PNG image of what you are previewing; it is saved on the hard disk as an 8bit PNG exactly as you see it on screen, respecting zoom and pixel blocks
292 | key Spacebar - Play/Pause switch
293 | key ',' - '<' step one frame back
294 | key '.' - '>' step one frame forward
295 | 'Home' - go to first frame
296 | 'End' - go to last frame
297 | Seeking is source plugin dependent so it could take time and the window could freeze for a while.
298 | key 'Esc' returns to PREVIOUS zoom or crop
299 | key 'Q' Quits app.
300 | key 'S' Slider on/off,
301 | Seeking is source plugin dependent and could be a major problem,
302 | so moving the slider might have considerable delay and freezes
303 | key 'F' Fullscreen on/off switch
304 | key 'H' help, prints this keybinding text into console
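As mentioned above, the clip-switching keys come from the CLIP_KEYMAP list at the top of view.py; a sketch of one way to allow more than 10 clips (keys 'a' and 'b' are unused in the default keymaps, but this is untested):
CLIP_KEYMAP = [ ord('1'), ord('2'), ord('3'), ord('4'), ord('5'), ord('6'), ord('7'), ord('8'), ord('9'), ord('0'),
                ord('a'), ord('b') ]   #clips 11 and 12 would then switch with keys 'a' and 'b'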
305 |
306 |
--------------------------------------------------------------------------------
/output_window.py:
--------------------------------------------------------------------------------
1 | #Python 3
2 | """
3 | named errorwindow originally
4 | Import this module into graphical Python apps to provide a
5 | sys.stderr. No functions to call, just import it. It uses
6 | only facilities in the Python standard distribution.
7 |
8 | If nothing is ever written to stderr, then the module just
9 | sits there and stays out of your face. Upon write to stderr,
10 | it launches a new process, piping its error stream. The new
11 | process throws up a window showing the error messages.
12 |
13 | Code derived from Bryan Olson's source posted in this related Usenet discussion:
14 | https://groups.google.com/d/msg/comp.lang.python/HWPhLhXKUos/TpFeWxEE9nsJ
15 | https://groups.google.com/d/msg/comp.lang.python/HWPhLhXKUos/eEHYAl4dH9YJ
16 |
17 | martineau - Modified to use subprocess.Popen instead of the os.popen
18 | which has been deprecated since Py 2.6. Changed so it
19 | redirects both stdout and stderr. Also inserted double quotes around paths
20 | in case they have embedded space characters in them, as
21 | they did on my Windows system.
22 |
23 |
24 | to use it together with Preview() (the openCV player) within a vapoursynth script:
25 | -changed the subprocess.Popen command to a list instead of a string, so it works under linux
26 | -added an exception handler to catch the window being canceled by the user and delete the pipe, so a new window is automatically constructed again if needed,
27 | -added st.ScrolledText instead of Text
28 | -made sure that subprocess.Popen executable is python executable (or pythonw under windows),
29 | under windows, running it from Mystery Keeper's vsedit, sys.executable returned 'vsedit',
30 | """
31 | import subprocess
32 | import sys
33 | import _thread as thread
34 | import os
35 |
36 | ERROR_FILENAME_LOG = 'error_printing_to_gui.txt'
37 |
38 | if __name__ == '__main__': # When spawned as separate process.
39 | # create window in which to display output
40 | # then copy stdin to the window until EOF
41 | # will happen when output is sent to each OutputPipe created
42 | import tkinter as tk
43 | import tkinter.scrolledtext as st
44 | from tkinter import BOTH, END, Frame, TOP, YES
45 | import tkinter.font as tkFont
46 | import queue as Queue
47 |
48 | Q_EMPTY = Queue.Empty # An exception class.
49 | queue = Queue.Queue(1000) # FIFO, first put first get
50 |
51 | def read_stdin(app, bufsize=4096):
52 | while True:
53 | queue.put(os.read(sys.stdin.fileno(), bufsize))
54 |
55 | class Application(Frame):
56 | def __init__(self, master, font_size=10, family='Courier', text_color='#0000AA', rows=25, cols=128):
57 | super().__init__(master)
58 | self.master = master
59 | if len(sys.argv) < 2:
60 | title = "Output stream from unknown source"
61 | elif len(sys.argv) < 3:
62 | title = "Output stream from {}".format(sys.argv[1])
63 | else: # Assume it's at least 3.
64 | title = "Output stream '{}' from {}".format(sys.argv[2], sys.argv[1])
65 | self.master.title(title)
66 | self.pack(fill=BOTH, expand=YES)
67 | font = tkFont.Font(family=family, size=font_size)
68 | width = font.measure(' ' * (cols+1))
69 | height = font.metrics('linespace') * (rows+1)
70 | self.configure(width=width, height=height)
71 | self.pack_propagate(0) # Force frame to be configured size.
72 |
73 | self.logwidget = st.ScrolledText(self, font=font)
74 | self.logwidget.pack(side=TOP, fill=BOTH, expand=YES)
75 | self.logwidget.configure(foreground=text_color)
76 | self.after(200, self.start_thread, ()) # Start polling thread.
77 |
78 | def start_thread(self, _):
79 | thread.start_new_thread(read_stdin, (self,))
80 | self.after(200, self.check_q, ())
81 |
82 | def check_q(self, _):
83 | go = True
84 | while go:
85 | try:
86 | data = queue.get_nowait().decode()
87 | if not data:
88 | data = '[EOF]'
89 | go = False
90 | self.logwidget.insert(END, data)
91 | self.logwidget.see(END)
92 | except Q_EMPTY:
93 | self.after(200, self.check_q, ())
94 | go = False
95 |
96 | root = tk.Tk(baseName='whatever_name')
97 | app = Application(master=root)
98 | app.mainloop()
99 |
100 | else: # when module is first imported
101 | import traceback
102 |
103 | class OutputPipe(object):
104 | def __init__(self, name=''):
105 | self.lock = thread.allocate_lock()
106 | self.name = name
107 |
108 | def flush(self): # NO-OP.
109 | pass
110 |
111 | def __getattr__(self, attr):
112 | if attr == 'pipe': # Attribute doesn't exist, so create it.
113 | # Launch this module as a separate process to display any output it receives
114 |
115 | executable = sys.executable
116 | try:
117 | basename = os.path.basename(executable)
118 | name, _ = os.path.splitext(basename)
119 | if not name.lower().startswith('python'):
120 | executable = self.get_executable()
121 | except:
122 | executable = self.get_executable()
123 |
124 | argv1 = __file__
125 |
126 | try:
127 | argv2 = os.path.basename(sys.argv[0])
128 | except:
129 | argv2 = ''
130 | argv3 = self.name
131 |
132 | command = [executable]
133 | for arg in [argv1, argv2, argv3]:
134 | if arg:
135 | command.append(arg)
136 | try:
137 | # Had to also make stdout and stderr PIPEs too, to work with pythonw.exe
138 | self.pipe = subprocess.Popen(command,
139 | bufsize=0,
140 | stdin=subprocess.PIPE,
141 | stdout=subprocess.PIPE,
142 | stderr=subprocess.PIPE).stdin
143 | except Exception:
144 | # Output exception info to a file since this module isn't working.
145 | exc_type, exc_value, exc_traceback = sys.exc_info()
146 | msg = '{} exception in {}\n'.format(exc_type.__name__, os.path.basename(__file__))
147 | with open(ERROR_FILENAME_LOG, 'wt') as info:
148 | info.write('fatal error occurred spawning output process\n')
149 | info.write('exception info: ' + msg)
150 | traceback.print_exc(file=info)
151 |
152 | sys.exit('fatal error occurred')
153 |
154 | return super(OutputPipe, self).__getattribute__(attr)
155 |
156 | def get_executable(self):
157 | #if running this within vsedit under windows sys.executable name is 'vsedit'
158 | return 'pythonw'
159 |
160 | def write(self, data):
161 | with self.lock:
162 | try:
163 | data = data.encode()
164 | self.pipe.write(data) # First reference to pipe attr will cause an
165 | # OutputPipe process for the stream to be created.
166 | except Exception:
167 | #gui was canceled by user, piping would cause error
168 | #pipe attr is deleted so new is constructed with __getattr__() and therefore new GUI pops up if needed
169 | del self.pipe
170 | #pass
171 |
172 | try:
173 | os.remove(ERROR_FILENAME_LOG) # Delete previous file, if any.
174 | except Exception:
175 | pass
176 |
177 | # Redirect standard output streams in the process that imported this module.
178 | sys.stderr = OutputPipe('stderr')
179 | sys.stdout = OutputPipe('stdout')
180 |
--------------------------------------------------------------------------------
/preview_snap_shot.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/UniversalAl/view/29ca2cf9c9f21949d872a960e6f655d996bc60c8/preview_snap_shot.JPG
--------------------------------------------------------------------------------
/view.py:
--------------------------------------------------------------------------------
1 | '''
2 | This is an opencv module for Python/vapoursynth scripts that previews a VideoNode (clip) within the script itself, just by running the script.
3 | It is a pythonic solution to play or compare vapoursynth clips by using the opencv module.
4 | '''
5 |
6 | import os
7 | import sys
8 | import platform
9 | import timeit
10 |
11 |
12 | import vapoursynth as vs
13 | from vapoursynth import core
14 | import numpy as np
15 | import cv2
16 | imported_cv2_ver = tuple(map(int, cv2.__version__.split('.')))[0:3]
17 | if imported_cv2_ver < (3,4,1):
18 | raise ImportError(f'openCV version is {cv2.__version__}, it needs to be at least 3.4.1')
19 |
20 | #optional for windows or linux but needed for darwin platform to figure out free RAM
21 | try:
22 | import psutil
23 | except ImportError:
24 | pass
25 |
26 | try:
27 | isAPI4 = vs.__api_version__.api_major >= 4
28 | except AttributeError:
29 | isAPI4 = False
30 |
31 | RESPECT_X_SUBSAMPLING = True #leave both True if wanting snapping to legit cropping values for Vapoursynth based on clip subsampling
32 | RESPECT_Y_SUBSAMPLING = True #user can override these with: Preview([clip], ignore_subsampling = True)
33 |
34 | #assigning keys '1','2','3',...'9', '0' to rgb clip indexes 0,1,2,..., 8, 9
35 | CLIP_KEYMAP = [ ord('1'), ord('2'), ord('3'), ord('4'), ord('5'), ord('6'), ord('7'), ord('8') ,ord('9'), ord('0') ]
36 |
37 |
38 | WINDOWS_KEYMAP = {
39 |
40 | ord(' ') : 'pause_play', #spacebar as a switch for play and pause
41 | ord(',') : 'left_arrow', #key ',' or '<'
42 | ord('.') : 'right_arrow', #key '.' or '>'
43 |
44 | 13 : 'execute_cropping', #key 'Enter' , right mouse click also executes cropping
45 | 2359296 : 'home', #key 'Home' - first video frame
46 | 2293760 : 'end', #key 'End'- last video frame
47 | ord('y') : 'object_step_up', #key 'y' selected object to move step up
48 | ord('n') : 'object_step_down', #key 'n' selected object to move step down
49 | ord('g') : 'object_step_left', #key 'g' selected object to move step left
50 | ord('j') : 'object_step_right', #key 'j' selected object to move step right
51 | ord('p') : 'frame_props', #key 'p' to print frame properties
52 | ord('z') : 'quick_2x_zoom_in', #key 'z' zooming 2x
53 | ord('i') : 'pixel_info', #key 'i' print pixel info under mouse: pixel coordinates, frame#, YUV and RGB values
54 | ord('r') : 'reset_preview', #key 'r' reseting preview
55 | ord('e') : 'write_image', #key 'e' to write the frame you have on screen as png to hard disk, exactly what you see
56 | ord('w') : 'write_image_1_to_1', #key 'w' to write the frame you have on screen as png to hard disk, image is 1:1, ignoring zoom/blow-up
57 | ord('q') : 'closing', #key 'q' to quit
58 | 27 : 'zoom_out', #key 'Esc' to go back to previous zoom or crop
59 | ord('s') : 'slider_switch', #key 's' slider on/off, to show slider or to destroy it
60 | ord('f') : 'fullscreen_switch', #key 'f' fullscreen on/off
61 | ord('h') : 'help' #key 'h' help, shows hotkeys for keybinding
62 |
63 | }
64 |
65 |
66 | LINUX_KEYMAP = {
67 |
68 | ## 65361 : 'left_arrow', #81
69 | ## 65362 : 'up_arrow', #82
70 | ## 65363 : 'right_arrow',#83
71 | ## 65364 : 'down_arrow', #84
72 | ord(' ') : 'pause_play',
73 | ord(',') : 'left_arrow',
74 | ord('.') : 'right_arrow',
75 | 13 : 'execute_cropping',
76 | 65360 : 'home',
77 | 65367 : 'end',
78 | ord('y') : 'object_step_up',
79 | ord('n') : 'object_step_down',
80 | ord('g') : 'object_step_left',
81 | ord('j') : 'object_step_right',
82 | ord('p') : 'frame_props',
83 | ord('z') : 'quick_2x_zoom_in',
84 | ord('i') : 'pixel_info',
85 | ord('r') : 'reset_preview',
86 | ord('e') : 'write_image',
87 | ord('w') : 'write_image_1_to_1',
88 | ord('q') : 'closing',
89 | 27 : 'zoom_out',
90 | ord('s') : 'slider_switch',
91 | ord('f') : 'fullscreen_switch',
92 | ord('h') : 'help'
93 | }
94 |
95 |
96 | DARWIN_KEYMAP = {
97 |
98 | ord(' ') : 'pause_play',
99 | ord(',') : 'left_arrow',
100 | ord('.') : 'right_arrow',
101 | 13 : 'execute_cropping',
102 | 65360 : 'home',
103 | 65367 : 'end',
104 | ord('y') : 'object_step_up',
105 | ord('n') : 'object_step_down',
106 | ord('g') : 'object_step_left',
107 | ord('j') : 'object_step_right',
108 | ord('p') : 'frame_props',
109 | ord('z') : 'quick_2x_zoom_in',
110 | ord('i') : 'pixel_info',
111 | ord('r') : 'reset_preview',
112 | ord('e') : 'write_image',
113 | ord('w') : 'write_image_1_to_1',
114 | ord('q') : 'closing',
115 | 27 : 'zoom_out',
116 | ord('s') : 'slider_switch',
117 | ord('f') : 'fullscreen_switch',
118 | ord('h') : 'help'
119 | }
120 |
121 |
122 | TRANSFER = {
123 | #transfer_in or transfer : transfer_in_s or transfer_s
124 | 0:'reserved',
125 | 1:'709',
126 | 2:'unspec',
127 | 3:'reserved',
128 | 4:'470m',
129 | 5:'470bg',
130 | 6:'601',
131 | 7:'240m',
132 | 8:'linear',
133 | 9:'log100',
134 | 10:'log316',
135 | 11:'xvycc',
136 | 13:'srgb',
137 | 14:'2020_10',
138 | 15:'2020_12',
139 | 16:'st2084',
140 | 18:'std-b67'
141 | }
142 |
143 |
144 | MATRIX = {
145 | #matrix_in or matrix : matrix_in_s or matrix_s
146 | 0:'rgb',
147 | 1:'709',
148 | 2:'unspec',
149 | 3:'reserved',
150 | 4:'fcc',
151 | 5:'470bg',
152 | 6:'170m',
153 | 7:'240m',
154 | 8:'ycgco',
155 | 9:'2020ncl',
156 | 10:'2020cl' ,
157 | 12:'chromancl',
158 | 13:'chromacl',
159 | 14:'ictcp'
160 | }
161 |
162 |
163 | PRIMARIES = {
164 | #primaries_in or primaries : primaries_in_s or primaries_s
165 | 1 : '709' ,
166 | 2 : 'unspec' ,
167 | 4 : '470m' ,
168 | 5 : '470bg' ,
169 | 6 : '170m' ,
170 | 7 : '240m' ,
171 | 8 : 'film' ,
172 | 9 : '2020' ,
173 | 10 : 'st428' , #'xyz'
174 | 11 : 'st431-2',
175 | 12 : 'st432-1',
176 | 22 : 'jedec-p22'
177 | }
178 |
179 | PROPS = {
180 | '_ChromaLocation': {0:'left', 1:'center', 2:'topleft', 3:'top', 4:'bottomleft', 5:'bottom'},
181 | '_ColorRange': {0:'full range', 1:'limited range'},
182 | '_Matrix': MATRIX ,
183 | '_Primaries': PRIMARIES ,
184 | '_Transfer': TRANSFER ,
185 | '_FieldBased': {0:'progressive', 1:'bottom field first', 2:'top field first'},
186 | '_AbsoluteTime': {},
187 | '_DurationNum': {},
188 | '_DurationDen': {},
189 | '_Combed': {},
190 | '_Field': {0:'from bottom field, if frame was generated by SeparateFields',
191 | 1:'from top field, if frame was generated by SeparateFields'},
192 | '_PictType': {},
193 | '_SARNum': {},
194 | '_SARDen': {},
195 | '_SceneChangeNext': {0:'nope',1:'LAST FRAME of the current scene'},
196 | '_SceneChangePrev': {0:'nope',1:'FRAME STARTS a new scene'},
197 | '_Alpha': {}
198 | }
199 |
200 |
201 |
202 | ERROR_FILENAME_LOG = 'error_NO_output_window.txt'
203 |
204 | HOTKEYS_HELP ='''KEYBINDING:
205 | '1' to '9' or '0' to switch between clips to compare them if loading more clips.
206 | So there is a max of 10 clips. But technically,
207 | it could be more if keybinding is adjusted.
208 | MOUSE LEFT DOUBLECLICK zooms in 2x, centers on mouse position
209 | 'Z' zooms in 2x as well, again centers on mouse position
210 |
211 | MOUSE LEFT CLICK and MOVE initiates crop mode, selecting rectangle with mouse,
212 | confirm selection with ENTER KEY or doubleclicking within selection or with RIGHT MOUSE CLICK,
213 | while touching the first point, it snaps to a pixel following subsampling (max from loaded clips),
214 | also while drawing a rectangle, it snaps to mods passed as argument (recommended values 2, 4, 8 or 16)
215 | default mods: mod_x=2, mod_y=2
216 | 'R' RESETs preview to original
217 | ',' '<' step one frame back
218 | '.' '>' step one frame forward
219 | 'Home' go to first frame
220 | 'End' go to last frame
221 | Seeking is source plugin dependent so it could take time to request a frame.
222 | 'Q' quit app, but if zoom or crop was applied, it just resets to original clip first
223 | 'Esc' preview goes back to previous zoom or crop
224 | 'I' prints YUV or RGB values for pixel under mouse pointer in preview window
225 | printing is in this format:
226 | clip number, picture type, frame number, absolute pixel coordinates, original clip YUV or RGB values
227 | or preview RGB values
228 | 'P' prints all available frame properties (_PictType, _Matrix, _Primaries ...etc.)
229 | http://www.vapoursynth.com/doc/apireference.html#reserved-frame-properties
230 | 'W' save PNG image: what you are previewing is saved on the hard disk as an 8bit PNG at 1:1, ignoring any zoom that you have on screen
231 | 'E' save PNG image: what you are previewing is saved on the hard disk as an 8bit PNG exactly as you see it on screen, respecting zoom and pixel blocks
232 | 'Spacebar' Play/Pause switch
233 | 'S' Slider on/off,
234 | Using the slider - grab the slider, move it to a frame.
235 | Seeking depends on the video, the vapoursynth source plugin and its argument selection,
236 | you could experience considerable delay and freezes
237 | 'F' Fullscreen on/off switch
238 | 'H' help, prints this KEYBINDING text
239 |
240 | During cropping and just before confirming that crop,
241 | any selected object is defined by clicking on a 'corner' or 'line' or 'all' (clicking within the selected rectangle)
242 | So the selected rectangle could be manually re-defined by moving that object up, down, left or right one step
243 |
244 | 'Y' selected object to move step up
245 | 'N' selected object to move step down
246 | 'G' selected object to move step left
247 | 'J' selected object to move step right
248 | '''
249 |
250 |
251 |
252 | class Preview:
253 | '''
254 | --- previewing vapoursynth videonodes/clips by opencv(at least 3.4.1 version needed)
255 | --- comparing, swapping, previewing vapoursynth clips by pressing assigned keys on keyboard (1 - 9)
256 | --- clip switching is smooth, seamless during playback
257 | --- with slider or without
258 | --- printing, previewing pixel values for YUV, RGB or other vapoursynth formats, readings could be for example:
259 | clip 1: I Frame: 2345 Pixel: 411,129 CompatYUY2: y:171 u:104 v:160 RGB24: r:233 g:164 b:128
260 | --- printing all available properties for a frame (props)
261 | --- QUICK zooming/cropping, 2x, by mouse doubleclick, centered to mouse position
262 | --- or MANUAL cropping/zooming by drawing cropping area on screen, when all corners, lines could be adjusted
263 | or selected area could be moved/panned,
264 | --- all crops and zooms snap to mods and subsampling for YUV clips (could be turned off: mod_x=1, mod_y=1, ignore_subsampling=True)
265 | --- using SHIFT key while selecting, cropping area is snapping to original clip's aspect ratio
266 | --- all crops and zoom show real core.std.CropAbs(), vapoursynth command, to obtain that crop, or even live feedback during selection
267 | --- returning back to previous zoom or crop (pressing 'Esc')
268 | --- writing PNG images to hard disk (what you see gets saved (with blow-up pixels), or what you see 1:1),
269 | --- when writing PNG images during playback it writes subsequent PNG's (for gif creation or other purposes)
270 | '''
271 |
272 | def __init__(self, clips,
273 | frames=None, delay = None, img_dir=None, matrix_in_s=None, kernel='Point',
274 | mod_x=2, mod_y=2, ignore_subsampling=False,
275 | position = (60,60), preview_width = None, preview_height = None,
276 | output_window=False, fullscreen=False, play=False, slider=False):
277 |
278 | #setting output print first
279 | self.validate_boolean(dict(output_window=output_window))
280 |
281 | error_message = ''
282 | if output_window:
283 | try:
284 | import tkinter
285 | except ImportError:
286 | raise Exception("No standard library tkinter module in PATH\n output_window=True needs python's tkinter module to create output window")
287 | try:
288 | import output_window
289 | except ImportError:
290 | error_message = ( 'No module output_window.py in PATH\n'
291 | 'Using with Vapoursynth Editor:\n'
292 | 'you can put output_window.py into script\'s directory or add that directory to sys.path:\n'
293 | 'import sys\n'
294 | 'import os\n'
295 | 'sys.path.append(os.path.abspath("...your path to directory ..."))\n'
296 | )
297 | self.log('No module output_window.py in PATH\n')
298 | with open(ERROR_FILENAME_LOG, 'a') as info:
299 | info.write('[view]\n' + error_message )
300 |
301 |
302 | self.clips_orig = clips
303 | self.frames = frames
304 | self.delay = delay
305 | self.matrix_in_s = matrix_in_s
306 | self.kernel = kernel
307 | self.img_dir = img_dir
308 | self.modx = mod_x
309 | self.mody = mod_y
310 | self.position = position
311 | self.init_preview_width = preview_width
312 | self.init_preview_height = preview_height
313 | self.fullscreen = fullscreen
314 | self.play = play
315 | self.slider = slider
316 | self.ignore_subsampling = ignore_subsampling
317 | try:
318 | self.validate_clips()
319 | except ValueError as err:
320 | raise ValueError('[Preview]:', err)
321 |
322 | self.validate_frames()
323 | self.validate_delay()
324 | self.validate_img_dir()
325 | self.validate_matrix()
326 | self.validate_kernel()
327 | self.validate_position()
328 | self.validate_preview_dimensions()
329 | self.validate_boolean(dict(fullscreen=fullscreen, play=play, slider=slider, ignore_subsampling=ignore_subsampling))
330 |
331 |
332 | #limiting Vapoursynth cache if not enough RAM
333 | available = None
334 | available_RAM = self.freeRAM()
335 | vapoursynth_cache = core.max_cache_size
336 | self.log(f'Vapoursynth cache is set to: {vapoursynth_cache}MB')
337 | if available_RAM:
338 | self.log(f'free RAM: {available_RAM}MB')
339 | cache = self.limit_cache(vapoursynth_cache, available_RAM)
340 | if not cache == vapoursynth_cache:
341 | self.log(f'setting Vapoursynth cache to: {cache}MB\n')
342 | core.max_cache_size = cache
343 | else:
344 | self.log('\nWARNING, failed to get available free RAM,')
345 | self.log(' Vapoursynth cache was not limited if needed,')
346 | self.log(' RAM overrun or freeze possible\n')
347 |
348 |
349 | #converting clips to RGB clips for opencv preview
350 | self.rgbs = [] #currently previewing rgb clips
351 | self.rgbs_orig = [] #back ups of original rgb clips
352 | self.rgbs_error = [] #list of booleans, True if rgb had errors
353 | convert = Conversions()
354 | depth = 8 #openCV would scale 16bit int or 32bit float to 0-255 anyway
355 | sample_type = vs.INTEGER
356 |
357 | def error_clip(err):
358 | err_clip = core.std.BlankClip(self.clips_orig[i], format=vs.RGB24)
359 | err_clip = core.text.Text(err_clip, err)
360 | self.rgbs.append(err_clip)
361 | self.rgbs_error.append(True)
362 |
363 | for i, clip in enumerate(self.clips_orig):
364 | rgb, log = convert.toRGB(clip, matrix_in_s=self.matrix_in_s, depth=depth, kernel=self.kernel, sample_type = sample_type)
365 | log = 'clip {} to RGB for preview:\n'.format(i+1) + log
366 |
367 | try:
368 | rgb.get_frame(0)
369 | except vs.Error as err:
370 | log += '\n[toRGB]'+ str(err)
371 | error_clip(err)
372 | else:
373 | if isinstance(rgb, vs.VideoNode):
374 | self.rgbs.append(rgb)
375 | self.rgbs_error.append(False)
376 | else:
377 | err = '\n[toRGB] converted RGB is not vs.VideoNode'
378 | log += err
379 | error_clip(err)
380 | self.log(log)
381 |
382 | if self.rgbs:
383 | self.modx, self.mody, self.modx_subs, self.mody_subs = self.validate_mod(self.modx, self.mody)
384 | self.rgbs_orig = self.rgbs.copy()
385 | self.show()
386 |
387 | else:
388 | self.log('[Preview.__init__] no clips loaded ')
389 |
390 |
391 | def show(self):
392 | '''
393 | setting up show loop
394 | '''
395 | #self.log('\n[Preview.show]')
396 | #getting keymap and indexMap depending on OS
397 | OS = self.get_platform()
398 | self.windows_keymap = WINDOWS_KEYMAP
399 | self.linux_keymap = LINUX_KEYMAP
400 | self.darwin_keymap = DARWIN_KEYMAP
401 | KEYMAP = getattr(self, OS + '_keymap')
402 |
403 | #loop properties
404 | self.close = False # True will exit a show, app
405 | self.frame = self.frames[0] # starting frame
406 | self.i = 0 # self.i is current previewing clip index
407 | j = 0 # tracks clip changes, if it matches self.i
408 | if self.play: self.play = 1 # make bint from bool
409 | else: self.play = 0
410 | self.previewData_reset() #makes first stored crop data (width, height, left, top)
411 |
412 | self.width = self.rgbs_orig[self.i].width
413 | self.height = self.rgbs_orig[self.i].height
414 | self.left = 0
415 | self.top = 0
416 |
417 | #mouseAction() properties
418 | self.ix, self.iy = (-1 , -1) #assuming mouse off preview area so no readings yet
419 | self.tx, self.ty = (-10,-10) #first touchdown coordinates while drawing rectangle
420 | self.isCropping = False #initiates cropping
421 | self.drawing = False #True after left mouse button clicks so cropping drawing animation is activated
422 | self.panning = False #selected rectangle object for moving any direction - panning
423 | self.execute_crop = False #True executes selected cropping for all videos
424 | self.object = None #name of object, used in manual step correction using keys
425 | self.proximity = 10 #while clicking down it will pick up corner, line or selection with this pixel proximity in that order
426 | self.good_c = (0,255,0) #BGR color for selection lines if crop is ok in vapoursynth
427 | self.bad_c = (0,0,255) #lines turn into this BGR color if cropping would give error in vapoursynth
428 | self.color = self.good_c
429 | self.flash_color = (255,255,255) #flashing color after selecting an object (a line, lines, corner)
430 | self.x1 = None #left selection x coordinate and also a flag if there was a selection
431 |
432 | #opencv window
433 | text=''
434 | for i , rgb in enumerate(self.rgbs):
435 | text +='clip{} {} '.format(i+1, self.clips_orig[i].format.name)
436 | clip_KEYMAP = CLIP_KEYMAP[:len(self.rgbs)]
437 |
438 | self.title = 'VideoNodes: {}'.format(text)
439 | self.build_window(self.title, self.mouseAction)
440 | self.log('OpenCV version: ' + cv2.__version__)
441 | try:
442 | cv2.displayStatusBar(self.title, '')
443 | self.Qt = True
444 | except:
445 | self.print_info(' No Status Bar, This OpenCV was compiled without QT library')
446 | self.Qt = False
447 | self.placement = (self.position[0], self.position[1], self.init_preview_width, self.init_preview_height)
448 | if not self.fullscreen:
449 | cv2.resizeWindow(self.title, self.init_preview_width, self.init_preview_height)
450 | cv2.moveWindow(self.title, self.position[0],self.position[1])
451 | if self.slider:
452 | self.build_slider()
453 |
454 | #print clip resolutions if different in clips
455 | if not len(set(self.resolutions)) <= 1:
456 | self.log(f"Clips DO NOT HAVE THE SAME RESOLUTIONS, expect weird behaviour if cropping")
457 |
458 | #init print
459 | self.print_info(self.print_clip_name() +': {}'.format(self.i+1))
460 |
461 | if self.play: self.ref = timeit.default_timer() #starting time reference for timing frames
462 |
463 | '''
464 | main openCV playback loop
465 | '''
466 | while True:
467 |
468 | self.show_frame()
469 | if self.slider:
470 | cv2.setTrackbarPos('Frames', self.title, self.frame)
471 | key = cv2.waitKeyEx(self.play)
472 | #print(key)
473 | if key != -1: #if a key was pressed
474 | try:
475 | getattr(self, KEYMAP[key])() #execute functions for hotkeys
476 | except KeyError:
477 | try:
478 | self.i = clip_KEYMAP.index(key) #if key was pressed that suppose to change clips, change index for clips
479 | except ValueError:
480 | pass
481 | else:
482 | #print this only one time
483 | if self.i!= j:
484 | self.print_info(self.cropping_line_text(*self.previewData[-1]))
485 | j=self.i
486 |
487 | if self.close:
488 | break #exiting loop and app
489 |
490 | self.frame = self.update_frame(self.frame)
491 |
492 | if cv2.getWindowProperty(self.title, cv2.WND_PROP_VISIBLE) < 1: #canceling window clicking 'x'
493 | break
494 |
495 | cv2.destroyAllWindows()
496 |
497 | def update_frame(self, f):
498 | if self.play :
499 | f += 1
500 | if f >= self.frames[1]:
501 | self.play = 0
502 | f = self.frames[1]-1
503 | elif f < self.frames[0]:
504 | self.play = 0
505 | f = self.frames[0]
506 | return f
507 |
508 | def show_frame(self):
509 | '''
510 | Vapoursynth frame is converted to numpy arrays for opencv to show
511 | delay is handled here, not in cv2.waitKey(), because timeit.default_timer() takes app & system time overhead into account
512 | '''
513 |
514 | try:
515 | f = self.rgbs[self.i].get_frame(self.frame)
516 | except:
517 | f = self.error_frame()
518 | if isAPI4: self.img = np.dstack([np.array(f[p], copy=False) for p in [2,1,0]])
519 | else: self.img = np.dstack([np.array(f.get_read_array(p), copy=False) for p in [2,1,0]])
520 | if self.isCropping and self.x1 is not None:
521 | img = self.img_and_selection(self.img, (self.x1,self.y1,self.x2,self.y2),self.color)
522 | if self.play: self.delay_it()
523 | cv2.imshow(self.title, img)
524 | else:
525 | if self.play: self.delay_it()
526 | cv2.imshow(self.title, self.img)
527 |
528 | def error_frame(self):
529 | self.play = 0
530 | def log_err():
531 | err = '\n' + str(sys.exc_info()[0])+'\n'
532 | err += 'in line ' + str(sys.exc_info()[2].tb_lineno)+'\n'
533 | err += str(sys.exc_info()[1])+'\n'
534 | return err
535 | err = log_err()
536 | info = '\nclip: {} Frame: {} ,Frame could not be rendered for this clip'.format(self.i, self.frame)
537 | self.log(err+info)
538 | if self.Qt: self.print_statusBar(info)
539 | err_clip = core.std.BlankClip(self.clips_orig[self.i], format=vs.RGB24, length=1).text.Text(err+info)
540 | return err_clip.get_frame(0)
541 |
542 | def delay_it(self):
543 | while True:
544 | new = timeit.default_timer()
545 | if new >= self.ref + self.delay:
546 | self.ref = new
547 | break
548 |
549 | def get_platform(self):
550 | '''
551 | uses sys.platform (with platform.system() as a fallback) to identify the OS
552 | returns: 'linux', 'windows' or 'darwin'
553 | '''
554 | OS = None
555 | if sys.platform.startswith('linux'):
556 | OS = 'linux'
557 | elif sys.platform.startswith('win'):
558 | OS = 'windows'
559 | elif sys.platform == 'darwin':
560 | OS = 'darwin'
561 | else:
562 | try:
563 | OS = platform.system()
564 | except:
565 | OS = None
566 | if OS: return OS.lower()
567 | else: return None
568 |
569 | def execute_cropping(self):
570 | if self.execute_crop:
571 | self.crop_to_new(self.width, self.height, *self.get_absolute_offsets(self.x1, self.y1))
572 | self.isCropping = False
573 | self.execute_crop = False
574 |
575 | def pause_play(self):
576 | if self.play:
577 | self.play = 0
578 | else:
579 | self.play = 1
580 | self.ref = timeit.default_timer()
581 |
582 | def log(self, *args):
583 | '''
584 | if
585 | argument output_window = True
586 | and
587 | output_window modul is imported,
588 | then print is redirected to tkinter window
589 | '''
590 | text =''
591 | for item in args:
592 | try:
593 | text += str(item) + ' '
594 | except:
595 | pass
596 | print(text[:-1])
597 |
598 | def frame_props(self):
599 | self.log(f'\nclip{self.i+1} {self.clips_orig[self.i].format.name}, properties of frame {self.frame}:')
600 | self.log(self.get_frame_props(self.clips_orig[self.i], self.frame))
601 |
602 | def get_frame_props(self, clip, frame):
603 | '''
604 | prints all available frame properties (_PictType, _Matrix, _Primaries ...etc.)
605 | http://www.vapoursynth.com/doc/apireference.html#reserved-frame-properties
606 | '''
607 |
608 | info = []
609 | props_dict = dict(clip.get_frame(frame).props)
610 | for prop, prop_value in props_dict.items():
611 | if isinstance(prop_value, bytes):
612 | prop_value = prop_value.decode()
613 |
614 | elif isinstance(prop_value, vs.VideoFrame): #this is a wild guess for alpha, did not look into it yet
615 | prop_value = 'yes'
616 |
617 | info.append(' {: <25}{}'.format(prop, prop_value))
618 | try:
619 | info.append('={}'.format(PROPS[prop][prop_value]))
620 | except:
621 | pass
622 | info.append('\n')
623 | return ''.join(info)
624 |
625 |
626 | def mouseAction(self,event,x,y,flags,p=None):
627 | '''
628 | Mouse click initiates drawing of a cropping rectangle.
629 | While holding SHIFT, the new rectangle snaps to the aspect ratio of the clip.
630 | Drawing of rectangle respects and snaps to original YUV clip's subsampling.
631 | (if not deactivated by: mod_x=1, mod_y=1, ignore_subsampling=True)
632 | Clicking outside of the selected rectangle cancels cropping.
633 | (or initiates a new cropping selection if the mouse keeps moving)
634 | Double click inside of the selected rectangle (or keyboard ENTER) confirms and performs the crop.
635 | Clicking once inside of the selected rectangle activates the selection for moving,
636 | that could be done by mouse or just using keyboard ('g','y','j','n' - one step to left,top,right or down),
637 | to move it in smallest subsampling steps.
638 | Clicking on particular single object (corner or a line) also activates moving but only for that object.
639 |
640 | mouseAction needs globals so using Preview class attributes for that purpose to store values:
641 | ix, iy mouse position
642 | xa,ya first anchor point for drawing selection
643 | x1,x2,y1,y2 current selection points (rectangle)
644 | width, height width which is (x2-x1) and height (y2-y1) ,for current rectangle
645 | '''
646 | if event == cv2.EVENT_LBUTTONDOWN:
647 | self.useX = True
648 | self.useY = True
649 |
650 | if not self.isCropping:
651 | self.isCropping = True
652 | self.drawing = True
653 | self.execute_crop = False
654 | self.init_new_selection(x,y)
655 |
656 | elif self.isCropping:
657 | self.drawing = True
658 | self.object = self.select_object(x,y)
659 | if self.object == 'all': #whole selection selected, set for panning
660 | self.panning = True
661 | elif self.object is None: #none object selected, initiate new crop
662 | self.execute_crop = False
663 | self.x1 = None #for show_frame() to not show selection, cannot use isCropping switch, it could be True
664 | self.init_new_selection(x,y)
665 |
666 | elif event == cv2.EVENT_MOUSEMOVE:
667 | self.ix = x
668 | self.iy = y
669 |
670 | if self.isCropping and self.drawing and not self.panning:
671 | rectangle = self.new_rectangle(x,y,flags&cv2.EVENT_FLAG_SHIFTKEY)
672 | self.live_crop_info(rectangle)
673 | if not self.play:
674 | cv2.imshow(self.title, self.img_and_selection(self.img,rectangle,self.color))
675 |
676 | elif self.panning:
677 | rectangle = self.move_rectangle(x,y,flags&cv2.EVENT_FLAG_SHIFTKEY)
678 | self.live_crop_info(rectangle)
679 | if not self.play:
680 | cv2.imshow(self.title, self.img_and_selection(self.img,rectangle,self.color))
681 |
682 | elif event == cv2.EVENT_LBUTTONUP:
683 | self.panning = False
684 | self.drawing = False
685 | if self.tx == x and self.ty == y: #mouse touched screen but did not move, no drawing happened, quit cropping
686 | self.isCropping = False
687 | self.print_info(self.cropping_line_text(*self.previewData[-1]))
688 | self.show_frame()
689 | self.ix = x
690 | self.iy = y
691 |
692 | elif self.isCropping: #rectangle is selected, relevant attributes ready for crop: self.x1,self.y1,self.x2,self.y2
693 | self.execute_crop = True #but self.isCropping is still True because cropping can be modified
694 | #self.isCropping becomes False only after user executes cropping (key Enter or dbl click or right mouse click)
695 | elif event == cv2.EVENT_LBUTTONDBLCLK:
696 | if self.isCropping: #doubleclick into selected area would make crop
697 | self.execute_crop = False
698 | self.isCropping = False
699 | self.crop_to_new(self.width, self.height, *self.get_absolute_offsets(self.x1, self.y1))
700 | self.show_frame()
701 | self.ix = x
702 | self.iy = y
703 | else: #doubleclick outside of selected area or if there is no selection
704 | self.quick_2x_zoom_in(x,y) #quick 2x zoom, centered to mouse position x,y
705 | #self.show_frame() #cv2.EVENT_LBUTTONUP renders frame because self.tx == x and self.ty == y
706 |
707 | elif event == cv2.EVENT_RBUTTONDOWN: #if a rectangle is drawn for crop, a right mouse click executes the crop
708 | if self.execute_crop:
709 | self.execute_crop = False
710 | self.isCropping = False
711 | self.crop_to_new(self.width, self.height, *self.get_absolute_offsets(self.x1, self.y1))
712 | self.show_frame()
713 | self.ix = x
714 | self.iy = y
715 |
716 | def init_new_selection(self, x,y):
717 | '''initiate drawing of selection area by creating first anchor point and other properties'''
718 | self.tx = x
719 | self.ty = y
720 | #self.play = 0 #stopping playback while starting to crop
721 | self.w = self.rgbs[self.i].width
722 | self.h = self.rgbs[self.i].height
723 | self.origw = self.rgbs_orig[self.i].width
724 | self.origh = self.rgbs_orig[self.i].height
725 | self.xa = x - x % self.modx_subs #snapping to the correct subsampling column
726 | self.ya = y - y % self.mody_subs #snapping to the correct subsampling line
727 |
728 |
729 | def new_rectangle(self,x,y, flags=0):
730 | '''
731 | draw a rectangle selection x1,y1; x2,y2 , snapping to video resolution mods and subsampling mods
732 | keep selection within rgb's dimensions w,h
733 | if SHIFT is pressed keep clips aspect ratio (and of course keep mods as well!)
734 | also xa,ya is always needed, that is anchor point for selection, first point of selection
735 | '''
736 |
737 | if self.useX:
738 | if x>=self.xa:
739 | x1 = self.xa
740 | x2 = min(x, self.w)
741 | else:
742 | x1 = max(x, 0)
743 | x2 = self.xa
744 | w = x2 - x1
745 | w = w - w % self.modx
746 | else:
747 | x1 = self.x1
748 | x2 = self.x2
749 |
750 | if self.useY:
751 | if y>=self.ya:
752 | y1 = self.ya
753 | y2 = min(y, self.h)
754 | else:
755 | y1 = max(y, 0)
756 | y2 = self.ya
757 | h = y2 - y1
758 | h = h - h % self.mody
759 | else:
760 | y1 = self.y1
761 | y2 = self.y2
762 |
763 | if flags == 16:
764 | '''SHIFT key is pressed, snap rectangle into aspect ratio '''
765 | t_w = w
766 | ar = self.origh/self.origw
767 | while t_w > 0:
768 | t_h = t_w*ar
769 | if t_h.is_integer() and t_h % self.mody == 0 and t_h + y1 <= self.h and t_h <= y2:
770 | h = int(t_h)
771 | w = t_w
772 | break
773 | t_w -= self.modx
774 | if t_w <= 0: w = 0
775 |
776 | #final correction
777 | if self.useX:
778 | if x>=self.xa: x2 = x1 + w
779 | else: x1 = x2 - w
780 | self.width = w
781 | if self.useY:
782 | if y>=self.ya: y2 = y1 + h
783 | else: y1 = y2 - h
784 | self.height = h
785 |
786 | self.x1 = self.left = x1
787 | self.y1 = self.top = y1
788 | self.x2 = x2
789 | self.y2 = y2
790 |
791 | return (x1,y1,x2,y2)
792 |
793 |
794 | def move_rectangle(self,x,y, flags=0):
795 | '''
796 | move object 'all' (all lines ergo all selected area)
797 | which technically is making new rectangle x1,y1; x2,y2 but same width and height and snapping to subsampling mods,
798 | keep selection within rgb's dimensions w,h
799 | if the SHIFT key is pressed while moving, slide the selection horizontally or vertically only,
800 | dx (x - self.x1) and dy (y - self.y1) are introduced to imitate mouse always dragging x1,y1 to not have weird delays if dragging off screen
801 | '''
802 | x1 = x - self.dx
803 | y1 = y - self.dy
804 | x1 = max((x1 - x1 % self.modx_subs), 0)
805 | if x1 + self.width > self.w:
806 | x1 = self.w-self.width
807 |
808 | y1 = max((y1 - y1 % self.mody_subs), 0)
809 | if y1 + self.height > self.h:
810 | y1 = self.h-self.height
811 | if flags == 16:
812 | '''SHIFT key is pressed'''
813 | if abs(x1-self.xa) > abs(y1-self.ya):
814 | y1 = self.ya #anchor/freeze y
815 | else:
816 | x1 = self.xa #anchor/freeze x
817 | x2 = x1+self.width
818 | y2 = y1+self.height
819 | self.x1 = self.left = x1
820 | self.y1 = self.top = y1
821 | self.x2 = x2
822 | self.y2 = y2
823 | return (x1,y1,x2,y2)
824 |
825 | def select_object(self, x,y):
826 | '''
827 | locate object with mouse click on x,y coordinates, it could be: a corner, line or middle of selection(object 'all')
828 | set that object for drawing/moving and flash particular selected object,
829 | priority for selections: corner, then line, then selected area(object 'all')
830 | return found name of object
831 | '''
832 | p = max(self.proximity,1) #proximity to "see" an object from that pixel distance when clicking
833 | f = 2 #flashing line proximity in pixels
834 | c = 5 #flashing corner border proximity in pixels
835 | x1 = self.x1
836 | y1 = self.y1
837 | x2 = self.x2
838 | y2 = self.y2
839 | r = (x1,y1,x2,y2)
840 | #self.play = 0 #stopping playback while modifying crop
841 |
842 | if x > x1-p and x < x1+p:
843 | if y > y1-p and y < y1+p:
844 | self.set_object_left_top_corner()
845 | self.flash_object(r,[((x1-c,y1-c),(x1+c, y1+c))])
846 | return 'left_top_corner'
847 | elif y > y2-p-1 and y < y2+p-1:
848 | self.set_object_left_bottom_corner()
849 | self.flash_object(r,[((x1-c,y2-c-1),(x1+c, y2+c-1))])
850 | return 'left_bottom_corner'
851 | else:
852 | self.set_object_left_line()
853 | self.flash_object(r,[((x1-f,0),(x1+f, self.h))])
854 | return 'left_line'
855 | elif x > x2-p-1 and x < x2+p-1:
856 | if y > y1-p-1 and y < y1+p-1:
857 | self.set_object_right_top_corner()
858 | self.flash_object(r,[((x2-c-1,y1-c),(x2+c-1, y1+c))])
859 | return 'right_top_corner'
860 | elif y > y2-p-1 and y < y2+p-1:
861 | self.set_object_right_bottom_corner()
862 | self.flash_object(r,[((x2-c-1,y2-c-1),(x2+c-1, y2+c-1))])
863 | return 'right_bottom_corner'
864 | else:
865 | self.set_object_right_line()
866 | self.flash_object(r,[((x2-f-1,0),(x2+f-1,self.h))])
867 | return 'right_line'
868 | elif y > y1-p and y < y1+p:
869 | self.set_object_top_line()
870 | self.flash_object(r,[((0,y1-f),(self.w, y1+f))])
871 | return 'top_line'
872 | elif y > y2-p-1 and y < y2+p-1:
873 | self.set_object_bottom_line()
874 | self.flash_object(r,[((0,y2-f-1),(self.w, y2+f-1))])
875 | return 'bottom_line'
876 | elif x > x1 and x < x2 and y > y1 and y < y2:
877 | self.set_object_all(x, y)
878 | self.flash_object(r,[((x1-f,y1-f),(x2+f-1, y2+f-1)),((x1+f,y1+f),(x2-f-1, y2-f-1))])
879 | return 'all'
880 | else:
881 | return None
882 |
883 |
884 | def set_object_left_top_corner(self,*_):
885 | self.xa, self.ya = self.x2, self.y2
886 |
887 | def set_object_left_bottom_corner(self,*_):
888 | self.xa, self.ya = self.x2, self.y1
889 |
890 | def set_object_left_line(self,*_):
891 | self.xa, self.ya = self.x2, self.y1
892 | self.useY = False
893 |
894 | def set_object_right_top_corner(self,*_):
895 | self.xa, self.ya = self.x1, self.y2
896 |
897 | def set_object_right_bottom_corner(self,*_):
898 | self.xa, self.ya = self.x1, self.y1
899 |
900 | def set_object_right_line(self,*_):
901 | self.xa, self.ya = self.x1, self.y1
902 | self.useY = False
903 |
904 | def set_object_top_line(self,*_):
905 | self.xa, self.ya = self.x2, self.y2
906 | self.useX = False
907 |
908 | def set_object_bottom_line(self,*_):
909 | self.xa, self.ya = self.x1, self.y1
910 | self.useX = False
911 |
912 | def set_object_all(self, x, y):
913 | self.xa, self.ya = self.x1,self.y1
914 | self.dx = x - self.x1
915 | self.dy = y - self.y1
916 |
917 | '''
918 |     object_step_up(), _down, _left and _right get a step value, the smallest allowed by resolution mods and subsampling mods,
919 |     this is for keyboard stepping only, it never gets here when using the mouse
920 | '''
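920a |     # Worked example (assuming mody=4 and mody_subs=2): stepping a line or a corner moves by
920b |     # max(4, 2) = 4 pixels, while stepping the whole selection ('all') moves by mody_subs = 2.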
921 | def object_step_up(self):
922 | if self.object == 'all': self.move_object(0, -self.mody_subs)
923 | else: self.move_object(0, -max(self.mody, self.mody_subs))
924 |
925 | def object_step_down(self):
926 | if self.object == 'all': self.move_object(0, self.mody_subs)
927 | else: self.move_object(0, max(self.mody, self.mody_subs))
928 |
929 | def object_step_left(self):
930 | if self.object == 'all': self.move_object(-self.modx_subs, 0)
931 | else: self.move_object(-max(self.modx, self.modx_subs), 0)
932 |
933 | def object_step_right(self):
934 | if self.object == 'all': self.move_object(self.modx_subs, 0)
935 | else: self.move_object(max(self.modx, self.modx_subs), 0)
936 |
937 |
938 | def move_object(self,x,y):
939 | '''
940 | move_object() is for keyboard stepping to simulate mouse movement,
941 |         it never gets here when using the mouse,
942 |         x,y is the step increment for moving
943 | '''
944 | if self.object is None: return
945 |
946 | if self.object == 'all':
947 | new_position = (self.x1+x, self.y1+y)
948 | self.set_object_all(self.x1, self.y1)
949 | elif self.object == 'top_line': new_position = (self.x1+x, self.y1+y)
950 | elif self.object == 'bottom_line': new_position = (self.x2+x, self.y2+y)
951 | elif self.object == 'right_line': new_position = (self.x2+x, self.y2+y)
952 | elif self.object == 'right_bottom_corner': new_position = (self.x2+x, self.y2+y)
953 | elif self.object == 'right_top_corner': new_position = (self.x2+x, self.y1+y)
954 | elif self.object == 'left_line': new_position = (self.x1+x, self.y1+y)
955 | elif self.object == 'left_bottom_corner': new_position = (self.x1+x, self.y2+y)
956 | elif self.object == 'left_top_corner': new_position = (self.x1+x, self.y1+y)
957 |
958 | if self.object == 'all': rectangle = self.move_rectangle(*new_position)
959 | else: rectangle = self.new_rectangle(*new_position)
960 | self.live_crop_info(rectangle)
961 | if not self.play:
962 | cv2.imshow(self.title, self.img_and_selection(self.img, rectangle, self.color))
963 | getattr(self, f'set_object_{self.object}')(self.x1,self.y1) #set object for another move if there is
964 |
965 |
966 | def flash_object(self, r, flash_rectangles):
967 | img = self.img_and_selection(self.img, r, self.color)
968 | for tuple_pair in flash_rectangles:
969 | cv2.rectangle(img, *tuple_pair, self.flash_color, 1, cv2.LINE_AA)
970 | cv2.imshow(self.title, img)
971 |
972 | def img_and_selection(self, img, r, c):
973 | x1,y1,x2,y2 = r
974 | final = cv2.bitwise_not(img)
975 | #crop = cv2.UMat(img, [y1, y2], [x1, x2]) #to do accelerating
976 | final[y1:y2, x1:x2] = img[y1:y2, x1:x2]
977 | cv2.line(final, (x1, 0), (x1, self.h), c, 1, cv2.LINE_AA)
978 | cv2.line(final, (0, y1), (self.w, y1), c, 1, cv2.LINE_AA)
979 | cv2.line(final, (max(x1,x2-1), 0), (max(x1,x2-1), self.h), c, 1, cv2.LINE_AA)
980 | cv2.line(final, (0, max(y1,y2-1)), (self.w, max(y1,y2-1)), c, 1, cv2.LINE_AA)
981 | return final
982 |
983 | def live_crop_info(self, r):
984 | x1,y1,x2,y2 = r
985 | self.print_info(self.cropping_line_text(x2-x1,y2-y1,*self.get_absolute_offsets(x1,y1)))
986 | #### cv2.putText(dist, info, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 4) #fontScale=0.4
987 |
988 |
989 | def trackbar_change(self, pos):
990 | self.frame = int(pos)
991 | if self.play == 0:
992 | self.show_frame()
993 |
994 | def reset_preview(self):
995 | self.reset_preview()
996 |
997 | def build_window(self, title, mouseAction):
998 | if self.fullscreen:
999 | self.set_window_fullscreen(title)
1000 | else:
1001 | self.set_window_normal(title)
1002 | cv2.setMouseCallback(title, mouseAction)
1003 |
1004 |
1005 | def set_window_normal(self, title):
1006 | try:
1007 | cv2.namedWindow(title, cv2.WINDOW_NORMAL) #strict 1:1 cv2.WINDOW_AUTOSIZE | cv2.WINDOW_KEEPRATIO or flags=cv2.WINDOW_GUI_NORMAL
1008 | cv2.setWindowProperty(title,cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)
1009 | except:
1010 | self.print_info(self.print_clip_name() +': {} errors in normal screen resizing'.format(self.i+1))
1011 |
1012 | def set_window_fullscreen(self, title):
1013 | try:
1014 |             cv2.namedWindow(title, cv2.WINDOW_KEEPRATIO) #cv2.WND_PROP_FULLSCREEN and without the aspect ratio line
1015 | cv2.setWindowProperty(title, cv2.WND_PROP_ASPECT_RATIO,cv2.WINDOW_KEEPRATIO)
1016 | cv2.setWindowProperty(title, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
1017 | except:
1018 | self.print_info(self.print_clip_name() +': {} errors in full screen resizing'.format(self.i+1))
1019 |
1020 | def slider_switch(self):
1021 | if self.slider:
1022 | cv2.destroyWindow(self.title) #have to re-build window to get rid of slider
1023 | self.build_window(self.title, self.mouseAction)
1024 | else:
1025 | self.build_slider()
1026 | self.slider = not self.slider
1027 |
1028 | def fullscreen_switch(self):
1029 | if self.fullscreen:
1030 | #set normal screen
1031 | self.redraw_normal_screen(reset=False)
1032 | cv2.resizeWindow(self.title, self.placement[2], self.placement[3])
1033 | cv2.moveWindow(self.title, self.placement[0], self.placement[1])
1034 | #move correction, (cv2.moveWindow and cv2.getWindowImageRect() are off from each other)
1035 | x,y,_,_ = cv2.getWindowImageRect(self.title)
1036 | cv2.moveWindow(self.title, 2*self.placement[0] - x, 2*self.placement[1] - y)
1037 | else:
1038 | #storing normal window position before going fullscreen
1039 | self.placement = cv2.getWindowImageRect(self.title)
1040 | self.redraw_fullscreen()
1041 |
1042 | def redraw_normal_screen(self, reset):
1043 | self.fullscreen = False
1044 | if self.slider:
1045 | cv2.destroyWindow(self.title) #slider cannot be adjusted, so build all again
1046 | self.build_window(self.title, self.mouseAction)
1047 | if not reset:
1048 | cv2.resizeWindow(self.title, self.placement[2], self.placement[3])
1049 | cv2.moveWindow(self.title, self.placement[0], self.placement[1])
1050 | else:
1051 | cv2.resizeWindow(self.title, self.init_preview_width, self.init_preview_height )
1052 | if not self.init_window_move():
1053 | cv2.moveWindow(self.title, self.placement[0], self.placement[1])
1054 | self.build_slider()
1055 | else:
1056 | self.set_window_normal(self.title)
1057 | if reset:
1058 | cv2.resizeWindow(self.title, self.init_preview_width, self.init_preview_height )
1059 | self.init_window_move()
1060 |
1061 | #img = cv2.resize(img, (w, h), fx=1.0 / tilt, fy=1.0, interpolation=cv2.INTER_NEAREST)
1062 |
1063 | def init_window_move(self):
1064 | x,y,_,_ = cv2.getWindowImageRect(self.title)
1065 | if x < 0: #if left, top corner is off monitor , move window
1066 | cv2.moveWindow(self.title, self.position[0], y)
1067 | return True
1068 | return False
1069 |
1070 | def redraw_fullscreen(self):
1071 | self.fullscreen = True
1072 | if self.slider:
1073 | cv2.destroyWindow(self.title)
1074 | self.build_window(self.title, self.mouseAction)
1075 | self.build_slider()
1076 | else:
1077 | self.set_window_fullscreen(self.title)
1078 |
1079 | def build_slider(self):
1080 | cv2.createTrackbar('Frames', self.title, self.frame, self.frames[1]-1, self.trackbar_change)
1081 |
1082 | def print_statusBar(self,info):
1083 | cv2.displayStatusBar(self.title, info)
1084 |
1085 | def print_info(self, info):
1086 | try:
1087 | if self.Qt:
1088 | cv2.displayStatusBar(self.title, info)
1089 | except:
1090 | pass
1091 | self.log(info)
1092 |
1093 | def zoom_out(self):
1094 | self.crop_to_previous()
1095 |
1096 | def quick_2x_zoom_in(self, *mouse_position):
1097 | '''
1098 |         quick 2x zoom-in, using the mouse position as the center for zooming,
1099 |         basically it just crops to half the current width and height and draws that into the same window,
1100 |         which saves RAM and makes the preview faster
1101 | '''
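1101a |         # Worked example (hypothetical numbers, assuming modx=mody=4 and modx_subs=mody_subs=2):
1101b |         # a 1920x1080 view with the mouse at (1000,600) gives w=960, h=540, x1=1000-480=520, y1=600-270=330.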
1102 | try:
1103 | x,y = mouse_position
1104 | except Exception:
1105 | x,y = (self.ix,self.iy)
1106 |
1107 | #self.play = 0 #stopping preview
1108 |
1109 | current_w = self.rgbs[self.i].width
1110 | current_h = self.rgbs[self.i].height
1111 |
1112 | if x == -1:
1113 | x,y = (current_w/2,current_h/2)
1114 |
1115 | #new width and height for cropping (half whatever window has now) and respecting mod
1116 | w = int(current_w/2)
1117 | w = w - w % self.modx
1118 | h = int(current_h/2)
1119 | h = h - h % self.mody
1120 |
1121 | #x1 and y1 are left top corner coordinates for crop
1122 | x = max(0, x - int(w/2) )
1123 | y = max(0, y - int(h/2) )
1124 | x1 = x - x % self.modx_subs
1125 | y1 = y - y % self.mody_subs
1126 |
1127 |         #right bottom corner for the crop; it is only used to recalculate x1,y1
1128 |         #if that corner would end up off screen
1129 | x2 = min(current_w, x1 + w)
1130 | y2 = min(current_h, y1 + h)
1131 | if x2 == current_w: x1 = current_w - w
1132 | if y2 == current_h: y1 = current_h - h
1133 |
1134 |         #absolute values for crop, class attributes
1135 | self.x1, self.y1 = x1, y1
1136 | self.x2 = self.x1 + w
1137 | self.y2 = self.y1 + h
1138 | self.width = w
1139 | self.height = h
1140 | self.left, self.top = self.get_absolute_offsets(self.x1, self.y1)
1141 |
1142 | #cropping
1143 | self.crop_to_new(self.width, self.height, self.left, self.top)
1144 |
1145 |
1146 | def pixel_info(self):
1147 | '''
1148 | using self.ix and self.iy as current mouse coordinates from mouseAction()
1149 |
1150 | prints original clip's:
1151 | clip index, pic. type, frame number, absolute pixel coordinates, original clip pixel values, and RGB preview pixel values
1152 | '''
1153 | if self.ix == -1:
1154 | self.print_info(self.print_clip_name() +': {} mouse is off preview area'.format(self.i+1))
1155 | return
1156 | x = self.ix
1157 | y = self.iy
1158 | xa, ya = self.get_absolute_offsets(x, y) #absolute pixel coordinates
1159 |
1160 | #original clip pixel values
1161 | clip = self.clips_orig[self.i]
1162 | p0, p1, p2 = self.get_pixel_values(clip, self.frame, xa,ya)
1163 |
1164 | pt=''
1165 | try:
1166 | pt = f'{clip.get_frame(self.frame).props["_PictType"].decode()}'
1167 | except:
1168 | pass
1169 | info = []
1170 | info.append(f'clip{self.i+1}: {pt} Frame:{self.frame} Pixel: {xa},{ya} {clip.format.name} ')
1171 | cf = clip.format.color_family
1172 | if cf == vs.YUV or clip.format.name == 'CompatYUY2':
1173 | info.append(f'y:{p0} u:{p1} v:{p2}')
1174 |
1175 | elif cf == vs.RGB or clip.format.name == 'CompatBGR32':
1176 | info.append(f'r:{p0} g:{p1} b:{p2}')
1177 |
1178 | elif cf == vs.GRAY:
1179 | info.append(f'y:{p0}')
1180 |
1181 | elif cf == vs.YCOCG:
1182 | info.append(f'y:{p0} Co:{p1} Cg:{p2}')
1183 |
1184 | else:
1185 | info.append(f'could not read source format, values: {p0}, {p1}, {p2}')
1186 |
1187 | #preview clip values
1188 | info.append(' preview: r:{2} g:{1} b:{0}'.format(*self.img[y][x])) #tuple is returned from numpy array: (B,G,R)
1189 | info = ''.join(info)
1190 | self.log(info)
1191 | if self.Qt:
1192 | self.print_statusBar(info)
1193 |
1194 | def get_pixel_values(self, clip, frame, x,y):
1195 | '''
1196 | returns pixel values for pixel,
1197 | p0,p1,p2 could be Y,U,V or R,G,B values
1198 | '''
1199 | try:
1200 | fr = clip.get_frame(frame)
1201 | planes =[fr.get_read_array(i) for i in range(clip.format.num_planes)]
1202 | except:
1203 | pass
1204 |
1205 | if clip.format.name == 'CompatYUY2': #Interleaved COMPATYUY2, two pixels share U and V
1206 | try: #values seem to be in 2byte packs: YU,YV, ....
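1206a |                 # Worked example (hypothetical value): if pack == 0x50EB then p0 (Y) = 0xEB and
1206b |                 # p1 = 0x50; for an even x that 0x50 is U, and V is read from the high byte of the next pack.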
1207 | pack = planes[0][y][x]
1208 | p0 = pack & 0xFF
1209 | p1 = (pack >> 8) & 0xFF
1210 | if x % 2 == 0: #got YU pack
1211 | pack = planes[0][y][x+1]
1212 | p2 = (pack >> 8) & 0xFF
1213 | else: #got YV pack
1214 | p2 = p1
1215 | pack = planes[0][y][x-1]
1216 | p1 = (pack >> 8) & 0xFF
1217 | except:
1218 | p0, p1, p2 = ('x','x','x')
1219 |
1220 | elif clip.format.name == 'CompatBGR32': #Interleaved COMPATBGR32, 1 pixel = BGRA = 4byte pack
1221 | try:
1222 | pack = planes[0][clip.height-1 - y][x] #COMPATBGR32 is vertically flipped
1223 | p2 = pack & 0xFF
1224 | p1 = (pack >> 8) & 0xFF
1225 | p0 = (pack >> 16) & 0xFF
1226 |                 #pA = (pack >> 24) & 0xFF #alpha in CompatBGR32, have not looked into whether vs can store it there or how
1227 | except:
1228 | p0, p1, p2 = ('x','x','x')
1229 |
1230 | else: #Planar videos
1231 | try: p0 = planes[0][y][x]
1232 | except: p0 = 'x'
1233 | ys = y >> clip.format.subsampling_h #chroma planes are reduced if subsampling
1234 | xs = x >> clip.format.subsampling_w
1235 | try: p1 = planes[1][ys,xs]
1236 | except: p1 = 'x'
1237 | try: p2 = planes[2][ys,xs]
1238 | except: p2 = 'x'
1239 |
1240 | return p0,p1,p2
1241 |
1242 |
1243 | def left_arrow(self):
1244 | self.frame -= 1
1245 |
1246 | def end(self):
1247 | self.frame = self.frames[1]-1
1248 |
1249 | def right_arrow(self):
1250 | self.frame += 1
1251 |
1252 |
1253 | def home(self):
1254 | self.frame = 0
1255 |
1256 | def write_image(self):
1257 | #if self.play: return #no writing during playback
1258 | if self.img_dir and not os.path.isdir(self.img_dir):
1259 | self.log('not a valid path: ', self.img_dir)
1260 | return
1261 | img_path = os.path.join(self.img_dir, self.print_clip_name() +'_{:02}_{}_frame_{:07}.png'.format(self.i+1,self.previewData[-1],self.frame))
1262 | '''
1263 |         self.img is numpy data that openCV up-scales to fit whatever window size is used,
1264 |         it needs to be upscaled before writing as well, otherwise the saved image would only be the real clip resolution (1:1, smaller).
1265 | '''
1266 | _,_,w,h = cv2.getWindowImageRect(self.title)
1267 | resized = cv2.resize(self.img, (w, h), interpolation = cv2.INTER_NEAREST)
1268 | cv2.imwrite(img_path, resized)
1269 | self.print_info(self.print_clip_name() +': {} writing image: {}'.format(self.i,img_path))
1270 |
1271 | ''' cv2.resize interpolation:
1272 | INTER_NEAREST - a nearest-neighbor interpolation
1273 | INTER_LINEAR - a bilinear interpolation (used by default)
1274 | INTER_AREA - resampling using pixel area relation. It may be a preferred method for image decimation, as it gives moire’-free results.
1275 | But when the image is zoomed, it is similar to the INTER_NEAREST method.
1276 | INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood
1277 | INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood
1278 | '''
1279 |
1280 | def write_image_1_to_1(self):
1281 | #if self.play: return #no writing during playback
1282 | if self.img_dir and not os.path.isdir(self.img_dir):
1283 | self.log('not a valid path: ', self.img_dir)
1284 | return
1285 | img_path = os.path.join(self.img_dir, self.print_clip_name() +'_{:02}__1;1_{}_frame_{:07}.png'.format(self.i+1,self.previewData[-1],self.frame))
1286 | cv2.imwrite(img_path, self.img)
1287 | self.print_info(self.print_clip_name() +': {} writing image: {}'.format(self.i,img_path))
1288 |
1289 | def help(self):
1290 | self.log(HOTKEYS_HELP)
1291 |
1292 |
1293 | def closing(self):
1294 | self.close = True
1295 |
1296 |
1297 | def validate_clips(self):
1298 | '''
1299 | clips must be single vs.VideoNode or list of vs.VideoNodes
1300 | '''
1301 | self.resolutions = []
1302 | if isinstance(self.clips_orig, vs.VideoNode):
1303 | self.clips_orig = [self.clips_orig]
1304 | elif isinstance(self.clips_orig, list):
1305 | for clip in self.clips_orig:
1306 | if not isinstance(clip, vs.VideoNode):
1307 | raise ValueError("[Preview] Input needs to be a clip or list of clips!")
1308 | self.resolutions.append((clip.width, clip.height))
1309 | else:
1310 | raise ValueError("[Preview] Input needs to be a clip or list of clips")
1311 | if len(self.clips_orig)>9:
1312 | self.log("more than 9 clips")
1313 |
1314 |
1315 |
1316 | def validate_frames(self):
1317 | '''
1318 | default is the whole clip length, if not specified
1319 | '''
1320 | if isinstance(self.frames, vs.VideoNode): #clip loaded as frames argument
1321 | raise TypeError("[Preview] clip arguments must be enclosed in a list, example: Preview([clip1,clip2])")
1322 |
1323 | if self.frames is None:
1324 | self.frames = [0,len(self.clips_orig[0])]
1325 | try:
1326 | s = self.frames[0]
1327 | e = self.frames[1]-1
1328 | self.clips_orig[0][s]
1329 | self.clips_orig[0][e]
1330 | except:
1331 | self.log("wrong 'frames', must be a list of two integers within clip's range")
1332 | self.log("defaulting to frames = [0,{}]".format(len(self.clips_orig[0])))
1333 | self.frames = [0,len(self.clips_orig[0])]
1334 |
1335 |
1336 | def validate_delay(self):
1337 | if self.delay is None:
1338 | self.delay = self.clips_orig[0].fps_den/self.clips_orig[0].fps_num
1339 |
1340 | else:
1341 | try:
1342 | self.delay = self.delay/1000
1343 | except:
1344 |                 raise TypeError(f"[Preview] wrong delay: {self.delay}, it has to be an integer, milliseconds between frames")
1345 | self.delay = abs(self.delay)
1346 |
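1346a |     # Worked example: a 24000/1001 fps clip gives a default delay of 1001/24000 ≈ 0.0417 s per frame,
1346b |     # while an explicit delay=40 (milliseconds) becomes 0.04 s.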
1347 |
1348 | def validate_img_dir(self):
1349 | if self.img_dir is None:
1350 | self.img_dir = os.path.dirname(os.path.abspath(__file__))
1351 |
1352 | if self.img_dir and not os.path.isdir(self.img_dir):
1353 | raise ValueError('[Preview] not a valid directory path: ', self.img_dir)
1354 |
1355 | def validate_matrix(self):
1356 | '''
1357 |         Has to be the same matrix_in_s string value as used in Vapoursynth scripts.
1358 |         If more clips are loaded, this value overrides all of them,
1359 |         so if the clips do not share the same color space, rather specify matrix_in_s in the Vapoursynth script, not here.
1360 | '''
1361 | if self.matrix_in_s and not self.matrix_in_s in Conversions.MATRIX_USABLE.values():
1362 | self.log(f"\nWarning, matrix_in_s argument: '{self.matrix_in_s}', is not usable")
1363 | self.log("usable values:\n{}\n".format([x for x in Conversions.MATRIX_USABLE.values()]))
1364 | self.matrix_in_s = None
1365 |
1366 |
1367 | def validate_kernel(self):
1368 | try:
1369 | getattr(vs.core.resize, self.kernel)
1370 | except:
1371 | raise ValueError(f"[Preview] wrong kernel argument: '{self.kernel}', valid resize arguments:\n'Point','Bicubic','Lanczos','Spline16','Spline36'")
1372 |
1373 |
1374 | def validate_position(self):
1375 | '''
1376 | left, top corner of Preview window
1377 | needs to be tuple or list with two integers
1378 | '''
1379 | if isinstance(self.position, tuple) or isinstance(self.position, list):
1380 | if not isinstance(self.position[0], int) or not isinstance(self.position[1], int):
1381 | raise ValueError('[Preview] position argument for Preview window must be tuple with two integers, example: position=(60,60)')
1382 |
1383 | else:
1384 | raise ValueError("[Preview] 'position' argument for Preview window must be tuple, example: position=(60,60)")
1385 |
1386 |
1387 | def validate_preview_dimensions(self):
1388 | '''
1389 | preview window dimensions in pixels on screen, not video resolution,
1390 | clips do not get resized
1391 | if None, self.clips_orig[0].width and self.clips_orig[0].height are used, real first clip width and height
1392 | '''
1393 | for string in ['width', 'height']:
1394 | var = getattr(self, 'init_preview_' + string)
1395 | if var is None:
1396 | setattr(self, 'init_preview_' + string, getattr(self.clips_orig[0], string))
1397 | else:
1398 | if not (isinstance(var, int) and var > 1 and var <=20000):
1399 | raise ValueError(f"[Preview] 'preview_{string}' argument must be positive integer and less than 20000")
1400 |
1401 |
1402 | def validate_mod(self, modx, mody):
1403 | '''
1404 |         if mods are not specified, the defaults are modx=2 and mody=2,
1405 |         then subsampling is checked and the mods may be adjusted, because
1406 |         this must hold true, otherwise crop in Vapoursynth might fail with real YUV clips:
1407 |         modx >= (1 << clip.format.subsampling_w)
1408 |         mody >= (1 << clip.format.subsampling_h)
1409 |
1410 |         the maximum subsampling over all clips is also collected as modx_subs, mody_subs,
1411 |         because the first point of a crop must follow subsampling, not modx or mody
1412 |
1413 |         the maximum of subsampling and mods is used for the crop, unless for example ignore_subsampling=True, mod_x=1, mod_y=1,
1414 |         which would crop to any size, ignoring subsampling. If as a result the crop would fail in real Vapoursynth,
1415 |         it still prints the crop line but appends a note: #fails in vs
1416 | '''
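1416a |         # Worked example: for 4:2:0 clips subsampling_w == subsampling_h == 1, so 1 << 1 == 2 and
1416b |         # mod_x/mod_y are raised to at least 2; a 4:4:4 clip (subsampling 0) allows mods of 1.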
1417 |
1418 | self.log('[Preview.validate_mod]')
1419 | if not isinstance(modx, int):
1420 |             raise TypeError(f"[Preview] wrong 'mod_x' argument: {modx}, it has to be an integer, usual values are 2,4,8 or 16")
1421 | if not isinstance(mody, int):
1422 |             raise TypeError(f"[Preview] wrong 'mod_y' argument: {mody}, it has to be an integer, usual values are 2,4,8 or 16")
1423 | modx=abs(modx)
1424 | mody=abs(mody)
1425 | old_modx = modx
1426 | old_mody = mody
1427 | s_w_max = 1
1428 | s_h_max = 1
1429 | modx_subs, mody_subs = (1,1)
1430 | self.log(f'video resolution mod_x={modx}')
1431 | self.log(f'video resolution mod_y={mody}')
1432 | respect_x_subs = RESPECT_X_SUBSAMPLING
1433 | respect_y_subs = RESPECT_Y_SUBSAMPLING
1434 | self.log(f'global RESPECT_X_SUBSAMPLING is {RESPECT_X_SUBSAMPLING}')
1435 | self.log(f'global RESPECT_Y_SUBSAMPLING is {RESPECT_Y_SUBSAMPLING}')
1436 | if self.ignore_subsampling:
1437 | respect_x_subs = False
1438 | respect_y_subs = False
1439 | self.log('subsampling_w, subsampling_h for clips:')
1440 | for i , is_error in enumerate(self.rgbs_error):
1441 | if is_error:
1442 | self.log(f' clip{i+1}: loaded with error')
1443 | else:
1444 | s_w = 1 << self.clips_orig[i].format.subsampling_w
1445 | s_w_max = max(s_w_max, s_w)
1446 | if respect_x_subs:
1447 | modx = max(modx, s_w)
1448 | modx_subs = max(modx_subs, s_w)
1449 | s_h = 1 << self.clips_orig[i].format.subsampling_h
1450 | s_h_max = max(s_h_max, s_h)
1451 | if respect_y_subs:
1452 | mody = max(mody, s_h)
1453 | mody_subs = max(mody_subs, s_h)
1454 | self.log(f' clip{i+1}: {s_w}, {s_h} {self.clips_orig[i].format.name}')
1455 | if self.ignore_subsampling:
1456 | self.log('ignore_subsampling=True (both x and y)')
1457 | if not (s_w_max == 1 and s_h_max == 1):
1458 | self.log(' user overriding global settings to also select points within subsampling'
1459 | '\n which gives error if using that cropping line in vapoursynth')
1460 | else:
1461 | self.log(' no loaded clip has subsampling anyway')
1462 | else:
1463 | self.log('ignore_subsampling=False')
1464 | self.log('script evaluation for crop selection:')
1465 | if respect_x_subs and respect_y_subs:
1466 | if modx != old_modx: self.log(f' mod_x={modx}, that is correction of mod_x={old_modx}, cannot be less than subsampling')
1467 | if mody != old_mody: self.log(f' mod_y={mody}, that is correction of mod_y={old_mody}, cannot be less than subsampling')
1468 |
1469 | elif not respect_x_subs and respect_y_subs:
1470 | modx_subs = 1
1471 | if mody != old_mody: self.log(f' mod_y={mody}, that is correction of mod_y={old_mody}, cannot be less than subsampling')
1472 |
1473 | elif respect_x_subs and not respect_y_subs:
1474 | mody_subs = 1
1475 | if modx != old_modx: self.log(f' mod_x={modx}, that is correction of mod_x={old_modx}, because it must be >= {modx}')
1476 |
1477 | elif not respect_x_subs and not respect_y_subs:
1478 | modx_subs, mody_subs = 1, 1
1479 |
1480 | self.log(f' subsampling mods: {modx_subs}, {mody_subs} while selecting crop area, first selected point snaps to those mods')
1481 | self.log(f' video resolution mods: {modx}, {mody} while drawing crop area, width and height are snapping to those mods')
1482 |
1483 | return modx, mody, modx_subs, mody_subs
1484 |
1485 |
1486 | def validate_boolean(self, dictionary):
1487 | for key, value in dictionary.items():
1488 | if not isinstance(value, bool):
1489 | raise TypeError(f"[Preview] wrong '{key}' argument: '{value}', it has to be boolean: True or False")
1490 |
1491 |
1492 | def limit_cache(self, given_cache, avail):
1493 | if avail >= given_cache:
1494 | return given_cache
1495 | deduc = 0
1496 | if avail < 200:
1497 | deduc = 50
1498 |             self.log('almost no RAM ({}MB available), preview and system likely to freeze'.format(avail))
1499 |
1500 | elif avail >=200 and avail < 400:
1501 | deduc = 100
1502 |             self.log('not much RAM at all ({}MB available), freezing likely, laggy preview'.format(avail))
1503 |
1504 | elif avail >= 400 and avail < 1024:
1505 | deduc = 200
1506 |             self.log('more RAM ({}MB available) would give better performance'.format(avail))
1507 |
1508 | elif avail >= 1024 and avail < 1536:
1509 | deduc = 380
1510 |
1511 | else:
1512 | deduc = 450
1513 |
1514 | new_cache = avail - deduc
1515 | return new_cache if new_cache < given_cache else given_cache
1516 |
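1516a |     # Worked example: given_cache=1000 and avail=800 falls into the 400..1024 band, so deduc=200
1516b |     # and the returned cache is min(800 - 200, 1000) = 600 (values in MB).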
1517 |
1518 | def crop_to_previous(self):
1519 | '''
1520 | zoom-out when 'Esc' is pressed
1521 | previous index data from self.previewData are used to set proper core.std.cropAbs() from original clip'''
1522 |
1523 | if len(self.previewData) == 1: #fully zoomed out already
1524 | return
1525 |
1526 | width, height, left, top = self.previewData[-2]
1527 |
1528 | try:
1529 | for i, rgb in enumerate(self.rgbs_orig):
1530 | self.rgbs[i] = core.std.CropAbs(rgb, width, height, left, top)
1531 | except:
1532 | self.log(f'{self.print_clip_name()}: {self.i+1} preview return failed')
1533 |
1534 | else:
1535 | if self.isCropping:
1536 |                 #update selection values from previous crop to become current crop
1537 | x_abs,y_abs = self.get_absolute_offsets(self.x1, self.y1)
1538 | self.x1 = x_abs - self.previewData[-2][2]
1539 | self.y1 = y_abs - self.previewData[-2][3]
1540 | self.x2 = self.x1 + self.width
1541 | self.y2 = self.y1 + self.height
1542 | self.w = self.rgbs[self.i].width
1543 | self.h = self.rgbs[self.i].height
1544 | del self.previewData[-1]
1545 | self.print_info(self.cropping_line_text(self.width, self.height, x_abs,y_abs))
1546 | else:
1547 | del self.previewData[-1]
1548 | self.print_info(self.cropping_line_text(width, height, left, top))
1549 | self.width, self.height, self.left, self.top = width, height, left, top
1550 |
1551 |
1552 | #self.redraw_window()
1553 |
1554 | def crop_to_new(self,width, height, left, top):
1555 | '''
1556 | zoom in or crop
1557 | crop preview to new width and height and store data into self.previewData
1558 | '''
1559 |
1560 | try:
1561 | for i, rgb in enumerate(self.rgbs_orig):
1562 | self.rgbs[i] = core.std.CropAbs(rgb, width, height, left, top)
1563 | except:
1564 | self.log(f'{self.print_clip_name()}: {self.i+1} preview return failed')
1565 |
1566 | else:
1567 | self.previewData.append([width,height,left,top])
1568 | self.print_info(self.cropping_line_text(width, height, left, top))
1569 |
1570 |
1571 | def reset_preview(self):
1572 | try:
1573 | for i, rgb in enumerate(self.rgbs_orig):
1574 | self.rgbs[i] = rgb
1575 | except:
1576 | self.log(f'clip: {self.i+1} preview reset failed in clip or one of clips')
1577 | else:
1578 | self.isCropping = False
1579 | self.redraw_window()
1580 | self.previewData_reset()
1581 | self.print_info(self.cropping_line_text(*self.previewData[0]))
1582 |
1583 |
1584 | def previewData_reset(self):
1585 | '''
1586 | creating first list index as original rgb data: width, height, left, top
1587 | '''
1588 | self.previewData = [[self.rgbs_orig[0].width, self.rgbs_orig[0].height , 0, 0]]
1589 |
1590 | def redraw_window(self):
1591 | '''
1592 |         not calling redraw_window() keeps the original window size after a crop or zoom,
1593 |         calling it redraws the window to 1:1 after a crop or zoom, so the window gets smaller
1594 | '''
1595 | self.placement = cv2.getWindowImageRect(self.title)
1596 | if self.fullscreen:
1597 | self.redraw_fullscreen()
1598 | else:
1599 | self.redraw_normal_screen(reset=True)
1600 |
1601 |
1602 | def get_absolute_offsets(self, relative_x , relative_y):
1603 | '''
1604 | absolute current offsets are stored in self.previewData,
1605 | [-1] means last index in list,
1606 |         current self.left and self.top are the third and fourth items, so using [2] and [3]
1607 | '''
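1607a |         # Worked example (hypothetical crop): with the last previewData entry [width, height, left=128, top=64],
1607b |         # a relative click at (10, 20) maps to absolute source coordinates (138, 84).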
1608 | return self.previewData[-1][2] + relative_x, self.previewData[-1][3] + relative_y
1609 |
1610 |
1611 | def cropping_line_text(self, w,h,l,t):
1612 | s_w = 1 << self.clips_orig[self.i].format.subsampling_w
1613 | s_h = 1 << self.clips_orig[self.i].format.subsampling_h
1614 | if w % s_w == 0 and h % s_h == 0 and l % s_w == 0 and t % s_h == 0 and w != 0 and h != 0:
1615 | check = ''
1616 | self.color = self.good_c
1617 | else:
1618 | check = ' #fails in vs'
1619 | self.color = self.bad_c
1620 | info = (f'{self.print_clip_name()}{self.i+1} = '
1621 | f'core.std.CropAbs(clip{self.i+1}, width={w}, height={h}, left={l}, top={t}) ')
1622 | if len(self.previewData) > 1 or self.isCropping:
1623 | info += f'#mods({self.modx},{self.mody}) #subs({self.modx_subs},{self.mody_subs}){check}'
1624 | return info
1625 |
1626 | def print_clip_name(self):
1627 | if self.isCropping:
1628 | return 'selection_in_clip'
1629 |
1630 | if len(self.previewData) >1:
1631 | return 'cropped_clip'
1632 | else:
1633 | return 'clip'
1634 |
1635 | def freeRAM(self):
1636 | '''
1637 | getting free RAM
1638 |         first it tries the non-standard, cross-platform psutil library,
1639 |         if that module is not installed, it falls back to Linux or Windows ways of getting free RAM,
1640 |         so for Mac it needs psutil; to install it: pip3 install psutil
1641 | '''
1642 |
1643 | avail = None
1644 |
1645 | #cross platform try if psutil is installed
1646 | try:
1647 |             mem = psutil.virtual_memory()
1648 | avail = int(mem.available/1024/1024)
1649 | if avail and isinstance(avail, int):
1650 | return avail
1651 | except:
1652 | pass
1653 |
1654 | #linux fallback
1655 | try:
1656 | meminfo = dict((i.split()[0].rstrip(':'),int(i.split()[1])) for i in open('/proc/meminfo').readlines())
1657 | avail = int(meminfo['MemAvailable']/1024)
1658 | if avail and isinstance(avail, int):
1659 | return avail
1660 | except:
1661 | pass
1662 |
1663 | #windows fallback
1664 | try:
1665 | proc = os.popen('wmic.exe OS get FreePhysicalMemory')
1666 | l = proc.readlines() #l should be a list
1667 | proc.close()
1668 | except:
1669 | pass
1670 |
1671 | else:
1672 | for i, item in enumerate(l):
1673 | try:
1674 | avail = int(l[i])
1675 | avail = int(avail/1024)
1676 | if avail and isinstance(avail, int):
1677 | return avail
1678 | except:
1679 | pass
1680 |
1681 | #failed to get free RAM
1682 |         self.log("[freeRAM] Install psutil (pip3 install psutil) to get free RAM for limiting cache")
1683 | self.log("[freeRAM] psutil is needed to get free RAM on Mac") #or add some fallback code for Mac here that works
1684 |
1685 | class Conversions:
1686 |
1687 | MATRIX_USABLE = {
1688 | 1 : '709' ,
1689 | 4 : 'fcc' ,
1690 | 5 : '470bg' ,
1691 | 6 : '170m' ,
1692 | 7 : '240m' ,
1693 | 8 : 'ycgco' ,
1694 | 9 : '2020ncl' ,
1695 | 10 : '2020cl' ,
1696 | 12 : 'chromancl',
1697 |         13 : 'chromacl' ,
1698 | 14 : 'ictcp' }
1699 |
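1699a |     # Usage sketch (values from the table above): MATRIX_USABLE.get(1) returns '709',
1699b |     # while MATRIX_USABLE.get(2) returns None, since 2 (unspecified) has no usable string.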
1700 | def getMatrix(self, clip=None, matrix_in_s=None):
1701 | '''
1702 |         lots of logging, so it's a bit wild with ifs,
1703 |         the intent is to make clear and print what actually happens, why the matrix was selected as it was etc...
1704 | '''
1705 | matrix_in = None
1706 | log = ''
1707 | if not isinstance(clip, vs.VideoNode):
1708 | log = "[getMatrix] input needs to be a clip"
1709 | return None, matrix_in_s, log
1710 |
1711 | if clip.format.color_family != vs.YUV:
1712 | return matrix_in, matrix_in_s, log
1713 |
1714 | if matrix_in_s:
1715 | if matrix_in_s not in self.MATRIX_USABLE.values():
1716 | log = "[getMatrix] input argument {} is not usable as matrix_in_s\n".format(matrix_in_s)
1717 | matrix_in_s = None
1718 | else:
1719 | log = "[getMatrix] matrix_in_s argument override, matrix_in_s = '{}'\n".format(matrix_in_s)
1720 | return matrix_in, matrix_in_s, log
1721 |
1722 | else:
1723 | try:
1724 | matrix_in = clip.get_frame(0).props['_Matrix']
1725 | except:
1726 | log = "[getMatrix] _Matrix NOT found in props\n"
1727 | if matrix_in:
1728 | try:
1729 | matrix_in_s = self.MATRIX_USABLE[matrix_in]
1730 | if matrix_in_s not in self.MATRIX_USABLE.values():
1731 | matrix_in_s = None
1732 | log = "[getMatrix] _Matrix={} is not usable\n".format(matrix_in)
1733 | except:
1734 | matrix_in_s = None
1735 | log = "[getMatrix] _Matrix={} is not usable\n".format(matrix_in)
1736 |
1737 | if matrix_in_s:
1738 | log = "[getMatrix] _Matrix={0}, using matrix_in={0}/matrix_in_s = '{1}'\n".format(matrix_in, matrix_in_s )
1739 | else:
1740 | if clip.width <= 1024 and clip.height <= 480: matrix_in, matrix_in_s = (6, '170m')
1741 | elif clip.width <= 1024 and clip.height <= 576: matrix_in, matrix_in_s = (5, '470bg')
1742 | else: matrix_in, matrix_in_s = (1, '709')
1743 | log += "[getMatrix] defaulting, which could be wrong, to matrix_in_s = '{}'\n".format(matrix_in_s)
1744 |
1745 | return matrix_in, matrix_in_s, log
1746 |
1747 |
1748 | def toRGB(self, clip=None, matrix_in_s=None, depth=None, kernel=None, sample_type=None):
1749 | '''
1750 | yuv to rgb conversion
1751 |         there are a bunch of YUV to RGB scripts out there, but I needed my own so it prints what it actually does,
1752 |         whether it defaults the matrix or not, prints the actual conversion line etc...
1753 |
1754 | returns rgb clip and log,
1755 | returns None and log, if conversion fails,
1756 | defaults: input is limited range, RGB full range, and output is full RGB range
1757 |
1758 | matrix_in_s, same as in Vapoursynth
1759 | kernel, same as Vapoursynth attribute values
1760 |
1761 |         sample type is only relevant with 16bit, otherwise it is ignored; less than 16bit must be INTEGER and 32bit is FLOAT
1762 | sample_type = None , same as input clip
1763 | = 0 or vs.INTEGER
1764 | = 1 or vs.FLOAT
1765 | depth = None converts all clips into RGB with the same bit depth as original,
1766 | if that bit depth is not pre-registered (like 8,9,10,16 are), rgb depth will be registered (11,12,13,14,15),
1767 | if clip is 32bit floating point, it will convert to RGBS 32bit floating point
1768 |
1769 | depth = 8,9,10,11,12,13,14,15,16 or 32 #same as None, except bit depth is given, not taking it from original clip
1770 | '''
1771 |
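1771a |         # Usage sketch (assuming a YUV VideoNode named clip):
1771b |         #   rgb, log = Conversions().toRGB(clip, matrix_in_s='709', depth=8, kernel='Bicubic')
1771c |         #   if rgb is None: print(log)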
1772 | if not isinstance(clip, vs.VideoNode):
1773 | return None, '[toRGB] input needs to be a vapoursynth VideoNode/clip'
1774 |
1775 | log=''
1776 |
1777 | #bitdepth
1778 | depth_in = clip.format.bits_per_sample
1779 | if not depth:
1780 | depth = depth_in
1781 | else:
1782 | if not isinstance(depth, int) or depth < 8 or (depth > 16 and depth != 32):
1783 | depth = depth_in
1784 |                 log += '[toRGB] depth must be an integer in the 8 to 16 range, or 32\n'
1785 | log += '[toRGB] defaulting to clips bit depth: {}bit\n'.format(depth_in)
1786 |
1787 | #sample_type output
1788 | if sample_type == 0: sample_out = vs.INTEGER
1789 | elif sample_type == 1: sample_out = vs.FLOAT
1790 | elif not sample_type:
1791 | sample_out = clip.format.sample_type
1792 | else:
1793 | #sample type was defined wrong
1794 | if depth == 16:
1795 | if clip.format.sample_type == vs.FLOAT:
1796 | string = 'vs.FLOAT'
1797 | else: string = 'vs.INTEGER'
1798 | log += '[toRGB] wrong sample type argument, using {}, same as the clip for 16bit output\n'.format(string)
1799 | sample_out = clip.format.sample_type
1800 | elif depth == 32:
1801 | sample_out = vs.FLOAT
1802 | log += '[toRGB] sample type arg ignored, must be vs.FLOAT for 32bit\n'
1803 | else:
1804 | sample_out = vs.INTEGER
1805 | log += '[toRGB] sample type arg ignored, must be vs.INTEGER for less than 16bit output\n'
1806 | #sample_type correction
1807 | if depth < 16 and sample_out == vs.FLOAT:
1808 | sample_out = vs.INTEGER
1809 | log += '[toRGB] wrong sample type argument, using vs.INTEGER for less than 16bit\n'
1810 | if depth == 32 and sample_out == vs.INTEGER:
1811 | sample_out = vs.FLOAT
1812 | log += '[toRGB] wrong sample type argument, only vs.FLOAT for 32 bit\n'
1813 |
1814 | #RGB output in string for getattr()
1815 | RGBattr = f'RGB{depth*3}'
1816 | try:
1817 | getattr(vs, RGBattr)
1818 | except:
1819 | if depth != 32:
1820 | setattr(vs, RGBattr, core.register_format(vs.ColorFamily.RGB, vs.INTEGER, depth, 0, 0).id)
1821 | log += '[toRGB] setting new vapoursynth attribute {}\n'.format(RGBattr)
1822 | if depth == 32:
1823 | RGBattr = 'RGBS'
1824 | if depth == 16 and sample_out==vs.FLOAT:
1825 | RGBattr = 'RGBH'
1826 |
1827 | #resize kernel
1828 | if kernel == None:
1829 | log += "[toRGB] Defaulting to: kernel = 'Bicubic'\n"
1830 | kernel = 'Bicubic'
1831 | else:
1832 | try:
1833 | getattr(vs.core.resize, kernel)
1834 | except:
1835 | log += "[toRGB] Wrong kernel, '{}', defaulting to: kernel = 'Bicubic'\n".format(kernel)
1836 | kernel = 'Bicubic'
1837 |
1838 | #matrix_in_s
1839 | if clip.format.color_family == vs.YUV:
1840 | matrix_in, matrix_in_s, matrix_log = self.getMatrix(clip, matrix_in_s)
1841 | log = log + matrix_log
1842 |
1843 | else: matrix_in_s = None
1844 |
1845 | #attributes needed for conversion
1846 | _resize = getattr(vs.core.resize, kernel)
1847 | format_out = dict(format = getattr(vs, RGBattr))
1848 |
1849 |         #variables for printing and logging, otherwise they have no purpose in the conversion
1850 | name = clip.format.name
1851 | if matrix_in_s: inMatrix = "matrix_in_s = '{}',".format(matrix_in_s)
1852 | else: inMatrix = ''
1853 | if clip.format.sample_type == 0: inSample = 'INTEGER'
1854 | elif clip.format.sample_type == 1: inSample = 'FLOAT'
1855 | if sample_out == vs.INTEGER : outSample = 'vs.INTEGER'
1856 | else: outSample = 'vs.FLOAT'
1857 |
1858 | #zimg conversion
1859 | try:
1860 | log += '[toRGB] zimg conversion, {}bit {} {} to {}bit {} RGB:\n'.format(depth_in, inSample, name, depth, outSample)
1861 | if matrix_in_s : clip = _resize(clip, **format_out, matrix_in_s = matrix_in_s)
1862 | else: clip = _resize(clip, **format_out)
1863 | log += "[toRGB] vs.core.resize.{}(clip, {} format = vs.{})\n".format(kernel, inMatrix, RGBattr)
1864 | except:
1865 | log += log_err()
1866 | log += '[toRGB] conversion failed'
1867 | return None, log
1868 |
1869 | return clip, log
1870 |
1871 |
1872 |
1873 | if __name__ == '__main__':
1874 |
1875 | file='video.mp4'
1876 | clip = core.ffms2.Source(file)
1877 | Preview([clip])
1878 |
--------------------------------------------------------------------------------