├── .flake8 ├── .github └── dependabot.yml ├── .gitignore ├── .mypy.ini ├── LICENSE ├── README.md ├── examples ├── example_aa.py ├── example_debug_output.py ├── example_mask.py └── example_ocr.py ├── requirements.txt ├── setup.cfg ├── setup.py └── vardefunc ├── __init__.py ├── mask.py ├── misc.py ├── noise.py ├── ocr.py ├── py.typed ├── scale.py ├── types.py ├── util.py └── vsjet_proxy.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | count = True 3 | ignore = W503 4 | max-line-length = 140 5 | max-doc-length = 200 6 | max-complexity = 10 7 | exclude = stubs/* 8 | show-source = True 9 | statistics = True 10 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "pip" 9 | directory: "/" 10 | schedule: 11 | interval: "daily" 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vspreview 2 | .vscode/ 3 | .mypy_cache/ 4 | __pycache__ 5 | 6 | 7 | build/ 8 | dist/ 9 | vardefunc.egg-info/ 10 | 11 | test_files/ 12 | upload_to_pip 13 | 14 | .vsjet/ 15 | test.* 16 | -------------------------------------------------------------------------------- /.mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | python_version = 3.11 3 | 4 | ignore_missing_imports = False 5 | 6 | disallow_any_generics = False 7 | 8 | disallow_untyped_defs = True 9 | disallow_incomplete_defs = True 10 | check_untyped_defs = True 11 | disallow_untyped_decorators = False 12 | 13 | no_implicit_optional = True 14 | strict_optional = True 15 | 16 | warn_redundant_casts = True 17 | warn_unused_ignores = False 18 | warn_no_return = True 19 | warn_return_any = True 20 | warn_unreachable = True 21 | 22 | ignore_errors = False 23 | 24 | allow_untyped_globals = False 25 | allow_redefinition = False 26 | implicit_reexport = True 27 | strict_equality = True 28 | 29 | show_error_context = False 30 | show_column_numbers = True 31 | show_error_codes = True 32 | color_output = True 33 | error_summary = True 34 | pretty = True 35 | 36 | plugins = numpy.typing.mypy_plugin 37 | 38 | [mypy-cytoolz.*] 39 | ignore_errors = True 40 | 41 | [mypy-_pytest.*] 42 | ignore_errors = True 43 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Vardë 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice 
and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | # vardefunc
 2 | Vardë's VapourSynth functions
 3 | 
 4 | # How to install vardefunc
 5 | If you have the old `vardefunc.py` module, please remove it from your system first.
 6 | 
 7 | You can now install `vardefunc` with the following command:
 8 | ```
 9 | python -m pip install vardefunc
10 | ```
11 | or from GitHub:
12 | ```
13 | python -m pip install git+https://github.com/Ichunjo/vardefunc.git
14 | ```
15 | 
--------------------------------------------------------------------------------
/examples/example_aa.py:
--------------------------------------------------------------------------------
1 | import vardefunc as vdf
2 | import vapoursynth as vs
3 | 
4 | core = vs.core
5 | 
6 | # TODO
--------------------------------------------------------------------------------
/examples/example_debug_output.py:
--------------------------------------------------------------------------------
 1 | import vapoursynth as vs
 2 | from vardefunc.misc import DebugOutput
 3 | from vsutil import split
 4 | 
 5 | core = vs.core
 6 | 
 7 | # Import your clip
 8 | SOURCE = core.std.BlankClip(format=vs.YUV420P16)
 9 | 
10 | 
11 | # Initialise the DebugOutput object
12 | DEBUG = DebugOutput(SOURCE, props=7, num=9, scale=1)
13 | # DEBUG = DebugOutput((0, SOURCE))
14 | # DEBUG = DebugOutput(source=SOURCE)
15 | # DEBUG = DebugOutput(source=(0, SOURCE))
16 | # DEBUG = DebugOutput()
17 | 
18 | 
19 | @DEBUG.catch  # Catch the output of main_filter(). Here it's the grained clip
20 | def main_filter() -> vs.VideoNode:
21 |     debug = DEBUG
22 |     src = SOURCE
23 | 
24 |     den = denoise(src)
25 |     debug <<= den  # <<= appends the denoised clip at the next index after the current biggest
26 | 
27 |     db = deband(den)
28 |     debug <<= dict(deband=db)  # Add the debanded clip as a named output, appended the same way
29 | 
30 |     grained = grain(db)
31 |     debug <<= split(grained)  # Add the planes of the grained clip, appended the same way
32 | 
33 |     return grained
34 | 
35 | 
36 | def denoise(clip: vs.VideoNode) -> vs.VideoNode:
37 |     ...
38 | 
39 | 
40 | def deband(clip: vs.VideoNode) -> vs.VideoNode:
41 |     ...
42 | 
43 | 
44 | def grain(clip: vs.VideoNode) -> vs.VideoNode:
45 |     ...
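# The three functions above are intentionally left as stubs. Below is a hedged
# sketch of what they could contain; the plugin calls (knlm.KNLMeansCL,
# neo_f3kdb.Deband, grain.Add) are assumptions that depend on the plugins
# installed on your system, and every value is purely illustrative:
#
# def denoise(clip: vs.VideoNode) -> vs.VideoNode:
#     return clip.knlm.KNLMeansCL(h=0.4)
#
# def deband(clip: vs.VideoNode) -> vs.VideoNode:
#     return clip.neo_f3kdb.Deband(range=17, y=36, cb=24, cr=24)
#
# def grain(clip: vs.VideoNode) -> vs.VideoNode:
#     return clip.grain.Add(var=0.3, constant=False)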
46 | 47 | 48 | 49 | if __name__ == '__main__': 50 | pass 51 | else: 52 | filtered = main_filter() 53 | -------------------------------------------------------------------------------- /examples/example_mask.py: -------------------------------------------------------------------------------- 1 | import vapoursynth as vs 2 | from vardefunc.mask import FreyChen 3 | 4 | core = vs.core 5 | 6 | # Import your clip 7 | SOURCE = core.std.BlankClip(format=vs.YUV420P16) 8 | 9 | 10 | def filtering() -> vs.VideoNode: 11 | clip = SOURCE 12 | # Use a EdgeDetect mask 13 | mask = FreyChen().get_mask(clip, lthr=4000, hthr=8000, multi=1.5) 14 | return mask 15 | 16 | 17 | if __name__ == '__main__': 18 | pass 19 | else: 20 | filtered = filtering() 21 | filtered.set_output(0) 22 | -------------------------------------------------------------------------------- /examples/example_ocr.py: -------------------------------------------------------------------------------- 1 | import vapoursynth as vs 2 | from vardefunc.ocr import OCR 3 | from vsutil import get_y 4 | 5 | core = vs.core 6 | 7 | # Import your clip 8 | SOURCE = core.std.BlankClip(format=vs.YUV410P8) 9 | 10 | 11 | def ocring() -> None: 12 | clip = SOURCE 13 | 14 | ocr = OCR(get_y(clip), (1900, 125, 70), coord_alt=(1500, 125, 70)) 15 | ocr.preview_cropped.set_output(0) 16 | ocr.preview_cleaned.set_output(1) 17 | 18 | ocr.launch(datapath=r'C:\Users\Varde\AppData\Roaming\VapourSynth\plugins64\tessdata', language='fra+eng') 19 | ocr.write_ass( 20 | 'output.ass', 21 | [('_', '-'), ('…', '...'), ('‘', "'"), ('’', "'"), (" '", "'"), 22 | ('—', '-'), ('- ', '– '), ('0u', 'Ou'), ('Gomme', 'Comme'), ('A ', 'À '), 23 | ('II', 'Il'), ('ees', 'ces'), ('@', 'O'), ('oe', 'œ'), ('téte', 'tête')] 24 | ) 25 | 26 | 27 | if __name__ == '__main__': 28 | ocring() 29 | else: 30 | ocring() 31 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | lvsfunc>=0.8.1 2 | numpy>=2.0.1 3 | pytimeconv>=0.0.2 4 | VapourSynth>=65 5 | vsscale>=2.0.1 6 | vsaa>=1.9.1 7 | vskernels>=3.1.0 8 | vsrgtools>=1.6.2 9 | vsmasktools>=1.2.1 10 | vsexprtools>=1.5.1 11 | vstools>=3.1.0 -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description_file=README.md 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | 4 | with open("README.md", encoding='utf-8') as fh: 5 | long_description = fh.read() 6 | 7 | with open("requirements.txt", encoding='utf-8') as fh: 8 | install_requires = fh.read() 9 | 10 | NAME = "vardefunc" 11 | VERSION = "0.10.0" 12 | 13 | setup( 14 | name=NAME, 15 | version=VERSION, 16 | author="Vardë", 17 | author_email="ichunjo.le.terrible@gmail.com", 18 | description="Vardë's Vapoursynth functions", 19 | long_description=long_description, 20 | long_description_content_type="text/markdown", 21 | url="https://github.com/Ichunjo/vardefunc", 22 | packages=["vardefunc"], 23 | package_data={ 24 | 'vardefunc': ['py.typed'], 25 | }, 26 | install_requires=install_requires, 27 | python_requires=">=3.11", 28 | zip_safe=False, 29 | classifiers=[ 30 | "Intended Audience :: Developers", 31 | "Programming Language :: Python :: 3", 32 | "License :: OSI Approved :: MIT 
License", 33 | "Operating System :: OS Independent", 34 | ], 35 | ) 36 | -------------------------------------------------------------------------------- /vardefunc/__init__.py: -------------------------------------------------------------------------------- 1 | """Some cool functions""" 2 | 3 | # flake8: noqa 4 | from . import mask, misc, noise, scale, types, util 5 | from .mask import * 6 | from .misc import * 7 | from .noise import * 8 | from .ocr import * 9 | from .scale import * 10 | from .types import * 11 | from .util import * 12 | from .vsjet_proxy import * 13 | -------------------------------------------------------------------------------- /vardefunc/mask.py: -------------------------------------------------------------------------------- 1 | """Random masking functions""" 2 | 3 | __all__ = [ 4 | 'cambi_mask', 5 | ] 6 | 7 | from typing import Any 8 | 9 | from vsexprtools import average_merge 10 | from vskernels import Bilinear, Scaler, ScalerT 11 | from vsrgtools import box_blur 12 | from vstools import DitherType, VSFunction, core, depth, get_depth, vs 13 | 14 | 15 | def cambi_mask( 16 | clip: vs.VideoNode, 17 | scale: int = 1, 18 | merge_previous: bool = True, 19 | blur_func: VSFunction = lambda clip: box_blur(clip, 2, 3, planes=0), 20 | scaler: ScalerT = Bilinear, 21 | **cambi_args: Any 22 | ) -> vs.VideoNode: 23 | """Generate a deband mask 24 | 25 | :param clip: Input clip 26 | :param scale: 0 <= i < 5, defaults to 1 27 | :param merge_previous: Will merge the GRAYS cscore frame stored as frame property for scale 0 <= i < 5, defaults to True 28 | :param blur_func: A bluring function called on the mask, defaults to lambdaclip:box_blur(clip, 2, 3, 0) 29 | :param scaler: Scaler used to resize the cscore frames, defaults to Bilinear 30 | :return: GRAY float deband mask 31 | """ 32 | if get_depth(clip) > 10: 33 | clip = depth(clip, 10, dither_type=DitherType.NONE) 34 | 35 | scores = core.akarin.Cambi(clip, scores=True, **cambi_args) 36 | if merge_previous: 37 | cscores = [ 38 | blur_func(scores.std.PropToClip(f'CAMBI_SCALE{i}').std.Deflate().std.Deflate()) 39 | for i in range(0, scale + 1) 40 | ] 41 | scaler = Scaler.ensure_obj(scaler) 42 | deband_mask = average_merge([scaler.scale(c, scores.width, scores.height) for c in cscores]) 43 | else: 44 | deband_mask = blur_func(scores.std.PropToClip(f'CAMBI_SCALE{scale}').std.Deflate().std.Deflate()) 45 | 46 | return deband_mask.std.CopyFrameProps(scores) 47 | -------------------------------------------------------------------------------- /vardefunc/misc.py: -------------------------------------------------------------------------------- 1 | """Miscellaneous functions and wrappers that didn't really have a place in any other submodules.""" 2 | from __future__ import annotations 3 | 4 | __all__ = [ 5 | 'DebugOutput', 'Thresholds', 'thresholding', 6 | 'fade_filter', 7 | 'Planes', 'YUVPlanes', 'RGBPlanes', 8 | 'get_chroma_shift', 'get_bicubic_params', 9 | 'set_ffms2_log_level' 10 | ] 11 | 12 | import math 13 | import warnings 14 | 15 | from abc import ABC 16 | from contextlib import AbstractContextManager 17 | from functools import partial, wraps 18 | from itertools import count 19 | from operator import ilshift, imatmul, ior 20 | from types import TracebackType 21 | from typing import ( 22 | Any, Callable, ClassVar, Dict, Iterable, Iterator, List, Literal, MutableMapping, NamedTuple, 23 | Optional, Sequence, Tuple, Type, TypeVar, Union, cast, overload 24 | ) 25 | 26 | import vapoursynth as vs 27 | 28 | from lvsfunc.comparison import Stack 
29 | from vstools import Direction, depth, get_depth, get_w, insert_clip, join, plane
30 | 
31 | from .types import F_OpInput, OpInput, Output
32 | 
33 | core = vs.core
34 | 
35 | 
36 | OpDebug = Callable[["DebugOutput", OpInput], "DebugOutput"]
37 | _OPS = {
38 |     '<<=': cast(OpDebug, ilshift),
39 |     '@=': cast(OpDebug, imatmul),
40 |     '|=': cast(OpDebug, ior)
41 | }
42 | 
43 | 
44 | class DebugOutputMMap(MutableMapping[int, vs.VideoNode], ABC):
45 |     """Abstract Debug Output interface implementing the mutable mapping methods"""
46 |     outputs: ClassVar[Dict[int, vs.VideoNode]] = {}
47 | 
48 |     _props: int
49 |     _num: int
50 |     _scale: int
51 | 
52 |     _min_idx: int
53 |     _max_idx: int
54 | 
55 |     def __getitem__(self, index: int) -> vs.VideoNode:
56 |         return self.outputs[index]
57 | 
58 |     def __setitem__(self, index: int, clip: vs.VideoNode) -> None:
59 |         self.outputs[index] = clip
60 | 
61 |         self._update_minmax()
62 | 
63 |         if self._props:
64 |             clip = clip.text.FrameProps(alignment=self._props, scale=self._scale)
65 |         if self._num:
66 |             clip = clip.text.FrameNum(self._num, self._scale)
67 | 
68 |         clip.set_output(index)
69 | 
70 |     def __delitem__(self, index: int) -> None:
71 |         del self.outputs[index]
72 |         self._update_minmax()
73 |         vs.clear_output(index)
74 | 
75 |     def __len__(self) -> int:
76 |         return len(self.outputs)
77 | 
78 |     def __iter__(self) -> Iterator[int]:
79 |         yield from self.outputs.keys()
80 | 
81 |     def __str__(self) -> str:
82 |         string = ''
83 |         for idx, clip in sorted(self.items()):
84 |             string += f'Index N° {idx}\n' + str(clip) + '---------------\n'
85 |         return string
86 | 
87 |     def __repr__(self) -> str:
88 |         return repr(self.outputs)
89 | 
90 |     def __del__(self) -> None:
91 |         """
92 |         Deleting an item effectively frees the memory, since we're invoking vs.clear_output.
93 |         Indexes are also updated.
94 | 
95 |         However, we can't clear the outputs in the destructor of the DebugOutput instance.
96 |         Previewers wouldn't be able to get the outputs, because they run after the end of the script,
97 |         by which point the destructor has already been called.
98 | 
99 |         So ``del debug[0]`` will clear the output 0 but ``del debug`` won't.
100 |         If you want to clear the outputs, do ``debug.clear()`` followed by ``del debug``.
101 |         """
102 |         self.outputs.clear()
103 |         for name in set(self.__dict__):
104 |             delattr(self, name)
105 | 
106 |     def _update_minmax(self) -> None:
107 |         try:
108 |             self._min_idx, self._max_idx = min(self.outputs.keys()), max(self.outputs.keys())
109 |         except ValueError:
110 |             del self._min_idx, self._max_idx
111 | 
112 | 
113 | class DebugOutput(DebugOutputMMap):
114 |     """Utility class to output multiple clips"""
115 | 
116 |     def __init__(self, *clips: Output, props: int = 0, num: int = 0, scale: int = 1,
117 |                  clear_outputs: bool = False, check_curr_env: bool = True, **named_clips: Output) -> None:
118 |         """
119 |         Args:
120 |             clips (vs.VideoNode | List[vs.VideoNode] | Tuple[int, vs.VideoNode] | Tuple[int, List[vs.VideoNode]]):
121 |                 `clips` can be a VideoNode, a list of planes,
122 |                 a tuple of an index and VideoNode or a tuple of an index and a list of planes.
123 |                 If a list of planes is passed, DebugOutput will try to stack the planes for previewing.
124 |                 Only 444 and 420 formats are allowed. Otherwise a warning will be raised and a garbage clip will be displayed.
125 | 
126 |             named_clips (Dict[str, vs.VideoNode | List[vs.VideoNode] | Tuple[int, vs.VideoNode] | Tuple[int, List[vs.VideoNode]]]):
127 |                 Same as clips, except passed as keyword arguments.
128 |                 The location of named_clips's names is hardcoded to 8.
129 | 130 | props (int, optional): 131 | Location of the displayed FrameProps. 0 means no display. 132 | Defaults to 0. 133 | 134 | num (int, optional): 135 | Location of the displayed FrameNum. 0 means no display. 136 | Defaults to 0. 137 | 138 | scale (int, optional): 139 | Global integer scaling factor for the bitmap font. 140 | Defaults to 1. 141 | 142 | clear_outputs (bool, optional): 143 | Clears all clips set for output in the current environment. 144 | Defaults to False. 145 | 146 | check_curr_env (bool, optional): 147 | Check all clips set for output in the current environment. 148 | Defaults to True. 149 | """ 150 | self._props = props 151 | self._num = num 152 | self._scale = scale 153 | self._max_idx = 0 154 | self._min_idx = 0 155 | self._load_clips(*clips, clear_outputs=clear_outputs, check_curr_env=check_curr_env, **named_clips) 156 | 157 | def _load_clips(self, *clips: Output, clear_outputs: bool = False, check_curr_env: bool = True, **named_clips: Output) -> None: 158 | rclips = [ 159 | self._resolve_clips(i, clip, None) for i, clip in enumerate(clips) 160 | ] 161 | rclips += [ 162 | self._resolve_clips(i, clip, name) 163 | for i, (name, clip) in enumerate(named_clips.items(), start=len(rclips)) 164 | ] 165 | 166 | if len(all_idx := [idx for idx, _ in rclips]) != len(set(all_idx)): 167 | raise ValueError('DebugOutput: there are shared indexes!') 168 | 169 | if clear_outputs: 170 | self.clear() 171 | self.update(rclips) 172 | else: 173 | if check_curr_env: 174 | self._check_curr_env(all_idx) 175 | self.update(self._get_outputs() | dict(rclips)) 176 | 177 | def __ilshift__(self, clips: OpInput) -> DebugOutput: 178 | """Adds from the biggest index <<=""" 179 | return self._resolve_input_operator(self._index_gen(self._max_idx + 1), clips, True) 180 | 181 | def __imatmul__(self, clips: OpInput) -> DebugOutput: 182 | """Fills unused indexes @=""" 183 | return self._resolve_input_operator(self._index_not_used_gen(), clips, True) 184 | 185 | def __ior__(self, clips: OpInput) -> DebugOutput: 186 | """Fills and replaces existing indexes |=""" 187 | return self._resolve_input_operator(self._index_gen(self._min_idx), clips, False) 188 | 189 | def _resolve_clips(self, i: int, clip: Output, name: Optional[str]) -> Tuple[int, vs.VideoNode]: 190 | if isinstance(clip, vs.VideoNode): 191 | out = i, clip 192 | elif isinstance(clip, list): 193 | out = i, self._stack_planes(clip) 194 | else: 195 | idx, clp = clip 196 | if isinstance(clp, list): 197 | out = idx, self._stack_planes(clp) 198 | else: 199 | out = idx, clp 200 | 201 | if name: 202 | idx, c = out 203 | out = idx, c.text.Text(name, 8, self._scale) 204 | 205 | return out 206 | 207 | def _resolve_input_operator(self, yield_func: Iterable[int], clips: OpInput, env: bool = True) -> DebugOutput: 208 | if isinstance(clips, dict): 209 | self._load_clips( 210 | clear_outputs=False, check_curr_env=env, 211 | **{name: cast(Output, (i, clip)) for i, (name, clip) in zip(yield_func, clips.items())} 212 | ) 213 | elif isinstance(clips, tuple): 214 | if isinstance(clips[0], vs.VideoNode): 215 | self._load_clips( 216 | *zip(yield_func, (c for c in clips if isinstance(c, vs.VideoNode))), check_curr_env=env, 217 | ) 218 | else: 219 | self._load_clips(*zip(yield_func, (c for c in clips if isinstance(c, list))), check_curr_env=env,) 220 | elif isinstance(clips, list): 221 | self._load_clips(*zip(yield_func, [clips]), check_curr_env=env,) 222 | else: 223 | self._load_clips(*zip(yield_func, [clips]), check_curr_env=env) 224 | return self 225 | 226 | def 
_index_not_used_gen(self) -> Iterable[int]: 227 | for i in self._index_gen(self._min_idx): 228 | if i not in self.keys(): 229 | yield i 230 | 231 | @overload 232 | def catch(self, func: Optional[F_OpInput], /) -> F_OpInput: 233 | ... 234 | 235 | @overload 236 | def catch(self, /, *, op: Union[OpDebug, str] = '<<=') -> Callable[[F_OpInput], F_OpInput]: 237 | ... 238 | 239 | def catch(self, func: Optional[F_OpInput] = None, /, *, op: Union[OpDebug, str] = '<<=' 240 | ) -> Union[Callable[[F_OpInput], F_OpInput], F_OpInput]: 241 | """Decorator to catch the output of the function decorated""" 242 | if func is None: 243 | return cast( 244 | Callable[[F_OpInput], F_OpInput], 245 | partial(self.catch, op=op) 246 | ) 247 | 248 | @wraps(func) 249 | def _wrapper(*args: Any, **kwargs: Any) -> OpInput: 250 | assert func 251 | out = func(*args, **kwargs) 252 | opera = _OPS[op] if isinstance(op, str) else op 253 | opera(self, out) 254 | return out 255 | 256 | return cast(F_OpInput, _wrapper) 257 | 258 | @staticmethod 259 | def _index_gen(start: int) -> Iterable[int]: 260 | yield from count(start=start) 261 | 262 | @staticmethod 263 | def _stack_planes(planes: List[vs.VideoNode]) -> vs.VideoNode: 264 | if len(planes) > 3: 265 | warnings.warn('DebugOutput: output list out of range', Warning) 266 | out = core.std.BlankClip( 267 | format=vs.GRAY8, color=128 268 | ).text.Text('Problematic output: \noutput list out of range', 5, 2) 269 | else: 270 | if len({c.width for c in planes}) == len({c.height for c in planes}) == 1: 271 | out = Stack(planes).clip 272 | else: 273 | try: 274 | out = Stack([planes[0], Stack(planes[1:], direction=Direction.VERTICAL).clip]).clip 275 | except ValueError: 276 | warnings.warn('DebugOutput: unexpected subsampling') 277 | out = core.std.BlankClip( 278 | format=vs.GRAY8, color=128 279 | ).text.Text('Problematic output: \nunexpected subsampling', 5, 2) 280 | return out 281 | 282 | @staticmethod 283 | def _check_curr_env(idx: Iterable[int]) -> None: 284 | for i in idx: 285 | if i in vs.get_outputs().keys(): 286 | raise ValueError(f'DebugOutput: index {i} is already used in current environment!') 287 | 288 | @staticmethod 289 | def _get_outputs() -> Dict[int, vs.VideoNode]: 290 | outputs: Dict[int, vs.VideoNode] = {} 291 | for idx, output in vs.get_outputs().items(): 292 | if isinstance(output, vs.VideoOutputTuple): 293 | if output.alpha: 294 | outputs[idx] = output.clip.std.ClipToProp(output.alpha) 295 | else: 296 | outputs[idx] = output.clip 297 | return outputs 298 | 299 | 300 | class Thresholds(NamedTuple): 301 | """ 302 | [soft_bound_min, [hard_bound_min, hard_bound_max], soft_bound_max) 303 | """ 304 | clip: vs.VideoNode 305 | soft_bound_min: int | float | Sequence[int] | Sequence[float] 306 | hard_bound_min: int | float | Sequence[int] | Sequence[float] 307 | hard_bound_max: int | float | Sequence[int] | Sequence[float] 308 | soft_bound_max: int | float | Sequence[int] | Sequence[float] 309 | coef_min: int | float | Sequence[int] | Sequence[float] | None = None 310 | coef_max: int | float | Sequence[int] | Sequence[float] | None = None 311 | 312 | 313 | def thresholding(*thrs: Thresholds, base: Optional[vs.VideoNode] = None, guidance: Optional[vs.VideoNode] = None) -> vs.VideoNode: 314 | """ 315 | General function for applying specific filtering on specific thresholds 316 | with gradation support before and after the hard thresholds 317 | 318 | Args: 319 | thrs (Thresholds): 320 | Positional arguments of Thresholds. 
321 | 322 | base (vs.VideoNode, optional): 323 | Base clip on which the first application will be made. 324 | If not specified, a blank clip is made from the first ``thrs``. 325 | 326 | guidance (VideoNode, optional): 327 | Guidance clip on which the threshold references are taken. 328 | If not specified, the guidance clip is made from the first ``thrs``. 329 | 330 | Returns: 331 | vs.VideoNode: 332 | Thresholded clip. 333 | """ 334 | if not base: 335 | base = thrs[0].clip.std.BlankClip() 336 | if not guidance: 337 | guidance = thrs[0].clip 338 | 339 | if not base.format or not guidance.format: 340 | raise ValueError('thresholding: variable format not allowed') 341 | 342 | for i, thr in enumerate(thrs): 343 | if thr.clip.format != base.format: 344 | raise ValueError(f'thresholding: threshold {i} has a different format than base clip') 345 | 346 | def _normalise_thr(thr: int | float | Sequence[int] | Sequence[float], num_planes: int) -> List[int | float]: 347 | thr = [thr] if isinstance(thr, (float, int)) else thr 348 | return (list(thr) + [thr[-1]] * (num_planes - len(thr)))[:num_planes] 349 | 350 | pclip = base 351 | 352 | for thr in thrs: 353 | soft_bound_min, hard_bound_min, hard_bound_max, soft_bound_max = (_normalise_thr(t, base.format.num_planes) for t in thr[1:5]) 354 | coef_min = _normalise_thr(thr.coef_min, base.format.num_planes) if thr.coef_min else None 355 | coef_max = _normalise_thr(thr.coef_max, base.format.num_planes) if thr.coef_max else None 356 | 357 | exprs: List[str] = [] 358 | for i in range(base.format.num_planes): 359 | if_in_min = f'x {soft_bound_min[i]} >= x {hard_bound_min[i]} < and' 360 | if_in_max = f'x {hard_bound_max[i]} >= x {soft_bound_max[i]} < and' 361 | if_in_hard = f'x {hard_bound_min[i]} >= x {hard_bound_max[i]} < and' 362 | 363 | str_min = f'x {soft_bound_min[i]} - {hard_bound_min[i]} {soft_bound_min[i]} - /' 364 | if coef_min: 365 | str_min += f' {coef_min[i]} pow' 366 | 367 | str_max = f'x {hard_bound_max[i]} - {soft_bound_max[i]} {hard_bound_max[i]} - /' 368 | if coef_max: 369 | str_max += f' {coef_max[i]} pow' 370 | 371 | exprs.append( 372 | if_in_min + f' z {str_min} * y 1 {str_min} - * + ' 373 | + if_in_max + f' y {str_max} * z 1 {str_max} - * + ' 374 | + if_in_hard + ' z y ? ? ?' 375 | ) 376 | 377 | pclip = core.std.Expr([guidance, pclip, thr.clip], exprs) 378 | 379 | return pclip 380 | 381 | 382 | def fade_filter(clip: vs.VideoNode, clip_a: vs.VideoNode, clip_b: vs.VideoNode, 383 | start_f: int, end_f: int) -> vs.VideoNode: 384 | """Applies a filter by fading clip_a to clip_b. 385 | 386 | Args: 387 | clip (vs.VideoNode): Source clip 388 | 389 | clip_a (vs.VideoNode): Fade in clip. 390 | 391 | clip_b (vs.VideoNode): Fade out clip. 392 | 393 | start_f (int): Start frame. 394 | 395 | end_f (int): End frame. 396 | 397 | Returns: 398 | vs.VideoNode: Faded clip. 
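
    Example (a minimal, hedged sketch; the blur is only an illustrative filter):
        blurred = clip.std.BoxBlur()
        # Gradually fade from the untouched clip to the blurred clip over frames 100-150
        faded = fade_filter(clip, clip, blurred, 100, 150)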
399 | """ 400 | length = end_f - start_f 401 | 402 | def _fade(n: int, clip_a: vs.VideoNode, clip_b: vs.VideoNode, length: int) -> vs.VideoNode: 403 | return core.std.Merge(clip_a, clip_b, n / length) 404 | 405 | func = partial(_fade, clip_a=clip_a[start_f:end_f + 1], clip_b=clip_b[start_f:end_f + 1], length=length) 406 | clip_fad = core.std.FrameEval(clip[start_f:end_f + 1], func) 407 | 408 | return insert_clip(clip, clip_fad, start_f) 409 | 410 | 411 | PlanesT = TypeVar('PlanesT', bound='Planes') 412 | 413 | 414 | class Planes(AbstractContextManager[PlanesT], Sequence[vs.VideoNode]): 415 | """General context manager for easier planes management""" 416 | 417 | __slots__ = ('_clip', '_family', '_final_clip', '_planes', '_in_context') 418 | 419 | def __init__(self, clip: vs.VideoNode, bits: Optional[int] = None, family: vs.ColorFamily = vs.YUV) -> None: 420 | """ 421 | Args: 422 | clip (vs.VideoNode): 423 | Source clip 424 | 425 | bits (Optional[int], optional): 426 | Target bitdepth. Defaults to None. 427 | 428 | family (vs.ColorFamily, optional): 429 | Colour family. Defaults to vs.YUV. 430 | """ 431 | self._clip = depth(clip, bits) if bits else clip 432 | self._family = family 433 | # Initialisation 434 | self._final_clip: vs.VideoNode 435 | self._planes: List[vs.VideoNode] 436 | self._in_context = False 437 | super().__init__() 438 | 439 | def __enter__(self: PlanesT) -> PlanesT: 440 | if isinstance(planes := self._clip.std.SplitPlanes(), Sequence): 441 | self._planes = list(planes) 442 | else: 443 | raise ValueError(f'{self.__class__.__name__}: GRAY colour family isn\'t supported!') 444 | self._in_context = True 445 | return self 446 | 447 | def __exit__(self, __exc_type: Type[BaseException] | None, __exc_value: BaseException | None, 448 | __traceback: TracebackType | None) -> bool | None: 449 | self._final_clip = join(self._planes, self._family) 450 | self._planes.clear() 451 | self._in_context = False 452 | return None 453 | 454 | @overload 455 | def __getitem__(self, i: int) -> vs.VideoNode: 456 | ... 457 | 458 | @overload 459 | def __getitem__(self, i: slice) -> Sequence[vs.VideoNode]: 460 | ... 
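
    # A hedged usage sketch of this context manager, via the YUVPlanes subclass
    # defined further below (the BoxBlur call is only an illustrative luma filter):
    #
    #     with YUVPlanes(clip, 16) as planes:
    #         planes.Y = planes.Y.std.BoxBlur()
    #     out = planes.clip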
461 | 462 | def __getitem__(self, i: int | slice) -> vs.VideoNode | Sequence[vs.VideoNode]: 463 | if self._in_context: 464 | return self._planes[i] 465 | raise RuntimeError( 466 | f'{self.__class__.__name__}: You can only get the planes inside the context manager' 467 | ) 468 | 469 | def __setitem__(self, index: int, gray: vs.VideoNode) -> None: 470 | if not self._in_context: 471 | raise RuntimeError( 472 | f'{self.__class__.__name__}: You can only set the planes inside the context manager' 473 | ) 474 | try: 475 | self._planes[index] = gray 476 | except IndexError as i_err: 477 | raise ValueError(f'{self.__class__.__name__}: plane number out of range') from i_err 478 | if get_depth(gray) != (bits := get_depth(self._clip)): 479 | # 32 bits float in YUV and doing on chroma planes 480 | if bits == 32 and self._family == vs.YUV and index in {1, 2}: 481 | gray = plane(depth(join([gray] * 3, self._family), bits), index) 482 | else: 483 | gray = depth(gray, bits) 484 | self._planes[index] = depth(gray, bits) 485 | 486 | def __delitem__(self, index: int) -> None: 487 | if self._in_context: 488 | self[index] = self[index].std.BlankClip() 489 | raise RuntimeError( 490 | f'{self.__class__.__name__}: You can only delete the planes inside the context manager' 491 | ) 492 | 493 | def __len__(self) -> Literal[3]: 494 | return 3 495 | 496 | @property 497 | def clip(self) -> vs.VideoNode: 498 | """Get final merged clip""" 499 | try: 500 | out = self._final_clip 501 | except AttributeError as attr_err: 502 | raise ValueError( 503 | f'{self.__class__.__name__}: you can only get "clip" outside of the context manager and once' 504 | ) from attr_err 505 | else: 506 | del self._clip, self._family, self._final_clip, self._planes 507 | return out 508 | 509 | 510 | class YUVPlanes(Planes): 511 | def __init__(self, clip: vs.VideoNode, bits: Optional[int] = None) -> None: 512 | super().__init__(clip, bits, vs.YUV) 513 | 514 | @property 515 | def Y(self) -> vs.VideoNode: 516 | return self[0] 517 | 518 | @Y.setter 519 | def Y(self, _x: vs.VideoNode) -> None: 520 | self[0] = _x 521 | 522 | @Y.deleter 523 | def Y(self) -> None: 524 | del self[0] 525 | 526 | @property 527 | def U(self) -> vs.VideoNode: 528 | return self[1] 529 | 530 | @U.setter 531 | def U(self, _x: vs.VideoNode) -> None: 532 | self[1] = _x 533 | 534 | @U.deleter 535 | def U(self) -> None: 536 | del self[1] 537 | 538 | @property 539 | def V(self) -> vs.VideoNode: 540 | return self[2] 541 | 542 | @V.setter 543 | def V(self, _x: vs.VideoNode) -> None: 544 | self[2] = _x 545 | 546 | @V.deleter 547 | def V(self) -> None: 548 | del self[2] 549 | 550 | 551 | class RGBPlanes(Planes): 552 | def __init__(self, clip: vs.VideoNode, bits: Optional[int] = None) -> None: 553 | super().__init__(clip, bits, vs.RGB) 554 | 555 | @property 556 | def R(self) -> vs.VideoNode: 557 | return self[0] 558 | 559 | @R.setter 560 | def R(self, _x: vs.VideoNode) -> None: 561 | self[0] = _x 562 | 563 | @R.deleter 564 | def R(self) -> None: 565 | del self[0] 566 | 567 | @property 568 | def G(self) -> vs.VideoNode: 569 | return self[1] 570 | 571 | @G.setter 572 | def G(self, _x: vs.VideoNode) -> None: 573 | self[1] = _x 574 | 575 | @G.deleter 576 | def G(self) -> None: 577 | del self[1] 578 | 579 | @property 580 | def B(self) -> vs.VideoNode: 581 | return self[2] 582 | 583 | @B.setter 584 | def B(self, _x: vs.VideoNode) -> None: 585 | self[2] = _x 586 | 587 | @B.deleter 588 | def B(self) -> None: 589 | del self[2] 590 | 591 | 592 | def get_chroma_shift(src_h: int, dst_h: int, 
                     aspect_ratio: float = 16 / 9) -> float:
593 |     """Intended to calculate the right value for chroma shifting when doing subsampled scaling.
594 | 
595 |     Args:
596 |         src_h (int): Source height.
597 |         dst_h (int): Destination height.
598 |         aspect_ratio (float, optional): Defaults to 16/9.
599 | 
600 |     Returns:
601 |         float: The chroma shift value.
602 |     """
603 |     src_w = get_w(src_h, aspect_ratio)
604 |     dst_w = get_w(dst_h, aspect_ratio)
605 | 
606 |     ch_shift = 0.25 - 0.25 * (src_w / dst_w)
607 |     ch_shift = float(round(ch_shift, 5))
608 |     return ch_shift
609 | 
610 | 
611 | def get_bicubic_params(cubic_filter: str) -> Tuple[float, float]:
612 |     """Return the parameters b and c for the bicubic filter.
613 |     Source: https://www.imagemagick.org/discourse-server/viewtopic.php?f=22&t=19823
614 |             https://www.imagemagick.org/Usage/filter/#mitchell
615 | 
616 |     Args:
617 |         cubic_filter (str): Can be: Spline, B-Spline, Hermite, Mitchell-Netravali, Mitchell,
618 |             Catmull-Rom, Catrom, Sharp Bicubic, Robidoux soft, Robidoux, Robidoux Sharp.
619 | 
620 |     Returns:
621 |         Tuple: b/c combo
622 |     """
623 |     sqrt = math.sqrt
624 | 
625 |     def _get_robidoux_soft() -> Tuple[float, float]:
626 |         b = (9 - 3 * sqrt(2)) / 7
627 |         c = (1 - b) / 2
628 |         return b, c
629 | 
630 |     def _get_robidoux() -> Tuple[float, float]:
631 |         sqrt2 = sqrt(2)
632 |         b = 12 / (19 + 9 * sqrt2)
633 |         c = 113 / (58 + 216 * sqrt2)
634 |         return b, c
635 | 
636 |     def _get_robidoux_sharp() -> Tuple[float, float]:
637 |         sqrt2 = sqrt(2)
638 |         b = 6 / (13 + 7 * sqrt2)
639 |         c = 7 / (2 + 12 * sqrt2)
640 |         return b, c
641 | 
642 |     cubic_filter = cubic_filter.lower().replace(' ', '_').replace('-', '_')
643 |     cubic_filters = {
644 |         'spline': (1.0, 0.0),
645 |         'b_spline': (1.0, 0.0),
646 |         'hermite': (0.0, 0.0),
647 |         'mitchell_netravali': (1 / 3, 1 / 3),
648 |         'mitchell': (1 / 3, 1 / 3),
649 |         'catmull_rom': (0.0, 0.5),
650 |         'catrom': (0.0, 0.5),
651 |         'bicubic_sharp': (0.0, 1.0),
652 |         'sharp_bicubic': (0.0, 1.0),
653 |         'robidoux_soft': _get_robidoux_soft(),
654 |         'robidoux': _get_robidoux(),
655 |         'robidoux_sharp': _get_robidoux_sharp()
656 |     }
657 |     return cubic_filters[cubic_filter]
658 | 
659 | 
660 | def set_ffms2_log_level(level: Union[str, int] = 0) -> None:
661 |     """A friendlier way to set the log level in ffms2.
662 | 
663 |     Args:
664 |         level (int, optional): The target log level in ffms2.
665 |             Valid choices are "quiet" or 0, "panic" or 1, "fatal" or 2, "error" or 3,
666 |             "warning" or 4, "info" or 5, "verbose" or 6, "debug" or 7 and "trace" or 8.
667 |             Defaults to 0.
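
    Example (both calls are equivalent, per the mapping above):
        set_ffms2_log_level('quiet')
        set_ffms2_log_level(0)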
668 | """ 669 | levels = { 670 | 'quiet': -8, 671 | 'panic': 0, 672 | 'fatal': 8, 673 | 'error': 16, 674 | 'warning': 24, 675 | 'info': 32, 676 | 'verbose': 40, 677 | 'debug': 48, 678 | 'trace': 56, 679 | 0: -8, 680 | 1: 0, 681 | 2: 8, 682 | 3: 16, 683 | 4: 24, 684 | 5: 32, 685 | 6: 40, 686 | 7: 48, 687 | 8: 56 688 | } 689 | core.ffms2.SetLogLevel(levels[level]) 690 | -------------------------------------------------------------------------------- /vardefunc/noise.py: -------------------------------------------------------------------------------- 1 | """Noising/denoising functions""" 2 | from __future__ import annotations 3 | 4 | __all__ = [ 5 | 'mvtools_args_defaults', 'nl_means_defaults', 'bm3d_profile_ffast', 'denoise', 6 | 'Grainer', 'AddGrain', 'F3kdbGrain', 7 | 'Graigasm', 'BilateralMethod', 'decsiz', 8 | 'adaptative_regrain' 9 | ] 10 | 11 | from abc import ABC, abstractmethod 12 | from enum import Enum 13 | from functools import partial 14 | from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union, cast 15 | 16 | from vsdenoise import ( 17 | BM3DCPU, BM3DCuda, BM3DCudaRTC, DeviceType, MotionMode, MVTools, PelType, Prefilter, Profile, 18 | SADMode, SearchMode, NLMWeightMode, nl_means 19 | ) 20 | from vsdenoise.bm3d import ProfileBase 21 | from vsmasktools import FDoGTCanny, adg_mask, range_mask 22 | from vstools import ColorRange, DitherType, KwargsT, core, depth, get_depth, get_plane_sizes, get_y, join, split, vs 23 | 24 | from .util import pick_px_op 25 | 26 | 27 | def mvtools_args_defaults() -> KwargsT: 28 | return KwargsT( 29 | sad_mode=SADMode.SPATIAL.same_recalc, 30 | motion=MotionMode.HIGH_SAD, 31 | prefilter=Prefilter.MINBLUR2, 32 | pel_type=PelType.WIENER, 33 | search=SearchMode.DIAMOND.defaults, 34 | block_size=16, 35 | overlap=8, 36 | limit=255 37 | ) 38 | 39 | 40 | def nl_means_defaults() -> KwargsT: 41 | return KwargsT( 42 | sr=2, 43 | simr=4, 44 | wmode=NLMWeightMode.BISQUARE_HR, # wmode=3 45 | device_type=DeviceType.CUDA, 46 | num_streams=2 47 | ) 48 | 49 | 50 | def bm3d_profile_ffast() -> Profile.Config: 51 | return ProfileBase.Config(Profile.FAST, KwargsT(), KwargsT(), KwargsT(), KwargsT(fast=True), KwargsT(), KwargsT()) 52 | 53 | 54 | def denoise( 55 | clip: vs.VideoNode, 56 | thSAD: int | tuple[int, int | tuple[int, int]] | None = 115, 57 | sigma_y: float | None = 0.7, 58 | strength_uv: float | None = 0.2, 59 | tr: int = 1, 60 | mvtools_args: KwargsT | None = None, 61 | bm3d_impl: type[BM3DCPU | BM3DCuda | BM3DCudaRTC] = BM3DCudaRTC, 62 | bm3d_profile: Profile | Profile.Config = bm3d_profile_ffast(), 63 | nl_args: KwargsT | None = None, 64 | store_mv_as_prop: bool = False 65 | ) -> vs.VideoNode: 66 | """ 67 | MVTools + BM3D + NLMeans denoise. 
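
    A hedged usage sketch (the values simply restate the defaults and are
    illustrative, not recommendations):

        den = denoise(clip, thSAD=115, sigma_y=0.7, strength_uv=0.2, tr=1)

    Passing a falsy value (``None`` or ``0``) for ``thSAD``, ``sigma_y`` or
    ``strength_uv`` skips the MVTools, BM3D or NLMeans step respectively.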
68 |     """
69 |     ref = (
70 |         MVTools.denoise(clip, thSAD, tr, **mvtools_args_defaults() | (mvtools_args or KwargsT()))
71 |         if thSAD else clip
72 |     )
73 |     den_luma = bm3d_impl.denoise(clip, sigma_y, tr, 1, bm3d_profile, ref, planes=0) if sigma_y else clip
74 |     den_chroma = (
75 |         nl_means(den_luma, strength_uv, tr, ref=ref, planes=[1, 2], **nl_means_defaults() | (nl_args or KwargsT()))
76 |         if strength_uv else den_luma
77 |     )
78 |     if store_mv_as_prop:
79 |         den_chroma = den_chroma.std.ClipToProp(ref, "DenoiseMotionVectorsRef")
80 | 
81 |     return den_chroma
82 | 
83 | 
84 | class Grainer(ABC):
85 |     """Abstract graining interface"""
86 |     def __init__(self, **kwargs: Any) -> None:
87 |         self.kwargs = kwargs
88 |         super().__init__()
89 | 
90 |     @abstractmethod
91 |     def grain(self, clip: vs.VideoNode, /, strength: Tuple[float, float]) -> vs.VideoNode:
92 |         """Graining function of the Grainer
93 | 
94 |         Args:
95 |             clip (vs.VideoNode):
96 |                 Source clip.
97 | 
98 |             strength (Tuple[float, float]):
99 |                 First value is luma strength, second value is chroma strength.
100 | 
101 |         Returns:
102 |             vs.VideoNode: Grained clip.
103 |         """
104 | 
105 | 
106 | class AddGrain(Grainer):
107 |     """Built-in grain.Add plugin"""
108 |     def grain(self, clip: vs.VideoNode, /, strength: Tuple[float, float]) -> vs.VideoNode:
109 |         return clip.grain.Add(var=strength[0], uvar=strength[1], **self.kwargs)
110 | 
111 | 
112 | class F3kdbGrain(Grainer):
113 |     """Built-in f3kdb.Deband plugin"""
114 |     def grain(self, clip: vs.VideoNode, /, strength: Tuple[float, float]) -> vs.VideoNode:
115 |         return core.neo_f3kdb.Deband(clip, None, 1, 1, 1, int(strength[0]), int(strength[1]), **self.kwargs)
116 | 
117 | 
118 | class Graigasm():
119 |     """Custom graining interface based on luma values"""
120 |     thrs: List[float]
121 |     strengths: List[Tuple[float, float]]
122 |     sizes: List[float]
123 |     sharps: List[float]
124 |     overflows: List[float]
125 |     grainers: List[Grainer]
126 | 
127 |     def __init__(self,
128 |                  thrs: Sequence[float], strengths: Sequence[Tuple[float, float]], sizes: Sequence[float], sharps: Sequence[float], *,
129 |                  overflows: Union[float, Sequence[float], None] = None,
130 |                  grainers: Union[Grainer, Sequence[Grainer]] = AddGrain(seed=-1, constant=False)) -> None:
131 |         """Constructor checks and initializes the values.
132 |         The length of thrs must equal those of strengths, sizes and sharps.
133 |         thrs, strengths, sizes and sharps match the same area.
134 | 
135 |         Args:
136 |             thrs (Sequence[float]):
137 |                 Sequence of thresholds defining the grain boundary.
138 |                 Below the threshold, it's grained; above the threshold, it's not grained.
139 | 
140 |             strengths (Sequence[Tuple[float, float]]):
141 |                 Sequence of tuples representing the grain strength of the luma and the chroma, respectively.
142 | 
143 |             sizes (Sequence[float]):
144 |                 Sequence of grain sizes.
145 | 
146 |             sharps (Sequence[float]):
147 |                 Sequence of grain sharpness values. 50 is neutral Catmull-Rom (b=0, c=0.5).
148 | 
149 |             overflows (Union[float, Sequence[float]], optional):
150 |                 Percentage value determining by how much the hard limit of threshold will be extended.
151 |                 Range 0.0 - 1.0. Defaults to 1 divided by thrs's length for each thr.
152 | 
153 |             grainers (Union[Grainer, Sequence[Grainer]], optional):
154 |                 Grainer used for each combo of thrs, strengths, sizes and sharps.
155 |                 Defaults to AddGrain(seed=-1, constant=False).
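
        Example (a hedged sketch assuming a 16-bit input clip ``clip16``; every
        value is illustrative only, with one (thr, strength, size, sharp) combo
        per luma region):
            grainer = Graigasm(
                thrs=[x << 8 for x in (32, 80, 128, 176)],
                strengths=[(0.25, 0.0), (0.2, 0.0), (0.15, 0.0), (0.1, 0.0)],
                sizes=(1.25, 1.2, 1.15, 1.1),
                sharps=(70, 60, 55, 50),
            )
            grained = grainer.graining(clip16)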
156 |         """
157 |         self.thrs = list(thrs)
158 |         self.strengths = list(strengths)
159 |         self.sizes = list(sizes)
160 |         self.sharps = list(sharps)
161 | 
162 |         length = len(self.thrs)
163 |         datas: List[Any] = [self.strengths, self.sizes, self.sharps]
164 |         if any(len(lst) != length for lst in datas):  # any single mismatched length is an error
165 |             raise ValueError('Graigasm: "thrs", "strengths", "sizes" and "sharps" must have the same length!')
166 | 
167 |         if overflows is None:
168 |             overflows = [1 / length]
169 |         if isinstance(overflows, (float, int)):
170 |             overflows = [float(overflows)] * length
171 |         else:
172 |             overflows = list(overflows)
173 |             overflows += [overflows[-1]] * (length - len(overflows))
174 |         self.overflows = overflows
175 | 
176 |         if isinstance(grainers, Grainer):
177 |             grainers = [grainers] * length
178 |         else:
179 |             grainers = list(grainers)
180 |             grainers += [grainers[-1]] * (length - len(grainers))
181 |         self.grainers = grainers
182 | 
183 |     def graining(self,
184 |                  clip: vs.VideoNode, /, *,
185 |                  prefilter: Optional[vs.VideoNode] = None, show_masks: bool = False) -> vs.VideoNode:
186 |         """Apply grain using the settings from the constructor.
187 | 
188 |         Args:
189 |             clip (vs.VideoNode): Source clip.
190 | 
191 |             prefilter (vs.VideoNode, optional):
192 |                 Prefilter clip used to compute masks.
193 |                 Defaults to None.
194 | 
195 |             show_masks (bool, optional):
196 |                 Returns interleaved masks. Defaults to False.
197 | 
198 |         Returns:
199 |             vs.VideoNode: Grained clip.
200 |         """
201 |         assert clip.format
202 |         if clip.format.color_family not in (vs.YUV, vs.GRAY):
203 |             raise ValueError('graining: Only YUV and GRAY formats are supported!')
204 | 
205 |         bits = get_depth(clip)
206 |         is_float = clip.format.sample_type == vs.FLOAT
207 |         peak = 1.0 if is_float else (1 << bits) - 1
208 |         num_planes = clip.format.num_planes
209 |         neutral = [0.5] + [0.0] * (num_planes - 1) if is_float else [float(1 << (bits - 1))] * num_planes
210 | 
211 |         pref = prefilter if prefilter is not None else get_y(clip)
212 | 
213 |         mod = self._get_mod(clip)
214 | 
215 |         masks = [self._make_mask(pref, thr, ovf, peak, is_float=is_float) for thr, ovf in zip(self.thrs, self.overflows)]
216 |         masks = [pref.std.BlankClip(color=0)] + masks
217 |         masks = [core.std.Expr([masks[i], masks[i - 1]], 'x y -') for i in range(1, len(masks))]
218 | 
219 |         if num_planes == 3:
220 |             if is_float:
221 |                 masks_chroma = [mask.resize.Bilinear(*get_plane_sizes(clip, 1)) for mask in masks]
222 |                 masks = [join([mask, mask_chroma, mask_chroma]) for mask, mask_chroma in zip(masks, masks_chroma)]
223 |             else:
224 |                 masks = [join([mask] * 3).resize.Bilinear(format=clip.format.id) for mask in masks]
225 | 
226 |         if show_masks:
227 |             return core.std.Interleave(
228 |                 [mask.text.Text(f'Threshold: {thr}', 7).text.FrameNum(9)
229 |                  for thr, mask in zip(self.thrs, masks)]
230 |             )
231 | 
232 |         graineds = [self._make_grained(clip, strength, size, sharp, grainer, neutral, mod)
233 |                     for strength, size, sharp, grainer in zip(self.strengths, self.sizes, self.sharps, self.grainers)]
234 | 
235 |         clips_adg = [core.std.Expr([grained, clip, mask], f'x z {peak} / * y 1 z {peak} / - * +')
236 |                      for grained, mask in zip(graineds, masks)]
237 | 
238 |         out = clip
239 |         for clip_adg in clips_adg:
240 |             out = core.std.MergeDiff(clip_adg, core.std.MakeDiff(clip, out))  # type: ignore
241 | 
242 |         return out
243 | 
244 |     def _make_grained(self,
245 |                       clip: vs.VideoNode,
246 |                       strength: Tuple[float, float], size: float, sharp: float, grainer: Grainer,
247 |                       neutral: List[float], mod: int) -> vs.VideoNode:
248 |         ss_w =
self._m__(round(clip.width / size), mod) 249 | ss_h = self._m__(round(clip.height / size), mod) 250 | b = sharp / -50 + 1 251 | c = (1 - b) / 2 252 | 253 | blank = core.std.BlankClip(clip, ss_w, ss_h, color=neutral) 254 | grained = grainer.grain(blank, strength=strength).resize.Bicubic(clip.width, clip.height, filter_param_a=b, filter_param_b=c) 255 | 256 | return clip.std.MakeDiff(grained) 257 | 258 | @staticmethod 259 | def _get_mod(clip: vs.VideoNode) -> int: 260 | ss_mod: Dict[Tuple[int, int], int] = { 261 | (0, 0): 1, 262 | (1, 1): 2, 263 | (1, 0): 2, 264 | (0, 1): 2, 265 | (2, 2): 4, 266 | (2, 0): 4 267 | } 268 | assert clip.format is not None 269 | try: 270 | return ss_mod[(clip.format.subsampling_w, clip.format.subsampling_h)] 271 | except KeyError as kerr: 272 | raise ValueError('Graigasm: Format unknown!') from kerr 273 | 274 | @staticmethod 275 | def _make_mask(clip: vs.VideoNode, 276 | thr: float, overflow: float, peak: float, *, 277 | is_float: bool) -> vs.VideoNode: 278 | 279 | def _func(x: float) -> int: 280 | min_thr = thr - (overflow * peak) / 2 281 | max_thr = thr + (overflow * peak) / 2 282 | if min_thr <= x <= max_thr: 283 | x = abs(((x - min_thr) / (max_thr - min_thr)) * peak - peak) 284 | elif x < min_thr: 285 | x = peak 286 | elif x > max_thr: 287 | x = 0.0 288 | return round(x) 289 | 290 | min_thr = f'{thr} {overflow} {peak} * 2 / -' 291 | max_thr = f'{thr} {overflow} {peak} * 2 / +' 292 | # if x >= min_thr and x <= max_thr -> gradient else ... 293 | expr = f'x {min_thr} >= x {max_thr} <= and x {min_thr} - {max_thr} {min_thr} - / {peak} * {peak} - abs _ ?' 294 | # ... if x < min_thr -> peak else ... 295 | expr = expr.replace('_', f'x {min_thr} < {peak} _ ?') 296 | # ... if x > max_thr -> 0 else x 297 | expr = expr.replace('_', f'x {max_thr} > 0 x ?') 298 | 299 | return pick_px_op(is_float, (expr, _func))(clip) 300 | 301 | @staticmethod 302 | def _m__(x: int, mod: int, /) -> int: 303 | return x - x % mod 304 | 305 | 306 | class BilateralMethod(Enum): 307 | BILATERAL = 0 308 | BILATERAL_GPU = 1 309 | BILATERAL_GPU_RTC = 2 310 | 311 | @property 312 | def func(self) -> Callable[..., vs.VideoNode]: 313 | return [ # type: ignore 314 | lambda: core.bilateral.Bilateral, # type: ignore 315 | lambda: core.bilateralgpu.Bilateral, # type: ignore 316 | lambda: core.bilateralgpu_rtc.Bilateral # type: ignore 317 | ][self.value]() # type: ignore 318 | 319 | 320 | def decsiz(clip: vs.VideoNode, sigmaS: float = 10.0, sigmaR: float = 0.009, 321 | min_in: Optional[float] = None, max_in: Optional[float] = None, gamma: float = 1.0, 322 | blur_method: BilateralMethod = BilateralMethod.BILATERAL, 323 | protect_mask: Optional[vs.VideoNode] = None, prefilter: bool = True, 324 | planes: Optional[List[int]] = None, show_mask: bool = False) -> vs.VideoNode: 325 | """Denoising function using Bilateral intended to decrease the filesize 326 | by just blurring the invisible grain above max_in and keeping all of it 327 | below min_in. The range in between is progressive. 328 | 329 | Args: 330 | clip (vs.VideoNode): Source clip. 331 | 332 | sigmaS (float, optional): Bilateral parameter. 333 | Sigma of Gaussian function to calculate spatial weight. Defaults to 10.0. 334 | 335 | sigmaR (float, optional): Bilateral parameter. 336 | Sigma of Gaussian function to calculate range weight. Defaults to 0.009. 337 | 338 | min_in (Union[int, float], optional): 339 | Minimum pixel value below which the grain is kept. Defaults to None. 
340 | 341 | max_in (Union[int, float], optional): 342 | Maximum pixel value above which the grain is blurred. Defaults to None. 343 | 344 | gamma (float, optional): 345 | Controls the degree of non-linearity of the conversion. Defaults to 1.0. 346 | 347 | protect_mask (vs.VideoNode, optional): 348 | Mask that includes all the details that should not be blurred. 349 | If None, it uses the default one. 350 | 351 | prefilter (bool, optional): 352 | Blurs the luma as reference or not. Defaults to True. 353 | 354 | planes (List[int], optional): Defaults to all planes. 355 | 356 | show_mask (bool, optional): Returns the mask. 357 | 358 | Returns: 359 | vs.VideoNode: Denoised clip. 360 | 361 | Example: 362 | import vardefunc as vdf 363 | 364 | clip = depth(clip, 16) 365 | clip = vdf.decsiz(clip, min_in=128<<8, max_in=200<<8) 366 | """ 367 | if clip.format is None: 368 | raise ValueError('decsiz: Variable format not allowed!') 369 | 370 | bits = clip.format.bits_per_sample 371 | is_float = clip.format.sample_type == vs.FLOAT 372 | peak = (1 << bits) - 1 if not is_float else 1.0 373 | gamma = 1 / gamma 374 | if clip.format.color_family == vs.GRAY: 375 | planes = [0] 376 | else: 377 | planes = [0, 1, 2] if not planes else planes 378 | 379 | if not protect_mask: 380 | clip16 = depth(clip, 16) 381 | masks = split( 382 | range_mask(clip16, rad=3, radc=2).resize.Bilinear(format=vs.YUV444P16) 383 | ) + [ 384 | FDoGTCanny().edgemask(get_y(clip16)).std.Maximum().std.Minimum() 385 | ] 386 | protect_mask = core.std.Expr(masks, 'x y max z max 3250 < 0 65535 ? a max 8192 < 0 65535 ?') \ 387 | .std.BoxBlur(hradius=1, vradius=1, hpasses=2, vpasses=2) 388 | 389 | clip_y = get_y(clip) 390 | if prefilter: 391 | pre = clip_y.std.BoxBlur(hradius=2, vradius=2, hpasses=4, vpasses=4) 392 | else: 393 | pre = clip_y 394 | 395 | denoise_mask = pick_px_op( 396 | is_float, (f'x {min_in} max {max_in} min {min_in} - {max_in} {min_in} - / {gamma} pow 0 max 1 min {peak} *', 397 | lambda x: round(min(1, max(0, pow((min(max_in, max(min_in, x)) - min_in) / (max_in - min_in), gamma))) * peak)) # type: ignore 398 | )(pre) 399 | 400 | mask = core.std.Expr( 401 | [depth( 402 | protect_mask, bits, 403 | range_out=ColorRange.FULL, 404 | range_in=ColorRange.FULL, dither_type=DitherType.NONE 405 | ), 406 | denoise_mask], 407 | 'y x -' 408 | ) 409 | 410 | if show_mask: 411 | return mask 412 | 413 | if blur_method == BilateralMethod.BILATERAL: 414 | denoise = core.bilateral.Bilateral(clip, sigmaS=sigmaS, sigmaR=sigmaR, planes=planes, algorithm=0) 415 | else: 416 | denoise = blur_method.func(clip, sigmaS, sigmaR) 417 | 418 | return core.std.MaskedMerge(clip, denoise, mask, planes) 419 | 420 | 421 | def adaptative_regrain(denoised: vs.VideoNode, new_grained: vs.VideoNode, original_grained: vs.VideoNode, 422 | range_avg: Tuple[float, float] = (0.5, 0.4), luma_scaling: int = 28) -> vs.VideoNode: 423 | """Merge back the original grain below the lower range_avg value, 424 | apply the new grain clip above the higher range_avg value 425 | and weight both of them between the range_avg values for a smooth merge. 426 | Intended for use in applying a static grain in higher PlaneStatsAverage values 427 | to decrease the file size since we can't see a dynamic grain on that level. 428 | However, in dark scenes, it's more noticeable so we apply the original grain. 429 | 430 | Args: 431 | denoised (vs.VideoNode): The denoised clip. 432 | new_grained (vs.VideoNode): The new regrained clip. 433 | original_grained (vs.VideoNode): The original regrained clip. 
434 | range_avg (Tuple[float, float], optional): Range used in PlaneStatsAverage. Defaults to (0.5, 0.4). 435 | luma_scaling (int, optional): Parameter in adg.Mask. Defaults to 28. 436 | 437 | Returns: 438 | vs.VideoNode: The new adaptative grained clip. 439 | 440 | Example: 441 | import vardefunc as vdf 442 | 443 | denoise = denoise_filter(src, ...) 444 | diff = core.std.MakeDiff(src, denoise) 445 | ... 446 | some filters 447 | ... 448 | new_grained = core.neo_f3kdb.Deband(last, preset='depth', grainy=32, grainc=32) 449 | original_grained = core.std.MergeDiff(last, diff) 450 | adapt_regrain = vdf.adaptative_regrain(last, new_grained, original_grained, range_avg=(0.5, 0.4), luma_scaling=28) 451 | """ 452 | 453 | avg = core.std.PlaneStats(denoised) 454 | adapt_mask = adg_mask(get_y(avg), luma_scaling) 455 | adapt_grained = core.std.MaskedMerge(new_grained, original_grained, adapt_mask) 456 | 457 | avg_max = max(range_avg) 458 | avg_min = min(range_avg) 459 | 460 | def _diff(n: int, f: vs.VideoFrame, avg_max: float, avg_min: float, # noqa: PLW0613 461 | new: vs.VideoNode, adapt: vs.VideoNode) -> vs.VideoNode: 462 | psa = cast(float, f.props['PlaneStatsAverage']) 463 | if psa > avg_max: 464 | clip = new 465 | elif psa < avg_min: 466 | clip = adapt 467 | else: 468 | weight = (psa - avg_min) / (avg_max - avg_min) 469 | clip = core.std.Merge(adapt, new, [weight]) 470 | return clip 471 | 472 | diff_function = partial(_diff, avg_max=avg_max, avg_min=avg_min, new=new_grained, adapt=adapt_grained) 473 | 474 | return core.std.FrameEval(denoised, diff_function, [avg]) 475 | -------------------------------------------------------------------------------- /vardefunc/ocr.py: -------------------------------------------------------------------------------- 1 | 2 | __all__ = ['OCR'] 3 | 4 | import math 5 | 6 | from functools import partial 7 | from typing import Dict, List, Optional, Sequence, Set, Tuple, Union 8 | 9 | import vapoursynth as vs 10 | 11 | from pytimeconv import Convert 12 | from vsmasktools import max_planes, region_rel_mask 13 | from vstools import clip_async_render 14 | 15 | from .types import AnyPath 16 | 17 | core = vs.core 18 | 19 | 20 | class OCR: 21 | """OCR Interface using ocr.Recognize""" 22 | clip: vs.VideoNode 23 | coord: Tuple[int, int, int] 24 | coord_alt: Optional[Tuple[int, int, int]] 25 | thr_in: Sequence[int] 26 | thr_out: Sequence[int] 27 | thr_scd: float 28 | 29 | results: List[Tuple[int, bytes]] 30 | 31 | _brd_crop: int = 8 32 | 33 | def __init__(self, clip: vs.VideoNode, coord: Tuple[int, int, int], 34 | coord_alt: Optional[Tuple[int, int, int]] = None, 35 | thr_in: Union[int, Tuple[int, int, int]] = 225, 36 | thr_out: Union[int, Tuple[int, int, int]] = 80) -> None: 37 | """ 38 | Args: 39 | clip (vs.VideoNode): 40 | Source clip. If GRAY clip, `thr_in` and `thr_out` should be an integer. 41 | 42 | coord (Tuple[int, int, int]): 43 | Tuple of coordinates following the syntax: width, height, margin vertical from the bottom 44 | 45 | coord_alt (Optional[Tuple[int, int, int]], optional): 46 | Tuple of alternate coordinates following the syntax: width, height, margin vertical from the top. 47 | Defaults to None 48 | 49 | thr_in (Union[int, Tuple[int, int, int]], optional): 50 | Threshold for subtitles representing the minimum inline brightness. 51 | Defaults to 225. 52 | 53 | thr_out (Union[int, Tuple[int, int, int]], optional): 54 | Threshold for subtitles representing the maximum outline brightness. 55 | Defaults to 80. 
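
        Example (adapted from examples/example_ocr.py in this repository):
            ocr = OCR(get_y(clip), (1900, 125, 70), coord_alt=(1500, 125, 70))
            ocr.preview_cropped.set_output(0)
            ocr.launch(language='fra+eng')
            ocr.write_ass('output.ass')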
56 | """ 57 | assert clip.format 58 | 59 | self.clip = clip 60 | 61 | self.coord = coord 62 | self.coord_alt = coord_alt 63 | 64 | self.thr_in = thr_in if isinstance(thr_in, tuple) else [thr_in] 65 | self.thr_out = thr_out if isinstance(thr_out, tuple) else [thr_out] 66 | 67 | if len(set([clip.format.num_planes, len(self.thr_in), len(self.thr_out)])) > 1: 68 | raise ValueError('OCR: number of thr_in and thr_out values must correspond to the number of clip planes!') 69 | 70 | def launch(self, datapath: Optional[str] = None, language: Optional[str] = None, 71 | options: Optional[Sequence[str]] = None) -> None: 72 | """http://www.vapoursynth.com/doc/plugins/ocr.html 73 | 74 | Args: 75 | datapath (Optional[str], optional): 76 | Path to a folder containing a “tessdata” folder, in which Tesseract’s data files must be found. 77 | Must have a trailing slash. 78 | Defaults to None. 79 | 80 | language (Optional[str], optional): 81 | An ISO 639-3 language string. 82 | Uses Tesseract’s default language if unset (usually eng). 83 | Defaults to None. 84 | 85 | options (Optional[Sequence], optional): 86 | Options to be passed to Tesseract, as a list of (key, value) pairs. 87 | Defaults to None. 88 | """ 89 | ppclip = self._cleaning(self._cropping(self.clip, self.coord, False)).resize.Point(format=vs.GRAY8) 90 | ocred = core.ocr.Recognize(ppclip, datapath, language, options) 91 | self.results = [] 92 | self._do_ocr(ppclip, ocred) 93 | del ppclip, ocred 94 | 95 | if self.coord_alt: 96 | ppclip_alt = self._cleaning(self._cropping(self.clip, self.coord_alt, True)).resize.Point(format=vs.GRAY8) 97 | ocred_alt = core.ocr.Recognize(ppclip_alt, datapath, language, options) 98 | self._do_ocr(ppclip_alt, ocred_alt) 99 | del ppclip_alt, ocred_alt 100 | 101 | def _do_ocr(self, ppclip: vs.VideoNode, ocred: vs.VideoNode) -> None: 102 | def _select_clips(n: int, f: vs.VideoFrame, clips: List[vs.VideoNode]) -> vs.VideoNode: 103 | return clips[1] if f.props['PlaneStatsMax'] > 0 else clips[0].std.BlankClip(1, 1) # type: ignore 104 | 105 | ocred = core.std.FrameEval( 106 | core.std.Splice([ppclip[:-1], ppclip.std.BlankClip(1, 1, length=1)], True), 107 | partial(_select_clips, clips=[ppclip, ocred]), 108 | prop_src=ppclip.std.PlaneStats() 109 | ) 110 | 111 | results: Set[Tuple[int, bytes]] = set() 112 | 113 | def _callback(n: int, f: vs.VideoFrame) -> None: 114 | if (prop_ocr := 'OCRString') in f.props.keys(): 115 | results.add((n, f.props[prop_ocr])) # type: ignore 116 | 117 | clip_async_render(ocred, progress='OCRing clip...', callback=_callback) 118 | self.results += sorted(results) 119 | 120 | def write_ass( 121 | self, output: AnyPath, 122 | string_replace: List[Tuple[str, str]] = [ 123 | ('_', '-'), ('…', '...'), ('‘', "'"), ('’', "'"), (" '", "'") 124 | ] 125 | ) -> None: 126 | """Write results as a readable ass file. 127 | 128 | Args: 129 | output (AnyPath): Output path 130 | 131 | string_replace (List[Tuple[str, str]], optional): 132 | List of strings you want to replace. 133 | Defaults to [ ('_', '-'), ('…', '...'), ('‘', "'"), ('’', "'"), (" '", "'") ]. 
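
        Example (a minimal sketch; the replacement pairs are illustrative and
        taken from examples/example_ocr.py):
            ocr.write_ass('output.ass', [('_', '-'), ('@', 'O')])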
134 | """ 135 | resultsd: Dict[int, Tuple[int, str]] = {} 136 | for frame, string_byte in sorted(self.results): 137 | nstring = string_byte.decode('utf-8').replace('\n', '\\N') 138 | for r in string_replace: 139 | nstring = nstring.replace(*r) 140 | resultsd[frame] = (frame + 1, nstring) 141 | 142 | results_s = sorted(resultsd.items(), reverse=True) 143 | 144 | for (start1, (end1, string1)), (start2, (end2, string2)) in zip(results_s, results_s[1:]): 145 | if string1 == string2 and end2 == start1: 146 | resultsd[start2] = (max(end1, resultsd[start1][0]), string1) 147 | del resultsd[start1] 148 | 149 | fps = self.clip.fps 150 | 151 | with open(output, 'w', encoding='utf-8-sig') as ass: 152 | ass.write('[Events]\n') 153 | ass.write('Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n') 154 | for s, (e, string) in sorted(resultsd.items()): 155 | if string: 156 | ass.write(f'Dialogue: 0,{Convert.f2assts(s, fps)},{Convert.f2assts(e, fps)},Default,,0,0,0,,{string}\n') 157 | 158 | def _cropping(self, clip: vs.VideoNode, c: Tuple[int, int, int], alt: bool) -> vs.VideoNode: 159 | cw, ch, h = c 160 | wcrop = (self.clip.width - cw) / 2 161 | hcrop = h if alt else self.clip.height - ch - h 162 | 163 | return clip.std.CropAbs(cw, ch, round(wcrop), hcrop) 164 | 165 | def _cleaning(self, clip: vs.VideoNode) -> vs.VideoNode: 166 | clip_black = clip.std.BlankClip( 167 | clip.width - self._brd_crop, clip.height - self._brd_crop 168 | ) 169 | square = core.std.AddBorders( 170 | clip_black, *(int(self._brd_crop / 2), ) * 4, 171 | color=[(1 << clip.format.bits_per_sample) - 1] * clip_black.format.num_planes # type: ignore 172 | ) 173 | 174 | white_raw = clip.std.Binarize(self.thr_in) 175 | bright_raw = clip.std.Binarize(self.thr_out) 176 | 177 | bright_out = core.std.Expr([bright_raw, square], 'x y min') 178 | bright_not = core.misc.Hysteresis(bright_out, bright_raw).std.InvertMask() 179 | white_txt = core.std.MaskedMerge(clip.std.BlankClip(), white_raw, bright_not) 180 | 181 | white_txt = max_planes(white_txt) 182 | 183 | try: 184 | return white_txt.rgvs.RemoveGrain(3).rgvs.RemoveGrain(3) 185 | except vs.Error: 186 | return white_txt.rgsf.RemoveGrain(3).rgsf.RemoveGrain(3) 187 | 188 | @property 189 | def preview_cropped(self) -> vs.VideoNode: 190 | cmask = self._compute_preview_cropped(self.coord, False) 191 | 192 | if self.coord_alt: 193 | cmask_alt = self._compute_preview_cropped(self.coord_alt, True) 194 | cmask = core.std.Lut2(cmask, cmask_alt, function=lambda x, y: max(x, y)) 195 | 196 | return core.std.MaskedMerge( 197 | core.std.Lut(self.clip, function=lambda x: round(x / 2)), 198 | self.clip, cmask 199 | ) 200 | 201 | def _compute_preview_cropped(self, c: Tuple[int, int, int], alt: bool) -> vs.VideoNode: 202 | cw, ch, h = c 203 | wcrop = (self.clip.width - cw) / 2 204 | left, right = math.ceil(wcrop), math.floor(wcrop) 205 | hcrop = self.clip.height - ch - h, h 206 | if alt: 207 | hcrop = hcrop[::-1] 208 | return region_rel_mask( 209 | self.clip.std.BlankClip(format=vs.GRAY8, color=255), 210 | left, right, *hcrop 211 | ) 212 | 213 | @property 214 | def preview_cleaned(self) -> vs.VideoNode: 215 | cclip = self._cleaning(self._cropping(self.clip, self.coord, False)) 216 | 217 | if self.coord_alt: 218 | cclip_alt = self._cleaning(self._cropping(self.clip, self.coord_alt, True)) 219 | else: 220 | return cclip 221 | 222 | try: 223 | return core.std.StackVertical([cclip_alt, cclip]) 224 | except vs.Error: 225 | if cclip.width > cclip_alt.width: 226 | cclip_alt = 
core.std.AddBorders(cclip_alt, right=cclip.width - cclip_alt.width)
227 |             else:
228 |                 cclip = core.std.AddBorders(cclip, right=cclip_alt.width - cclip.width)
229 |             return core.std.StackVertical([cclip_alt, cclip])
230 | 
--------------------------------------------------------------------------------
/vardefunc/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ichunjo/vardefunc/d8c695e052acd163181fc77da4622e9c53956383/vardefunc/py.typed
--------------------------------------------------------------------------------
/vardefunc/scale.py:
--------------------------------------------------------------------------------
1 | """(Up/De)scaling functions"""
2 | from __future__ import annotations
3 | 
4 | from abc import abstractmethod
5 | from functools import cached_property, wraps
6 | from math import floor
7 | from typing import TYPE_CHECKING, Any, Callable, NamedTuple, TypeAlias, Union, cast
8 | 
9 | from vsaa import Nnedi3
10 | from vsexprtools import ExprOp, norm_expr
11 | from vskernels import Bilinear, BorderHandling, Hermite, Kernel, KernelT, Scaler, ScalerT
12 | from vskernels.types import LeftShift, TopShift
13 | from vsmasktools import FDoG, FDoGTCanny, KirschTCanny, Morpho, XxpandMode, region_abs_mask, region_rel_mask
14 | from vsmasktools.utils import _get_region_expr
15 | from vsrgtools import RemoveGrainMode, bilateral, box_blur, gauss_blur, removegrain
16 | from vsscale import PlaceboShader
17 | from vstools import (
18 |     ChromaLocation, ColorRange, ConstantFormatVideoNode, DitherType, FieldBased, FieldBasedT,
19 |     GenericVSFunction, KwargsT, VSFunction, check_variable, core, depth, expect_bits,
20 |     get_peak_value, get_w, get_y, initialize_clip, iterate, join, mod2, scale_value, split, vs
21 | )
22 | 
23 | from .types import Count
24 | from .vsjet_proxy import is_preview
25 | 
26 | __all__ = [
27 |     'BaseRescale', 'Rescale', 'RescaleFrac',
28 |     'RescaleCropBase', 'RescaleCropRel', 'RescaleCropAbs',
29 |     'RescaleInter', 'MixedRescale'
30 | ]
31 | 
32 | 
33 | 
34 | 
35 | RescaleFunc = Callable[["BaseRescale", vs.VideoNode], vs.VideoNode]
36 | 
37 | class BaseRescale:
38 |     """
39 |     A rewritten DescaleTarget class
40 |     """
41 |     clip: ConstantFormatVideoNode
42 |     clipy: ConstantFormatVideoNode
43 | 
44 |     width: int
45 |     height: int
46 |     src_top: TopShift
47 |     src_left: LeftShift
48 |     src_width: float
49 |     src_height: float
50 | 
51 |     kernel: Kernel
52 |     upscaler: Scaler
53 |     downscaler: Scaler
54 | 
55 |     border_handling: BorderHandling
56 | 
57 |     if TYPE_CHECKING:
58 |         class VideoNodeWithChromaEmbed(vs.VideoNode):
59 |             def __init__(*args: Any, **kwargs: Any) -> None:
60 |                 ...
61 | 
62 |             def with_chroma(self, chroma: vs.VideoNode | None = None) -> vs.VideoNode:
63 |                 ...
64 | else: 65 | class VideoNodeWithChromaEmbed: 66 | def __init__(self, luma: vs.VideoNode, chroma: vs.VideoNode | None) -> None: 67 | self.luma = luma 68 | self.chroma = chroma 69 | 70 | def __getattr__(self, __name: str) -> Any: 71 | return getattr(self.luma, __name) 72 | 73 | def __add__(self, other): 74 | return self.luma.__add__(other) 75 | 76 | def __radd__(self, other): 77 | return self.luma.__radd__(other) 78 | 79 | def __mul__(self, other: int): 80 | return self.luma.__mul__(other) 81 | 82 | def __rmul__(self, other: int): 83 | return self.luma.__rmul__(other) 84 | 85 | def __getitem__(self, index: int | slice, /): 86 | return self.luma.__getitem__(index) 87 | 88 | def __len__(self) -> int: 89 | return self.luma.__len__() 90 | 91 | def with_chroma(self, chroma: vs.VideoNode | None = None) -> vs.VideoNode: 92 | if not (chroma or (self.chroma and self.chroma.format.color_family == vs.YUV)): 93 | return self.luma 94 | 95 | chroma = initialize_clip(chroma or self.chroma, -1) 96 | withchroma = join(self.luma, chroma) 97 | withchroma = core.akarin.PropExpr( 98 | [withchroma, chroma], 99 | lambda: dict(_ChromaLocation='y._ChromaLocation', _SARNum='y._SARNum', _SARDen='y._SARDen') 100 | ) 101 | return withchroma 102 | 103 | def __init__( 104 | self, 105 | clip: vs.VideoNode, 106 | /, 107 | height: int, 108 | kernel: KernelT, 109 | upscaler: ScalerT = Nnedi3, 110 | downscaler: ScalerT = Hermite(linear=True), 111 | width: int | None = None, 112 | shift: tuple[TopShift, LeftShift] = (0, 0), 113 | border_handling: BorderHandling = BorderHandling.MIRROR 114 | ) -> None: 115 | """ 116 | Initialize the rescaling process. 117 | 118 | :param clip: Clip to be rescaled. 119 | :param height: Height to be descaled to. 120 | :param kernel: Kernel used for descaling. 121 | :param upscaler: Scaler that supports doubling, defaults to Nnedi3 122 | :param downscaler: Scaler used for downscaling the upscaled clip back to input res, defaults to Hermite(linear=True) 123 | :param width: Width to be descaled to, defaults to None 124 | :param shift: Shifts to apply during descale and upscale, defaults to (0, 0) 125 | :param border_handling: Adjust the way the clip is padded internally during the scaling process. 126 | Accepted values are: 127 | 0: Assume the image was resized with mirror padding. 128 | 1: Assume the image was resized with zero padding. 129 | 2: Assume the image was resized with extend padding, where the outermost row was extended infinitely far. 
130 | Defaults to 0 131 | """ 132 | assert check_variable(clip, self.__class__) 133 | self.clip = clip 134 | self.clipy = get_y(clip) #type: ignore[assignment] 135 | 136 | self.height = height 137 | if not width: 138 | self.width = get_w(self.height, self.clipy) 139 | else: 140 | self.width = width 141 | self.src_top, self.src_left = [float(x) for x in shift][:2] 142 | self.src_width = float(self.width) 143 | self.src_height = float(self.height) 144 | 145 | self.kernel = Kernel.ensure_obj(kernel) 146 | self.upscaler = Scaler.ensure_obj(upscaler) 147 | 148 | self.downscaler = Scaler.ensure_obj(downscaler) 149 | 150 | self.border_handling = self.kernel.kwargs.pop("border_handling", border_handling) 151 | 152 | @cached_property 153 | def descale(self) -> vs.VideoNode: 154 | """Returns the descaled clip""" 155 | return self._generate_descale(self.clipy) 156 | 157 | @cached_property 158 | def rescale(self) -> vs.VideoNode: 159 | """Returns the descaled clip upscaled back with the specified kernel""" 160 | return self._generate_rescale(self.descale) 161 | 162 | @cached_property 163 | def doubled(self) -> vs.VideoNode: 164 | """Returns the doubled clip""" 165 | return self._generate_doubled(self.descale) 166 | 167 | @cached_property 168 | def upscale(self) -> VideoNodeWithChromaEmbed: 169 | """Returns the upscaled clip""" 170 | return self.VideoNodeWithChromaEmbed(self._generate_upscale(self.doubled), self.clip) 171 | 172 | def _trydelattr(self, attr: str) -> None: 173 | try: 174 | delattr(self, attr) 175 | except AttributeError: 176 | pass 177 | 178 | def __delattr__(self, __name: str) -> None: 179 | match __name: 180 | case 'descale': 181 | self._trydelattr('rescale') 182 | self._trydelattr('doubled') 183 | case 'doubled': 184 | self._trydelattr('upscale') 185 | case _: 186 | pass 187 | delattr(self, __name) 188 | 189 | def diff(self, clip: vs.VideoNode, expr: str = 'x y - abs dup 0.015 > swap 0 ?') -> vs.VideoNode: 190 | return norm_expr((depth(self.rescale, 32), depth(get_y(clip), 32)), expr).std.Crop(5, 5, 5, 5).std.PlaneStats() 191 | 192 | @staticmethod 193 | def _add_props(function: RescaleFunc) -> RescaleFunc: 194 | @wraps(function) 195 | def wrap(self: BaseRescale, clip: vs.VideoNode) -> vs.VideoNode: 196 | w, h = (f"{int(d)}" if d.is_integer() else f"{d:.2f}" for d in [self.src_width, self.src_height]) 197 | return core.std.SetFrameProp( 198 | function(self, clip), 199 | "VdfRescale" + function.__name__.split('_')[-1].capitalize() + 'From', 200 | data=f'{self.kernel.__class__.__name__} - {w} x {h}' 201 | ) 202 | return wrap 203 | 204 | # generate 205 | @_add_props 206 | def _generate_descale(self: BaseRescale, clip: vs.VideoNode) -> vs.VideoNode: 207 | return self.kernel.descale(clip, **self.scale_args._asdict(), border_handling=self.border_handling) 208 | 209 | @_add_props 210 | def _generate_rescale(self: BaseRescale, clip: vs.VideoNode) -> vs.VideoNode: 211 | return self.kernel.scale( 212 | clip, 213 | self.clip.width, self.clip.height, 214 | src_left=self.src_left, 215 | src_top=self.src_top, 216 | src_width=self.src_width - ((clip.width - self.width) if self.src_width.is_integer() else 0), 217 | src_height=self.src_height - ((clip.height - self.height) if self.src_height.is_integer() else 0), 218 | border_handling=self.border_handling 219 | ) 220 | 221 | @_add_props 222 | def _generate_doubled(self: BaseRescale, clip: vs.VideoNode) -> vs.VideoNode: 223 | return self.upscaler.multi(clip, 2) 224 | 225 | @_add_props 226 | def _generate_upscale(self: BaseRescale, clip: vs.VideoNode) -> 
vs.VideoNode: 227 | return self.downscaler.scale( 228 | clip, 229 | **{k: v * 2 for k, v in self.scale_args._asdict().items()} | KwargsT(width=self.clip.width, height=self.clip.height) 230 | ) 231 | 232 | class _ScaleArgs(NamedTuple): 233 | width: int 234 | height: int 235 | src_top: float 236 | src_left: float 237 | src_width: float 238 | src_height: float 239 | 240 | @property 241 | def scale_args(self) -> _ScaleArgs: 242 | """Scaling arguments""" 243 | return self._ScaleArgs(self.width, self.height, self.src_top, self.src_left, self.src_width, self.src_height) 244 | 245 | 246 | class Rescale(BaseRescale): 247 | _line_mask: vs.VideoNode | None 248 | _credit_mask: vs.VideoNode | None 249 | 250 | def __init__( 251 | self, 252 | clip: vs.VideoNode, 253 | /, 254 | height: int, 255 | kernel: KernelT, 256 | upscaler: ScalerT = Nnedi3, 257 | downscaler: ScalerT = Hermite(linear=True), 258 | width: int | None = None, 259 | shift: tuple[TopShift, LeftShift] = (0, 0), 260 | border_handling: BorderHandling = BorderHandling.MIRROR, 261 | ) -> None: 262 | """ 263 | Initialize the rescaling process. 264 | 265 | :param clip: Clip to be rescaled. 266 | :param height: Height to be descaled to. 267 | :param kernel: Kernel used for descaling. 268 | :param upscaler: Scaler that supports doubling, defaults to Nnedi3 269 | :param downscaler: Scaler used for downscaling the upscaled clip back to input res, defaults to Hermite(linear=True) 270 | :param width: Width to be descaled to, defaults to None 271 | :param shift: Shifts to apply during descale and upscale, defaults to (0, 0) 272 | :param border_handling: Adjust the way the clip is padded internally during the scaling process. 273 | Accepted values are: 274 | 0: Assume the image was resized with mirror padding. 275 | 1: Assume the image was resized with zero padding. 276 | 2: Assume the image was resized with extend padding, where the outermost row was extended infinitely far. 
277 |                                 Defaults to 0
278 |         """
279 |         super().__init__(clip, height, kernel, upscaler, downscaler, width, shift, border_handling)
280 | 
281 |         self._line_mask = None
282 |         self._credit_mask = None
283 |         # self.default_line_mask()
284 | 
285 |     def _generate_upscale(self, clip: vs.VideoNode) -> vs.VideoNode:
286 |         upscale = depth(super()._generate_upscale(clip), self.clip)
287 |         if self._line_mask or self.border_handling:
288 |             upscale = core.std.MaskedMerge(self.clipy, upscale, self.line_mask).std.CopyFrameProps(upscale)
289 |         if self._credit_mask:
290 |             upscale = core.std.MaskedMerge(upscale, self.clipy, self.credit_mask)
291 |         return upscale
292 | 
293 |     # LINEMASK
294 |     @property
295 |     def line_mask(self) -> vs.VideoNode:
296 |         if self._line_mask:
297 |             _line_mask = self._line_mask
298 |         else:
299 |             _line_mask = self.clipy.std.BlankClip(color=get_peak_value(self.clipy)).std.SetFrameProps(BlankClip=1)
300 | 
301 |         if self.border_handling:
302 |             px = (self.kernel.kernel_radius, ) * 4
303 |             _line_mask = norm_expr(
304 |                 _line_mask,
305 |                 _get_region_expr(_line_mask, *px, replace=str(get_peak_value(_line_mask)) + " x")
306 |             )
307 | 
308 |         self._line_mask = _line_mask
309 | 
310 |         return self._line_mask
311 | 
312 |     @line_mask.setter
313 |     def line_mask(self, mask: vs.VideoNode | None) -> None:
314 |         if not mask:
315 |             self._line_mask = None
316 |         else:
317 |             self._line_mask = depth(mask, self.clipy, dither_type=DitherType.NONE)
318 | 
319 |     @line_mask.deleter
320 |     def line_mask(self) -> None:
321 |         self._line_mask = None
322 | 
323 |     def default_line_mask(self, clip: vs.VideoNode | None = None, scaler: ScalerT = Bilinear) -> vs.VideoNode:
324 |         """
325 |         Load a default Kirsch line mask in the class instance. Additionally, it is returned.
326 | 
327 |         :param clip:        Reference clip, defaults to luma source clip if None.
328 |         :param scaler:      Scaler used for matching the source clip format, defaults to Bilinear
329 |         :return:            Generated mask.
330 |         """
331 |         line_mask = KirschTCanny.edgemask(clip if clip else self.clipy).std.Maximum().std.Minimum()
332 |         line_mask = Scaler.ensure_obj(scaler).scale(line_mask, self.clipy.width, self.clipy.height, format=self.clipy.format)
333 |         self.line_mask = line_mask
334 |         return self.line_mask
335 | 
336 |     def placebo_line_mask(
337 |         self, clip: vs.VideoNode | None = None,
338 |         lthrs: tuple[float, float] = (0.2, 0.2),
339 |         multis: tuple[float, float] = (1.2, 0.3),
340 |         scaler: ScalerT = Bilinear,
341 |         **kwargs: Any
342 |     ) -> vs.VideoNode:
343 |         """
344 |         Load a combination of FDoG ridge and edge masks. Additionally, it is returned.
345 | 
346 |         :param clip:        Reference clip, defaults to luma source clip if None.
347 |         :param scaler:      Scaler used for matching the source clip format, defaults to Bilinear
348 |         :return:            Generated mask.
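        Example:
            A hedged sketch; ``rs`` stands for an existing Rescale instance and
            the threshold values are placeholders.

                rs.placebo_line_mask(lthrs=(0.2, 0.2), multis=(1.2, 0.3))
                final = rs.upscale.with_chroma()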
349 |         """
350 |         clip = clip if clip else self.clip
351 |         clipy = get_y(clip) if clip else self.clipy
352 |         scaler = Scaler.ensure_obj(scaler)
353 | 
354 |         hthrs = kwargs.setdefault("hthrs", (None, None))
355 |         clamps = kwargs.setdefault("clamps", (False, False))
356 |         planes = kwargs.setdefault("planes", (None, None))
357 | 
358 |         edgemask = FDoGTCanny.edgemask(clip, lthrs[0], hthrs[0], multis[0], clamps[0], planes[0]).std.Maximum().std.Minimum()
359 |         edgemask = ColorRange.FULL.apply(edgemask)
360 |         edgemask = ExprOp.ADD.combine(
361 |             scaler.scale(c, edgemask.width, edgemask.height, format=vs.GRAYS) for c in split(edgemask)
362 |         )
363 | 
364 |         ridgemask = FDoG.ridgemask(depth(clipy, 32), lthrs[1], hthrs[1], multis[1], clamps[1], planes[1]).std.Maximum().std.Minimum()
365 | 
366 |         mask = core.akarin.Expr([edgemask, ridgemask], 'x y 0 max + 0 1 clamp')
367 |         mask = scaler.scale(mask, self.clipy.width, self.clipy.height, format=self.clipy.format)
368 | 
369 |         self.line_mask = box_blur(mask)
370 |         return self.line_mask
371 | 
372 |     def vodes_line_mask(
373 |         self,
374 |         clip: vs.VideoNode | None = None, scaler: ScalerT = Bilinear,
375 |         lthr: float | None = None, hthr: float | None = None
376 |     ) -> vs.VideoNode:
377 |         """
378 |         Load the DescaleTarget default line mask
379 | 
380 |         :param clip:        Reference clip, defaults to luma source clip if None.
381 |         :param scaler:      Scaler used for matching the source clip format, defaults to Bilinear
382 |         :param lthr:        Low threshold
383 |         :param hthr:        High threshold
384 |         :return:            Generated mask.
385 |         """
386 |         scaler = Scaler.ensure_obj(scaler)
387 |         mask = KirschTCanny.edgemask(
388 |             get_y(clip) if clip else self.clipy,
389 |             scale_value(80, 8, 32) if not lthr else lthr,
390 |             scale_value(150, 8, 32) if not hthr else hthr
391 |         )
392 |         self.line_mask = scaler.scale(mask, self.clipy.width, self.clipy.height, format=self.clipy.format)
393 |         return self.line_mask
394 | 
395 |     # CREDITMASK
396 |     @property
397 |     def credit_mask(self) -> vs.VideoNode:
398 |         if self._credit_mask:
399 |             return self._credit_mask
400 |         self.credit_mask = self.clipy.std.BlankClip().std.SetFrameProps(BlankClip=1)
401 |         return self.credit_mask
402 | 
403 |     @credit_mask.setter
404 |     def credit_mask(self, mask: vs.VideoNode | None) -> None:
405 |         if not mask:
406 |             self._credit_mask = None
407 |         else:
408 |             self._credit_mask = depth(mask, self.clipy, dither_type=DitherType.NONE)
409 | 
410 |     @credit_mask.deleter
411 |     def credit_mask(self) -> None:
412 |         self._credit_mask = None
413 | 
414 |     def default_credit_mask(
415 |         self, rescale: vs.VideoNode | None = None, src: vs.VideoNode | None = None,
416 |         thr: float = 0.216, blur: float | KwargsT | None = None,
417 |         prefilter: int | KwargsT | bool | VSFunction = False,
418 |         postfilter: int | tuple[Count, RemoveGrainMode] | list[tuple[Count, RemoveGrainMode]] | VSFunction = 2,
419 |         ampl_expr: str | None = None,
420 |         expand: int = 2
421 |     ) -> vs.VideoNode:
422 |         """
423 |         Load a credit mask based on vsmasktools.credit_mask and vsmasktools.diff_rescale
424 | 
425 |         :param rescale:     Rescaled clip, defaults to rescaled instance clip
426 |         :param src:         Source clip, defaults to source instance clip
427 |         :param thr:         Threshold of the amplification expr, defaults to 0.216
428 |         :param blur:        Sigma of the gaussian blur applied before prefilter, defaults to None
429 |         :param prefilter:   Filter applied before extracting the difference between rescale and src
430 |                             int -> equivalent of number of taps used in the bilateral call applied to clips
431 |                             True -> 5 taps
432 |                             KwargsT -> Arguments passed to the bilateral function
433 |         :param postfilter:  Filter applied to the difference clip. Default is RemoveGrainMode.MINMAX_AROUND2 applied twice.
434 |         :param ampl_expr:   Amplification expression.
435 |         :param expand:      Additional expand radius applied to the mask, defaults to 2
436 |         :return:            Generated mask
437 |         """
438 |         if not src:
439 |             src = self.clip
440 |         if not rescale:
441 |             rescale = self.rescale
442 | 
443 |         src, rescale = get_y(src), get_y(rescale)
444 | 
445 |         if blur:
446 |             if isinstance(blur, dict):
447 |                 src, rescale = gauss_blur(src, **blur), gauss_blur(rescale, **blur)
448 |             else:
449 |                 src, rescale = gauss_blur(src, blur), gauss_blur(rescale, blur)
450 | 
451 |         if prefilter:
452 |             if callable(prefilter):
453 |                 src, rescale = prefilter(src), prefilter(rescale)
454 |             else:
455 |                 if isinstance(prefilter, int):
456 |                     sigma = 5 if prefilter is True else prefilter
457 |                     kwargs = KwargsT(sigmaS=((sigma ** 2 - 1) / 12) ** 0.5, sigmaR=sigma / 10)
458 |                 else:
459 |                     kwargs = prefilter
460 | 
461 |                 src, rescale = bilateral(src, **kwargs), bilateral(rescale, **kwargs)
462 | 
463 |         pre, bits = expect_bits(src, 32)
464 |         rescale = depth(rescale, 32)
465 | 
466 |         diff = ExprOp.mae(src)(pre, rescale)
467 | 
468 |         if postfilter:
469 |             if isinstance(postfilter, int):
470 |                 mask = iterate(diff, removegrain, postfilter, RemoveGrainMode.MINMAX_AROUND2)
471 |             elif isinstance(postfilter, tuple):
472 |                 mask = iterate(diff, removegrain, postfilter[0], postfilter[1])
473 |             elif isinstance(postfilter, list):
474 |                 mask = diff
475 |                 for count, rgmode in postfilter:
476 |                     mask = iterate(mask, removegrain, count, rgmode)
477 |             else:
478 |                 mask = postfilter(diff)
479 | 
480 |         mask = mask.std.Expr(ampl_expr or f'x 2 4 pow * {thr} < 0 1 ?')
481 | 
482 |         mask = Morpho.expand(mask, 2 + expand, mode=XxpandMode.ELLIPSE).std.Deflate()
483 | 
484 |         mask = ColorRange.FULL.apply(mask)
485 | 
486 |         self.credit_mask = depth(mask, bits, dither_type=DitherType.NONE)
487 |         return self.credit_mask
488 | 
489 |     def vodes_credit_mask(self, rescale: vs.VideoNode | None = None, src: vs.VideoNode | None = None, thr: float = 0.04) -> vs.VideoNode:
490 |         """
491 |         Load the DescaleTarget default credit mask
492 | 
493 |         :param rescale:     Rescaled clip, defaults to rescaled instance clip
494 |         :param src:         Source clip, defaults to source instance clip
495 |         :param thr:         Threshold of difference, defaults to 0.04
496 |         :return:            Generated mask.
497 |         """
498 |         if not src:
499 |             src = self.clip
500 |         if not rescale:
501 |             rescale = self.rescale
502 |         credit_mask = core.akarin.Expr([depth(src, 32), depth(rescale, 32)], f'x y - abs {thr} < 0 1 ?')
503 |         credit_mask = depth(credit_mask, 16, range_in=ColorRange.FULL, range_out=ColorRange.FULL, dither_type=DitherType.NONE)
504 |         credit_mask = credit_mask.rgvs.RemoveGrain(6).std.Maximum().std.Maximum().std.Inflate().std.Inflate()
505 |         self.credit_mask = credit_mask
506 |         return self.credit_mask
507 | 
508 | 
509 | class RescaleFrac(Rescale):
510 |     base_width: int
511 |     base_height: int
512 | 
513 |     def __init__(
514 |         self,
515 |         clip: vs.VideoNode,
516 |         /,
517 |         height: float,
518 |         kernel: KernelT,
519 |         base_height: int,
520 |         upscaler: ScalerT = Nnedi3,
521 |         downscaler: ScalerT = Hermite(linear=True),
522 |         width: float | None = None,
523 |         base_width: int | None = None,
524 |         shift: tuple[TopShift, LeftShift] = (0, 0),
525 |         border_handling: BorderHandling = BorderHandling.MIRROR,
526 |     ) -> None:
527 |         """
528 |         Initialize the rescaling process.
529 | 
530 |         :param clip:            Clip to be rescaled.
531 |         :param height:          Float height to be descaled to.
532 |         :param kernel:          Kernel used for descaling.
533 |         :param base_height:     Integer height at which the clip will be contained
534 |         :param upscaler:        Scaler that supports doubling, defaults to Nnedi3
535 |         :param downscaler:      Scaler used for downscaling the upscaled clip back to input res, defaults to Hermite(linear=True)
536 |         :param width:           Float width to be descaled to, defaults to None
537 |         :param base_width:      Integer width at which the clip will be contained, defaults to None
538 |         :param shift:           Shifts to apply during descale and upscale, defaults to (0, 0)
539 |         :param border_handling: Adjust the way the clip is padded internally during the scaling process.
540 |                                 Accepted values are:
541 |                                     0: Assume the image was resized with mirror padding.
542 |                                     1: Assume the image was resized with zero padding.
543 |                                     2: Assume the image was resized with extend padding, where the outermost row was extended infinitely far.
544 |                                 Defaults to 0
545 |         """
546 |         self.base_height = base_height
547 |         if not base_width:
548 |             self.base_width = get_w(self.base_height, clip)
549 |         else:
550 |             self.base_width = base_width
551 | 
552 |         if not width:
553 |             width = height * clip.width / clip.height
554 | 
555 |         cropped_width = self.base_width - 2 * floor((self.base_width - width) / 2)
556 |         cropped_height = self.base_height - 2 * floor((self.base_height - height) / 2)
557 |         self.width = cropped_width
558 |         self.height = cropped_height
559 |         self.src_top = (cropped_height - height) / 2 + shift[0]
560 |         self.src_left = (cropped_width - width) / 2 + shift[1]
561 | 
562 |         super().__init__(
563 |             clip, self.height, kernel, upscaler, downscaler, self.width,
564 |             (self.src_top, self.src_left), border_handling
565 |         )
566 | 
567 |         self.src_width = width
568 |         self.src_height = height
569 | 
570 |     def default_credit_mask(
571 |         self, rescale: vs.VideoNode | None = None, src: vs.VideoNode | None = None,
572 |         thr: float = 0.216, blur: float | KwargsT | None = None,
573 |         prefilter: int | KwargsT | bool | VSFunction = False,
574 |         postfilter: int | tuple[Count, RemoveGrainMode] | list[tuple[Count, RemoveGrainMode]] | VSFunction = 2,
575 |         ampl_expr: str | None = None,
576 |         expand: int = 2,
577 |         use_base_height: bool = False
578 |     ) -> vs.VideoNode:
579 |         """
580 |         Load a credit mask based on vsmasktools.credit_mask and vsmasktools.diff_rescale
581 | 
582 |         :param rescale:         Rescaled clip, defaults to rescaled instance clip
583 |         :param src:             Source clip, defaults to source instance clip
584 |         :param thr:             Threshold of the amplification expr, defaults to 0.216
585 |         :param blur:            Sigma of the gaussian blur applied before prefilter, defaults to None
586 |         :param prefilter:       Filter applied before extracting the difference between rescale and src
587 |                                 int -> equivalent of number of taps used in the bilateral call applied to clips
588 |                                 True -> 5 taps
589 |         :param postfilter:      Filter applied to the difference clip. Default is RemoveGrainMode.MINMAX_AROUND2 applied twice.
590 |         :param ampl_expr:       Amplification expression.
591 |         :param ampl_expr:       Amplification expression.
592 | :param expand: Additional expand radius applied to the mask, defaults to 2 593 | :param use_base_height: Will use a rescaled clip based on base_height instead of height 594 | :return: Generated mask 595 | """ 596 | if use_base_height: 597 | rescale = Rescale( 598 | self.clipy, self.base_height, self.kernel, 599 | width=self.base_width, border_handling=self.border_handling 600 | ).rescale 601 | 602 | return super().default_credit_mask(rescale, src, thr, blur, prefilter, postfilter, ampl_expr, expand) 603 | 604 | 605 | LeftCrop: TypeAlias = int 606 | RightCrop: TypeAlias = int 607 | TopCrop: TypeAlias = int 608 | BottomCrop: TypeAlias = int 609 | WidthCrop: TypeAlias = int 610 | HeightCrop: TypeAlias = int 611 | 612 | 613 | class RescaleCropBase(RescaleFrac): 614 | pre: vs.VideoNode 615 | crop: tuple[int, ...] 616 | 617 | crop_function: GenericVSFunction 618 | 619 | def __init__( 620 | self, 621 | clip: vs.VideoNode, 622 | /, 623 | height: float, 624 | kernel: KernelT, 625 | crop: tuple[int, ...] | None = None, 626 | upscaler: ScalerT = Nnedi3, 627 | downscaler: ScalerT = Hermite(linear=True), 628 | width: float | None = None, 629 | shift: tuple[TopShift, LeftShift] = (0, 0), 630 | border_handling: BorderHandling = BorderHandling.MIRROR, 631 | ) -> None: 632 | self.pre = clip 633 | self.crop = crop if crop else (0, 0, 0, 0) 634 | 635 | clip_cropped = self.crop_function(clip, *self.crop) 636 | 637 | if not width: 638 | if isinstance(height, int): 639 | width = get_w(height, get_y(clip)) 640 | else: 641 | width = height * clip.width / clip.height 642 | 643 | height = clip_cropped.height / (self.pre.height / height) 644 | width = clip_cropped.width / (self.pre.width / width) 645 | 646 | base_height = mod2(height) 647 | base_width = mod2(width) 648 | 649 | super().__init__(clip_cropped, height, kernel, base_height, upscaler, downscaler, width, base_width, shift, border_handling) 650 | 651 | def _generate_upscale(self, clip: vs.VideoNode) -> vs.VideoNode: 652 | white = get_y(self.pre).std.BlankClip(color=get_peak_value(self.pre)) 653 | 654 | upscale = super()._generate_upscale(clip) 655 | 656 | return core.std.MaskedMerge( 657 | upscale.std.AddBorders(*self._abs_to_rel()), 658 | get_y(self.pre), 659 | self.region_function(white, *self.crop).std.Invert() 660 | ) 661 | 662 | @cached_property 663 | def upscale(self) -> BaseRescale.VideoNodeWithChromaEmbed: 664 | """Returns the upscaled clip""" 665 | return self.VideoNodeWithChromaEmbed(self._generate_upscale(self.doubled), self.pre) 666 | 667 | @abstractmethod 668 | def _abs_to_rel(self) -> tuple[int, ...]: 669 | ... 670 | 671 | @abstractmethod 672 | def region_function(self, *args: Any, **kwargs: Any) -> vs.VideoNode: 673 | ... 
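# A hedged usage sketch of the crop-aware rescalers defined below; the clip
# variable, the kernel and the numbers are placeholders, not values from the
# library docs:
#
#     from vskernels import Bilinear
#     rs = RescaleCropRel(clip, 720, Bilinear, crop=(240, 240, 0, 0))
#     final = rs.upscale.with_chroma()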
674 | 675 | 676 | class RescaleCropRel(RescaleCropBase): 677 | crop: tuple[LeftCrop, RightCrop, TopCrop, BottomCrop] 678 | 679 | crop_function = core.lazy.std.CropRel 680 | 681 | def __init__( 682 | self, 683 | clip: vs.VideoNode, 684 | /, 685 | height: float, 686 | kernel: KernelT, 687 | crop: tuple[LeftCrop, RightCrop, TopCrop, BottomCrop], 688 | upscaler: ScalerT = Nnedi3, 689 | downscaler: ScalerT = Hermite(linear=True), 690 | width: float | None = None, 691 | shift: tuple[TopShift, LeftShift] = (0, 0), 692 | border_handling: BorderHandling = BorderHandling.MIRROR, 693 | ) -> None: 694 | super().__init__(clip, height, kernel, crop, upscaler, downscaler, width, shift, border_handling) 695 | 696 | def _abs_to_rel(self) -> tuple[int, ...]: 697 | return self.crop 698 | 699 | def region_function(self, *args: Any, **kwargs: Any) -> vs.VideoNode: 700 | return region_rel_mask(*args, **kwargs) 701 | 702 | 703 | class RescaleCropAbs(RescaleCropBase): 704 | crop: tuple[WidthCrop, HeightCrop, LeftCrop, TopCrop] 705 | 706 | crop_function = core.lazy.std.CropAbs 707 | 708 | def __init__( 709 | self, 710 | clip: vs.VideoNode, 711 | /, 712 | height: float, 713 | kernel: KernelT, 714 | crop: Union[ 715 | tuple[WidthCrop, HeightCrop], 716 | tuple[WidthCrop, HeightCrop, LeftCrop, TopCrop], 717 | ], 718 | upscaler: ScalerT = Nnedi3, 719 | downscaler: ScalerT = Hermite(linear=True), 720 | width: float | None = None, 721 | shift: tuple[TopShift, LeftShift] = (0, 0), 722 | border_handling: BorderHandling = BorderHandling.MIRROR, 723 | ) -> None: 724 | 725 | ncrop = crop + (0, ) * (4 - len(crop)) 726 | 727 | super().__init__(clip, height, kernel, ncrop, upscaler, downscaler, width, shift, border_handling) 728 | 729 | def _abs_to_rel(self) -> tuple[int, ...]: 730 | return ( 731 | self.crop[2], 732 | self.pre.width - self.crop[0] - self.crop[2], 733 | self.crop[3], 734 | self.pre.height - self.crop[1] - self.crop[3] 735 | ) 736 | 737 | def region_function(self, *args: Any, **kwargs: Any) -> vs.VideoNode: 738 | return region_abs_mask(*args, **kwargs) 739 | 740 | 741 | RescaleInterFunc = Callable[["RescaleInter", vs.VideoNode], vs.VideoNode] 742 | 743 | 744 | class RescaleInter(Rescale): 745 | field_based: FieldBased 746 | 747 | def __init__( 748 | self, 749 | clip: vs.VideoNode, 750 | /, 751 | height: int, 752 | kernel: KernelT, 753 | upscaler: ScalerT = Nnedi3, 754 | downscaler: ScalerT = Hermite(linear=True), 755 | width: int | None = None, 756 | shift: tuple[TopShift, LeftShift] = (0, 0), 757 | field_based: FieldBasedT | None = None, 758 | border_handling: BorderHandling = BorderHandling.MIRROR, 759 | ) -> None: 760 | self.field_based = FieldBased.from_param(field_based) or FieldBased.from_video(clip) 761 | super().__init__(clip, height, kernel, upscaler, downscaler, width, shift, border_handling) 762 | 763 | @staticmethod 764 | def _apply_field_based(function: RescaleInterFunc) -> RescaleInterFunc: 765 | @wraps(function) 766 | def wrap(self: RescaleInter, clip: vs.VideoNode) -> vs.VideoNode: 767 | clip = self.field_based.apply(clip) 768 | clip = function(self, clip) 769 | return FieldBased.PROGRESSIVE.apply(clip) 770 | return wrap 771 | 772 | @_apply_field_based 773 | def _generate_descale(self: RescaleInter, clip: vs.VideoNode) -> vs.VideoNode: 774 | return super()._generate_descale(clip) 775 | 776 | @_apply_field_based 777 | def _generate_rescale(self: RescaleInter, clip: vs.VideoNode) -> vs.VideoNode: 778 | return super()._generate_rescale(clip) 779 | 780 | @staticmethod 781 | def 
crossconv_shift_calc_irregular(clip: vs.VideoNode, native_height: int) -> float: 782 | return 0.25 / (clip.height / native_height) 783 | 784 | 785 | # class RescaleFracInter(RescaleInter, RescaleFrac): 786 | # ... 787 | 788 | 789 | class MixedRescale: 790 | upscaled: vs.VideoNode 791 | 792 | def __init__(self, src: vs.VideoNode, *rescales: Rescale) -> None: 793 | prop_srcs = [rs.diff(src) for rs in rescales] 794 | rescales_idx = tuple(range(len(rescales))) 795 | 796 | blank = core.std.BlankClip(None, 1, 1, vs.GRAY8, src.num_frames, keep=True) 797 | 798 | map_prop_srcs = [blank.std.CopyFrameProps(prop_src).akarin.Expr("x.PlaneStatsAverage", vs.GRAYS) for prop_src in prop_srcs] 799 | 800 | base_frame = blank.get_frame(0) 801 | 802 | class IdxFrame(NamedTuple): 803 | idx: int 804 | frame: vs.VideoFrame 805 | 806 | idx_frames = list[IdxFrame]() 807 | 808 | for idx in rescales_idx: 809 | fcurr = base_frame.copy() 810 | 811 | fcurr[0][0, 0] = idx 812 | 813 | idx_frames.append(IdxFrame(idx, fcurr)) 814 | 815 | def _select(n: int, f: list[vs.VideoFrame]) -> vs.VideoFrame: 816 | return min(idx_frames, key=lambda idx_frame: f[idx_frame.idx][0][0, 0]).frame 817 | 818 | select_clip = blank.std.ModifyFrame(map_prop_srcs, _select) 819 | 820 | def _selector(clips: list[vs.VideoNode]) -> vs.VideoNode: 821 | base = next(filter(None, clips), None) 822 | 823 | if base is None: 824 | raise ValueError("Requested clip was None") 825 | 826 | base = base.std.BlankClip(keep=True) 827 | clips = [c or base for c in clips] 828 | 829 | def _eval(n: int, f: vs.VideoFrame) -> vs.VideoNode: 830 | return clips[cast(int, f[0][0, 0])] 831 | 832 | return core.std.FrameEval(base, _eval, select_clip) 833 | 834 | self.upscaled = _selector([rs.upscale.with_chroma() for rs in rescales]) 835 | -------------------------------------------------------------------------------- /vardefunc/types.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | __all__ = ['DuplicateFrame'] 4 | 5 | from fractions import Fraction 6 | from os import PathLike 7 | from typing import Any, Callable, Dict, List, Protocol, Sequence, Tuple, TypeAlias, TypeVar, Union 8 | 9 | from numpy import array as np_array 10 | from numpy import c_, int8, int16, int32, uint8, uint16, uint32 11 | from numpy.typing import NDArray 12 | from pytimeconv import Convert 13 | from vapoursynth import VideoNode 14 | from vstools import vs 15 | 16 | Range: TypeAlias = tuple[int, int] 17 | RangeN: TypeAlias = tuple[int | None, int | None] 18 | Trim: TypeAlias = RangeN 19 | 20 | Count: TypeAlias = int 21 | 22 | 23 | # Some outputs 24 | Output = Union[ 25 | VideoNode, 26 | List[VideoNode], 27 | Tuple[int, VideoNode], 28 | Tuple[int, List[VideoNode]] 29 | ] 30 | # Operator Input 31 | OpInput = Union[ 32 | VideoNode, 33 | List[VideoNode], 34 | Tuple[VideoNode, ...], 35 | Tuple[List[VideoNode], ...], 36 | Dict[str, VideoNode], 37 | Dict[str, List[VideoNode]] 38 | ] 39 | # Function Debug 40 | F_OpInput = TypeVar('F_OpInput', bound=Callable[..., OpInput]) 41 | # Any Numpy integrer 42 | AnyInt = Union[int8, int16, int32, uint8, uint16, uint32] 43 | 44 | AnyPath = Union[PathLike[str], str] 45 | 46 | 47 | class VNumpy: 48 | @staticmethod 49 | def array(obj: Union[NDArray[AnyInt], Sequence[Any]], **kwargs: Any) -> NDArray[AnyInt]: 50 | return np_array(obj, **kwargs) 51 | 52 | @classmethod 53 | def zip_arrays(cls, *arrays: NDArray[AnyInt]) -> NDArray[AnyInt]: 54 | return c_[*arrays] # type: ignore[no-any-return] 55 | 56 
57 | class DuplicateFrame(int):
58 |     """Class depicting a duplicate frame"""
59 |     dup: int
60 | 
61 |     def __new__(cls, x: int, /, dup: int = 1) -> DuplicateFrame:
62 |         df = super().__new__(cls, x)
63 |         df.dup = dup
64 |         return df
65 | 
66 |     def to_samples(self, ref_fps: Fraction, sample_rate: int) -> DuplicateFrame:
67 |         return DuplicateFrame(Convert.f2samples(int(self), ref_fps, sample_rate), dup=self.dup)
68 | 
69 |     def __repr__(self) -> str:
70 |         return f'<DuplicateFrame {super().__repr__()}, dup: {self.dup}>'
71 | 
72 |     def __str__(self) -> str:
73 |         return f'{super().__str__()} * {self.dup}'
74 | 
75 |     def __add__(self, x: int) -> DuplicateFrame:
76 |         return DuplicateFrame(self, dup=self.dup + x)
77 | 
78 |     def __sub__(self, x: int) -> DuplicateFrame:
79 |         return DuplicateFrame(self, dup=self.dup - x)
80 | 
81 |     def __mul__(self, x: int) -> DuplicateFrame:
82 |         return DuplicateFrame(self, dup=self.dup * x)
83 | 
84 |     def __floordiv__(self, x: int) -> DuplicateFrame:
85 |         return DuplicateFrame(self, dup=self.dup // x)
86 | 
87 | 
88 | _VideoFrameT_contra = TypeVar("_VideoFrameT_contra", vs.VideoFrame, list[vs.VideoFrame], contravariant=True)
89 | 
90 | 
91 | class RangesCallBack(Protocol):
92 |     def __call__(self, n: int) -> bool:
93 |         ...
94 | 
95 | class RangesCallBackF(Protocol[_VideoFrameT_contra]):
96 |     def __call__(self, f: _VideoFrameT_contra) -> bool:
97 |         ...
98 | 
99 | class RangesCallBackNF(Protocol[_VideoFrameT_contra]):
100 |     def __call__(self, n: int, f: _VideoFrameT_contra) -> bool:
101 |         ...
102 | 
103 | RangesCallBackT = Union[
104 |     RangesCallBack,
105 |     RangesCallBackF[vs.VideoFrame],
106 |     RangesCallBackNF[vs.VideoFrame],
107 |     RangesCallBackF[list[vs.VideoFrame]],
108 |     RangesCallBackNF[list[vs.VideoFrame]],
109 | ]
--------------------------------------------------------------------------------
/vardefunc/util.py:
--------------------------------------------------------------------------------
1 | """Helper functions for the main functions in this module"""
2 | from __future__ import annotations
3 | 
4 | __all__ = [
5 |     'select_frames', 'normalise_ranges', 'ranges_to_indices',
6 |     'adjust_clip_frames', 'adjust_audio_frames',
7 |     'to_incl_incl',
8 |     'to_incl_excl',
9 |     'MutableVideoNode'
10 | ]
11 | 
12 | import math
13 | import warnings
14 | 
15 | from fractions import Fraction
16 | from functools import cached_property, partial
17 | from itertools import groupby
18 | from types import NoneType
19 | from typing import Any, Callable, Iterable, MutableSequence, Optional, Self, Sequence, TypeGuard, cast, overload
20 | 
21 | import numpy as np
22 | import vapoursynth as vs
23 | 
24 | from pytimeconv import Convert
25 | from vstools import ClipsCache, FrameRangeN, FrameRangesN
26 | 
27 | from .types import AnyInt
28 | from .types import DuplicateFrame as DF
29 | from .types import NDArray, Range, RangesCallBack, Trim
30 | from .types import VNumpy as vnp
31 | 
32 | core = vs.core
33 | 
34 | 
35 | def select_frames(
36 |     clips: vs.VideoNode | Sequence[vs.VideoNode],
37 |     indices: NDArray[AnyInt] | Sequence[int] | Sequence[tuple[int, int]],
38 |     *,
39 |     mismatch: bool = False
40 | ) -> vs.VideoNode:
41 |     """
42 |     Select frames from one or more clips at specified indices.
43 | 
44 |     Passing one clip performs a frame remap, just like vstools.remap_frames.
45 |     Passing two or more clips performs a mix of the remap and replace_ranges functions.
46 | 
47 |     Original idea from EoE.
48 | 
49 |     :param clips:       A clip or a sequence of clips to select the frames from
50 |     :param indices:     Indices of frames to select.
51 | Provide a sequence of indices for a single clip, or for multiple clips, 52 | a sequence of tuples in the form ``(clip_index, frame_index)`` 53 | :param mismatch: Splicing clips with different formats or dimensions is considered an error 54 | unless mismatch is true. Defaults to False. 55 | :return: The selected frames in a single clip. 56 | """ 57 | clips = clips if isinstance(clips, Sequence) else [clips] 58 | indices = vnp.array(indices) if isinstance(indices, Sequence) else indices 59 | 60 | if indices.ndim == 1: 61 | indices = vnp.zip_arrays(np.zeros(len(indices), np.uint32), indices) 62 | elif indices.ndim == 2: 63 | pass 64 | else: 65 | raise ValueError('select_frames: only 1D and 2D array is allowed!') 66 | 67 | base = ( 68 | clips[0].std.BlankClip(length=len(indices)) 69 | if not mismatch else 70 | clips[0].std.BlankClip(length=len(indices), varsize=True, varformat=True) 71 | ) 72 | 73 | def _select_func(n: int, clips: Sequence[vs.VideoNode], indices: NDArray[AnyInt]) -> vs.VideoNode: 74 | # index: NDArray[AnyInt] = indices[n] # Get the index / num_frame pair 75 | # i_clip = int(index[0]) # Get the index 76 | # num = int(index[1]) # Get the num_frame 77 | # nclip = clips[i_clip] # Select the clip to be returned 78 | # tclip = nclip[num] # Slice the clip 79 | # return tclip 80 | return clips[int(indices[n][0])][int(indices[n][1])] 81 | 82 | return core.std.FrameEval(base, partial(_select_func, clips=clips, indices=indices)) 83 | 84 | 85 | @overload 86 | def normalise_ranges( 87 | clip: vs.VideoNode, ranges: FrameRangeN | FrameRangesN | RangesCallBack, 88 | *, 89 | norm_dups: bool = True 90 | ) -> list[Range]: 91 | ... 92 | 93 | 94 | @overload 95 | def normalise_ranges( 96 | clip: vs.AudioNode, ranges: FrameRangeN | FrameRangesN | RangesCallBack, 97 | *, 98 | norm_dups: bool = True, ref_fps: Fraction | None = None 99 | ) -> list[Range]: 100 | ... 101 | 102 | 103 | @overload 104 | def normalise_ranges( 105 | clip: None, ranges: FrameRangeN | FrameRangesN, 106 | *, 107 | norm_dups: bool = True, 108 | ) -> list[tuple[int, int | None]]: 109 | ... 110 | 111 | 112 | def normalise_ranges( 113 | clip: vs.VideoNode | vs.AudioNode | None, ranges: FrameRangeN | FrameRangesN | RangesCallBack, 114 | *, 115 | norm_dups: bool = True, ref_fps: Fraction | None = None 116 | ) -> list[Range] | list[tuple[int, int | None]]: 117 | """ 118 | Normalise ranges to a list of positive ranges following python slicing syntax `(inclusive, exclusive)` 119 | 120 | :param clip: Input clip. 121 | :param ranges: Frame range list of frame ranges, or range callbacks. 
122 | :param norm_dups: Normalise duplicated, defaults to True 123 | :param ref_fps: FPS reference when passing an AudioNode, defaults to None 124 | :return: Normalised ranges 125 | """ 126 | if isinstance(clip, vs.VideoNode): 127 | num_frames = clip.num_frames 128 | elif isinstance(clip, vs.AudioNode): 129 | if ref_fps is not None: 130 | num_frames = clip.num_samples 131 | else: 132 | num_frames = clip.num_frames 133 | else: 134 | num_frames = None 135 | 136 | if ranges is None: 137 | return [(0, num_frames)] 138 | 139 | def _resolve_ranges_type( 140 | rngs: int | tuple[int | None, int | None] | FrameRangesN | RangesCallBack 141 | ) -> Sequence[int | tuple[int | None, int | None] | None]: 142 | if isinstance(rngs, int): 143 | return [rngs] 144 | 145 | if isinstance(rngs, tuple) and len(rngs) == 2: 146 | if isinstance(rngs[0], int) or rngs[0] is None and isinstance(rngs[1], int) or rngs[1] is None: 147 | return [cast(tuple[int | None, int | None], rngs)] 148 | else: 149 | raise ValueError 150 | 151 | if callable(rngs): 152 | if not num_frames: 153 | raise ValueError 154 | 155 | cb_rngs = list[tuple[int, int]]() 156 | r = 0 157 | 158 | for i, j in groupby(rngs(n) for n in range(num_frames)): 159 | step = len(list(j)) 160 | if i: 161 | cb_rngs.append((r, r + step)) 162 | r += step 163 | return cb_rngs 164 | 165 | rngs = cast(FrameRangesN, rngs) 166 | 167 | return rngs 168 | 169 | ranges = _resolve_ranges_type(ranges) 170 | 171 | nranges = set[tuple[int, int | None]]() 172 | f2s = Convert.f2samples 173 | 174 | for r in ranges: 175 | 176 | if r is None: 177 | r = (None, None) 178 | 179 | if isinstance(r, tuple): 180 | start, end = r 181 | if start is None: 182 | start = 0 183 | if end is None: 184 | end = num_frames 185 | else: 186 | start = r 187 | end = r + 1 188 | 189 | if isinstance(clip, vs.AudioNode) and ref_fps is not None: 190 | if start != 0: 191 | start = f2s(start, ref_fps, clip.sample_rate) 192 | if end != num_frames and end: 193 | end = f2s(end, ref_fps, clip.sample_rate) 194 | 195 | if start < 0 and num_frames is not None: 196 | start += num_frames 197 | if end is not None and end <= 0 and num_frames is not None: 198 | end += num_frames 199 | 200 | if end is not None: 201 | if start > end: 202 | warnings.warn(f'normalise_ranges: start frame "{start}" is higher than end frame "{end}"') 203 | 204 | if num_frames is not None: 205 | if start >= num_frames or end > num_frames: 206 | warnings.warn(f'normalise_ranges: {r} out of range') 207 | 208 | if num_frames is not None: 209 | start = min(start, num_frames - 1) 210 | if end is not None: 211 | end = min(end, num_frames) 212 | 213 | nranges.add((start, end)) 214 | 215 | out = sorted(nranges) 216 | 217 | if norm_dups: 218 | nranges_d = dict(out) 219 | nranges_rev = sorted(nranges_d.items(), reverse=True) 220 | 221 | for (start1, end1), (start2, end2) in zip(nranges_rev, nranges_rev[1:]): 222 | if end1 is None or end2 is None: 223 | continue 224 | 225 | if start2 < start1 <= end2 < end1: 226 | nranges_d[start2] = max(end1, nranges_d[start1], key=lambda x: x if x is not None else math.inf) 227 | del nranges_d[start1] 228 | 229 | if start2 < start1 and end1 <= end2: 230 | del nranges_d[start1] 231 | 232 | out = list(nranges_d.items()) 233 | 234 | return out 235 | 236 | 237 | def to_incl_incl(ranges: list[Range]) -> list[Range]: 238 | return [(s, e - 1) for (s, e) in ranges] 239 | 240 | 241 | def to_incl_excl(ranges: list[Range]) -> list[Range]: 242 | return [(s, e + 1) for (s, e) in ranges] 243 | 244 | 245 | class _ranges_to_indices: 246 
| def __call__( 247 | self, ref: vs.VideoNode, ranges: FrameRangeN | FrameRangesN | RangesCallBack, 248 | ref_indices: tuple[int, int] = (0, 1) 249 | ) -> NDArray[AnyInt]: 250 | return vnp.zip_arrays( 251 | np.fromiter(self.gen_indices(ref, ranges, ref_indices), np.uint32, ref.num_frames), 252 | np.arange(ref.num_frames, dtype=np.uint32) 253 | ) 254 | 255 | def gen_indices( 256 | self, ref: vs.VideoNode, ranges: FrameRangeN | FrameRangesN | RangesCallBack, 257 | ref_indices: tuple[int, int] 258 | ) -> Iterable[int]: 259 | nranges = normalise_ranges(ref, ranges) 260 | 261 | for f in range(ref.num_frames): 262 | i = ref_indices[0] 263 | for start, end in nranges: 264 | if start <= f < end: 265 | i = ref_indices[1] 266 | break 267 | yield i 268 | 269 | 270 | ranges_to_indices = _ranges_to_indices() 271 | 272 | 273 | def adjust_clip_frames(clip: vs.VideoNode, trims_or_dfs: list[Trim | DF] | Trim) -> vs.VideoNode: 274 | """Trims and/or duplicates frames""" 275 | trims_or_dfs = [trims_or_dfs] if isinstance(trims_or_dfs, tuple) else trims_or_dfs 276 | indices: list[int] = [] 277 | for trim_or_df in trims_or_dfs: 278 | if isinstance(trim_or_df, tuple): 279 | ntrim = normalise_ranges(clip, trim_or_df).pop() 280 | indices.extend(range(*ntrim)) 281 | else: 282 | df = trim_or_df 283 | indices.extend([df.numerator] * df.dup) 284 | return select_frames(clip, indices) 285 | 286 | 287 | def adjust_audio_frames(audio: vs.AudioNode, trims_or_dfs: list[Trim | DF] | Trim, *, ref_fps: Optional[Fraction] = None) -> vs.AudioNode: 288 | audios: list[vs.AudioNode] = [] 289 | trims_or_dfs = [trims_or_dfs] if isinstance(trims_or_dfs, tuple) else trims_or_dfs 290 | for trim_or_df in trims_or_dfs: 291 | if isinstance(trim_or_df, tuple): 292 | ntrim = normalise_ranges(audio, trim_or_df, ref_fps=ref_fps).pop() 293 | audios.append(audio[slice(*ntrim)]) 294 | else: 295 | df = trim_or_df 296 | if ref_fps: 297 | df = df.to_samples(ref_fps, audio.sample_rate) 298 | audios.append(audio[int(df)] * df.dup) 299 | return core.std.AudioSplice(audios) 300 | 301 | 302 | def pick_px_op( 303 | use_expr: bool, 304 | operations: tuple[str, Sequence[int] | Sequence[float] | int | float | Callable[..., Any]] 305 | ) -> Callable[..., vs.VideoNode]: 306 | """Pick either std.Lut or std.Expr""" 307 | expr, lut = operations 308 | if use_expr: 309 | func = partial(core.std.Expr, expr=expr) 310 | else: 311 | if callable(lut): 312 | func = partial(core.std.Lut, function=lut) 313 | elif isinstance(lut, Sequence): 314 | if all(isinstance(x, int) for x in lut): 315 | func = partial(core.std.Lut, lut=lut) # type: ignore 316 | elif all(isinstance(x, float) for x in lut): 317 | func = partial(core.std.Lut, lutf=lut) 318 | else: 319 | raise ValueError('pick_px_operation: operations[1] is not a valid type!') 320 | elif isinstance(lut, int): 321 | func = partial(core.std.Lut, lut=lut) 322 | elif isinstance(lut, float): 323 | func = partial(core.std.Lut, lutf=lut) 324 | else: 325 | raise ValueError('pick_px_operation: operations[1] is not a valid type!') 326 | return func 327 | 328 | 329 | class MutableVideoNode(MutableSequence[vs.VideoNode]): 330 | def __init__(self, node: vs.VideoNode | Sequence[tuple[int, vs.VideoNode]]) -> None: 331 | if isinstance(node, vs.VideoNode): 332 | self._mutable_node: list[vs.VideoNode | tuple[int, None | vs.VideoNode]] = [(x, node) for x in range(node.num_frames)] 333 | else: 334 | self._mutable_node = list(node) 335 | 336 | @overload 337 | def __getitem__(self, index: int) -> vs.VideoNode: 338 | ... 
339 | 340 | @overload 341 | def __getitem__(self, index: slice) -> Self: 342 | ... 343 | 344 | def __getitem__(self, index: int | slice) -> vs.VideoNode | Self: 345 | self._normalize_inner_list() 346 | 347 | if isinstance(index, int): 348 | value = cast(tuple[int, vs.VideoNode], self._mutable_node[index]) 349 | 350 | return value[1][value[0]] 351 | 352 | values = cast(list[tuple[int, vs.VideoNode]], self._mutable_node[index]) 353 | 354 | return self.__class__(values) 355 | 356 | @overload 357 | def __setitem__(self, index: int, value: vs.VideoNode | tuple[int, None | vs.VideoNode]) -> None: 358 | ... 359 | 360 | @overload 361 | def __setitem__( 362 | self, index: slice, 363 | value: vs.VideoNode | tuple[int, None | vs.VideoNode] | Iterable[vs.VideoNode] | Iterable[tuple[int, None | vs.VideoNode]] 364 | ) -> None: 365 | ... 366 | 367 | def __setitem__( 368 | self, index: int | slice, 369 | value: vs.VideoNode | tuple[int, None | vs.VideoNode] | Iterable[vs.VideoNode] | Iterable[tuple[int, None | vs.VideoNode]] 370 | ) -> None: 371 | self._normalize_inner_list() 372 | 373 | def _no_iterable(value: Any) -> TypeGuard[vs.VideoNode | tuple[int, None | vs.VideoNode]]: 374 | return ( 375 | isinstance(value, tuple) and isinstance(value[0], int) and isinstance(value[1], (NoneType, vs.VideoNode)) 376 | ) or isinstance(value, vs.VideoNode) 377 | 378 | if isinstance(index, int): 379 | self._mutable_node[index] = cast(vs.VideoNode | tuple[int, None | vs.VideoNode], value) 380 | elif isinstance(index, slice): 381 | if _no_iterable(value): 382 | self._mutable_node[index] = [value] 383 | else: 384 | self._mutable_node[index] = cast(Iterable[Any], value) 385 | 386 | def __delitem__(self, index: int | slice) -> None: 387 | self._normalize_inner_list() 388 | del self._mutable_node[index] 389 | 390 | def __len__(self) -> int: 391 | self._normalize_inner_list() 392 | return len(self.indices) 393 | 394 | def insert(self, index: int, value: vs.VideoNode | tuple[int, vs.VideoNode | None]) -> None: 395 | self._normalize_inner_list() 396 | self._mutable_node.insert(index, value) 397 | 398 | def _normalize_inner_list(self) -> None: 399 | try: 400 | del self.all_nodes 401 | except AttributeError: 402 | pass 403 | try: 404 | del self.indices 405 | except AttributeError: 406 | pass 407 | self._mutable_node = [ 408 | (f_i, self.all_nodes[c_i]) for c_i, f_i in self.indices 409 | ] 410 | 411 | @cached_property 412 | def all_nodes(self) -> list[vs.VideoNode]: 413 | all_nodes = ClipsCache() 414 | 415 | for n in self._mutable_node: 416 | if isinstance(n, vs.VideoNode): 417 | all_nodes[n] = n 418 | else: 419 | if isinstance(nn := n[1], vs.VideoNode): 420 | all_nodes[nn] = nn 421 | try: 422 | return list(all_nodes.keys()) 423 | finally: 424 | del all_nodes 425 | 426 | @cached_property 427 | def indices(self) -> list[tuple[int, int]]: 428 | indices = list[tuple[int, int]]() 429 | 430 | for n in self._mutable_node: 431 | if isinstance(n, vs.VideoNode): 432 | indices.extend((self.all_nodes.index(n), i) for i in range(n.num_frames)) 433 | else: 434 | indices.append((0 if n[1] is None else self.all_nodes.index(n[1]), n[0])) 435 | 436 | return indices 437 | 438 | def to_node(self) -> vs.VideoNode: 439 | self._normalize_inner_list() 440 | return select_frames(self.all_nodes, self.indices) 441 | -------------------------------------------------------------------------------- /vardefunc/vsjet_proxy.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import 
logging 4 | 5 | from functools import lru_cache 6 | from typing import Any, Callable, Concatenate, Iterator, Literal, Sequence, SupportsFloat, cast, overload 7 | 8 | import numpy as np 9 | import vsdenoise 10 | import vsmasktools 11 | import vstools 12 | 13 | from vskernels import Catrom, KernelT 14 | from vstools import CustomStrEnum, FrameRangeN, FrameRangesN, MatrixT, copy_signature, core, set_output, vs 15 | 16 | from .types import AnyInt, NDArray, RangesCallBack, RangesCallBackF, RangesCallBackNF, RangesCallBackT 17 | from .types import VNumpy as vnp 18 | from .util import normalise_ranges, ranges_to_indices, select_frames, to_incl_incl 19 | 20 | __all__ = [ 21 | "is_preview", "set_output", "replace_ranges", "BestestSource", 22 | "BoundingBox", 23 | "DeferredMask", "HardsubASS", "HardsubLine", "HardsubLineFade", "HardsubMask", 24 | "HardsubSign", "HardsubSignFades", 25 | "replace_squaremask", "rekt_partial", 26 | "dpir", 27 | ] 28 | 29 | 30 | @lru_cache 31 | def is_preview() -> bool: 32 | try: 33 | import vspreview.api 34 | except ImportError: 35 | is_preview = False 36 | else: 37 | is_preview = vspreview.api.is_preview() 38 | return is_preview 39 | 40 | 41 | @overload 42 | def replace_ranges( 43 | clip_a: vs.VideoNode, clip_b: vs.VideoNode, 44 | ranges: FrameRangeN | FrameRangesN, 45 | /, *, 46 | exclusive: bool = True, mismatch: bool = False, 47 | ) -> vs.VideoNode: 48 | ... 49 | 50 | @overload 51 | def replace_ranges( 52 | clip_a: vs.VideoNode, clip_b: vs.VideoNode, 53 | ranges: RangesCallBack, 54 | /, *, 55 | mismatch: bool = False, 56 | ) -> vs.VideoNode: 57 | ... 58 | 59 | @overload 60 | def replace_ranges( 61 | clip_a: vs.VideoNode, clip_b: vs.VideoNode, 62 | ranges: RangesCallBackF[vs.VideoFrame] | RangesCallBackNF[vs.VideoFrame], 63 | /, *, 64 | mismatch: bool = False, 65 | prop_src: vs.VideoNode 66 | ) -> vs.VideoNode: 67 | ... 68 | 69 | @overload 70 | def replace_ranges( 71 | clip_a: vs.VideoNode, clip_b: vs.VideoNode, 72 | ranges: RangesCallBackF[list[vs.VideoFrame]] | RangesCallBackNF[list[vs.VideoFrame]], 73 | /, *, 74 | mismatch: bool = False, 75 | prop_src: list[vs.VideoNode] 76 | ) -> vs.VideoNode: 77 | ... 78 | 79 | @overload 80 | def replace_ranges( 81 | clip_a: vs.VideoNode, *clip_b: tuple[vs.VideoNode, FrameRangeN | FrameRangesN | RangesCallBack], 82 | mismatch: bool = False, 83 | ) -> vs.VideoNode: 84 | ... 85 | 86 | def replace_ranges( 87 | clip_a: vs.VideoNode, *args: Any, 88 | exclusive: bool = True, mismatch: bool = False, 89 | prop_src: vs.VideoNode | list[vs.VideoNode] | None = None 90 | ) -> vs.VideoNode: 91 | """ 92 | Replaces frames in a clip, either with pre-calculated indices or on-the-fly with a callback. 93 | Frame ranges are by default exclusive. This behaviour can be changed by setting `exclusive=False`. 
94 | 
95 |     Examples with clips ``black`` and ``white`` of equal length:
96 |         * ``replace_ranges(black, white, [(0, 1)])``: replace frame 0 with ``white``
97 |         * ``replace_ranges(black, white, [(0, 2)])``: replace frames 0 and 1 with ``white``
98 |         * ``replace_ranges(black, white, [(None, None)])``: replace the entire clip with ``white``
99 |         * ``replace_ranges(black, white, [(0, None)])``: same as previous
100 |         * ``replace_ranges(black, white, [(200, None)])``: replace 200 until the end with ``white``
101 |         * ``replace_ranges(black, white, [(200, -1)])``: replace 200 until the end with ``white``,
102 |           leaving 1 frame of ``black``
103 | 
104 |     Optional Dependencies:
105 |         * Either of the following two plugins:
106 |             * `VS Julek Plugin `_ (recommended!)
107 |             * `VSRemapFrames `_
108 | 
109 |     :param clip_a:      Original clip.
110 |     :param clip_b:      Replacement clip.
111 |     :param ranges:      Ranges to replace clip_a (original clip) with clip_b (replacement clip).
112 |                         Integer values in the list indicate single frames,
113 |                         Tuple values indicate exclusive ranges unless ``exclusive=False`` is set.
114 |                         Callbacks must return true to replace a with b.
115 |                         Negative integer values will be wrapped around based on clip_b's length.
116 |                         None values are context dependent:
117 |                             * None provided as sole value to ranges: no-op
118 |                             * Single None value in list: Last frame in clip_b
119 |                             * None as first value of tuple: 0
120 |                             * None as second value of tuple: Last frame in clip_b
121 |     :param exclusive:   Use exclusive ranges (Default: True).
122 |     :param mismatch:    Accept format or resolution mismatch between clips.
123 | 
124 |     :return:            Clip with ranges from clip_a replaced with clip_b.
125 |     """
126 |     if len(args) == 0:
127 |         return clip_a
128 | 
129 |     if isinstance(clip_b := args[0], vs.VideoNode):
130 |         ranges: FrameRangeN | FrameRangesN | RangesCallBackT | None = args[1]
131 | 
132 |         if exclusive and not callable(ranges):
133 |             ranges = normalise_ranges(clip_b, ranges, norm_dups=True)
134 | 
135 |         return vstools.replace_ranges(clip_a, clip_b, ranges, exclusive, mismatch, prop_src=prop_src)
136 | 
137 |     if not exclusive:
138 |         raise NotImplementedError
139 | 
140 |     rclips: tuple[tuple[vs.VideoNode, FrameRangeN | FrameRangesN | RangesCallBack], ...] 
141 | 
142 |     if len(rclips) <= 10:
143 |         for c, r in rclips:
144 |             clip_a = replace_ranges(clip_a, c, r, mismatch=mismatch)
145 |         return clip_a
146 | 
147 |     ref_indices = np.zeros(clip_a.num_frames, np.uint32)
148 | 
149 |     rrclips = [
150 |         (c, np.fromiter(ranges_to_indices.gen_indices(c, r, (0, i)), np.uint32, c.num_frames))
151 |         for (i, (c, r)) in enumerate(rclips, 1)
152 |     ]
153 | 
154 |     clips, indices_iter = cast(tuple[Iterator[vs.VideoNode], Iterator[NDArray[AnyInt]]], zip(*rrclips))
155 | 
156 |     indices = list[NDArray[AnyInt]]()
157 | 
158 |     for i in indices_iter:
159 |         if (isize := i.size) < (rsize := ref_indices.size):
160 |             i = np.pad(i, (0, rsize - isize))
161 |         elif isize > rsize:
162 |             i = i[:rsize]
163 | 
164 |         indices.append(i)
165 | 
166 |     nindices = np.max([ref_indices, *indices], axis=0, out=ref_indices)
167 | 
168 |     return select_frames(
169 |         [clip_a, *clips],
170 |         vnp.zip_arrays(nindices, np.arange(clip_a.num_frames, dtype=np.uint32)),
171 |         mismatch=mismatch
172 |     )
173 | 
174 | 
175 | try:
176 |     from vssource import BestSource
177 | except ImportError:
178 |     pass
179 | else:
180 |     class BestestSource(BestSource):
181 |         def __init__(self, *, force: bool = True, **kwargs: Any) -> None:
182 |             kwargs.setdefault("showprogress", True)
183 |             kwargs.setdefault("cachemode", 3)
184 |             super().__init__(force=force, **kwargs)
185 | 
186 |             def handler_func(m_type: vs.MessageType, msg: str) -> None:
187 |                 if all([
188 |                     m_type == vs.MESSAGE_TYPE_INFORMATION,
189 |                     msg.startswith(("VideoSource ", "AudioSource ")),
190 |                     logging.getLogger().level <= logging.WARNING,
191 |                     is_preview()
192 |                 ]):
193 |                     print(msg, end="\r")
194 | 
195 |             self._log_handle = core.add_log_handler(handler_func)
196 | 
197 |         def __del__(self) -> None:
198 |             core.remove_log_handler(self._log_handle)
199 | 
200 | 
201 | class BoundingBox(vsmasktools.BoundingBox):
202 |     """Same as vsmasktools.BoundingBox but follows the CropAbs argument order (width, height, offset_x, offset_y)"""
203 | 
204 |     @overload
205 |     def __init__(self, width: int, height: int, offset_x: int, offset_y: int, /, *, invert: bool = False) -> None:
206 |         ...
207 | 
208 |     @overload
209 |     def __init__(self, pos: tuple[int, int] | vstools.Position, size: tuple[int, int] | vstools.Size, /, *, invert: bool = False) -> None:
210 |         ...
211 | 
212 |     def __init__(self, *args: Any, invert: bool = False) -> None:
213 |         if len(args) == 4:
214 |             pos, size = (args[2], args[3]), (args[0], args[1])
215 |         elif len(args) == 2:
216 |             pos, size = args[0], args[1]
217 |         else:
218 |             raise NotImplementedError
219 |         super().__init__(pos, size, invert)
220 | 
221 | 
222 | class DeferredMask(vsmasktools.DeferredMask):
223 |     _incl_excl_ranges: FrameRangesN
224 | 
225 |     @property
226 |     def ranges(self) -> FrameRangesN:
227 |         return [
228 |             (s, (e - 1) if e is not None else e)
229 |             for (s, e) in normalise_ranges(None, self._incl_excl_ranges, norm_dups=True)
230 |         ]
231 | 
232 |     @ranges.setter
233 |     def ranges(self, value: FrameRangesN) -> None:
234 |         self._incl_excl_ranges = value
235 | 
236 | 
237 | class HardsubMask(vsmasktools.HardsubMask, DeferredMask): ...
238 | 
239 | 
240 | class HardsubSignFades(vsmasktools.HardsubSignFades, HardsubMask): ...
241 | 
242 | 
243 | class HardsubSign(vsmasktools.HardsubSign, HardsubMask): ...
244 | 
245 | 
246 | class HardsubLine(vsmasktools.HardsubLine, HardsubMask): ...
247 | 
248 | 
249 | class HardsubLineFade(vsmasktools.HardsubLineFade, HardsubMask): ...
250 | 
251 | 
252 | class HardsubASS(vsmasktools.HardsubASS, HardsubMask): ...
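
# [Editor's illustrative sketch, not part of the module source.] The BoundingBox
# subclass above reorders CropAbs-style arguments (width, height, offset_x, offset_y)
# into vsmasktools' (pos, size) pair, so the two calls below are equivalent.
# The ``clip`` name is hypothetical, and ``get_mask`` is vsmasktools' usual
# mask accessor.
#
#     box_a = BoundingBox(200, 100, 40, 20)      # width, height, offset_x, offset_y
#     box_b = BoundingBox((40, 20), (200, 100))  # pos, size (vsmasktools order)
#     # box_a and box_b describe the same 200x100 region at offset (40, 20):
#     # mask = box_a.get_mask(clip)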
253 | 
254 | 
255 | @copy_signature(vsmasktools.replace_squaremask)
256 | def replace_squaremask(*args: Any, **kwargs: Any) -> Any:
257 |     argsl = list(args)
258 |     argsl[3] = to_incl_incl(normalise_ranges(
259 |         kwargs.get("clipa", argsl[0]), kwargs.pop("ranges", argsl[3]), norm_dups=True
260 |     ))
261 | 
262 |     return vsmasktools.replace_squaremask(*argsl, **kwargs)
263 | 
264 | 
265 | def rekt_partial(
266 |     clip: vs.VideoNode, left: int = 0, right: int = 0, top: int = 0, bottom: int = 0,
267 |     func: Callable[Concatenate[vs.VideoNode, vstools.P], vs.VideoNode] = lambda clip, *args, **kwargs: clip,
268 |     *args: vstools.P.args, **kwargs: vstools.P.kwargs
269 | ) -> vs.VideoNode:
270 |     """Same as vsmasktools.rekt_partial but follows the CropRel argument order (left, right, top, bottom)"""
271 |     return vsmasktools.rekt_partial(clip, left, top, right, bottom, func, *args, **kwargs)
272 | 
273 | 
274 | StrengthT = SupportsFloat | vs.VideoNode | None
275 | 
276 | 
277 | class _dpir(CustomStrEnum):
278 |     DEBLOCK: _dpir = 'deblock'  # type: ignore
279 |     DENOISE: _dpir = 'denoise'  # type: ignore
280 | 
281 |     def __call__(
282 |         self, clip: vs.VideoNode, strength: StrengthT | tuple[StrengthT, StrengthT] = 10,
283 |         matrix: MatrixT | None = None, cuda: bool | Literal['trt'] | None = None, i444: bool = False,
284 |         tiles: int | tuple[int, int] | None = None, overlap: int | tuple[int, int] | None = 8,
285 |         zones: Sequence[tuple[FrameRangeN | FrameRangesN | None, StrengthT]] | None = None,
286 |         fp16: bool | None = None, num_streams: int | None = None, device_id: int = 0, kernel: KernelT = Catrom,
287 |         **kwargs: Any
288 |     ) -> vs.VideoNode:
289 |         if zones:
290 |             zones = [(to_incl_incl(normalise_ranges(clip, r, norm_dups=True)), stre) for r, stre in zones]
291 | 
292 |         return vsdenoise.deblock._dpir(self.value)(
293 |             clip, strength, matrix, cuda, i444, tiles, overlap,
294 |             zones, fp16, num_streams, device_id, kernel, **kwargs  # type: ignore
295 |         )
296 | 
297 | 
298 | dpir = _dpir.DEBLOCK
299 | 
--------------------------------------------------------------------------------
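
A short usage sketch for the wrappers above (an editor's addition, not a file from the repository). It assumes `replace_ranges` and `dpir` are re-exported at the `vardefunc` package level; if they are not, import them from the submodule directly. The clip names are placeholders, and actually running `dpir` additionally requires vsdenoise's DPIR runtime dependencies.

import vapoursynth as vs
import vardefunc as vdf

core = vs.core

src = core.std.BlankClip(format=vs.YUV420P16, length=300)
alt = src.std.Invert()

# Ranges are exclusive by default: (0, 100) replaces frames 0-99 with `alt`.
merged = vdf.replace_ranges(src, alt, [(0, 100)])

# Multi-pair form: each (clip, ranges) tuple is applied in turn.
merged = vdf.replace_ranges(src, (alt, [(0, 100)]), (src.std.BoxBlur(), [(200, None)]))

# `dpir` is the DEBLOCK member of the enum above; zones reuse the same range format.
deblocked = vdf.dpir(src, strength=10, zones=[((100, 200), 25)])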