├── .github
│   └── workflows
│       └── publish.yml
├── .gitignore
├── .vscode
│   └── settings.json
├── LICENSE
├── MANIFEST.in
├── README.md
├── pyproject.toml
└── vodesfunc
    ├── __init__.py
    ├── aa.py
    ├── denoise.py
    ├── misc.py
    ├── noise.py
    ├── rescale.py
    ├── rescale_ext
    │   ├── __init__.py
    │   ├── base.py
    │   ├── fieldbased_rescale.py
    │   ├── mixed_rescale.py
    │   ├── regular_rescale.py
    │   └── scaling_args.py
    ├── scale.py
    ├── spikefinder.py
    └── util.py
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish releases when new tag
2 | on:
3 | push:
4 | tags:
5 | - v[0-9]+**
6 |
7 | jobs:
8 | package_build:
9 | name: Build and push to PyPI
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v4
13 |
14 | - name: Prep Python
15 | uses: actions/setup-python@v5
16 | with:
17 | python-version: "3.11"
18 |
19 | - name: Build
20 | run: pipx run build
21 |
22 | - name: Publish to PyPI
23 | uses: pypa/gh-action-pypi-publish@release/v1
24 | with:
25 | user: __token__
26 | password: ${{ secrets.AUTH_TOKEN }}
27 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | .mypy-cache/
3 | *egg-info/
4 | build/
5 | test/
6 | test.py
7 | .vsjet/
8 | .venv/
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "[python]": {
3 | "editor.formatOnType": false,
4 | "editor.formatOnSave": true,
5 | "editor.defaultFormatter": "charliermarsh.ruff"
6 | },
7 | "ruff.lint.enable": true,
8 | "ruff.lint.args": ["--config", "./pyproject.toml"],
9 | "ruff.format.args": ["--config", "./pyproject.toml"]
10 | }
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Mozilla Public License Version 2.0
2 | ==================================
3 |
4 | 1. Definitions
5 | --------------
6 |
7 | 1.1. "Contributor"
8 | means each individual or legal entity that creates, contributes to
9 | the creation of, or owns Covered Software.
10 |
11 | 1.2. "Contributor Version"
12 | means the combination of the Contributions of others (if any) used
13 | by a Contributor and that particular Contributor's Contribution.
14 |
15 | 1.3. "Contribution"
16 | means Covered Software of a particular Contributor.
17 |
18 | 1.4. "Covered Software"
19 | means Source Code Form to which the initial Contributor has attached
20 | the notice in Exhibit A, the Executable Form of such Source Code
21 | Form, and Modifications of such Source Code Form, in each case
22 | including portions thereof.
23 |
24 | 1.5. "Incompatible With Secondary Licenses"
25 | means
26 |
27 | (a) that the initial Contributor has attached the notice described
28 | in Exhibit B to the Covered Software; or
29 |
30 | (b) that the Covered Software was made available under the terms of
31 | version 1.1 or earlier of the License, but not also under the
32 | terms of a Secondary License.
33 |
34 | 1.6. "Executable Form"
35 | means any form of the work other than Source Code Form.
36 |
37 | 1.7. "Larger Work"
38 | means a work that combines Covered Software with other material, in
39 | a separate file or files, that is not Covered Software.
40 |
41 | 1.8. "License"
42 | means this document.
43 |
44 | 1.9. "Licensable"
45 | means having the right to grant, to the maximum extent possible,
46 | whether at the time of the initial grant or subsequently, any and
47 | all of the rights conveyed by this License.
48 |
49 | 1.10. "Modifications"
50 | means any of the following:
51 |
52 | (a) any file in Source Code Form that results from an addition to,
53 | deletion from, or modification of the contents of Covered
54 | Software; or
55 |
56 | (b) any new file in Source Code Form that contains any Covered
57 | Software.
58 |
59 | 1.11. "Patent Claims" of a Contributor
60 | means any patent claim(s), including without limitation, method,
61 | process, and apparatus claims, in any patent Licensable by such
62 | Contributor that would be infringed, but for the grant of the
63 | License, by the making, using, selling, offering for sale, having
64 | made, import, or transfer of either its Contributions or its
65 | Contributor Version.
66 |
67 | 1.12. "Secondary License"
68 | means either the GNU General Public License, Version 2.0, the GNU
69 | Lesser General Public License, Version 2.1, the GNU Affero General
70 | Public License, Version 3.0, or any later versions of those
71 | licenses.
72 |
73 | 1.13. "Source Code Form"
74 | means the form of the work preferred for making modifications.
75 |
76 | 1.14. "You" (or "Your")
77 | means an individual or a legal entity exercising rights under this
78 | License. For legal entities, "You" includes any entity that
79 | controls, is controlled by, or is under common control with You. For
80 | purposes of this definition, "control" means (a) the power, direct
81 | or indirect, to cause the direction or management of such entity,
82 | whether by contract or otherwise, or (b) ownership of more than
83 | fifty percent (50%) of the outstanding shares or beneficial
84 | ownership of such entity.
85 |
86 | 2. License Grants and Conditions
87 | --------------------------------
88 |
89 | 2.1. Grants
90 |
91 | Each Contributor hereby grants You a world-wide, royalty-free,
92 | non-exclusive license:
93 |
94 | (a) under intellectual property rights (other than patent or trademark)
95 | Licensable by such Contributor to use, reproduce, make available,
96 | modify, display, perform, distribute, and otherwise exploit its
97 | Contributions, either on an unmodified basis, with Modifications, or
98 | as part of a Larger Work; and
99 |
100 | (b) under Patent Claims of such Contributor to make, use, sell, offer
101 | for sale, have made, import, and otherwise transfer either its
102 | Contributions or its Contributor Version.
103 |
104 | 2.2. Effective Date
105 |
106 | The licenses granted in Section 2.1 with respect to any Contribution
107 | become effective for each Contribution on the date the Contributor first
108 | distributes such Contribution.
109 |
110 | 2.3. Limitations on Grant Scope
111 |
112 | The licenses granted in this Section 2 are the only rights granted under
113 | this License. No additional rights or licenses will be implied from the
114 | distribution or licensing of Covered Software under this License.
115 | Notwithstanding Section 2.1(b) above, no patent license is granted by a
116 | Contributor:
117 |
118 | (a) for any code that a Contributor has removed from Covered Software;
119 | or
120 |
121 | (b) for infringements caused by: (i) Your and any other third party's
122 | modifications of Covered Software, or (ii) the combination of its
123 | Contributions with other software (except as part of its Contributor
124 | Version); or
125 |
126 | (c) under Patent Claims infringed by Covered Software in the absence of
127 | its Contributions.
128 |
129 | This License does not grant any rights in the trademarks, service marks,
130 | or logos of any Contributor (except as may be necessary to comply with
131 | the notice requirements in Section 3.4).
132 |
133 | 2.4. Subsequent Licenses
134 |
135 | No Contributor makes additional grants as a result of Your choice to
136 | distribute the Covered Software under a subsequent version of this
137 | License (see Section 10.2) or under the terms of a Secondary License (if
138 | permitted under the terms of Section 3.3).
139 |
140 | 2.5. Representation
141 |
142 | Each Contributor represents that the Contributor believes its
143 | Contributions are its original creation(s) or it has sufficient rights
144 | to grant the rights to its Contributions conveyed by this License.
145 |
146 | 2.6. Fair Use
147 |
148 | This License is not intended to limit any rights You have under
149 | applicable copyright doctrines of fair use, fair dealing, or other
150 | equivalents.
151 |
152 | 2.7. Conditions
153 |
154 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
155 | in Section 2.1.
156 |
157 | 3. Responsibilities
158 | -------------------
159 |
160 | 3.1. Distribution of Source Form
161 |
162 | All distribution of Covered Software in Source Code Form, including any
163 | Modifications that You create or to which You contribute, must be under
164 | the terms of this License. You must inform recipients that the Source
165 | Code Form of the Covered Software is governed by the terms of this
166 | License, and how they can obtain a copy of this License. You may not
167 | attempt to alter or restrict the recipients' rights in the Source Code
168 | Form.
169 |
170 | 3.2. Distribution of Executable Form
171 |
172 | If You distribute Covered Software in Executable Form then:
173 |
174 | (a) such Covered Software must also be made available in Source Code
175 | Form, as described in Section 3.1, and You must inform recipients of
176 | the Executable Form how they can obtain a copy of such Source Code
177 | Form by reasonable means in a timely manner, at a charge no more
178 | than the cost of distribution to the recipient; and
179 |
180 | (b) You may distribute such Executable Form under the terms of this
181 | License, or sublicense it under different terms, provided that the
182 | license for the Executable Form does not attempt to limit or alter
183 | the recipients' rights in the Source Code Form under this License.
184 |
185 | 3.3. Distribution of a Larger Work
186 |
187 | You may create and distribute a Larger Work under terms of Your choice,
188 | provided that You also comply with the requirements of this License for
189 | the Covered Software. If the Larger Work is a combination of Covered
190 | Software with a work governed by one or more Secondary Licenses, and the
191 | Covered Software is not Incompatible With Secondary Licenses, this
192 | License permits You to additionally distribute such Covered Software
193 | under the terms of such Secondary License(s), so that the recipient of
194 | the Larger Work may, at their option, further distribute the Covered
195 | Software under the terms of either this License or such Secondary
196 | License(s).
197 |
198 | 3.4. Notices
199 |
200 | You may not remove or alter the substance of any license notices
201 | (including copyright notices, patent notices, disclaimers of warranty,
202 | or limitations of liability) contained within the Source Code Form of
203 | the Covered Software, except that You may alter any license notices to
204 | the extent required to remedy known factual inaccuracies.
205 |
206 | 3.5. Application of Additional Terms
207 |
208 | You may choose to offer, and to charge a fee for, warranty, support,
209 | indemnity or liability obligations to one or more recipients of Covered
210 | Software. However, You may do so only on Your own behalf, and not on
211 | behalf of any Contributor. You must make it absolutely clear that any
212 | such warranty, support, indemnity, or liability obligation is offered by
213 | You alone, and You hereby agree to indemnify every Contributor for any
214 | liability incurred by such Contributor as a result of warranty, support,
215 | indemnity or liability terms You offer. You may include additional
216 | disclaimers of warranty and limitations of liability specific to any
217 | jurisdiction.
218 |
219 | 4. Inability to Comply Due to Statute or Regulation
220 | ---------------------------------------------------
221 |
222 | If it is impossible for You to comply with any of the terms of this
223 | License with respect to some or all of the Covered Software due to
224 | statute, judicial order, or regulation then You must: (a) comply with
225 | the terms of this License to the maximum extent possible; and (b)
226 | describe the limitations and the code they affect. Such description must
227 | be placed in a text file included with all distributions of the Covered
228 | Software under this License. Except to the extent prohibited by statute
229 | or regulation, such description must be sufficiently detailed for a
230 | recipient of ordinary skill to be able to understand it.
231 |
232 | 5. Termination
233 | --------------
234 |
235 | 5.1. The rights granted under this License will terminate automatically
236 | if You fail to comply with any of its terms. However, if You become
237 | compliant, then the rights granted under this License from a particular
238 | Contributor are reinstated (a) provisionally, unless and until such
239 | Contributor explicitly and finally terminates Your grants, and (b) on an
240 | ongoing basis, if such Contributor fails to notify You of the
241 | non-compliance by some reasonable means prior to 60 days after You have
242 | come back into compliance. Moreover, Your grants from a particular
243 | Contributor are reinstated on an ongoing basis if such Contributor
244 | notifies You of the non-compliance by some reasonable means, this is the
245 | first time You have received notice of non-compliance with this License
246 | from such Contributor, and You become compliant prior to 30 days after
247 | Your receipt of the notice.
248 |
249 | 5.2. If You initiate litigation against any entity by asserting a patent
250 | infringement claim (excluding declaratory judgment actions,
251 | counter-claims, and cross-claims) alleging that a Contributor Version
252 | directly or indirectly infringes any patent, then the rights granted to
253 | You by any and all Contributors for the Covered Software under Section
254 | 2.1 of this License shall terminate.
255 |
256 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all
257 | end user license agreements (excluding distributors and resellers) which
258 | have been validly granted by You or Your distributors under this License
259 | prior to termination shall survive termination.
260 |
261 | ************************************************************************
262 | * *
263 | * 6. Disclaimer of Warranty *
264 | * ------------------------- *
265 | * *
266 | * Covered Software is provided under this License on an "as is" *
267 | * basis, without warranty of any kind, either expressed, implied, or *
268 | * statutory, including, without limitation, warranties that the *
269 | * Covered Software is free of defects, merchantable, fit for a *
270 | * particular purpose or non-infringing. The entire risk as to the *
271 | * quality and performance of the Covered Software is with You. *
272 | * Should any Covered Software prove defective in any respect, You *
273 | * (not any Contributor) assume the cost of any necessary servicing, *
274 | * repair, or correction. This disclaimer of warranty constitutes an *
275 | * essential part of this License. No use of any Covered Software is *
276 | * authorized under this License except under this disclaimer. *
277 | * *
278 | ************************************************************************
279 |
280 | ************************************************************************
281 | * *
282 | * 7. Limitation of Liability *
283 | * -------------------------- *
284 | * *
285 | * Under no circumstances and under no legal theory, whether tort *
286 | * (including negligence), contract, or otherwise, shall any *
287 | * Contributor, or anyone who distributes Covered Software as *
288 | * permitted above, be liable to You for any direct, indirect, *
289 | * special, incidental, or consequential damages of any character *
290 | * including, without limitation, damages for lost profits, loss of *
291 | * goodwill, work stoppage, computer failure or malfunction, or any *
292 | * and all other commercial damages or losses, even if such party *
293 | * shall have been informed of the possibility of such damages. This *
294 | * limitation of liability shall not apply to liability for death or *
295 | * personal injury resulting from such party's negligence to the *
296 | * extent applicable law prohibits such limitation. Some *
297 | * jurisdictions do not allow the exclusion or limitation of *
298 | * incidental or consequential damages, so this exclusion and *
299 | * limitation may not apply to You. *
300 | * *
301 | ************************************************************************
302 |
303 | 8. Litigation
304 | -------------
305 |
306 | Any litigation relating to this License may be brought only in the
307 | courts of a jurisdiction where the defendant maintains its principal
308 | place of business and such litigation shall be governed by laws of that
309 | jurisdiction, without reference to its conflict-of-law provisions.
310 | Nothing in this Section shall prevent a party's ability to bring
311 | cross-claims or counter-claims.
312 |
313 | 9. Miscellaneous
314 | ----------------
315 |
316 | This License represents the complete agreement concerning the subject
317 | matter hereof. If any provision of this License is held to be
318 | unenforceable, such provision shall be reformed only to the extent
319 | necessary to make it enforceable. Any law or regulation which provides
320 | that the language of a contract shall be construed against the drafter
321 | shall not be used to construe this License against a Contributor.
322 |
323 | 10. Versions of the License
324 | ---------------------------
325 |
326 | 10.1. New Versions
327 |
328 | Mozilla Foundation is the license steward. Except as provided in Section
329 | 10.3, no one other than the license steward has the right to modify or
330 | publish new versions of this License. Each version will be given a
331 | distinguishing version number.
332 |
333 | 10.2. Effect of New Versions
334 |
335 | You may distribute the Covered Software under the terms of the version
336 | of the License under which You originally received the Covered Software,
337 | or under the terms of any subsequent version published by the license
338 | steward.
339 |
340 | 10.3. Modified Versions
341 |
342 | If you create software not governed by this License, and you want to
343 | create a new license for such software, you may create and use a
344 | modified version of this License if you rename the license and remove
345 | any references to the name of the license steward (except to note that
346 | such modified license differs from this License).
347 |
348 | 10.4. Distributing Source Code Form that is Incompatible With Secondary
349 | Licenses
350 |
351 | If You choose to distribute Source Code Form that is Incompatible With
352 | Secondary Licenses under the terms of this version of the License, the
353 | notice described in Exhibit B of this License must be attached.
354 |
355 | Exhibit A - Source Code Form License Notice
356 | -------------------------------------------
357 |
358 | This Source Code Form is subject to the terms of the Mozilla Public
359 | License, v. 2.0. If a copy of the MPL was not distributed with this
360 | file, You can obtain one at http://mozilla.org/MPL/2.0/.
361 |
362 | If it is not possible or desirable to put the notice in a particular
363 | file, then You may include the notice in a location (such as a LICENSE
364 | file in a relevant directory) where a recipient would be likely to look
365 | for such a notice.
366 |
367 | You may add additional accurate notices of copyright ownership.
368 |
369 | Exhibit B - "Incompatible With Secondary Licenses" Notice
370 | ---------------------------------------------------------
371 |
372 | This Source Code Form is "Incompatible With Secondary Licenses", as
373 | defined by the Mozilla Public License, v. 2.0.
374 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include requirements.txt
2 |
3 | exclude docs/*
4 | exclude test/*
5 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # vodesfunc
2 |
3 | Contains various functions for automation and other stuff I use in my scripts.
4 |
5 | Auto generated docs are at https://muxtools.vodes.pw.
6 |
7 | #### This is by no means me trying to be professional and as such, the code will not be treated like it.
8 |
9 | ## Installation
10 |
11 | `pip install vodesfunc`
for ~~mostly~~ stable versions
12 |
13 | `pip install git+https://github.com/Vodes/vodesfunc.git`
for absolutely latest
14 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 |
2 | [project]
3 | name = "vodesfunc"
4 | version = "1.8.4"
5 | description = "Vodes' random Vapoursynth Functions."
6 | authors = [{ name = "Vodes", email = "vodes.imp@gmail.com" }]
7 | dependencies = [
8 | "Vapoursynth>=66",
9 | "vsmuxtools>=0.2.1",
10 | "vsjetpack>=0.2.2,<0.5.0",
11 | "jetpytools>=1.2.3",
12 | "packaging>=23.2"
13 | ]
14 | classifiers = [
15 | "Natural Language :: English",
16 | "Intended Audience :: Developers",
17 | "Intended Audience :: End Users/Desktop",
18 | "Programming Language :: Python :: 3.11",
19 | "Operating System :: OS Independent",
20 | "Typing :: Typed",
21 | "Topic :: Multimedia :: Video",
22 | "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
23 | ]
24 | requires-python = ">=3.11"
25 | readme = "README.md"
26 | license = "MPL-2.0"
27 |
28 | [build-system]
29 | requires = ["hatchling"]
30 | build-backend = "hatchling.build"
31 |
32 | [project.urls]
33 | "Source Code" = "https://github.com/Vodes/vodesfunc"
34 | "Contact" = "https://discord.gg/Kf94Nv6WVN"
35 |
36 | [tool.ruff]
37 | line-length = 150
38 | indent-width = 4
39 | target-version = "py311"
40 |
41 | [tool.ruff.lint]
42 | ignore = ["E722"]
43 | fixable = ["F541", "F401", "E712", "E711"]
44 | exclude = ["__init__.py"]
45 |
46 | [tool.ruff.format]
47 | quote-style = "double"
48 | indent-style = "space"
49 | skip-magic-trailing-comma = false
50 | line-ending = "auto"
51 | docstring-code-format = false
--------------------------------------------------------------------------------
/vodesfunc/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Oh god no
3 | """
4 |
5 | # flake8: noqa
6 |
7 | from . import aa, misc, noise, scale, denoise, rescale, util, spikefinder, rescale_ext
8 | from .aa import *
9 | from .misc import *
10 | from .noise import *
11 | from .scale import *
12 | from .rescale import *
13 | from .denoise import *
14 | from .util import *
15 | from .spikefinder import *
16 |
17 | from .rescale_ext.mixed_rescale import *
18 |
--------------------------------------------------------------------------------
/vodesfunc/aa.py:
--------------------------------------------------------------------------------
1 | from enum import IntEnum
2 | from typing import Sequence, Union
3 | from vsaa import Antialiaser, Eedi3
4 | from vskernels import Bicubic, Kernel, KernelT, Lanczos, Scaler, ScalerT
5 | from vstools import FrameRangesN, KwargsT, mod2, vs, get_w, scale_mask
6 |
7 |
8 | __all__ = ["cope_aa", "CopeMode"]
9 |
10 |
class CopeMode(IntEnum):
    """How `cope_aa` brings the upscaled, AA'd clip back down to the input resolution."""

    UpDown = 1  # plain downscale with the given scaler
    Descale = 2  # kernel descale
    Inverse = 3  # inverse kernel resample via fmtc
16 |
17 | def cope_aa(
18 | clip: vs.VideoNode,
19 | multiplier: float | None = None,
20 | antialiaser: Antialiaser = Eedi3(0.125, 0.25, gamma=65, vthresh0=40, vthresh1=60, field=1, sclip_aa=None),
21 | scaler: KernelT | ScalerT | Sequence[Union[KernelT, ScalerT]] = Lanczos(),
22 | mode: CopeMode | int = CopeMode.Inverse,
23 | mask: bool | vs.VideoNode = True,
24 | no_aa_ranges: FrameRangesN = [],
25 | hybrid_cl: bool = False,
26 | **kwargs: KwargsT,
27 | ) -> vs.VideoNode:
28 | """
29 | Cope and lazy function to AA a doubled clip. Usually while rescaling.
30 | This is probably overall an awful idea.
31 |
32 | :param multiplier: Basically rfactor. If you're doubling a 720p clip you'll only have a 1440p clip to AA. EEDI3 will fuck it.
33 | Defaults to 1.2 if the input clip is smaller than 1700p. 1 otherwise.
34 | :param antialiaser: Antialiaser used for actually doing the stuff. Defaults to EEDI3 with some somewhat conservative settings kindof.
35 | :param scaler: Scaler(s) or rather kernel(s) in this case. Used to up- and downscale for the rfactor.
36 | If you specify a third one it will get used to scale the mask. (Otherwise Bilinear)
37 | :param mode: Method to return back to input res. Available are UpDown (simple downscale), Descale and Inverse (fmtc).
38 | :param mask: Mask for eedi3 possibly save some calculations. Can be a custom one, True for a Kirsch or False to disable.
39 | :param no_aa_ranges: Ranges you might not wanna AA for one reason or another.
40 | :param hybrid_cl: Use eedi3cl on one of the two interpolate calls.
41 | Not sure if this is useful or not. Just wrote it to test.
42 | """
43 |
44 | def fmtc_args(kernel: Kernel) -> KwargsT:
45 | if isinstance(kernel, Bicubic):
46 | return KwargsT(kernel="bicubic", a1=kernel.b, a2=kernel.c)
47 | else:
48 | return KwargsT(kernel=kernel.__class__.__name__.lower(), taps=kernel.taps if isinstance(kernel, Lanczos) else None)
49 |
50 | if not isinstance(scaler, Sequence):
51 | scaler = [scaler, scaler]
52 | scalers = [Kernel.ensure_obj(s) if mode != CopeMode.UpDown else Scaler.ensure_obj(s) for s in scaler]
53 |
54 | if mask is True:
55 | from vsmasktools import KirschTCanny
56 |
57 | mask = KirschTCanny.edgemask(clip, lthr=60 / 255, hthr=150 / 255, planes=0)
58 |
59 | if not multiplier:
60 | multiplier = 1.2 if clip.height < 1700 else 1.0
61 | height = mod2(clip.height * multiplier)
62 | width = get_w(height, mod=None)
63 | if mask:
64 | mask = mask.resize.Bilinear(width, height) if len(scalers) < 3 else scalers[2].scale(mask, width, height)
65 | mask = mask.std.Binarize(scale_mask(16, 8, mask))
66 | wclip = scalers[0].scale(clip, width, height)
67 | aa = wclip.std.Transpose()
68 | aa = antialiaser.interpolate(aa, False, sclip=aa, mclip=mask.std.Transpose() if mask else None, **kwargs).std.Transpose()
69 | if hybrid_cl and isinstance(antialiaser, Eedi3):
70 | from copy import deepcopy
71 |
72 | other_antialiaser = deepcopy(antialiaser)
73 | other_antialiaser.opencl = True
74 | aa = other_antialiaser.interpolate(aa, False, sclip=aa, **kwargs)
75 | else:
76 | aa = antialiaser.interpolate(aa, False, sclip=aa, mclip=mask if mask else None, **kwargs)
77 | aa = wclip.std.MaskedMerge(aa, mask)
78 | match mode:
79 | case CopeMode.Descale:
80 | aa = scalers[1].descale(aa, clip.width, clip.height)
81 | case CopeMode.Inverse:
82 | aa = aa.fmtc.resample(clip.width, clip.height, invks=True, **fmtc_args(scalers[1]))
83 | case _:
84 | aa = scalers[1].scale(aa, clip.width, clip.height)
85 | if not no_aa_ranges:
86 | return aa
87 | else:
88 | try:
89 | from jvsfunc import rfs
90 |
91 | return rfs(aa, clip, no_aa_ranges)
92 | except:
93 | from vstools import replace_ranges as rfs
94 |
95 | return rfs(aa, clip, no_aa_ranges)
96 |
--------------------------------------------------------------------------------
/vodesfunc/denoise.py:
--------------------------------------------------------------------------------
1 | from vstools import vs, core, get_y, get_u, get_v, depth, get_depth, join, KwargsT, get_var_infos, FunctionUtil, classproperty
2 | from vsrgtools import contrasharpening
3 | from vsdenoise import MVToolsPreset, MotionMode, SearchMode, prefilter_to_full_range
4 |
5 | from inspect import signature
6 | from importlib.metadata import version as fetch_version
7 | from packaging.version import Version
8 |
9 | __all__ = ["VMDegrain", "schizo_denoise", "MVPresets"]
10 |
11 |
def check_jetpack_version() -> bool:
    """Return True when the installed vsjetpack is >= 0.3.0 (i.e. ships `mc_degrain`), False otherwise."""
    installed = Version(fetch_version("vsjetpack"))
    if installed < Version("0.3.0"):
        return False
    # 0.3.0/0.3.1 have known repair issues; nudge the user but still report True.
    if installed < Version("0.3.2"):
        print("Please update vsjetpack to atleast 0.3.2 if you want to use 0.3.X. There are some necessary repair fixes on it.")
    return True
19 |
20 |
class MVPresets:
    """Namespaced collection of MVTools presets consumed by `VMDegrain`."""

    @classproperty
    def MaybeNotTerrible(self) -> MVToolsPreset:
        """
        This is just me throwing stuff at the wall to have some improvements without major slowdowns.
        """
        from vsdenoise import AnalyzeArgs, RecalculateArgs

        analyze = AnalyzeArgs(truemotion=MotionMode.SAD, search=SearchMode.HEXAGON, pelsearch=2)
        recalculate = RecalculateArgs(truemotion=MotionMode.SAD, search=SearchMode.HEXAGON, searchparam=1)
        return MVToolsPreset(pel=1, search_clip=prefilter_to_full_range, analyze_args=analyze, recalculate_args=recalculate)

    @classproperty
    def ActualOldWrapperMatch(self) -> MVToolsPreset:
        """
        Preset to match the old wrapper as well as possible.
        It is arguable if this is desirable.
        """
        from vsdenoise import AnalyzeArgs, RecalculateArgs

        analyze = AnalyzeArgs(truemotion=MotionMode.SAD, search=SearchMode.DIAMOND, pelsearch=2)
        recalculate = RecalculateArgs(truemotion=MotionMode.SAD, search=SearchMode.ONETIME, searchparam=0)
        return MVToolsPreset(
            pel=1,
            pad=16,
            search_clip=prefilter_to_full_range,
            analyze_args=analyze,
            recalculate_args=recalculate,
        )

    @classproperty
    def Default(self) -> MVToolsPreset | None:
        """
        Returns `MVPresets.MaybeNotTerrible` if used on a version where mc_degrain is available.
        Otherwise `None`.
        """
        return MVPresets.MaybeNotTerrible if check_jetpack_version() else None
61 |
62 |
def VMDegrain(
    src: vs.VideoNode,
    thSAD: int = 60,
    prefilter: vs.VideoNode | int = 2,
    smooth: bool = True,
    block_size: int | None = None,
    overlap: int | None = None,
    refine: int = 2,
    tr: int = 2,
    preset: MVToolsPreset | None = MVPresets.Default,  # NOTE(review): evaluated once at import time — confirm that is intended
    **kwargs: KwargsT,
) -> vs.VideoNode:
    """
    Just some convenience function for mvtools with a useable preset and temporal smoothing.\n
    Check the MVTools Docs for the params that aren't listed below.\n
    `block_size`, `overlap` and `refine` are using somewhat optimized defaults depending on the resolution if `None`.


    :param src: Input to denoise
    :param smooth: Run TTempsmooth on the denoised clip if True
    :return: Denoised clip
    """
    from vsdenoise import MVTools, SADMode, SearchMode, MotionMode, Prefilter

    if isinstance(prefilter, int):
        prefilter = Prefilter(prefilter)

    futil = FunctionUtil(src, VMDegrain, 0, vs.YUV, 16)

    # block_size and overlap only make sense together.
    if any([block_size, overlap]) and not all([block_size, overlap]):
        raise ValueError("VMDegrain: If you want to play around with blocksize, overlap or refine, you have to set all of them.")

    if not block_size or not overlap:
        # Pick block sizes roughly scaled to the source resolution.
        _, width, height = get_var_infos(src)
        if width <= 1024 and height <= 576:
            block_size, overlap = 32, 16
        elif width <= 2048 and height <= 1536:
            block_size, overlap = 64, 32
        else:
            block_size, overlap = 128, 64

    try:
        from vsdenoise import (
            mc_degrain,
            RFilterMode,
        )

        if preset is None:
            raise ValueError("VMDegrain: preset cannot be None when on vsjetpack>=0.3.0!")

        # Dirty clean up for random args getting removed from on git.
        # (You should not be using git jetpack with vodesfunc but it is what it is)
        mc_degrain_sig = signature(mc_degrain)
        args = KwargsT(
            prefilter=prefilter,
            thsad=thSAD,
            thsad_recalc=thSAD,
            blksize=block_size,
            refine=refine,
            rfilter=RFilterMode.TRIANGLE,
            preset=preset,
            tr=tr,
        )
        clean_args = {k: v for k, v in args.items() if k in mc_degrain_sig.parameters}

        if len(args) != len(clean_args):
            args_string = ", ".join(list(k for k, _ in args.items() if k not in clean_args))
            print(f"VMDegrain: A couple of arguments are not passed to mc_degrain anymore! ({args_string})\nPlease do report this to the maintainer.")

        # Forward user kwargs verbatim, matching the legacy fallback below.
        # Previously they were silently dropped on this path.
        clean_args.update(kwargs)

        out = mc_degrain(futil.work_clip, **clean_args)
    except ImportError:
        from vsdenoise import PelType

        d_args = KwargsT(
            prefilter=prefilter,
            thSAD=thSAD,
            block_size=block_size,
            overlap=overlap,
            sad_mode=SADMode.SPATIAL.same_recalc,
            search=SearchMode.DIAMOND,
            motion=MotionMode.HIGH_SAD,
            pel_type=PelType.BICUBIC,
            pel=1,
            refine=refine + 1,  # Refine calcs are broken on the old wrapper, 3 is basically equivalent to 2 on the new one
            rfilter=2,
            sharp=2,
            tr=tr,
        )
        d_args.update(**kwargs)
        out = MVTools.denoise(futil.work_clip, **d_args)

    if smooth:
        out = out.ttmpsm.TTempSmooth(maxr=1, thresh=1, mdiff=0, strength=1)

    return futil.return_clip(out)
161 |
162 |
def schizo_denoise(
    src: vs.VideoNode,
    sigma: float | list[float] = [0.8, 0.3],
    thSAD: int = 60,
    radius: int | list[int] = 2,
    nlm_a: int = 2,
    prefilter: vs.VideoNode | int = 2,
    cuda: bool | list[bool] = True,
    csharp: int | bool = False,
    **kwargs,
) -> vs.VideoNode:
    """
    Convenience function for (k)nlm on chroma and mvtools + bm3d(cuda) on luma.
    Mostly for personal scripts so please don't complain too much unless it's an actual issue.

    :param src:         Input to denoise
    :param sigma:       Essentially strength for NLMeans and BM3D.
                        Float or list of floats in this order [bm3d, nlm_uv] or [bm3d, nlm_u, nlm_v]
    :param thSAD:       Not exactly strength but something like that, for mvtools.
    :param radius:      Temporal Radius used for NLMeans and BM3D.
                        Int or list of ints in this order [bm3d, nlm]
    :param prefilter:   vsdenoise Prefilter or prefiltered clip to use for mvtools.
                        Defaults to MINBLUR3
    :param cuda:        Uses NlmCuda and BM3DCuda respectively if available. The latter prefers RTC if available.
                        Will fallback to BM3DHip if installed and no cuda available.
    :param csharp:      Apply contrasharpening after denoising. True defaults to 3 while False obviously disables it.
    :param kwargs:      Any parameters you might wanna pass to bm3d or mvtools.\n
                        Note that this also takes `tr` or `preset` for mvtools which might be very useful.

    :return:            Denoised clip
    """
    if src.format.color_family != vs.YUV:  # type: ignore
        raise ValueError("schizo_denoise: This function expects a full YUV clip.")

    # Normalize scalar params to [bm3d, nlm]-style pairs.
    if not isinstance(radius, list):
        radius = [radius, radius]

    if not isinstance(sigma, list):
        sigma = [sigma, sigma]

    if not isinstance(cuda, list):
        cuda = [cuda, cuda]

    if isinstance(prefilter, int):
        from vsdenoise import Prefilter

        prefilter = Prefilter(prefilter)

    # All intermediate processing happens in 16-bit.
    clip = depth(src, 16)

    # cuda[0] toggles the chroma NLMeans plugin (nlm_cuda vs. KNLMeansCL).
    nlmfunc = core.knlm.KNLMeansCL if not hasattr(core, "nlm_cuda") or not cuda[0] else core.nlm_cuda.NLMeans

    if len(sigma) == 3:
        # Separate strengths per chroma plane: run NLMeans twice and pick U/V from each result.
        clip_u = nlmfunc(clip, a=nlm_a, d=radius[1], h=sigma[1], channels="UV")
        clip_v = nlmfunc(clip, a=nlm_a, d=radius[1], h=sigma[2], channels="UV")
        nlm = join(get_y(clip), get_u(clip_u), get_v(clip_v))  # type: ignore
    else:
        clip_uv = nlmfunc(clip, a=nlm_a, d=radius[1], h=sigma[1], channels="UV")
        nlm = join(clip, clip_uv)  # type: ignore

    # 'Extract' possible bm3d args before passing kwargs to mvtools :)
    bm3dargs = dict(
        block_step=kwargs.pop("block_step", 8),
        bm_range=kwargs.pop("bm_range", 9),
        ps_num=kwargs.pop("ps_num", 2),
        ps_range=kwargs.pop("ps_range", 4),
        fast=kwargs.pop("fast", True),
    )

    # Luma path: mvtools degrain output serves as the BM3D reference clip.
    y = get_y(clip)
    mv = VMDegrain(y, thSAD, prefilter, **kwargs)

    has_cuda = hasattr(core, "bm3dcuda") or hasattr(core, "bm3dcuda_rtc")
    has_hip = hasattr(core, "bm3dhip")

    if cuda[1] and (has_cuda or has_hip):
        if has_cuda:
            # Prefer the RTC build when both CUDA plugins are installed.
            bm3dfunc = core.bm3dcuda if not hasattr(core, "bm3dcuda_rtc") else core.bm3dcuda_rtc
        else:
            bm3dfunc = core.bm3dhip
    else:
        # bm3dcpu does not know the `fast` parameter.
        bm3dargs.pop("fast")
        bm3dfunc = core.bm3dcpu

    # BM3D plugins expect 32-bit float input.
    bm3d = bm3dfunc.BM3Dv2(depth(y, 32), depth(mv, 32), sigma[0], radius=radius[0], **bm3dargs)

    # Merge denoised luma with the NLMeans chroma and restore input depth/props.
    out = join(depth(bm3d, 16), nlm)  # type: ignore
    out = depth(out, get_depth(src))
    if csharp != False:  # noqa: E712
        out = contrasharpening(out, src, mode=3 if csharp == True else csharp)  # noqa: E712
    return out.std.CopyFrameProps(src)
254 |
--------------------------------------------------------------------------------
/vodesfunc/misc.py:
--------------------------------------------------------------------------------
1 | from vstools import vs, core, depth, get_y, get_h, get_w
2 | from typing import Any
3 | from math import ceil
4 | from functools import partial
5 |
6 | from .rescale_ext import RescaleBase
7 |
8 | __all__ = ["dirty_prop_set"]
9 |
10 |
def dirty_prop_set(
    clip: vs.VideoNode,
    threshold: int = 1100,
    luma_scaling: int = 24,
    prop_name: str | None = None,
    src_prop_val: Any | None = None,
    bbm_prop_val: Any | None = None,
    debug_output: bool = False,
) -> list[vs.VideoNode]:
    """
    Dirty-edge-based frameprop setting function using bbm, a brightness difference check and a brightness scaling
    (might be a very specific usecase)

    Returns both filtered clip and mask in a VideoNode List (0 = clip, 1 = mask)

    An example for this would be my tanya script:
    Only 720p frames have dirty edges so write a 720 prop if dirty edges are detected.

    dirty_prop_set(.., prop_name = 'Rescale', src_prop_val = 812, bbm_prop_val = 720)

    :param clip:            Clip to check for dirty edges.
    :param threshold:       Base brightness-difference threshold (16-bit scale) before luma weighting.
    :param luma_scaling:    Strength of the brightness weighting applied to `threshold`.
    :param prop_name:       Name of the frameprop to write.
    :param src_prop_val:    Prop value written on frames considered clean.
    :param bbm_prop_val:    Prop value written on frames considered dirty (bbmod applied).
    :param debug_output:    Print per-frame brightness / weighted-threshold values.
    :raises ModuleNotFoundError: If awsmfunc is not installed.
    """

    def _select_frame(n: int, f: vs.VideoFrame, clip_a: vs.VideoNode, clip_b: vs.VideoNode) -> vs.VideoNode:
        # Pick the bbmod'd clip whenever the difference mask has (almost) any active pixels.
        plane_stats_average = f.props["PlaneStatsAverage"]
        return clip_b if plane_stats_average > 0.00010 else clip_a  # type: ignore

    def _get_mask(n: int, f: vs.VideoFrame, clip_a: vs.VideoNode, clip_b: vs.VideoNode) -> vs.VideoNode:
        # Scale the threshold by frame brightness so darker frames need a smaller difference.
        brightness = f.props["PlaneStatsAverage"]
        weighted_thr = threshold * (1 - (1 - brightness) ** (brightness**2 * luma_scaling))  # type: ignore
        if debug_output:
            print(f"Frame {n}: Average Brightness - {brightness:.20f}, Weighted - {weighted_thr:.20f}")
        return core.std.Expr([clip_a, clip_b], [f"y x - {weighted_thr} > 65536 0 ?", ""])

    # Previously a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
    # and hid unrelated errors raised while importing. Only a missing module should
    # be translated here; chain the cause for debuggability.
    try:
        import awsmfunc as awf
    except ImportError as e:
        raise ModuleNotFoundError("awsmfunc not found!") from e

    clip = depth(clip, 16).std.PlaneStats()  # Wouldn't this be set way earlier?
    bbm = awf.bbmod(clip, 1, 1, 1, 1, thresh=50, blur=666)
    mask = get_y(core.std.FrameEval(clip, partial(_get_mask, clip_a=clip, clip_b=bbm), clip)).std.PlaneStats()

    # Ints are stored as int props; anything else is stringified.
    if isinstance(src_prop_val, int) and isinstance(bbm_prop_val, int):
        bbm_prop, src_prop = [c.std.SetFrameProp(prop=prop_name, intval=i) for c, i in zip([bbm, clip], [bbm_prop_val, src_prop_val])]
    else:
        bbm_prop, src_prop = [c.std.SetFrameProp(prop=prop_name, data=i) for c, i in zip([bbm, clip], [str(bbm_prop_val), str(src_prop_val)])]

    return [core.std.FrameEval(clip, partial(_select_frame, clip_a=src_prop, clip_b=bbm_prop), prop_src=mask), mask]
59 |
60 |
61 | # fmt: off
def get_border_crop(input_clip: vs.VideoNode, base: RescaleBase, override_window: int | None = None) -> tuple[int, int, int, int]:
    """
    Get the crops for the border handling masking.

    Returns (left, right, top, bottom), matching the argument order of
    ``std.Crop``/``std.AddBorders`` as used by ``RescaleBuilder.linemask``.
    """

    # Kernel support radius; border_handling only affects pixels within this window.
    kernel_window = override_window or base.kernel.kernel_radius

    if base.height == input_clip.height:
        # Height untouched by the descale -> nothing to crop vertically.
        vertical_crop = (0, 0)
    else:
        # NOTE(review): this parses as `(base.base_height or get_h(...)) if base.base_width else base.height`,
        # so an explicitly set base_height is ignored whenever base_width is unset — confirm intended.
        base_height = base.base_height or get_h(base.base_width, base.descaled) if base.base_width else base.height
        src_top = base.descale_func_args.get("src_top", 0)

        # Map the affected descaled rows back to input resolution, rounding outward.
        top = max(ceil(
            (-(base.height - 1) / 2 + kernel_window - src_top - 1)
            * input_clip.height / base.height + (input_clip.height - 1) / 2
        ), 0)

        bottom = max(ceil(
            (-(base.height - 1) / 2 + kernel_window - (base_height - base.height - src_top) - 1)
            * input_clip.height / base.height + (input_clip.height - 1) / 2
        ), 0)

        vertical_crop = (top, bottom)

    if base.width == input_clip.width:
        horizontal_crop = (0, 0)
    else:
        # Same precedence caveat as base_height above.
        base_width = base.base_width or get_w(base.base_height, base.descaled) if base.base_height else base.width
        src_left = base.descale_func_args.get("src_left", 0)

        left = max(ceil(
            (-(base.width - 1) / 2 + kernel_window - src_left - 1)
            * input_clip.width / base.width + (input_clip.width - 1) / 2
        ), 0)

        right = max(ceil(
            (-(base.width - 1) / 2 + kernel_window - (base_width - base.width - src_left) - 1)
            * input_clip.width / base.width + (input_clip.width - 1) / 2
        ), 0)

        horizontal_crop = (left, right)

    return horizontal_crop + vertical_crop
104 | # fmt: on
105 |
--------------------------------------------------------------------------------
/vodesfunc/noise.py:
--------------------------------------------------------------------------------
1 | from typing import Sequence, Callable
2 | from vstools import vs, core, get_depth, scale_value, split, normalize_seq, get_neutral_value, get_peak_value, mod4
3 | from vskernels import Scaler, Lanczos, Bicubic
4 |
5 | __all__ = ["adaptive_grain", "grain", "ntype4", "itype4"]
6 |
# Kwargs presets recreating the old vodesfunc "type 4" grain look.
ntype4 = {"type": 2, "scale": 0.7, "scaler": Bicubic(b=-1 / 2, c=1 / 4)}
"""vodesfunc grainer type 4 preset (for `adaptive_grain` below)"""

itype4 = {"type": 2, "size": 0.769, "sharp": Bicubic(b=-1 / 2, c=1 / 4), "protect_chroma": True, "fade_limits": True}
"""Also the vodesfunc type 4 preset but 'should' work for vsdeband grainers."""
12 |
13 |
def adaptive_grain(
    clip: vs.VideoNode,
    strength: float | list[float] = [2.0, 0.5],
    size: float | list[float] = 3,
    type: int = 3,
    static: bool = False,
    temporal_average: int = 25,
    luma_scaling: float = 6,
    seed: int = -1,
    temporal_radius: int = 3,
    scale: float = 1,
    scaler: Scaler = Lanczos(),
    post_grain: Callable[[vs.VideoNode], vs.VideoNode] | None = None,
    fade_edges: bool = True,
    tv_range: bool = True,
    lo: int | Sequence[int] | None = None,
    hi: int | Sequence[int] | None = None,
    protect_neutral: bool = True,
    **kwargs,
) -> vs.VideoNode:
    """
    Very frankenstein'd mix of setsu's and the original adptvgrnMod
    Only supports https://github.com/wwww-wwww/vs-noise and has some stuff I don't need stripped out.

    :param clip:                Input clip.
    :param strength:            Grainer strength. Use a list to specify [luma, chroma] graining.
                                Default chroma grain is luma / 5.
    :param size:                Grain size. Will be passed as xsize and ysize. Can be adjusted individually with a list.
                                This should not be confused with the resizing of adptvgrnMod. For something similar, use the `scale` param.
    :param type:                See vs-noise github for 0-4.
    :param static:              Static or dynamic grain.
    :param seed:                Grain seed for the grainer.
    :param temporal_average:    Reference frame weighting for temporal softening and grain consistency.
    :param temporal_radius:     How many frames the averaging will use.
    :param luma_scaling:        Luma scaling passed to the adaptivegrain mask. Will use the absolute value on an inverted clip if a negative number is passed.
                                Mainly useful for graining the bright parts of an image.
    :param scale:               Makes the grain bigger if > 1 and smaller if < 1 by graining a different sized blankclip and scaling to clip res after.
                                Can be used to tweak sharpness/frequency considering vs-noise always keeps those the same no matter the size.
    :param scaler:              Scaler/Kernel used for down- or upscaling the grained blankclip.
    :param post_grain:          A callable function to run on the grained blankclip pre scaling. An example use would be to sharpen like I did for something.

    :param fade_edges:          Keeps grain from exceeding legal range.
                                With this, values which go towards the neutral point, but would generate
                                illegal values if they pointed in the other direction are also limited.
                                This is better at maintaining average values and prevents flickering pixels on OLEDs.
    :param tv_range:            TV or PC legal range.
    :param lo:                  Overwrite legal range's minimums. Value is scaled from 8-bit to clip depth.
    :param hi:                  Overwrite legal range's maximums. Value is scaled from 8-bit to clip depth.
    :param protect_neutral:     Disable chroma grain on neutral chroma.
    :param kwargs:              Kwargs passed to the grainer.

    :returns:                   Grained clip.
    """

    # Normalize scalar strength/size to [luma, chroma] pairs; default chroma strength is luma / 5.
    strength = strength if isinstance(strength, list) else [strength, 0.2 * strength]
    size = size if isinstance(size, list) else [size, size]

    if type > 4 or type < 0:
        raise ValueError("adaptive_grain: Type has to be a number between 0 and 4")

    if scale >= 2:
        raise ValueError("adaptive_grain: Scale has to be a number below 2. (Default is 1, to disable scaling)")

    # Negative luma_scaling grains bright areas instead by inverting the clip before the mask.
    mask = core.adg.Mask(clip.std.PlaneStats() if luma_scaling >= 0 else clip.std.Invert().std.PlaneStats(), abs(luma_scaling))
    ogdepth = get_depth(clip)

    # Type 4 depends on the input clip and as such should not be static, averaged or scaled
    if type == 4:
        grained = clip.noise.Add(strength[0], strength[1], type=type, xsize=size[0], ysize=size[1], constant=False, **kwargs)
        return clip.std.MaskedMerge(grained, mask)

    def scale_val8x(value: int, chroma: bool = False) -> float:
        # Scale an 8-bit reference value to the clip's bitdepth.
        return scale_value(value, 8, ogdepth, scale_offsets=not tv_range, chroma=chroma)

    # Extra frames are generated so the temporal average can be trimmed back to the original length.
    if not static and temporal_average > 0:
        length = clip.num_frames + temporal_radius - 1
    else:
        length = clip.num_frames

    # Inverse-scale the blank clip's size so scaling back to clip res resizes the grain by `scale`.
    width = clip.width - (clip.width * scale - clip.width)
    height = clip.height - (clip.height * scale - clip.height)

    if scale != 1:
        width = mod4(width)
        height = mod4(height)

    neutral = get_neutral_value(clip)
    blank = clip.std.BlankClip(width, height, length=length, color=normalize_seq(neutral, clip.format.num_planes))
    grained = blank.noise.Add(strength[0], strength[1], type=type, xsize=size[0], ysize=size[1], seed=seed, constant=static, **kwargs)

    if callable(post_grain):
        grained = post_grain(grained)

    grained = scaler.scale(grained, clip.width, clip.height)

    if not static and temporal_average > 0:
        cut = (temporal_radius - 1) // 2
        grained = core.std.Merge(grained, core.std.AverageFrames(grained, weights=[1] * temporal_radius), weight=temporal_average / 100)
        # NOTE(review): with temporal_radius=1, cut is 0 and [0:-0] yields an empty clip — confirm radius >= 2 is assumed.
        grained = grained[cut:-cut]

    if fade_edges:
        # lo/hi default to TV-range legal limits, normalized to [luma, chroma] pairs.
        if lo is None:
            lo = [scale_val8x(16), scale_val8x(16, True)]
        elif not isinstance(lo, list):
            lo = [scale_val8x(lo), scale_val8x(lo, True)]

        if hi is None:
            hi = [scale_val8x(235), scale_val8x(240, True)]
        elif not isinstance(hi, list):
            hi = [scale_val8x(hi), scale_val8x(hi, True)]

        # Clamp grain that would push values outside [lo, hi]; float clips get a dedicated chroma expression.
        limit_expr = "x y {0} - abs - {1} < x y {0} - abs + {2} > or x y {0} - x + ?"
        if clip.format.sample_type == vs.INTEGER:
            limit_expr = 2 * [limit_expr]
        else:
            limit_expr = [limit_expr, "x y abs + {2} > x abs y - {1} < or x x y + ?"]

        # NOTE(review): `num_planes - 1` yields an empty expression list for GRAY input — confirm YUV-only usage.
        grained = core.std.Expr([clip, grained], [limit_expr[_].format(neutral, lo[_], hi[_]) for _ in range(0, clip.format.num_planes - 1)])

        if protect_neutral and strength[1] > 0 and clip.format.color_family == vs.YUV:
            format444 = core.query_video_format(vs.YUV, clip.format.sample_type, ogdepth, 0, 0)
            neutral_mask = clip.resize.Bicubic(format=format444)
            # disable grain if neutral chroma
            neutral_mask = core.std.Expr(split(neutral_mask), f"y {neutral} = z {neutral} = and {get_peak_value(clip)} 0 ?")
            grained = core.std.MaskedMerge(grained, clip, neutral_mask, planes=[1, 2])
    else:
        # No limiting: just add the grain diff back onto the source.
        if clip.format.sample_type == vs.INTEGER:
            grained = core.std.MergeDiff(clip, grained)
        else:
            grained = core.std.Expr([clip, grained], [f"y {neutral} - x +" for _ in range(clip.format.num_planes - 1)])

    return clip.std.MaskedMerge(grained, mask)


# Backwards-compatible alias.
grain = adaptive_grain
149 |
--------------------------------------------------------------------------------
/vodesfunc/rescale.py:
--------------------------------------------------------------------------------
1 | from vstools import (
2 | vs,
3 | core,
4 | FunctionUtil,
5 | GenericVSFunction,
6 | iterate,
7 | replace_ranges,
8 | FrameRangesN,
9 | get_peak_value,
10 | FieldBasedT,
11 | FieldBased,
12 | CustomValueError,
13 | get_video_format,
14 | )
15 | from vskernels import KernelT, Kernel, ScalerT, Bilinear, Hermite
16 | from vsmasktools import EdgeDetectT, KirschTCanny
17 | from vsrgtools import removegrain
18 | from typing import Self
19 | import inspect
20 |
21 | from .scale import Doubler
22 | from .rescale_ext import RescBuildFB, RescBuildNonFB
23 | from .rescale_ext.mixed_rescale import RescBuildMixed
24 |
25 | __all__ = ["RescaleBuilder"]
26 |
27 |
class RescaleBuilder(RescBuildFB, RescBuildNonFB, RescBuildMixed):
    """
    The fancy new rescale wrapper to make life easier.
    Now 99% less buggy and should handle everything.

    Example usage:
    ```py
    builder, rescaled = (
        RescaleBuilder(clip)
        .descale(Bilinear, 1500, 843.75, base_height=846)
        .double()
        .errormask(0.0975)
        .linemask()
        .post_double(lambda x: aa_dehalo(x)) # Or a function like post_double(aa_dehalo)
        .downscale(Hermite(linear=True))
        .final()
    )
    ```
    """

    def __init__(self, clip: vs.VideoNode):
        # Work on the luma plane only, in 32-bit float; chroma is merged back in final().
        self.funcutil = FunctionUtil(clip, self.__class__.__name__, planes=0, color_family=(vs.YUV, vs.GRAY), bitdepth=32)

    def descale(
        self,
        kernel: KernelT,
        width: int | float,
        height: int | float,
        base_height: int | None = None,
        base_width: int | None = None,
        shift: tuple[float, float] = (0, 0),
        field_based: FieldBasedT | None = None,
        mode: str = "hw",
    ) -> Self:
        """
        Performs descale and rescale (with the same kernel).

        :param kernel:      Kernel to descale with
        :param width:       Width to descale to. Please be absolutely certain of what you're doing if you're using get_w for this.
        :param height:      Height to descale to
        :param base_height: Padded height used in a "fractional" descale
        :param base_width:  Padded width used in a "fractional" descale
                            Both of these are technically optional but highly recommended to have set for float width/height.

        :param shift:       A custom shift to be applied
        :param mode:        Whether to descale only height, only width, or both.
                            "h" or "w" respectively for the former two.
        :param field_based: To descale a cross-converted/interlaced clip.
                            Will try to take the prop from the clip if `None` was passed.
        """
        clip = self.funcutil.work_clip

        # Heuristic warning: inspect the caller's source line for get_w usage with a float height.
        if isinstance(height, float) and len(stack := inspect.stack()) > 1:
            has_getw = [ctx for ctx in stack[1].code_context if "get_w" in ctx.lower()]
            if has_getw:
                print("RescaleBuilder: Please make sure get_w returns the width you really want!")

        self.kernel = Kernel.ensure_obj(kernel)
        # border_handling may be smuggled in via kernel kwargs; pop it so the kernel doesn't see it.
        self.border_handling = self.kernel.kwargs.pop("border_handling", 0)
        self.field_based = FieldBased.from_param(field_based) or FieldBased.from_video(clip)

        # Dimensions not selected by `mode` stay at clip resolution.
        self.height = height if "h" in mode else clip.height
        self.width = width if "w" in mode else clip.width
        self.base_height = base_height
        self.base_width = base_width

        if (isinstance(width, float) or isinstance(height, float)) and self.field_based.is_inter:
            raise CustomValueError("Float is not supported for fieldbased descales!", self.descale)

        if self.field_based.is_inter:
            self._fieldbased_descale(clip, width=self.width, height=self.height, shift=shift, border_handling=self.border_handling)
        else:
            self._non_fieldbased_descale(clip, width, height, base_height, base_width, shift, mode)

        self.descaled = self.descaled.std.CopyFrameProps(clip)
        self.rescaled = self.rescaled.std.CopyFrameProps(clip)

        return self

    def post_descale(self, func: GenericVSFunction | list[GenericVSFunction]) -> Self:
        """
        A function to apply any arbitrary function on the descaled clip.\n
        I can't think of a good usecase/example for this but I was asked to add this before.

        :param func:    This can be any function or list of functions that take a videonode input
                        and returns a videonode. You are responsible for keeping the format the same.
        """
        if not isinstance(func, list):
            func = [func]

        for f in func:
            if not callable(f):
                raise CustomValueError(f"post_descale: Function {f.__name__} is not callable!", self.post_descale)

            self.descaled = f(self.descaled)

        return self

    def linemask(
        self,
        mask: vs.VideoNode | EdgeDetectT | None = None,
        downscaler: ScalerT | None = None,
        maximum_iter: int = 0,
        inflate_iter: int = 0,
        expand: int | tuple[int, int | None] = 0,
        kernel_window: int | None = None,
        **kwargs,
    ) -> Self:
        """
        A function to apply a linemask to the final output.

        :param mask:            This can be a masking function like `KirschTCanny` (also the default if `None`) or a clip.
        :param downscaler:      Downscaler to use if creating a linemask on the doubled clip. Defaults to `Bilinear` if `None`.
        :param maximum_iter:    Apply std.Maximum x amount of times
        :param inflate_iter:    Apply std.inflate x amount of times
        :param expand:          Apply an ellipse morpho expand with the passed amount.
                                Can be a tuple of (horizontal, vertical) or a single value for both.
        :param kernel_window:   To override kernel radius used in case of border_handling being used.
        :param **kwargs:        Any other params to pass to the edgemask creation. For example `lthr` or `hthr`.
        """
        if self.upscaled:
            raise SyntaxError("RescaleBuilder: Downscaled clip already created. Create linemasks before calling downscale.")
        if isinstance(mask, vs.VideoNode):
            self.linemask_clip = mask
            return self
        edgemaskFunc = KirschTCanny.ensure_obj(mask)

        # Perform on doubled clip if exists and downscale
        if self.doubled:
            scaler = Bilinear.ensure_obj(downscaler)
            self.linemask_clip = edgemaskFunc.edgemask(self.doubled, **kwargs)
            self.linemask_clip = scaler.scale(self.linemask_clip, self.funcutil.work_clip.width, self.funcutil.work_clip.height, **self.post_crop)
        else:
            self.linemask_clip = edgemaskFunc.edgemask(self.funcutil.work_clip, **kwargs)

        self.linemask_clip = self._process_mask(self.linemask_clip, maximum_iter, inflate_iter, expand)

        if self.border_handling:
            from .misc import get_border_crop

            # Force the mask to full white at borders affected by border_handling.
            borders = get_border_crop(self.funcutil.work_clip, self, kernel_window)
            self.linemask_clip = self.linemask_clip.std.Crop(*borders).std.AddBorders(*borders, [get_peak_value(self.linemask_clip)])

        self.linemask_clip = self.linemask_clip.std.Limiter()

        return self

    def _errormask(
        self, mask: vs.VideoNode | float = 0.05, maximum_iter: int = 2, inflate_iter: int = 3, expand: int | tuple[int, int | None] = 0
    ) -> vs.VideoNode:
        # Build (or pass through) a binary descale-error mask; shared by errormask and errormask_zoned.
        if self.upscaled:
            raise SyntaxError("RescaleBuilder: Downscaled clip already created. Create errormasks before calling downscale.")
        if isinstance(mask, vs.VideoNode):
            return mask

        err_mask = core.std.Expr([self.funcutil.work_clip, self.rescaled], f"x y - abs {mask} < 0 1 ?")
        err_mask = removegrain(err_mask, 6)
        err_mask = self._process_mask(err_mask, maximum_iter, inflate_iter, expand)

        return err_mask

    def errormask(
        self, mask: vs.VideoNode | float = 0.05, maximum_iter: int = 2, inflate_iter: int = 3, expand: int | tuple[int, int | None] = 0
    ) -> Self:
        """
        A function to apply a basic error mask to the final output.

        :param mask:            With a float, and by default, will be created internally. Could also pass a clip.
        :param maximum_iter:    Apply std.Maximum x amount of times
        :param inflate_iter:    Apply std.inflate x amount of times
        :param expand:          Apply an ellipse morpho expand with the passed amount.
                                Can be a tuple of (horizontal, vertical) or a single value for both.
        """
        self.errormask_clip = self._errormask(mask, maximum_iter, inflate_iter, expand).std.Limiter()
        return self

    def errormask_zoned(
        self,
        ranges: FrameRangesN,
        mask: vs.VideoNode | float = 0.05,
        maximum_iter: int = 2,
        inflate_iter: int = 3,
        expand: int | tuple[int, int | None] = 0,
    ) -> Self:
        """
        A function to apply a basic error mask to the final output.\n
        But with this rfs'd to certain ranges.
        """
        if not ranges:
            return self
        err_mask = self._errormask(mask, maximum_iter, inflate_iter, expand)
        # Start from an all-black mask so frames outside `ranges` stay unmasked.
        if not self.errormask_clip:
            self.errormask_clip = core.std.BlankClip(self.funcutil.work_clip, format=get_video_format(err_mask))

        self.errormask_clip = replace_ranges(self.errormask_clip, err_mask, ranges)
        return self

    def double(self, upscaler: Doubler | ScalerT | None = None) -> Self:
        """
        Upscales the descaled clip by 2x

        :param upscaler:    Any kind of vsscale scaler. Defaults to Waifu2x.
        """
        if isinstance(upscaler, Doubler):
            self.doubled = upscaler.double(self.descaled)
        else:
            from vsscale import Waifu2x

            scaler = Waifu2x.ensure_obj(upscaler)  # type: ignore
            self.doubled = scaler.multi(self.descaled)
        return self

    def post_double(self, func: GenericVSFunction | list[GenericVSFunction]) -> Self:
        """
        A function to apply any arbitrary function on the doubled clip.

        :param func:    This can be any function or list of functions that take a videonode input
                        and returns a videonode. You are responsible for keeping the format the same.
        """
        if not self.doubled:
            raise SyntaxError("post_double: Doubled clip has not been generated yet. Please call this after double().")

        if not isinstance(func, list):
            func = [func]

        for f in func:
            if not callable(f):
                raise CustomValueError(f"post_double: Function {f.__name__} is not callable!", self.post_double)

            self.doubled = f(self.doubled)

        return self

    def downscale(self, downscaler: ScalerT | None = None) -> Self:
        """
        Downscales the clip back the size of the original input clip and applies the masks, if any.

        :param downscaler:  Any vsscale scaler to use. Defaults to Linear Hermite.
        """
        # NOTE(review): ensure_obj is called on a Hermite(linear=True) instance — verify the
        # default actually keeps linear=True with the vskernels version in use.
        scaler = Hermite(linear=True).ensure_obj(downscaler)
        if not self.doubled:
            raise SyntaxError("Downscale/Final is the last one that should be called in a chain!")
        wclip = self.funcutil.work_clip
        self.upscaled = scaler.scale(self.doubled, wclip.width, wclip.height, **self.post_crop)
        self._apply_masks()
        self.upscaled = self.upscaled.std.CopyFrameProps(wclip)
        return self

    def _apply_masks(self):
        # Merge the rescaled clip with the source according to line/error masks.
        # With both masks set, the errormask punches holes into the linemask first.
        wclip = self.funcutil.work_clip
        if isinstance(self.errormask_clip, vs.VideoNode) and isinstance(self.linemask_clip, vs.VideoNode):
            self.final_mask = core.std.Expr([self.linemask_clip, self.errormask_clip], "x y - 0 max 1 min")
            self.upscaled = wclip.std.MaskedMerge(self.upscaled, self.final_mask)
        elif isinstance(self.errormask_clip, vs.VideoNode):
            self.upscaled = self.upscaled.std.MaskedMerge(wclip, self.errormask_clip)
        elif isinstance(self.linemask_clip, vs.VideoNode):
            self.upscaled = wclip.std.MaskedMerge(self.upscaled, self.linemask_clip)

    def final(self) -> tuple[Self, vs.VideoNode]:
        """
        This is the last function in the chain that also returns the final clip.
        It internally calls `downscale` if you haven't done so before and then merges the resulting clip with the input chroma, if any.

        :return:    A tuple of this class and the resulting final rescale.
        """
        if not self.upscaled:
            self.downscale()
        if not self.upscaled:
            raise TypeError("No downscaled clip has been generated yet!")

        return (self, self.funcutil.return_clip(self.upscaled))

    def _process_mask(
        self, mask: vs.VideoNode, maximum_iter: int = 0, inflate_iter: int = 0, expand: int | tuple[int, int | None] = 0
    ) -> vs.VideoNode:
        # Shared grow/inflate/expand post-processing for line- and errormasks.
        if maximum_iter:
            mask = iterate(mask, core.std.Maximum, maximum_iter)

        if inflate_iter:
            mask = iterate(mask, core.std.Inflate, inflate_iter)

        if expand:
            if isinstance(expand, int):
                expand = (expand, expand)
            from vsmasktools import Morpho, XxpandMode

            mask = Morpho.expand(mask, expand[0], expand[1], XxpandMode.ELLIPSE)

        return mask
317 |
--------------------------------------------------------------------------------
/vodesfunc/rescale_ext/__init__.py:
--------------------------------------------------------------------------------
1 | from .mixed_rescale import *
2 | from .fieldbased_rescale import *
3 | from .regular_rescale import *
4 | from .base import *
5 |
6 | from . import base, fieldbased_rescale, regular_rescale, mixed_rescale
7 |
--------------------------------------------------------------------------------
/vodesfunc/rescale_ext/base.py:
--------------------------------------------------------------------------------
1 | from vstools import FunctionUtil, KwargsT, vs, FieldBasedT, core, expect_bits, depth, vs_object
2 | from vskernels import Kernel, Bilinear, Bicubic, Lanczos
3 | from typing import Self, MutableMapping, TYPE_CHECKING
4 | from abc import abstractmethod
5 |
6 | __all__ = ["RescaleBase", "RescaleNumbers", "descale_rescale"]
7 |
8 |
class RescaleNumbers:
    """Numeric state of a rescale: target dimensions, padding and border handling."""

    height: float | int  # descale target height (float for fractional descales)
    width: float | int  # descale target width
    base_height: int | None  # padded height used in a fractional descale
    base_width: int | None  # padded width used in a fractional descale
    border_handling: int  # border_handling value forwarded to the descale plugin
    border_handling = 0
15 |
16 |
class RescaleBase(RescaleNumbers, vs_object):
    """
    Shared state for the rescale builder mixins.

    Holds every intermediate clip of the rescale chain and clears those
    references on core destruction via `__vs_del__`.
    """

    funcutil: FunctionUtil  # FunctionUtil wrapping the original input clip
    kernel: Kernel  # kernel used for descale and rescale
    post_crop: KwargsT  # crop/shift kwargs applied when scaling back to input size
    rescale_args: KwargsT
    descale_func_args: KwargsT  # args the descale was performed with (src_top/src_left etc.)
    field_based: FieldBasedT | None = None

    descaled: vs.VideoNode  # clip after descaling
    rescaled: vs.VideoNode  # descaled clip re-upscaled with the same kernel
    upscaled: vs.VideoNode | None = None  # final downscaled (and masked) clip
    doubled: vs.VideoNode | None = None  # 2x upscale of the descaled clip
    linemask_clip: vs.VideoNode | None = None
    errormask_clip: vs.VideoNode | None = None

    @abstractmethod
    def final(self) -> tuple[Self, vs.VideoNode]: ...

    def _return_creditmask(self) -> vs.VideoNode:
        # Fall back to a blank (all-black) mask when no errormask was created.
        return self.errormask_clip if isinstance(self.errormask_clip, vs.VideoNode) else core.std.BlankClip(self.funcutil.work_clip)

    def _return_linemask(self) -> vs.VideoNode:
        # Fall back to a blank (all-black) mask when no linemask was created.
        return self.linemask_clip if isinstance(self.linemask_clip, vs.VideoNode) else core.std.BlankClip(self.funcutil.work_clip)

    def __vs_del__(self, core_id: int) -> None:
        """Drop all VideoNode references when the associated VapourSynth core is freed."""
        # TYPE_CHECKING guard only hides the None assignments from type checkers;
        # at runtime the branch always executes.
        if not TYPE_CHECKING:
            self.descaled = None
            self.rescaled = None
            self.upscaled = None
            self.doubled = None
            self.linemask_clip = None
            self.errormask_clip = None
        # Also nil out VideoNodes stored inside kwargs mappings (post_crop etc.).
        for v in self.__dict__.values():
            if not isinstance(v, MutableMapping):
                continue

            for k2, v2 in v.items():
                if isinstance(v2, vs.VideoNode):
                    v[k2] = None
56 |
57 |
def descale_rescale(builder: RescaleBase, clip: vs.VideoNode, **kwargs) -> vs.VideoNode:
    """
    Run `clip` through the descale plugin using the builder's kernel settings.

    Maps the vskernels kernel object onto the matching descale plugin function
    and forwards the builder's border_handling plus any extra kwargs.

    :param builder: Rescale builder holding the kernel and border_handling.
    :param clip:    Clip to process (internally converted to 32-bit float).
    :param kwargs:  Extra arguments forwarded to the descale plugin call.
    :raises ValueError: If the kernel is not Bilinear, Bicubic or Lanczos based.
    """
    kernel_args = KwargsT(border_handling=builder.border_handling)
    # isinstance already matches subclasses, so the former extra
    # `issubclass(kernel.__class__, Bicubic)` check was redundant.
    if isinstance(builder.kernel, Bilinear):
        kernel_function = core.descale.Bilinear
    elif isinstance(builder.kernel, Bicubic):
        kernel_function = core.descale.Bicubic
        kernel_args.update({"b": builder.kernel.b, "c": builder.kernel.c})
    elif isinstance(builder.kernel, Lanczos):
        kernel_function = core.descale.Lanczos
        kernel_args.update({"taps": builder.kernel.taps})
    else:
        # I'm just lazy idk
        raise ValueError(f"{builder.kernel.__class__} is not supported for rescaling!")

    # Caller kwargs take precedence over the defaults assembled above.
    kernel_args.update(kwargs)
    clip, bits = expect_bits(clip, 32)
    return depth(kernel_function(clip, **kernel_args), bits)
75 |
--------------------------------------------------------------------------------
/vodesfunc/rescale_ext/fieldbased_rescale.py:
--------------------------------------------------------------------------------
1 | from vstools import vs, KwargsT, FieldBased, padder
2 |
3 | from .base import RescaleBase
4 |
5 | __all__ = ["RescBuildFB"]
6 |
7 |
class RescBuildFB(RescaleBase):
    """Rescale builder for fieldbased (interlaced/telecined) sources."""

    def _fieldbased_descale(self, clip: vs.VideoNode, shift: tuple[float, float] = (0, 0), **descale_args: KwargsT) -> None:
        """
        Descale `clip` as a fieldbased source and rescale back, storing results on self.

        :param clip:            Source clip; tagged with `self.field_based` before descaling.
        :param shift:           (top, left) subpixel shift forwarded to the descale.
        :param descale_args:    Remaining keyword args for the kernel's descale call.
        """
        clip = self.field_based.apply(clip)
        if shift != (0, 0):
            descale_args.update({"shift": shift})
        self.descaled = self.kernel.descale(clip, **descale_args)
        self.descale_func_args = KwargsT()
        self.descale_func_args.update(descale_args)
        self.post_crop = KwargsT()

        if not self.border_handling:
            self.rescaled = self.kernel.scale(self.descaled, width=clip.width, height=clip.height)
        else:
            # Reimplementation of border_handling because regular scale operations aren't aware of it yet.
            # Can't use descale scale because we need vskernels to handle the field shifting.
            wclip = self.descaled
            # Pad 10 pixels on each side of every axis that was actually descaled.
            left = right = 10 if self.width != clip.width else 0
            top = bottom = 10 if self.height != clip.height else 0

            match int(self.border_handling):
                case 1:
                    # Mode 1: pad with black.
                    wclip = padder.COLOR(wclip, left, right, top, bottom, color=0)
                case 2:
                    # Mode 2: repeat edge pixels.
                    wclip = padder.REPEAT(wclip, left, right, top, bottom)
                case _:
                    pass

            # NOTE(review): a src_top/src_left of exactly 0 falls through to the `shift`
            # tuple here because `0 or x` evaluates to x — presumably intended; verify.
            shift_top = descale_args.pop("src_top", False) or shift[0]
            shift_left = descale_args.pop("src_left", False) or shift[1]

            # Offset the shift to account for the padding added above.
            # NOTE(review): the offset scales with the border_handling value (mode 2 adds 20),
            # while the padding itself is always 10 — confirm this is intentional.
            shift = [
                shift_top + (self.height != wclip.height and self.border_handling) * 10,
                shift_left + (self.width != wclip.width and self.border_handling) * 10,
            ]

            src_width = descale_args.pop("src_width", wclip.width)
            src_height = descale_args.pop("src_height", wclip.height)
            # Scale back up while cropping the padding away via the src_* window.
            self.rescaled = self.kernel.scale(
                wclip,
                clip.width,
                clip.height,
                shift=shift,
                src_width=src_width - (wclip.width - self.width),
                src_height=src_height - (wclip.height - self.height),
            )
        # Mark the outputs progressive again after the fieldbased processing.
        self.descaled = FieldBased.PROGRESSIVE.apply(self.descaled)
        self.rescaled = FieldBased.PROGRESSIVE.apply(self.rescaled)
55 |
--------------------------------------------------------------------------------
/vodesfunc/rescale_ext/mixed_rescale.py:
--------------------------------------------------------------------------------
1 | from vstools import vs, core, depth, vs_object
2 | from typing import TYPE_CHECKING, MutableMapping
3 | from enum import IntEnum
4 | from .base import RescaleBase
5 |
6 | __all__ = ["DiffMode", "MixedRB"]
7 |
8 |
class DiffMode(IntEnum):
    """Metric used to compare a rescaled clip against the input clip."""

    MAE = 1
    """Mean absolute error."""

    MSE = 2
    """Mean squared error."""

    GET_NATIVE = 3
    """Thresholded absolute error, as used by getnative."""
20 |
21 |
class RescBuildMixed(RescaleBase):
    """Rescale builder variant exposing per-frame diff clips for use in MixedRB."""

    diffmode: DiffMode = DiffMode.GET_NATIVE
    crop_diff: bool = True
    index = 0

    def get_diff(self) -> vs.VideoNode:
        """Build a PlaneStats clip measuring this builder's rescale error against the work clip."""
        reference = depth(self.funcutil.work_clip, 32)

        expressions = {
            DiffMode.MAE: "x y - abs",
            DiffMode.MSE: "x y - 2 pow",
        }
        # Any other mode falls back to the getnative-style thresholded metric.
        metric = expressions.get(self.diffmode, "x y - abs dup 0.015 > swap 0 ?")

        diff = core.std.Expr([depth(self.rescaled, 32), reference], metric)
        if self.crop_diff:
            # Ignore the outermost 5 pixels on every side.
            diff = diff.std.Crop(5, 5, 5, 5)
        return diff.std.PlaneStats()

    def _add_index_to_clips(self):
        """Tag every output clip with this builder's index via the RB_Target frameprop."""
        for attr in ("descaled", "rescaled", "upscaled"):
            setattr(self, attr, getattr(self, attr).std.SetFrameProp("RB_Target", self.index))
46 |
47 |
class MixedRB(vs_object):
    """
    Implementation of MixedRescale for RescaleBuilder(s)

    This is just a stop-gap solution until we (mostly Setsu) can cook up something better.

    Example Usage:

    ```py
    upscaler = Waifu2x("trt", 1, fp16=True)

    builders = [
        RescaleBuilder(src).descale(Bilinear(border_handling=1), 1280, 720),
        RescaleBuilder(src).descale(BicubicSharp, 1280, 720),
    ]

    # This will be run on all of the above
    builders = [
        b.double(upscaler)
        .linemask(KirschTCanny, Bilinear, lthr=50 / 255, hthr=150 / 255, inflate_iter=2)
        .errormask(expand=2)
        .downscale(Hermite(linear=True))
        for b in builders
    ]

    mixed = MixedRB(*builders)
    rescaled = mixed.get_upscaled()
    ```
    """

    def __init__(self, *targets: RescBuildMixed, diffmode: DiffMode = DiffMode.GET_NATIVE, crop_diff: bool = True) -> None:
        """
        A naive per-frame diff approach of trying to get the best descale.
        Credits to Setsu for most of this class.
        """
        y = targets[0].funcutil.work_clip

        # Give every builder a 1-based index and the shared diff settings.
        for i, d in enumerate(targets):
            d.index = i + 1
            d.diffmode = diffmode
            d.crop_diff = crop_diff
            d._add_index_to_clips()

        prop_srcs = [d.get_diff() for d in targets]
        targets_idx = tuple(range(len(targets)))

        # 1x1 gray clip used as a cheap per-frame carrier for the winner's index.
        blank = core.std.BlankClip(None, 1, 1, vs.GRAY8, y.num_frames, keep=True)

        # Bake each target's PlaneStatsAverage into the single pixel of a GRAYS clip.
        map_prop_srcs = [blank.std.CopyFrameProps(prop_src).akarin.Expr("x.PlaneStatsAverage", vs.GRAYS) for prop_src in prop_srcs]

        base_frame, idx_frames = blank.get_frame(0), []

        # Pre-render one frame per target, holding that target's index as its pixel value.
        for i in targets_idx:
            fcurr = base_frame.copy()

            fcurr[0][0, 0] = i

            idx_frames.append((i, fcurr))

        def _select(n: int, f: vs.VideoFrame) -> vs.VideoFrame:
            # Return the index-frame of the target with the smallest diff value this frame.
            return min(idx_frames, key=lambda i: f[i[0]][0][0, 0])[1]

        _select_clip = blank.std.ModifyFrame(map_prop_srcs, _select)

        def _selector(clips: list[vs.VideoNode | None]) -> vs.VideoNode:
            # Per-frame switch between `clips`, driven by the winner index in _select_clip.
            base = next(filter(None, clips), None)

            if base is None:
                raise ValueError("Requested clip was None")

            base = base.std.BlankClip(keep=True)
            # Replace missing entries with a blank so indexing never fails.
            clips = [c or base for c in clips]

            return core.std.FrameEval(base, lambda n, f: clips[f[0][0, 0]], _select_clip)

        self.upscaled = _selector([t.final()[1] for t in targets])
        self.final = self.upscaled

        self.rescaled = _selector([t.rescaled for t in targets])
        self.credit_mask = _selector([t._return_creditmask() for t in targets])
        self.line_mask = _selector([t._return_linemask() for t in targets])

    def get_upscaled(self, *_) -> vs.VideoNode:
        """Return the per-frame best upscaled clip."""
        return self.upscaled

    def __vs_del__(self, core_id: int) -> None:
        # Drop all VideoNode references on core teardown so the clips can be freed.
        if not TYPE_CHECKING:
            setattr(self, "upscaled", None)
            setattr(self, "final", None)
            setattr(self, "rescaled", None)
            setattr(self, "credit_mask", None)
            setattr(self, "line_mask", None)
            for v in self.__dict__.values():
                if not isinstance(v, MutableMapping):
                    continue

                for k2, v2 in v.items():
                    if isinstance(v2, vs.VideoNode):
                        v[k2] = None
147 |
--------------------------------------------------------------------------------
/vodesfunc/rescale_ext/regular_rescale.py:
--------------------------------------------------------------------------------
1 | from vstools import vs, KwargsT
2 |
3 | from .base import RescaleBase, descale_rescale
4 | from .scaling_args import ScalingArgs
5 |
6 | __all__ = ["RescBuildNonFB"]
7 |
8 |
class RescBuildNonFB(RescaleBase):
    """Rescale builder for progressive sources; supports fractional (subpixel) descales."""

    def _non_fieldbased_descale(
        self,
        clip: vs.VideoNode,
        width: int | float,
        height: int | float,
        base_height: int | None = None,
        base_width: int | None = None,
        shift: tuple[float, float] = (0, 0),
        mode: str = "hw",
    ) -> None:
        """
        Descale `clip` (optionally fractionally) and rescale it back, storing both on self.

        :param clip:        Source clip.
        :param width:       Target descale width; a float triggers fractional handling.
        :param height:      Target descale height; a float triggers fractional handling.
        :param base_height: Height to contain the fractional target in.
        :param base_width:  Width to contain the fractional target in.
        :param shift:       (top, left) subpixel shift.
        :param mode:        Which axes to descale: "h", "w" or both ("hw"/"wh").
        """
        # Resolve rounded dimensions plus fractional src_* parameters.
        sc_args = ScalingArgs.from_args(
            clip, height=height, width=width, base_height=base_height, base_width=base_width, src_top=shift[0], src_left=shift[1], mode=mode
        )

        args = KwargsT(width=sc_args.width, height=sc_args.height, border_handling=self.border_handling) | sc_args.kwargs()
        # kwargs(2) doubles the src_* values — presumably for the 2x doubled clip later in the chain; verify.
        self.post_crop = sc_args.kwargs(2)
        self.rescale_args = sc_args.kwargs()

        self.descale_func_args = KwargsT() | args

        # Store the effective (possibly fractional) dimensions for later reference.
        self.height = args.get("src_height", clip.height)
        self.width = args.get("src_width", clip.width)
        self.base_height = base_height
        self.base_width = base_width

        self.descaled = self.kernel.descale(clip, **args)
        self.rescaled = descale_rescale(self, self.descaled, width=clip.width, height=clip.height, **self.rescale_args)
37 |
--------------------------------------------------------------------------------
/vodesfunc/rescale_ext/scaling_args.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from dataclasses import dataclass
4 | from math import ceil, floor
5 | from types import NoneType
6 | from typing import Any, Self, overload, TypeAlias, NamedTuple
7 |
8 | from vstools import KwargsT, get_w, mod2, vs
9 |
10 | LeftCrop: TypeAlias = int
11 | RightCrop: TypeAlias = int
12 | TopCrop: TypeAlias = int
13 | BottomCrop: TypeAlias = int
14 |
15 |
class CropRel(NamedTuple):
    """Relative crop: pixels removed from each edge, all defaulting to 0."""

    left: int = 0
    right: int = 0
    top: int = 0
    bottom: int = 0
21 |
22 |
class CropAbs(NamedTuple):
    """Absolute crop: target window size plus its top-left offset inside the base clip."""

    width: int
    height: int
    left: int = 0
    top: int = 0

    def to_rel(self, base_clip: vs.VideoNode) -> CropRel:
        """Convert this window to per-edge relative crop amounts for `base_clip`."""
        right = base_clip.width - self.width - self.left
        bottom = base_clip.height - self.height - self.top
        return CropRel(self.left, right, self.top, bottom)
31 |
32 |
@dataclass
class ScalingArgs:
    """
    Resolved (de)scaling parameters: rounded output dimensions plus the src_*
    values describing the (possibly fractional) source window inside them.
    """

    # Rounded output dimensions.
    width: int
    height: int
    # Fractional source window dimensions.
    src_width: float
    src_height: float
    # Subpixel offsets of the source window.
    src_top: float
    src_left: float
    # Which axes these args apply to: "h", "w" or both.
    mode: str = "hw"

    def _do(self) -> tuple[bool, bool]:
        # (do_height, do_width) flags parsed from `mode`.
        return "h" in self.mode.lower(), "w" in self.mode.lower()

    def _up_rate(self, clip: vs.VideoNode | None = None) -> tuple[float, float]:
        # Scale factor of `clip` relative to the stored dimensions, per enabled axis.
        if clip is None:
            return 1.0, 1.0

        do_h, do_w = self._do()

        return ((clip.height / self.height) if do_h else 1.0, (clip.width / self.width) if do_w else 1.0)

    def kwargs(self, clip_or_rate: vs.VideoNode | float | None = None, /) -> KwargsT:
        """
        Build src_* keyword args, optionally scaled up for a larger clip.

        :param clip_or_rate: Clip to derive the scale rate from, a numeric rate,
                             or None for a rate of 1.
        :return:             Dict with src_height/src_top and/or src_width/src_left,
                             depending on `mode`.
        """
        kwargs = dict[str, Any]()

        do_h, do_w = self._do()

        if isinstance(clip_or_rate, (vs.VideoNode, NoneType)):
            up_rate_h, up_rate_w = self._up_rate(clip_or_rate)
        else:
            up_rate_h, up_rate_w = clip_or_rate, clip_or_rate

        if do_h:
            kwargs.update(src_height=self.src_height * up_rate_h, src_top=self.src_top * up_rate_h)

        if do_w:
            kwargs.update(src_width=self.src_width * up_rate_w, src_left=self.src_left * up_rate_w)

        return kwargs

    @overload
    @classmethod
    def from_args(
        cls, base_clip: vs.VideoNode, height: int, width: int | None = None, *, src_top: float = ..., src_left: float = ..., mode: str = "hw"
    ) -> Self:
        """
        Get (de)scaling arguments for integer scaling.

        :param base_clip: Source clip.
        :param height: Target (de)scaling height.
        :param width: Target (de)scaling width.
                      If None, it will be calculated from the height and the aspect ratio of the base_clip.
        :param src_top: Vertical offset.
        :param src_left: Horizontal offset.
        :param mode: Scaling mode:
                     - "w" means only the width is calculated.
                     - "h" means only the height is calculated.
                     - "hw" or "wh" mean both width and height are calculated.
        :return: ScalingArgs object suitable for scaling functions.
        """

    @overload
    @classmethod
    def from_args(
        cls,
        base_clip: vs.VideoNode,
        height: float,
        width: float | None = ...,
        base_height: int | None = ...,
        base_width: int | None = ...,
        src_top: float = ...,
        src_left: float = ...,
        crop: tuple[LeftCrop, RightCrop, TopCrop, BottomCrop] | CropRel | CropAbs = ...,
        mode: str = "hw",
    ) -> Self:
        """
        Get (de)scaling arguments for fractional scaling.

        :param base_clip: Source clip.
        :param height: Target (de)scaling height. Casting to float will ensure fractional calculations.
        :param width: Target (de)scaling width. Casting to float will ensure fractional calculations.
                      If None, it will be calculated from the height and the aspect ratio of the base_clip.
        :param base_height: The height from which to contain the clip. If None, it will be calculated from the height.
        :param base_width: The width from which to contain the clip. If None, it will be calculated from the width.
        :param src_top: Vertical offset.
        :param src_left: Horizontal offset.
        :param crop: Tuple of cropping values, or relative/absolute crop specification.
        :param mode: Scaling mode:
                     - "w" means only the width is calculated.
                     - "h" means only the height is calculated.
                     - "hw" or "wh" mean both width and height are calculated.
        :return: ScalingArgs object suitable for scaling functions.
        """

    @classmethod
    def from_args(
        cls,
        base_clip: vs.VideoNode,
        height: int | float,
        width: int | float | None = None,
        base_height: int | None = None,
        base_width: int | None = None,
        src_top: float = 0,
        src_left: float = 0,
        crop: tuple[LeftCrop, RightCrop, TopCrop, BottomCrop] | CropRel | CropAbs | None = None,
        mode: str = "hw",
    ) -> Self:
        # Normalize any crop specification to edge-relative amounts.
        if crop:
            if isinstance(crop, CropAbs):
                crop = crop.to_rel(base_clip)
            elif isinstance(crop, CropRel):
                pass
            else:
                crop = CropRel(*crop)
        else:
            crop = CropRel()

        ratio_height = height / base_clip.height

        if width is None:
            if isinstance(height, int):
                width = get_w(height, base_clip, 2)
            else:
                width = ratio_height * base_clip.width

        ratio_width = width / base_clip.width

        # Plain integer descale with no base dims and no crop: no fractional handling needed.
        if all([isinstance(height, int), isinstance(width, int), base_height is None, base_width is None, crop == (0, 0, 0, 0)]):
            return cls(int(width), int(height), int(width), int(height), src_top, src_left, mode)

        # Fractional path: contain the fractional target inside mod-2 base dimensions.
        if base_height is None:
            base_height = mod2(ceil(height))

        if base_width is None:
            base_width = mod2(ceil(width))

        # Split the leftover base area into (possibly fractional) margins per side,
        # additionally shifted by the requested crop scaled to target resolution.
        margin_left = (base_width - width) / 2 + ratio_width * crop.left
        margin_right = (base_width - width) / 2 + ratio_width * crop.right
        cropped_width = base_width - floor(margin_left) - floor(margin_right)

        margin_top = (base_height - height) / 2 + ratio_height * crop.top
        margin_bottom = (base_height - height) / 2 + ratio_height * crop.bottom
        cropped_height = base_height - floor(margin_top) - floor(margin_bottom)

        if isinstance(width, int) and crop.left == crop.right == 0:
            cropped_src_width = float(cropped_width)
        else:
            cropped_src_width = ratio_width * (base_clip.width - crop.left - crop.right)

        # The fractional remainder of the margin becomes the subpixel offset.
        cropped_src_left = margin_left - floor(margin_left) + src_left

        if isinstance(height, int) and crop.top == crop.bottom == 0:
            cropped_src_height = float(cropped_height)
        else:
            cropped_src_height = ratio_height * (base_clip.height - crop.top - crop.bottom)

        cropped_src_top = margin_top - floor(margin_top) + src_top

        return cls(cropped_width, cropped_height, cropped_src_width, cropped_src_height, cropped_src_top, cropped_src_left, mode)
191 |
--------------------------------------------------------------------------------
/vodesfunc/scale.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 | from vskernels import Catrom, Lanczos
3 | from vstools import (
4 | inject_self,
5 | vs,
6 | core,
7 | depth,
8 | get_depth,
9 | get_y,
10 | Matrix,
11 | KwargsT,
12 | get_nvidia_version,
13 | )
14 | from abc import ABC, abstractmethod
15 |
16 |
17 | __all__: list[str] = ["NNEDI_Doubler", "Waifu2x_Doubler", "Lanczos_PreSS"]
18 |
19 |
class Lanczos_PreSS(Lanczos):
    """
    Convenience class to pass to a dehalo function.
    This serves the same purpose as NNEDI to double and reverse using point.
    Except it is a quite a bit faster and (if using opencl) takes a lot of load off the GPU.
    """

    @inject_self.init_kwargs.clean
    def scale(self, clip: vs.VideoNode, width: int, height: int, shift: tuple[float, float] = (0, 0), **kwargs) -> vs.VideoNode:
        # Only meant for exact 2x supersampling; anything else is a misuse.
        if width != clip.width * 2 or height != clip.height * 2:
            raise ValueError("Lanczos_PreSS: You're probably not using this correctly.")
        # NOTE(review): the passed `shift` and `kwargs` are intentionally ignored;
        # the fixed (-0.25, -0.25) shift presumably mimics NNEDI's doubling offset — verify.
        return Lanczos.scale(clip, width, height, (-0.25, -0.25))
32 |
33 |
class Doubler(ABC):
    """Abstract base for 2x clip doublers."""

    # Extra arguments forwarded to the wrapped scale function.
    kwargs: dict[str, Any]

    def __init__(self, **kwargs: Any) -> None:
        self.kwargs = dict(kwargs)

    @abstractmethod
    def double(self, clip: vs.VideoNode) -> vs.VideoNode:
        """Return the input clip doubled in both dimensions."""
47 |
48 |
class NNEDI_Doubler(Doubler):
    # Arguments passed to the nnedi plugin calls.
    ediargs: dict[str, Any]
    # True = nnedi3cl, False = znedi3.
    opencl: bool

    def __init__(self, opencl: bool = True, nns: int = 4, nsize: int = 4, qual: int = 2, pscrn: int = 1, **kwargs) -> None:
        """
        Simple utility class for doubling a clip using znedi or nnedi3cl (also fixes the shift)

        :param opencl:  Will use nnedi3cl if True and znedi3 if False
        :param nns:     nnedi `nns` setting, forwarded as-is.
        :param nsize:   nnedi `nsize` setting, forwarded as-is.
        :param qual:    nnedi `qual` setting, forwarded as-is.
        :param pscrn:   nnedi `pscrn` setting, forwarded as-is.
        :param kwargs:  Any further nnedi args.
        """
        self.ediargs = {"qual": qual, "nsize": nsize, "nns": nns, "pscrn": pscrn}
        self.ediargs.update(**kwargs)
        self.opencl = opencl

    def double(self, clip: vs.VideoNode, correct_shift: bool = True) -> vs.VideoNode:
        """
        Double the clip's luma in both dimensions.

        :param clip:            Clip to double; only the Y plane is processed and returned.
        :param correct_shift:   Compensate the half-pixel shift the doubling introduces.
        :return:                Doubled luma clip with the input's frameprops copied over.
        """
        y = get_y(clip)
        # nnedi3cl needs padding, to avoid issues on edges (https://slow.pics/c/QcJef38u)
        if self.opencl:
            (left, right, top, bottom) = mod_padding(y, 2, 2)
            width = clip.width + left + right
            height = clip.height + top + bottom
            pad = y.resize.Point(width, height, src_left=-left, src_top=-top, src_width=width, src_height=height).std.Transpose()
            # dh+dw doubles both axes in a single call; the crop removes the now-doubled padding.
            doubled_y = pad.nnedi3cl.NNEDI3CL(dh=True, dw=True, field=0, **self.ediargs).std.Transpose()
            doubled_y = doubled_y.std.Crop(left * 2, right * 2, top * 2, bottom * 2)
        else:
            # znedi3 only doubles vertically (dh), so transpose between two passes to cover both axes.
            doubled_y = (
                depth(y, 16)
                .znedi3.nnedi3(dh=True, field=0, **self.ediargs)
                .std.Transpose()
                .znedi3.nnedi3(dh=True, field=0, **self.ediargs)
                .std.Transpose()
            )
            doubled_y = depth(doubled_y, get_depth(clip))

        if correct_shift:
            doubled_y = doubled_y.resize.Bicubic(src_top=0.5, src_left=0.5)

        return doubled_y.std.CopyFrameProps(y)
87 |
88 |
class Waifu2x_Doubler(Doubler):
    """Doubler that upscales 2x via vsmlrt's Waifu2x, auto-picking and tuning a backend."""

    backend: Any
    kwargs: KwargsT
    # Arguments forwarded to the actual Waifu2x call (always reassigned in __init__).
    w2xargs: KwargsT = {}

    def __init__(
        self,
        cuda: bool | str | None = None,
        fp16: bool = True,
        num_streams: int = 1,
        tiles: int | tuple[int, int] | None = None,
        model: int = 6,
        **kwargs,
    ) -> None:
        """
        Simple utility class for doubling a clip using Waifu2x

        :param cuda: ORT-Cuda if True, NCNN-VK or CPU (depending on what you have installed) if False, TRT if some string
                     Automatically chosen and tuned when None
        :param fp16: Uses 16 bit floating point internally if True.
        :param num_streams: Amount of streams to use for Waifu2x; Sacrifices a lot of vram for a speedup.
        :param tiles: Splits up the upscaling process into multiple tiles.
                      You will likely have to use atleast `2` if you have less than 16 GB of VRAM.
        :param model: Model to use from vsmlrt.
        :param kwargs: Args that get passed to both the Backend and actual scaling function.
        """
        from vsmlrt import Backend, backendT

        self.kwargs = {"num_streams": num_streams, "fp16": fp16}

        # An explicitly passed backend implies the non-cuda selection path below.
        if "backend" in kwargs:
            cuda = False

        # Partially stolen from setsu but removed useless stuff that is default in mlrt already and added version checks
        if cuda is None:
            nv = get_nvidia_version()
            cuda = nv is not None
            try:
                if nv is not None and not hasattr(core, "trt") and hasattr(core, "ort"):
                    self.kwargs.update({"use_cuda_graph": True})
                else:
                    props: KwargsT = core.trt.DeviceProperties(kwargs.get("device_id", 0))  # type: ignore
                    version_props: KwargsT = core.trt.Version()  # type: ignore

                    vram = props.get("total_global_memory", 0)
                    trt_version = float(version_props.get("tensorrt_version", 0))

                    cuda = "trt"

                    presumedArgs = KwargsT(
                        workspace=vram / (1 << 22) if vram else None,
                        use_cuda_graph=True,
                        use_cublas=True,
                        use_cudnn=trt_version < 8400,
                        heuristic=trt_version >= 8500,
                        output_format=int(fp16),
                    )

                    # Swinunet doesn't like forced 16. Further testing for the other models needed.
                    if model <= 6:
                        presumedArgs.update({"tf32": not fp16, "force_fp16": fp16})

                    self.kwargs.update(presumedArgs)
            except Exception:
                # Probing trt capabilities failed; fall back to plain cuda/cpu selection.
                # (Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.)
                cuda = nv is not None

        self.w2xargs = KwargsT(
            model=model,
            tiles=tiles,
            preprocess=kwargs.pop("preprocess", True),
            scale=kwargs.pop("scale", 2),
            tilesize=kwargs.pop("tilesize", None),
            overlap=kwargs.pop("overlap", None),
        )

        self.kwargs.update(kwargs)

        if cuda is False:
            backend = kwargs.pop("backend", None)
            if backend and isinstance(backend, backendT):
                self.backend = backend
            else:
                if hasattr(core, "ncnn"):
                    self.backend = Backend.NCNN_VK(**self.kwargs)
                else:
                    # device_id is GPU-only; drop it *if present* for CPU backends.
                    # (Previously an unconditional pop that raised KeyError when the
                    # user never passed a device_id.)
                    self.kwargs.pop("device_id", None)
                    self.backend = Backend.ORT_CPU(**self.kwargs) if hasattr(core, "ort") else Backend.OV_CPU(**self.kwargs)
        elif cuda is True:
            self.backend = Backend.ORT_CUDA(**self.kwargs) if hasattr(core, "ort") else Backend.OV_GPU(**self.kwargs)
        else:
            self.backend = Backend.TRT(**self.kwargs)
        self.kwargs = kwargs
        self.model = model

    def double(self, clip: vs.VideoNode) -> vs.VideoNode:
        """
        Double the clip in both dimensions with Waifu2x.

        :param clip:    Clip to upscale; luma-only models receive just the Y plane.
        :return:        Upscaled clip in the input depth, with input frameprops copied over.
        """
        from vsmlrt import Waifu2x

        pre = depth(clip, 32).std.Limiter()

        # Pad to mod4 (at least 4 px per side) to avoid model artifacts on frame edges.
        (left, right, top, bottom) = mod_padding(pre)
        width = pre.width + left + right
        height = pre.height + top + bottom
        pad = pre.resize.Point(width, height, src_left=-left, src_top=-top, src_width=width, src_height=height)

        # Model 0 wants a gray input
        needs_gray = self.w2xargs.get("model", 6) == 0
        was_444 = pre.format.color_family == vs.YUV and pre.format.subsampling_w == 0 and pre.format.subsampling_h == 0 and not needs_gray  # type: ignore

        if was_444:
            pad = Catrom().resample(pad, format=vs.RGBS, matrix=Matrix.RGB, matrix_in=Matrix.from_video(pre))
        elif needs_gray is True:
            pad = get_y(pad)
        else:
            # Replicate luma onto all three planes as a pseudo-RGB input.
            pad = get_y(pad).std.ShufflePlanes(0, vs.RGB)

        up = Waifu2x(pad, noise=-1, backend=self.backend, **self.w2xargs)

        if was_444:
            up = Catrom().resample(up, format=vs.YUV444PS, matrix=Matrix.from_video(pre), matrix_in=Matrix.RGB)
        elif needs_gray is False:
            up = up.std.ShufflePlanes(0, vs.GRAY)

        # Remove the (now doubled) padding again.
        up = up.std.Crop(left * 2, right * 2, top * 2, bottom * 2)

        # Only Model 6 has the tint
        if self.w2xargs.get("model", 6) == 6:
            up = up.std.Expr("x 0.5 255 / +")

        return depth(up, get_depth(clip)).std.CopyFrameProps(pre)
218 |
219 |
def mod_padding(clip: vs.VideoNode, mod: int = 4, min: int = 4):
    """
    Calculate (left, right, top, bottom) padding so that the clip, grown by at
    least `min` pixels on every side, becomes mod-aligned in both dimensions.

    :param clip:    Clip (or anything with width/height) to compute padding for.
    :param mod:     Modulus the padded dimensions must satisfy.
    :param min:     Minimum padding applied to every side.
    :return:        (left, right, top, bottom) padding tuple.
    """
    padded_w = clip.width + min * 2
    padded_h = clip.height + min * 2
    # Extra pixels needed to reach the next multiple of `mod` (0 if already aligned).
    extra_w = mod - ((padded_w - 1) % mod + 1)
    extra_h = mod - ((padded_h - 1) % mod + 1)

    # Split the remainder as evenly as possible; right/bottom take the odd pixel.
    left = extra_w // 2
    top = extra_h // 2
    return (left + min, extra_w - left + min, top + min, extra_h - top + min)
233 |
--------------------------------------------------------------------------------
/vodesfunc/spikefinder.py:
--------------------------------------------------------------------------------
1 | from vstools import (
2 | initialize_clip,
3 | finalize_clip,
4 | Keyframes,
5 | get_depth,
6 | vs,
7 | FrameRangesN,
8 | FrameRangeN,
9 | )
10 | from jetpytools import SoftRange, normalize_ranges_to_list, normalize_list_to_ranges
11 | from muxtools import get_executable, PathLike, VideoFile, warn, make_output, ensure_path_exists, info, debug
12 | from vsmuxtools.video.encoders import VideoEncoder
13 | from dataclasses import dataclass
14 | import shlex
15 | import subprocess
16 | import json
17 | from fractions import Fraction
18 | from pathlib import Path
19 |
20 | __all__ = ["find_spikes"]
21 |
22 |
@dataclass
class NVENC_H265(VideoEncoder):
    """
    Uses ffmpeg to encode clip to a h265 stream via nvenc.
    (Should this be in vsmuxtools?)

    :param settings: Can either be a string of your own settings or any of the 3 presets.
    :param ensure_props: Calls initialize_clip on the clip to have at the very least guessed props
    """

    settings: str = ""
    ensure_props: bool = True

    def __post_init__(self):
        self.executable = get_executable("ffmpeg")

    def encode(self, clip: vs.VideoNode, outfile: PathLike | None = None) -> VideoFile:
        """
        Encode `clip` to an mkv file via ffmpeg's hevc_nvenc, piping y4m over stdin.

        :param clip:    Clip to encode; dithered down first if above 10 bit.
        :param outfile: Optional output path override.
        :return:        VideoFile wrapper around the encoded mkv.
        """
        bits = get_depth(clip)
        # nvenc hevc tops out at 10 bit; dither down if needed.
        if bits > 10:
            warn("This encoder does not support a bit depth over 10.\nClip will be dithered to 10 bit.", self, 2)
            clip = finalize_clip(clip, 10)
            bits = 10
        if self.ensure_props:
            clip = initialize_clip(clip, bits)
            clip = finalize_clip(clip, bits)

        out = make_output("encoded_nvenc", "mkv", user_passed=outfile)

        # User settings are spliced in between the codec selection and the output path.
        args = [self.executable, "-hide_banner", "-v", "quiet", "-stats", "-f", "yuv4mpegpipe", "-i", "-", "-c:v", "hevc_nvenc"]
        if self.settings:
            args.extend(shlex.split(self.settings))
        args.append(str(out))

        process = subprocess.Popen(args, stdin=subprocess.PIPE)
        # _update_progress is presumably provided by the VideoEncoder base class — verify.
        clip.output(process.stdin, y4m=True, progress_update=lambda x, y: self._update_progress(x, y))  # type: ignore
        process.communicate()
        return VideoFile(out)
60 |
61 |
@dataclass
class Framedata:
    """Size/timing info for a single encoded frame."""

    # 0-based frame index in decode order.
    frame: int
    # Presentation timestamp in seconds.
    frame_time: float
    # Frame bitrate in kbps (size * 8 / 1000 * fps).
    frame_size: float
67 |
68 |
def fetch_frames_with_sizes(fileIn: Path, fps: Fraction) -> list[Framedata]:
    """
    Extracts per-frame size/timing info from a video file with ffprobe.

    :param fileIn:      File to probe.
    :param fps:         Clip framerate, used to convert packet sizes to kbps bitrates.
    :return:            One Framedata entry per video packet, in decode order.
    :raises ValueError: If a line of ffprobe output cannot be parsed as JSON.
    """
    bitrate_data = list[Framedata]()
    current_frame = 0
    command = [
        get_executable("ffprobe"),
        "-show_entries",
        "packet=size,duration_time,pts_time",
        "-select_streams",
        "v",
        "-print_format",
        "json=compact=1",
        str(fileIn),
    ]
    out = subprocess.run(command, capture_output=True, text=True, universal_newlines=True)
    output = out.stdout + out.stderr
    # In compact mode ffprobe prints one JSON object per packet; parse line by line.
    for line in output.splitlines():
        if len(line) == 0:
            break
        # Strip the trailing comma of all but the last array entry.
        if line[-1] == ",":
            line = line[:-1]

        if "pts_time" in line:
            try:
                decoded = json.loads(line)
            except json.JSONDecodeError as e:
                # Surface the offending line in the error instead of printing and
                # raising a bare Exception (which lost all context).
                raise ValueError(f"Could not parse ffprobe output line: {line}") from e

            # size is in bytes; convert to kbit and multiply by fps for kbps.
            frame_bitrate = (float(decoded.get("size")) * 8 / 1000) * fps
            frame_time = float(decoded.get("pts_time"))
            bitrate_data.append(Framedata(current_frame, frame_time, frame_bitrate))
            current_frame += 1

    return bitrate_data
106 |
107 |
def split_by_keyframes(data: list[Framedata], clip: vs.VideoNode) -> list[list[Framedata]]:
    """
    Split framedata into scene-based chunks, each starting at a detected scene change.

    :param data:    Per-frame data in frame order.
    :param clip:    Clip used for scene change detection.
    :return:        List of chunks; every chunk after the first starts on a keyframe.
    """
    keyframes = Keyframes.from_clip(clip)

    chunks: list[list[Framedata]] = []
    current: list[Framedata] = []

    for entry in data:
        # Flush the running chunk whenever a new scene starts.
        if entry.frame in keyframes and current:
            chunks.append(current)
            current = []
        current.append(entry)

    # Don't lose the trailing chunk.
    if current:
        chunks.append(current)

    return chunks
128 |
129 |
def read_ranges_bookmarks(fileIn: Path) -> FrameRangesN:
    """
    Read frame ranges from a bookmarks file.

    The file is a single line of comma-separated frame numbers that are taken as
    consecutive (start, end) pairs, e.g. "5, 10, 20, 30" -> [(5, 10), (20, 30)].

    :param fileIn:  Bookmarks file to read.
    :return:        List of (start, end) frame range tuples.
    """
    with open(fileIn, "r", encoding="utf-8") as reader:
        line = reader.readline()
    nums = [int(part.strip()) for part in line.split(",")]
    # Pair the numbers positionally. The previous `ints.index(start)` lookup found the
    # FIRST occurrence of a value and thus mispaired ranges whenever the same frame
    # number appeared more than once (e.g. "5, 10, 10, 20" yielded (10, 10)).
    return [(start, end) for start, end in zip(nums[0::2], nums[1::2])]
139 |
140 |
def find_spikes(
    clip: vs.VideoNode,
    threshold: int = 11500,
    nvenc_settings: str = "-preset 3 -rc vbr_hq -pix_fmt p010le -b:v 6M -maxrate:v 22M",
    print_ranges: bool = False,
    export_file: None | PathLike = None,
    ignore_existing: bool = False,
) -> FrameRangesN:
    """
    Encodes a clip with nvenc hevc and analyzes the bitrate averages between scene changes to find spikes.

    :param clip: Clip to encode
    :param threshold: Bitrate threshold to add to ranges (in kbps, I think)
    :param nvenc_settings: Settings to use for the encoder
    :param print_ranges: If you want to print the ranges with corresponding bitrates
    :param export_file: Export the ranges to a bookmarks file with the given name. None to disable.
    :param ignore_existing: Run again and overwrite the exported file if it exists. By default it won't run again.
    """
    # Reuse a previous run's exported ranges unless told to run again.
    if export_file:
        out_file = make_output(export_file, "bookmarks", user_passed=export_file)
        if out_file.exists():
            if ignore_existing:
                out_file.unlink(True)
            else:
                return read_ranges_bookmarks(out_file)

    ranges: list[SoftRange] = []
    info("Encoding clip using nvenc...", find_spikes)
    temp_encode = NVENC_H265(nvenc_settings).encode(clip, "temp_nvenc")
    encoded_file = ensure_path_exists(temp_encode.file, find_spikes)
    info("Extracting frame data...", find_spikes)
    framedata = fetch_frames_with_sizes(encoded_file, Fraction(clip.fps_num, clip.fps_den))
    info("Finding scene changes...")
    chunks = split_by_keyframes(framedata, clip)
    # The temp encode is only needed for the ffprobe pass above.
    encoded_file.unlink(True)

    # Flag every scene whose average bitrate exceeds the threshold.
    for chunk in chunks:
        size_all: float = 0.0
        for data in chunk:
            size_all += data.frame_size
        avg = size_all / len(chunk)
        if avg > threshold:
            ranges.append((chunk[0].frame, chunk[-1].frame))
            if print_ranges:
                debug(f"Frames {chunk[0].frame} - {chunk[-1].frame}: {round(avg, 2)} kbps", find_spikes)

    # To make the ranges not have single frame outliers
    ranges_int = normalize_ranges_to_list(ranges)
    final_ranges = normalize_list_to_ranges(ranges_int)

    # Persist as a flat "start, end, start, end, ..." bookmarks line.
    if export_file:
        with open(out_file, "w", encoding="utf-8") as writer:
            all_nums = list[int]()
            for start, end in final_ranges:
                all_nums.extend([start, end])
            writer.write(", ".join([str(it) for it in all_nums]))

    return final_ranges
199 |
--------------------------------------------------------------------------------
/vodesfunc/util.py:
--------------------------------------------------------------------------------
1 | from vstools import vs, core, KwargsT
2 |
3 | from functools import partial
4 |
5 |
6 | __all__: list[str] = [
7 | "set_output",
8 | "out",
9 | ]
10 |
11 |
def set_output(
    clip: vs.VideoNode, name: str | None = None, frame_info: bool = False, allow_comp: bool = True, cache: bool | None = None, **kwargs: KwargsT
) -> vs.VideoNode:
    """
    Outputs a clip. Less to type.
    Designed to be used with the good ol 'from vodesfunc import *' and the 'out' alias

    :param clip:        Clip to output.
    :param name:        Display name; also set as a "Name" frameprop in the fallback path.
    :param frame_info:  Overlay frame number / picture type on the clip (requires `name`).
    :param allow_comp:  Allow vspreview comparisons for this output.
    :param cache:       Cache setting forwarded to vspreview; defaults to whether a preview is running.
    :param kwargs:      Further args passed through to vspreview's set_output.
    :return:            The (possibly overlaid) clip.
    """
    if frame_info and name:
        clip = _print_frameinfo(clip, name)

    try:
        from vspreview import is_preview, set_output as vsp_set_output

        # Resolve the cache default BEFORE building the arg dict. Previously the
        # default was resolved after `args` was created, so a stale `cache=None`
        # was always forwarded to vspreview.
        if cache is None:
            cache = is_preview()

        args = KwargsT(name=name, cache=cache, disable_comp=not allow_comp)
        if kwargs:
            args.update(**kwargs)
        vsp_set_output(clip, **args)
    except Exception:
        # vspreview missing or failing; fall back to plain VapourSynth output.
        if name is not None:
            clip = clip.std.SetFrameProp("Name", data=name)
        clip.set_output(len(vs.get_outputs()))
    return clip
36 |
37 |
def _print_frameinfo(clip: vs.VideoNode, title: str = "") -> vs.VideoNode:
    """Overlay per-frame info (frame number, picture type) and a title onto the clip."""
    style = "sans-serif,20,&H00FFFFFF,&H000000FF,&H00000000,&H00000000,0,0,0,0,100,100,0,0,1,2,0,7,10,10,10,1"

    def _overlay_props(n: int, f: vs.VideoFrame, clip: vs.VideoNode) -> vs.VideoNode:
        # Per-frame subtitle with the frame number and (if available) picture type.
        if "_PictType" in f.props:
            pict_type = f.props["_PictType"].decode()
        else:
            pict_type = "N/A"
        info = f"Frame {n} of {clip.num_frames}\nPicture type: {pict_type}"
        return core.sub.Subtitle(clip, text=info, style=style)

    clip = core.std.FrameEval(clip, partial(_overlay_props, clip=clip), prop_src=clip)
    # Static title, pushed down a few lines so it doesn't collide with the frame info.
    clip = core.sub.Subtitle(clip, text=["".join(["\n"] * 4) + title], style=style)
    return clip
53 |
54 |
55 | out = set_output
56 |
--------------------------------------------------------------------------------