├── .github
│   └── workflows
│       ├── autotag.yml
│       ├── lint.yml
│       └── pypipublish.yml
├── .gitignore
├── LICENSE
├── README.md
├── docs
│   ├── changelogs
│   │   └── changelogs.rst
│   ├── conf.py
│   ├── getting_started
│   │   ├── community.rst
│   │   └── install.rst
│   ├── index.rst
│   ├── requirements.txt
│   └── submodules
│       ├── exceptions.rst
│       ├── funcs.rst
│       ├── helpers.rst
│       ├── mask.rst
│       ├── scale.rst
│       ├── shaders.rst
│       └── types.rst
├── readthedocs.yaml
├── requirements-dev.txt
├── requirements.txt
├── setup.cfg
├── setup.py
└── vsscale
    ├── __init__.py
    ├── _metadata.py
    ├── base.py
    ├── exceptions.py
    ├── funcs.py
    ├── helpers.py
    ├── mask.py
    ├── onnx.py
    ├── py.typed
    ├── rescale.py
    ├── scale.py
    ├── shaders.py
    └── types.py
/.github/workflows/autotag.yml:
--------------------------------------------------------------------------------
1 | name: Check and create tag
2 | on:
3 |   push:
4 |     branches:
5 |       - master
6 |     paths:
7 |       - vsscale/_metadata.py
8 |
9 | jobs:
10 |   new_version:
11 |     runs-on: ubuntu-latest
12 |
13 |     steps:
14 |       - uses: actions/checkout@v2
15 |
16 |       - name: Get version number
17 |         run: |
18 |           echo "CURR_VER=v$(python -c "import runpy;runpy.run_path('vsscale/_metadata.py', None, '__github__')")" >> $GITHUB_ENV
19 |       - name: Check if version exists
20 |         env:
21 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
22 |         uses: mukunku/tag-exists-action@v1.0.0
23 |         id: tagcheck
24 |         with:
25 |           tag: ${{ env.CURR_VER }}
26 |
27 |       - name: Make tag
28 |         uses: actions/github-script@v3
29 |         if: steps.tagcheck.outputs.exists == 'false'
30 |         with:
31 |           github-token: ${{ secrets.WORKFLOW_TOKEN }}
32 |           script: |
33 |             github.git.createRef({
34 |               owner: context.repo.owner,
35 |               repo: context.repo.repo,
36 |               ref: `refs/tags/${process.env.CURR_VER}`,
37 |               sha: context.sha
38 |             })
39 |       - name: Fallback
40 |         if: steps.tagcheck.outputs.exists == 'true'
41 |         run: echo "Nothing to see here, move along citizen"
42 |
--------------------------------------------------------------------------------
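The "Get version number" step above leans on a small trick in vsscale/_metadata.py (shown later in this dump): running the file under the module name '__github__' makes it print its own version. A minimal sketch of what that shell command does, assuming it is run from the repository root:

import runpy

# run_path() with run_name='__github__' sets __name__ inside the file,
# triggering its `if __name__ == '__github__':` guard and printing the
# version (e.g. "2.3.2") for the workflow to capture.
meta = runpy.run_path('vsscale/_metadata.py', None, '__github__')

# The returned globals expose the same metadata directly.
print(meta['__version__'])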
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: Lint Python code with Ruff
2 |
3 | on:
4 |   push:
5 |     branches:
6 |       - master
7 |     paths:
8 |       - '**.py'
9 |   pull_request:
10 |     paths:
11 |       - '**.py'
12 |
13 | jobs:
14 |   windows:
15 |     runs-on: windows-latest
16 |     strategy:
17 |       matrix:
18 |         vs-versions:
19 |           - 68
20 |         python-version:
21 |           - '3.12'
22 |
23 |     steps:
24 |       - uses: actions/checkout@v4
25 |
26 |       - name: Set up Python ${{ matrix.python-version }}
27 |         uses: actions/setup-python@v5
28 |         with:
29 |           python-version: ${{ matrix.python-version }}
30 |
31 |       - name: Install dependencies
32 |         run: |
33 |           python3 -m pip install --upgrade pip
34 |           pip install vapoursynth-portable==${{ matrix.vs-versions }}
35 |           pip install -r requirements.txt
36 |           pip install -r requirements-dev.txt
37 |
38 |       - name: Running ruff
39 |         run: ruff check vsscale
40 |
--------------------------------------------------------------------------------
/.github/workflows/pypipublish.yml:
--------------------------------------------------------------------------------
1 | name: Publish releases to PyPI
2 | on:
3 |   push:
4 |     tags:
5 |       - v[0-9]+**
6 |
7 | jobs:
8 |   package_build:
9 |     name: Build and push to PyPI
10 |     runs-on: ubuntu-latest
11 |     steps:
12 |       - uses: actions/checkout@v2
13 |
14 |       - name: Prep Python
15 |         uses: actions/setup-python@v2
16 |         with:
17 |           python-version: '3.12'
18 |
19 |       - name: Install build tools
20 |         run: |
21 |           python -m pip install build setuptools twine --user
22 |         continue-on-error: false
23 |       - name: Build wheel
24 |         id: wheel
25 |         run: |
26 |           python -m build --wheel --outdir dist/
27 |         continue-on-error: true
28 |       - name: Build source distribution
29 |         id: sdist
30 |         run: |
31 |           python -m build --sdist --outdir dist/
32 |         continue-on-error: true
33 |       - name: Check the output
34 |         run: |
35 |           python -m twine check --strict dist/*
36 |         continue-on-error: false
37 |       - name: Die on failure
38 |         if: steps.wheel.outcome != 'success' && steps.sdist.outcome != 'success'
39 |         run: exit 1
40 |       - name: Publish to PyPI
41 |         uses: pypa/gh-action-pypi-publish@master
42 |         with:
43 |           user: __token__
44 |           password: ${{ secrets.PYPI_TOKEN }}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib64/
18 | parts/
19 | sdist/
20 | var/
21 | wheels/
22 | pip-wheel-metadata/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 |
53 | # Translations
54 | *.mo
55 | *.pot
56 |
57 | # Django stuff:
58 | *.log
59 | local_settings.py
60 | db.sqlite3
61 | db.sqlite3-journal
62 |
63 | # Flask stuff:
64 | instance/
65 | .webassets-cache
66 |
67 | # Scrapy stuff:
68 | .scrapy
69 |
70 | # Sphinx documentation
71 | docs/_build/
72 | docs/make.bat
73 | docs/Makefile
74 |
75 | # PyBuilder
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | .python-version
87 |
88 | # pipenv
89 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
90 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
91 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
92 | # install all needed dependencies.
93 | #Pipfile.lock
94 |
95 | # celery beat schedule file
96 | celerybeat-schedule
97 |
98 | # SageMath parsed files
99 | *.sage.py
100 |
101 | # Environments
102 | .env
103 | .venv
104 | env/
105 | venv/
106 | ENV/
107 | env.bak/
108 | venv.bak/
109 |
110 | # Spyder project settings
111 | .spyderproject
112 | .spyproject
113 |
114 | # Rope project settings
115 | .ropeproject
116 |
117 | # mkdocs documentation
118 | /site
119 |
120 | # mypy
121 | .mypy_cache/
122 | .dmypy.json
123 | dmypy.json
124 |
125 | # Pyre type checker
126 | .pyre/
127 |
128 | # Local test file
129 | *test*.py
130 |
131 | # Unignore tests dir
132 | !tests/
133 |
134 | # vscode folder
135 | .vscode
136 |
137 | # vsjet folder
138 | .vsjet/
139 |
140 | # Index files
141 | *.ffindex
142 | *.lwi
143 |
144 | # Video test file
145 | *.mkv
146 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Jaded-Encoding-Thaumaturgy
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # vs-scale
2 |
3 | > [!CAUTION]
4 | > This package is deprecated!
5 | > Please use https://github.com/Jaded-Encoding-Thaumaturgy/vs-jetpack instead.
6 |
7 | ### VapourSynth (de)scale functions
8 |
9 |
10 |
11 | Wrappers for scaling and descaling functions.
12 |
13 | For support you can check out the [JET Discord server](https://discord.gg/XTpc6Fa9eB).
14 |
15 | ## How to install
16 | **This package is deprecated!**
17 |
18 | Please install https://pypi.org/p/vsjetpack instead.
19 |
--------------------------------------------------------------------------------
/docs/changelogs/changelogs.rst:
--------------------------------------------------------------------------------
1 | Changelogs
2 | ==========
3 |
4 | .. image:: https://img.shields.io/github/commits-since/Jaded-Encoding-Thaumaturgy/vs-scale/latest
5 | :target: https://github.com/Jaded-Encoding-Thaumaturgy/vs-scale/commits/master
6 |
7 | Check the `GitHub releases page <https://github.com/Jaded-Encoding-Thaumaturgy/vs-scale/releases>`_
8 | for a full changelog.
9 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import os
14 | import sys
15 | from datetime import datetime
16 | from pathlib import Path
17 |
18 | sys.path.insert(0, os.path.abspath('..'))
19 |
20 | # -- Project information -----------------------------------------------------
21 |
22 | project = 'vsscale'
23 |
24 | exec(Path(f'../{project}/_metadata.py').read_text(), meta := dict[str, str]())
25 |
26 | copyright = f"{datetime.now().year}, {meta['__author__'].split(' ')[0]}"
27 | author = meta["__author__"]
28 |
29 | # The short X.Y version
30 | version = meta['__version__']
31 |
32 | # The full version, including alpha/beta/rc tags
33 | release = meta['__version__']
34 |
35 | # -- General configuration ---------------------------------------------------
36 |
37 | # Add any Sphinx extension module names here, as strings. They can be
38 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
39 | # ones.
40 | extensions = [
41 |     'sphinx.ext.autodoc',
42 |     'sphinx.ext.autosummary',
43 |     'sphinx.ext.todo',
44 |     'sphinx_autodoc_typehints',
45 |     'sphinx.ext.autosectionlabel',
46 | ]
47 |
48 | # Add any paths that contain templates here, relative to this directory.
49 | templates_path = ['_templates']
50 |
51 | # The language for content autogenerated by Sphinx. Refer to documentation
52 | # for a list of supported languages.
53 | #
54 | # This is also used if you do content translation via gettext catalogs.
55 | # Usually you set "language" from the command line for these cases.
56 | language = 'en'
57 |
58 | # List of patterns, relative to source directory, that match files and
59 | # directories to ignore when looking for source files.
60 | # This pattern also affects html_static_path and html_extra_path.
61 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
62 |
63 | # -- Options for HTML output -------------------------------------------------
64 |
65 | # The theme to use for HTML and HTML Help pages. See the documentation for
66 | # a list of builtin themes.
67 | #
68 | html_theme = "sphinx_rtd_theme"
69 |
70 | # Add any paths that contain custom static files (such as style sheets) here,
71 | # relative to this directory. They are copied after the builtin static files,
72 | # so a file named "default.css" will overwrite the builtin "default.css".
73 | html_static_path = ['_static']
74 |
75 | autosummary_generate = True
76 |
77 | autodoc_mock_imports = [
78 |     'vapoursynth'
79 | ]
80 |
81 | # -- Extension configuration -------------------------------------------------
82 |
83 | # -- Options for todo extension ----------------------------------------------
84 |
85 | # If true, `todo` and `todoList` produce output, else they produce nothing.
86 | todo_include_todos = True
87 |
88 | html_static_path = []
89 |
--------------------------------------------------------------------------------
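Both conf.py above and setup.py further down load the package metadata by exec-ing _metadata.py into a fresh dict rather than importing the package, so neither the docs build nor the packaging step needs vsscale's runtime dependencies installed. A minimal sketch of the pattern, assuming it runs from the repository root:

from pathlib import Path

# Execute _metadata.py with a fresh dict as its globals; the dunder
# assignments land in `meta` without importing vsscale itself.
exec(Path('vsscale/_metadata.py').read_text(), meta := dict[str, str]())

print(meta['__version__'], meta['__author__'])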
/docs/getting_started/community.rst:
--------------------------------------------------------------------------------
1 | =========
2 | Community
3 | =========
4 |
5 | Where to find support
6 | =====================
7 |
8 | If you're looking for vsscale support, there are a couple of options:
9 |
10 | * Read the official :ref:`documentation <home>`
11 | * Check out the JET `Discord server <https://discord.gg/XTpc6Fa9eB>`_
12 |
13 | .. warning::
14 |
15 | The vsscale issue tracker is **NOT** a support forum!
16 |
17 | Contributing
18 | ============
19 |
20 | .. _contribute:
21 |
22 | The easiest way to contribute is to simply send in a `pull request <https://github.com/Jaded-Encoding-Thaumaturgy/vs-scale/pulls>`_!
23 | Please keep your changes to a minimum. The following contributions will be automatically rejected:
24 |
25 | PRs that:
26 |
27 | * drastically alter the codebase without prior approval
28 | * make minimal changes and are motivated by having one's name in the contributor list
29 |
30 |
--------------------------------------------------------------------------------
/docs/getting_started/install.rst:
--------------------------------------------------------------------------------
1 | ============
2 | Installation
3 | ============
4 |
5 | .. _install:
6 |
7 | There are two common ways to install vsscale.
8 |
9 | The first is to install the latest release build through `PyPI <https://pypi.org/project/vsscale/>`_.
10 |
11 | You can use pip to do this, as demonstrated below:
12 |
13 |
14 | .. code-block:: console
15 |
16 | pip install vsscale --no-cache-dir -U
17 |
18 | This ensures that any previous version is overwritten and that vsscale is upgraded if it was already installed.
19 |
20 | ------------------
21 |
22 | The second method is to build the latest version from git.
23 |
24 | This will be less stable, but will include the most up-to-date features and accurately reflect the documentation.
25 |
26 | .. code-block:: console
27 |
28 | pip install git+https://github.com/Jaded-Encoding-Thaumaturgy/vs-scale.git -U
29 |
30 | It's recommended you use a release version over building from git
31 | unless you require new functionality only available upstream.
32 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | =====================
2 | vsscale Documentation
3 | =====================
4 |
5 | .. _home:
6 |
7 |
8 | .. image:: https://img.shields.io/pypi/pyversions/vsscale
9 |
10 | .. image:: https://img.shields.io/pypi/v/vsscale
11 | :target: https://pypi.org/project/vsscale/
12 |
13 | .. image:: https://img.shields.io/github/commits-since/Jaded-Encoding-Thaumaturgy/vs-scale/latest
14 | :target: https://github.com/Jaded-Encoding-Thaumaturgy/vs-scale/commits/master
15 |
16 | .. image:: https://img.shields.io/pypi/l/vsscale
17 | :target: https://github.com/Jaded-Encoding-Thaumaturgy/vs-scale/blob/master/LICENSE
18 |
19 | .. image:: https://img.shields.io/discord/856381934052704266?label=discord
20 | :target: https://discord.gg/XTpc6Fa9eB
21 |
22 | .. image:: https://static.pepy.tech/personalized-badge/vsscale?period=total&units=international_system&left_color=grey&right_color=blue&left_text=downloads
23 | :target: https://pepy.tech/project/vsscale
24 |
25 |
26 | VapourSynth (de)scale functions
27 |
28 | Wrappers for scaling and descaling functions.
29 |
30 | Want to contribute to vsscale?
31 | Check out the :ref:`how to contribute <contribute>` section!
32 |
33 | .. automodule:: vsscale
34 | :members:
35 | :undoc-members:
36 | :show-inheritance:
37 |
38 | .. toctree::
39 | :maxdepth: 1
40 | :caption: Getting started
41 |
42 | getting_started/install
43 | getting_started/community
44 |
45 | .. toctree::
46 | :maxdepth: 1
47 | :caption: Changelogs
48 |
49 | changelogs/changelogs
50 |
51 | .. toctree::
52 | :maxdepth: 1
53 | :caption: Functions
54 | :titlesonly:
55 |
56 | submodules/exceptions
57 | submodules/funcs
58 | submodules/helpers
59 | submodules/mask
60 | submodules/scale
61 | submodules/shaders
62 | submodules/types
63 |
64 |
65 | Special Credits
66 | ---------------
67 | | A special thanks to every contributor who has contributed to vsscale.
68 | | `A comprehensive list of contributors can be found here <https://github.com/Jaded-Encoding-Thaumaturgy/vs-scale/graphs/contributors>`_.
69 |
70 |
71 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | alabaster==0.7.12
2 | Babel==2.11.0
3 | certifi==2022.12.7
4 | chardet==5.1.0
5 | charset-normalizer==2.0.0
6 | colorama==0.4.6
7 | commonmark==0.9.1
8 | docutils==0.18
9 | idna==3.4
10 | imagesize==1.4.1
11 | Jinja2==3.1.2
12 | MarkupSafe==2.1.1
13 | packaging>=24.0
14 | Pygments==2.14.0
15 | pyparsing==3.0.9
16 | pytz==2022.7
17 | requests==2.28.1
18 | rich==13.0.0
19 | snowballstemmer==2.2.0
20 | Sphinx==5.3.0
21 | sphinx-autodoc-typehints==1.19.5
22 | sphinx-rtd-theme==1.2.0rc1
23 | sphinxcontrib-applehelp==1.0.2
24 | sphinxcontrib-devhelp==1.0.2
25 | sphinxcontrib-htmlhelp==2.0.0
26 | sphinxcontrib-jsmath==1.0.1
27 | sphinxcontrib-qthelp==1.0.3
28 | sphinxcontrib-serializinghtml==1.1.5
29 | typing-extensions>=4.11.0
30 | urllib3==1.26.13
--------------------------------------------------------------------------------
/docs/submodules/exceptions.rst:
--------------------------------------------------------------------------------
1 | ==========
2 | Exceptions
3 | ==========
4 |
5 | .. autosummary::
6 |
7 | vsscale.exceptions.CompareSameKernelError
8 |
9 | .. automodule:: vsscale.exceptions
10 | :members:
11 | :undoc-members:
12 | :show-inheritance:
13 |
--------------------------------------------------------------------------------
/docs/submodules/funcs.rst:
--------------------------------------------------------------------------------
1 | =========
2 | Functions
3 | =========
4 |
5 | .. autosummary::
6 |
7 | vsscale.funcs.MergeScalers
8 | vsscale.funcs.ClampScaler
9 | vsscale.funcs.MergedFSRCNNX
10 | vsscale.funcs.UnsharpLimitScaler
11 | vsscale.funcs.UnsharpedFSRCNNX
12 |
13 | .. automodule:: vsscale.funcs
14 | :members:
15 | :undoc-members:
16 | :show-inheritance:
17 |
--------------------------------------------------------------------------------
/docs/submodules/helpers.rst:
--------------------------------------------------------------------------------
1 | =======
2 | Helpers
3 | =======
4 |
5 | .. autosummary::
6 |
7 | vsscale.helpers.GenericScaler
8 | vsscale.helpers.scale_var_clip
9 |
10 | .. automodule:: vsscale.helpers
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
--------------------------------------------------------------------------------
/docs/submodules/mask.rst:
--------------------------------------------------------------------------------
1 | =======
2 | Masking
3 | =======
4 |
5 | .. autosummary::
6 |
7 | vsscale.mask.descale_detail_mask
8 | vsscale.mask.descale_error_mask
9 |
10 | .. automodule:: vsscale.mask
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
--------------------------------------------------------------------------------
/docs/submodules/scale.rst:
--------------------------------------------------------------------------------
1 | =======
2 | Scaling
3 | =======
4 |
5 | .. autosummary::
6 |
7 | vsscale.scale.DPID
8 | vsscale.scale.SSIM
9 | vsscale.scale.DLISR
10 | vsscale.scale.Waifu2x
11 |
12 | .. automodule:: vsscale.scale
13 | :members:
14 | :undoc-members:
15 | :show-inheritance:
16 |
--------------------------------------------------------------------------------
/docs/submodules/shaders.rst:
--------------------------------------------------------------------------------
1 | =======
2 | Shaders
3 | =======
4 |
5 | .. autosummary::
6 |
7 | vsscale.shaders.PlaceboShader
8 | vsscale.shaders.ShaderFile
9 | vsscale.shaders.FSRCNNXShader
10 | vsscale.shaders.FSRCNNXShaderT
11 |
12 | .. automodule:: vsscale.shaders
13 | :members:
14 | :undoc-members:
15 | :show-inheritance:
16 |
--------------------------------------------------------------------------------
/docs/submodules/types.rst:
--------------------------------------------------------------------------------
1 | =====
2 | Types
3 | =====
4 |
5 | .. autosummary::
6 |
7 | vsscale.types.Resolution
8 | vsscale.types.DescaleAttempt
9 | vsscale.types.DescaleMode
10 | vsscale.types.DescaleResult
11 | vsscale.types.PlaneStatsKind
12 |
13 | .. automodule:: vsscale.types
14 | :members:
15 | :undoc-members:
16 | :show-inheritance:
17 |
--------------------------------------------------------------------------------
/readthedocs.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | formats:
4 |   - htmlzip
5 |   - pdf
6 |
7 | build:
8 |   os: ubuntu-22.04
9 |   tools:
10 |     python: "3.10"
11 |
12 | sphinx:
13 |   configuration: docs/conf.py
14 |   fail_on_warning: true
15 |
16 | python:
17 |   install:
18 |     - requirements: docs/requirements.txt
19 |     - requirements: requirements-dev.txt
20 |     - requirements: requirements.txt
21 |
--------------------------------------------------------------------------------
/requirements-dev.txt:
--------------------------------------------------------------------------------
1 | packaging>=24.0
2 | pycodestyle>=2.11.1
3 | ruff>=0.6.5
4 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | VapourSynth>=68
2 | vstools>=3.4.1
3 | vskernels>=3.4.2
4 | vsexprtools>=1.8.1
5 | vsrgtools>=1.9.0
6 | vsmasktools>=1.4.1
7 | vsaa>=1.12.1
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [flake8]
2 | count = True
3 | ignore = W503
4 | max-line-length = 120
5 | exclude = stubs/*
6 | show-source = True
7 | statistics = True
8 |
9 |
10 | [mypy]
11 | ignore_missing_imports = False
12 |
13 | disallow_any_generics = True
14 |
15 | disallow_untyped_defs = True
16 | disallow_incomplete_defs = True
17 | check_untyped_defs = True
18 | disallow_untyped_decorators = True
19 |
20 | no_implicit_optional = True
21 | strict_optional = True
22 |
23 | warn_redundant_casts = True
24 | warn_unused_ignores = True
25 | warn_no_return = True
26 | warn_return_any = True
27 | warn_unreachable = True
28 |
29 | ignore_errors = False
30 |
31 | allow_untyped_globals = False
32 | allow_redefinition = False
33 | implicit_reexport = False
34 | strict_equality = True
35 |
36 | show_error_context = False
37 | show_column_numbers = True
38 | show_error_codes = True
39 | color_output = True
40 | error_summary = True
41 | pretty = True
42 |
43 |
44 | [mypy-cytoolz.*]
45 | ignore_errors = True
46 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import setuptools
4 | from pathlib import Path
5 |
6 | package_name = 'vsscale'
7 |
8 | exec(Path(f'{package_name}/_metadata.py').read_text(), meta := dict[str, str]())
9 |
10 | readme = Path('README.md').read_text()
11 | requirements = Path('requirements.txt').read_text()
12 |
13 |
14 | setuptools.setup(
15 |     name=package_name,
16 |     version=meta['__version__'],
17 |     author=meta['__author_name__'],
18 |     author_email=meta['__author_email__'],
19 |     maintainer=meta['__maintainer_name__'],
20 |     maintainer_email=meta['__maintainer_email__'],
21 |     description=meta['__doc__'],
22 |     long_description=readme,
23 |     long_description_content_type='text/markdown',
24 |     project_urls={
25 |         'Source Code': 'https://github.com/Jaded-Encoding-Thaumaturgy/vs-scale',
26 |         'Contact': 'https://discord.gg/XTpc6Fa9eB',
27 |     },
28 |     install_requires=requirements,
29 |     python_requires='>=3.12',
30 |     packages=[
31 |         package_name
32 |     ],
33 |     package_data={
34 |         package_name: ['py.typed']
35 |     },
36 |     classifiers=[
37 |         'Programming Language :: Python :: 3',
38 |         'License :: OSI Approved :: MIT License',
39 |         'Operating System :: OS Independent',
40 |     ],
41 |     command_options={
42 |         "build_sphinx": {
43 |             "project": ("setup.py", package_name),
44 |             "version": ("setup.py", meta['__version__']),
45 |             "release": ("setup.py", meta['__version__']),
46 |             "source_dir": ("setup.py", "docs")
47 |         }
48 |     }
49 | )
50 |
--------------------------------------------------------------------------------
/vsscale/__init__.py:
--------------------------------------------------------------------------------
1 | # ruff: noqa: F401, F403
2 |
3 | from .exceptions import *
4 | from .funcs import *
5 | from .helpers import *
6 | from .mask import *
7 | from .onnx import *
8 | from .rescale import *
9 | from .scale import *
10 | from .shaders import *
11 | from .types import *
12 |
--------------------------------------------------------------------------------
/vsscale/_metadata.py:
--------------------------------------------------------------------------------
1 | """VapourSynth (de)scaling functions"""
2 |
3 | __version__ = '2.3.2'
4 |
5 | __author_name__, __author_email__ = 'Setsugen no ao', 'setsugen@setsugen.dev'
6 | __maintainer_name__, __maintainer_email__ = __author_name__, __author_email__
7 |
8 | __author__ = f'{__author_name__} <{__author_email__}>'
9 | __maintainer__ = __author__
10 |
11 | if __name__ == '__github__':
12 |     print(__version__)
13 |
--------------------------------------------------------------------------------
/vsscale/base.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import TYPE_CHECKING
4 |
5 | from vstools import CustomStrEnum
6 |
7 | __all__ = [
8 |     'ShaderFileBase',
9 |     'ShaderFileCustom'
10 | ]
11 |
12 | if TYPE_CHECKING:
13 |     from .shaders import ShaderFile
14 |
15 |     class ShaderFileCustomBase:
16 |         CUSTOM: ShaderFileCustom
17 |
18 |     class ShaderFileBase(ShaderFileCustomBase, CustomStrEnum):
19 |         value: str
20 |
21 |     class ShaderFileCustom(ShaderFile):  # type: ignore
22 |         ...
23 | else:
24 |     ShaderFileBase = CustomStrEnum
25 |     ShaderFileCustom = CustomStrEnum
26 |
--------------------------------------------------------------------------------
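base.py relies on a standard typing pattern: the TYPE_CHECKING branch gives the type checker a rich class hierarchy (including a reference to ShaderFile that would be a circular import at runtime), while at runtime both names simply alias CustomStrEnum. A stripped-down sketch of the same pattern, with hypothetical names:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Only the type checker evaluates this branch, so an import that
    # would be circular at runtime is safe here.
    from other_module import RichBase  # hypothetical import

    class Base(RichBase):
        extra_attr: str
else:
    Base = object  # cheap runtime stand-in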
/vsscale/exceptions.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from typing import Any
4 |
5 | from vskernels import Kernel, KernelT
6 | from vstools import CustomValueError, FuncExceptT
7 |
8 | __all__ = [
9 | 'CompareSameKernelError'
10 | ]
11 |
12 |
13 | class CompareSameKernelError(CustomValueError):
14 | """Raised when two of the same kernels are compared to each other."""
15 |
16 | def __init__(
17 | self, func: FuncExceptT, kernel: KernelT, message: str = 'You may not compare {kernel} with itself!',
18 | **kwargs: Any
19 | ) -> None:
20 | super().__init__(message, func, kernel=Kernel.from_param(kernel, CompareSameKernelError), **kwargs)
21 |
--------------------------------------------------------------------------------
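A hypothetical sketch of how a caller is expected to raise the error above (the helper and its arguments are illustrative, not part of the package):

from vskernels import Bilinear, KernelT

from vsscale import CompareSameKernelError

def compare_kernels(kernel_a: KernelT, kernel_b: KernelT) -> None:  # illustrative helper
    if kernel_a == kernel_b:
        # `func` attributes the error to the caller; `kernel` fills the {kernel} placeholder.
        raise CompareSameKernelError(compare_kernels, kernel_a)

# compare_kernels(Bilinear, Bilinear)  # raises CompareSameKernelError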
/vsscale/funcs.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from dataclasses import dataclass, field
4 | from functools import partial
5 | from typing import Any, Callable, Concatenate, Literal, TypeGuard, cast
6 |
7 | from vsaa import Nnedi3
8 | from vsexprtools import ExprOp, combine, norm_expr
9 | from vskernels import Scaler, ScalerT
10 | from vsmasktools import ringing_mask
11 | from vsrgtools import LimitFilterMode, RepairMode, MeanMode, limit_filter, repair, unsharp_masked
12 | from vstools import (
13 |     EXPR_VARS, CustomIndexError, CustomOverflowError, P, check_ref_clip, inject_self, scale_delta, vs
14 | )
15 |
16 | from .helpers import GenericScaler
17 | from .shaders import FSRCNNXShader, FSRCNNXShaderT
18 |
19 | __all__ = [
20 |     'MergeScalers',
21 |     'ClampScaler', 'MergedFSRCNNX',
22 |     'UnsharpLimitScaler', 'UnsharpedFSRCNNX'
23 | ]
24 |
25 |
26 | class MergeScalers(GenericScaler):
27 |     def __init__(self, *scalers: ScalerT | tuple[ScalerT, float | None]) -> None:
28 |         """Create a unified Scaler from multiple Scalers with optional weights."""
29 |         if (slen := len(scalers)) < 2:
30 |             raise CustomIndexError('Not enough scalers passed!', self.__class__, slen)
31 |         elif slen > len(EXPR_VARS):
32 |             raise CustomIndexError('Too many scalers passed!', self.__class__, slen)
33 |
34 |         def _not_all_tuple_scalers(
35 |             scalers: tuple[ScalerT | tuple[ScalerT, float | None], ...]
36 |         ) -> TypeGuard[tuple[ScalerT, ...]]:
37 |             return all(not isinstance(s, tuple) for s in scalers)
38 |
39 |         if not _not_all_tuple_scalers(scalers):
40 |             norm_scalers = [
41 |                 scaler if isinstance(scaler, tuple) else (scaler, None) for scaler in scalers
42 |             ]
43 |
44 |             curr_sum = 0.0
45 |             n_auto_weight = 0
46 |
47 |             for i, (_, weight) in enumerate(norm_scalers):
48 |                 if weight is None:
49 |                     n_auto_weight += 1
50 |                 elif weight <= 0.0:
51 |                     raise CustomOverflowError(
52 |                         f'Weights have to be positive (> 0.0)! (Scaler index: {i})', self.__class__
53 |                     )
54 |                 else:
55 |                     curr_sum += weight
56 |
57 |             if curr_sum > 1.0:
58 |                 raise CustomOverflowError(
59 |                     'The sum of the weights must be less than or equal to 1.0!', self.__class__
60 |                 )
61 |
62 |             if n_auto_weight:
63 |                 a_wgh = (1.0 - curr_sum) / n_auto_weight
64 |
65 |                 norm_scalers = [
66 |                     (scaler, a_wgh if weight is None else weight)
67 |                     for scaler, weight in norm_scalers
68 |                 ]
69 |         else:
70 |             weight = 1.0 / len(scalers)
71 |
72 |             norm_scalers = [(scaler, weight) for scaler in scalers]
73 |
74 |         self.scalers = [
75 |             (self.ensure_scaler(scaler), float(weight if weight else 0))
76 |             for scaler, weight in norm_scalers
77 |         ]
78 |
79 |     def scale(  # type: ignore
80 |         self, clip: vs.VideoNode, width: int | None = None, height: int | None = None,
81 |         shift: tuple[float, float] = (0, 0), **kwargs: Any
82 |     ) -> vs.VideoNode:
83 |         width, height = self._wh_norm(clip, width, height)
84 |
85 |         scalers, weights = cast(tuple[list[Scaler], list[float]], zip(*self.scalers))
86 |
87 |         return combine(
88 |             [scaler.scale(clip, width, height, shift, **kwargs) for scaler in scalers],
89 |             ExprOp.ADD, zip(weights, ExprOp.MUL), expr_suffix=[sum(weights), ExprOp.DIV]
90 |         )
91 |
92 |     @property
93 |     def kernel_radius(self) -> int:  # type: ignore[override]
94 |         return max(scaler.kernel_radius for scaler, _ in self.scalers)
95 |
96 |
97 | @dataclass
98 | class ClampScaler(GenericScaler):
99 |     """Clamp a reference Scaler."""
100 |
101 |     ref_scaler: ScalerT
102 |     """Scaler to clamp."""
103 |
104 |     strength: int = 80
105 |     """Strength of clamping."""
106 |
107 |     overshoot: float | None = None
108 |     """Overshoot threshold."""
109 |
110 |     undershoot: float | None = None
111 |     """Undershoot threshold."""
112 |
113 |     limit: RepairMode | bool = True
114 |     """Whether to use under/overshoot limiting (True) or a repaired reference clip for limiting."""
115 |
116 |     operator: Literal[ExprOp.MAX, ExprOp.MIN] | None = ExprOp.MIN
117 |     """Whether to take the brightest or darkest pixels in the merge."""
118 |
119 |     masked: bool = True
120 |     """Whether to mask with a ringing mask or not."""
121 |
122 |     reference: ScalerT | vs.VideoNode = Nnedi3
123 |     """Reference Scaler used to clamp ref_scaler."""
124 |
125 |     def __post_init__(self) -> None:
126 |         super().__post_init__()
127 |
128 |         if self.strength >= 100:
129 |             raise CustomOverflowError('strength must be less than 100!', self.__class__)
130 |         elif self.strength <= 0:
131 |             raise CustomOverflowError('strength must be greater than 0!', self.__class__)
132 |
133 |         if self.overshoot is None:
134 |             self.overshoot = self.strength / 100
135 |         if self.undershoot is None:
136 |             self.undershoot = self.overshoot
137 |
138 |         self._reference = None if isinstance(self.reference, vs.VideoNode) else self.ensure_scaler(self.reference)
139 |         self._ref_scaler = self.ensure_scaler(self.ref_scaler)
140 |
141 |     @inject_self
142 |     def scale(  # type: ignore
143 |         self, clip: vs.VideoNode, width: int | None = None, height: int | None = None,
144 |         shift: tuple[float, float] = (0, 0), *, smooth: vs.VideoNode | None = None, **kwargs: Any
145 |     ) -> vs.VideoNode:
146 |         width, height = self._wh_norm(clip, width, height)
147 |
148 |         assert (self.undershoot or self.undershoot == 0) and (self.overshoot or self.overshoot == 0)
149 |
150 |         ref = self._ref_scaler.scale(clip, width, height, shift, **kwargs)
151 |
152 |         if isinstance(self.reference, vs.VideoNode):
153 |             smooth = self.reference
154 |
155 |             if shift != (0, 0):
156 |                 smooth = self._kernel.shift(smooth, shift)  # type: ignore
157 |         else:
158 |             assert self._reference
159 |             smooth = self._reference.scale(clip, width, height, shift)
160 |
161 |         assert smooth
162 |
163 |         check_ref_clip(ref, smooth)
164 |
165 |         merge_weight = self.strength / 100
166 |
167 |         if self.limit is True:
168 |             expression = 'x {merge_weight} * y {ref_weight} * + a {undershoot} - z {overshoot} + clip'
169 |
170 |             merged = norm_expr(
171 |                 [ref, smooth, smooth.std.Maximum(), smooth.std.Minimum()],
172 |                 expression, merge_weight=merge_weight, ref_weight=1.0 - merge_weight,
173 |                 undershoot=scale_delta(self.undershoot, 32, clip),
174 |                 overshoot=scale_delta(self.overshoot, 32, clip)
175 |             )
176 |         else:
177 |             merged = smooth.std.Merge(ref, merge_weight)
178 |
179 |         if isinstance(self.limit, RepairMode):
180 |             merged = repair(merged, smooth, self.limit)
181 |
182 |         if self.operator is not None:
183 |             merge2 = combine([smooth, ref], self.operator)
184 |
185 |             if self.masked:
186 |                 merged = merged.std.MaskedMerge(merge2, ringing_mask(smooth))
187 |             else:
188 |                 merged = merge2
189 |         elif self.masked:
190 |             merged = merged.std.MaskedMerge(smooth, ringing_mask(smooth))
191 |
192 |         return merged
193 |
194 |     @property
195 |     def kernel_radius(self) -> int:  # type: ignore[override]
196 |         if self._reference:
197 |             return max(self._reference.kernel_radius, self._ref_scaler.kernel_radius)
198 |         return self._ref_scaler.kernel_radius
199 |
200 |
201 | class UnsharpLimitScaler(GenericScaler):
202 |     """Limit a scaler with a masked unsharping."""
203 |
204 |     def __init__(
205 |         self, ref_scaler: ScalerT,
206 |         unsharp_func: Callable[
207 |             Concatenate[vs.VideoNode, P], vs.VideoNode
208 |         ] = partial(unsharp_masked, radius=2, strength=65),
209 |         merge_mode: LimitFilterMode | bool = True,
210 |         reference: ScalerT | vs.VideoNode = Nnedi3(0, opencl=None),
211 |         *args: P.args, **kwargs: P.kwargs
212 |     ) -> None:
213 |         """
214 |         :param ref_scaler:      Scaler of which to limit haloing.
215 |         :param unsharp_func:    Unsharpening function used as reference for limiting.
216 |         :param merge_mode:      Whether to limit with LimitFilterMode,
217 |                                 use a median filter (True) or just take the darkest pixels (False).
218 |         :param reference:       Reference scaler used to fill in the haloed parts.
219 |         """
220 |
221 |         self.unsharp_func = unsharp_func
222 |
223 |         self.merge_mode = merge_mode
224 |
225 |         self.reference = reference
226 |         self._reference = None if isinstance(self.reference, vs.VideoNode) else self.ensure_scaler(self.reference)
227 |         self.ref_scaler = self.ensure_scaler(ref_scaler)
228 |
229 |         self.args = args
230 |         self.kwargs = kwargs
231 |
232 |     @inject_self
233 |     def scale(  # type: ignore
234 |         self, clip: vs.VideoNode, width: int | None = None, height: int | None = None,
235 |         shift: tuple[float, float] = (0, 0), *, smooth: vs.VideoNode | None = None, **kwargs: Any
236 |     ) -> vs.VideoNode:
237 |         width, height = self._wh_norm(clip, width, height)
238 |
239 |         fsrcnnx = self.ref_scaler.scale(clip, width, height, shift, **kwargs)
240 |
241 |         if isinstance(self.reference, vs.VideoNode):
242 |             smooth = self.reference
243 |
244 |             if shift != (0, 0):
245 |                 smooth = self._kernel.shift(smooth, shift)  # type: ignore
246 |         else:
247 |             smooth = self._reference.scale(clip, width, height, shift)  # type: ignore
248 |
249 |         assert smooth
250 |
251 |         check_ref_clip(fsrcnnx, smooth)
252 |
253 |         smooth_sharp = self.unsharp_func(smooth, *self.args, **self.kwargs)
254 |
255 |         if isinstance(self.merge_mode, LimitFilterMode):
256 |             return limit_filter(smooth, fsrcnnx, smooth_sharp, self.merge_mode)
257 |
258 |         if self.merge_mode:
259 |             return MeanMode.MEDIAN(smooth, fsrcnnx, smooth_sharp)
260 |
261 |         return combine([smooth, fsrcnnx, smooth_sharp], ExprOp.MIN)
262 |
263 |     @property
264 |     def kernel_radius(self) -> int:  # type: ignore[override]
265 |         if self._reference:
266 |             return max(self._reference.kernel_radius, self.ref_scaler.kernel_radius)
267 |         return self.ref_scaler.kernel_radius
268 |
269 |
270 | @dataclass
271 | class MergedFSRCNNX(ClampScaler):
272 |     """Clamped FSRCNNX Scaler."""
273 |
274 |     ref_scaler: FSRCNNXShaderT = field(default_factory=lambda: FSRCNNXShader.x56, kw_only=True)
275 |
276 |
277 | class UnsharpedFSRCNNX(UnsharpLimitScaler):
278 |     """Clamped FSRCNNX Scaler with an unsharp mask."""
279 |
280 |     def __init__(
281 |         self,
282 |         unsharp_func: Callable[
283 |             Concatenate[vs.VideoNode, P], vs.VideoNode
284 |         ] = partial(unsharp_masked, radius=2, strength=65),
285 |         merge_mode: LimitFilterMode | bool = True,
286 |         reference: ScalerT | vs.VideoNode = Nnedi3(0, opencl=None),
287 |         ref_scaler: ScalerT = FSRCNNXShader.x56,
288 |         *args: P.args, **kwargs: P.kwargs
289 |     ) -> None:
290 |         super().__init__(ref_scaler, unsharp_func, merge_mode, reference, *args, **kwargs)
291 |
--------------------------------------------------------------------------------
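A hypothetical usage sketch for the classes above (scaler choices and weights are illustrative): MergeScalers spreads any unassigned weight evenly, and ClampScaler limits its ref_scaler against a smoother reference.

from vskernels import Catrom, Lanczos

from vsscale import ClampScaler, MergeScalers
from vsscale.shaders import FSRCNNXShader

# 0.6 Catrom plus the remaining auto-assigned weight (0.4) for Lanczos.
blended = MergeScalers((Catrom, 0.6), (Lanczos, None))
# upscaled = blended.scale(clip, 1920, 1080)

# Clamp FSRCNNX against the default Nnedi3 reference at 80% strength.
clamped = ClampScaler(FSRCNNXShader.x56, strength=80)
# upscaled = clamped.scale(clip, 1920, 1080)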
/vsscale/helpers.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from dataclasses import dataclass, field
4 | from functools import partial
5 | from math import ceil, floor
6 | from types import NoneType
7 | from typing import Any, Callable, NamedTuple, Protocol, Self, TypeAlias, overload
8 |
9 | from vsaa import Nnedi3
10 | from vskernels import Catrom, Kernel, KernelT, Scaler, ScalerT
11 | from vstools import KwargsT, MatrixT, Resolution, fallback, get_w, mod2, plane, vs
12 |
13 |
14 | __all__ = [
15 |     'GenericScaler',
16 |     'scale_var_clip',
17 |     'fdescale_args',
18 |     'descale_args',
19 |
20 |     'CropRel',
21 |     'CropAbs',
22 |     'ScalingArgs'
23 | ]
24 |
25 | __abstract__ = [
26 |     'GenericScaler'
27 | ]
28 |
29 |
30 | class _GeneriScaleNoShift(Protocol):
31 |     def __call__(self, clip: vs.VideoNode, width: int, height: int, *args: Any, **kwds: Any) -> vs.VideoNode:
32 |         ...
33 |
34 |
35 | class _GeneriScaleWithShift(Protocol):
36 |     def __call__(
37 |         self, clip: vs.VideoNode, width: int, height: int, shift: tuple[float, float],
38 |         *args: Any, **kwds: Any
39 |     ) -> vs.VideoNode:
40 |         ...
41 |
42 |
43 | @dataclass
44 | class GenericScaler(Scaler):
45 |     """
46 |     Generic Scaler base class.
47 |     Inherit from this to create more complex scalers with built-in utils.
48 |     Instantiate with a callable taking at least a VideoNode, width, and height
49 |     to use it as a Scaler in functions that take one.
50 |     """
51 |
52 |     kernel: KernelT | None = field(default=None, kw_only=True)
53 |     """
54 |     Base kernel to be used for certain scaling/shifting/resampling operations.
55 |     Defaults to Catrom if not specified.
56 |     """
57 |
58 |     scaler: ScalerT | None = field(default=None, kw_only=True)
59 |     """Scaler used for scaling operations. Defaults to kernel."""
60 |
61 |     shifter: KernelT | None = field(default=None, kw_only=True)
62 |     """Kernel used for shifting operations. Defaults to kernel."""
63 |
64 |     def __post_init__(self) -> None:
65 |         self._kernel = Kernel.ensure_obj(self.kernel or Catrom, self.__class__)
66 |         self._scaler = Scaler.ensure_obj(self.scaler or self._kernel, self.__class__)
67 |         self._shifter = Kernel.ensure_obj(
68 |             self.shifter or (self._scaler if isinstance(self._scaler, Kernel) else Catrom), self.__class__
69 |         )
70 |
71 |     def __init__(
72 |         self, func: _GeneriScaleNoShift | _GeneriScaleWithShift | Callable[..., vs.VideoNode], **kwargs: Any
73 |     ) -> None:
74 |         self.func = func
75 |         self.kwargs = kwargs
76 |
77 |     def scale(  # type: ignore
78 |         self, clip: vs.VideoNode, width: int | None = None, height: int | None = None,
79 |         shift: tuple[float, float] = (0, 0), **kwargs: Any
80 |     ) -> vs.VideoNode:
81 |         width, height = self._wh_norm(clip, width, height)
82 |
83 |         kwargs = self.kwargs | kwargs
84 |
85 |         output = None
86 |
87 |         if shift != (0, 0):
88 |             try:
89 |                 output = self.func(clip, width, height, shift, **kwargs)
90 |             except BaseException:
91 |                 try:
92 |                     output = self.func(clip, width=width, height=height, shift=shift, **kwargs)
93 |                 except BaseException:
94 |                     pass
95 |
96 |         if output is None:
97 |             try:
98 |                 output = self.func(clip, width, height, **kwargs)
99 |             except BaseException:
100 |                 output = self.func(clip, width=width, height=height, **kwargs)
101 |
102 |         return self._finish_scale(output, clip, width, height, shift)
103 |
104 |     def _finish_scale(
105 |         self, clip: vs.VideoNode, input_clip: vs.VideoNode, width: int, height: int,
106 |         shift: tuple[float, float] = (0, 0), matrix: MatrixT | None = None,
107 |         copy_props: bool = False
108 |     ) -> vs.VideoNode:
109 |         assert input_clip.format
110 |
111 |         if input_clip.format.num_planes == 1:
112 |             clip = plane(clip, 0)
113 |
114 |         if (clip.width, clip.height) != (width, height):
115 |             clip = self._scaler.scale(clip, width, height)
116 |
117 |         if shift != (0, 0):
118 |             clip = self._shifter.shift(clip, shift)  # type: ignore
119 |
120 |         assert clip.format
121 |
122 |         if clip.format.id != input_clip.format.id:
123 |             clip = self._kernel.resample(clip, input_clip, matrix)
124 |
125 |         if copy_props:
126 |             return clip.std.CopyFrameProps(input_clip)
127 |
128 |         return clip
129 |
130 |     def ensure_scaler(self, scaler: ScalerT) -> Scaler:
131 |         from dataclasses import is_dataclass, replace
132 |
133 |         scaler_obj = Scaler.ensure_obj(scaler, self.__class__)
134 |
135 |         if is_dataclass(scaler_obj):
136 |             from inspect import Signature  # type: ignore[unreachable]
137 |
138 |             kwargs = dict[str, ScalerT]()
139 |
140 |             init_keys = Signature.from_callable(scaler_obj.__init__).parameters.keys()
141 |
142 |             if 'kernel' in init_keys:
143 |                 kwargs.update(kernel=self.kernel or scaler_obj.kernel)
144 |
145 |             if 'scaler' in init_keys:
146 |                 kwargs.update(scaler=self.scaler or scaler_obj.scaler)
147 |
148 |             if 'shifter' in init_keys:
149 |                 kwargs.update(shifter=self.shifter or scaler_obj.shifter)
150 |
151 |             if kwargs:
152 |                 scaler_obj = replace(scaler_obj, **kwargs)
153 |
154 |         return scaler_obj
155 |
156 |
157 | def scale_var_clip(
158 |     clip: vs.VideoNode,
159 |     width: int | Callable[[Resolution], int] | None, height: int | Callable[[Resolution], int],
160 |     shift: tuple[float, float] | Callable[[Resolution], tuple[float, float]] = (0, 0),
161 |     scaler: Scaler | Callable[[Resolution], Scaler] = Nnedi3(), debug: bool = False
162 | ) -> vs.VideoNode:
163 |     """Scale a variable clip to constant or variable resolution."""
164 |     if not debug:
165 |         try:
166 |             return scaler.scale(clip, width, height, shift)  # type: ignore
167 |         except BaseException:
168 |             pass
169 |
170 |     _cached_clips = dict[str, vs.VideoNode]()
171 |
172 |     no_accepts_var = list[Scaler]()
173 |
174 |     def _eval_scale(f: vs.VideoFrame, n: int) -> vs.VideoNode:
175 |         key = f'{f.width}_{f.height}'
176 |
177 |         if key not in _cached_clips:
178 |             res = Resolution(f.width, f.height)
179 |
180 |             norm_scaler = scaler(res) if callable(scaler) else scaler
181 |             norm_shift = shift(res) if callable(shift) else shift
182 |             norm_height = height(res) if callable(height) else height
183 |
184 |             if width is None:
185 |                 norm_width = get_w(norm_height, res.width / res.height)
186 |             else:
187 |                 norm_width = width(res) if callable(width) else width
188 |
189 |             part_scaler = partial(  # type: ignore[misc]
190 |                 norm_scaler.scale, width=norm_width, height=norm_height, shift=norm_shift
191 |             )
192 |
193 |             scaled = clip
194 |             if (scaled.width, scaled.height) != (norm_width, norm_height):
195 |                 if norm_scaler not in no_accepts_var:
196 |                     try:
197 |                         scaled = part_scaler(clip)
198 |                     except BaseException:
199 |                         no_accepts_var.append(norm_scaler)
200 |
201 |                 if norm_scaler in no_accepts_var:
202 |                     const_clip = clip.resize.Point(res.width, res.height)
203 |
204 |                     scaled = part_scaler(const_clip)
205 |
206 |             if debug:
207 |                 scaled = scaled.std.SetFrameProps(var_width=res.width, var_height=res.height)
208 |
209 |             _cached_clips[key] = scaled
210 |
211 |         return _cached_clips[key]
212 |
213 |     if callable(width) or callable(height):
214 |         out_clip = clip
215 |     else:
216 |         out_clip = clip.std.BlankClip(width, height)
217 |
218 |     return out_clip.std.FrameEval(_eval_scale, clip, clip)
219 |
220 |
221 | LeftCrop: TypeAlias = int
222 | RightCrop: TypeAlias = int
223 | TopCrop: TypeAlias = int
224 | BottomCrop: TypeAlias = int
225 |
226 |
227 | class CropRel(NamedTuple):
228 |     left: int = 0
229 |     right: int = 0
230 |     top: int = 0
231 |     bottom: int = 0
232 |
233 |
234 | class CropAbs(NamedTuple):
235 |     width: int
236 |     height: int
237 |     left: int = 0
238 |     top: int = 0
239 |
240 |     def to_rel(self, base_clip: vs.VideoNode) -> CropRel:
241 |         return CropRel(
242 |             self.left,
243 |             base_clip.width - self.width - self.left,
244 |             self.top,
245 |             base_clip.height - self.height - self.top
246 |         )
247 |
248 |
249 | @dataclass
250 | class ScalingArgs:
251 |     width: int
252 |     height: int
253 |     src_width: float
254 |     src_height: float
255 |     src_top: float
256 |     src_left: float
257 |     mode: str = 'hw'
258 |
259 |     def _do(self) -> tuple[bool, bool]:
260 |         return 'h' in self.mode.lower(), 'w' in self.mode.lower()
261 |
262 |     def _up_rate(self, clip: vs.VideoNode | None = None) -> tuple[float, float]:
263 |         if clip is None:
264 |             return 1.0, 1.0
265 |
266 |         do_h, do_w = self._do()
267 |
268 |         return (
269 |             (clip.height / self.height) if do_h else 1.0,
270 |             (clip.width / self.width) if do_w else 1.0
271 |         )
272 |
273 |     def kwargs(self, clip_or_rate: vs.VideoNode | float | None = None, /) -> KwargsT:
274 |         kwargs = dict[str, Any]()
275 |
276 |         do_h, do_w = self._do()
277 |
278 |         if isinstance(clip_or_rate, (vs.VideoNode, NoneType)):
279 |             up_rate_h, up_rate_w = self._up_rate(clip_or_rate)
280 |         else:
281 |             up_rate_h, up_rate_w = clip_or_rate, clip_or_rate
282 |
283 |         if do_h:
284 |             kwargs.update(
285 |                 src_height=self.src_height * up_rate_h,
286 |                 src_top=self.src_top * up_rate_h
287 |             )
288 |
289 |         if do_w:
290 |             kwargs.update(
291 |                 src_width=self.src_width * up_rate_w,
292 |                 src_left=self.src_left * up_rate_w
293 |             )
294 |
295 |         return kwargs
296 |
297 |     @overload
298 |     @classmethod
299 |     def from_args(
300 |         cls,
301 |         base_clip: vs.VideoNode,
302 |         height: int, width: int | None = None,
303 |         /,
304 |         *,
305 |         src_top: float = ..., src_left: float = ...,
306 |         mode: str = 'hw'
307 |     ) -> Self:
308 |         ...
309 |
310 |     @overload
311 |     @classmethod
312 |     def from_args(
313 |         cls,
314 |         base_clip: vs.VideoNode,
315 |         height: float, width: float | None = ...,
316 |         /,
317 |         base_height: int | None = ..., base_width: int | None = ...,
318 |         src_top: float = ..., src_left: float = ...,
319 |         crop: tuple[LeftCrop, RightCrop, TopCrop, BottomCrop] | CropRel | CropAbs = ...,
320 |         mode: str = 'hw'
321 |     ) -> Self:
322 |         ...
323 |
324 |     @classmethod
325 |     def from_args(
326 |         cls,
327 |         base_clip: vs.VideoNode,
328 |         height: int | float, width: int | float | None = None,
329 |         base_height: int | None = None, base_width: int | None = None,
330 |         src_top: float = 0, src_left: float = 0,
331 |         crop: tuple[LeftCrop, RightCrop, TopCrop, BottomCrop] | CropRel | CropAbs | None = None,
332 |         mode: str = 'hw'
333 |     ) -> Self:
334 |         if crop:
335 |             if isinstance(crop, CropAbs):
336 |                 crop = crop.to_rel(base_clip)
337 |             elif isinstance(crop, CropRel):
338 |                 pass
339 |             else:
340 |                 crop = CropRel(*crop)
341 |         else:
342 |             crop = CropRel()
343 |
344 |         ratio = height / base_clip.height
345 |
346 |         if width is None:
347 |             if isinstance(height, int):
348 |                 width = get_w(height, base_clip, 2)
349 |             else:
350 |                 width = ratio * base_clip.width
351 |
352 |         if all([
353 |             isinstance(height, int),
354 |             isinstance(width, int),
355 |             base_height is None,
356 |             base_width is None,
357 |             crop == (0, 0, 0, 0)
358 |         ]):
359 |             return cls(int(width), int(height), int(width), int(height), src_top, src_left, mode)
360 |
361 |         if base_height is None:
362 |             base_height = mod2(ceil(height))
363 |
364 |         if base_width is None:
365 |             base_width = mod2(ceil(width))
366 |
367 |         margin_left = (base_width - width) / 2 + ratio * crop.left
368 |         margin_right = (base_width - width) / 2 + ratio * crop.right
369 |         cropped_width = base_width - floor(margin_left) - floor(margin_right)
370 |
371 |         margin_top = (base_height - height) / 2 + ratio * crop.top
372 |         margin_bottom = (base_height - height) / 2 + ratio * crop.bottom
373 |         cropped_height = base_height - floor(margin_top) - floor(margin_bottom)
374 |
375 |         if isinstance(width, int) and crop.left == crop.right == 0:
376 |             cropped_src_width = float(cropped_width)
377 |         else:
378 |             cropped_src_width = ratio * (base_clip.width - crop.left - crop.right)
379 |
380 |         cropped_src_left = margin_left - floor(margin_left) + src_left
381 |
382 |         if isinstance(height, int) and crop.top == crop.bottom == 0:
383 |             cropped_src_height = float(cropped_height)
384 |         else:
385 |             cropped_src_height = ratio * (base_clip.height - crop.top - crop.bottom)
386 |
387 |         cropped_src_top = margin_top - floor(margin_top) + src_top
388 |
389 |         return cls(
390 |             cropped_width, cropped_height,
391 |             cropped_src_width, cropped_src_height,
392 |             cropped_src_top, cropped_src_left,
393 |             mode
394 |         )
395 |
396 |
397 | def descale_args(
398 |     clip: vs.VideoNode,
399 |     src_height: float, src_width: float | None = None,
400 |     base_height: int | None = None, base_width: int | None = None,
401 |     crop_top: int = 0, crop_bottom: int = 0,
402 |     crop_left: int = 0, crop_right: int = 0,
403 |     mode: str = 'hw'
404 | ) -> ScalingArgs:
405 |     # warnings
406 |     return ScalingArgs.from_args(
407 |         clip.std.AddBorders(crop_left, crop_right, crop_top, crop_bottom),
408 |         src_height, src_width,
409 |         base_height, base_width,
410 |         0, 0,
411 |         CropRel(crop_left, crop_right, crop_top, crop_bottom),
412 |         mode
413 |     )
414 |
415 |
416 | def fdescale_args(
417 |     clip: vs.VideoNode, src_height: float,
418 |     base_height: int | None = None, base_width: int | None = None,
419 |     src_top: float | None = None, src_left: float | None = None,
420 |     src_width: float | None = None, mode: str = 'hw', up_rate: float = 2.0
421 | ) -> tuple[KwargsT, KwargsT]:
422 |     base_height = fallback(base_height, mod2(ceil(src_height)))
423 |     base_width = fallback(base_width, get_w(base_height, clip, 2))
424 |
425 |     src_width = fallback(src_width, src_height * clip.width / clip.height)
426 |
427 |     cropped_width = base_width - 2 * floor((base_width - src_width) / 2)
428 |     cropped_height = base_height - 2 * floor((base_height - src_height) / 2)
429 |
430 |     do_h, do_w = 'h' in mode.lower(), 'w' in mode.lower()
431 |
432 |     de_args = dict[str, Any](
433 |         width=cropped_width if do_w else clip.width,
434 |         height=cropped_height if do_h else clip.height
435 |     )
436 |
437 |     up_args = dict[str, Any]()
438 |
439 |     src_top = fallback(src_top, (cropped_height - src_height) / 2)
440 |     src_left = fallback(src_left, (cropped_width - src_width) / 2)
441 |
442 |     if do_h:
443 |         de_args.update(src_height=src_height, src_top=src_top)
444 |         up_args.update(src_height=src_height * up_rate, src_top=src_top * up_rate)
445 |
446 |     if do_w:
447 |         de_args.update(src_width=src_width, src_left=src_left)
448 |         up_args.update(src_width=src_width * up_rate, src_left=src_left * up_rate)
449 |
450 |     return de_args, up_args
451 |
--------------------------------------------------------------------------------
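Two hypothetical usage sketches for the helpers above (clip, kernel, and scaler names are illustrative). GenericScaler can wrap any callable that takes at least a clip, width, and height, and fdescale_args derives the descale/upscale src_* parameters for a fractional resolution:

from vsscale import GenericScaler, fdescale_args

# Wrap an arbitrary scaling callable so it can be passed anywhere a
# Scaler is expected; extra kwargs are forwarded to the callable.
my_scaler = GenericScaler(lambda clip, width, height: clip.resize.Spline36(width, height))
# upscaled = my_scaler.scale(clip, 1920, 1080)

# For a 1920x1080 clip and src_height=719.8 the defaults work out to:
#   base_height = mod2(ceil(719.8))              -> 720
#   src_width   = 719.8 * 1920 / 1080            -> ~1279.64
#   src_top     = (720 - 719.8) / 2              -> 0.1
# de_args, up_args = fdescale_args(clip, 719.8)
# descaled = kernel.descale(clip, **de_args)     # `kernel` is a hypothetical vskernels Kernel
# upscaled = scaler.scale(descaled, clip.width, clip.height, **up_args)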
/vsscale/mask.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from vsexprtools import ExprOp, average_merge, norm_expr
4 | from vskernels import Catrom
5 | from vsmasktools import Morpho, XxpandMode
6 | from vsrgtools import box_blur, gauss_blur
7 | from vstools import core, get_y, iterate, shift_clip_multi, split, vs, limiter
8 |
9 | __all__ = [
10 | 'descale_detail_mask', 'descale_error_mask'
11 | ]
12 |
13 |
14 | @limiter
15 | def descale_detail_mask(
16 |     clip: vs.VideoNode, rescaled: vs.VideoNode, thr: float = 0.05,
17 |     inflate: int = 2, xxpand: tuple[int, int] = (4, 0)
18 | ) -> vs.VideoNode:
19 |     """
20 |     Mask non-native resolution detail to prevent detail loss and artifacting.
21 |
22 |     Descaling without masking is very dangerous, as descaling FHD material often leads to
23 |     heavy artifacting and fine detail loss.
24 |
25 |     :param clip:        Original clip.
26 |     :param rescaled:    Clip rescaled using the presumed native kernel.
27 |     :param thr:         Binarization threshold. Lower catches more.
28 |                         Assumes float bitdepth input.
29 |                         Default: 0.05.
30 |     :param inflate:     Number of times to ``inflate`` the mask. Default: 2.
31 |     :param xxpand:      Number of times to ``Maximum`` the clip.
32 |                         The first ``Maximum`` is applied before inflating, the second after.
33 |                         Default: 4 times pre-inflate, 0 times post-inflate.
34 |
35 |     :return:            Mask containing all the native FHD detail.
36 |     """
37 |     mask = norm_expr([get_y(clip), get_y(rescaled)], 'x y - abs')
38 |
39 |     mask = Morpho.binarize(mask, thr)
40 |
41 |     if xxpand[0]:
42 |         mask = iterate(mask, core.std.Maximum if xxpand[0] > 0 else core.std.Minimum, xxpand[0])
43 |
44 |     if inflate:
45 |         mask = iterate(mask, core.std.Inflate, inflate)
46 |
47 |     if xxpand[1]:
48 |         mask = iterate(mask, core.std.Maximum if xxpand[1] > 0 else core.std.Minimum, xxpand[1])
49 |
50 |     return mask
51 |
52 |
53 | @limiter
54 | def descale_error_mask(
55 |     clip: vs.VideoNode, rescaled: vs.VideoNode,
56 |     thr: float | list[float] = 0.038,
57 |     expands: int | tuple[int, int, int] = (2, 2, 3),
58 |     blur: int | float = 3, bwbias: int = 1, tr: int = 1
59 | ) -> vs.VideoNode:
60 |     """
61 |     Create an error mask from the original and rescaled clip.
62 |
63 |     :param clip:        Original clip.
64 |     :param rescaled:    Rescaled clip.
65 |     :param thr:         Threshold of the minimum difference.
66 |     :param expands:     Iterations of mask expansion at each step (diff, expand, binarize).
67 |     :param blur:        How much to blur the mask. An int gives a box_blur, a float a gauss_blur.
68 |     :param bwbias:      Bias multiplier for pure black/white areas (where chroma is neutral).
69 |     :param tr:          Temporal radius used to make the error mask temporally stable.
70 |
71 |     :return:            Descale error mask.
72 |     """
73 |     assert clip.format and rescaled.format
74 |
75 |     y, *chroma = split(clip)
76 |
77 |     error = norm_expr([y, rescaled], 'x y - abs')
78 |
79 |     if bwbias > 1 and chroma:
80 |         chroma_abs = norm_expr(chroma, 'x neutral - abs y neutral - abs max')
81 |         chroma_abs = Catrom.scale(chroma_abs, y.width, y.height)
82 |
83 |         bias = norm_expr([y, chroma_abs], f'x ymax >= x ymin <= or y 0 = and {bwbias} 1 ?')
84 |         bias = Morpho.expand(bias, 2)
85 |
86 |         error = ExprOp.MUL(error, bias)
87 |
88 |     if isinstance(expands, int):
89 |         exp1 = exp2 = exp3 = expands
90 |     else:
91 |         exp1, exp2, exp3 = expands
92 |
93 |     if exp1:
94 |         error = Morpho.expand(error, exp1)
95 |
96 |     if exp2:
97 |         error = Morpho.expand(error, exp2, mode=XxpandMode.ELLIPSE)
98 |
99 |     thrs = [thr] if isinstance(thr, (float, int)) else thr
100 |
101 |     error = Morpho.binarize(error, thrs[0])
102 |
103 |     for scaled_thr in thrs[1:]:
104 |         bin2 = Morpho.binarize(error, scaled_thr)
105 |         error = bin2.misc.Hysteresis(error)
106 |
107 |     if exp3:
108 |         error = Morpho.expand(error, exp3, mode=XxpandMode.ELLIPSE)
109 |
110 |     if tr > 1:
111 |         avg = Morpho.binarize(average_merge(*shift_clip_multi(error, (-tr, tr))), 0.5)
112 |
113 |         error = ExprOp.MIN(error, ExprOp.MAX(shift_clip_multi(ExprOp.MIN(error, avg), (-tr, tr))))
114 |
115 |     if isinstance(blur, int):
116 |         error = box_blur(error, blur)
117 |     else:
118 |         error = gauss_blur(error, blur)
119 |
120 |     return error
121 |
--------------------------------------------------------------------------------
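A minimal usage sketch for the two masking helpers above, assuming `src` is a float FHD clip whose presumed native kernel is Bilinear (the names and values here are illustrative, not part of the module):

```py
from vskernels import Bilinear
from vsscale.funcs import descale_detail_mask, descale_error_mask

# Round-trip the clip through the presumed native kernel.
descaled = Bilinear.descale(src, 1280, 720)
rescaled = Bilinear.scale(descaled, src.width, src.height)

# Catch native-resolution detail (credits, overlays) that descaling would destroy.
detail_mask = descale_detail_mask(src, rescaled, thr=0.05)

# Or build a temporally stabilized error mask with two hysteresis thresholds.
error_mask = descale_error_mask(src, rescaled, thr=[0.038, 0.1], tr=2)
```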
/vsscale/onnx.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass
2 | from typing import Any, ClassVar
3 |
4 | from vskernels import Kernel, KernelT
5 | from vstools import (
6 | CustomValueError, DependencyNotFoundError, KwargsT, NotFoundEnumValue, SPath, SPathLike, core,
7 | depth, expect_bits, get_nvidia_version, get_video_format, get_y, inject_self, limiter, vs
8 | )
9 |
10 | from .helpers import GenericScaler
11 |
12 | __all__ = ["GenericOnnxScaler", "autoselect_backend", "ArtCNN"]
13 |
14 |
15 | @dataclass
16 | class GenericOnnxScaler(GenericScaler):
17 | """Generic scaler class for an onnx model."""
18 |
19 | model: SPathLike
20 | """Path to the model."""
21 | backend: Any | None = None
22 | """
23 | vs-mlrt backend. Will attempt to autoselect the most suitable one with fp16=True if None.\n
24 | In order of trt > cuda > directml > ncnn > cpu.
25 | """
26 | tiles: int | tuple[int, int] | None = None
27 | """Splits up the frame into multiple tiles. Helps if you're lacking in vram but models may behave differently."""
28 |
29 | tilesize: int | tuple[int, int] | None = None
30 | overlap: int | tuple[int, int] | None = None
31 |
32 | _static_kernel_radius = 2
33 |
34 | @inject_self
35 | def scale( # type: ignore
36 | self,
37 | clip: vs.VideoNode,
38 | width: int,
39 | height: int,
40 | shift: tuple[float, float] = (0, 0),
41 | **kwargs: Any,
42 | ) -> vs.VideoNode:
43 | if self.backend is None:
44 | self.backend = autoselect_backend()
45 |
46 | wclip, _ = expect_bits(clip, 32)
47 |
48 | from vsmlrt import calc_tilesize, inference, init_backend  # type: ignore[import-untyped]
49 |
50 | if self.overlap is None:
51 | overlap_w = overlap_h = 8
52 | else:
53 | overlap_w, overlap_h = (self.overlap, self.overlap) if isinstance(self.overlap, int) else self.overlap
54 |
55 | (tile_w, tile_h), (overlap_w, overlap_h) = calc_tilesize(
56 | tiles=self.tiles,
57 | tilesize=self.tilesize,
58 | width=wclip.width,
59 | height=wclip.height,
60 | multiple=1,
61 | overlap_w=overlap_w,
62 | overlap_h=overlap_h,
63 | )
64 |
65 | if tile_w % 1 != 0 or tile_h % 1 != 0:
66 | raise CustomValueError(f"Tile size must be divisible by 1 ({tile_w}, {tile_h})", self.__class__)
67 |
68 | backend = init_backend(backend=self.backend, trt_opt_shapes=(tile_w, tile_h))
69 |
70 | scaled = inference(
71 | limiter(wclip, func=self.__class__),
72 | network_path=str(SPath(self.model).resolve()),
73 | backend=backend,
74 | overlap=(overlap_w, overlap_h),
75 | tilesize=(tile_w, tile_h),
76 | )
77 | return self._finish_scale(scaled, clip, width, height, shift)
78 |
79 |
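A usage sketch for the class above, assuming `vsmlrt` and a suitable backend are installed; `clip` and the model path are illustrative:

```py
from vsscale.onnx import GenericOnnxScaler

# Hypothetical path to a 2x ONNX super-resolution model.
scaler = GenericOnnxScaler("models/2x_example.onnx", tiles=2)

# backend=None autoselects one (trt > cuda > directml > ncnn > cpu) with fp16=True.
upscaled = scaler.scale(clip, clip.width * 2, clip.height * 2)
```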
80 | def autoselect_backend(trt_args: KwargsT = {}, **kwargs: Any) -> Any:
81 | import os
82 |
83 | from vsmlrt import Backend
84 |
85 | fp16 = kwargs.pop("fp16", True)
86 |
87 | cuda = get_nvidia_version() is not None
88 | if cuda:
89 | if hasattr(core, "trt"):
90 | kwargs.update(trt_args)
91 | return Backend.TRT(fp16=fp16, **kwargs)
92 | elif hasattr(core, "ort"):
93 | return Backend.ORT_CUDA(fp16=fp16, **kwargs)
94 | else:
95 | return Backend.OV_GPU(fp16=fp16, **kwargs)
96 | else:
97 | if hasattr(core, "ort") and os.name == "nt":
98 | return Backend.ORT_DML(fp16=fp16, **kwargs)
99 | elif hasattr(core, "ncnn"):
100 | return Backend.NCNN_VK(fp16=fp16, **kwargs)
101 |
102 | return Backend.ORT_CPU(fp16=fp16, **kwargs) if hasattr(core, "ort") else Backend.OV_CPU(fp16=fp16, **kwargs)
103 |
104 |
105 | class _BaseArtCNN:
106 | _model: ClassVar[int]
107 | _func = "ArtCNN"
108 |
109 |
110 | @dataclass
111 | class BaseArtCNN(_BaseArtCNN, GenericScaler):
112 | backend: Any | None = None
113 | """
114 | vs-mlrt backend. Will attempt to autoselect the most suitable one with fp16=True if None.\n
115 | In order of trt > cuda > directml > ncnn > cpu.
116 | """
117 | chroma_scaler: KernelT | None = None
118 | """
119 | Scaler to upscale the chroma with.\n
120 | Necessary if you're trying to use one of the chroma models but aren't passing a 444 clip.\n
121 | Bilinear is probably the safest option to use.
122 | """
123 |
124 | tiles: int | tuple[int, int] | None = None
125 | """Splits up the frame into multiple tiles. Helps if you're lacking in vram but models may behave differently."""
126 | tilesize: int | tuple[int, int] | None = None
127 | overlap: int | tuple[int, int] | None = None
128 |
129 | _static_kernel_radius = 2
130 |
131 | @inject_self
132 | def scale( # type: ignore
133 | self,
134 | clip: vs.VideoNode,
135 | width: int | None = None,
136 | height: int | None = None,
137 | shift: tuple[float, float] = (0, 0),
138 | **kwargs: Any,
139 | ) -> vs.VideoNode:
140 | try:
141 | from vsmlrt import ArtCNN as mlrt_ArtCNN
142 | from vsmlrt import ArtCNNModel
143 | except ImportError:
144 | raise DependencyNotFoundError("vsmlrt", self._func)
145 |
146 | clip_format = get_video_format(clip)
147 | chroma_model = self._model in [4, 5, 9]
148 |
149 | # The chroma models aren't supposed to change the video dimensions, and API-wise this is more comfortable.
150 | if width is None or height is None:
151 | if chroma_model:
152 | width = clip.width
153 | height = clip.height
154 | else:
155 | raise CustomValueError("You have to pass height and width if not using a chroma model.", self._func)
156 |
157 | if chroma_model and clip_format.color_family != vs.YUV:
158 | raise CustomValueError("ArtCNN Chroma models need YUV input.", self._func)
159 |
160 | if not chroma_model and clip_format.color_family not in (vs.YUV, vs.GRAY):
161 | raise CustomValueError("Regular ArtCNN models need YUV or GRAY input.", self._func)
162 |
163 | if chroma_model and (clip_format.subsampling_h != 0 or clip_format.subsampling_w != 0):
164 | if self.chroma_scaler is None:
165 | raise CustomValueError(
166 | "ArtCNN needs a non subsampled clip. Either pass one or set `chroma_scaler`.", self._func
167 | )
168 |
169 | clip = Kernel.ensure_obj(self.chroma_scaler).resample(
170 | clip, clip_format.replace(subsampling_h=0, subsampling_w=0)
171 | )
172 |
173 | if self._model not in ArtCNNModel.__members__.values():
174 | raise NotFoundEnumValue(f'Invalid model: \'{self._model}\'. Please update \'vsmlrt\'!', self._func)
175 |
176 | wclip = get_y(clip) if not chroma_model else clip
177 |
178 | if self.backend is None:
179 | self.backend = autoselect_backend()
180 |
181 | scaled = mlrt_ArtCNN(
182 | limiter(depth(wclip, 32), func=self._func),
183 | self.tiles,
184 | self.tilesize,
185 | self.overlap,
186 | ArtCNNModel(self._model),
187 | backend=self.backend,
188 | )
189 |
190 | return self._finish_scale(scaled, wclip, width, height, shift)
191 |
192 |
193 | class ArtCNN(BaseArtCNN):
194 | """
195 | Super-Resolution Convolutional Neural Networks optimised for anime.
196 |
197 | Defaults to C16F64.
198 | """
199 |
200 | _model = 2
201 |
202 | class C4F32(BaseArtCNN):
203 | """
204 | This has 4 internal convolution layers with 32 filters each.\n
205 | Use this if you need an even faster model.
206 | """
207 |
208 | _model = 0
209 |
210 | class C4F32_DS(BaseArtCNN):
211 | """The same as C4F32 but intended to also sharpen and denoise."""
212 |
213 | _model = 1
214 |
215 | class C16F64(BaseArtCNN):
216 | """
217 | The current default model. Looks decent and is very fast. Good for AA purposes.\n
218 | This has 16 internal convolution layers with 64 filters each.
219 | """
220 |
221 | _model = 2
222 |
223 | class C16F64_DS(BaseArtCNN):
224 | """The same as C16F64 but intended to also sharpen and denoise."""
225 |
226 | _model = 3
227 |
228 | class C4F32_Chroma(BaseArtCNN):
229 | """
230 | The smaller of the two chroma models.\n
231 | These don't double the input clip; instead they try to enhance the chroma using luma information.
232 | """
233 |
234 | _model = 4
235 |
236 | class C16F64_Chroma(BaseArtCNN):
237 | """
238 | The bigger of the two chroma models.\n
239 | These don't double the input clip; instead they try to enhance the chroma using luma information.
240 | """
241 |
242 | _model = 5
243 |
244 | class R16F96(BaseArtCNN):
245 | """
246 | The biggest model. Can compete with or outperform Waifu2x Cunet.\n
247 | Also quite a bit slower, but less heavy on VRAM.
248 | """
249 |
250 | _model = 6
251 |
252 | class R8F64(BaseArtCNN):
253 | """
254 | A smaller and faster version of R16F96 that remains very competitive.
255 | """
256 |
257 | _model = 7
258 |
259 | class R8F64_DS(BaseArtCNN):
260 | """The same as R8F64 but intended to also sharpen and denoise."""
261 |
262 | _model = 8
263 |
264 | class R8F64_Chroma(BaseArtCNN):
265 | """
266 | The new and fancy big chroma model.
267 | These don't double the input clip; instead they try to enhance the chroma using luma information.
268 | """
269 |
270 | _model = 9
271 |
--------------------------------------------------------------------------------
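A sketch of typical ArtCNN usage, assuming `clip` is a YUV clip and vs-mlrt is installed; the model and parameter choices here are only illustrative:

```py
from vskernels import Bilinear
from vsscale.onnx import ArtCNN

# Luma doubler (C16F64 is the default model); width and height are required here.
doubled = ArtCNN().scale(clip, clip.width * 2, clip.height * 2)

# Chroma model: keeps the dimensions and only enhances the chroma.
# chroma_scaler upsamples subsampled input to 4:4:4 first.
chroma_fixed = ArtCNN.C16F64_Chroma(chroma_scaler=Bilinear).scale(clip)
```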
/vsscale/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Jaded-Encoding-Thaumaturgy/vs-scale/c3006ae8e6e48be9168e84a6af234cc7eaf9da43/vsscale/py.typed
--------------------------------------------------------------------------------
/vsscale/rescale.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from functools import cached_property, wraps
4 | from typing import Any, Callable, TypeVar
5 |
6 | from vsexprtools import norm_expr
7 | from vskernels import Bilinear, BorderHandling, Hermite, Kernel, KernelT, Point, Scaler, ScalerT
8 | from vskernels.types import LeftShift, TopShift
9 | from vsmasktools import KirschTCanny, based_diff_mask
10 | from vsmasktools.utils import _get_region_expr
11 | from vstools import (
12 | ColorRange, DitherType, FieldBased, FieldBasedT, FrameRangeN, FrameRangesN, check_variable,
13 | core, depth, get_peak_value, get_y, join, limiter, replace_ranges, split, vs
14 | )
15 |
16 | from .helpers import BottomCrop, CropRel, LeftCrop, RightCrop, ScalingArgs, TopCrop
17 | from .onnx import ArtCNN
18 |
19 | __all__ = [
20 | 'Rescale',
21 | 'RescaleBase',
22 | 'RescaleT',
23 | ]
24 |
25 | RescaleT = TypeVar('RescaleT', bound="RescaleBase")
26 |
27 |
28 | class RescaleBase:
29 | descale_args: ScalingArgs
30 | field_based: FieldBased | None
31 |
32 | def __init__(
33 | self,
34 | clip: vs.VideoNode,
35 | /,
36 | kernel: KernelT,
37 | upscaler: ScalerT = ArtCNN,
38 | downscaler: ScalerT = Hermite(linear=True),
39 | field_based: FieldBasedT | bool | None = None,
40 | border_handling: int | BorderHandling = BorderHandling.MIRROR
41 | ) -> None:
42 | assert check_variable(clip, self.__class__)
43 |
44 | self.clipy, *chroma = split(clip)
45 | self.chroma = chroma
46 |
47 | self.kernel = Kernel.ensure_obj(kernel)
48 | self.upscaler = Scaler.ensure_obj(upscaler)
49 |
50 | self.downscaler = Scaler.ensure_obj(downscaler)
51 |
52 | self.field_based = FieldBased.from_param(field_based)
53 |
54 | self.border_handling = BorderHandling(int(border_handling))
55 |
56 | def __delattr__(self, __name: str) -> None:
57 | match __name:
58 | case 'descale':
59 | self._trydelattr('rescale')
60 | self._trydelattr('doubled')
61 | case 'doubled':
62 | self._trydelattr('upscale')
63 | case _:
64 | pass
65 | delattr(self, __name)
66 |
67 | def _trydelattr(self, attr: str) -> None:
68 | try:
69 | delattr(self, attr)
70 | except AttributeError:
71 | pass
72 |
73 | @staticmethod
74 | def _apply_field_based(
75 | function: Callable[[RescaleT, vs.VideoNode], vs.VideoNode]
76 | ) -> Callable[[RescaleT, vs.VideoNode], vs.VideoNode]:
77 | @wraps(function)
78 | def wrap(self: RescaleT, clip: vs.VideoNode) -> vs.VideoNode:
79 | if self.field_based:
80 | clip = self.field_based.apply(clip)
81 | clip = function(self, clip)
82 | return FieldBased.PROGRESSIVE.apply(clip)
83 | else:
84 | return function(self, clip)
85 | return wrap
86 |
87 | @staticmethod
88 | def _add_props(
89 | function: Callable[[RescaleT, vs.VideoNode], vs.VideoNode]
90 | ) -> Callable[[RescaleT, vs.VideoNode], vs.VideoNode]:
91 | @wraps(function)
92 | def wrap(self: RescaleT, clip: vs.VideoNode) -> vs.VideoNode:
93 | w, h = (
94 | f"{int(d)}" if d.is_integer() else f"{d:.2f}"
95 | for d in [self.descale_args.src_width, self.descale_args.src_height]
96 | )
97 | return function(self, clip).std.SetFrameProp(
98 | "Rescale" + function.__name__.split('_')[-1].capitalize() + 'From',
99 | data=f'{self.kernel.__class__.__name__} - {w} x {h}'
100 | )
101 | return wrap
102 |
103 | @_add_props
104 | @_apply_field_based
105 | def _generate_descale(self, clip: vs.VideoNode) -> vs.VideoNode:
106 | return self.kernel.descale(
107 | clip,
108 | self.descale_args.width, self.descale_args.height,
109 | **self.descale_args.kwargs(),
110 | border_handling=self.border_handling
111 | )
112 |
113 | @_add_props
114 | @_apply_field_based
115 | def _generate_rescale(self, clip: vs.VideoNode) -> vs.VideoNode:
116 | return self.kernel.scale(
117 | clip,
118 | self.clipy.width, self.clipy.height,
119 | **self.descale_args.kwargs(),
120 | border_handling=self.border_handling
121 | )
122 |
123 | @_add_props
124 | def _generate_doubled(self, clip: vs.VideoNode) -> vs.VideoNode:
125 | return self.upscaler.multi(clip, 2)
126 |
127 | @_add_props
128 | def _generate_upscale(self, clip: vs.VideoNode) -> vs.VideoNode:
129 | return self.downscaler.scale(
130 | clip,
131 | self.clipy.width, self.clipy.height,
132 | **self.descale_args.kwargs(clip)
133 | )
134 |
135 | @cached_property
136 | def descale(self) -> vs.VideoNode:
137 | return self._generate_descale(self.clipy)
138 |
139 | @cached_property
140 | def rescale(self) -> vs.VideoNode:
141 | return self._generate_rescale(self.descale)
142 |
143 | @cached_property
144 | def doubled(self) -> vs.VideoNode:
145 | return self._generate_doubled(self.descale)
146 |
147 | @cached_property
148 | def upscale(self) -> vs.VideoNode:
149 | """Returns the upscaled clip"""
150 | return join(
151 | self._generate_upscale(self.doubled),
152 | *self.chroma
153 | ).std.CopyFrameProps(self.clipy, '_ChromaLocation')
154 |
155 |
156 | class Rescale(RescaleBase):
157 | """
158 | Rescale wrapper
159 |
160 | Example usage:
161 | Basic 720p rescale:
162 | ```py
163 | from vsscale import Rescale
164 | from vskernels import Bilinear
165 |
166 | rs = Rescale(clip, 720, Bilinear)
167 | final = rs.upscale
168 | ```
169 |
170 | Adding aa and dehalo on doubled clip:
171 | ```py
172 | from vsaa import based_aa
173 | from vsdehalo import fine_dehalo
174 |
175 | aa = based_aa(rs.doubled, supersampler=False)
176 | dehalo = fine_dehalo(aa, ...)
177 |
178 | rs.doubled = dehalo
179 | ```
180 |
181 | Loading line_mask and credit_mask:
182 | ```py
183 | from vsmasktools import diff_creditless_oped
184 | from vsexprtools import ExprOp
185 |
186 | rs.default_line_mask()
187 |
188 | oped_credit_mask = diff_creditless_oped(...)
189 | credit_mask = rs.default_credit_mask(thr=0.209, ranges=(200, 300), postfilter=4)
190 | rs.credit_mask = ExprOp.ADD.combine(oped_credit_mask, credit_mask)
191 | ```
192 |
193 | Fractional rescale:
194 | ```py
195 | from vsscale import Rescale
196 | from vskernels import Bilinear
197 |
198 | # Forcing the height to a float will ensure a fractional descale
199 | rs = Rescale(clip, 800.0, Bilinear)
200 | >>> rs.descale_args
201 | ScalingArgs(
202 | width=1424, height=800, src_width=1422.2222222222222, src_height=800.0,
203 | src_top=0.0, src_left=0.8888888888889142, mode='hw'
204 | )
205 |
206 | # while doing this will not
207 | rs = Rescale(clip, 800, Bilinear)
208 | >>> rs.descale_args
209 | ScalingArgs(width=1422, height=800, src_width=1422, src_height=800, src_top=0, src_left=0, mode='hw')
210 | ```
211 |
212 | Cropping is also supported:
213 | ```py
214 | from vsscale import Rescale
215 | from vskernels import Bilinear
216 |
217 | # Descaling while cropping the letterboxes at the top and bottom
218 | rs = Rescale(clip, 874, Bilinear, crop=(0, 0, 202, 202))
219 | >>> rs.descale_args
220 | ScalingArgs(
221 | width=1554, height=548, src_width=1554.0, src_height=547.0592592592592,
222 | src_top=0.4703703703703752, src_left=0.0, mode='hw'
223 | )
224 |
225 | # Same thing, but ensuring the width is fractionally descaled
226 | rs = Rescale(clip, 874.0, Bilinear, crop=(0, 0, 202, 202))
227 | >>> rs.descale_args
228 | ScalingArgs(
229 | width=1554, height=548, src_width=1553.7777777777778, src_height=547.0592592592592,
230 | src_top=0.4703703703703752, src_left=0.11111111111108585, mode='hw'
231 | )
232 | ```
233 | """
234 |
235 | def __init__(
236 | self,
237 | clip: vs.VideoNode,
238 | /,
239 | height: int | float,
240 | kernel: KernelT,
241 | upscaler: ScalerT = ArtCNN,
242 | downscaler: ScalerT = Hermite(linear=True),
243 | width: int | float | None = None,
244 | base_height: int | None = None,
245 | base_width: int | None = None,
246 | crop: tuple[LeftCrop, RightCrop, TopCrop, BottomCrop] = CropRel(),
247 | shift: tuple[TopShift, LeftShift] = (0, 0),
248 | field_based: FieldBasedT | bool | None = None,
249 | border_handling: int | BorderHandling = BorderHandling.MIRROR
250 | ) -> None:
251 | """Initialize the rescaling process.
252 |
253 | :param clip: Clip to be rescaled
254 | :param height: Height to be descaled to.
255 | Forcing the value to float will ensure a fractional descale
256 | :param kernel: Kernel used for descaling
257 | :param upscaler: Scaler that supports doubling, defaults to ArtCNN
258 | :param downscaler: Scaler used for downscaling the upscaled clip back to input res,
259 | defaults to Hermite(linear=True)
260 | :param width: Width to be descaled to. If None, automatically calculated from the height
261 | :param base_height: Integer height at which the clip will be contained.
262 | If None, automatically calculated from the height
263 | :param base_width: Integer width at which the clip will be contained.
264 | If None, automatically calculated from the width
265 | :param crop: Cropping values to apply before descale.
266 | The ratio descale height / source height will be preserved even after descale.
267 | The cropped area is restored when calling the `upscale` property.
268 | :param shift: Shifts to apply during descale and upscale, defaults to (0, 0)
269 | :param field_based: Whether the source is cross-converted or interlaced upscaled content
270 | :param border_handling: Adjust the way the clip is padded internally during the scaling process.
271 | Accepted values are:
272 | 0: Assume the image was resized with mirror padding.
273 | 1: Assume the image was resized with zero padding.
274 | 2: Assume the image was resized with extend padding,
275 | where the outermost row was extended infinitely far.
276 | Defaults to 0 (mirror)
277 | """
278 | self._line_mask: vs.VideoNode | None = None
279 | self._credit_mask: vs.VideoNode | None = None
280 | self._ignore_mask: vs.VideoNode | None = None
281 | self._crop = crop
282 | self._pre = clip
283 |
284 | self.descale_args = ScalingArgs.from_args(
285 | clip, height, width, base_height, base_width, shift[0], shift[1], crop, mode='hw'
286 | )
287 |
288 | super().__init__(clip, kernel, upscaler, downscaler, field_based, border_handling)
289 |
290 | if self._crop > (0, 0, 0, 0):
291 | self.clipy = self.clipy.std.Crop(*self._crop)
292 |
293 | def _generate_descale(self, clip: vs.VideoNode) -> vs.VideoNode:
294 | if not self._ignore_mask:
295 | return super()._generate_descale(clip)
296 |
297 | @self._add_props
298 | @self._apply_field_based
299 | def _generate_descale_ignore_mask(self: Rescale, clip: vs.VideoNode) -> vs.VideoNode:
300 | assert self._ignore_mask
301 |
302 | self.descale_args.mode = 'h'
303 |
304 | descale_h = self.kernel.descale(
305 | clip,
306 | None, self.descale_args.height,
307 | **self.descale_args.kwargs(),
308 | border_handling=self.border_handling,
309 | ignore_mask=self._ignore_mask
310 | )
311 |
312 | self.descale_args.mode = 'w'
313 |
314 | descale_w = self.kernel.descale(
315 | descale_h,
316 | self.descale_args.width, None,
317 | **self.descale_args.kwargs(),
318 | border_handling=self.border_handling,
319 | ignore_mask=Point.scale(self._ignore_mask, height=descale_h.height)
320 | )
321 |
322 | self.descale_args.mode = 'hw'
323 |
324 | return descale_w
325 |
326 | return _generate_descale_ignore_mask(self, clip)
327 |
328 | def _generate_upscale(self, clip: vs.VideoNode) -> vs.VideoNode:
329 | upscale = super()._generate_upscale(clip)
330 |
331 | merged_mask = norm_expr([self.line_mask, self.credit_mask], "x y - 0 yrange_max clamp")
332 |
333 | upscale = core.std.MaskedMerge(self.clipy, upscale, merged_mask).std.CopyFrameProps(upscale)
334 |
335 | if self._crop > (0, 0, 0, 0):
336 | pre_y = get_y(self._pre)
337 | black = pre_y.std.BlankClip()
338 | mask = norm_expr(
339 | black, _get_region_expr(
340 | black, *self._crop, replace=f'{get_peak_value(black, False, ColorRange.FULL)} x'
341 | )
342 | )
343 |
344 | upscale = core.std.MaskedMerge(upscale.std.AddBorders(*self._crop), pre_y, mask)
345 |
346 | return upscale
347 |
348 | @property
349 | def line_mask(self) -> vs.VideoNode:
350 | lm = self._line_mask or self.clipy.std.BlankClip(
351 | color=get_peak_value(self.clipy, False, ColorRange.FULL)
352 | )
353 |
354 | if self.border_handling:
355 | px = (self.kernel.kernel_radius, ) * 4
356 | lm = norm_expr(lm, _get_region_expr(lm, *px, replace=f'{get_peak_value(lm, False, ColorRange.FULL)} x'))
357 |
358 | self._line_mask = lm
359 |
360 | return self._line_mask
361 |
362 | @line_mask.setter
363 | def line_mask(self, mask: vs.VideoNode | None) -> None:
364 | self._line_mask = (
365 | limiter(depth(
366 | mask, self.clipy, dither_type=DitherType.NONE, range_in=ColorRange.FULL, range_out=ColorRange.FULL
367 | ))
368 | if mask else mask
369 | )
370 |
371 | @line_mask.deleter
372 | def line_mask(self) -> None:
373 | self._line_mask = None
374 |
375 | @property
376 | def credit_mask(self) -> vs.VideoNode:
377 | if self._credit_mask:
378 | return self._credit_mask
379 | self.credit_mask = self.clipy.std.BlankClip()
380 | return self.credit_mask
381 |
382 | @credit_mask.setter
383 | def credit_mask(self, mask: vs.VideoNode | None) -> None:
384 | self._credit_mask = (
385 | limiter(depth(
386 | mask, self.clipy, dither_type=DitherType.NONE, range_in=ColorRange.FULL, range_out=ColorRange.FULL
387 | ))
388 | if mask else mask
389 | )
390 |
391 | @credit_mask.deleter
392 | def credit_mask(self) -> None:
393 | self._credit_mask = None
394 |
395 | @property
396 | def ignore_mask(self) -> vs.VideoNode:
397 | if self._ignore_mask:
398 | return self._ignore_mask
399 | self.ignore_mask = self.clipy.std.BlankClip(format=vs.GRAY8)
400 | return self.ignore_mask
401 |
402 | @ignore_mask.setter
403 | def ignore_mask(self, mask: vs.VideoNode | None) -> None:
404 | self._ignore_mask = (
405 | depth(mask, 8, dither_type=DitherType.NONE, range_in=ColorRange.FULL, range_out=ColorRange.FULL)
406 | if mask else mask
407 | )
408 |
409 | @ignore_mask.deleter
410 | def ignore_mask(self) -> None:
411 | self._ignore_mask = None
412 |
413 | def default_line_mask(
414 | self, clip: vs.VideoNode | None = None, scaler: ScalerT = Bilinear, **kwargs: Any
415 | ) -> vs.VideoNode:
416 | """
417 | Load a default Kirsch line mask in the class instance. Additionally, it is returned.
418 |
419 | :param clip: Reference clip, defaults to doubled clip if None.
420 | :param scaler: Scaler used for matching the source clip format, defaults to Bilinear
421 | :return: Generated mask.
422 | """
423 | scaler = Scaler.ensure_obj(scaler)
424 | scale_kwargs = scaler.kwargs if clip else self.descale_args.kwargs(self.doubled) | scaler.kwargs
425 |
426 | clip = clip if clip else self.doubled
427 |
428 | line_mask = KirschTCanny.edgemask(clip, **kwargs).std.Maximum().std.Minimum()
429 | line_mask = scaler.scale(line_mask, self.clipy.width, self.clipy.height, format=self.clipy.format, **scale_kwargs)
430 |
431 | self.line_mask = line_mask
432 |
433 | return self.line_mask
434 |
435 | def default_credit_mask(
436 | self, rescale: vs.VideoNode | None = None, src: vs.VideoNode | None = None,
437 | thr: float = 0.216, expand: int = 4,
438 | ranges: FrameRangeN | FrameRangesN | None = None, exclusive: bool = False,
439 | **kwargs: Any
440 | ) -> vs.VideoNode:
441 | """
442 | Load a credit mask by making a difference mask between the src and rescaled clips.
443 |
444 | :param rescale: Rescaled clip, defaults to rescaled instance clip
445 | :param src: Source clip, defaults to source instance clip
446 | :param thr: Threshold of the amplification expr, defaults to 0.216
447 | :param expand: Additional expand radius applied to the mask, defaults to 4
448 | :param ranges: If specified, ranges to apply the credit clip to
449 | :param exclusive: Use exclusive ranges (Default: False)
450 | :return: Generated mask
451 | """
452 | if not src:
453 | src = self.clipy
454 | if not rescale:
455 | rescale = self.rescale
456 |
457 | src, rescale = get_y(src), get_y(rescale)
458 |
459 | credit_mask = based_diff_mask(
460 | src, rescale, thr=thr, expand=expand, func=self.default_credit_mask, **kwargs
461 | )
462 |
463 | if ranges is not None:
464 | credit_mask = replace_ranges(credit_mask.std.BlankClip(keep=True), credit_mask, ranges, exclusive)
465 |
466 | self.credit_mask = credit_mask
467 |
468 | return self.credit_mask
469 |
--------------------------------------------------------------------------------
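Complementing the docstring examples above, a sketch of driving the mask properties manually; `clip` and `watermark_mask` are assumed inputs, and the parameter values are illustrative:

```py
from vskernels import Bilinear
from vsscale.rescale import Rescale

rs = Rescale(clip, 720, Bilinear, field_based=True)

# Exclude e.g. a hardsubbed region from the descale itself (GRAY8 mask, hypothetical).
rs.ignore_mask = watermark_mask

# Build the default masks, then grab the final merged upscale.
rs.default_line_mask()
rs.default_credit_mask(thr=0.216)
final = rs.upscale
```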
/vsscale/scale.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from dataclasses import dataclass, field
4 | from functools import partial
5 | from math import ceil, log2
6 | from typing import Any, ClassVar, Literal
7 |
8 | from vsexprtools import complexpr_available, expr_func, norm_expr
9 | from vskernels import Catrom, Hermite, LinearScaler, Mitchell, Scaler, ScalerT
10 | from vsrgtools import box_blur, gauss_blur
11 | from vstools import (
12 | DependencyNotFoundError, KwargsT, Matrix, MatrixT, PlanesT, ProcessVariableClip,
13 | ProcessVariableResClip, VSFunction, check_ref_clip, check_variable, check_variable_format,
14 | clamp, core, depth, fallback, get_nvidia_version, get_prop, inject_self, limiter, padder, vs
15 | )
16 |
17 | from .helpers import GenericScaler
18 |
19 | __all__ = [
20 | 'DPID',
21 | 'SSIM',
22 | 'DLISR',
23 | 'Waifu2x'
24 | ]
25 |
26 |
27 | @dataclass
28 | class DPID(GenericScaler):
29 | """Rapid, Detail-Preserving Image Downscaler for VapourSynth"""
30 |
31 | sigma: float = 0.1
32 | """
33 | The power factor of the range kernel. It can be used to tune the amplification of the weights of pixels
34 | that represent detail, ranging from a box filter over an emphasis of distinct pixels towards a selection
35 | of only the most distinct pixels.
36 | """
37 |
38 | ref: vs.VideoNode | ScalerT = Catrom
39 | """VideoNode or Scaler to obtain the downscaled reference for DPID."""
40 |
41 | planes: PlanesT = None
42 | """Sets which planes will be processed. Any unprocessed planes will be simply copied from ref."""
43 |
44 | def __post_init__(self) -> None:
45 | if isinstance(self.ref, vs.VideoNode):
46 | self._ref_scaler = self.ensure_scaler(self._scaler)
47 | else:
48 | self._ref_scaler = self.ensure_scaler(self.ref)
49 |
50 | @inject_self
51 | def scale( # type: ignore[override]
52 | self, clip: vs.VideoNode, width: int | None = None, height: int | None = None,
53 | shift: tuple[float, float] = (0, 0), **kwargs: Any
54 | ) -> vs.VideoNode:
55 | width, height = self._wh_norm(clip, width, height)
56 |
57 | ref = clip
58 |
59 | if isinstance(self.ref, vs.VideoNode):
60 | check_ref_clip(clip, self.ref)
61 | ref = self.ref
62 |
63 | if (ref.width, ref.height) != (width, height):
64 | ref = self._ref_scaler.scale(ref, width, height)
65 |
66 | kwargs |= {
67 | 'lambda_': self.sigma, 'planes': self.planes,
68 | 'src_left': shift[1], 'src_top': shift[0]
69 | } | kwargs | {'read_chromaloc': True}
70 |
71 | return core.dpid.DpidRaw(clip, ref, **kwargs)
72 |
73 | @inject_self.property
74 | def kernel_radius(self) -> int: # type: ignore
75 | return self._ref_scaler.kernel_radius
76 |
77 |
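A quick sketch of DPID as a detail-preserving downscaler; `clip` and the parameter values are illustrative:

```py
from vskernels import Mitchell
from vsscale.scale import DPID

# sigma near 0 approaches a box filter; higher values favor the most distinct pixels.
down = DPID(sigma=0.5, ref=Mitchell).scale(clip, 1280, 720)
```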
78 | class SSIM(LinearScaler):
79 | """
80 | SSIM downsampler is an image downscaling technique that aims to optimize
81 | for the perceptual quality of the downscaled results.
82 |
83 | Image downscaling is considered an optimization problem
84 | where the difference between the input and output images is measured
85 | using the famous Structural SIMilarity (SSIM) index.
86 |
87 | The solution is derived in closed form, which leads to a simple, efficient implementation.
88 | The downscaled images retain perceptually important features and details,
89 | resulting in an accurate and spatio-temporally consistent representation of the high resolution input.
90 | """
91 |
92 | def __init__(
93 | self, scaler: ScalerT = Hermite, smooth: int | float | VSFunction | None = None, **kwargs: Any
94 | ) -> None:
95 | """
96 | :param scaler: Scaler to be used for downscaling, defaults to Hermite.
97 | :param smooth: Image smoothening method.
98 | If you pass an int, it specifies the "radius" of the internally-used boxfilter,
99 | i.e. the window has a size of (2*smooth+1)x(2*smooth+1).
100 | If you pass a float, it specifies the "sigma" of gauss_blur,
101 | i.e. the standard deviation of gaussian blur.
102 | If you pass a function, it acts as a general smoother.
103 | Default uses a gaussian blur based on the scaler's kernel radius.
104 | """
105 | super().__init__(**kwargs)
106 |
107 | self.scaler = Hermite.from_param(scaler)
108 |
109 | if smooth is None:
110 | smooth = (self.scaler.kernel_radius + 1.0) / 3
111 |
112 | if callable(smooth):
113 | self.filter_func = smooth
114 | elif isinstance(smooth, int):
115 | self.filter_func = partial(box_blur, radius=smooth)
116 | elif isinstance(smooth, float):
117 | self.filter_func = partial(gauss_blur, sigma=smooth)
118 |
119 | def _linear_scale(
120 | self, clip: vs.VideoNode, width: int, height: int, shift: tuple[float, float] = (0, 0), **kwargs: Any
121 | ) -> vs.VideoNode:
122 | assert check_variable(clip, self.scale)
123 |
124 | l1 = self.scaler.scale(clip, width, height, shift, **(kwargs | self.kwargs))
125 |
126 | l1_sq, c_sq = [expr_func(x, 'x dup *') for x in (l1, clip)]
127 |
128 | l2 = self.scaler.scale(c_sq, width, height, shift, **(kwargs | self.kwargs))
129 |
130 | m, sl_m_square, sh_m_square = [self.filter_func(x) for x in (l1, l1_sq, l2)]
131 |
132 | if complexpr_available:
133 | merge_expr = f'z dup * SQ! x SQ@ - SQD! SQD@ {1e-6} < 0 y SQ@ - SQD@ / sqrt ?'
134 | else:
135 | merge_expr = f'x z dup * - {1e-6} < 0 y z dup * - x z dup * - / sqrt ?'
136 |
137 | r = expr_func([sl_m_square, sh_m_square, m], merge_expr)
138 |
139 | t = expr_func([r, m], 'x y *')
140 |
141 | return expr_func([self.filter_func(m), self.filter_func(r), l1, self.filter_func(t)], 'x y z * + a -')
142 |
143 | @inject_self.property
144 | def kernel_radius(self) -> int: # type: ignore
145 | return self.scaler.kernel_radius
146 |
147 |
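A minimal sketch of the SSIM downsampler; the smoothing choices mirror the `smooth` parameter semantics described in the constructor docstring, and `clip` is an assumed input:

```py
from vsscale.scale import SSIM

# Default: Hermite scaling with a gaussian smoother derived from the kernel radius.
down = SSIM().scale(clip, 1280, 720)

# An int smooth means a box blur radius; a float means a gaussian sigma.
down_box = SSIM(smooth=1).scale(clip, 1280, 720)
```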
148 | @dataclass
149 | class DLISR(GenericScaler):
150 | """Use Nvidia NGX Technology DLISR DNN to scale up nodes. https://developer.nvidia.com/rtx/ngx"""
151 |
152 | scaler: ScalerT = field(default_factory=lambda: DPID(0.5, Mitchell))
153 | """Scaler to use to downscale clip to desired resolution, if necessary."""
154 |
155 | matrix: MatrixT | None = None
156 | """Input clip's matrix. Set only if necessary."""
157 |
158 | device_id: int | None = None
159 | """Which cuda device to run this filter on."""
160 |
161 | @inject_self
162 | def scale( # type: ignore
163 | self, clip: vs.VideoNode, width: int | None = None, height: int | None = None,
164 | shift: tuple[float, float] = (0, 0), *, matrix: MatrixT | None = None, **kwargs: Any
165 | ) -> vs.VideoNode:
166 | width, height = self._wh_norm(clip, width, height)
167 |
168 | output = clip
169 |
170 | assert check_variable(clip, self.__class__)
171 |
172 | if width > clip.width or height > clip.height:
173 | if not matrix:
174 | matrix = Matrix.from_param_or_video(matrix or self.matrix, clip, False, self.__class__)
175 |
176 | output = self._kernel.resample(output, vs.RGBS, Matrix.RGB, matrix)
177 | output = limiter(output, func=self.__class__)
178 |
179 | max_scale = max(ceil(width / clip.width), ceil(height / clip.height))
180 |
181 | output = output.akarin.DLISR(max_scale, self.device_id)
182 |
183 | return self._finish_scale(output, clip, width, height, shift, matrix)
184 |
185 | _static_kernel_radius = 2
186 |
187 |
188 | class Waifu2xPadHelper(ProcessVariableResClip):
189 | def normalize(self, clip: vs.VideoNode, cast_to: tuple[int, int]) -> vs.VideoNode:
190 | padding = padder.mod_padding(cast_to)
191 |
192 | return padder.MIRROR(super().normalize(clip, cast_to), *padding).std.SetFrameProp('_PadValues', padding)
193 |
194 |
195 | class Waifu2xCropHelper(ProcessVariableClip[tuple[int, int, int, int, int, int]]):
196 | def get_key(self, frame: vs.VideoFrame) -> tuple[int, int, int, int, int, int]:
197 | return (frame.width, frame.height, *get_prop(frame, '_PadValues', list))
198 |
199 | def normalize(self, clip: vs.VideoNode, cast_to: tuple[int, int, int, int, int, int]) -> vs.VideoNode:
200 | width, height, *padding = cast_to
201 |
202 | return ProcessVariableResClip.normalize(
203 | self, clip, (width, height)).std.Crop(*(p * 2 for p in padding) # type: ignore[arg-type]
204 | )
205 |
206 |
207 | class Waifu2xScaleHelper(ProcessVariableResClip):
208 | def __init__(
209 | self, clip: vs.VideoNode, backend: type, backend_kwargs: KwargsT, kwargs: KwargsT, cache_size: int
210 | ) -> None:
211 | super().__init__(clip, cache_size=cache_size)
212 |
213 | self.kwargs = kwargs
214 | self.backend = backend
215 | self.backend_kwargs = backend_kwargs
216 |
217 | def normalize(self, clip: vs.VideoNode, cast_to: tuple[int, int]) -> vs.VideoNode:
218 | from vsmlrt import Waifu2x as MlrtWaifu2x # type: ignore
219 |
220 | if (max_shapes := self.backend_kwargs.get('max_shapes', None)):
221 | if cast_to[0] > max_shapes[0] or cast_to[1] > max_shapes[1]:
222 | self.backend_kwargs.update(max_shapes=cast_to)
223 |
224 | return MlrtWaifu2x( # type: ignore
225 | super().normalize(clip, cast_to), backend=self.backend(**self.backend_kwargs), **self.kwargs
226 | )
227 |
228 |
229 | class Waifu2xResizeHelper(ProcessVariableResClip):
230 | def __init__(
231 | self, clip: vs.VideoNode, width: int, height: int, planes: PlanesT, is_gray: bool,
232 | scaler: Scaler, do_padding: bool, w2x_kwargs: KwargsT, w2x_cache_size: int,
233 | backend: type, backend_kwargs: KwargsT
234 | ) -> None:
235 | super().__init__(clip, (width, height))
236 |
237 | self.width = width
238 | self.height = height
239 | self.planes = planes
240 | self.is_gray = is_gray
241 | self.scaler = scaler
242 | self.do_padding = do_padding
243 | self.w2x_kwargs = w2x_kwargs
244 | self.w2x_cache_size = w2x_cache_size
245 | self.backend = backend
246 | self.backend_kwargs = backend_kwargs.copy()
247 |
248 | def normalize(self, wclip: vs.VideoNode, cast_to: tuple[int, int]) -> vs.VideoNode:
249 | mult = max(int(log2(ceil(size))) for size in (self.width / cast_to[0], self.height / cast_to[1]))
250 |
251 | try:
252 | wclip = limiter(wclip, func=self.__class__)
253 | except vs.Error:
254 | wclip = norm_expr(wclip, 'x 0 1 clamp', planes=self.planes)
255 |
256 | for _ in range(mult):
257 | if self.do_padding:
258 | wclip = Waifu2xPadHelper.from_clip(wclip)
259 |
260 | wclip = Waifu2xScaleHelper(
261 | wclip, self.backend, self.backend_kwargs, self.w2x_kwargs, self.w2x_cache_size
262 | ).eval_clip()
263 |
264 | if self.do_padding:
265 | cropped = Waifu2xCropHelper.from_clip(wclip)
266 |
267 | try:
268 | wclip = norm_expr(cropped, 'x 0.5 255 / + 0 1 clamp', planes=self.planes)
269 | except RuntimeError:
270 | wclip = norm_expr(depth(cropped, 32), 'x 0.5 255 / + 0 max 1 min', planes=self.planes)
271 |
272 | return wclip
273 |
274 | def process(self, wclip: vs.VideoNode) -> vs.VideoNode:
275 | if self.is_gray:
276 | wclip = wclip.std.ShufflePlanes(0, vs.GRAY)
277 |
278 | return self.scaler.scale(wclip, self.width, self.height)
279 |
280 |
281 | class _BaseWaifu2x:
282 | _model: ClassVar[int]
283 | _needs_gray = False
284 | _static_args = dict(noise=-1, scale=2)
285 |
286 |
287 | @dataclass
288 | class BaseWaifu2x(_BaseWaifu2x, GenericScaler):
289 | """Use Waifu2x neural network to scale clips."""
290 |
291 | cuda: bool | Literal['trt'] | None = None
292 | """Whether to run this on cpu, gpu, or use trt technology. None will pick the fastest automatically."""
293 |
294 | num_streams: int | None = None
295 | """Number of gpu streams for the model."""
296 |
297 | fp16: bool = True
298 | """Whether to use float16 precision if available."""
299 |
300 | device_id: int = 0
301 | """Id of the cuda device to use."""
302 |
303 | matrix: MatrixT | None = None
304 | """Input clip's matrix. Set only if necessary."""
305 |
306 | tiles: int | tuple[int, int] | None = None
307 | """Process in separate tiles instead of the whole frame. Use if [V]RAM limited."""
308 |
309 | tilesize: int | tuple[int, int] | None = None
310 | """Manually specify the size of a single tile."""
311 |
312 | overlap: int | tuple[int, int] | None = None
313 | """Overlap for reducing blocking artifacts between tile borders."""
314 |
315 | backend_kwargs: KwargsT | None = None
316 | """Kwargs passed to create the backend instance."""
317 |
318 | dynamic_shape: bool | None = None
319 | """
320 | Use a single model for 0-max_shapes resolutions.
321 | None to automatically detect it. Will be True when previewing and TRT is available.
322 | """
323 |
324 | max_shapes: tuple[int, int] | None = (1936, 1088)
325 | """
326 | Max shape for a dynamic model when using TRT and variable resolution clip.
327 | This can be overridden if the frame size is bigger.
328 | """
329 |
330 | max_instances: int = 2
331 | """Maximum instances to spawn when scaling a variable resolution clip."""
332 |
333 | def __post_init__(self) -> None:
334 | cuda = self.cuda
335 |
336 | if self.dynamic_shape is None:
337 | try:
338 | from vspreview.api import is_preview
339 |
340 | self.dynamic_shape = is_preview()
341 | except Exception:
342 | self.dynamic_shape = False
343 |
344 | if cuda is True:
345 | self.fp16 = False
346 | elif self.fp16:
347 | self.fp16 = complexpr_available.fp16
348 |
349 | bkwargs = (self.backend_kwargs or KwargsT()) | KwargsT(fp16=self.fp16, device_id=self.device_id)
350 |
351 | # All this will eventually be in vs-nn
352 | if cuda is None:
353 | try:
354 | data: KwargsT = core.trt.DeviceProperties(self.device_id) # type: ignore
355 | memory = data.get('total_global_memory', 0)
356 | def_num_streams = clamp(data.get('async_engine_count', 1), 1, 2)
357 |
358 | cuda = 'trt'
359 |
360 | def_bkwargs = KwargsT(
361 | workspace=memory / (1 << 22) if memory else None,
362 | use_cuda_graph=True, use_cublas=True, use_cudnn=True,
363 | use_edge_mask_convolutions=True, use_jit_convolutions=True,
364 | static_shape=True, heuristic=True, output_format=int(self.fp16),
365 | num_streams=def_num_streams
366 | )
367 |
368 | if self._model >= Waifu2x.SwinUnetArt._model:
369 | def_bkwargs |= KwargsT(tf32=not self.fp16)
370 |
371 | bkwargs = def_bkwargs | bkwargs
372 |
373 | streams_info = 'OK' if bkwargs['num_streams'] == def_num_streams else 'MISMATCH'
374 |
375 | core.log_message(
376 | vs.MESSAGE_TYPE_DEBUG,
377 | f'Selected [{data.get("name", b"").decode("utf8")}] '
378 | f'with {f"{(memory / (1 << 30))}GiB" if memory else ""} of VRAM, '
379 | f'num_streams={def_num_streams} ({streams_info})'
380 | )
381 | except Exception:
382 | self.fp16 = False
383 | bkwargs['fp16'] = False
384 | cuda = get_nvidia_version() is not None
385 |
386 | if self.num_streams is not None:
387 | bkwargs.update(num_streams=self.num_streams)
388 | elif bkwargs.get('num_streams', None) is None:
389 | bkwargs.update(num_streams=fallback(self.num_streams, 1))
390 |
391 | self._cuda = cuda
392 | self._bkwargs = bkwargs
393 |
394 | super().__post_init__()
395 |
396 | @property
397 | def _backend(self) -> object:
398 | try:
399 | from vsmlrt import Backend
400 | except ModuleNotFoundError as e:
401 | raise DependencyNotFoundError(self.__class__, e)
402 |
403 | if self._cuda is True:
404 | if hasattr(core, 'ort'):
405 | return Backend.ORT_CUDA
406 |
407 | return Backend.OV_GPU
408 | elif self._cuda is False:
409 | if hasattr(core, 'ncnn'):
410 | return Backend.NCNN_VK
411 |
412 | if hasattr(core, 'ort'):
413 | return Backend.ORT_CPU
414 |
415 | return Backend.OV_CPU
416 |
417 | return Backend.TRT
418 |
419 | @inject_self.init_kwargs.clean
420 | def scale( # type:ignore
421 | self, clip: vs.VideoNode, width: int | None = None, height: int | None = None,
422 | shift: tuple[float, float] = (0, 0), **kwargs: Any
423 | ) -> vs.VideoNode:
424 | try:
425 | from vsmlrt import Backend
426 | except ModuleNotFoundError as e:
427 | raise DependencyNotFoundError(self.__class__, e)
428 |
429 | width, height = self._wh_norm(clip, width, height)
430 |
431 | wclip = clip
432 |
433 | assert check_variable_format(clip, self.scale)
434 |
435 | matrix = self.matrix
436 | is_gray = clip.format.color_family is vs.GRAY
437 | planes = 0 if is_gray else None
438 |
439 | _static_args = kwargs.pop('_static_args', self._static_args)
440 | force = _static_args.pop('force', False)
441 | do_scale = _static_args.get('scale') > 1
442 |
443 | bkwargs = self._bkwargs.copy()
444 |
445 | dynamic_shapes = self.dynamic_shape or (0 in (clip.width, clip.height)) or not bkwargs.get('static_shape', True)
446 |
447 | kwargs.update(tiles=self.tiles, tilesize=self.tilesize, overlap=self.overlap)
448 |
449 | if dynamic_shapes and self._backend is Backend.TRT:
450 | bkwargs.update(static_shape=False, opt_shapes=(64, 64), max_shapes=self.max_shapes)
451 |
452 | if (is_upscale := width > clip.width or height > clip.height or force):
453 | model = self._model
454 |
455 | if clip.format.color_family is vs.YUV:
456 | if not matrix:
457 | matrix = Matrix.from_param_or_video(matrix or self.matrix, clip, False, self.__class__)
458 |
459 | wclip = self._kernel.resample(wclip, vs.RGBH if self.fp16 else vs.RGBS, Matrix.RGB, matrix)
460 | else:
461 | wclip = depth(wclip, 16 if self.fp16 else 32, vs.FLOAT)
462 |
463 | if is_gray and model != 0:
464 | wclip = wclip.std.ShufflePlanes(0, vs.RGB)
465 |
466 | assert wclip.format
467 |
468 | if wclip.format.color_family is vs.RGB:
469 | if model == 0:
470 | model = 1
471 |
472 | wclip = Waifu2xResizeHelper(
473 | wclip, width, height, planes, is_gray, self._scaler,
474 | do_scale and self._model == Waifu2x.Cunet._model,
475 | KwargsT(
476 | **_static_args, model=model,
477 | preprocess=False, **kwargs
478 | ), self.max_instances, self._backend, bkwargs # type: ignore[arg-type]
479 | ).eval_clip()
480 |
481 | return self._finish_scale(wclip, clip, width, height, shift, matrix, is_upscale)
482 |
483 | _static_kernel_radius = 2
484 |
485 |
486 | class Waifu2x(BaseWaifu2x):
487 | _model = 6
488 |
489 | class AnimeStyleArt(BaseWaifu2x):
490 | _model = 0
491 |
492 | class Photo(BaseWaifu2x):
493 | _model = 2
494 |
495 | class UpConv7AnimeStyleArt(BaseWaifu2x):
496 | _model = 3
497 |
498 | class UpConv7Photo(BaseWaifu2x):
499 | _model = 4
500 |
501 | class UpResNet10(BaseWaifu2x):
502 | _model = 5
503 |
504 | class Cunet(BaseWaifu2x):
505 | _model = 6
506 |
507 | class SwinUnetArt(BaseWaifu2x):
508 | _model = 7
509 |
510 | class SwinUnetPhoto(BaseWaifu2x):
511 | _model = 8
512 |
513 | class SwinUnetPhotoV2(BaseWaifu2x):
514 | _model = 9
515 |
516 | class SwinUnetArtScan(BaseWaifu2x):
517 | _model = 10
518 |
--------------------------------------------------------------------------------
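A usage sketch for the Waifu2x wrapper; backend selection and tiling follow the dataclass fields documented above, and `clip` plus the parameter values are illustrative:

```py
from vsscale.scale import Waifu2x

# cuda=None picks the fastest available path (TRT if present).
doubled = Waifu2x(tiles=2, fp16=True).scale(clip, clip.width * 2, clip.height * 2)

# A specific model variant, e.g. SwinUnetArt.
doubled_swin = Waifu2x.SwinUnetArt().scale(clip, clip.width * 2, clip.height * 2)
```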
/vsscale/shaders.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from dataclasses import dataclass, field
4 | from math import ceil
5 | from pathlib import Path
6 | from typing import TYPE_CHECKING, Any, overload
7 |
8 | from vstools import (
9 | MISSING, CustomRuntimeError, FileWasNotFoundError, MissingT, core, expect_bits, get_user_data_dir, get_video_format,
10 | inject_self, join, vs
11 | )
12 |
13 | from .base import ShaderFileBase, ShaderFileCustom
14 | from .helpers import GenericScaler
15 |
16 | __all__ = [
17 | 'PlaceboShader',
18 |
19 | 'ShaderFile',
20 |
21 | 'FSRCNNXShader', 'FSRCNNXShaderT'
22 | ]
23 |
24 |
25 | class PlaceboShaderMeta(GenericScaler):
26 | shader_file: str | Path | ShaderFile
27 |
28 | _static_kernel_radius = 2
29 |
30 |
31 | @dataclass
32 | class PlaceboShaderBase(PlaceboShaderMeta):
33 | """Base placebo shader class."""
34 |
35 | chroma_loc: int | None = field(default=None, kw_only=True)
36 | matrix: int | None = field(default=None, kw_only=True)
37 | trc: int | None = field(default=None, kw_only=True)
38 | linearize: int | None = field(default=None, kw_only=True)
39 | sigmoidize: int | None = field(default=None, kw_only=True)
40 | sigmoid_center: float | None = field(default=None, kw_only=True)
41 | sigmoid_slope: float | None = field(default=None, kw_only=True)
42 | antiring: float | None = field(default=None, kw_only=True)
43 | filter_shader: str | None = field(default=None, kw_only=True)
44 | clamp: float | None = field(default=None, kw_only=True)
45 | blur: float | None = field(default=None, kw_only=True)
46 | taper: float | None = field(default=None, kw_only=True)
47 | radius: float | None = field(default=None, kw_only=True)
48 | param1: float | None = field(default=None, kw_only=True)
49 | param2: float | None = field(default=None, kw_only=True)
50 |
51 | def __post_init__(self) -> None:
52 | super().__post_init__()
53 |
54 | if not hasattr(self, 'shader_file'):
55 | raise CustomRuntimeError('You must specify a "shader_file"!', self.__class__)
56 |
57 | @inject_self
58 | def scale( # type: ignore
59 | self, clip: vs.VideoNode, width: int | None = None, height: int | None = None,
60 | shift: tuple[float, float] = (0, 0), **kwargs: Any
61 | ) -> vs.VideoNode:
62 | width, height = self._wh_norm(clip, width, height)
63 |
64 | output, _ = expect_bits(clip, 16)
65 |
66 | fmt = get_video_format(output)
67 |
68 | if fmt.num_planes == 1:
69 | if width > output.width or height > output.height:
70 | output = output.resize.Point(format=vs.YUV444P16)
71 | else:
72 | for div in (4, 2):
73 | if width % div == 0 and height % div == 0:
74 | blank = core.std.BlankClip(output, output.width // div, output.height // div, vs.GRAY16)
75 | break
76 | else:
77 | blank = output.std.BlankClip(vs.GRAY16)
78 |
79 | output = join(output, blank, blank)
80 |
81 | kwargs |= {
82 | 'shader': str(
83 | self.shader_file()
84 | if isinstance(self.shader_file, ShaderFile) else
85 | ShaderFile.CUSTOM(self.shader_file) # type: ignore
86 | ),
87 | 'chroma_loc': self.chroma_loc, 'matrix': self.matrix,
88 | 'trc': self.trc, 'linearize': self.linearize,
89 | 'sigmoidize': self.sigmoidize, 'sigmoid_center': self.sigmoid_center, 'sigmoid_slope': self.sigmoid_slope,
90 | 'antiring': self.antiring, 'filter': self.filter_shader, 'clamp': self.clamp,
91 | 'blur': self.blur, 'taper': self.taper, 'radius': self.radius,
92 | 'param1': self.param1, 'param2': self.param2,
93 | } | kwargs | {
94 | 'width': output.width * ceil(width / output.width),
95 | 'height': output.height * ceil(height / output.height)
96 | }
97 |
98 | if not kwargs['filter']:
99 | kwargs['filter'] = 'box' if fmt.num_planes == 1 else 'ewa_lanczos'
100 |
101 | if not Path(kwargs['shader']).exists():
102 | try:
103 | kwargs['shader'] = str(ShaderFile.CUSTOM(kwargs['shader'])) # type: ignore
104 | except FileWasNotFoundError:
105 | ...
106 |
107 | output = output.placebo.Shader(**kwargs)
108 |
109 | return self._finish_scale(output, clip, width, height, shift)
110 |
111 |
112 | @dataclass
113 | class PlaceboShader(PlaceboShaderBase):
114 | shader_file: str | Path
115 |
116 |
117 | class ShaderFile(ShaderFileBase):
118 | """Default shader files shipped with vsscale."""
119 |
120 | if not TYPE_CHECKING:
121 | CUSTOM = 'custom'
122 |
123 | FSRCNNX_x8 = 'FSRCNNX_x2_8-0-4-1.glsl'
124 | FSRCNNX_x16 = 'FSRCNNX_x2_16-0-4-1.glsl'
125 | FSRCNNX_x56 = 'FSRCNNX_x2_56-16-4-1.glsl'
126 |
127 | SSIM_DOWNSCALER = 'SSimDownscaler.glsl'
128 | SSIM_SUPERSAMPLER = 'SSimSuperRes.glsl'
129 |
130 | @overload
131 | def __call__(self) -> Path:
132 | ...
133 |
134 | @overload
135 | def __call__(self: ShaderFileCustom, file_name: str | Path) -> Path: # type: ignore
136 | ...
137 |
138 | def __call__(self, file_name: str | Path | MissingT = MISSING) -> Path:
139 | """Get a path from the shader member, name or path."""
140 |
141 | if self is not ShaderFile.CUSTOM:
142 | file_name = self.value
143 |
144 | if file_name is MISSING: # type: ignore
145 | raise TypeError("ShaderFile.__call__() missing 1 required positional argument: 'file_name'")
146 |
147 | file_name, cwd = Path(file_name), Path.cwd()
148 |
149 | assets_dirs = [
150 | file_name,
151 | cwd / file_name,
152 | cwd / '.shaders' / file_name,
153 | cwd / '_shaders' / file_name,
154 | cwd / '.assets' / file_name,
155 | cwd / '_assets' / file_name
156 | ]
157 |
158 | for asset_dir in assets_dirs:
159 | if asset_dir.is_file():
160 | return asset_dir
161 |
162 | mpv_dir = get_user_data_dir().parent / 'Roaming' / 'mpv' / 'shaders' / file_name
163 |
164 | if mpv_dir.is_file():
165 | return mpv_dir
166 |
167 | raise FileWasNotFoundError(f'"{file_name}" could not be found!', str(ShaderFile.CUSTOM))
168 |
169 |
170 | class FSRCNNXShader(PlaceboShaderBase):
171 | """Defaults FSRCNNX shaders shipped with vsscale."""
172 |
173 | shader_file = ShaderFile.FSRCNNX_x56
174 |
175 | @dataclass
176 | class x8(PlaceboShaderBase):
177 | shader_file = ShaderFile.FSRCNNX_x8
178 |
179 | @dataclass
180 | class x16(PlaceboShaderBase):
181 | shader_file = ShaderFile.FSRCNNX_x16
182 |
183 | @dataclass
184 | class x56(PlaceboShaderBase):
185 | shader_file = ShaderFile.FSRCNNX_x56
186 |
187 |
188 | FSRCNNXShaderT = type[PlaceboShaderBase] | PlaceboShaderBase
189 |
--------------------------------------------------------------------------------
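A sketch of the shader classes above; `clip` is an assumed input and the custom shader file name is hypothetical:

```py
from vsscale.shaders import FSRCNNXShader, PlaceboShader

# Bundled FSRCNNX variant with the strongest weights.
doubled = FSRCNNXShader.x56().scale(clip, clip.width * 2, clip.height * 2)

# Custom shader; the path is resolved against cwd, ./.shaders, ./_shaders,
# ./.assets, ./_assets and the mpv shaders folder.
custom = PlaceboShader("my_shader.glsl").scale(clip, 1920, 1080)
```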
/vsscale/types.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from dataclasses import dataclass, field
4 | from typing import NamedTuple
5 |
6 | from vsexprtools import expr_func
7 | from vskernels import Kernel
8 | from vstools import (
9 | ComparatorFunc, CustomIntEnum, CustomNotImplementedError, CustomStrEnum, Resolution, VSMapValue, merge_clip_props,
10 | vs
11 | )
12 |
13 | __all__ = [
14 | 'DescaleAttempt',
15 | 'DescaleMode', 'DescaleResult', 'PlaneStatsKind',
16 | 'DescaleModeWithInfo'
17 | ]
18 |
19 |
20 | class DescaleAttempt(NamedTuple):
21 | """Tuple representing a descale attempt."""
22 |
23 | resolution: Resolution
24 | """The native resolution."""
25 |
26 | descaled: vs.VideoNode
27 | """Descaled frame in native resolution."""
28 |
29 | rescaled: vs.VideoNode
30 | """Descaled frame reupscaled with the same kernel."""
31 |
32 | diff: vs.VideoNode
33 | """The subtractive difference between the original and descaled frame."""
34 |
35 | kernel: Kernel
36 | """Kernel used for descaling."""
37 |
38 | @classmethod
39 | def from_args(
40 | cls, clip: vs.VideoNode, width: int, height: int, shift: tuple[float, float],
41 | kernel: Kernel, mode: DescaleModeWithInfo, **kwargs: VSMapValue
42 | ) -> DescaleAttempt:
43 | """Get a DescaleAttempt from args. Calculate difference nodes too."""
44 |
45 | descaled = kernel.descale(clip, width, height, shift)
46 | descaled = descaled.std.SetFrameProps(**kwargs)
47 |
48 | rescaled = kernel.scale(descaled, clip.width, clip.height)
49 |
50 | diff = expr_func([rescaled, clip], 'x y - abs').std.PlaneStats(
51 | None, prop=DescaleMode.PlaneDiff.prop_key
52 | )
53 |
54 | if mode.mode in {DescaleMode.KernelDiff, DescaleMode.KernelDiffMin, DescaleMode.KernelDiffMax}:
55 | diff_props = rescaled.std.PlaneStats(clip, prop=DescaleMode.KernelDiff.prop_key)
56 |
57 | diff = merge_clip_props(diff, diff_props)
58 |
59 | resolution = Resolution(width, height)
60 |
61 | return DescaleAttempt(resolution, descaled, rescaled, diff, kernel)
62 |
63 | def __hash__(self) -> int:
64 | return hash(f'{self.resolution}_{self.kernel.__class__.__name__}')
65 |
66 |
67 | @dataclass
68 | class DescaleResult:
69 | """Dataclass representing a complete result of vsscale.descale."""
70 |
71 | descaled: vs.VideoNode
72 | """The descaled clip. Can be a variable resolution."""
73 |
74 | rescaled: vs.VideoNode
75 | """
76 | The descaled clip reupscaled to the source resolution using the same kernel used to descale.
77 | Can be a variable resolution clip.
78 | """
79 |
80 | upscaled: vs.VideoNode | None
81 | """The descaled clip reupscaled using the given upscaler."""
82 |
83 | error_mask: vs.VideoNode | None
84 | """
85 | The descale error mask. This catches the big differences
86 | between the source clip and the rescaled clip as a mask.
87 | If no \"mask\" is passed, this attribute will be None.
88 | """
89 |
90 | pproc_mask: vs.VideoNode | None
91 | """
92 | The post-processing mask. This is the second mask passed to "mask".
93 | If no "mask" is passed, this attribute will be None.
94 | """
95 |
96 | attempts: list[DescaleAttempt]
97 | """
98 | Descale attempts made. These are used to determine
99 | the correct kernel if multiple "Kernels" were passed.
100 | """
101 |
102 | out: vs.VideoNode
103 | """The final clip that is returned during regular usage with \"result=False\"."""
104 |
105 |
106 | class PlaneStatsKind(CustomStrEnum):
107 | """Type of PlaneStats comparing to use."""
108 |
109 | AVG = 'Average'
110 | MIN = 'Min'
111 | MAX = 'Max'
112 | DIFF = 'Diff'
113 |
114 |
115 | class DescaleMode(CustomIntEnum):
116 | """Descale modes for vsscale.descale."""
117 |
118 | PlaneDiff = 0
119 | """Simple PlaneStatsDiff between original and descaled."""
120 |
121 | PlaneDiffMax = 1
122 | """Get the video with the maximum absolute difference from original."""
123 |
124 | PlaneDiffMin = 2
125 | """Get the video with the minimum absolute difference from original."""
126 |
127 | KernelDiff = 3
128 | """Simple PlaneStats between original and descaled kernels differences."""
129 |
130 | KernelDiffMax = 4
131 | """Get the video descaled with the kernel with the maximum absolute difference from original."""
132 |
133 | KernelDiffMin = 5
134 | """Get the video descaled with the kernel with the minimum absolute difference from original."""
135 |
136 | def __call__(self, thr: float = 5e-8, op: ComparatorFunc | None = None) -> DescaleModeWithInfo:
137 | return DescaleModeWithInfo(self, thr) if op is None else DescaleModeWithInfo(self, thr, op)
138 |
139 | @property
140 | def prop_key(self) -> str:
141 | """Get the props key for this DescaleMode."""
142 |
143 | if self.is_average:
144 | return 'PlaneStatsPAvg'
145 | elif self.is_kernel_diff:
146 | return 'PlaneStatsKDiff'
147 |
148 | raise CustomNotImplementedError
149 |
150 | @property
151 | def res_op(self) -> ComparatorFunc:
152 | """Get the operator for calculating sort operation between two resolutions."""
153 |
154 | if self in {self.PlaneDiff, self.KernelDiff, self.PlaneDiffMax, self.KernelDiffMax}:
155 | return max
156 |
157 | if self in {self.PlaneDiffMin, self.KernelDiffMin}:
158 | return min
159 |
160 | raise CustomNotImplementedError
161 |
162 | @property
163 | def diff_op(self) -> ComparatorFunc:
164 | """Get the operator for calculating sort operation between two props."""
165 |
166 | if self in {self.PlaneDiff, self.KernelDiff, self.PlaneDiffMin, self.KernelDiffMin}:
167 | return min
168 |
169 | if self in {self.KernelDiffMax, self.PlaneDiffMax}:
170 | return max
171 |
172 | raise CustomNotImplementedError
173 |
174 | @property
175 | def is_average(self) -> bool:
176 | """Whether this DescaleMode is of PlaneDiff kind."""
177 |
178 | return self in {self.PlaneDiff, self.PlaneDiffMin, self.PlaneDiffMax}
179 |
180 | @property
181 | def is_kernel_diff(self) -> bool:
182 | """Whether this DescaleMode is of KernelDiff kind."""
183 |
184 | return self in {self.KernelDiff, self.KernelDiffMin, self.KernelDiffMax}
185 |
186 | def prop_value(self, kind: PlaneStatsKind) -> str:
187 | """Get props key for getting the value of the PlaneStatsKind."""
188 |
189 | return f'{self.prop_key}{kind.value}'
190 |
191 | def __hash__(self) -> int:
192 | return hash(self._name_)
193 |
194 |
195 | @dataclass
196 | class DescaleModeWithInfo:
197 | mode: DescaleMode
198 | """Actual descale mode used for descaling."""
199 |
200 | thr: float = field(default=5e-8)
201 | """Diff threshold."""
202 |
203 | op: ComparatorFunc = field(default_factory=lambda: max)
204 | """Operator used for generic sorting."""
205 |
--------------------------------------------------------------------------------
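A small sketch of how the mode enum composes with its info wrapper, directly following the `__call__` definition above:

```py
from vsscale.types import DescaleMode

# Calling a mode yields a DescaleModeWithInfo carrying the threshold and comparator.
mode = DescaleMode.PlaneDiffMin(thr=5e-8)

assert mode.mode is DescaleMode.PlaneDiffMin
assert mode.thr == 5e-8
```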