├── .gitignore ├── .readthedocs.yaml ├── LICENSE.txt ├── README.md ├── docs ├── Makefile ├── _static │ └── css │ │ └── piccolo_overrides.css ├── changelog.rst ├── conf.py ├── index.rst ├── make.bat ├── scan_interlaced_deep_dive.rst └── usage.rst ├── pyproject.toml ├── requirements.ci.txt ├── requirements.docs.txt ├── setup.cfg └── vsfieldkit ├── __init__.py ├── deinterlacing.py ├── interlacing.py ├── kernels.py ├── output.py ├── py.typed ├── repair.py ├── scanning.py ├── types.py ├── util.py └── vapoursynth.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # pytype static type analyzer 135 | .pytype/ 136 | 137 | # Cython debug symbols 138 | cython_debug/ 139 | 140 | # JetBrains IDEs 141 | .idea 142 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | build: 9 | os: ubuntu-20.04 10 | tools: 11 | python: '3.10' 12 | 13 | sphinx: 14 | configuration: docs/conf.py 15 | 16 | python: 17 | install: 18 | - requirements: requirements.docs.txt 19 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Justin Turner Arthur 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following 
conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # vsfieldkit 2 | Collection of functions for working with interlaced content in 3 | [VapourSynth](http://www.vapoursynth.com/). Most functions don't have any 4 | external dependencies. 5 | 6 | Included functions: 7 | `vsfieldkit.annotate_bobbed_fields(clip, original_clip)` 8 | `vsfieldkit.assume_bff(clip)` 9 | `vsfieldkit.assume_progressive(clip)` 10 | `vsfieldkit.assume_tff(clip)` 11 | `vsfieldkit.bob(clip)` 12 | `vsfieldkit.double(clip)` 13 | `vsfieldkit.fill_analog_frame_ends(clip)` 14 | (requires FillBorders and either ContinuityFixer or EdgeFixer plugins) 15 | `vsfieldkit.group_by_combed(clip)` 16 | `vsfieldkit.group_by_field_order(clip)` 17 | `vsfieldkit.resample_as_progressive(clip)` 18 | `vsfieldkit.scan_interlaced(clip)` 19 | `vsfieldkit.telecine(clip)` 20 | `vsfieldkit.upsample_as_progressive(clip)` 21 | `vsfieldkit.weave_fields(clip)` 22 | 23 | See [the documentation](https://vsfieldkit.justinarthur.com/) for more information. 
24 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/_static/css/piccolo_overrides.css: -------------------------------------------------------------------------------- 1 | div#right_sidebar { 2 | width: 18rem; 3 | } 4 | @media (min-width: 53rem) { 5 | div#top_nav nav p.mobile_search_link { 6 | display: none; 7 | } 8 | } 9 | @media (max-width: 53rem) { 10 | div#top_nav nav div.searchbox_wrapper { 11 | display: none; 12 | } 13 | } 14 | @media (max-width: 53rem) { 15 | div.document { 16 | margin-left: 0; 17 | margin-right: 0; 18 | } 19 | } 20 | @media (max-width: 53rem) { 21 | div.sphinxsidebar { 22 | display: none; 23 | } 24 | } 25 | @media (max-width: 53rem) { 26 | div#right_sidebar { 27 | display: none; 28 | } 29 | } 30 | @media (max-width: 53rem) { 31 | div.button_nav_wrapper { 32 | margin-left: 0; 33 | margin-right: 0; 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | Changelog 2 | ========= 3 | 2.1.0 
4 | ----- 5 | * :py:func:`vsfieldkit.prepare_nnedi3_chroma_upsampler` can now use the znedi3 6 | or nnedi3cl plugin in addition to plain nnedi3 as before. 7 | * Add compatibility with VapourSynth>=63 Python wrapper, broken due to renamed 8 | ``PresetVideoFormat`` enum. 9 | 10 | 2.0.1 11 | ----- 12 | Fix :py:func:`vsfieldkit.resample_as_progressive`'s ``avoid_chroma_shift`` 13 | option. It was having an opposite effect from intended and resampling with even 14 | lossier shift. 15 | 16 | The new method avoids the subsampling grid altogether by resampling the Cb and 17 | Cr planes separately, avoiding unnecessary re-siting even with nearest-neighbor 18 | resampling. 19 | 20 | 2.0.0 21 | ----- 22 | New Features 23 | ^^^^^^^^^^^^ 24 | * Interlacing! Targeting deinterlacer testers and engineers in the broadcast 25 | space who've been instructed to avoid soft telecine. Two new functions: 26 | 27 | * :py:func:`vsfieldkit.telecine` 28 | * :py:func:`vsfieldkit.weave_fields` 29 | 30 | * [Re]sampling kernels to supplement the out of the box vapoursynth.resize 31 | functions, but specialized for vsfieldkit tasks. They can be found in the 32 | :py:mod:`vsfieldkit.kernels` module. Includes an nnedi3 kernel-maker for use 33 | as a chroma upsampler. 34 | 35 | * :py:func:`vsfieldkit.annotate_bobbed_fields` for retro-actively adding a 36 | property to bobbed frames noting the field (top or bottom) they came from. 37 | 38 | * :py:func:`vsfieldkit.output_frame_inferred_y4m` for outputting yuv4mpeg2 39 | (y4m) data with metadata derived from the first frame's properties, allowing 40 | for interlaced output, SAR, and chroma siting as available. 41 | 42 | Changed APIs 43 | ^^^^^^^^^^^^ 44 | 45 | * :py:func:`vsfieldkit.resample_as_progressive` ``kernel`` argument renamed 46 | to ``subsampling_kernel`` for clarity. ``upsampling_kernel`` argument added. 47 | It also now fakes luma-co-sited chroma during upsampling to avoid lossy 48 | chroma re-siting. 
49 | * :py:func:`vsfieldkit.resample_as_progressive` and 50 | :py:func:`vsfieldkit.upsample_as_progressive` now default to Spline 36 for 51 | any chroma subsampling or upsampling using the new 52 | :py:func:`vsfieldkit.kernels.resample_chroma_with_spline36` . 53 | * :py:func:`vsfieldkit.upsample_as_progressive` now has 54 | ``upsample_horizontally`` argument. Defaults to ``False``. 55 | :py:func:`vsfieldkit.resample_as_progressive` uses this as ``True`` 56 | internally. 57 | 58 | 1.1.0 59 | ----- 60 | * :py:func:`vsfieldkit.fill_analog_frame_ends` allows overriding the pre-fill 61 | mode and gives better error messaging when the fillborders plugin is missing 62 | the requested mode. The default mode is now ``"fillmargins"`` instead of 63 | ``"fixborders"`` in order to work with the release version of fillborders. 64 | * :py:func:`vsfieldkit.fill_analog_frame_ends` works with progressive clips 65 | cropped by factors smaller than interlaced subsampling. 66 | * :py:func:`vsfieldkit.fill_analog_frame_ends` more compatible with code 67 | autocompletion via removal of decorators. 68 | * :py:func:`vsfieldkit.scan_interlaced` can brighten newly-scanned fields via 69 | new ``attack_factor`` argument. 70 | 71 | 1.0.2 72 | ----- 73 | * :py:func:`vsfieldkit.fill_analog_frame_ends` will now look for EdgeFixer 74 | plugin first, followed by ContinuityFixer plugin as before. Having one of the 75 | two plugins is required. 76 | 77 | 1.0.1 78 | ----- 79 | * Adds :py:func:`vsfieldkit.fill_analog_frame_ends` for cleaning the half-line 80 | black bars at the top and bottom of analog video. 81 | 82 | Output Change: 83 | 84 | * :py:func:`vsfieldkit.bob` now defaults to shifting according to the field's 85 | position. Feature added for completion, but it's also deprecated in favor of 86 | :py:func:`resize.Bob` in VapourSynth R58+. 87 | 88 | Version 1.0.0 was yanked for an immediate bug fix. 
89 | 90 | 0.3.0 91 | ----- 92 | * New functions for re-interpreting progressive frames with interlaced sub-sampled chroma: 93 | 94 | * :py:func:`vsfieldkit.resample_as_progressive` 95 | * :py:func:`vsfieldkit.upsample_as_progressive` 96 | 97 | * Adds phosphor decay simulation for :py:func:`vsfieldkit.scan_interlaced` 98 | 99 | 100 | 0.2.0 101 | ----- 102 | Adds :py:func:`vsfieldkit.bob` deinterlacer. 103 | 104 | 0.1.0 105 | ----- 106 | First release. :py:func:`vsfieldkit.scan_interlaced` and some nifty utilities. 107 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 3 | 4 | # -- Path setup -------------------------------------------------------------- 5 | 6 | import os 7 | import sys 8 | 9 | sys.path.insert(0, os.path.abspath('..')) 10 | 11 | 12 | # -- Project information ----------------------------------------------------- 13 | 14 | project = 'vsfieldkit' 15 | copyright = '2023, Justin Turner Arthur' 16 | author = 'Justin Turner Arthur' 17 | 18 | 19 | # -- General configuration --------------------------------------------------- 20 | 21 | extensions = [ 22 | 'sphinx.ext.autodoc', 23 | 'sphinx.ext.intersphinx' 24 | ] 25 | templates_path = ['_templates'] 26 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 27 | intersphinx_mapping = { 28 | 'python': ('https://docs.python.org/3', None), 29 | 'vapoursynth': ('http://www.vapoursynth.com/doc', None) 30 | } 31 | autodoc_mock_imports = ['vapoursynth'] 32 | autodoc_typehints = 'description' 33 | 34 | # -- Options for HTML output ------------------------------------------------- 35 | 36 | html_theme = 'piccolo_theme' 37 | html_theme_options = { 38 | # 'page_width': 'auto', 39 | # 'body_max_width': 1280 40 | } 41 | html_static_path = ['_static'] 42 | 
html_css_files = [ 43 | 'css/piccolo_overrides.css', 44 | ] 45 | mathjax3_config = { 46 | 'options': { 47 | 'enableMenu': False 48 | } 49 | } 50 | 51 | toc_object_entries_show_parents = 'hide' 52 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | vsfieldkit 2 | ========== 3 | A collection of functions for working with interlaced content in VapourSynth. 4 | 5 | The code is managed on the 6 | `JustinTArthur/vsfieldkit project on GitHub `_ 7 | 8 | Documentation 9 | ------------- 10 | .. toctree:: 11 | :maxdepth: 2 12 | :caption: Contents: 13 | 14 | usage 15 | scan_interlaced_deep_dive 16 | changelog 17 | 18 | 19 | Indices and tables 20 | ================== 21 | 22 | * :ref:`genindex` 23 | * :ref:`modindex` 24 | * :ref:`search` 25 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 
23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.https://www.sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/scan_interlaced_deep_dive.rst: -------------------------------------------------------------------------------- 1 | scan_interlaced Deep Dive 2 | ========================= 3 | :py:func:`~vsfieldkit.scan_interlaced` can be thought of as a converter between 4 | storage/transport interlacing and display interlacing (a process sometimes 5 | called interlaced scan). The process is detailed below. 6 | 7 | Interlacing for Storage/Transport 8 | --------------------------------- 9 | When captured moments from a camera are stored, two moments' footage are laced 10 | into each other in a single frame or picture, so in European video systems, this 11 | usually means 50 moments captured in a second are interlaced onto 25 12 | frames\ [#other_territories]_\. These 25 frames could then be transported over 13 | HDTV broadcast or on a DVD and it's up to the playback system how to present 14 | those 50 moments. 15 | 16 | 17 | Playing Back Interlaced Content 18 | ------------------------------- 19 | Playback systems like your DVD player and television have a few choices when 20 | they play back content that is stored or transported interlaced. Usually 21 | the choices are: 22 | 23 | Interlacing-naive Progressive Scan 24 | Take the interlaced frame and send it to a progressive scan display as-is. 25 | If the original footage was captured at the interlaced frame rate, you'll 26 | lose the notion of the smooth motion of moving objects and the comb effect 27 | will be obvious to the viewer. 
If the playback mechanism doesn't even read 28 | interlaced color patterns from the picture, the combing effect will be 29 | thicker and colors could be displayed in the wrong rows. 30 | 31 | Content originally captured progressively and stored interlaced might 32 | appear uncombed to the viewer if the top and bottom fields of the same 33 | moment in time are stored in the same frame, occasionally repeated. 34 | 35 | Inverse Telecine or Field-matching 36 | Identify content originally captured as progressive frames at a slower pace 37 | than the rate of field moments, stretched out over interlaced fields to fit 38 | an interlaced medium. For example, playing back a DVD of a movie filmed at 39 | 24 film-frames-per-second. 40 | 41 | Deinterlacing 42 | Extract an interlaced field and construct a new frame for that moment 43 | using that field and optionally information from the fields stored for 44 | previous and next moments. These new frames can be displayed at the 45 | original field rate or the moments could be halved and the new frames would 46 | be displayed at the stored frame rate. 47 | 48 | Interlaced Scan 49 | Extract an interlaced field and paint it onto the display with the same 50 | alternating lines it was stored with in that same position then extract the 51 | next interlaced field from the stored frame and paint it onto its 52 | corresponding lines at the relative moment in time it was captured at. The 53 | previously-painted lines might have begun to fade away during this new 54 | moment depending on the display technology. 55 | 56 | If a video playback system is incapable of interlaced scan, it could instead be 57 | fed progressive frames that represent the states of an interlaced scan display. 58 | It is these kinds of frames that :py:func:`vsfieldkit.scan_interlaced` helps 59 | produce. 60 | 61 | .. only:: html 62 | 63 | .. 
figure:: https://jta-code.s3.amazonaws.com/vsfieldkit/examples/storage_vs_display-10hz-25.webm 64 | :class: controls 65 | 66 | Interlaced Scan in Action 67 | 68 | Comparing the original stored clip to the output of 69 | :py:func:`vsfieldkit.scan_interlaced`. Slowed to 10fps to emphasize 70 | differences in motion smoothness. 71 | 72 | Code Rush (2000) by David Winton used under the `Creative Commons 73 | Attribution-NonCommercial-ShareAlike 3.0 Unported License 74 | `_ 75 | 76 | .. note:: 77 | To display interlaced footage scanned to progressive frames with 78 | :py:func:`vsfieldkit.scan_interlaced`\, the display device would need to 79 | support progressive scan at the original field rate (e.g. at 50 Hz or 80 | 59.94-ish Hz) and you would need a means of transporting this footage, such 81 | as high speed HDMI or DisplayPort from a computer, or a USB drive plugged 82 | into an HDTV/UHD TV capable of aligning its refresh rate to the frame rate 83 | of content in its media player app. 84 | 85 | Properties of Interlaced Scan Display 86 | ------------------------------------- 87 | The viewer will perceive motion as smooth, but may either notice a comb 88 | effect while two moments' fields remain painted in their respective lines 89 | or may notice the fading of the previous moment's lines. If you grew up 90 | with this form of display and rarely witnessed alternatives, it might 91 | appear quite natural. 92 | 93 | Why is interlaced tech still used in Modern Times? 94 | -------------------------------------------------- 95 | It's the only way to transport high-frame-rate material to the home. 96 | Modern digital theatre systems are now capable of receiving 48 progressive 97 | fps content, but home entertainment systems don't have a standard way to take 98 | in progressive 48 fps, 50 fps, or 60 fps material from the studio. 
If you wish 99 | to convey smooth motion of events that were captured or rendered in high speed, 100 | you can still do one of these at 50 or :math:`\frac{60000}{1001}` interlaced 101 | fields per second: 102 | 103 | * Put it on a DVD as 480i or 576i 104 | * Put it on a Blu-ray as 480i, 576i, or 1080i 105 | * Send a 1080i signal over HDTV broadcast tech. 106 | * Send 1080i over proprietary digital cable or satellite channels. 107 | 108 | When you do that, you're losing half of the vertical resolution you could be 109 | using with the slower progressive formats and you have no idea how the end 110 | viewer's home entertainment system will portray the footage you transport 111 | as interlaced. 112 | 113 | People making content are ready for high speed progressive options. Those 114 | options just aren't there yet, so the above methods are still used to 115 | transport high frame rate material, mostly for sports events, but occasionally 116 | for concert footage and adult entertainment. Pushes by Peter Jackson and James 117 | Cameron to open the doors for high speed progressive transport may also make 118 | its way to the home for cinema, especially for 3D, where smooth motion helps 119 | avoid nausea of the viewer. 120 | 121 | Why Use :py:func:`~vsfieldkit.scan_interlaced` in Modern Times? 122 | --------------------------------------------------------------- 123 | Bob deinterlacers like QTGMC and Yadif have features like motion interpolation 124 | of neighbouring moments' fields to supplement image data presented in the 125 | generated frames. This results in more detail per moment than ever before, 126 | better capturing the original reality or intentions of the capture. So, you 127 | may ask yourself why you would step backward in time and use 128 | interlaced scan for display when QTGMC or Yadif paint a prettier picture with 129 | no obvious comb effect. 
130 | 131 | Here are the biggest reasons you might want to: 132 | 133 | Academia 134 | You might wish to demonstrate the evolution of video technology to a film 135 | class, but only have a progressive display system. 136 | Lossless Display 137 | You may wish to ensure that every stored pixel has its time on display 138 | without any of the guessing, aligning, or blending a modern deinterlacer 139 | might perform. With :py:func:`vsfieldkit.scan_interlaced` this is achieved 140 | while maintaining smooth motion of natively deinterlaced footage. 141 | Blend of Motion 142 | A bob deinterlacer can generate smooth motion from original interlaced 143 | fields if the final framerate isn't halved. However, you are often still 144 | placing an object in different places in different moments and if the 145 | object is filmed sharply with minimal shutter blur or is 146 | rendered/drawn/animated, the viewer could still have a jagged perception 147 | of the movement. Because interlaced scan results in remnants of the prior 148 | moment as the new moment is drawn, the net effect can be even smoother. 149 | If it was the content producer's intended playback 150 | Rarely does a filmmaker think to themselves that interlacing is great and 151 | they want to work with it more; it's usually the opposite. However, 152 | should that moment arise, perhaps wanting to give a found-footage horror 153 | film the lo-fi reality feel that fits, you're covered. 154 | 155 | 156 | Chances are, whatever modern equipment you'd normally play back interlaced 157 | material on will deinterlace that content and play a progressive 158 | representation. You could find the amount of moments presented are cut in half. 
159 | 160 | True interlaced scan could be done with an old CRT TV and means to transport 161 | interlaced content to the TV or you could process interlaced content with 162 | :py:func:`vsfieldkit.scan_interlaced` to prepare video that is displayed on 163 | a progressive scan system in the same way it would in an interlaced scan 164 | system. 165 | 166 | Features 167 | -------- 168 | Phosphor Decay Simulation 169 | ^^^^^^^^^^^^^^^^^^^^^^^^^ 170 | In Cathode Ray Tube displays, an electron beam would scan alternating fields of 171 | lines from interlaced content onto cathodoluminescent substances (phosphors) to 172 | reproduce the content's light/color for display. After the phosphors were lit 173 | up they would begin to dim before being scanned onto by the electron beam 174 | again. The ``decay_factor`` and ``decay_base`` parameters of 175 | :py:func:`vsfieldkit.scan_interlaced` enable simulation of this behavior by 176 | dimming lines that were scanned onto in the previous moment. 177 | 178 | .. only:: html 179 | 180 | .. figure:: https://jta-code.s3.amazonaws.com/vsfieldkit/examples/regular_vs_decay-30hz-25.webm 181 | :class: controls 182 | 183 | Phosphor Decay Simulation in Action 184 | 185 | Slowed to 30fps for consistent demonstration on most displays. 186 | 187 | Code Rush (2000) by David Winton used under the `Creative Commons 188 | Attribution-NonCommercial-ShareAlike 3.0 Unported License 189 | `_ 190 | 191 | Additionally, the ``attack_factor`` parameter can be used to brighten 192 | newly-scanned lines. 193 | 194 | .. warning:: 195 | It's not recommended to add decay unless you know what the playback 196 | device's refresh rate will be ahead of time. If the final video's framerate 197 | is not a factor of the display refresh rate, it can result in a flickering 198 | effect that will distract from the content. 
199 | 200 | The alternating lines of luma changes created by the decay simulation with 201 | a strong decay factor can also be unpleasant or potentially 202 | seizure-inducing at final frame rates between 5 and 30 progressive frames 203 | per second. 204 | 205 | 206 | Origins 207 | ------- 208 | :py:func:`~vsfieldkit.scan_interlaced` was inspired by 209 | `Juha Jeronen `_'s wonderful Phosphor 210 | deinterlacer for VideoLAN's 211 | `VLC media player `_. This code was not 212 | derived from it, but it tries to at least keep the subsampling 213 | nomenclature the same. 214 | 215 | 216 | .. rubric:: Footnotes 217 | 218 | .. [#other_territories] :math:`\frac{60000}{1001}` or 219 | :math:`59.\overline{940059}` captured moments per second interlaced onto 220 | :math:`\frac{30000}{1001}` or :math:`29.\overline{970029}` frames 221 | per second in North America, some of South America, Liberia, Myanmar, 222 | South Korea, Taiwan, Philippines, Japan, and some Pacific Islands nations 223 | and territories. 224 | -------------------------------------------------------------------------------- /docs/usage.rst: -------------------------------------------------------------------------------- 1 | Usage 2 | ===== 3 | 4 | Installation 5 | ------------ 6 | vsfieldkit is `hosted on PyPI `_ so it's 7 | as simple as using your favorite installer: 8 | 9 | .. code-block:: bash 10 | 11 | python -m pip install vsfieldkit 12 | 13 | .. code-block:: bash 14 | 15 | poetry add vsfieldkit 16 | 17 | To add to a specific scripts directory: 18 | 19 | 20 | .. code-block:: bash 21 | 22 | python -m pip install --target=./my_scripts_dir vsfieldkit 23 | 24 | 25 | The package uses semantic versioning to indicate backwards 26 | compatible changes to the API. 27 | 28 | As the developer does not have Windows, vsrepo is not officially supported. 29 | That said, it seems to be able to install vsfieldkit. 30 | 31 | Dependencies 32 | ^^^^^^^^^^^^ 33 | For most functions, just VapourSynth. 
The 34 | :py:func:`~vsfieldkit.fill_analog_frame_ends` function requires the FillBorders 35 | and either the ContinuityFixer or EdgeFixer plugins. Generating a resampling 36 | kernel with :py:func:`~vsfieldkit.kernels.prepare_nnedi3_chroma_upsampler` 37 | requires the nnedi3 plugin. 38 | 39 | Functions 40 | --------- 41 | Reinterpreting 42 | ^^^^^^^^^^^^^^ 43 | .. autofunction:: vsfieldkit.assume_bff(clip) -> VideoNode 44 | 45 | .. autofunction:: vsfieldkit.assume_progressive(clip) -> VideoNode 46 | 47 | For progressive content that has been interlaced and has vertical chroma 48 | subsampling, :py:func:`vsfieldkit.resample_as_progressive` or 49 | :py:func:`vsfieldkit.upsample_as_progressive` could be considered as well. 50 | 51 | .. autofunction:: vsfieldkit.assume_tff(clip) -> VideoNode 52 | 53 | Deinterlacing 54 | ^^^^^^^^^^^^^ 55 | .. function:: vsfieldkit.bob(clip, shift=True, tff=None, \ 56 | keep_field_property=True, kernel=core.resize.Spline36, \ 57 | dither_type='random') 58 | 59 | A simple bob deinterlacer. Returns a clip of progressive frames, each 60 | consisting of a field from the original interlaced clip in order of its 61 | original capture. As interlaced fields have half the resolution of a given 62 | moment, the new frames are stretched up to the original clip's height. 63 | 64 | If shifting for playback comfort, VapourSynth R58 and above provides a 65 | built-in :py:func:`resize.Bob` that should be used instead as it provides 66 | near-identical functionality. 67 | 68 | :param VideoNode clip: Video with interlaced frames to bob into the 69 | resulting clip. 70 | 71 | :param bool shift: Whether to shift the lines during scaling to account for 72 | the field's position in a full frame. Recommended if the output is 73 | intended for playback. 74 | 75 | :param bool tff: 76 | Specifies the field order to assume when scanning progressive footage 77 | or clips without field order marking. ``True`` assumes top-field-first. 
78 | ``False`` for bottom-field-first. 79 | 80 | :param Resizer kernel: 81 | Resampling/resizing function from vapoursynth.core.resize to use to 82 | stretch the fields to the target frame height. Defaults to 83 | :py:func:`resize.Spline36`. 84 | 85 | :param str dither_type: 86 | If video is processed at a higher bit depth internally before being 87 | returned to an original depth of less than 16 bits per plane, this 88 | dithering method will be used to avoid banding and other unnatural 89 | artifacts caused by rounding at low bit rate. 90 | 91 | .. function:: vsfieldkit.resample_as_progressive( \ 92 | clip, \ 93 | subsampling_kernel=resample_chroma_with_spline36, \ 94 | upsampling_kernel=resample_chroma_with_spline36, \ 95 | dither_type='random' \ 96 | ) -> VideoNode 97 | 98 | This can be used instead of :py:func:`vsfieldkit.assume_progressive` 99 | when progressive content has been encoded interlaced with vertical chroma 100 | subsampling. 101 | 102 | The primary use-case for this is removing 2:2 pulldown on 25p content 103 | that's been hard-telecined to 50i in DV, DVB, or DVD formats with 4:2:0 104 | chroma subsampling. It can also be used to resample chroma on frames 105 | created with field-matching that pulled up other pulldown patterns. 106 | 107 | When progressive content is encoded as interlaced pictures with 4:2:0 108 | chroma subsampling, the chroma samples span alternating instead of adjacent 109 | lines of a frame. Simply marking/assuming such clips as progressive could 110 | result in color samples being attributed to the wrong lines (bleeding), and 111 | in those cases this function can be used instead. It will prevent bleeding, 112 | though as this comes up with new samples for the progressive content, it 113 | results in loss of original color precision. 114 | 115 | If you wish to perform additional processing before the final chroma 116 | subsampling is restored, use :py:func:`vsfieldkit.upsample_as_progressive` 117 | instead. 
Resampling/resizing function to use to restore deinterlaced chroma to 124 | the original chroma height.
162 | Like bob deinterlacing, it doubles the amount of frames used to portray 163 | the moments represented in the interlaced footage. 164 | 165 | Interlaced content is typically stored or transmitted with two moments 166 | interlaced into one frame and each moment only appearing in that one frame. 167 | This balances the destructive compression of time and image resolution. The 168 | frames generated by ``scan_interlaced`` will repeat a field from the 169 | previous moment, losing that compression advantage. Additionally, they 170 | can't be treated as interlaced by downstream filters or playback systems 171 | that expect a field's picture to only appear once. Because of this, they 172 | are marked as progressive by the function. It might be better to call this 173 | function a "display interlacer" rather than a deinterlacer. 174 | 175 | This was inspired by `Juha Jeronen `_'s 176 | wonderful Phosphor deinterlacer for VideoLAN's 177 | `VLC media player `_. This code was not 178 | derived from it, but it tries to at least keep the subsampling 179 | nomenclature the same. 180 | 181 | More background and some examples can be found in the 182 | :doc:`scan_interlaced_deep_dive`. 183 | 184 | :param VideoNode clip: Video with interlaced frames to scan to 185 | the resulting clip. 186 | 187 | :param VideoNode warmup_clip: 188 | The first field from the main clip will be painted alongside the last 189 | field of the warmup clip if supplied. This can allow seamless splicing 190 | of scan_interlace output with other clips. If no warmup clip is 191 | supplied, black scanlines are used to warm up that field. 192 | 193 | :param bool tff: 194 | Specifies the field order to assume when scanning progressive footage 195 | or clips without field order marking. ``True`` assumes top-field-first. 196 | ``False`` for bottom-field-first. Applies to the main clip and/or the 197 | warmup clip if either have not-explicitly-interlaced frames. 
198 | 199 | :param ChromaSubsampleScanning chroma_subsample_scanning: 200 | When Chroma is sub-sampled vertically, such as in Y'CbCr 4:2:0 clips, 201 | a decision must be made on how to present the color of the newly-laced 202 | scan lines in the final frames because those frames will be marked as 203 | progressive. Progressive frames don't have chroma samples for 204 | alternating scan lines. Without a chroma scanning decision, the first 205 | line's color would bleed into the second line, which was scanned from a 206 | different moment, third into the fourth… resulting in thicker visual 207 | comb lines and lines having color untrue to their source material. 208 | 209 | Enumerations are available on the vsfieldkit top level module and the 210 | :py:class:`~vsfieldkit.ChromaSubsampleScanning` enum. 211 | 212 | :param Factor attack_factor: 213 | Amount by which to brighten lines that have been scanned in the 214 | current moment. Usually expressed as a 215 | :py:class:`float`, :py:class:`~decimal.Decimal` or 216 | :py:class:`~fractions.Fraction` where ``1`` means the newly 217 | scanned line is not brightened, ``2`` means the line is doubled 218 | in brightness. 219 | 220 | :param Factor decay_factor: 221 | Amount by which to dim the lines scanned in the previous moment, 222 | exposing the ``decay_base`` clip. This simulates the decay of cathode 223 | ray tube phosphors in the moments after they've been scanned onto and 224 | the persistence of vision in humans. 225 | 226 | Usually expressed as a :py:class:`float`, :py:class:`~decimal.Decimal` 227 | or :py:class:`~fractions.Fraction` where ``1`` means the 228 | previously-laced scan lines are completely replaced by lines from the 229 | decay_base clip, ``0.5`` means the clip is dimmed half and ``0`` means 230 | there is no dimming at all. ``decay_base`` can be used to dim 231 | to a background other than solid black. 
232 | 233 | :param VideoNode decay_base: 234 | A background clip that previously-scanned scan lines should be dimmed 235 | to instead of black. Ignored if ``decay_factor`` is not set. Should be 236 | one frame long. The frame will be re-used. 237 | 238 | :param str dither_type: 239 | If video is processed at a higher bit depth internally before being 240 | returned to an original depth of less than 16 bits per plane, this 241 | dithering method will be used to avoid banding and other unnatural 242 | artifacts caused by rounding colors to the nearest integer. 243 | 244 | :param Sequence[InterlacedScanPostProcessor] post_processing: 245 | Post-processing steps to run on the frames resulting from interlaced 246 | scanning. At the moment, only 247 | :py:attr:`~vsfieldkit.InterlacedScanPostProcessor.BLEND_VERTICALLY` is 248 | available. 249 | 250 | Enumerations are available on the vsfieldkit top level module and the 251 | :py:class:`~vsfieldkit.InterlacedScanPostProcessor` enum. 252 | 253 | .. function:: vsfieldkit.upsample_as_progressive(clip, \ 254 | upsample_horizontally=False, \ 255 | kernel=resample_chroma_with_spline36 \ 256 | ) -> VideoNode 257 | 258 | Returns a clip now marked as progressive and with any vertical chroma 259 | subsampling removed so that previously-alternating chroma lines will be 260 | laid out in the correct one-line-after-another order for progressive 261 | content. 262 | 263 | This can be used instead of :py:func:`vsfieldkit.assume_progressive` 264 | when the progressive frames have been encoded interlaced and additional 265 | processing is desired before restoring the target chroma sub-sampling. 266 | 267 | .. 
code-block:: python 268 | :caption: Example 269 | 270 | # Interpret as progressive, removing vertical chroma subsampling 271 | upsampled = vsfieldkit.upsample_as_progressive(clip) 272 | 273 | # Additional processing: 274 | fixed_edges = awsmfunc.bbmod(upsampled, left=2, right=3) 275 | 276 | # Restore original subsampling with favorite kernel then output: 277 | resampled = fixed_edges.resize.Spline36(format=clip.format) 278 | resampled.set_output() 279 | 280 | :param bool upsample_horizontally: 281 | Whether or not to horizontally upsample. The function will always 282 | vertically upsample if applicable. 283 | 284 | :param Resizer kernel: 285 | Resampling/resizing function to use for upsampling sub-sampled 286 | chroma. Must be interlacing-aware like most of VapourSynth's 287 | built in :external+vapoursynth:doc:`functions/video/resize` functions. 288 | Defaults to 289 | :py:func:`vsfieldkit.kernels.resample_chroma_with_spline36`. 290 | 291 | If the nnedi3 VapourSynth plugin is present, 292 | :py:func:`vsfieldkit.kernels.prepare_nnedi3_chroma_upsampler` 293 | can be used to create a suitable upsampling kernel employing 294 | the nnedi3 model. 295 | 296 | Interlacing 297 | ^^^^^^^^^^^ 298 | .. note:: 299 | VapourSynth's vspipe does not inspect frame properties so assumes it's 300 | outputting progressive frames in the YUV4MPEG2 headers it supplies. If 301 | needing to output interlaced frames, either supply manual interlacing hints 302 | to whatever is receiving the vspipe output or use 303 | :py:func:`vsfieldkit.output_frame_inferred_y4m` to have the script itself 304 | produce output with interlace-aware metadata. 305 | 306 | .. 
A string of numbers separated by colons, where each number 334 | indicates for how many field durations to include each frame from 335 | the original clip in the new interlaced clip.
footage will appropriately end up in the popular 2:3 pulldown 366 | pattern (though may not start at the "2" in the cycle).
It aims to interpolate only the missing data, leaving clean pixels 422 | intact.
437 | 438 | :param int bottom_blank_width: 439 | Width in pixels of the bottom-right black bar at its longest, including 440 | any horizontal fade. If not supplied, assumed to be 65% of the bottom 441 | line. Set to ``0`` to not attempt bottom line repair. 442 | 443 | :param continuity_radius: 444 | Number of rows next to the black bar to use as input for interpolating 445 | the new pixels to generate inside the bar. 446 | :type continuity_radius: int or Sequence[int] 447 | 448 | :param int luma_splash_radius: 449 | Repair this many extra rows of luma data above or below the half line. 450 | Adjacent picture data is often damaged by the black bar if the video's 451 | fields are resized from their original signal height (e.g. from 486i 452 | to 480i for NTSC to fit a DVD or DV stream) or if the studio applied 453 | artificial sharpening. 454 | 455 | If the adjacent rows have correct brightness even if they're gray, this 456 | can be set to 0 to persist the clean luma data. The function's 457 | adjustments for chroma sub-sampling should address adjacent gray area. 458 | 459 | :param original_format: 460 | If the clip to repair has been up-sampled for editing (e.g. from 461 | YUV420P8 to YUV422P16), pass in the original clip's format here 462 | so that correct assumptions are made for damage repair decisions. 463 | :type original_format: PresetVideoFormat, VideoFormat, VideoNode or int 464 | 465 | :param bool restore_blank_detail: 466 | In rare cases where the black bars contain salvageable image data, this 467 | can be used to merge some of that original data on top of the 468 | filled-and-continued repair of the bar. Otherwise, this introduces 469 | noise. 470 | 471 | :param str prefill_mode: 472 | How to fill the blank line prior to interpolation. This is 473 | passed directly to the fillborders plugin. This pre-fill is 474 | used to improve the quality of the least-squares regression that is 475 | applied afterwards. 
476 | 477 | As of fillborders v2, possible values are ``"fillmargins"``, 478 | ``"mirror"``, and ``"repeat"``. 479 | 480 | Output 481 | ^^^^^^ 482 | .. function:: vsfieldkit.output_frame_inferred_y4m( \ 483 | clip, \ 484 | fileobj, \ 485 | progress_update=None, \ 486 | prefetch=0, \ 487 | backlog=-1 \ 488 | ) 489 | 490 | Similar to :py:meth:`VideoNode.output`, writes raw video data to the given 491 | file object. The output is decorated with YUV4MPEG2 headers based on the 492 | clip format and the first frame’s properties. 493 | 494 | This allows the script itself to provide video output. While potentially 495 | slower than using vspipe, vspipe as of writing does not supply y4m headers 496 | based on frame properties, so does not communicate interlacing, 497 | :abbr:`SAR (Sample Aspect Ratio)`, or chroma siting metadata to the 498 | receiving file or pipe. This function will include those if they are 499 | present in the first frame's properties and if they're supported by the 500 | YUV4MPEG2 specification. 501 | 502 | This is ultimately a hack that wraps an underlying call to 503 | :py:meth:`VideoNode.output`. 504 | 505 | .. code-block:: python 506 | :caption: Example 507 | 508 | if __name__ in ('__vapoursynth__', '__vspreview__'): 509 | # e.g. vspipe or vspreview 510 | clip.set_output(0) 511 | elif __name__ == '__main__': 512 | # Script run directly by a Python interpreter 513 | vsfieldkit.output_frame_inferred_y4m(clip, sys.stdout) 514 | 515 | :param VideoNode clip: Video clip to output. 516 | 517 | :param typing.IO fileobj: Stream or file-like object. Either stdout, 518 | stderr, or an object supporting binary writes. 519 | 520 | :param progress_update: A callback taking in the amount 521 | of outputted frames and the number of total frames in the clip. 522 | 523 | :type progress_update: typing.Callable[[int, int], None] 524 | 525 | :param int prefetch: Used for debugging the underlying 526 | :py:meth:`VideoNode.output` call. 
527 | 528 | :param int backlog: Used for debugging the underlying 529 | :py:meth:`VideoNode.output` call. 530 | 531 | Utility 532 | ^^^^^^^ 533 | .. autofunction:: vsfieldkit.annotate_bobbed_fields(clip, original_clip, tff, prop='OriginalField') -> VideoNode 534 | 535 | .. autofunction:: vsfieldkit.double(clip) -> VideoNode 536 | 537 | .. function:: vsfieldkit.group_by_combed( \ 538 | clip \ 539 | ) -> Iterator[Tuple[Union[bool, None], VideoNode]] 540 | 541 | Assuming the passed-in clip was processed by a filter that performs 542 | comb detection, this splits the clip into segments based on whether they 543 | are combed or not. The values it generates are True, False, or ``None`` if 544 | it was marked combed, not combed, or not marked as well as the segment of 545 | the clip. This does not have any built-in comb detection. 546 | 547 | This function requests rendered frames and blocks until it gets them. If 548 | not needing to remove frames, splice additional frames, or analyze frames, 549 | consider using :py:func:`std.FrameEval` or :py:func:`std.ModifyFrame` 550 | instead for simple comb-based frame replacements. 551 | 552 | .. code-block:: python 553 | :caption: Example 554 | 555 | progressive_clips = [] 556 | detelecined = tivtc.TFM(clip, PP=1) 557 | for combed, segment in vsfieldkit.group_by_combed(detelecined): 558 | if combed: 559 | progressive_clips.append( 560 | havsfunc.QTGMC(segment, TFF=False) 561 | ) 562 | else: 563 | progressive_clips.append( 564 | tivtc.TDecimate(segment, tff=False) 565 | ) 566 | vs.core.std.Splice(progressive_clips).set_output() 567 | 568 | .. function:: vsfieldkit.group_by_field_order( \ 569 | clip \ 570 | ) -> Iterator[Tuple[Union[FieldBased, None], VideoNode]] 571 | 572 | Generates field orders and clips from the passed in clip split up by 573 | changes in field order. Field order is expressed as a 574 | :py:class:`FieldBased` enumeration or ``None`` if field order is not 575 | applicable or not available. 
576 | 577 | This function requests rendered frames and blocks until it gets them. If 578 | not needing to remove frames, splice additional frames, or analyze frames, 579 | consider using :py:func:`std.FrameEval` or :py:func:`std.ModifyFrame` 580 | instead for simple field-order-based frame replacements. 581 | 582 | .. code-block:: python 583 | :caption: Example 584 | 585 | progressive_clips = [] 586 | for order, segment in vsfieldkit.group_by_field_order(clip): 587 | if order == vs.FIELD_TOP: 588 | progressive_clips.append( 589 | havsfunc.QTGMC(segment, TFF=True) 590 | ) 591 | elif order == vs.FIELD_BOTTOM: 592 | progressive_clips.append( 593 | havsfunc.QTGMC(segment, TFF=False) 594 | ) 595 | elif order == vs.PROGRESSIVE: 596 | progressive_clips.append( 597 | vsfieldkit.double(segment) 598 | ) 599 | vs.core.std.Splice(progressive_clips).set_output() 600 | 601 | Types 602 | ^^^^^ 603 | 604 | .. autoclass:: vsfieldkit.ChromaSubsampleScanning 605 | :members: 606 | :undoc-members: 607 | 608 | .. autoclass:: vsfieldkit.InterlacedScanPostProcessor 609 | :members: 610 | :undoc-members: 611 | 612 | .. autoclass:: vsfieldkit.PulldownPattern 613 | :members: 614 | :undoc-members: 615 | 616 | .. autoclass:: vsfieldkit.Factor 617 | 618 | .. autoclass:: vsfieldkit.Resizer 619 | 620 | Resampling Kernels 621 | ------------------ 622 | These are mostly for use as arguments to vsfieldkit's functions, but they all 623 | can be used directly by calling them as you would any of the kernels found in 624 | vapoursynth's built-in resize module. 625 | 626 | .. autofunction:: vsfieldkit.kernels.resample_chroma_with_bicubic 627 | .. autofunction:: vsfieldkit.kernels.resample_chroma_with_bilinear 628 | .. autofunction:: vsfieldkit.kernels.resample_chroma_with_lanczos 629 | .. autofunction:: vsfieldkit.kernels.resample_chroma_with_spline16 630 | .. autofunction:: vsfieldkit.kernels.resample_chroma_with_spline36 631 | .. autofunction:: vsfieldkit.kernels.resample_chroma_with_spline64 632 | .. 
autofunction:: vsfieldkit.kernels.prepare_nnedi3_chroma_upsampler 633 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | -------------------------------------------------------------------------------- /requirements.ci.txt: -------------------------------------------------------------------------------- 1 | flake8>=4.0.1,<5 2 | isort>=5.10.1,<6 3 | mypy==0.942 4 | -------------------------------------------------------------------------------- /requirements.docs.txt: -------------------------------------------------------------------------------- 1 | Sphinx==6.1.3 2 | piccolo-theme==0.14.0 3 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = vsfieldkit 3 | version = attr: vsfieldkit.VERSION 4 | author = Justin Turner Arthur 5 | author_email = justinarthur@gmail.com 6 | long_description = file: README.md 7 | long_description_content_type = text/markdown 8 | url = https://github.com/JustinTArthur/vsfieldkit 9 | classifiers = 10 | License :: OSI Approved :: MIT License 11 | Topic :: Multimedia :: Video 12 | Topic :: Multimedia :: Video :: Conversion 13 | Topic :: Software Development :: Libraries 14 | Topic :: Software Development :: Libraries :: Python Modules 15 | Typing :: Typed 16 | license = MIT 17 | license_files = 18 | LICENSE.txt 19 | 20 | [options] 21 | packages = vsfieldkit 22 | python_requires = >=3.6 23 | 24 | [options.package_data] 25 | vsfieldkit = py.typed 26 | -------------------------------------------------------------------------------- /vsfieldkit/__init__.py: -------------------------------------------------------------------------------- 1 | from 
vsfieldkit.deinterlacing import (bob, resample_as_progressive, 2 | upsample_as_progressive) 3 | from vsfieldkit.interlacing import telecine, weave_fields 4 | from vsfieldkit.output import output_frame_inferred_y4m 5 | from vsfieldkit.repair import fill_analog_frame_ends 6 | from vsfieldkit.scanning import scan_interlaced 7 | from vsfieldkit.types import (ChromaSubsampleScanning, Factor, FormatSpecifier, 8 | InterlacedScanPostProcessor, PulldownPattern, 9 | Resizer) 10 | from vsfieldkit.util import (annotate_bobbed_fields, assume_bff, 11 | assume_progressive, assume_tff, double, 12 | group_by_combed, group_by_field_order) 13 | 14 | VERSION = 2, 1, 0 15 | 16 | SCAN_BLENDED = ChromaSubsampleScanning.SCAN_BLENDED 17 | SCAN_LATEST = ChromaSubsampleScanning.SCAN_LATEST 18 | SCAN_UPSAMPLED = ChromaSubsampleScanning.SCAN_UPSAMPLED 19 | 20 | BLEND_VERTICALLY = InterlacedScanPostProcessor.BLEND_VERTICALLY 21 | 22 | ADVANCED_PULLDOWN = PulldownPattern.ADVANCED_PULLDOWN 23 | EURO_PULLDOWN = PulldownPattern.EURO_PULLDOWN 24 | MATCHED_PULLDOWN = PulldownPattern.MATCHED_PULLDOWN 25 | NTSC_FILM_PULLDOWN = PulldownPattern.NTSC_FILM_PULLDOWN 26 | -------------------------------------------------------------------------------- /vsfieldkit/deinterlacing.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | from warnings import warn 3 | 4 | from vapoursynth import FieldBased, VideoNode, core 5 | 6 | from vsfieldkit.kernels import resample_chroma_with_spline36 7 | from vsfieldkit.types import Resizer 8 | from vsfieldkit.util import convert_format_if_needed 9 | from vsfieldkit.vapoursynth import VS_FIELD_FROM_BOTTOM, VS_FIELD_FROM_TOP 10 | 11 | 12 | def bob( 13 | clip: VideoNode, 14 | shift: bool = True, 15 | tff: Optional[bool] = None, 16 | keep_field_property: bool = True, 17 | kernel: Resizer = core.resize.Spline36, 18 | dither_type: str = 'random' 19 | ) -> VideoNode: 20 | """Returns a clip of progressive frames, each 
consisting of a field from 21 | the original interlaced clip in order of its original capture. 22 | 23 | As interlaced fields have half the resolution of a given moment, the new 24 | frames are stretched up to the original clip's height. 25 | """ 26 | if ( 27 | shift 28 | and hasattr(core.resize, 'Bob') 29 | and hasattr(kernel, 'plugin') 30 | and kernel.plugin.namespace == 'resize' 31 | ): 32 | kernel_filter = kernel.name.lower() 33 | warn(f'In VapourSynth >=R58, use the built-in ' 34 | f'core.resize.Bob(filter="{kernel_filter}" instead)).', 35 | DeprecationWarning) 36 | stretched = clip.resize.Bob(filter=kernel_filter) 37 | else: 38 | as_fields = clip.std.SeparateFields(tff=tff) 39 | stretched = convert_format_if_needed( 40 | as_fields, 41 | height=clip.height, 42 | kernel=kernel, 43 | dither_type=dither_type 44 | ) 45 | 46 | if shift: 47 | # core.resize doesn't expose zimg's destination field parity 48 | # options so we have to assume it won't shift for us. We can trick 49 | # it using its active region stuff instead. In zimg's sub-pixel 50 | # layout, the fields are shifted by 1/4, adjusted for stretch as 51 | # 1/8th. 
52 | stretched_as_top = convert_format_if_needed( 53 | as_fields, 54 | height=clip.height, 55 | kernel=kernel, 56 | dither_type=dither_type, 57 | src_top=0.125 58 | ) 59 | stretched_as_bottom = convert_format_if_needed( 60 | as_fields, 61 | height=clip.height, 62 | kernel=kernel, 63 | dither_type=dither_type, 64 | src_top=-0.125 65 | ) 66 | shift_map = { 67 | VS_FIELD_FROM_TOP: stretched_as_top, 68 | VS_FIELD_FROM_BOTTOM: stretched_as_bottom 69 | } 70 | stretched = stretched.std.FrameEval( 71 | lambda n, f: shift_map[f.props._Field], 72 | prop_src=(as_fields,) 73 | ) 74 | 75 | if keep_field_property: 76 | return stretched 77 | 78 | return stretched.std.RemoveFrameProps(('_Field',)) 79 | 80 | 81 | def resample_as_progressive( 82 | clip: VideoNode, 83 | subsampling_kernel: Resizer = resample_chroma_with_spline36, 84 | upsampling_kernel: Resizer = resample_chroma_with_spline36, 85 | dither_type: str = 'random', 86 | avoid_chroma_shift=True 87 | ) -> VideoNode: 88 | """When every frame of the clip represents progressive content (no 89 | combing) this will take any frames encoded interlaced and resample them so 90 | that they are progressive in both content AND format. 91 | """ 92 | if ( 93 | avoid_chroma_shift 94 | and ( 95 | not hasattr(upsampling_kernel, 'supports_resizing') 96 | or upsampling_kernel.supports_resizing is True 97 | ) 98 | ): 99 | # For the round trip up and down, we can avoid the subsampling grid 100 | # altogether by working on individual planes. 
101 | y, cb, cr = clip.std.SplitPlanes() 102 | upsampled_planes = [ 103 | y.std.SetFieldBased(FieldBased.FIELD_PROGRESSIVE) 104 | ] + [ 105 | convert_format_if_needed( 106 | plane, 107 | height=y.height, 108 | width=y.width, 109 | kernel=upsampling_kernel 110 | ).std.SetFieldBased(FieldBased.FIELD_PROGRESSIVE) 111 | for plane in (cb, cr) 112 | ] 113 | resampled_planes = ( 114 | upsampled_planes[0], 115 | convert_format_if_needed( 116 | upsampled_planes[1], 117 | height=cb.height, 118 | width=cb.width, 119 | kernel=subsampling_kernel, 120 | dither_type=dither_type 121 | ), 122 | convert_format_if_needed( 123 | upsampled_planes[2], 124 | height=cr.height, 125 | width=cr.width, 126 | kernel=subsampling_kernel, 127 | dither_type=dither_type 128 | ) 129 | ) 130 | resampled = core.std.ShufflePlanes( 131 | clips=resampled_planes, 132 | planes=(0, 0, 0), 133 | colorfamily=clip.format.color_family 134 | ) 135 | else: 136 | upsampled = upsample_as_progressive( 137 | clip, 138 | kernel=upsampling_kernel, 139 | upsample_horizontally=True 140 | ) 141 | resampled = convert_format_if_needed( 142 | upsampled, 143 | format=clip.format, 144 | kernel=subsampling_kernel, 145 | dither_type=dither_type, 146 | ) 147 | return resampled 148 | 149 | 150 | def upsample_as_progressive( 151 | clip: VideoNode, 152 | upsample_horizontally=False, 153 | kernel: Resizer = resample_chroma_with_spline36, 154 | dither_type: str = 'random' 155 | ): 156 | """Returns a clip now marked as progressive and with any vertical 157 | chroma subsampling removed so that previously-alternating chroma lines 158 | will be laid out in the correct one-line-after-another order for 159 | progressive content.""" 160 | subsampling_w = 0 if upsample_horizontally else clip.format.subsampling_w 161 | upsampled = convert_format_if_needed( 162 | clip, 163 | subsampling_h=0, 164 | subsampling_w=subsampling_w, 165 | kernel=kernel, 166 | dither_type=dither_type 167 | ) 168 | as_progressive = 
upsampled.std.SetFieldBased(FieldBased.FIELD_PROGRESSIVE) 169 | return as_progressive 170 | -------------------------------------------------------------------------------- /vsfieldkit/interlacing.py: -------------------------------------------------------------------------------- 1 | from fractions import Fraction 2 | from itertools import cycle, islice 3 | from math import ceil, floor 4 | from typing import Optional, Sequence, Union 5 | 6 | from vapoursynth import VideoFrame, VideoNode, core 7 | 8 | from vsfieldkit.kernels import resample_chroma_with_spline36 9 | from vsfieldkit.types import PulldownPattern, Resizer 10 | from vsfieldkit.util import convert_format_if_needed 11 | 12 | 13 | def telecine( 14 | clip: VideoNode, 15 | *, 16 | tff: bool, 17 | pulldown_pattern: Union[str, PulldownPattern, None] = None, 18 | fpsnum: Optional[int] = None, 19 | fpsden: Optional[int] = 1, 20 | interlace_progressive_chroma: bool = True, 21 | pre_subsample_fields: bool = False, 22 | subsampling_kernel: Resizer = resample_chroma_with_spline36, 23 | upsampling_kernel: Resizer = resample_chroma_with_spline36, 24 | dither_type: str = 'random' 25 | ) -> VideoNode: 26 | """Spreads the clip's frames across interlaced fields to produce an 27 | interlaced clip. 
28 | """ 29 | if pre_subsample_fields and clip.format.subsampling_h > 0: 30 | y, cb, cr = clip.std.SplitPlanes() 31 | cb_halved = convert_format_if_needed( 32 | cb, 33 | kernel=subsampling_kernel, 34 | height=cb.height // 2 35 | ) 36 | cr_halved = convert_format_if_needed( 37 | cr, 38 | kernel=subsampling_kernel, 39 | height=cr.height // 2 40 | ) 41 | upsampled = core.std.ShufflePlanes( 42 | clips=( 43 | y, 44 | cb_halved.resize.Point(height=y.height), 45 | cr_halved.resize.Point(height=y.height) 46 | ), 47 | planes=(0, 0, 0), 48 | colorfamily=clip.format.color_family 49 | ) 50 | else: 51 | upsampled = convert_format_if_needed( 52 | clip, 53 | subsampling_h=0, 54 | kernel=upsampling_kernel, 55 | dither_type=dither_type 56 | ) 57 | 58 | if pulldown_pattern: 59 | interlaced = _telecine_by_pattern( 60 | clip, 61 | upsampled, 62 | pulldown_pattern=pulldown_pattern, 63 | tff=tff, 64 | pre_subsample_fields=pre_subsample_fields, 65 | interlace_progressive_chroma=interlace_progressive_chroma, 66 | subsampling_kernel=subsampling_kernel, 67 | dither_type=dither_type, 68 | ) 69 | elif fpsnum: 70 | interlaced = _telecine_by_time( 71 | clip, 72 | upsampled, 73 | fps=Fraction(fpsnum, fpsden), 74 | tff=tff, 75 | pre_subsample_fields=pre_subsample_fields, 76 | interlace_progressive_chroma=interlace_progressive_chroma, 77 | subsampling_kernel=subsampling_kernel, 78 | dither_type=dither_type 79 | ) 80 | else: 81 | raise ValueError('Either pulldown_pattern or fpsnum is required.') 82 | 83 | return interlaced 84 | 85 | 86 | def weave_fields( 87 | clip: VideoNode 88 | ) -> VideoNode: 89 | """Creates an interlaced clip from an interleaved field frames clip, such 90 | as one created with core.std.SeparateFields() 91 | """ 92 | return clip.std.DoubleWeave()[::2] 93 | 94 | 95 | def _pulldown_pattern_to_field_offsets( 96 | pattern: Sequence[int] 97 | ) -> Sequence[int]: 98 | offsets_pattern = [] 99 | lapsed_duration = 0 100 | field_idx = 0 101 | for frame_duration in pattern: 102 | 
field_offsets = cycle((field_idx, field_idx + 1)) 103 | pulled_down_field_offsets = islice( 104 | field_offsets, 105 | lapsed_duration, 106 | lapsed_duration + frame_duration 107 | ) 108 | offsets_pattern.extend(pulled_down_field_offsets) 109 | lapsed_duration += frame_duration 110 | field_idx += 2 111 | return offsets_pattern 112 | 113 | 114 | def _telecine_by_pattern( 115 | clip: VideoNode, 116 | upsampled_clip: VideoNode, 117 | pulldown_pattern: Union[str, PulldownPattern], 118 | pre_subsample_fields: bool, 119 | subsampling_kernel: Resizer, 120 | dither_type: str, 121 | interlace_progressive_chroma: bool, 122 | tff: bool 123 | ) -> VideoNode: 124 | if isinstance(pulldown_pattern, PulldownPattern): 125 | pulldown_pattern = pulldown_pattern.value 126 | pattern_parts = [ 127 | int(field_duration) 128 | for field_duration 129 | in pulldown_pattern.split(':') 130 | ] 131 | pattern_duration = sum(pattern_parts) 132 | if pattern_duration % 2 != 0: 133 | # Abbreviated pattern. 134 | # Run twice, so we don't end on half a frame. 
135 | pattern_parts *= 2 136 | orig_cycle_size = len(pattern_parts) 137 | offsets_pattern = _pulldown_pattern_to_field_offsets(pattern_parts) 138 | 139 | as_fields = upsampled_clip.std.SeparateFields(tff=tff) 140 | pulled_down_fields = as_fields.std.SelectEvery( 141 | cycle=orig_cycle_size * 2, 142 | offsets=offsets_pattern 143 | ) 144 | interlaced = weave_fields(pulled_down_fields) 145 | 146 | # Resample our upsampled fields if required: 147 | if pre_subsample_fields: 148 | interlaced = interlaced.resize.Point(format=clip.format) 149 | else: 150 | interlaced = convert_format_if_needed( 151 | interlaced, 152 | format=clip.format, 153 | kernel=subsampling_kernel, 154 | dither_type=dither_type 155 | ) 156 | 157 | if not interlace_progressive_chroma: 158 | # Restore original progressive frames but with interlaced metadata 159 | # First create a map of clean frames in new cycle pointing to 160 | # original frames in source cycle 161 | clean_frame_sources = {} 162 | for field_idx in range(0, len(offsets_pattern), 2): 163 | field_offset_1, field_offset_2 = ( 164 | offsets_pattern[field_idx:field_idx + 2] 165 | ) 166 | if ( 167 | (field_offset_2 == field_offset_1 + 1) 168 | and field_offset_1 % 2 == 0 169 | ): 170 | clean_frame_sources[field_idx // 2] = ( 171 | field_offset_1 // 2 172 | ) 173 | frame_cycle_size = len(offsets_pattern) // 2 174 | 175 | def restore_original_frames(n: int, f: VideoFrame): 176 | cycle_idx = floor(n // frame_cycle_size) 177 | frame_idx_in_cycle = n % frame_cycle_size 178 | if frame_idx_in_cycle in clean_frame_sources: 179 | orig_frame_offset = clean_frame_sources[frame_idx_in_cycle] 180 | orig_frame_num = ( 181 | (cycle_idx * orig_cycle_size) 182 | + orig_frame_offset 183 | ) 184 | orig_frame = clip.get_frame(orig_frame_num) 185 | fake_interlaced_frame = orig_frame.copy() 186 | fake_interlaced_frame.props = dict(f.props) 187 | return fake_interlaced_frame 188 | return f 189 | 190 | interlaced = interlaced.std.ModifyFrame( 191 | 
def _telecine_by_time(
    clip: VideoNode,
    upsampled_clip: VideoNode,
    fps: Fraction,
    tff: bool,
    interlace_progressive_chroma: bool,
    pre_subsample_fields: bool,
    subsampling_kernel: Resizer,
    dither_type: str,
) -> VideoNode:
    """Telecines to an arbitrary target frame rate by sampling, for each
    output field's moment in time, the source frame playing at that moment.

    clip is the untouched source; upsampled_clip is the same content with
    vertical chroma subsampling already removed by the caller.
    """
    original_length = len(clip)
    original_fps = clip.fps
    original_duration = Fraction(original_length, clip.fps)
    original_fields = upsampled_clip.std.SeparateFields(tff=tff)
    new_length = ceil(fps * original_duration)
    # Blank clip establishes the new timing/length; its content is replaced
    # field-by-field below.
    new_clip = upsampled_clip.std.BlankClip(
        length=new_length,
        fpsnum=fps.numerator,
        fpsden=fps.denominator
    )
    new_fields = new_clip.std.SeparateFields(tff=tff)
    new_field_rate = new_fields.fps

    def select_original_frame_field(n: int, f: VideoFrame):
        # Find the source frame playing when output field n starts, then
        # take that frame's field of matching parity.
        time_at_new_start = Fraction(n, new_field_rate)
        orig_frame_num = floor(time_at_new_start * original_fps)
        orig_field_num = (orig_frame_num * 2) + (n % 2)
        orig_field_frame = original_fields.get_frame(orig_field_num)

        new_field_frame = orig_field_frame.copy()
        # Keep the new clip's timing metadata on the borrowed field:
        new_field_frame.props['_DurationNum'] = f.props['_DurationNum']
        new_field_frame.props['_DurationDen'] = f.props['_DurationDen']
        return new_field_frame

    new_fields = new_fields.std.ModifyFrame(
        selector=select_original_frame_field,
        clips=(new_fields,)
    )

    interlaced = weave_fields(new_fields)

    # Resample our upsampled fields if required.
    # (A duplicated, dither-less convert_format_if_needed call used to run
    # here first; it made the pre_subsample_fields Point conversion below a
    # no-op and disagreed with _telecine_by_pattern, so it was removed.)
    if pre_subsample_fields:
        interlaced = interlaced.resize.Point(format=clip.format)
    else:
        interlaced = convert_format_if_needed(
            interlaced,
            format=clip.format,
            kernel=subsampling_kernel,
            dither_type=dither_type
        )

    if not interlace_progressive_chroma:
        def restore_original_frames(n: int, f: VideoFrame):
            # If both fields of output frame n came from the same source
            # frame, substitute that clean progressive frame (keeping the
            # interlaced-looking props).
            time_at_1st_field = Fraction(n, fps)
            time_at_2nd_field = time_at_1st_field + Fraction(1, new_field_rate)
            orig_1st_field_frame_n = floor(time_at_1st_field * original_fps)
            orig_2nd_field_frame_n = floor(time_at_2nd_field * original_fps)
            if orig_1st_field_frame_n == orig_2nd_field_frame_n:
                orig_frame: VideoFrame = clip.get_frame(orig_1st_field_frame_n)
                fake_interlaced_frame = orig_frame.copy()
                fake_interlaced_frame.props = dict(f.props)
                return fake_interlaced_frame
            return f

        interlaced = interlaced.std.ModifyFrame(
            clips=(interlaced,),
            selector=restore_original_frames
        )

    return interlaced
def prepare_nnedi3_chroma_upsampler(
    fallback_kernel: Resizer = core.resize.Spline36,
    nnedi3_func: Optional[Callable] = None,
    nsize: Optional[int] = None,
    nns: Optional[int] = None,
    qual: Optional[int] = None,
    etype: Optional[int] = None,
    pscrn: Optional[int] = None,
    opt: Optional[bool] = None,
    int16_prescreener: Optional[bool] = None,
    int16_predictor: Optional[bool] = None,
    exp: Optional[int] = None,
    show_mask: Optional[bool] = None,
    opencl_device: Optional[int] = None
) -> Resizer:
    """Creates a resampling function that uses the nnedi3 interpolation model
    originally made for deinterlacing to produce a clip without vertical chroma
    subsampling. The resampling function will use the given nnedi3 parameters.

    This resampling function will act like a typical VapourSynth
    resize kernel, but will only work for format changes in vertical
    subsampling, going from Y′CbCr 4:2:0 to Y′CbCr 4:2:2 or Y′CbCr 4:4:0 to
    Y′CbCr 4:4:4. Attempting to make any other light/color/sampling changes
    will result in an error.

    This can use the znedi3 (CPU), nnedi3 (CPU), or nnedi3cl (GPU) plugin.
    It'll look for those plugins in that order unless nnedi3_func or
    opencl_device is supplied.
    """
    require_one_of(
        ('znedi3', 'znedi3'),
        ('nnedi3', 'nnedi3'),
        ('nnedi3cl', 'nnedi3cl')
    )
    if nnedi3_func:
        # Assume the user knows what the function allows if they passed it
        extra_nnedi3_args = {
            'int16_prescreener': int16_prescreener,
            'int16_predictor': int16_predictor,
            'device': opencl_device
        }
    elif opencl_device:
        nnedi3_func = core.nnedi3cl.NNEDI3CL
        extra_nnedi3_args = {
            'device': opencl_device,
        }
    elif hasattr(core, 'znedi3'):
        nnedi3_func = core.znedi3.nnedi3
        extra_nnedi3_args = {
            'int16_prescreener': int16_prescreener,
            'int16_predictor': int16_predictor
        }
    elif hasattr(core, 'nnedi3'):
        nnedi3_func = core.nnedi3.nnedi3
        extra_nnedi3_args = {
            'int16_prescreener': int16_prescreener,
            'int16_predictor': int16_predictor
        }
    else:
        nnedi3_func = core.nnedi3cl.NNEDI3CL
        # Bug fix: this branch previously left extra_nnedi3_args unset,
        # causing a NameError the first time the returned resampler ran.
        # No opencl_device was given here, so there's nothing extra to pass.
        extra_nnedi3_args = {}

    def upsample_chroma_using_nnedi3(
        clip: VideoNode,
        format: Union[VideoFormat, PresetVideoFormat] = None,
        *resize_args,
        **resize_kwargs
    ) -> VideoNode:
        """Given a clip with half-size (subsampled) vertical chroma, fills in
        missing vertical detail using the nnedi3 interpolation model originally
        made for deinterlacing to produce a clip without vertical chroma
        subsampling.
        """
        target_format = format_from_specifier(format)
        # Process any non-vertical-upsampling resampling first:
        clip = convert_format_if_needed(
            clip,
            format=target_format.replace(
                subsampling_h=clip.format.subsampling_h
            ),
            kernel=fallback_kernel,
            **resize_kwargs
        )
        if (
            clip.format.subsampling_h != 1
            or format is None
            or target_format.subsampling_h != 0
        ):
            raise Error(
                'vsfieldkit nnedi3 upsamplers are currently only for format '
                'conversion from Y′CbCr 4:2:0 to Y′CbCr 4:2:2 or Y′CbCr 4:4:0 '
                'to Y′CbCr 4:4:4.'
            )

        y, cb, cr = clip.std.SplitPlanes()
        # We're using TFF (field=3). It doesn't really matter what order we bob
        # in, as long as we're consistent when we annotate for re-weaving.
        bobbed_cb = nnedi3_func(cb, field=3, nsize=nsize, nns=nns, qual=qual,
                                etype=etype, pscrn=pscrn, opt=opt, exp=exp,
                                show_mask=show_mask, **extra_nnedi3_args)
        bobbed_cr = nnedi3_func(cr, field=3, nsize=nsize, nns=nns, qual=qual,
                                etype=etype, pscrn=pscrn, opt=opt, exp=exp,
                                show_mask=show_mask, **extra_nnedi3_args)
        # These are effectively bobbed.
        # Treat the bobs as if they were plain separated fields
        bobbed_cb = annotate_bobbed_fields(
            bobbed_cb,
            original_clip=cb,
            tff=True,
            prop='_Field'
        )
        bobbed_cr = annotate_bobbed_fields(
            bobbed_cr,
            original_clip=cr,
            tff=True,
            prop='_Field'
        )
        reinterlaced_cb = core.std.DoubleWeave(bobbed_cb)[::2]
        reinterlaced_cr = core.std.DoubleWeave(bobbed_cr)[::2]

        upsampled = core.std.ShufflePlanes(
            clips=(y, reinterlaced_cb, reinterlaced_cr),
            planes=(0, 0, 0),
            colorfamily=ColorFamily.YUV
        )
        # Any downstream operations will consider the chroma location
        # to be vertically co-sited with luma samples now that we're 4:2:2,
        # so resample relative to luma site if we started from vertically
        # centered chroma siting.
        chromaloc_corrected = shift_chroma_to_luma_sited(
            upsampled,
            tff=True,
            kernel=fallback_kernel,
            dither_type=resize_kwargs.get('dither_type')
        )
        return chromaloc_corrected

    # Signals to vsfieldkit callers that this kernel can't change dimensions:
    upsample_chroma_using_nnedi3.supports_resizing = False
    return upsample_chroma_using_nnedi3
165 | return resampler(*resize_args, **resize_kwargs) 166 | return resample_nearest_neighbor( 167 | *resize_args, 168 | **resize_kwargs, 169 | resample_filter_uv=resampler_name 170 | ) 171 | 172 | # If VapourSynth's out-of-the-box annotations improve: 173 | # try: 174 | # annotations = getattr(resample_nearest_neighbor, '__annotations__') 175 | # except AttributeError: 176 | # pass 177 | # else: 178 | # setattr(chroma_only_resampler, '__annotations__', annotations) 179 | 180 | chroma_only_resampler.__name__ = f'resample_chroma_with_{resampler_name}' 181 | chroma_only_resampler.__qualname__ = ( 182 | f'resample_chroma_with_{resampler_name}' 183 | ) 184 | chroma_only_resampler.__doc__ = ( 185 | f'Assumes that the clip is Y′CbCr and that only the Cb and Cr planes ' 186 | f'are being resized. The Cb and Cr planes will be resampled with ' 187 | f'{resampler_name}. The Y′ plane will be resampled with the nearest ' 188 | f'neighbour (point) method to ensure unaltered passthrough.' 189 | ) 190 | 191 | return chroma_only_resampler 192 | 193 | 194 | resample_chroma_with_bicubic = _prepare_chroma_only_resampler(resize.Bicubic) 195 | resample_chroma_with_bilinear = _prepare_chroma_only_resampler(resize.Bilinear) 196 | resample_chroma_with_lanczos = _prepare_chroma_only_resampler(resize.Lanczos) 197 | resample_chroma_with_spline16 = _prepare_chroma_only_resampler(resize.Spline16) 198 | resample_chroma_with_spline36 = _prepare_chroma_only_resampler(resize.Spline36) 199 | resample_chroma_with_spline64 = _prepare_chroma_only_resampler(resize.Spline64) 200 | -------------------------------------------------------------------------------- /vsfieldkit/output.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from typing import IO, Callable, Mapping, Optional 3 | 4 | from vapoursynth import (ChromaLocation, ColorFamily, ColorRange, FieldBased, 5 | SampleType, VideoFormat, VideoNode) 6 | 7 | Y4M_FLOAT_DEPTH_CODES = { 8 | 16: 'h', 9 | 
32: 's', 10 | 64: 'd' 11 | } 12 | 13 | Y4M_YCBCR_SUBSAMPLING_CODES = { 14 | (1, 1): '420', 15 | (1, 0): '422', 16 | (0, 0): '444', 17 | (2, 2): '410', 18 | (2, 0): '411', 19 | (0, 1): '440' 20 | } 21 | 22 | Y4M_CHROMA_SITING_CODES = { 23 | ChromaLocation.CHROMA_CENTER: 'jpeg', 24 | ChromaLocation.CHROMA_LEFT: 'mpeg2', 25 | ChromaLocation.CHROMA_TOP_LEFT: 'paldv', 26 | } 27 | 28 | Y4M_RANGE_CODES = { 29 | ColorRange.RANGE_LIMITED: 'LIMITED', 30 | ColorRange.RANGE_FULL: 'FULL', 31 | } 32 | 33 | 34 | def output_frame_inferred_y4m( 35 | clip: VideoNode, 36 | fileobj: IO, 37 | progress_update: Optional[Callable] = None, 38 | prefetch: int = 0, 39 | backlog: int = -1 40 | ) -> None: 41 | """Similar to VideNode.output, writes raw video data to the given file 42 | object, decorated with yuv4mpeg2 headers based on the clip and the first 43 | frame's properties.""" 44 | if ( 45 | (fileobj is sys.stdout or fileobj is sys.stderr) 46 | and hasattr(fileobj, 'buffer') 47 | ): 48 | write = fileobj.buffer.write 49 | else: 50 | write = fileobj.write 51 | y4m_header = yuv4mpeg2_header(clip) 52 | write(y4m_header) 53 | write(b'\n') 54 | if progress_update: 55 | def y4m_progress_update(done: int, total: int) -> None: 56 | progress_update(done, total) 57 | if done != total: 58 | write(b'FRAME\n') 59 | else: 60 | def y4m_progress_update(done: int, total: int) -> None: 61 | if done != total: 62 | write(b'FRAME\n') 63 | return clip.output( 64 | fileobj, 65 | progress_update=y4m_progress_update, 66 | prefetch=prefetch, 67 | backlog=backlog 68 | ) 69 | 70 | 71 | def yuv4mpeg2_header(clip: VideoNode, infer_from_first_frame=True) -> bytes: 72 | """Produces a YUV4MPEG2 header for the video clip. Unlike vspipe's 73 | out-of-the-box Y4M header, this one infers full details from the first 74 | frame of the clip, not just the clip's dimensions. 75 | """ 76 | # Defaults that can be overridden by frame metadata: 77 | interlacing = '?' 
78 | sar = '0:0' 79 | color_range_code = None 80 | if infer_from_first_frame: 81 | first_frame_props = clip.get_frame(0).props 82 | interlacing = { 83 | FieldBased.FIELD_PROGRESSIVE: 'p', 84 | FieldBased.FIELD_TOP: 't', 85 | FieldBased.FIELD_BOTTOM: 'b' 86 | }.get(first_frame_props.get('_FieldBased'), '?') 87 | if '_SARNum' in first_frame_props: 88 | sar = ( 89 | f'{first_frame_props["_SARNum"]}' 90 | f':{first_frame_props.get("_SARDen", 1)}' 91 | ) 92 | if '_ColorRange' in first_frame_props: 93 | color_range_code = Y4M_RANGE_CODES[first_frame_props["_ColorRange"]] 94 | chroma_format = _yuv4mpeg2_chroma_string(clip, first_frame_props) 95 | else: 96 | chroma_format = _yuv4mpeg2_chroma_string(clip) 97 | 98 | y4m_header = ( 99 | f'YUV4MPEG2 ' 100 | f'C{chroma_format} ' 101 | f'W{clip.width} ' 102 | f'H{clip.height} ' 103 | f'F{clip.fps_num}:{clip.fps_den} ' 104 | f'I{interlacing} ' 105 | f'A{sar} ' 106 | f'XLENGTH={len(clip)}' 107 | ) 108 | 109 | if color_range_code: 110 | y4m_header += f' XCOLORRANGE={color_range_code}' 111 | 112 | return y4m_header.encode('ascii') 113 | 114 | 115 | def _yuv4mpeg2_chroma_string( 116 | clip: VideoNode, 117 | props: Optional[Mapping] = None 118 | ) -> str: 119 | fmt: VideoFormat = clip.format 120 | if fmt.color_family == ColorFamily.GRAY: 121 | return f'mono{fmt.bits_per_sample if fmt.bits_per_sample > 8 else ""}' 122 | elif fmt.color_family == ColorFamily.YUV: 123 | subsampling = Y4M_YCBCR_SUBSAMPLING_CODES.get( 124 | (fmt.subsampling_w, fmt.subsampling_h) 125 | ) 126 | if not subsampling: 127 | raise ValueError(f'No matching Y4M colorspace for {fmt}.') 128 | if fmt.sample_type == SampleType.INTEGER: 129 | if fmt.bits_per_sample > 8: 130 | return f'{subsampling}p{fmt.bits_per_sample}' 131 | else: 132 | if props and subsampling == '420': 133 | colorspace = Y4M_CHROMA_SITING_CODES.get( 134 | props.get('_ChromaLocation', None), 135 | '' 136 | ) 137 | return f'{subsampling}{colorspace}' 138 | else: 139 | return subsampling 140 | elif 
fmt.sample_type == SampleType.FLOAT: 141 | return ( 142 | f'{subsampling}p{Y4M_FLOAT_DEPTH_CODES[fmt.bits_per_sample]}' 143 | ) 144 | else: 145 | raise ValueError('Unknown sample type.') 146 | else: 147 | raise ValueError(f'{fmt.color_family} color family incompatible' 148 | f'with Y4M') 149 | -------------------------------------------------------------------------------- /vsfieldkit/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JustinTArthur/vsfieldkit/a539cdf16c5bbcac75ded63aa74a0e64a0b0c033/vsfieldkit/py.typed -------------------------------------------------------------------------------- /vsfieldkit/repair.py: -------------------------------------------------------------------------------- 1 | from math import ceil 2 | from typing import Callable, Optional, Sequence, Tuple, Union 3 | 4 | from vapoursynth import ColorFamily, Error, FieldBased, VideoNode, core 5 | 6 | from vsfieldkit.interlacing import weave_fields 7 | from vsfieldkit.types import FormatSpecifier 8 | from vsfieldkit.util import (format_from_specifier, require_one_of, 9 | require_plugins) 10 | from vsfieldkit.vapoursynth import VS_FIELD_FROM_BOTTOM, VS_FIELD_FROM_TOP 11 | 12 | FULL_ANALOG_DISPLAY_LINES = frozenset((486, 576)) 13 | 14 | 15 | def fill_analog_frame_ends( 16 | clip: VideoNode, 17 | top_blank_width: Optional[int] = None, 18 | bottom_blank_width: Optional[int] = None, 19 | continuity_radius: Union[int, Sequence[int]] = (5,), 20 | luma_splash_radius: int = 1, 21 | original_format: Optional[FormatSpecifier] = None, 22 | restore_blank_detail=False, 23 | prefill_mode='fillmargins' 24 | ) -> VideoNode: 25 | """Fills the beginning and end half-lines from frames digitized for or 26 | from PAL/NTSC signal. It aims to interpolate only the missing data, leaving 27 | existing pixels in-tact. 
def fill_analog_frame_ends(
    clip: VideoNode,
    top_blank_width: Optional[int] = None,
    bottom_blank_width: Optional[int] = None,
    continuity_radius: Union[int, Sequence[int]] = (5,),
    luma_splash_radius: int = 1,
    original_format: Optional[FormatSpecifier] = None,
    restore_blank_detail: bool = False,
    prefill_mode: str = 'fillmargins'
) -> VideoNode:
    """Fills the beginning and end half-lines from frames digitized for or
    from PAL/NTSC signal. It aims to interpolate only the missing data,
    leaving existing pixels in-tact.

    These lines are often half-blanked so that the CRT electron beam won't
    light up phosphors as it zig-zags from the bottom of screen to the top to
    start painting the next frame.

    top_blank_width/bottom_blank_width default to 65% of the clip width.
    continuity_radius is per-plane (last value repeated for missing planes).
    original_format lets a previously-resampled clip declare the chroma
    subsampling it was digitized with so enough lines are repaired.
    Requires the fillborders plugin plus one of cf/edgefixer.
    """
    require_plugins(('fb', 'fillborders'))
    require_one_of(('cf', 'ContinuityFixer'), ('edgefixer', 'EdgeFixer'))

    if top_blank_width is None:
        top_blank_width = ceil(clip.width * 0.65)
    if bottom_blank_width is None:
        bottom_blank_width = ceil(clip.width * 0.65)

    if original_format is None:
        original_format = clip.format
    else:
        original_format = format_from_specifier(original_format)

    color_family = clip.format.color_family
    num_planes = clip.format.num_planes
    chroma_height = 2 ** clip.format.subsampling_h
    original_chroma_height = 2 ** original_format.subsampling_h
    # How many current-format lines one original chroma sample spans:
    orig_color_sample_equiv = max(1, original_chroma_height // chroma_height)

    luma_damage_radius = 1 + luma_splash_radius
    if color_family == ColorFamily.YUV:
        fill_sizes = (
            1,
            orig_color_sample_equiv,
            orig_color_sample_equiv
        )
        continue_sizes = (
            luma_damage_radius,
            orig_color_sample_equiv,
            orig_color_sample_equiv
        )
    elif color_family == ColorFamily.GRAY:
        # Assume no color info persisted. Skip supersampling.
        fill_sizes = (1,) * num_planes
        continue_sizes = (luma_damage_radius,) * num_planes
    else:
        # Assume every plane has luma AND color info.
        fill_sizes = (orig_color_sample_equiv,) * num_planes
        continue_sizes = (
            max(luma_damage_radius, orig_color_sample_equiv),
        ) * num_planes

    if isinstance(continuity_radius, int):
        continuity_radius = (continuity_radius,)
    # Expand radius across implied planes:
    continuity_radius = (
        continuity_radius
        + (continuity_radius[-1:] * (num_planes - len(continuity_radius)))
    )

    if hasattr(core, 'edgefixer'):
        continue_func = _continue_edge_with_edgefixer
    else:
        continue_func = core.cf.ContinuityFixer

    # Separate fields if chroma subsampling allows. Default tff doesn't matter
    # as we don't care about order, only position.
    if (clip.height // 2) % chroma_height == 0:
        # Current crop allows us to process as interlaced in case interlaced
        # frames are encountered.
        fields = clip.std.SeparateFields(tff=True)
        fields_top_edge, fields_bottom_edge = _repaired_frame_edges(
            fields,
            top_blank_width=top_blank_width,
            bottom_blank_width=bottom_blank_width,
            fill_sizes=fill_sizes,
            continue_sizes=continue_sizes,
            continuity_radius=continuity_radius,
            color_sample_height=orig_color_sample_equiv,
            restore_blank_detail=restore_blank_detail,
            continue_func=continue_func,
            prefill_mode=prefill_mode
        )
        if top_blank_width:
            fields_repaired_top = core.std.StackVertical((
                fields_top_edge,
                fields.std.Crop(top=fields_top_edge.height)
            ))
        else:
            fields_repaired_top = fields
        if bottom_blank_width:
            fields_repaired_bottom = core.std.StackVertical((
                fields.std.Crop(bottom=fields_bottom_edge.height),
                fields_bottom_edge
            ))
        else:
            fields_repaired_bottom = fields
        # Only a top-of-picture field needs its frame top repaired and only
        # a bottom-of-picture field needs its frame bottom repaired:
        replacement_by_position = {
            VS_FIELD_FROM_TOP: fields_repaired_top,
            VS_FIELD_FROM_BOTTOM: fields_repaired_bottom,
        }

        def repair_field_frame(n, f):
            return replacement_by_position[f.props._Field]

        repaired_fields = fields.std.FrameEval(
            repair_field_frame,
            prop_src=(fields,),
            clip_src=tuple(replacement_by_position.values())
        )
        # Re-interlace fields
        interlaced_repaired = weave_fields(repaired_fields)
        # Copy original properties in case we overwrote field-related flags
        interlaced_repaired = interlaced_repaired.std.CopyFrameProps(clip)
    else:
        # Current crop doesn't allow processing as interlaced due to chroma
        # sub-sampling.
        interlaced_repaired = None

    # Progressive repair is always prepared; it's the fallback for frames
    # not flagged as interlaced.
    progressive_top_edge, progressive_bottom_edge = _repaired_frame_edges(
        clip,
        top_blank_width=top_blank_width,
        bottom_blank_width=bottom_blank_width,
        fill_sizes=fill_sizes,
        continue_sizes=continue_sizes,
        continuity_radius=continuity_radius,
        color_sample_height=orig_color_sample_equiv,
        restore_blank_detail=restore_blank_detail,
        continue_func=continue_func,
        prefill_mode=prefill_mode
    )
    progressive_repaired = clip
    if top_blank_width:
        progressive_repaired = core.std.StackVertical((
            progressive_top_edge,
            progressive_repaired.std.Crop(
                top=progressive_top_edge.height
            )
        ))
    if bottom_blank_width:
        progressive_repaired = core.std.StackVertical((
            progressive_repaired.std.Crop(
                bottom=progressive_bottom_edge.height
            ),
            progressive_bottom_edge
        ))

    def repair_frame(n, f):
        # Choose interlaced vs. progressive repair per-frame by metadata.
        _field_based = f.props.get('_FieldBased')
        if _field_based in (FieldBased.FIELD_TOP, FieldBased.FIELD_BOTTOM):
            if interlaced_repaired is None:
                raise Error(
                    "Can't repair interlaced frames when height not aligned "
                    "with chroma subsamples."
                )
            return interlaced_repaired
        else:
            return progressive_repaired

    if interlaced_repaired:
        repair_sources = (interlaced_repaired, progressive_repaired)
    else:
        repair_sources = (progressive_repaired,)

    repaired_frames = clip.std.FrameEval(
        repair_frame,
        prop_src=(clip,),
        clip_src=repair_sources
    )

    return repaired_frames
Consider passing ' 231 | f'prefill_mode="fillmargins" to fill_analog_frame_ends or ' 232 | f'upgrading the fillborders (fb) plugin.' 233 | ) from e 234 | else: 235 | raise 236 | 237 | filled_top = core.std.ShufflePlanes( 238 | clips=filled_top_planes, # filled_primary_tops + filled_chroma_tops, 239 | planes=[0 for _plane in range(num_planes)], 240 | colorfamily=color_family 241 | ) 242 | filled_bottom = core.std.ShufflePlanes( 243 | clips=filled_bottom_planes, 244 | planes=[0 for _plane in range(num_planes)], 245 | colorfamily=color_family 246 | ) 247 | top_interpolated = continue_func( 248 | filled_top, 249 | top=continue_sizes, 250 | radius=continuity_radius 251 | ) 252 | bottom_interpolated = continue_func( 253 | filled_bottom, 254 | bottom=continue_sizes, 255 | radius=continuity_radius 256 | ) 257 | if restore_blank_detail: 258 | # Merge continuity without a prefill on top of the prefill continuity 259 | top_interpolated = top_interpolated.std.Merge( 260 | continue_func( 261 | clip, 262 | top=continue_sizes, 263 | radius=continuity_radius 264 | ) 265 | ) 266 | bottom_interpolated = bottom_interpolated.std.Merge( 267 | continue_func( 268 | clip, 269 | bottom=continue_sizes, 270 | radius=continuity_radius 271 | ) 272 | ) 273 | 274 | # Only bring out the portions that actually needed repair: 275 | chroma_height_pixels = 2 ** clip.format.subsampling_h 276 | orig_chroma_height_pixels = color_sample_height * chroma_height_pixels 277 | repair_height = max(orig_chroma_height_pixels, continue_sizes[0]) 278 | # Round to nearest chroma subsample size: 279 | repair_height = ( 280 | (repair_height + (chroma_height_pixels - 1)) 281 | & ~(chroma_height_pixels - 1) 282 | ) 283 | if top_blank_width: 284 | orig_top_right = clip.std.Crop( 285 | left=top_blank_width, 286 | bottom=clip.height - repair_height 287 | ) 288 | repaired_top_left = top_interpolated.std.Crop( 289 | right=top_interpolated.width - top_blank_width, 290 | bottom=top_interpolated.height - repair_height 291 | ) 
292 | repaired_top_edge = core.std.StackHorizontal( 293 | (repaired_top_left, orig_top_right) 294 | ) 295 | else: 296 | repaired_top_edge = clip.std.Crop( 297 | bottom=clip.height - repair_height 298 | ) 299 | if bottom_blank_width: 300 | orig_bottom_left = clip.std.Crop( 301 | right=bottom_blank_width, 302 | top=clip.height - repair_height 303 | ) 304 | repaired_bottom_right = bottom_interpolated.std.Crop( 305 | left=bottom_interpolated.width - bottom_blank_width, 306 | top=bottom_interpolated.height - repair_height 307 | ) 308 | repaired_bottom_edge = core.std.StackHorizontal( 309 | (orig_bottom_left, repaired_bottom_right) 310 | ) 311 | else: 312 | repaired_bottom_edge = clip.std.Crop( 313 | top=clip.height - repair_height 314 | ) 315 | return repaired_top_edge, repaired_bottom_edge 316 | 317 | 318 | def _continue_edge_with_edgefixer( 319 | clip: VideoNode, 320 | left: Sequence[int] = (0,), 321 | top: Sequence[int] = (0,), 322 | right: Sequence[int] = (0,), 323 | bottom: Sequence[int] = (0,), 324 | radius: Sequence[int] = (0,) 325 | ) -> VideoNode: 326 | num_planes = clip.format.num_planes 327 | left = left + (left[-1:] * (num_planes - len(left))) 328 | right = right + (right[-1:] * (num_planes - len(right))) 329 | top = top + (top[-1:] * (num_planes - len(top))) 330 | bottom = bottom + (bottom[-1:] * (num_planes - len(bottom))) 331 | radius = radius + (radius[-1:] * (num_planes - len(radius))) 332 | 333 | planes = clip.std.SplitPlanes() 334 | fixed_planes = [] 335 | for n, plane in enumerate(planes): 336 | if any((left[n], top[n], right[n], bottom[n], radius[n])): 337 | fixed_plane = plane.edgefixer.Continuity(left[n], top[n], right[n], 338 | bottom[n], radius[n]) 339 | else: 340 | fixed_plane = plane 341 | fixed_planes.append(fixed_plane) 342 | 343 | return core.std.ShufflePlanes( 344 | fixed_planes, 345 | planes=(0,) * num_planes, 346 | colorfamily=clip.format.color_family 347 | ) 348 | 
# --------------------------------------------------------------------------
# vsfieldkit/scanning.py
# --------------------------------------------------------------------------
from collections.abc import Mapping, Sequence
from typing import Callable, Optional

from vapoursynth import ColorFamily, SampleType, VideoNode, core

from vsfieldkit.types import (ChromaSubsampleScanning, Factor,
                              InterlacedScanPostProcessor, Resizer)
from vsfieldkit.util import (assume_progressive, black_clip_from_clip,
                             brighten, convert_format_if_needed)

# Maps each post-processor enum member to the routine implementing it.
# Populated at the bottom of this module, after the routines are defined.
post_processing_routines: Mapping[InterlacedScanPostProcessor, Callable]


def scan_interlaced(
    clip: VideoNode,
    warmup_clip: Optional[VideoNode] = None,
    tff: Optional[bool] = None,
    chroma_subsample_scanning: ChromaSubsampleScanning = (
        ChromaSubsampleScanning.SCAN_LATEST
    ),
    dither_type: str = 'random',
    attack_factor: Optional[Factor] = None,
    decay_base: Optional[VideoNode] = None,
    decay_factor: Optional[Factor] = None,
    post_processing: Sequence[InterlacedScanPostProcessor] = (),
    post_processing_blend_kernel: Resizer = core.resize.Spline36,
) -> VideoNode:
    """
    Returns a new clip where interlaced fields from the original clip are
    painted onto each frame in their correct position moment-by-moment like an
    interlaced scan display would. This is sometimes referred to as phosphor
    deinterlacing. Like bob deinterlacing, it doubles the amount of frames
    (and frame rate accordingly) produced to portray the moments represented in
    the interlaced footage."""
    # TFF (w is warmup frame)
    # Top field source frame: 1     1     2     2     3     3     4 ...
    # Bot field source frame: w     1     1     2     2     3     3 ...
    # Desired Result:         1a+wb 1a+1b 2a+1b 2a+2b 3a+2b

    # BFF
    # Top field source frame: w     1     1     2     2     3     3 ...
    # Bot field source frame: 1     1     2     2     3     3     4 ...
    # Desired Result:         wa+1b 1a+1b 2a+1b 2a+2b 3a+2b

    if not warmup_clip:
        # Default warmup is a single black frame carrying the first frame's
        # props so field-order metadata matches the main clip.
        warmup_clip = black_clip_from_clip(clip, length=1)
        warmup_clip = warmup_clip.std.CopyFrameProps(clip[0])
    else:
        # Only the last frame of a supplied warmup clip is scanned in.
        warmup_clip = warmup_clip[-1]

    # Upsample the footage to have single line height chroma resolution if it
    # doesn't already so that we can persist the exact chroma layout we want to
    # the final "progressive" frame before downsampling back to 4:2:0 if
    # requested.
    scannable_clip = convert_format_if_needed(clip, subsampling_h=0)
    scannable_warmup = convert_format_if_needed(warmup_clip, subsampling_h=0)
    chroma_upsampled = (scannable_clip.format.id != clip.format.id)

    phosphor_fields = _scan_clip_to_phosphor_fields(
        scannable_clip,
        scannable_warmup,
        tff=tff
    )
    if (
        chroma_upsampled
        and chroma_subsample_scanning == ChromaSubsampleScanning.SCAN_LATEST
    ):
        phosphor_fields = _repeat_new_field_chroma(phosphor_fields)
    if decay_factor:
        if not decay_base:
            decay_base = black_clip_from_clip(clip, length=1)
        scannable_decay_base = convert_format_if_needed(
            decay_base,
            format=scannable_clip.format
        )
        # Repeat decay base for every frame of original clip:
        decay_clip = scannable_decay_base[0] * len(scannable_clip)
        # Ensure exact same field order instructions as original:
        decay_clip = decay_clip.std.CopyFrameProps(scannable_clip)
        decayed_phosphor_fields = _scan_clip_to_phosphor_fields(
            decay_clip,
            decay_clip[0],
            tff=tff
        )
        decay_chroma_planes = (
            chroma_subsample_scanning == ChromaSubsampleScanning.SCAN_UPSAMPLED
            or not chroma_upsampled
        )
        phosphor_fields = _decay_old_field(
            phosphor_fields,
            factor=decay_factor,
            decay_fields=decayed_phosphor_fields,
            include_chroma=decay_chroma_planes
        )
    if attack_factor is not None and attack_factor != 1:
        phosphor_fields = _brighten_fresh_fields(
            phosphor_fields,
            factor=attack_factor
        )

    laced = core.std.DoubleWeave(phosphor_fields, tff=True)[::2]
    as_progressive = assume_progressive(laced)

    post_processed = as_progressive
    for step in post_processing:
        process = post_processing_routines[step]
        post_processed = process(post_processed,
                                 kernel=post_processing_blend_kernel)

    if chroma_subsample_scanning == ChromaSubsampleScanning.SCAN_UPSAMPLED:
        # Restore the upsampled format in case changed by post-processing.
        # Restore original bit depth:
        return convert_format_if_needed(
            post_processed,
            subsampling_w=scannable_clip.format.subsampling_w,
            subsampling_h=scannable_clip.format.subsampling_h,
            bits_per_sample=clip.format.bits_per_sample,
            dither_type=dither_type
        )
    else:
        return convert_format_if_needed(
            post_processed,
            format=clip.format,
            dither_type=dither_type
        )


def _scan_clip_to_phosphor_fields(clip, warmup_clip, tff):
    """Produces the field-frame stream that, once double-woven, paints each
    field in its correct position while the previous field persists."""
    original_fields = clip.std.SeparateFields(tff=tff)
    warmup_fields = warmup_clip.std.SeparateFields(tff=tff)

    # Pick out the field position not about to be initialized by main clip
    first_field = original_fields.get_frame(0).props._Field
    for field_frame_clip in warmup_fields:
        field_frame = field_frame_clip.get_frame(0)
        if field_frame.props._Field != first_field:
            warmup_field = field_frame_clip
            break
    else:
        raise ValueError("Couldn't determine warmup field from supplied clip.")

    # To achieve the updating and repeating of fields, we can rely on the same
    # functions used to interlace. We just need to ensure every field is
    # interlaced twice except for the last one.
    recycled_fields = original_fields.std.SelectEvery(
        cycle=2,
        offsets=(0, 1, 0, 1),
        modify_duration=True
    )[:-1]
    synced_warmup_field = warmup_field.std.AssumeFPS(src=recycled_fields)

    # Insert our warm-up field in a way that scans the interlaced material
    # in the same field order it came in. Probably not required.
    phosphor_fields = (
        recycled_fields[0]
        + synced_warmup_field
        + recycled_fields[1:]
    )
    return phosphor_fields


def _repeat_new_field_chroma(phosphor_fields: VideoNode, offset=0):
    """Returns a new clip of scanned field frames where the chroma plane from
    the first field of a final frame is copied over the next frame's chroma,
    then the 3rd frame's chroma is copied over the 4th, etc."""
    if offset:
        pre_offset = phosphor_fields[:offset]
        edit_range = phosphor_fields[offset:]
    else:
        pre_offset = None
        edit_range = phosphor_fields

    # Given a scan from TFF:
    # NewTop WarmupBtm OldTop NewBtm NewTop OldBtm OldTop NewBtm NewTop…
    # Source1 Target1 Target2 Source2 Source3 Target3 Target4 Source4
    # Source Cadence:
    # 0 3 4 7 8
    # Target Cadence:
    # 1 2 5 6 9
    # If then interleaved:
    # 0 1 3 2 4 5 7 6 8 9
    source_frames = edit_range.std.SelectEvery(
        cycle=4,
        offsets=(0, 3),
        modify_duration=False
    )
    target_frames = edit_range.std.SelectEvery(
        cycle=4,
        offsets=(1, 2),
        modify_duration=False
    )
    overwritten_target = core.std.ShufflePlanes(
        clips=(target_frames, source_frames, source_frames),
        planes=(0, 1, 2),
        colorfamily=ColorFamily.YUV
    )
    edited_interleaved = core.std.Interleave(
        (source_frames, overwritten_target),
        modify_duration=False
    )
    edited_ordered = edited_interleaved.std.SelectEvery(
        cycle=4,
        offsets=(0, 1, 3, 2),
        modify_duration=False
    )

    if offset:
        return pre_offset + edited_ordered
    return edited_ordered


def _decay_old_field(
    phosphor_fields: VideoNode,
    factor: Factor,
    decay_fields: VideoNode,
    include_chroma: bool,
    offset=0
) -> VideoNode:
    """Returns a new clip of scanned field frames where the previously scanned
    field is dimmed."""
    if offset:
        pre_offset = phosphor_fields[:offset]
        edit_range = phosphor_fields[offset:]
        decay_range = decay_fields[offset:]
    else:
        pre_offset = None
        edit_range = phosphor_fields
        decay_range = decay_fields

    # Given a scan from TFF:
    # NewTop WarmupBtm OldTop NewBtm NewTop OldBtm OldTop NewBtm NewTop…
    # or BFF:
    # NewBtm WarmupTop OldBtm NewTop NewBtm OldTop OldBtm NewTop NewBtm…
    # Decay cadence:
    # -      ^         ^      -      -      ^      ^      -      -
    fresh_fields = edit_range.std.SelectEvery(
        cycle=4,
        offsets=(0, 3),
        modify_duration=False
    )
    old_fields = edit_range.std.SelectEvery(
        cycle=4,
        offsets=(1, 2),
        modify_duration=False
    )
    decayed_old_fields = decay_range.std.SelectEvery(
        cycle=4,
        offsets=(1, 2),
        modify_duration=False
    )

    mask_format = core.query_video_format(
        color_family=ColorFamily.GRAY,
        sample_type=phosphor_fields.format.sample_type,
        bits_per_sample=phosphor_fields.format.bits_per_sample
    )
    if mask_format.sample_type == SampleType.FLOAT:
        # Float masks take the fractional factor directly. Rounding here
        # would quantize the decay to all-or-nothing (round(0.3) == 0).
        mask_color = float(factor)
    else:
        # Integer masks scale the factor to the sample range.
        mask_max = (2 ** mask_format.bits_per_sample) - 1
        mask_color = round(factor * mask_max)
    mask = old_fields.std.BlankClip(
        length=len(old_fields),
        format=mask_format,
        color=mask_color
    )

    # Chroma planes only decayed if no vertical subsampling, otherwise
    # our decay bleeds into the newly painted scanlines.
    if include_chroma:
        decay_planes = tuple(range(phosphor_fields.format.num_planes))
    else:
        decay_planes = (0,)

    decayed_fields = old_fields.std.MaskedMerge(
        clipb=decayed_old_fields,
        mask=mask,
        planes=decay_planes
    )
    edited_interleaved = core.std.Interleave(
        (fresh_fields, decayed_fields),
        modify_duration=False
    )
    edited_ordered = edited_interleaved.std.SelectEvery(
        cycle=4,
        offsets=(0, 1, 3, 2),
        modify_duration=False
    )

    if offset:
        return pre_offset + edited_ordered
    return edited_ordered


def _brighten_fresh_fields(
    phosphor_fields: VideoNode,
    factor: Factor,
    offset=0
):
    """Returns a new clip of scanned field frames where the newly
    scanned (fresh) field is brightened."""
    if offset:
        pre_offset = phosphor_fields[:offset]
        edit_range = phosphor_fields[offset:]
    else:
        pre_offset = None
        edit_range = phosphor_fields

    # Given a scan from TFF:
    # NewTop WarmupBtm OldTop NewBtm NewTop OldBtm OldTop NewBtm NewTop…
    # or BFF:
    # NewBtm WarmupTop OldBtm NewTop NewBtm OldTop OldBtm NewTop NewBtm…
    # Brighten cadence:
    # ^      -         -      ^      ^      -      -      ^      ^
    fresh_fields = edit_range.std.SelectEvery(
        cycle=4,
        offsets=(0, 3),
        modify_duration=False
    )
    brightened_fresh_fields = brighten(fresh_fields, factor)

    old_fields = edit_range.std.SelectEvery(
        cycle=4,
        offsets=(1, 2),
        modify_duration=False
    )

    edited_interleaved = core.std.Interleave(
        (brightened_fresh_fields, old_fields),
        modify_duration=False
    )
    edited_ordered = edited_interleaved.std.SelectEvery(
        cycle=4,
        offsets=(0, 1, 3, 2),
        modify_duration=False
    )

    if offset:
        return pre_offset + edited_ordered
    return edited_ordered


def _blend_vertically(clip: VideoNode, kernel: Resizer) -> VideoNode:
    """Instead of typical Bob deinterlacing that takes advantage of temporal
    changes in a field, this deinterlacer simply plays back the interlaced
    fields at their original field rate in their correct position, but blends
    each moment.

    This is a nice fallback when the original material flickers in a
    bob-deinterlacer, is smooth when played back at original field refresh rate
    (like on an old CRT), but combing is still undesirable.
    """

    # Process at high bit depth, assume will be restored downstream.
    processing_format_reqs = {
        'subsampling_h': 0,
        'subsampling_w': 0
    }
    if clip.format.bits_per_sample < 16:
        processing_format_reqs['bits_per_sample'] = 16
    downscaled = convert_format_if_needed(
        clip,
        height=clip.height // 2,
        kernel=kernel,
        **processing_format_reqs
    )
    rescaled = kernel(downscaled, height=clip.height)
    return rescaled


post_processing_routines = {
    InterlacedScanPostProcessor.BLEND_VERTICALLY: _blend_vertically
}

# --------------------------------------------------------------------------
# vsfieldkit/types.py
# --------------------------------------------------------------------------
from decimal import Decimal
from enum import Enum
from fractions import Fraction
from typing import Callable, Union

from vapoursynth import VideoFormat, VideoNode

try:
    from vapoursynth import PresetVideoFormat
except ImportError:
    # Older VapourSynth API versions used the PresetFormat name.
    from vapoursynth import PresetFormat as PresetVideoFormat

Factor = Union[int, float, Decimal, Fraction]

FormatSpecifier = Union[PresetVideoFormat, VideoFormat, VideoNode, int]

Resizer = Callable[..., VideoNode]
"""A function following the same signature as VapourSynth's built in
resize/resample kernels."""
class ChromaSubsampleScanning(Enum):
    SCAN_BLENDED = 'SCAN_BLENDED'
    """Internally, original field chroma is upsampled to have single line
    color. The final image is then resampled to the original subsampling
    format, causing each line's color to be blended with its neighbours.
    Currently the blending is performed *after* post-processing to allow
    post-processors access to the upsampled chroma data.
    """

    SCAN_LATEST = 'SCAN_LATEST'
    """The field that is new in a frame supplies the color for all lines of
    that frame."""

    SCAN_UPSAMPLED = 'SCAN_UPSAMPLED'
    """Returns a clip upsampled to have single line color. For example, if
    YUV420P8 clip was scanned, the resulting clip would be in YUV422P8
    ensure the original colors from each line's source are maintained."""


class InterlacedScanPostProcessor(Enum):
    BLEND_VERTICALLY = 'BLEND_VERTICALLY'
    """Blends the entire contents vertically to remove comb lines. You
    effectively lose close to half of the vertical detail as a side effect."""


class PulldownPattern(Enum):
    """Commonly found pulldown pattern."""

    ADVANCED_PULLDOWN = '2:3:3:2'
    """For 24000/1001p to 60000/1001i where no field-matching is needed
    in IVTC because the only "dirty" frames can be decimated."""

    EURO_PULLDOWN = '2:2:2:2:2:2:2:2:2:2:2:3'
    """For 24p to 50i with no speed-up or speed-down required."""

    MATCHED_PULLDOWN = '2'
    """Each progressive frame is laid out on an interlaced frame."""

    NTSC_FILM_PULLDOWN = '2:3:2:3'
    """For 24000/1001p to 60000/1001i with the least amount of judder.
    """

# --------------------------------------------------------------------------
# vsfieldkit/util.py
# --------------------------------------------------------------------------
from functools import partial
from typing import Iterator, Mapping, Optional, Sequence, Tuple, Union

from vapoursynth import (ColorFamily, ColorRange, Error, FieldBased,
                         VideoFormat, VideoFrame, VideoNode, core)

from vsfieldkit.types import Factor, FormatSpecifier, Resizer

# Format attributes that identify a pixel format (as opposed to resize args).
FORMAT_INTRINSICS = (
    'color_family',
    'sample_type',
    'subsampling_w',
    'subsampling_h',
    'bits_per_sample'
)

# _ChromaLocation values considered vertically centered, mapped to the
# vertically co-sited location they should be resampled as.
VERTICAL_CENTER_CHROMA_LOCS = {
    None: 2,  # assume left, resample as topleft
    0: 2,  # left, resample as topleft
    1: 3  # center, resample as top
}


def assume_bff(clip: VideoNode) -> VideoNode:
    """Returns a new clip where every frame is marked as interlaced in
    bottom-field-first order. Only changes metadata, does not adjust the clip
    content or re-arrange chroma samples.
    """
    return clip.std.SetFrameProp(
        prop='_FieldBased',
        intval=FieldBased.FIELD_BOTTOM.value
    )


def assume_tff(clip: VideoNode) -> VideoNode:
    """Returns a new clip where every frame is marked as interlaced in
    top-field-first order. Only changes metadata, does not adjust the clip
    content or re-arrange chroma samples.
    """
    return clip.std.SetFrameProp(
        prop='_FieldBased',
        intval=FieldBased.FIELD_TOP.value
    )


def assume_progressive(clip: VideoNode) -> VideoNode:
    """Returns a new clip where every frame is marked as progressive. Only
    changes metadata, does not adjust the clip content or re-arrange chroma
    samples.
    """
    return clip.std.SetFrameProp(
        prop='_FieldBased',
        intval=FieldBased.FIELD_PROGRESSIVE.value
    )


def assume_vertically_cosited_chroma(clip: VideoNode) -> VideoNode:
    """Re-tags vertically centered chroma locations as their co-sited
    equivalents (left -> topleft, center -> top). Metadata-only change."""
    def adjust_frame_chroma_loc(n: int, f: VideoFrame):
        chroma_loc = f.props.get('_ChromaLocation')
        if chroma_loc in VERTICAL_CENTER_CHROMA_LOCS:
            new_frame = f.copy()
            new_frame.props['_ChromaLocation'] = (
                VERTICAL_CENTER_CHROMA_LOCS[chroma_loc]
            )
            return new_frame
        return f

    return clip.std.ModifyFrame(
        clips=(clip,),
        selector=adjust_frame_chroma_loc
    )


def double(clip: VideoNode) -> VideoNode:
    """Returns a clip where each original frame is repeated once and plays at
    twice the speed so the played image matches the original in time.

    Not specific to interlacing or deinterlacing, but useful for comparing
    original interlaced pictures with frame-doubled content such as that
    from a bob or phosphor deinterlacer.
    """
    doubled_frames = core.std.Interleave(
        (clip, clip),
        modify_duration=True  # Should double fps, halve per-frame duration
    )
    return doubled_frames


def group_by_combed(
    clip: VideoNode
) -> Iterator[Tuple[Union[bool, None], VideoNode]]:
    """Assuming the passed-in clip was processed by a filter that performs
    comb detection, this splits the clip into segments based on whether they
    are combed or not. The values it generates are True, False, or None if it
    was marked combed, not combed, or not marked as well as the segment of the
    clip."""
    last_combed = ...  # Ellipsis sentinel: no segment started yet
    last_change = 0
    for n, frame in enumerate(clip.frames()):
        is_combed = getattr(frame.props, '_Combed', None)
        if is_combed != last_combed:
            if last_combed is not ...:
                yield last_combed, clip[last_change:n]
            last_change = n

            last_combed = is_combed
    yield last_combed, clip[last_change:]


def group_by_field_order(
    clip: VideoNode
) -> Iterator[Tuple[Union[FieldBased, None], VideoNode]]:
    """
    Generates field orders and clips from the passed in clip split up by
    changes in field order. Field order is expressed as a
    vapoursynth.FieldBased enumeration or None if field order is not
    applicable or not available."""
    last_order = ...  # Ellipsis sentinel: no segment started yet
    last_change = 0
    for n, frame in enumerate(clip.frames()):
        frame_order = getattr(frame.props, '_FieldBased', None)
        if frame_order != last_order:
            if last_order is not ...:
                yield (
                    None if last_order is None else FieldBased(last_order),
                    clip[last_change:n]
                )
            last_change = n

            last_order = frame_order
    yield (
        None if last_order is None else FieldBased(last_order),
        clip[last_change:]
    )


def convert_format_if_needed(
    clip: VideoNode,
    kernel: Resizer = core.resize.Spline36,
    format: Optional[VideoFormat] = None,
    dither_type='random',
    **format_or_resize_specs,
):
    """Resamples the clip with ``kernel`` only when the requested format
    intrinsics or resize arguments actually differ from the clip's current
    state; otherwise returns the clip untouched."""
    existing_fmt_specs = {
        attr: getattr(clip.format, attr)
        for attr in FORMAT_INTRINSICS
    }

    # Layer the requested specs over the existing format, most specific last.
    target_fmt_specs = dict(existing_fmt_specs)
    if format:
        target_fmt_specs.update({
            attr: getattr(format, attr)
            for attr in FORMAT_INTRINSICS
        })
    target_fmt_specs.update({
        arg: value
        for arg, value in format_or_resize_specs.items()
        if arg in FORMAT_INTRINSICS
    })

    resize_args = {
        arg: value
        for arg, value in format_or_resize_specs.items()
        if arg not in FORMAT_INTRINSICS
    }
    if target_fmt_specs != existing_fmt_specs:
        resize_args['format'] = core.query_video_format(**target_fmt_specs).id

    if not resize_args:
        # No changes needed.
        return clip

    if (
        dither_type is not None
        and dither_type != 'none'
        and target_fmt_specs['bits_per_sample'] < 16
    ):
        resize_args['dither_type'] = dither_type

    return kernel(clip, **resize_args)


def black_clip_from_clip(clip, **blank_clip_args):
    """Creates a clip of black color in the same format as the passed in clip.
    Unlike BlankClip, this takes the passed in clip's color range into account
    by rendering the first frame.
    """
    bit_depth = clip.format.bits_per_sample
    is_integer = (clip.format.sample_type == 0)  # 0 == SampleType.INTEGER
    color_range = clip.get_frame(0).props.get('_ColorRange')

    black_planes = []
    # Luma Plane
    if is_integer and color_range == ColorRange.RANGE_LIMITED:
        # Limited-range black is 16 at 8 bits, scaled up for deeper formats.
        floor_multiplier = (2 ** bit_depth) / 256
        limited_black = 16 * floor_multiplier
        black_planes.append(limited_black)
    else:
        black_planes.append(0)
    # First Chroma Plane
    if clip.format.color_family == ColorFamily.YUV:
        # Neutral chroma sits at the midpoint of the sample range.
        black_planes.append((1 << (bit_depth - 1)) if is_integer else 0.5)
    # Fill to rest of the planes
    black_planes += (
        [black_planes[-1]]
        * (clip.format.num_planes - len(black_planes))
    )

    return clip.std.BlankClip(color=black_planes, **blank_clip_args)


def brighten(clip: VideoNode, factor: Factor):
    """Increases intensity across all colors.
    This may not map 1:1 with an H′S′V′ family V′ increase.
    With Y′CbCr, only increases Y′.

    Note this increase ignores the clip's OETF (transfer characteristic)
    so the factor is applied as if the values are linear light levels.
    """
    format: VideoFormat = clip.format
    is_integer = (format.sample_type == 0)  # 0 == SampleType.INTEGER
    color_range = clip.get_frame(0).props.get('_ColorRange')

    if is_integer:
        if color_range == ColorRange.RANGE_LIMITED:
            ceiling_multiplier = (2 ** format.bits_per_sample) / 256
            max_val = 235 * ceiling_multiplier
        else:
            max_val = (2 ** format.bits_per_sample) - 1
    else:
        max_val = 1.0

    # Clamp to the format's ceiling so brightening can't overflow the range.
    plane_expr = f'x {float(factor)} * {max_val} min'
    if format.color_family == ColorFamily.YUV:
        # Empty expr passes the chroma planes through unchanged.
        expr = (plane_expr, '')
    else:
        expr = (plane_expr,)

    return clip.std.Expr(expr)


def format_from_specifier(specifier: FormatSpecifier) -> VideoFormat:
    """Resolves a format, clip, preset, or integer id to a VideoFormat."""
    if isinstance(specifier, VideoFormat):
        return specifier
    elif isinstance(specifier, VideoNode):
        return specifier.format
    return core.get_video_format(specifier)


def require_plugins(
    *plugins: Tuple[str, str]
):
    """Raises vapoursynth.Error unless every (namespace, name) plugin is
    loaded on the core."""
    missing = []
    for plugin_namespace, plugin_name in plugins:
        if not hasattr(core, plugin_namespace):
            missing.append(f'{plugin_namespace} ({plugin_name})')
    if missing:
        raise Error(f'Missing required plugin(s): {",".join(missing)}')


def require_one_of(
    *plugins: Tuple[str, str]
):
    """Raises vapoursynth.Error unless at least one of the (namespace, name)
    plugins is loaded on the core."""
    missing = []
    for plugin_namespace, plugin_name in plugins:
        if hasattr(core, plugin_namespace):
            break
        else:
            missing.append(f'{plugin_namespace} ({plugin_name})')
    else:
        # Loop never hit a loaded plugin.
        raise Error(
            f'Requires any one of these plugins: {",".join(missing)}'
        )


def shift_chroma_to_luma_sited(
    clip: VideoNode,
    tff: bool,
    kernel: Resizer,
    dither_type: Optional[str] = 'random'
) -> VideoNode:
    """Takes a clip marked as having vertically centered chroma and
    assumes that the chroma samples are centered BETWEEN luma samples
    from a prior subsampled state. Establishes new chroma samples that
    resemble the same content but relative from the luma sample
    locations. The _ChromaLocation property will be corrected to
    one that makes more sense (e.g. topleft instead of left).
    """
    if clip.format.color_family != ColorFamily.YUV:
        return clip

    def shift_centered_chroma(
        n: int,
        f: VideoFrame,
        plane_fields: VideoNode,
        field_shifts: Mapping[Optional[int], VideoNode]
    ):
        props = f.props
        if props.get('_ChromaLocation') in VERTICAL_CENTER_CHROMA_LOCS:
            return field_shifts[props.get('_Field')]
        else:
            # Assume was already vertically co-sited
            return plane_fields

    y, cb, cr = clip.std.SplitPlanes()
    shifted_planes = [y]
    for plane in cb, cr:
        plane_fields = plane.std.SeparateFields(tff=tff)
        # Quarter-sample shifts re-site centered chroma onto luma positions;
        # direction depends on which field the line came from.
        shifted_as_top = kernel(
            plane_fields,
            src_top=-1 / 4,
            dither_type=dither_type
        )
        shifted_as_bottom = kernel(
            plane_fields,
            src_top=1 / 4,
            dither_type=dither_type
        )
        field_shifts = {
            None: shifted_as_top if tff else shifted_as_bottom,
            0: shifted_as_bottom,
            1: shifted_as_top
        }

        shifted_plane_fields = plane_fields.std.FrameEval(
            eval=partial(
                shift_centered_chroma,
                plane_fields=plane_fields,
                field_shifts=field_shifts
            ),
            prop_src=(plane_fields,),
            clip_src=(plane_fields, shifted_as_top, shifted_as_bottom)
        )
        shifted_planes.append(
            shifted_plane_fields.std.DoubleWeave()[::2]
        )
    shifted = core.std.ShufflePlanes(
        clips=shifted_planes,
        planes=(0, 0, 0),
        colorfamily=ColorFamily.YUV
    )

    def revise_frame_props(n: int, f: VideoFrame):
        props = f.props
        if '_ChromaLocation' in props:
            corrected_f = f.copy()
            corrected_f.props['_ChromaLocation'] = (
                VERTICAL_CENTER_CHROMA_LOCS.get(
                    props['_ChromaLocation'],
                    props['_ChromaLocation']
                )
            )
            return corrected_f
        return f

    shifted = shifted.std.ModifyFrame(
        clips=(shifted,),
        selector=revise_frame_props
    )

    return shifted


def annotate_bobbed_fields(
    clip: VideoNode,
    original_clip: VideoNode,
    prop: str = 'OriginalField',
    tff: Optional[bool] = None
) -> VideoNode:
    """Adds a property to frames of a bobbed clip to indicate what
    original field position was used to derive the new frame."""
    assert len(clip) == len(original_clip) * 2

    def annotate_frame(n: int, f: Sequence[VideoFrame]):
        bobbed_frame, original_frame = f
        field_based = original_frame.props.get('_FieldBased')
        if field_based == FieldBased.FIELD_TOP:
            tff_int = 1
        elif field_based == FieldBased.FIELD_BOTTOM:
            tff_int = 0
        elif tff is None:
            raise Error(
                'Could not determine field order and tff argument not '
                'supplied.'
            )
        else:
            tff_int = int(tff)

        annotated_frame = bobbed_frame.copy()
        # Even/odd frame index alternates field position within each pair.
        annotated_frame.props[prop] = (n & 1) ^ tff_int
        return annotated_frame

    return clip.std.ModifyFrame(
        clips=(clip, double(original_clip)),
        selector=annotate_frame
    )


def copy_specific_frame_props(
    clip: VideoNode,
    prop_src: VideoNode,
    props: Sequence[str]
):
    """Copies only the named frame props from prop_src onto clip's frames,
    deleting them from the target when absent from the source."""
    if not props:
        return clip

    def copy(n: int, f: Sequence[VideoFrame]):
        original_frame, prop_src_frame = f
        new_frame = original_frame.copy()
        for prop in props:
            if prop in prop_src_frame.props:
                new_frame.props[prop] = prop_src_frame.props[prop]
            elif prop in new_frame.props:
                del new_frame.props[prop]
        return new_frame

    return clip.std.ModifyFrame(clips=(clip, prop_src), selector=copy)

# --------------------------------------------------------------------------
# vsfieldkit/vapoursynth.py
# --------------------------------------------------------------------------
# Values VapourSynth uses for the _Field frame property.
VS_FIELD_FROM_TOP = 1
VS_FIELD_FROM_BOTTOM = 0