├── .flake8 ├── .github ├── dependabot.yml └── workflows │ ├── autotag.yml │ ├── main.yml │ └── pypipublish.yml ├── .gitignore ├── .isort.cfg ├── .mypy.ini ├── .pylintrc ├── .readthedocs.yaml ├── LICENSE ├── README.md ├── TODO.md ├── docs ├── _static │ └── css │ │ └── theme_overrides.css ├── api.rst ├── conf.py ├── index.rst └── requirements.txt ├── examples ├── 1.00 - basic video only.py ├── 1.01 - basic video + audio + merge.py └── 1.02 - basic self runner.py ├── requirements-dev.txt ├── requirements.txt ├── setup.cfg ├── setup.py ├── stubs ├── cv2 │ └── __init__.pyi ├── pymediainfo │ └── __init__.pyi ├── requests │ ├── __init__.pyi │ ├── models.pyi │ └── sessions.pyi ├── requests_toolbelt │ ├── __init__.pyi │ └── multipart │ │ ├── __init__.pyi │ │ └── encoder.pyi ├── scipy │ ├── __init__.pyi │ └── io │ │ ├── __init__.pyi │ │ └── wavfile.pyi └── vapoursynth │ └── __init__.pyi ├── tests └── test_file_info.py └── vardautomation ├── __init__.py ├── _logging ├── __init__.py ├── abstract.py ├── core.py └── helpers.py ├── _metadata.py ├── automation.py ├── binary_path.py ├── chapterisation.py ├── comp.py ├── config.py ├── exception.py ├── language.py ├── logo.txt ├── py.typed ├── render.py ├── tooling ├── __init__.py ├── abstract.py ├── audio.py ├── base.py ├── misc.py ├── mux.py └── video.py ├── utils.py ├── vpathlib.py └── vtypes.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | count = True 3 | ignore = W503 4 | max-line-length = 140 5 | max-doc-length = 200 6 | max-complexity = 10 7 | exclude = stubs/* 8 | show-source = True 9 | statistics = True 10 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "pip" 9 | directory: "/" 10 | schedule: 11 | interval: "daily" 12 | open-pull-requests-limit: 20 13 | -------------------------------------------------------------------------------- /.github/workflows/autotag.yml: -------------------------------------------------------------------------------- 1 | name: Check and create tag 2 | on: 3 | push: 4 | branches: 5 | - master 6 | pull_request: 7 | branches: 8 | - master 9 | 10 | jobs: 11 | new_version: 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v3 16 | 17 | - name: Get version number 18 | run: | 19 | echo "VDA_VER=v$(cat vardautomation/_metadata.py | sed -nr "s/__version__ = '(.*)'/\1/p")" >> $GITHUB_ENV 20 | - name: Check if version exists 21 | env: 22 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 23 | uses: mukunku/tag-exists-action@v1.1.0 24 | id: tagcheck 25 | with: 26 | tag: ${{ env.VDA_VER }} 27 | 28 | - name: Make tag 29 | uses: actions/github-script@v6 30 | if: steps.tagcheck.outputs.exists == 'false' 31 | with: 32 | github-token: ${{ secrets.WORKFLOW_TOKEN }} 33 | script: | 34 | github.rest.git.createRef({ 35 | owner: context.repo.owner, 36 | repo: context.repo.repo, 37 | ref: `refs/tags/${process.env.VDA_VER}`, 38 | sha: context.sha 39 | }) 40 | - name: Fallback 41 | if: steps.tagcheck.outputs.exists == 'true' 42 | run: echo "Nothing to see here, move along citizen" -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: lint 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | windows: 7 | runs-on: windows-latest 8 | strategy: 9 | matrix: 10 | python-version: 11 | - "3.11" 12 | 13 | steps: 14 | - uses: 
actions/checkout@v4 15 | - name: Set up Python ${{ matrix.python-version }} 16 | uses: actions/setup-python@v5 17 | with: 18 | python-version: ${{ matrix.python-version }} 19 | 20 | - name: Get VapourSynth version number 21 | shell: bash 22 | run: | 23 | echo "VS_VER=$(cat requirements.txt | sed -nr "s/VapourSynth==([0-9]+)/\1/p")" >> $GITHUB_ENV 24 | 25 | - name: Install dependencies 26 | run: | 27 | python -m pip install --upgrade pip 28 | pip install vapoursynth-portable==${{ env.VS_VER }} 29 | pip install -r requirements.txt 30 | pip install -r requirements-dev.txt 31 | 32 | - name: Running flake8 33 | continue-on-error: true 34 | run: flake8 vardautomation 35 | 36 | - name: Running mypy 37 | run: mypy vardautomation -------------------------------------------------------------------------------- /.github/workflows/pypipublish.yml: -------------------------------------------------------------------------------- 1 | name: Publish releases to PyPI 2 | on: 3 | push: 4 | tags: 5 | - v[0-9]+** 6 | 7 | jobs: 8 | package_build: 9 | name: Build and push to PyPI 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v3 13 | 14 | - name: Prep Python 15 | uses: actions/setup-python@v4 16 | with: 17 | python-version: '3.11' 18 | 19 | - name: Install build tools 20 | run: | 21 | python -m pip install build setuptools twine --user 22 | continue-on-error: false 23 | - name: Build wheel 24 | id: wheel 25 | run: | 26 | python -m build --wheel --outdir dist/ 27 | continue-on-error: true 28 | - name: Build source distribution 29 | id: sdist 30 | run: | 31 | python -m build --sdist --outdir dist/ 32 | continue-on-error: true 33 | - name: Check the output 34 | run: | 35 | python -m twine check --strict dist/* 36 | continue-on-error: false 37 | - name: Die on failure 38 | if: steps.wheel.outcome != 'success' && steps.sdist.outcome != 'success' 39 | run: exit 1 40 | - name: Publish to PyPI 41 | uses: pypa/gh-action-pypi-publish@release/v1 42 | with: 43 | user: 
__token__ 44 | password: ${{ secrets.PYPI_TOKEN }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | wheels/ 22 | pip-wheel-metadata/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | db.sqlite3-journal 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | docs/make.bat 73 | docs/Makefile 74 | 75 | # PyBuilder 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | .python-version 87 | 88 | # pipenv 89 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
90 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 91 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 92 | # install all needed dependencies. 93 | #Pipfile.lock 94 | 95 | # celery beat schedule file 96 | celerybeat-schedule 97 | 98 | # SageMath parsed files 99 | *.sage.py 100 | 101 | # Environments 102 | .env 103 | .venv 104 | env/ 105 | venv/ 106 | ENV/ 107 | env.bak/ 108 | venv.bak/ 109 | 110 | # Spyder project settings 111 | .spyderproject 112 | .spyproject 113 | 114 | # Rope project settings 115 | .ropeproject 116 | 117 | # mkdocs documentation 118 | /site 119 | 120 | # mypy 121 | .mypy_cache/ 122 | .dmypy.json 123 | dmypy.json 124 | 125 | # Pyre type checker 126 | .pyre/ 127 | 128 | # vscode folder 129 | .vscode 130 | 131 | # vspreview folder 132 | .vspreview 133 | 134 | # louis fork of vsrepo for vs stubs 135 | .vsrepo 136 | 137 | # Random files 138 | vardautomation/shaders.py 139 | .my_test_files 140 | 141 | # Index files 142 | *.ffindex 143 | *.lwi 144 | 145 | # Video test file 146 | *.mkv 147 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | line_length=120 3 | wrap_length=100 4 | multi_line_output=5 -------------------------------------------------------------------------------- /.mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | mypy_path = ./stubs/ 3 | 4 | python_version = 3.11 5 | exclude = vardautomation/logo.txt 6 | 7 | plugins = numpy.typing.mypy_plugin 8 | 9 | ignore_missing_imports = False 10 | 11 | disallow_any_generics = True 12 | 13 | disallow_untyped_defs = True 14 | disallow_incomplete_defs = True 15 | check_untyped_defs = True 16 | disallow_untyped_decorators = True 17 | 18 | no_implicit_optional = True 19 | strict_optional = True 20 | 21 | 
warn_redundant_casts = True 22 | warn_unused_ignores = False 23 | warn_no_return = True 24 | warn_return_any = False 25 | warn_unreachable = True 26 | 27 | ignore_errors = False 28 | 29 | allow_untyped_globals = False 30 | allow_redefinition = False 31 | implicit_reexport = False 32 | strict_equality = True 33 | 34 | show_error_context = False 35 | show_column_numbers = True 36 | show_error_codes = True 37 | color_output = True 38 | error_summary = True 39 | pretty = True -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # A comma-separated list of package or module names from where C extensions may 4 | # be loaded. Extensions are loading into the active Python interpreter and may 5 | # run arbitrary code. 6 | extension-pkg-allow-list=vapoursynth, lxml.etree, cv2 7 | 8 | # A comma-separated list of package or module names from where C extensions may 9 | # be loaded. Extensions are loading into the active Python interpreter and may 10 | # run arbitrary code. (This is an alternative name to extension-pkg-allow-list 11 | # for backward compatibility.) 12 | extension-pkg-whitelist=vapoursynth, lxml.etree, cv2 13 | 14 | # Specify a score threshold to be exceeded before program exits with error. 15 | fail-under=10.0 16 | 17 | # Files or directories to be skipped. They should be base names, not paths. 18 | ignore=CVS 19 | 20 | # Files or directories matching the regex patterns are skipped. The regex 21 | # matches against base names, not paths. 22 | ignore-patterns= 23 | 24 | # Python code to execute, usually for sys.path manipulation such as 25 | # pygtk.require(). 26 | #init-hook= 27 | 28 | # Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the 29 | # number of processors available to use. 30 | jobs=0 31 | 32 | # Control the amount of potential inferred values when inferring a single 33 | # object. 
This can help the performance when dealing with large functions or 34 | # complex, nested conditions. 35 | limit-inference-results=100 36 | 37 | # List of plugins (as comma separated values of python module names) to load, 38 | # usually to register additional checkers. 39 | load-plugins= 40 | 41 | # Pickle collected data for later comparisons. 42 | persistent=yes 43 | 44 | # When enabled, pylint would attempt to guess common misconfiguration and emit 45 | # user-friendly hints instead of false-positive error messages. 46 | suggestion-mode=yes 47 | 48 | # Allow loading of arbitrary C extensions. Extensions are imported into the 49 | # active Python interpreter and may run arbitrary code. 50 | unsafe-load-any-extension=no 51 | 52 | 53 | [MESSAGES CONTROL] 54 | 55 | # Only show warnings with the listed confidence levels. Leave empty to show 56 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. 57 | confidence= 58 | 59 | # Disable the message, report, category or checker with the given id(s). You 60 | # can either give multiple identifiers separated by comma (,) or put this 61 | # option multiple times (only on the command line, not in the configuration 62 | # file where it should appear only once). You can also use "--disable=all" to 63 | # disable everything first and then reenable specific checks. For example, if 64 | # you want to run only the similarities checker, you can use "--disable=all 65 | # --enable=similarities". If you want to run only the classes checker, but have 66 | # no Warning level messages displayed, use "--disable=all --enable=classes 67 | # --disable=W". 
68 | disable= 69 | raw-checker-failed, 70 | bad-inline-option, 71 | locally-disabled, 72 | file-ignored, 73 | suppressed-message, 74 | useless-suppression, 75 | deprecated-pragma, 76 | use-symbolic-message-instead, 77 | missing-class-docstring, 78 | invalid-name, 79 | missing-function-docstring, 80 | missing-module-docstring, 81 | protected-access, 82 | too-few-public-methods, 83 | too-many-locals, 84 | too-many-statements, 85 | too-many-instance-attributes, 86 | too-many-arguments, 87 | too-many-branches, 88 | fixme, 89 | too-many-ancestors 90 | 91 | 92 | # Enable the message, report, category or checker with the given id(s). You can 93 | # either give multiple identifier separated by comma (,) or put this option 94 | # multiple time (only on the command line, not in the configuration file where 95 | # it should appear only once). See also the "--disable" option for examples. 96 | enable=c-extension-no-member 97 | 98 | 99 | [REPORTS] 100 | 101 | # Python expression which should return a score less than or equal to 10. You 102 | # have access to the variables 'error', 'warning', 'refactor', and 'convention' 103 | # which contain the number of messages in each category, as well as 'statement' 104 | # which is the total number of statements analyzed. This score is used by the 105 | # global evaluation report (RP0004). 106 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 107 | 108 | # Template used to display messages. This is a python new-style format string 109 | # used to format the message information. See doc for all details. 110 | #msg-template= 111 | 112 | # Set the output format. Available formats are text, parseable, colorized, json 113 | # and msvs (visual studio). You can also give a reporter class, e.g. 114 | # mypackage.mymodule.MyReporterClass. 115 | output-format=text 116 | 117 | # Tells whether to display a full report or only the messages. 118 | reports=no 119 | 120 | # Activate the evaluation score. 
121 | score=yes 122 | 123 | 124 | [REFACTORING] 125 | 126 | # Maximum number of nested blocks for function / method body 127 | max-nested-blocks=5 128 | 129 | # Complete name of functions that never returns. When checking for 130 | # inconsistent-return-statements if a never returning function is called then 131 | # it will be considered as an explicit return statement and no message will be 132 | # printed. 133 | never-returning-functions=sys.exit,argparse.parse_error 134 | 135 | 136 | [BASIC] 137 | 138 | # Naming style matching correct argument names. 139 | argument-naming-style=snake_case 140 | 141 | # Regular expression matching correct argument names. Overrides argument- 142 | # naming-style. 143 | #argument-rgx= 144 | 145 | # Naming style matching correct attribute names. 146 | attr-naming-style=snake_case 147 | 148 | # Regular expression matching correct attribute names. Overrides attr-naming- 149 | # style. 150 | #attr-rgx= 151 | 152 | # Bad variable names which should always be refused, separated by a comma. 153 | bad-names=foo, 154 | bar, 155 | baz, 156 | toto, 157 | tutu, 158 | tata 159 | 160 | # Bad variable names regexes, separated by a comma. If names match any regex, 161 | # they will always be refused 162 | bad-names-rgxs= 163 | 164 | # Naming style matching correct class attribute names. 165 | class-attribute-naming-style=any 166 | 167 | # Regular expression matching correct class attribute names. Overrides class- 168 | # attribute-naming-style. 169 | #class-attribute-rgx= 170 | 171 | # Naming style matching correct class constant names. 172 | class-const-naming-style=UPPER_CASE 173 | 174 | # Regular expression matching correct class constant names. Overrides class- 175 | # const-naming-style. 176 | #class-const-rgx= 177 | 178 | # Naming style matching correct class names. 179 | class-naming-style=PascalCase 180 | 181 | # Regular expression matching correct class names. Overrides class-naming- 182 | # style. 
183 | #class-rgx= 184 | 185 | # Naming style matching correct constant names. 186 | const-naming-style=UPPER_CASE 187 | 188 | # Regular expression matching correct constant names. Overrides const-naming- 189 | # style. 190 | #const-rgx= 191 | 192 | # Minimum line length for functions/classes that require docstrings, shorter 193 | # ones are exempt. 194 | docstring-min-length=-1 195 | 196 | # Naming style matching correct function names. 197 | function-naming-style=snake_case 198 | 199 | # Regular expression matching correct function names. Overrides function- 200 | # naming-style. 201 | #function-rgx= 202 | 203 | # Good variable names which should always be accepted, separated by a comma. 204 | good-names=i, 205 | j, 206 | k, 207 | ex, 208 | Run, 209 | _ 210 | 211 | # Good variable names regexes, separated by a comma. If names match any regex, 212 | # they will always be accepted 213 | good-names-rgxs= 214 | 215 | # Include a hint for the correct naming format with invalid-name. 216 | include-naming-hint=no 217 | 218 | # Naming style matching correct inline iteration names. 219 | inlinevar-naming-style=any 220 | 221 | # Regular expression matching correct inline iteration names. Overrides 222 | # inlinevar-naming-style. 223 | #inlinevar-rgx= 224 | 225 | # Naming style matching correct method names. 226 | method-naming-style=snake_case 227 | 228 | # Regular expression matching correct method names. Overrides method-naming- 229 | # style. 230 | #method-rgx= 231 | 232 | # Naming style matching correct module names. 233 | module-naming-style=snake_case 234 | 235 | # Regular expression matching correct module names. Overrides module-naming- 236 | # style. 237 | #module-rgx= 238 | 239 | # Colon-delimited sets of names that determine each other's naming style when 240 | # the name regexes allow several styles. 241 | name-group= 242 | 243 | # Regular expression which should only match function or class names that do 244 | # not require a docstring. 
245 | no-docstring-rgx=^_ 246 | 247 | # List of decorators that produce properties, such as abc.abstractproperty. Add 248 | # to this list to register other decorators that produce valid properties. 249 | # These decorators are taken in consideration only for invalid-name. 250 | property-classes=abc.abstractproperty 251 | 252 | # Naming style matching correct variable names. 253 | variable-naming-style=snake_case 254 | 255 | # Regular expression matching correct variable names. Overrides variable- 256 | # naming-style. 257 | #variable-rgx= 258 | 259 | 260 | [FORMAT] 261 | 262 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 263 | expected-line-ending-format= 264 | 265 | # Regexp for a line that is allowed to be longer than the limit. 266 | ignore-long-lines=^\s*(# )?<?https?://\S+>?$ 267 | 268 | # Number of spaces of indent required inside a hanging or continued line. 269 | indent-after-paren=4 270 | 271 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 272 | # tab). 273 | indent-string=' ' 274 | 275 | # Maximum number of characters on a single line. 276 | max-line-length=140 277 | 278 | # Maximum number of lines in a module. 279 | max-module-lines=2000 280 | 281 | # Allow the body of a class to be on the same line as the declaration if body 282 | # contains single statement. 283 | single-line-class-stmt=no 284 | 285 | # Allow the body of an if to be on the same line as the test if there is no 286 | # else. 287 | single-line-if-stmt=no 288 | 289 | 290 | [LOGGING] 291 | 292 | # The type of string formatting that logging methods do. `old` means using % 293 | # formatting, `new` is for `{}` formatting. 294 | logging-format-style=old 295 | 296 | # Logging modules to check that the string format arguments are in logging 297 | # function parameter format. 298 | logging-modules=logging 299 | 300 | 301 | [MISCELLANEOUS] 302 | 303 | # List of note tags to take in consideration, separated by a comma. 
304 | notes=FIXME, 305 | XXX, 306 | TODO 307 | 308 | # Regular expression of note tags to take in consideration. 309 | #notes-rgx= 310 | 311 | 312 | [SIMILARITIES] 313 | 314 | # Ignore comments when computing similarities. 315 | ignore-comments=yes 316 | 317 | # Ignore docstrings when computing similarities. 318 | ignore-docstrings=yes 319 | 320 | # Ignore imports when computing similarities. 321 | ignore-imports=no 322 | 323 | # Minimum lines number of a similarity. 324 | min-similarity-lines=4 325 | 326 | 327 | [SPELLING] 328 | 329 | # Limits count of emitted suggestions for spelling mistakes. 330 | max-spelling-suggestions=4 331 | 332 | # Spelling dictionary name. Available dictionaries: none. To make it work, 333 | # install the 'python-enchant' package. 334 | spelling-dict= 335 | 336 | # List of comma separated words that should be considered directives if they 337 | # appear and the beginning of a comment and should not be checked. 338 | spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: 339 | 340 | # List of comma separated words that should not be checked. 341 | spelling-ignore-words= 342 | 343 | # A path to a file that contains the private dictionary; one word per line. 344 | spelling-private-dict-file= 345 | 346 | # Tells whether to store unknown words to the private dictionary (see the 347 | # --spelling-private-dict-file option) instead of raising a message. 348 | spelling-store-unknown-words=no 349 | 350 | 351 | [STRING] 352 | 353 | # This flag controls whether inconsistent-quotes generates a warning when the 354 | # character used as a quote delimiter is used inconsistently within a module. 355 | check-quote-consistency=no 356 | 357 | # This flag controls whether the implicit-str-concat should generate a warning 358 | # on implicit string concatenation in sequences defined over several lines. 
359 | check-str-concat-over-line-jumps=no 360 | 361 | 362 | [TYPECHECK] 363 | 364 | # List of decorators that produce context managers, such as 365 | # contextlib.contextmanager. Add to this list to register other decorators that 366 | # produce valid context managers. 367 | contextmanager-decorators=contextlib.contextmanager 368 | 369 | # List of members which are set dynamically and missed by pylint inference 370 | # system, and so shouldn't trigger E1101 when accessed. Python regular 371 | # expressions are accepted. 372 | generated-members=vapoursynth.* 373 | 374 | # Tells whether missing members accessed in mixin class should be ignored. A 375 | # mixin class is detected if its name ends with "mixin" (case insensitive). 376 | ignore-mixin-members=yes 377 | 378 | # Tells whether to warn about missing members when the owner of the attribute 379 | # is inferred to be None. 380 | ignore-none=yes 381 | 382 | # This flag controls whether pylint should warn about no-member and similar 383 | # checks whenever an opaque object is returned when inferring. The inference 384 | # can return multiple potential results while evaluating a Python object, but 385 | # some branches might not be evaluated, which results in partial inference. In 386 | # that case, it might be useful to still emit no-member and other checks for 387 | # the rest of the inferred objects. 388 | ignore-on-opaque-inference=no 389 | 390 | # List of class names for which member attributes should not be checked (useful 391 | # for classes with dynamically set attributes). This supports the use of 392 | # qualified names. 393 | ignored-classes=optparse.Values,thread._local,_thread._local 394 | 395 | # List of module names for which member attributes should not be checked 396 | # (useful for modules/projects where namespaces are manipulated during runtime 397 | # and thus existing member attributes cannot be deduced by static analysis). 
It 398 | # supports qualified module names, as well as Unix pattern matching. 399 | ignored-modules=_CoreProxy,vapoursynth 400 | 401 | # Show a hint with possible names when a member name was not found. The aspect 402 | # of finding the hint is based on edit distance. 403 | missing-member-hint=yes 404 | 405 | # The minimum edit distance a name should have in order to be considered a 406 | # similar match for a missing member name. 407 | missing-member-hint-distance=1 408 | 409 | # The total number of similar names that should be taken in consideration when 410 | # showing a hint for a missing member. 411 | missing-member-max-choices=1 412 | 413 | # List of decorators that change the signature of a decorated function. 414 | signature-mutators= 415 | 416 | 417 | [VARIABLES] 418 | 419 | # List of additional names supposed to be defined in builtins. Remember that 420 | # you should avoid defining new builtins when possible. 421 | additional-builtins= 422 | 423 | # Tells whether unused global variables should be treated as a violation. 424 | allow-global-unused-variables=yes 425 | 426 | # List of names allowed to shadow builtins 427 | allowed-redefined-builtins= 428 | 429 | # List of strings which can identify a callback function by name. A callback 430 | # name must start or end with one of those strings. 431 | callbacks=cb_, 432 | _cb 433 | 434 | # A regular expression matching the name of dummy variables (i.e. expected to 435 | # not be used). 436 | dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ 437 | 438 | # Argument names that match this expression will be ignored. Default to name 439 | # with leading underscore. 440 | ignored-argument-names=_.*|^ignored_|^unused_ 441 | 442 | # Tells whether we should check for unused import in __init__ files. 443 | init-import=no 444 | 445 | # List of qualified module names which can have objects that can redefine 446 | # builtins. 
447 | redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io 448 | 449 | 450 | [CLASSES] 451 | 452 | # Warn about protected attribute access inside special methods 453 | check-protected-access-in-special-methods=no 454 | 455 | # List of method names used to declare (i.e. assign) instance attributes. 456 | defining-attr-methods=__init__, 457 | __new__, 458 | setUp, 459 | __post_init__ 460 | 461 | # List of member names, which should be excluded from the protected access 462 | # warning. 463 | exclude-protected=_asdict, 464 | _fields, 465 | _replace, 466 | _source, 467 | _make 468 | 469 | # List of valid names for the first argument in a class method. 470 | valid-classmethod-first-arg=cls 471 | 472 | # List of valid names for the first argument in a metaclass class method. 473 | valid-metaclass-classmethod-first-arg=cls 474 | 475 | 476 | [DESIGN] 477 | 478 | # Maximum number of arguments for function / method. 479 | max-args=5 480 | 481 | # Maximum number of attributes for a class (see R0902). 482 | max-attributes=7 483 | 484 | # Maximum number of boolean expressions in an if statement (see R0916). 485 | max-bool-expr=5 486 | 487 | # Maximum number of branch for function / method body. 488 | max-branches=14 489 | 490 | # Maximum number of locals for function / method body. 491 | max-locals=15 492 | 493 | # Maximum number of parents for a class (see R0901). 494 | max-parents=7 495 | 496 | # Maximum number of public methods for a class (see R0904). 497 | max-public-methods=20 498 | 499 | # Maximum number of return / yield for function / method body. 500 | max-returns=6 501 | 502 | # Maximum number of statements in function / method body. 503 | max-statements=50 504 | 505 | # Minimum number of public methods for a class (see R0903). 506 | min-public-methods=2 507 | 508 | 509 | [IMPORTS] 510 | 511 | # List of modules that can be imported at any level, not just the top level 512 | # one. 
513 | allow-any-import-level=scipy,scipy.io.wavfile,cv2,PIL.Image,PyQt5.QtGui.QImage,struct,zlib 514 | 515 | # Allow wildcard imports from modules that define __all__. 516 | allow-wildcard-with-all=no 517 | 518 | # Analyse import fallback blocks. This can be used to support both Python 2 and 519 | # 3 compatible code, which means that the block might have code that exists 520 | # only in one or another interpreter, leading to false positives when analysed. 521 | analyse-fallback-blocks=no 522 | 523 | # Deprecated modules which should not be used, separated by a comma. 524 | deprecated-modules=optparse,tkinter.tix 525 | 526 | # Output a graph (.gv or any supported image format) of external dependencies 527 | # to the given file (report RP0402 must not be disabled). 528 | ext-import-graph= 529 | 530 | # Output a graph (.gv or any supported image format) of all (i.e. internal and 531 | # external) dependencies to the given file (report RP0402 must not be 532 | # disabled). 533 | import-graph= 534 | 535 | # Output a graph (.gv or any supported image format) of internal dependencies 536 | # to the given file (report RP0402 must not be disabled). 537 | int-import-graph= 538 | 539 | # Force import order to recognize a module as part of the standard 540 | # compatibility libraries. 541 | known-standard-library= 542 | 543 | # Force import order to recognize a module as part of a third party library. 544 | known-third-party=enchant 545 | 546 | # Couples of modules and preferred modules, separated by a comma. 547 | preferred-modules= 548 | 549 | 550 | [EXCEPTIONS] 551 | 552 | # Exceptions that will emit a warning when being caught. Defaults to 553 | # "BaseException, Exception". 
554 | overgeneral-exceptions=BaseException, 555 | Exception 556 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: ubuntu-20.04 5 | tools: 6 | python: "3.10" 7 | 8 | sphinx: 9 | configuration: docs/conf.py 10 | 11 | formats: all 12 | 13 | python: 14 | install: 15 | - requirements: docs/requirements.txt 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Vardë 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Vardautomation 2 | Package I'm using for my encoding stuff 3 | 4 | # How to install vardautomation 5 | This command will install vardautomation and the python dependencies 6 | 7 | ``` 8 | pip install vardautomation -U 9 | ``` 10 | 11 | Or from source 12 | ``` 13 | python -m pip install git+https://github.com/Ichunjo/vardautomation.git -U 14 | ``` 15 | 16 | # Requirements 17 | * Python 3.11 or newer 18 | * **ONLY** VapourSynth r65 & the following plugins `bas`, `ffms2`, `imwri`, `lsmas`, `scxvid`, `wwxd` 19 | * Other modules in `requirements.txt` 20 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | # TODO: 2 | 3 | ~~- Finalise docs~~ 4 | - Add real tests and Github actions for them 5 | ~~- Upload to PyPi~~ 6 | - Replace mediainfo by ffprobe in ffmpeg based audio encoder -------------------------------------------------------------------------------- /docs/_static/css/theme_overrides.css: -------------------------------------------------------------------------------- 1 | @import 'theme.css'; 2 | 3 | @media screen and (min-width:1100px) { 4 | .wy-nav-content { 5 | max-width: 1000px 6 | } 7 | } -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | ============= 2 | API Reference 3 | ============= 4 | 5 | .. automodule:: vardautomation 6 | 7 | Configuration 8 | ============== 9 | .. autoclass:: vardautomation.config.FileInfo 10 | :members: 11 | .. autoclass:: vardautomation.config.FileInfo2 12 | :members: 13 | .. autoclass:: vardautomation.config.BlurayShow 14 | :members: 15 | 16 | Presets 17 | ---------------- 18 | .. 
autodata:: vardautomation.config.PresetGeneric 19 | .. autodata:: vardautomation.config.PresetBD 20 | .. autodata:: vardautomation.config.PresetWEB 21 | .. autodata:: vardautomation.config.PresetAAC 22 | .. autodata:: vardautomation.config.PresetOpus 23 | .. autodata:: vardautomation.config.PresetEAC3 24 | .. autodata:: vardautomation.config.PresetFLAC 25 | .. autodata:: vardautomation.config.PresetChapOGM 26 | .. autodata:: vardautomation.config.PresetChapXML 27 | 28 | Tools 29 | ====== 30 | .. autoclass:: vardautomation.tooling.abstract.Tool 31 | :members: 32 | :show-inheritance: 33 | .. autoclass:: vardautomation.tooling.base.BasicTool 34 | :members: 35 | :inherited-members: 36 | :show-inheritance: 37 | 38 | Video encoders 39 | ---------------- 40 | .. autoclass:: vardautomation.tooling.video.VideoEncoder 41 | :members: 42 | :inherited-members: 43 | :show-inheritance: 44 | .. autoclass:: vardautomation.tooling.video.VideoLanEncoder 45 | :members: 46 | :inherited-members: 47 | :show-inheritance: 48 | .. autoclass:: vardautomation.tooling.video.X265 49 | :members: 50 | :inherited-members: 51 | :show-inheritance: 52 | .. autoclass:: vardautomation.tooling.video.X264 53 | :members: 54 | :inherited-members: 55 | :show-inheritance: 56 | .. autoclass:: vardautomation.tooling.video.LosslessEncoder 57 | :members: 58 | :inherited-members: 59 | :show-inheritance: 60 | .. autoclass:: vardautomation.tooling.video.NVEncCLossless 61 | :members: 62 | :inherited-members: 63 | :show-inheritance: 64 | .. autoclass:: vardautomation.tooling.video.FFV1 65 | :members: 66 | :inherited-members: 67 | :show-inheritance: 68 | 69 | Audio extracters 70 | ---------------- 71 | .. autoclass:: vardautomation.tooling.audio.AudioExtracter 72 | :members: 73 | :inherited-members: 74 | :show-inheritance: 75 | .. autoclass:: vardautomation.tooling.audio.MKVAudioExtracter 76 | :members: 77 | :inherited-members: 78 | .. 
autoclass:: vardautomation.tooling.audio.Eac3toAudioExtracter 79 | :members: 80 | :inherited-members: 81 | .. autoclass:: vardautomation.tooling.audio.FFmpegAudioExtracter 82 | :members: 83 | :inherited-members: 84 | 85 | Audio cutters 86 | ---------------- 87 | .. autoclass:: vardautomation.tooling.audio.AudioCutter 88 | :members: 89 | :inherited-members: 90 | :show-inheritance: 91 | .. autoclass:: vardautomation.tooling.audio.ScipyCutter 92 | :members: 93 | :inherited-members: 94 | :show-inheritance: 95 | .. autoclass:: vardautomation.tooling.audio.EztrimCutter 96 | :members: 97 | :inherited-members: 98 | :show-inheritance: 99 | .. autoclass:: vardautomation.tooling.audio.SoxCutter 100 | :members: 101 | :inherited-members: 102 | :show-inheritance: 103 | .. autoclass:: vardautomation.tooling.audio.PassthroughCutter 104 | :members: 105 | :inherited-members: 106 | :show-inheritance: 107 | 108 | Audio encoders 109 | ---------------- 110 | .. autoclass:: vardautomation.tooling.audio.AudioEncoder 111 | :members: 112 | :inherited-members: 113 | :show-inheritance: 114 | .. autoclass:: vardautomation.tooling.audio.PassthroughAudioEncoder 115 | :members: 116 | :inherited-members: 117 | :show-inheritance: 118 | .. autoclass:: vardautomation.tooling.audio.QAACEncoder 119 | :members: 120 | :inherited-members: 121 | :show-inheritance: 122 | .. autoclass:: vardautomation.tooling.audio.OpusEncoder 123 | :members: 124 | :inherited-members: 125 | :show-inheritance: 126 | .. autoclass:: vardautomation.tooling.audio.FDKAACEncoder 127 | :members: 128 | :inherited-members: 129 | :show-inheritance: 130 | .. autoclass:: vardautomation.tooling.audio.FlacEncoder 131 | :members: 132 | :inherited-members: 133 | :show-inheritance: 134 | .. autoclass:: vardautomation.tooling.audio.BitrateMode 135 | :members: 136 | .. autoclass:: vardautomation.tooling.audio.FlacCompressionLevel 137 | :members: 138 | 139 | Muxing 140 | ------- 141 | .. 
autoclass:: vardautomation.tooling.mux.MatroskaFile 142 | :members: 143 | :show-inheritance: 144 | .. autoclass:: vardautomation.tooling.mux.SplitMode 145 | :members: 146 | .. autoclass:: vardautomation.tooling.mux.Track 147 | :members: 148 | :show-inheritance: 149 | .. autoclass:: vardautomation.tooling.mux.MediaTrack 150 | :members: 151 | :inherited-members: 152 | :show-inheritance: 153 | .. autoclass:: vardautomation.tooling.mux.VideoTrack 154 | :members: 155 | :inherited-members: 156 | :show-inheritance: 157 | .. autoclass:: vardautomation.tooling.mux.AudioTrack 158 | :members: 159 | :inherited-members: 160 | :show-inheritance: 161 | .. autoclass:: vardautomation.tooling.mux.SubtitleTrack 162 | :members: 163 | :inherited-members: 164 | :show-inheritance: 165 | .. autoclass:: vardautomation.tooling.mux.ChaptersTrack 166 | :members: 167 | :inherited-members: 168 | :show-inheritance: 169 | 170 | Utility 171 | ------- 172 | .. autoclass:: vardautomation.tooling.misc.Qpfile 173 | :members: 174 | .. autofunction:: vardautomation.tooling.misc.make_qpfile 175 | .. autofunction:: vardautomation.tooling.misc.get_vs_core 176 | 177 | Automation 178 | ============ 179 | .. autoclass:: vardautomation.automation.SelfRunner 180 | :members: 181 | .. autoclass:: vardautomation.automation.RunnerConfig 182 | :members: 183 | .. autoclass:: vardautomation.automation.Patch 184 | :members: 185 | 186 | Chapters stuff 187 | =============== 188 | .. autoclass:: vardautomation.chapterisation.Chapter 189 | :members: 190 | .. autoclass:: vardautomation.chapterisation.Chapters 191 | :members: 192 | :show-inheritance: 193 | .. autoclass:: vardautomation.chapterisation.OGMChapters 194 | :members: 195 | :inherited-members: 196 | :show-inheritance: 197 | .. autoclass:: vardautomation.chapterisation.MatroskaXMLChapters 198 | :members: 199 | :inherited-members: 200 | :show-inheritance: 201 | .. 
autoclass:: vardautomation.chapterisation.MplsChapters 202 | :members: 203 | :inherited-members: 204 | :show-inheritance: 205 | .. autoclass:: vardautomation.chapterisation.IfoChapters 206 | :members: 207 | :inherited-members: 208 | :show-inheritance: 209 | .. autoclass:: vardautomation.chapterisation.MplsReader 210 | :members: 211 | .. autoclass:: vardautomation.chapterisation.IfoReader 212 | :members: 213 | 214 | Comparison 215 | ============ 216 | .. autoclass:: vardautomation.comp.Writer 217 | :members: 218 | .. autoclass:: vardautomation.comp.PictureType 219 | :members: 220 | .. autoclass:: vardautomation.comp.SlowPicsConf 221 | :members: 222 | .. autoclass:: vardautomation.comp.Comparison 223 | :members: 224 | .. autofunction:: vardautomation.comp.make_comps 225 | 226 | Binary Path 227 | ============ 228 | .. autoclass:: vardautomation.binary_path.BinaryPath 229 | :members: 230 | 231 | Language 232 | ============ 233 | .. autoclass:: vardautomation.language.Lang 234 | :members: 235 | .. autodata:: vardautomation.language.FRENCH 236 | .. autodata:: vardautomation.language.ENGLISH 237 | .. autodata:: vardautomation.language.JAPANESE 238 | .. autodata:: vardautomation.language.UNDEFINED 239 | 240 | VPath 241 | ============ 242 | .. autoclass:: vardautomation.vpathlib.VPath 243 | :members: 244 | 245 | Types 246 | ====== 247 | .. autodata:: vardautomation.types.AnyPath 248 | .. autodata:: vardautomation.types.UpdateFunc 249 | .. autodata:: vardautomation.types.VPSIdx 250 | 251 | Internal functions 252 | ================== 253 | .. autoclass:: vardautomation.utils.Properties 254 | :members: 255 | .. autofunction:: vardautomation.render.clip_async_render 256 | .. autofunction:: vardautomation.render.audio_async_render 257 | .. autoclass:: vardautomation.render.WaveFormat 258 | :members: 259 | .. 
autoclass:: vardautomation.render.WaveHeader 260 | :members: -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | import sys 10 | from pathlib import Path 11 | from typing import Any, Dict 12 | 13 | sys.path.insert(0, str(Path('..').resolve())) 14 | 15 | # -- Project information ----------------------------------------------------- 16 | project = 'vardautomation' 17 | copyright = '2021, Ichunjo' 18 | author = 'Ichunjo' 19 | 20 | # The full version, including alpha/beta/rc tags 21 | meta: Dict[str, Any] = {} 22 | # Stoled this from vsutil 23 | with Path('../vardautomation/_metadata.py').resolve().open() as f: 24 | exec(f.read(), meta) 25 | 26 | version = release = meta['__version__'] 27 | 28 | 29 | # -- General configuration --------------------------------------------------- 30 | extensions = [ 31 | 'sphinx.ext.autodoc', 32 | 'sphinx.ext.autosummary', 33 | 'sphinx.ext.todo', 34 | 'sphinx_autodoc_typehints', 35 | ] 36 | 37 | # Add any paths that contain templates here, relative to this directory. 38 | templates_path = ['_templates'] 39 | 40 | # The language for content autogenerated by Sphinx. 41 | language = 'en' 42 | 43 | # List of patterns, relative to source directory, that match files and 44 | # directories to ignore when looking for source files. 45 | # This pattern also affects html_static_path and html_extra_path. 
46 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 47 | 48 | 49 | # -- Options for HTML output ------------------------------------------------- 50 | 51 | # The theme to use for HTML and HTML Help pages. 52 | html_theme = "sphinx_rtd_theme" 53 | 54 | # Add any paths that contain custom static files (such as style sheets) here, 55 | # relative to this directory. They are copied after the builtin static files, 56 | # so a file named "default.css" will overwrite the builtin "default.css". 57 | html_static_path = ['_static'] 58 | html_css_files = ['css/theme_overrides.css'] 59 | html_style = 'css/theme_overrides.css' 60 | 61 | 62 | # -- Extension configuration ------------------------------------------------- 63 | autodoc_member_order = 'bysource' # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_member_order 64 | # Shouldn't affect anything since 65 | # we're using sphinx_autodoc_typehints 66 | autodoc_typehints = 'description' # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_typehints 67 | autoclass_content = 'both' # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autoclass_content 68 | autosummary_generate = True # https://www.sphinx-doc.org/en/master/usage/extensions/autosummary.html?highlight=autosummary_generate#confval-autosummary_generate # noqa: E501 69 | autodoc_mock_imports = [ 70 | 'vapoursynth', 'vsutil', 71 | 'acsuite' # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_mock_imports 72 | ] 73 | smartquotes = True # https://www.sphinx-doc.org/en/master/usage/configuration.html?highlight=smartquotes#confval-smartquotes 74 | html_show_sphinx = False # https://www.sphinx-doc.org/en/master/usage/configuration.html?highlight=smartquotes#confval-html_show_sphinx 75 | pygments_style = 'sphinx' # https://www.sphinx-doc.org/en/master/usage/configuration.html?highlight=pygments_style#confval-pygments_style 76 | 77 | 
_meta_types = [ 78 | 'AnyPath', 'DuplicateFrame', 'Element', 79 | 'Trim', 'UpdateFunc', 'VPSIdx', 'ElementTree' 80 | ] 81 | autodoc_type_aliases = { # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_type_aliases 82 | alias_type: f'vardautomation.types.{alias_type}' 83 | for alias_type in _meta_types 84 | } 85 | autodoc_preserve_defaults = True # https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html#confval-autodoc_preserve_defaults 86 | 87 | # -- Options for todo extension ---------------------------------------------- 88 | 89 | # If true, `todo` and `todoList` produce output, else they produce nothing. 90 | todo_include_todos = True 91 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Contents 2 | ======== 3 | 4 | .. toctree:: 5 | :maxdepth: 4 6 | 7 | api 8 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx-autodoc-typehints~=1.19.5 2 | loguru~=0.7.2 3 | lxml~=5.2.1 4 | pymediainfo~=6.1.0 5 | requests~=2.31.0 6 | langcodes~=3.3.0 7 | language-data~=1.2 8 | psutil~=5.9.8 9 | pyparsebluray~=0.1.4 10 | pyparsedvd~=0.0.3 11 | pytimeconv~=0.0.2 12 | requests_toolbelt~=1.0.0 13 | rich~=13.7.1 14 | typing-extensions~=4.11.0 15 | numpy~=1.26.4 16 | -------------------------------------------------------------------------------- /examples/1.00 - basic video only.py: -------------------------------------------------------------------------------- 1 | import vapoursynth as vs 2 | from vsutil import depth 3 | 4 | from vardautomation import X265, FileInfo, PresetAAC, PresetBD 5 | 6 | core = vs.core 7 | 8 | FILE = FileInfo('path/to/your/file.m2ts', (24, -24), preset=[PresetBD, PresetAAC]) 9 | clip = depth(FILE.clip_cut, 32) 10 | 11 | ... 
12 | """Filtering process""" 13 | ... 14 | 15 | out = depth(clip, 10) 16 | 17 | 18 | if __name__ == '__main__': 19 | X265('path/to/your/x265/settings').run_enc(out, FILE) 20 | else: 21 | out.set_output(0) 22 | -------------------------------------------------------------------------------- /examples/1.01 - basic video + audio + merge.py: -------------------------------------------------------------------------------- 1 | import vapoursynth as vs 2 | from vsutil import depth 3 | 4 | from vardautomation import ( 5 | X265, EztrimCutter, FFmpegAudioExtracter, FileInfo, MatroskaFile, PresetAAC, PresetBD, 6 | QAACEncoder 7 | ) 8 | 9 | core = vs.core 10 | 11 | FILE = FileInfo('path/to/your/file.m2ts', (24, -24), preset=[PresetBD, PresetAAC]) 12 | clip = depth(FILE.clip_cut, 32) 13 | 14 | ... 15 | """Filtering process""" 16 | ... 17 | 18 | out = depth(clip, 10) 19 | 20 | 21 | if __name__ == '__main__': 22 | X265('path/to/your/x265/settings').run_enc(out, FILE) 23 | FFmpegAudioExtracter(FILE, track_in=1, track_out=1).run() 24 | EztrimCutter(FILE, track=1).run() 25 | QAACEncoder(FILE, track=1).run() 26 | MatroskaFile.automux(FILE) 27 | else: 28 | out.set_output(0) 29 | -------------------------------------------------------------------------------- /examples/1.02 - basic self runner.py: -------------------------------------------------------------------------------- 1 | import vapoursynth as vs 2 | from vardautomation import (EztrimCutter, FileInfo, Mux, PresetAAC, PresetBD, 3 | QAACEncoder, X265Encoder, FFmpegAudioExtracter, RunnerConfig, SelfRunner) 4 | from vsutil import depth 5 | 6 | core = vs.core 7 | 8 | FILE = FileInfo('path/to/your/file.m2ts', (24, -24), preset=[PresetBD, PresetAAC]) 9 | clip = depth(FILE.clip_cut, 32) 10 | 11 | ... 12 | """Filtering process""" 13 | ... 
14 | 15 | out = depth(clip, 10) 16 | 17 | 18 | if __name__ == '__main__': 19 | config = RunnerConfig( 20 | X265Encoder('path/to/your/x265/settings'), 21 | a_extracters=FFmpegAudioExtracter(FILE, track_in=1, track_out=1), 22 | a_cutters=EztrimCutter(FILE, track=1), 23 | a_encoders=QAACEncoder(FILE, track=1), 24 | muxer=Mux(FILE) 25 | ) 26 | 27 | SelfRunner(out, FILE, config).run() 28 | else: 29 | out.set_output(0) 30 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | flake8==7.0.0 2 | loguru-mypy==0.0.4 3 | lxml-stubs==0.5.1 4 | mypy==1.9.0 5 | mypy-extensions==1.0.0 6 | pylint==3.1.0 7 | PyQt5-stubs==5.15.6.0 8 | types-attrs==19.1.0 9 | types-commonmark==0.9.2.20240106 10 | types-Pillow==10.2.0.20240406 11 | types-psutil==5.9.5.20240316 12 | types-Pygments==2.17.0.20240310 13 | types-setuptools==69.2.0.20240317 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | langcodes~=3.3.0 2 | language-data~=1.2 3 | loguru~=0.7.2 4 | lxml~=5.2.1 5 | numpy~=1.26.4 6 | psutil~=5.9.8 7 | pymediainfo~=6.1.0 8 | pyparsebluray~=0.1.4 9 | pyparsedvd~=0.0.3 10 | pytimeconv~=0.0.2 11 | requests~=2.31.0 12 | requests_toolbelt~=1.0.0 13 | rich~=13.7.1 14 | typing-extensions~=4.11.0 15 | VapourSynth>=65 16 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description_file=README.md 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import Any, Dict 3 | 4 | from setuptools import setup 5 | 6 | meta: Dict[str, 
Any] = {} 7 | exec(Path('vardautomation/_metadata.py').read_text(), meta := dict[str, str]()) 8 | 9 | with open('README.md', encoding='utf-8') as fh: 10 | long_description = fh.read() 11 | 12 | with open('requirements.txt', encoding='utf-8') as fh: 13 | install_requires = fh.read() 14 | 15 | NAME = 'vardautomation' 16 | 17 | setup( 18 | name=NAME, 19 | version=meta['__version__'], 20 | author=meta['__author__'], 21 | author_email=meta['__email__'], 22 | description='Encoding automation tools via Vapoursynth', 23 | long_description=long_description, 24 | long_description_content_type='text/markdown', 25 | packages=['vardautomation', 'vardautomation.tooling', 'vardautomation._logging'], 26 | package_data={ 27 | 'vardautomation': ['py.typed', 'logo.txt'], 28 | }, 29 | url='https://github.com/Ichunjo/vardautomation', 30 | zip_safe=False, 31 | classifiers=[ 32 | 'Programming Language :: Python :: 3', 33 | 'License :: OSI Approved :: MIT License', 34 | 'Operating System :: OS Independent', 35 | ], 36 | python_requires='>=3.11', 37 | install_requires=install_requires, 38 | ) 39 | -------------------------------------------------------------------------------- /stubs/cv2/__init__.pyi: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional, Sequence 2 | 3 | from numpy.typing import NDArray 4 | 5 | IMWRITE_PNG_COMPRESSION: int 6 | 7 | 8 | def imwrite(filename: str, img: NDArray[Any], params: Optional[Sequence[Any]] = ...) -> None: 9 | "imwrite(filename, img[, params]) -> retval\n. @brief Saves an image to a specified file.\n. \n. The function imwrite saves the image to the specified file. The image format is chosen based on the\n. filename extension (see cv::imread for the list of extensions). In general, only 8-bit\n. single-channel or 3-channel (with 'BGR' channel order) images\n. can be saved using this function, with these exceptions:\n. \n. 
- 16-bit unsigned (CV_16U) images can be saved in the case of PNG, JPEG 2000, and TIFF formats\n. - 32-bit float (CV_32F) images can be saved in PFM, TIFF, OpenEXR, and Radiance HDR formats;\n. 3-channel (CV_32FC3) TIFF images will be saved using the LogLuv high dynamic range encoding\n. (4 bytes per pixel)\n. - PNG images with an alpha channel can be saved using this function. To do this, create\n. 8-bit (or 16-bit) 4-channel image BGRA, where the alpha channel goes last. Fully transparent pixels\n. should have alpha set to 0, fully opaque pixels should have alpha set to 255/65535 (see the code sample below).\n. - Multiple images (vector of Mat) can be saved in TIFF format (see the code sample below).\n. \n. If the format, depth or channel order is different, use\n. Mat::convertTo and cv::cvtColor to convert it before saving. Or, use the universal FileStorage I/O\n. functions to save the image to XML or YAML format.\n. \n. The sample below shows how to create a BGRA image, how to set custom compression parameters and save it to a PNG file.\n. It also demonstrates how to save multiple images in a TIFF file:\n. @include snippets/imgcodecs_imwrite.cpp\n. @param filename Name of the file.\n. @param img (Mat or vector of Mat) Image or Images to be saved.\n. @param params Format-specific parameters encoded as pairs (paramId_1, paramValue_1, paramId_2, paramValue_2, ... .) see cv::ImwriteFlags" 10 | ... 11 | 12 | def merge(mv: Sequence[NDArray[Any]], dst: Optional[NDArray[Any]] = ...) -> NDArray[Any]: 13 | 'merge(mv[, dst]) -> dst\n. @overload\n. @param mv input vector of matrices to be merged; all the matrices in mv must have the same\n. size and the same depth.\n. @param dst output array of the same size and the same depth as mv[0]; The number of channels will\n. be the total number of channels in the matrix array.' 14 | ... 
15 | -------------------------------------------------------------------------------- /stubs/pymediainfo/__init__.pyi: -------------------------------------------------------------------------------- 1 | import xml.etree.ElementTree as ET 2 | from typing import Any, Dict, List, Optional, overload 3 | 4 | 5 | class Track: 6 | def __eq__(self, other: object) -> bool: ... 7 | def __getattribute__(self, name: str) -> Any: ... 8 | track_type: Any 9 | def __init__(self, xml_dom_fragment: ET.Element) -> None: ... 10 | def to_data(self) -> Dict[str, Any]: ... 11 | 12 | class MediaInfo: 13 | def __eq__(self, other: object) -> bool: ... 14 | tracks: Any 15 | def __init__(self, xml: str, encoding_errors: str = ...) -> None: ... 16 | @property 17 | def general_tracks(self) -> List[Track]: ... 18 | @property 19 | def video_tracks(self) -> List[Track]: ... 20 | @property 21 | def audio_tracks(self) -> List[Track]: ... 22 | @property 23 | def text_tracks(self) -> List[Track]: ... 24 | @property 25 | def other_tracks(self) -> List[Track]: ... 26 | @property 27 | def image_tracks(self) -> List[Track]: ... 28 | @property 29 | def menu_tracks(self) -> List[Track]: ... 30 | @classmethod 31 | def can_parse(cls, library_file: Optional[str] = ...) -> bool: ... 32 | @overload 33 | @classmethod 34 | def parse(cls, filename: Any, library_file: Optional[str] = ..., cover_data: bool = ..., encoding_errors: str = ..., parse_speed: float = ..., full: bool = ..., legacy_stream_display: bool = ..., mediainfo_options: Optional[Dict[str, str]] = ..., output: None = ...) -> MediaInfo: ... # type: ignore 35 | @overload 36 | @classmethod 37 | def parse(cls, filename: Any, library_file: Optional[str] = ..., cover_data: bool = ..., encoding_errors: str = ..., parse_speed: float = ..., full: bool = ..., legacy_stream_display: bool = ..., mediainfo_options: Optional[Dict[str, str]] = ..., output: str = ...) -> str: ... 38 | def to_data(self) -> Dict[str, Any]: ... 39 | def to_json(self) -> str: ... 
40 | -------------------------------------------------------------------------------- /stubs/requests/__init__.pyi: -------------------------------------------------------------------------------- 1 | from .models import Response as Response 2 | from .sessions import Session as Session 3 | -------------------------------------------------------------------------------- /stubs/requests/models.pyi: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Generator, Iterator 2 | 3 | 4 | REDIRECT_STATI: Any 5 | DEFAULT_REDIRECT_LIMIT: int 6 | CONTENT_CHUNK_SIZE: Any 7 | ITER_CHUNK_SIZE: int 8 | 9 | 10 | class Response: 11 | __attrs__: Any 12 | status_code: Any 13 | headers: Any 14 | raw: Any 15 | url: Any 16 | encoding: Any 17 | history: Any 18 | reason: Any 19 | cookies: Any 20 | elapsed: Any 21 | request: Any 22 | def __init__(self) -> None: ... 23 | def __enter__(self) -> Response: ... 24 | def __exit__(self, *args: Any) -> None: ... 25 | def __bool__(self) -> bool: ... 26 | def __nonzero__(self) -> bool: ... 27 | def __iter__(self) -> Iterator[bytes | str]: ... 28 | @property 29 | def ok(self) -> bool: ... 30 | @property 31 | def is_redirect(self) -> bool: ... 32 | @property 33 | def is_permanent_redirect(self) -> bool: ... 34 | @property 35 | def next(self) -> Any: ... 36 | @property 37 | def apparent_encoding(self) -> Any: ... 38 | def iter_content(self, chunk_size: int = ..., decode_unicode: bool = ...) -> Generator[bytes | str, None, None]: ... 39 | def iter_lines(self, chunk_size: int=..., decode_unicode: bool = ..., delimiter: Any | None = ...) -> None: ... 40 | @property 41 | def content(self) -> bytes: ... 42 | @property 43 | def text(self) -> str: ... 44 | def json(self, **kwargs: Any) -> Any: ... 45 | @property 46 | def links(self) -> Dict[Any, Any]: ... 47 | def raise_for_status(self) -> None: ... 48 | def close(self) -> None: ... 
49 | -------------------------------------------------------------------------------- /stubs/requests/sessions.pyi: -------------------------------------------------------------------------------- 1 | from typing import IO, Any, AnyStr, Dict, List, Tuple 2 | 3 | from .models import DEFAULT_REDIRECT_LIMIT as DEFAULT_REDIRECT_LIMIT 4 | from .models import REDIRECT_STATI as REDIRECT_STATI 5 | from .models import Response 6 | 7 | 8 | class SessionRedirectMixin: ... 9 | 10 | class Session(SessionRedirectMixin): 11 | __attrs__: Any 12 | headers: Any 13 | auth: Any 14 | proxies: Any 15 | hooks: Any 16 | params: Any 17 | stream: bool 18 | verify: bool 19 | cert: Any 20 | max_redirects: Any 21 | trust_env: bool 22 | cookies: Any 23 | adapters: Any 24 | def __init__(self) -> None: ... 25 | def __enter__(self) -> Session: ... 26 | def __exit__(self, *args: Any) -> None: ... 27 | def get(self, url: str, **kwargs: Any) -> Response: ... 28 | def post(self, url: str, data: Dict[Any, Any] | List[Tuple[Any, ...]] | bytes | IO[AnyStr] | None = ..., json: Any | None = ..., **kwargs: Any) -> Response: ... 29 | def close(self) -> None: ... 
30 | -------------------------------------------------------------------------------- /stubs/requests_toolbelt/__init__.pyi: -------------------------------------------------------------------------------- 1 | from .multipart import MultipartEncoder as MultipartEncoder 2 | -------------------------------------------------------------------------------- /stubs/requests_toolbelt/multipart/__init__.pyi: -------------------------------------------------------------------------------- 1 | from .encoder import MultipartEncoder as MultipartEncoder 2 | -------------------------------------------------------------------------------- /stubs/requests_toolbelt/multipart/encoder.pyi: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict 2 | 3 | class MultipartEncoder: 4 | boundary_value: Any 5 | boundary: Any 6 | encoding: Any 7 | fields: Any 8 | finished: bool 9 | parts: Any 10 | def __init__(self, fields: Dict[str, Any], boundary: Any | None = ..., encoding: str = ...) -> None: ... 11 | @property 12 | def len(self) -> int: ... 13 | @property 14 | def content_type(self) -> str: ... 15 | def to_string(self) -> bytes: ... 16 | def read(self, size: int = ...) -> bytes: ... 
17 | -------------------------------------------------------------------------------- /stubs/scipy/__init__.pyi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ichunjo/vardautomation/abd5f487c3bfb2a4e939a3d3dadbb405e30d729e/stubs/scipy/__init__.pyi -------------------------------------------------------------------------------- /stubs/scipy/io/__init__.pyi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ichunjo/vardautomation/abd5f487c3bfb2a4e939a3d3dadbb405e30d729e/stubs/scipy/io/__init__.pyi -------------------------------------------------------------------------------- /stubs/scipy/io/wavfile.pyi: -------------------------------------------------------------------------------- 1 | from io import BufferedReader, BufferedWriter 2 | from os import PathLike 3 | from typing import Tuple 4 | 5 | from numpy import float32, int16, int32, uint8 6 | from numpy.typing import NDArray 7 | 8 | 9 | class WavFileWarning(UserWarning): ... 10 | 11 | 12 | def read(filename: str | PathLike[str] | BufferedReader, mmap: bool = ...) -> Tuple[int, NDArray[uint8 | int16 | int32 | float32]]: ... 13 | 14 | def write(filename: str | PathLike[str] | BufferedWriter, rate: int, data: NDArray[uint8 | int16 | int32 | float32]) -> None: ... 
15 | -------------------------------------------------------------------------------- /tests/test_file_info.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | from vardautomation import FileInfo 3 | 4 | 5 | FILEINFO_ATTR: List[str] = [ 6 | 'path', 7 | 'path_without_ext', 8 | 'work_filename', 9 | 'idx', 10 | 'preset', 11 | 'name', 12 | 'workdir', 13 | 'a_src', 14 | 'a_src_cut', 15 | 'a_enc_cut', 16 | 'chapter', 17 | 'clip', 18 | '_trims_or_dfs', 19 | 'clip_cut', 20 | 'name_clip_output', 21 | 'name_file_final', 22 | 'name_clip_output_lossless', 23 | 'do_lossless', 24 | 'qpfile', 25 | 'do_qpfile' 26 | ] 27 | 28 | 29 | def test_file_info_attr() -> None: 30 | file = FileInfo('tests/video_file.mkv') 31 | 32 | assert len(vars(file)) == len(FILEINFO_ATTR) 33 | 34 | for attr in vars(file): 35 | assert attr in FILEINFO_ATTR 36 | 37 | 38 | # def test_file_info_trims() -> None: 39 | # file = FileInfo('tests/video_file.mkv', trims_or_dfs=(24, -24)) 40 | -------------------------------------------------------------------------------- /vardautomation/__init__.py: -------------------------------------------------------------------------------- 1 | """Collection of classes and helper functions to automate encoding""" 2 | # flake8: noqa 3 | from ._logging import * 4 | from ._metadata import __author__, __version__, version # type: ignore[pylance] 5 | from .automation import * 6 | from .binary_path import * 7 | from .chapterisation import * 8 | from .comp import * 9 | from .config import * 10 | from .language import * 11 | from .render import * 12 | from .tooling import * 13 | from .vpathlib import * 14 | from .vtypes import * 15 | 16 | # for wildcard imports 17 | _mods = [ 18 | 'automation', 'binary_path', 'chapterisation', 'comp', 'config', 'language', 19 | 'render', 'tooling', 'vtypes', 'vpathlib' 20 | ] 21 | 22 | __all__ = [] 23 | for _pkg in _mods: 24 | __all__ += __import__(__name__ + '.' 
from abc import ABC, ABCMeta
from threading import Lock
from typing import Any, Dict


class SingletonMeta(ABCMeta):
    """Metaclass caching exactly one instance per class, in a thread-safe way."""

    _instances: Dict[object, Any] = {}
    _lock: Lock = Lock()

    def __call__(cls, *args: Any, **kwargs: Any) -> Any:
        # Serialise construction so two threads cannot both build an instance.
        with cls._lock:
            if cls in cls._instances:
                return cls._instances[cls]
            obj = super().__call__(*args, **kwargs)
            cls._instances[cls] = obj
            return obj


class Singleton(ABC, metaclass=SingletonMeta):
    """Abstract base for classes that must only ever have one instance."""

    __slots__ = ()
# Log-level constants mirroring loguru's built-in severities.
# NOTE(review): the colour fields are empty strings here — presumably the
# original colour tags were stripped somewhere upstream; confirm against VCS.
TRACE = LogLevel('TRACE', 5, '')
DEBUG = LogLevel('DEBUG', 10, '')
INFO = LogLevel('INFO', 20, '')
SUCCESS = LogLevel('SUCCESS', 25, '')
WARNING = LogLevel('WARNING', 30, '')
ERROR = LogLevel('ERROR', 40, '')
CRITICAL = LogLevel('CRITICAL', 50, '')
LOG_LEVELS = [TRACE, DEBUG, INFO, SUCCESS, WARNING, ERROR, CRITICAL]


class Logger(Singleton):
    """
    Singleton wrapper around loguru's global logger.

    Configures one stderr sink and exposes level-named helpers (trace/debug/...)
    whose ``error``/``critical`` variants additionally terminate the process.
    """

    __slots__ = ('__id', '__level')

    def __init__(self) -> None:
        # Default verbosity: INFO (20).
        self.__level = 20
        # Replace loguru's default handler with our own stderr sink and
        # register the custom colours; `global_level` is read by loguru_format.
        __ids = loguru.logger.configure(
            handlers=[
                dict(sink=sys.stderr, level=self.__level, format=loguru_format, backtrace=True, diagnose=False),
            ],
            levels=[{'name': log.name, 'color': log.colour} for log in LOG_LEVELS],
            extra=dict(global_level=self.__level)
        )
        # Keep the handler id so set_level() can remove/re-add the sink.
        self.__id = __ids.pop(0)

    @property
    def logger(self) -> loguru.Logger:
        """Loguru logger bound with the current global level."""
        return loguru.logger.bind(global_level=self.__level)

    @property
    def level(self) -> int:
        """Current minimum severity that will be emitted."""
        return self.__level

    def set_level(self, level: int) -> None:
        """Change the minimum severity by swapping the stderr sink."""
        self.__level = level
        loguru.logger.remove(self.__id)
        loguru.logger.add(sys.stderr, level=level, format=loguru_format, backtrace=True, diagnose=False)

    def logo(self) -> None:
        """Print the bundled ASCII-art logo shipped as package data."""
        with open(pkgr.resource_filename('vardautomation', 'logo.txt'), 'r', encoding='utf-8') as logo:
            lines = logo.read()
        self.trace('Displaying that based vardautomation logo')
        self.logger.opt(depth=1, colors=True).info('\n' + lines)
        self.logger.opt(raw=True).info('\n')

    @add_log_attribute(TRACE)
    def trace(self, message: Any, /, depth: int = 1, **kwargs: Any) -> None:
        """Log `message` at TRACE level; `depth` controls caller attribution."""
        self.logger.opt(depth=depth).trace(str(message), **kwargs)

    @add_log_attribute(DEBUG)
    def debug(self, message: Any, /, depth: int = 1, **kwargs: Any) -> None:
        """Log `message` at DEBUG level."""
        self.logger.opt(depth=depth).debug(str(message), **kwargs)

    @add_log_attribute(INFO)
    def info(self, message: Any, /, depth: int = 1, **kwargs: Any) -> None:
        """Log `message` at INFO level; in verbose mode, apply the level colour."""
        if self.__level < 20:
            # `self.info.colour` exists because @add_log_attribute attached it.
            kwargs.setdefault('colour', self.info.colour.replace('', ''))
        self.logger.opt(depth=depth).info(str(message), **kwargs)

    @add_log_attribute(SUCCESS)
    def success(self, message: Any, /, depth: int = 1, **kwargs: Any) -> None:
        """Log `message` at SUCCESS level."""
        self.logger.opt(depth=depth).success(str(message), **kwargs)

    @add_log_attribute(WARNING)
    def warning(self, message: Any, /, depth: int = 1, **kwargs: Any) -> None:
        """Log `message` at WARNING level."""
        if self.__level < 20:
            kwargs.setdefault('colour', '')
        self.logger.opt(depth=depth).warning(str(message), **kwargs)

    # @add_log_attribute(log_level=ERROR)
    def error(self, message: Any, /, exception: bool | BaseException | None = True,
              depth: int = 1, record: bool = False, **kwargs: Any) -> NoReturn:
        """Log at ERROR level, then terminate the process with exit code 1."""
        self.logger.opt(exception=exception, record=record, depth=depth).error(str(message), **kwargs)
        sys.exit(1)

    # @add_log_attribute(log_level=CRITICAL)
    def critical(self, message: Any, /, exception: bool | BaseException | None = True,
                 depth: int = 1, record: bool = False, **kwargs: Any) -> NoReturn:
        """Log at CRITICAL level, then terminate the process with exit code 1."""
        if self.__level < 20:
            kwargs.setdefault('colour', CRITICAL.colour.replace('', ''))
        self.logger.opt(exception=exception, record=record, depth=depth).critical(str(message), **kwargs)
        sys.exit(1)

    @overload
    def catch(self, func: F) -> F:
        ...

    @overload
    def catch(self, **kwargs: Any) -> Callable[[F], F]:
        ...

    def catch(self, func: F | None = None, **kwargs: Any) -> F | Callable[[F], F]:
        """
        Decorator (bare or parametrised) that routes any exception raised by
        the wrapped function through :meth:`critical`, which exits the process.
        """
        if func is None:
            # Parametrised form: logger.catch(foo=...) returns the real decorator.
            return cast(Callable[[F], F], partial(self.catch, **kwargs))

        @wraps(func)
        def _wrapper(*args: Any, **kwrgs: Any) -> Any:
            assert func
            try:
                return func(*args, **kwrgs)
            except Exception as e:
                # depth=2 so loguru attributes the log line to the caller,
                # record=True so the message template can use {record[...]}.
                kwargs_c: Dict[str, Any] = dict(depth=2, record=True) | kwargs
                self.critical(
                    "{record[name]}:{record[line]}: An error has been caught in function '{record[function]}'",
                    e, **kwargs_c
                )

        return _wrapper

    def catch_ctx(self) -> ContextManager[None]:
        """Context-manager variant of :meth:`catch`; exits via sys_exit on error."""
        return cast(ContextManager[None], self.logger.catch(level='CRITICAL', onerror=sys_exit))


# The singleton
logger: Logger = Logger()
38 | + "{name}:{function}:{line} - " 39 | + lvl_c[0] + "{message}" + lvl_c[1] + '\n{exception}' 40 | ) 41 | 42 | 43 | def sys_exit(_: BaseException) -> NoReturn: 44 | sys.exit(1) 45 | 46 | 47 | @final 48 | class _log_func_wrapper(Generic[P, T]): 49 | name: str 50 | no: int 51 | colour: str 52 | colour_close: str 53 | 54 | def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T: # type: ignore[empty-body] 55 | ... 56 | 57 | 58 | def add_log_attribute(log_level: LogLevel) -> Callable[[Callable[Concatenate[Logger, P], T]], _log_func_wrapper[P, T]]: 59 | 60 | def _wrapper(func: Callable[Concatenate[Logger, P], T]) -> _log_func_wrapper[P, T]: 61 | funcw = cast(_log_func_wrapper[P, T], func) 62 | funcw.name = log_level.name 63 | funcw.no = log_level.no 64 | funcw.colour = log_level.colour 65 | funcw.colour_close = close_and_reverse_tags(log_level.colour) 66 | return funcw 67 | 68 | return _wrapper 69 | -------------------------------------------------------------------------------- /vardautomation/_metadata.py: -------------------------------------------------------------------------------- 1 | __author__ = 'Ichunjo' 2 | 3 | __version__ = '1.2.4' 4 | __maintainer__ = 'Ichunjo' 5 | __email__ = 'ichunjo.le.terrible@gmail.com' 6 | __status__ = 'Development' 7 | 8 | 9 | def version() -> str: 10 | return __version__ 11 | -------------------------------------------------------------------------------- /vardautomation/automation.py: -------------------------------------------------------------------------------- 1 | """Automation module""" 2 | 3 | __all__ = [ 4 | 'RunnerConfig', 'SelfRunner', 5 | 6 | 'patch', 'Patch' 7 | ] 8 | 9 | from copy import deepcopy 10 | from dataclasses import dataclass 11 | from enum import Enum, auto 12 | from functools import partial 13 | from itertools import chain 14 | from typing import Callable, List, Optional, Protocol, Sequence, Set, Tuple, TypedDict, cast 15 | 16 | import vapoursynth as vs 17 | 18 | from typing_extensions import NotRequired 
@dataclass(repr=False, eq=False, order=False, unsafe_hash=False, frozen=True, slots=True)
class RunnerConfig:
    """
    Config for the SelfRunner.

    Immutable (frozen) bundle of the tools the runner will invoke, plus the
    order in which the video and audio passes are executed.
    """

    class Order(Enum):
        """Simple enum for priority order"""
        VIDEO = auto()
        AUDIO = auto()

    v_encoder: VideoEncoder
    """Video encoder"""
    v_lossless_encoder: Optional[LosslessEncoder] = None
    """Lossless video encoder"""
    a_extracters: AudioExtracter | Sequence[AudioExtracter] | None = None
    """Audio extracter(s)"""
    a_cutters: AudioCutter | Sequence[AudioCutter] | None = None
    """Audio cutter(s)"""
    a_encoders: AudioEncoder | Sequence[AudioEncoder] | None = None
    """Audio encoder(s)"""
    mkv: MatroskaFile | None = None
    """Muxer"""

    order: Order = Order.VIDEO
    """Priority order"""

    clear_outputs: bool = True
    """Clears all clips set for output in the current environment"""


class _QpFileParams(TypedDict):
    # Optional keyword arguments forwarded to encoders that support a qpfile.
    qpfile_clip: NotRequired[vs.VideoNode]
    qpfile_func: NotRequired[Callable[[vs.VideoNode, AnyPath], Qpfile]]


# Workaround to https://github.com/python/mypy/issues/708
class _PLPFunction(Protocol):
    """Callable taking the lossless file path and returning an indexed clip."""

    def __call__(self, path: VPath) -> vs.VideoNode:
        ...
def _lossless_index(path: VPath) -> vs.VideoNode:
    """Default post-lossless indexer: open the lossless file with L-SMASH."""
    return core.lsmas.LWLibavSource(path.to_str())


class SelfRunner:
    """Self runner interface"""

    clip: vs.VideoNode | Sequence[vs.VideoNode]
    """Clip to be encoded"""

    file: FileInfo
    """FileInfo object"""

    config: RunnerConfig
    """Config of the runner"""

    work_files: CleanupSet
    """
    Intermediate working files:

    The SelfRunner class will add everything it can in this set-like,
    meaning if you want to delete the files you can just do::

        runner = SelfRunner(...)
        runner.run()
        runner.work_files.clear()

    The runner will add these attributes to be deleted:
        * :py:attr:`FileInfo.name_clip_output`
        * :py:attr:`FileInfo.a_src`
        * :py:attr:`FileInfo.a_src_cut`
        * :py:attr:`FileInfo.a_enc_cut`
        * :py:attr:`FileInfo.chapter`

    So if you want to keep some of these files, this is possible::

        runner.work_files.discard(self.file.name_clip_output)
        runner.work_files.discard(self.file.chapter)
    """

    plp_function: _PLPFunction | Callable[[VPath], vs.VideoNode]
    """
    Post Lossless Processing function.
    Set this function if you need some adjustements on the lossless clip
    before running the encode.
    If set, it will be called with the lossless path as argument and must return a VideoNode.
    """

    _qpfile_params: _QpFileParams

    def __init__(self, clip: vs.VideoNode | Sequence[vs.VideoNode], file: FileInfo, /, config: RunnerConfig) -> None:
        """
        :param clip:        Clip to be encoded
        :param file:        FileInfo object
        :param config:      Config of the runner
        """
        self.clip = clip
        self.file = file
        self.config = config
        self.work_files = CleanupSet()
        self.plp_function = _lossless_index
        self._qpfile_params = _QpFileParams()

    def run(self, *, show_logo: bool = True) -> None:
        """
        Main tooling chain

        :param show_logo:   Print the vardautomation logo.
        """
        if show_logo:
            logger.logo()
        logger.info('SelfRunning...')

        # Video then audio by default; RunnerConfig.Order.AUDIO flips them.
        funcs = [self._encode, self._audio_getter]
        if self.config.order == RunnerConfig.Order.AUDIO:
            funcs.reverse()
        funcs.append(self._mux)
        logger.debug(funcs)
        for f in funcs:
            f()

    def inject_qpfile_params(self, qpfile_clip: vs.VideoNode, qpfile_func: Callable[[vs.VideoNode, AnyPath], Qpfile] = make_qpfile) -> None:
        """
        :param qpfile_clip:     Clip to be used to generate the Qpfile
        :param qpfile_func:     Function to be used to generate the Qpfile
        """
        self._qpfile_params['qpfile_clip'] = qpfile_clip
        self._qpfile_params['qpfile_func'] = qpfile_func
        logger.debug(self._qpfile_params)

    def rename_final_file(self, name: AnyPath) -> None:
        """
        Rename the file.name_file_final

        :param name:        New filename
        """
        logger.debug('Renaming')
        self.file.name_file_final = self.file.name_file_final.replace(VPath(name))

    def upload_ftp(self, ftp_name: str, destination: AnyPath, rclone_args: Optional[List[str]] = None) -> None:
        """
        Upload the ``name_file_final`` to a given FTP using rclone

        :param ftp_name:        FTP name
        :param destination:     Path destination
        :param rclone_args:     Additional options, defaults to None
        """
        BasicTool(
            BinaryPath.rclone, ['copy', '--progress'] + (rclone_args if rclone_args else [])
            + [self.file.name_file_final.absolute().as_posix(), f'{ftp_name}:{VPath(destination).to_str()}']
        ).run()

    @logger.catch
    def _encode(self) -> None:  # noqa C901
        """Run the (optional lossless then) final video encode, skipping existing outputs."""
        if self.config.clear_outputs:
            vs.clear_outputs()

        if self.config.v_lossless_encoder:
            if isinstance(self.clip, Sequence):
                raise NotImplementedError(f'{self.__class__.__name__}: Multiple clips for lossless encode isn\'t implemented')
            # Only (re-)encode the lossless intermediate if it isn't on disk yet.
            if not (
                path_lossless
                := self.file.name_clip_output.append_stem(self.config.v_lossless_encoder.suffix_name)
            ).exists():
                self.config.v_lossless_encoder.run_enc(self.clip, self.file)
            # From here on, the clip to encode is the re-indexed lossless file.
            self.clip = self.plp_function(path_lossless)

        if not self.file.name_clip_output.exists():
            if isinstance(self.clip, vs.VideoNode):
                # Single clip: forward qpfile params only if the encoder accepts them.
                if isinstance(self.config.v_encoder, SupportQpfile):
                    self.config.v_encoder.run_enc(self.clip, self.file, **self._qpfile_params)
                else:
                    self.config.v_encoder.run_enc(self.clip, self.file)
            elif isinstance(self.config.v_encoder, SupportManualVFR):
                # Multiple clips imply manual VFR; the encoder emits a timecode file.
                self.config.v_encoder.run_enc(self.clip, self.file, **self._qpfile_params)
                self.work_files.add(self.config.v_encoder.tcfile)
            else:
                raise TypeError(f'{self.__class__.__name__}: Wrong video encoder and/or type of clip')
        self.work_files.add(self.file.name_clip_output)

    def _audio_getter(self) -> None:  # noqa C901
        """Extract, cut and encode audio tracks, skipping any output already on disk."""
        # NOTE(review): FileInfo2 appears to manage its audio through audio
        # clips instead of these external tools — confirm the guard's scope.
        if not isinstance(self.file, FileInfo2):
            if self.config.a_extracters and self.file.a_src:
                for a_extracter in _toseq(self.config.a_extracters):
                    all_a_src = [self.file.a_src.set_track(n) for n in a_extracter.track_out]
                    self.work_files.update(all_a_src)
                    if not any(a_src.exists() for a_src in all_a_src):
                        a_extracter.run()
                    else:
                        logger.warning(f'Skipping "{[p.to_str() for p in all_a_src]}" to extract...')

            if self.config.a_cutters and self.file.a_src_cut:
                for i, a_cutter in enumerate(_toseq(self.config.a_cutters), start=1):
                    self.work_files.add(self.file.a_src_cut.set_track(i))
                    if not self.file.a_src_cut.set_track(i).exists():
                        a_cutter.run()
                    else:
                        logger.warning(f'Skipping "{self.file.a_src_cut.set_track(i).to_str()}" to cut...')

            if self.config.a_encoders and self.file.a_enc_cut:
                for i, a_encoder in enumerate(_toseq(self.config.a_encoders), start=1):
                    self.work_files.add(self.file.a_enc_cut.set_track(i))
                    if not self.file.a_enc_cut.set_track(i).exists():
                        a_encoder.run()
                    else:
                        logger.warning(f'Skipping "{self.file.a_enc_cut.set_track(i).to_str()}" to encode...')

    def _mux(self) -> None:
        """Mux everything if a MatroskaFile was configured; track its inputs for cleanup."""
        if self.config.mkv is not None:
            wf = self.config.mkv.mux(True)
            self.work_files.update(wf)


def _toseq(seq: T | Sequence[T]) -> Sequence[T]:
    """Normalise a scalar-or-sequence argument into a sequence."""
    return cast(Sequence[T], seq) if isinstance(seq, Sequence) else cast(Sequence[T], [seq])
@logger.catch
def patch(
    encoder: VideoEncoder, clip: vs.VideoNode, file: FileInfo, ranges: FrameRangeN | FrameRangesN,
    output_filename: AnyPath | None = None, cleanup: bool = False
) -> None:
    """Easy video patching function

    :param encoder:             VideoEncoder to be used
    :param clip:                Clip where the patch will pick the fixed ranges
    :param file:                FileInfo object. The file that will be fixed is the file defined
                                in :py:attr:`vardautomation.config.FileInfo.name_file_final`
    :param ranges:              Ranges of frames that need to be fixed
    :param output_filename:     Optional filename.
                                If not specified a suffix ``_new`` will be added, defaults to None
    :param cleanup:             Delete the temporary working directory afterwards
    """
    # Resuming makes no sense for short re-encoded snippets.
    if isinstance(encoder, SupportResume):
        encoder.resumable = False

    from vardefunc.util import normalise_ranges

    nranges = normalise_ranges(clip, ranges)  # type: ignore

    _file_to_fix = file.name_file_final
    final = _file_to_fix.parent

    workdir = final / (file.name + '_temp')
    if output_filename is not None:
        output_fn = VPath(output_filename)
    else:
        output_fn = final / f'{_file_to_fix.stem}_new.mkv'

    if workdir.exists():
        raise FileExistsError(f'patch: {workdir.resolve().to_str()} already exists!')

    # Patching...
    workdir.mkdir()

    # _resolve_range: snap the requested ranges outward to keyframe boundaries
    # so the spliced segments start on seekable frames.
    kf = get_keyframes(_file_to_fix)
    kfsint = kf.frames + [clip.num_frames]

    nbranges = _bound_to_keyframes(nranges, kfsint)
    logger.debug(f'Ranges: {str(nranges)}')
    nranges = normalise_ranges(clip, nbranges, norm_dups=True)
    logger.debug(f'Ranges: {str(nranges)}')

    if len(nranges) == 1 and nranges[0][0] == 0 and nranges[0][1] == clip.num_frames:
        raise ValueError('patch: Don\'t use patch, just redo your encode')

    # _encode: encode each fixed range into its own fix-XXX.mkv.
    # Params are deep-copied because run_enc may mutate them per pass.
    params = deepcopy(encoder.params)
    for i, (s, e) in enumerate(nranges, start=1):
        logger.debug(str((s, e)))
        fix = workdir / f'fix-{i:03.0f}'
        file.name_clip_output = fix
        encoder.run_enc(clip[s:e], file)
        encoder.params = params.copy()
        MatroskaFile(fix.with_suffix('.mkv'), fix).mux()

    # _cut_and_merge: split the original file at the same boundaries, then
    # interleave the original segments with the freshly encoded fixes.
    tmp = workdir / 'tmp.mkv'
    tmpnoaudio = workdir / 'tmp_noaudio.mkv'

    # Split points at 0 / num_frames are implicit; drop them from the list.
    if (start := (rng := list(chain.from_iterable(nranges)))[0]) == 0:
        rng.pop(0)
    if rng[-1] == clip.num_frames:
        rng.pop(-1)

    MatroskaFile(tmp, _file_to_fix, '--no-audio', '--no-track-tags', '--no-chapters').split_frames(rng)

    tmp_files = sorted(workdir.glob('tmp-???.mkv'))
    fix_files = sorted(workdir.glob('fix-???.mkv'))

    # Every other split segment is replaced by its fix; the parity depends on
    # whether the very first range started at frame 0.
    parts = [
        fix_files[int(i / 2)] if i % 2 == (0 if start == 0 else 1) else tmp
        for i, tmp in enumerate(tmp_files)
    ]

    MatroskaFile(tmpnoaudio, None, '--no-audio', '--no-track-tags', '--no-chapters').append_to(
        parts, [(i + 1, 0, i, 0) for i in range(len(parts) - 1)]
    )

    # Re-attach the untouched audio from the original file.
    MatroskaFile(output_fn, [Track(tmpnoaudio), Track(_file_to_fix, '--no-video')]).mux()

    # do_cleanup
    if cleanup:
        workdir.rmtree(ignore_errors=True)
def _bound_to_keyframes(ranges: List[Tuple[int, int]], kfs: List[int]) -> Sequence[Tuple[int, int]]:
    """
    Snap every (start, end) range outward to surrounding keyframes.

    The start is moved back to the keyframe at or before it, the end forward
    to the first keyframe at or after it. Raises ValueError when a boundary
    cannot be resolved from ``kfs``.
    """
    bounded: Set[Tuple[int, int]] = set()

    for start, end in ranges:
        lo = None
        hi = None

        # Keyframe at `start`, or the one just before the first keyframe past it.
        for idx, kf in enumerate(kfs):
            if kf == start:
                lo = kf
                break
            if kf > start:
                lo = kfs[idx - 1]
                break

        # First keyframe at or after `end`.
        for kf in kfs:
            if kf >= end:
                hi = kf
                break

        if lo is None or hi is None:
            logger.debug(str((lo, hi)))
            raise ValueError('_bound_to_keyframes: Something is wrong in `s` or `e`')

        bounded.add((lo, hi))

    return sorted(bounded)
class Patch:
    """Easy video patching interface"""

    encoder: VideoEncoder
    """VideoEncoder to be used"""

    clip: vs.VideoNode
    """Clip where the patch will pick the fixed ranges"""

    file: FileInfo
    """
    FileInfo object\n
    The file that will be fixed is the file defined in
    :py:attr:`vardautomation.config.FileInfo.name_file_final`
    """

    ranges: List[Tuple[int, int]]
    """Normalised ranges"""

    debug: bool
    """Debug boolean"""

    workdir: VPath
    """Work directory path"""
    output_filename: VPath
    """Output filename path"""

    _file_to_fix: VPath

    @logger.catch
    def __init__(self, encoder: VideoEncoder, clip: vs.VideoNode, file: FileInfo,
                 ranges: FrameRangeN | FrameRangesN,
                 output_filename: Optional[str] = None, *, debug: bool = False) -> None:
        """
        :param encoder:             VideoEncoder to be used
        :param clip:                Clip where the patch will pick the fixed ranges
        :param file:                FileInfo object. The file that will be fixed is the file defined
                                    in :py:attr:`vardautomation.config.FileInfo.name_file_final`
        :param ranges:              Ranges of frames that need to be fixed
        :param output_filename:     Optional filename. If not specified a suffix ``_new`` will be added, defaults to None
        :param debug:               Debug argument, defaults to False
        """
        # Deprecated thin wrapper: all the work is delegated to patch().
        self.debug = debug
        logger.warning('Using Patch is deprecated; please use "patch" instead')
        self._patch = partial(patch, encoder, clip, file, ranges, output_filename)

    def run(self) -> None:
        """Launch patch"""
        self._patch()

    def do_cleanup(self) -> None:
        """Delete working directory folder"""
        # Intentionally a no-op here; cleanup is handled by patch(cleanup=True).
class BinaryPath:
    """
    Class storing the path of the variable binaries used in vardautomation.\n
    Just edit one of these attributes if the binary is not in your environment path
    """

    @logger.catch
    def __init__(self) -> None:
        # Pure namespace of class attributes; instantiating it is a mistake.
        raise RuntimeError('Cannot directly instantiate this class.')

    eac3to: VPath = VPath('eac3to')
    """
    https://www.videohelp.com/software/eac3to\n
    https://en.wikibooks.org/wiki/Eac3to/How_to_Use
    """

    fdkaac: VPath = VPath('fdkaac')
    """
    https://github.com/nu774/fdkaac\n
    https://en.wikipedia.org/wiki/Fraunhofer_FDK_AAC\n
    Also available in ffmpeg with ``--enable-libfdk-aac``
    """

    ffmpeg: VPath = VPath('ffmpeg')
    """
    https://www.ffmpeg.org/
    """

    ffmsindex: VPath = VPath('ffmsindex')
    """
    https://github.com/FFMS/ffms2
    """

    flac: VPath = VPath('flac')
    """
    https://xiph.org/flac/index.html
    """

    mkvextract: VPath = VPath('mkvextract')
    """
    https://mkvtoolnix.download/\n
    https://mkvtoolnix.download/doc/mkvextract.html
    """

    mkvmerge: VPath = VPath('mkvmerge')
    """
    https://mkvtoolnix.download/\n
    https://mkvtoolnix.download/doc/mkvmerge.html
    """

    nvencc: VPath = VPath('nvencc')
    """
    https://github.com/rigaya/NVEnc
    """

    opusenc: VPath = VPath('opusenc')
    """
    https://github.com/xiph/opus-tools\n
    Also available in ffmpeg
    """

    qaac: VPath = VPath('qaac')
    """
    https://sites.google.com/site/qaacpage/
    """

    rclone: VPath = VPath('rclone')
    """
    https://rclone.org/
    """

    sox: VPath = VPath('sox')
    """
    http://sox.sourceforge.net/
    """

    x264: VPath = VPath('x264')
    """
    https://www.videolan.org/developers/x264.html
    """

    x265: VPath = VPath('x265')
    """
    http://msystem.waw.pl/x265/\n
    https://bitbucket.org/multicoreware/x265_git/wiki/Home
    """
"""Comparison module""" 2 | 3 | __all__ = [ 4 | # Enums 5 | 'Writer', 'PictureType', 6 | 7 | # Dicts 8 | 'SlowPicsConf', 9 | 10 | # Class and function 11 | 'Comparison', 'make_comps' 12 | ] 13 | 14 | import inspect 15 | import os 16 | import random 17 | import subprocess 18 | 19 | from enum import Enum, auto 20 | from functools import partial 21 | from typing import Any, Callable, Dict, Final, Iterable, List, NamedTuple, Optional, Set 22 | 23 | import numpy as np 24 | import vapoursynth as vs 25 | 26 | from numpy.typing import NDArray 27 | from requests import Session 28 | from requests_toolbelt import MultipartEncoder 29 | from vstools import DitherType 30 | 31 | from ._logging import logger 32 | from .binary_path import BinaryPath 33 | from .tooling import SubProcessAsync, VideoEncoder 34 | from .utils import Properties 35 | from .vpathlib import VPath 36 | from .vtypes import AnyPath 37 | 38 | _MAX_ATTEMPTS_PER_PICTURE_TYPE: Final[int] = 50 39 | 40 | # pylint: disable=consider-using-f-string 41 | 42 | 43 | class Writer(Enum): 44 | """Writer to be used to extract frames""" 45 | 46 | FFMPEG = auto() 47 | """FFmpeg encoder""" 48 | 49 | IMWRI = auto() 50 | """vapoursynth.core.imwri Vapoursynth plugin""" 51 | 52 | OPENCV = auto() 53 | """opencv library""" 54 | 55 | PILLOW = auto() 56 | """Pillow library""" 57 | 58 | PYQT = auto() 59 | """PyQt library""" 60 | 61 | PYTHON = auto() 62 | """Pure python implementation""" 63 | 64 | def __repr__(self) -> str: 65 | return f'<{self.__class__.__name__}.{self.name}>' 66 | 67 | 68 | class PictureType(bytes, Enum): 69 | """A simple enum for picture types.""" 70 | I = b'I' # noqa E741 71 | """I frames""" 72 | 73 | P = b'P' 74 | """P frames""" 75 | 76 | B = b'B' 77 | """B frames""" 78 | 79 | 80 | class SlowPicsConf(NamedTuple): 81 | """Slow.pics configuration""" 82 | collection_name: str = VPath(inspect.stack()[-1].filename).stem 83 | """ 84 | Slowpics's collection name.\n 85 | Default is the name of the current script 86 | """ 87 
    @logger.catch
    def __init__(self, clips: Dict[str, vs.VideoNode], path: AnyPath = 'comps',
                 num: int = 15, frames: Optional[Iterable[int]] = None,
                 picture_type: Optional[PictureType | List[PictureType]] = None) -> None:
        """
        :param clips:           Named clips.
        :param path:            Path to your comparison folder, defaults to 'comps'
        :param num:             Number of frames to extract, defaults to 15
        :param frames:          Additional frame numbers that will be added to the total of ``num``, defaults to None
        :param picture_type:    Select picture types to pick, default to None
        """
        self.clips = clips
        self.path = VPath(path)
        self.path_diff: Optional[VPath] = None

        # Check length of all clips; sampling is bounded by the shortest one.
        lens = set(c.num_frames for c in clips.values())
        if len(lens) != 1:
            logger.warning(f'{self.__class__.__name__}: "clips" doesn\'t have the same length!')
        lens_n = min(lens)

        try:
            self.path.mkdir(parents=True)
        except FileExistsError as file_err:
            raise ValueError(f'{self.__class__.__name__}: path "{self.path.to_str()}" already exists!') from file_err

        # Make samples: either filtered by picture type, or uniformly at random.
        if picture_type:
            logger.info(f'{self.__class__.__name__}: Make samples according to specified picture types...')
            samples = self._select_samples_ptypes(lens_n, num, picture_type)
        else:
            samples = set(random.sample(range(lens_n), num))

        # Add additional frames if requested.
        if frames:
            samples.update(frames)
        # max_num drives zero-padding of the image filenames.
        self.max_num = max(samples)
        self.frames = sorted(samples)
    @logger.catch
    def extract(self, writer: Writer = Writer.PYTHON, compression: int = -1, force_bt709: bool = False) -> None:
        """
        Extract images from the specified clips in the constructor

        :param writer:          Writer method to be used, defaults to Writer.PYTHON
        :param compression:     Compression level. It depends of the writer used, defaults to -1 which means automatic selection
        :param force_bt709:     Force BT709 matrix before conversion to RGB24, defaults to False
        """
        # pylint: disable=cell-var-from-loop
        for name, clip in self.clips.items():
            path_name = self.path / name
            try:
                path_name.mkdir(parents=True)
            except FileExistsError as file_err:
                logger.critical(f'{self.__class__.__name__}: {path_name.to_str()} already exists!', file_err)

            # PNG needs RGB24; error diffusion keeps the dither invisible.
            clip = clip.resize.Bicubic(
                format=vs.RGB24, matrix_in=vs.MATRIX_BT709 if force_bt709 else None,
                dither_type=DitherType.ERROR_DIFFUSION
            )

            # Frame numbers are zero-padded to the width of the largest sample.
            path_images = [
                path_name / (f'{name}_' + f'{f}'.zfill(len("%i" % self.max_num)) + '.png')
                for f in self.frames
            ]

            # Extracts the requested frames using ffmpeg
            if writer == Writer.FFMPEG:
                from vardefunc.util import select_frames
                clip = select_frames(clip, self.frames)

                # -> RGB -> GBR. Needed for ffmpeg
                # Also FPS=1/1. I'm just lazy, okay?
                clip = clip.std.ShufflePlanes([1, 2, 0], vs.RGB).std.AssumeFPS(fpsnum=1, fpsden=1)

                # One output clause per frame: seek to second i, write 1 frame.
                outputs: List[str] = []
                for i, path_image in enumerate(path_images):
                    outputs.extend([
                        '-compression_level', str(compression), '-pred', 'mixed',
                        '-ss', f'{i}', '-t', '1', f'{path_image.to_str()}'
                    ])

                settings = [
                    '-hide_banner', '-loglevel', 'error', '-f', 'rawvideo',
                    '-video_size', f'{clip.width}x{clip.height}',
                    '-pixel_format', 'gbrp', '-framerate', str(clip.fps),
                    '-i', 'pipe:'
                ]
                settings.extend(outputs)

                encoder = VideoEncoder(BinaryPath.ffmpeg, settings)
                encoder.progress_update = _progress_update_func
                encoder.y4m = False
                encoder.run_enc(clip, None)
            # imwri lib is slower even asynchronously requested
            elif writer == Writer.IMWRI:
                from vardefunc.util import select_frames
                # NOTE(review): the filename template ends in '.jpg' while the
                # format is 'PNG' and the other paths glob '*.png' — looks
                # inconsistent; confirm whether IMWRI output is ever reused.
                reqs = clip.imwri.Write(
                    'PNG', (path_name / (f'{name}_%' + f'{len("%i" % self.max_num)}'.zfill(2) + 'd.jpg')).to_str(),
                    quality=compression if compression >= 0 else None
                )
                clip = select_frames(reqs, self.frames)
                # zzzzzzzzz soooo slow
                with open(os.devnull, 'wb') as devnull:
                    clip.output(devnull, y4m=False, progress_update=_progress_update_func)
                logger.logger.opt(raw=True).info('\n')
            else:
                from vardefunc.util import select_frames
                clip = select_frames(clip, self.frames)
                # _saver dispatches on the chosen Writer; invoked per frame.
                clip = clip.std.ModifyFrame(clip, lambda n, f: _saver(writer, compression)(n, f, path_images))
                with open(os.devnull, 'wb') as devnull:
                    clip.output(devnull, y4m=False, progress_update=_progress_update_func)
                logger.logger.opt(raw=True).info('\n')
222 | """ 223 | # Make diff images 224 | if len(self.clips) > 2: 225 | raise ValueError(f'{self.__class__.__name__}: "magick_compare" can only be used with two clips!') 226 | 227 | self.path_diff = self.path / 'diffs' 228 | try: 229 | subprocess.call(['magick', 'compare'], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) 230 | self.path_diff.mkdir(parents=True) 231 | except FileNotFoundError as f_err: 232 | logger.critical(f'{self.__class__.__name__}: "magick compare" was not found!', f_err) 233 | except FileExistsError as f_err: 234 | logger.critical(f'{self.__class__.__name__}: {self.path_diff.to_str()} already exists!', f_err) 235 | 236 | all_images = [sorted((self.path / name).glob('*.png')) for name in self.clips.keys()] 237 | images_a, images_b = all_images 238 | 239 | cmds = [ 240 | f'magick compare "{i1.to_str()}" "{i2.to_str()}" ' 241 | + f'"{self.path_diff.to_str()}/diff_' + f'{f}'.zfill(len("%i" % self.max_num)) + '.png"' 242 | for i1, i2, f in zip(images_a, images_b, self.frames) 243 | ] 244 | 245 | # Launch asynchronously the Magick commands 246 | logger.info('Diffing clips...') 247 | SubProcessAsync(cmds) 248 | logger.logger.opt(raw=True).info('\n') 249 | 250 | @logger.catch 251 | def upload_to_slowpics(self, config: SlowPicsConf) -> None: 252 | """ 253 | Upload to slow.pics with given configuration 254 | 255 | :param config: NamedTuple which contains the uploading configuration 256 | """ 257 | # Upload to slow.pics 258 | all_images = [sorted((self.path / name).glob('*.png')) for name in self.clips.keys()] 259 | if self.path_diff: 260 | all_images.append(sorted(self.path_diff.glob('*.png'))) 261 | 262 | fields: Dict[str, Any] = {} 263 | 264 | for i, (name, images) in enumerate( 265 | zip(list(self.clips.keys()) + (['diff'] if self.path_diff else []), 266 | all_images) 267 | ): 268 | for j, (image, frame) in enumerate(zip(images, self.frames)): 269 | fields[f'comparisons[{j}].name'] = str(frame) 270 | fields[f'comparisons[{j}].images[{i}].name'] = 
name 271 | fields[f'comparisons[{j}].images[{i}].file'] = (image.name, image.read_bytes(), 'image/png') 272 | 273 | with Session() as sess: 274 | sess.get('https://slow.pics/api/comparison') 275 | # TODO: yeet this 276 | files = MultipartEncoder(_make_api_compatible(config) | fields) 277 | 278 | logger.info('Uploading images...\n') 279 | logger.logger.opt(raw=True).info('\n') 280 | url = sess.post( 281 | 'https://slow.pics/api/comparison', data=files.to_string(), 282 | headers=_get_slowpics_header(str(files.len), files.content_type, sess) 283 | ) 284 | 285 | slowpics_url = f'https://slow.pics/c/{url.text}' 286 | logger.info(f'Slowpics url: {slowpics_url}') 287 | 288 | url_file = self.path / 'slow.pics.url' 289 | url_file.write_text(f'[InternetShortcut]\nURL={slowpics_url}', encoding='utf-8') 290 | logger.info(f'url file copied to "{url_file.resolve().to_str()}"') 291 | 292 | @logger.catch 293 | def _select_samples_ptypes(self, num_frames: int, k: int, picture_types: PictureType | List[PictureType]) -> Set[int]: 294 | from vardefunc.util import select_frames 295 | samples: Set[int] = set() 296 | _max_attempts = 0 297 | _rnum_checked: Set[int] = set() 298 | picture_types = picture_types if isinstance(picture_types, list) else [picture_types] 299 | while len(samples) < k: 300 | _attempts = 0 301 | 302 | while True: 303 | # Check if we don't exceed the length of the clips 304 | # if yes then that means we checked all the frames 305 | if len(_rnum_checked) >= num_frames: 306 | raise ValueError(f'{self.__class__.__name__}: There are not enough of {picture_types} in these clips') 307 | rnum = _rand_num_frames(_rnum_checked, partial(random.randrange, start=0, stop=num_frames)) 308 | _rnum_checked.add(rnum) 309 | 310 | # Check _PictType 311 | if all( 312 | Properties.get_prop(f, '_PictType', bytes) in picture_types 313 | for f in vs.core.std.Splice([select_frames(c, [rnum]) for c in self.clips.values()], mismatch=True).frames() 314 | ): 315 | break 316 | _attempts += 1 317 
| _max_attempts += 1 318 | 319 | if _attempts > _MAX_ATTEMPTS_PER_PICTURE_TYPE: 320 | logger.warning( 321 | f'{self.__class__.__name__}: {_MAX_ATTEMPTS_PER_PICTURE_TYPE} attempts were made for sample {len(samples)} ' 322 | f'and no match found for {picture_types}; stopping iteration...' 323 | ) 324 | break 325 | 326 | if _max_attempts > (curr_max_att := _MAX_ATTEMPTS_PER_PICTURE_TYPE * k): 327 | raise RecursionError(f'{self.__class__.__name__}: attempts max of {curr_max_att} has been reached!') 328 | 329 | if _attempts < _MAX_ATTEMPTS_PER_PICTURE_TYPE: 330 | samples.add(rnum) 331 | logger.info( 332 | "\rSelecting image: %i/%i ~ %.2f %%" % ( 333 | len(samples), k, 100 * len(samples) / k 334 | ) 335 | ) 336 | 337 | logger.logger.opt(raw=True).info('\n') 338 | return samples 339 | 340 | 341 | def make_comps( 342 | clips: Dict[str, vs.VideoNode], path: AnyPath = 'comps', 343 | num: int = 15, frames: Optional[Iterable[int]] = None, *, 344 | picture_types: Optional[PictureType | List[PictureType]] = None, 345 | force_bt709: bool = False, 346 | writer: Writer = Writer.PYTHON, compression: int = -1, 347 | magick_compare: bool = False, 348 | slowpics_conf: Optional[SlowPicsConf] = None 349 | ) -> None: 350 | """ 351 | Convenience function for :py:class:`Comparison`. 352 | 353 | :param clips: Named clips. 354 | :param path: Path to your comparison folder, defaults to 'comps' 355 | :param num: Number of frames to extract, defaults to 15 356 | :param frames: Additionnal frame numbers that will be added to the total of num, defaults to None 357 | :param picture_types: Select picture types to pick, default to None 358 | :param force_bt709: Force BT709 matrix before conversion to RGB24, defaults to False 359 | :param writer: Writer method to be used, defaults to Writer.PYTHON 360 | :param compression: Compression level. It depends of the writer used, defaults to -1 which means automatic selection 361 | :param magick_compare: Make diffs between the first and second clip. 
def make_comps(
    clips: Dict[str, vs.VideoNode], path: AnyPath = 'comps',
    num: int = 15, frames: Optional[Iterable[int]] = None, *,
    picture_types: Optional[PictureType | List[PictureType]] = None,
    force_bt709: bool = False,
    writer: Writer = Writer.PYTHON, compression: int = -1,
    magick_compare: bool = False,
    slowpics_conf: Optional[SlowPicsConf] = None
) -> None:
    """
    Convenience function for :py:class:`Comparison`.

    :param clips:           Named clips.
    :param path:            Path to your comparison folder, defaults to 'comps'
    :param num:             Number of frames to extract, defaults to 15
    :param frames:          Additional frame numbers that will be added to the total of num, defaults to None
    :param picture_types:   Select picture types to pick, default to None
    :param force_bt709:     Force BT709 matrix before conversion to RGB24, defaults to False
    :param writer:          Writer method to be used, defaults to Writer.PYTHON
    :param compression:     Compression level. It depends of the writer used, defaults to -1 which means automatic selection
    :param magick_compare:  Make diffs between the first and second clip.
                            Will raise an exception if more than 2 clips are passed to clips, defaults to False
    :param slowpics_conf:   slow.pics configuration. If specified, images will be uploaded following this configuration
    """
    comparison = Comparison(clips, path, num, frames, picture_types)
    comparison.extract(writer, compression, force_bt709)
    if magick_compare:
        comparison.magick_compare()
    if slowpics_conf is not None:
        comparison.upload_to_slowpics(slowpics_conf)


def _rand_num_frames(checked: Set[int], rand_func: Callable[[], int]) -> int:
    # Draw until we hit a frame number that hasn't been tried yet
    while (rnum := rand_func()) in checked:
        pass
    return rnum


@logger.catch
def _saver(writer: Writer, compression: int) -> Callable[[int, vs.VideoFrame, List[VPath]], vs.VideoFrame]:  # noqa: C901
    """Return the per-frame PNG-writing callback matching ``writer``."""
    if writer == Writer.OPENCV:
        try:
            import cv2
        except ImportError as imp_err:
            raise ValueError('comp: you need opencv to use this writer') from imp_err

        # cv2.imwrite takes its flags as a flat parameter list
        cv2_params = [cv2.IMWRITE_PNG_COMPRESSION, compression] if compression >= 0 else []

        def _opencv(n: int, f: vs.VideoFrame, path_images: List[VPath]) -> vs.VideoFrame:
            # Planes stacked reversed since OpenCV expects BGR ordering
            arr = np.dstack(tuple(reversed(f)))  # type: ignore[var-annotated]
            cv2.imwrite(path_images[n].to_str(), arr, cv2_params)
            return f

        return _opencv

    if writer == Writer.PILLOW:
        try:
            from PIL import Image
        except ImportError as imp_err:
            raise ValueError('comp: you need Pillow to use this writer') from imp_err

        def _pillow(n: int, f: vs.VideoFrame, path_images: List[VPath]) -> vs.VideoFrame:
            arr: NDArray[Any] = np.dstack(f)  # type: ignore[call-overload]
            img = Image.fromarray(arr, 'RGB')  # type: ignore[pylance-strict]
            img.save(path_images[n], format='PNG', optimize=False, compress_level=abs(compression))
            return f

        return _pillow

    if writer == Writer.PYQT:
        try:
            from PyQt5.QtGui import QImage
        except ImportError as imp_err:
            raise ValueError('comp: you need pyqt to use this writer') from imp_err

        def _pyqt(n: int, f: vs.VideoFrame, path_images: List[VPath]) -> vs.VideoFrame:
            arr: NDArray[Any] = np.dstack(f)  # type: ignore[call-overload]
            # pylint: disable=no-member
            img = QImage(arr.tobytes(), f.width, f.height, 3 * f.width, QImage.Format_RGB888)
            img.save(path_images[n].to_str(), 'PNG', compression)
            return f

        return _pyqt

    if writer == Writer.PYTHON:
        import struct
        import zlib

        def _write_png(buf: bytes, width: int, height: int) -> bytes:
            # Each scanline is prefixed with a 0x00 "no filter" byte
            row_bytes = width * 3
            # NOTE(review): iterates height + 1 rows — looks like an off-by-one
            # but is preserved as-is to keep output byte-identical
            raw_data = b''.join(
                b'\x00' + buf[offset:offset + row_bytes]
                for offset in range(0, (height + 1) * row_bytes, row_bytes)
            )

            def _png_pack(png_tag: bytes, data: bytes) -> bytes:
                chunk_head = png_tag + data
                return (struct.pack("!L", len(data))
                        + chunk_head
                        + struct.pack("!L", 0xFFFFFFFF & zlib.crc32(chunk_head)))

            return b''.join([
                # http://www.w3.org/TR/PNG/#5PNG-file-signature
                struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10),
                # https://www.w3.org/TR/PNG/#11IHDR
                _png_pack(b'IHDR', struct.pack("!2I5B", width, height, 8, 2, 0, 0, 0)),
                _png_pack(b'IDAT', zlib.compress(raw_data, compression)),
                _png_pack(b'IEND', b'')
            ])

        def _python_png(n: int, f: vs.VideoFrame, path_images: List[VPath]) -> vs.VideoFrame:
            # pylint: disable=no-member
            payload = _write_png(np.dstack(f).tobytes(), f.width, f.height)  # type: ignore[call-overload]
            path_images[n].write_bytes(payload)
            return f

        return _python_png

    raise ValueError(f'comp: unknown writer! "{writer}"')
"{writer}"') 457 | 458 | 459 | def _get_slowpics_header(content_length: str, content_type: str, sess: Session) -> Dict[str, str]: 460 | return { 461 | "Accept": "*/*", 462 | "Accept-Encoding": "gzip, deflate, br", 463 | "Accept-Language": "en-US,en;q=0.5", 464 | "Content-Length": content_length, 465 | "Content-Type": content_type, 466 | "Origin": "https://slow.pics/", 467 | "Referer": "https://slow.pics/comparison", 468 | "Sec-Fetch-Mode": "cors", 469 | "Sec-Fetch-Site": "same-origin", 470 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36", 471 | "X-XSRF-TOKEN": sess.cookies.get_dict()["XSRF-TOKEN"] 472 | } 473 | 474 | 475 | def _make_api_compatible(config: SlowPicsConf) -> Dict[str, str]: 476 | conf = { 477 | 'collectionName': config.collection_name, 478 | 'public': str(config.public).lower(), 479 | 'optimizeImages': str(config.optimise).lower(), 480 | 'hentai': str(config.nsfw).lower(), 481 | } 482 | if config.remove_after is not None: 483 | conf.update({'removeAfter': str(config.remove_after)}) 484 | return conf 485 | 486 | 487 | def _progress_update_func(value: int, endvalue: int) -> None: 488 | if value == 0: 489 | return 490 | logger.logger.opt(raw=True, colors=True).info( 491 | logger.info.colour 492 | + "\rExtracting image: %i/%i ~ %.2f %%" % (value, endvalue, 100 * value / endvalue) 493 | + logger.info.colour_close 494 | ) 495 | -------------------------------------------------------------------------------- /vardautomation/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Configuration module 3 | 4 | Contains FileInfo, BlurayShow and the different Presets to pass to them. 
5 | """ 6 | 7 | __all__ = [ 8 | 'FileInfo', 'FileInfo2', 9 | 'PresetType', 10 | 'Preset', 'NoPreset', 11 | 'PresetBD', 'PresetBDWAV64', 'PresetWEB', 12 | 'PresetAAC', 'PresetOpus', 'PresetEAC3', 'PresetFLAC', 13 | 'PresetChapOGM', 'PresetChapXML', 14 | 'BlurayShow' 15 | ] 16 | 17 | import sys 18 | 19 | from dataclasses import dataclass 20 | from enum import IntEnum 21 | from fractions import Fraction 22 | from pprint import pformat 23 | from typing import Any, Callable, Dict, List, NamedTuple, Optional, Sequence, Type, TypeVar, Union 24 | 25 | import vapoursynth as vs 26 | 27 | from pymediainfo import MediaInfo 28 | 29 | from ._logging import logger 30 | from .chapterisation import MatroskaXMLChapters, MplsReader 31 | from .language import UNDEFINED, Lang 32 | from .render import audio_async_render 33 | from .vpathlib import VPath 34 | from .vtypes import AnyPath, DuplicateFrame, Trim, VPSIdx 35 | 36 | core = vs.core 37 | 38 | 39 | class PresetType(IntEnum): 40 | """Type of preset""" 41 | NO_PRESET = 0 42 | """Special type""" 43 | VIDEO = 10 44 | """Video type""" 45 | AUDIO = 20 46 | """Audio type""" 47 | CHAPTER = 30 48 | """Chapter type""" 49 | 50 | 51 | @dataclass(frozen=True, slots=True) 52 | class Preset: 53 | """Preset class that fills some attributes of :py:class:`FileInfo`""" 54 | 55 | idx: Optional[Callable[[str], vs.VideoNode]] 56 | """Vapoursynth indexer callable""" 57 | 58 | a_src: Optional[VPath] 59 | """Audio source path""" 60 | 61 | a_src_cut: Optional[VPath] 62 | """Audio trimmed source path""" 63 | 64 | a_enc_cut: Optional[VPath] 65 | """Audio trimmed encoded source path""" 66 | 67 | chapter: Optional[VPath] 68 | """Chapter file path""" 69 | 70 | preset_type: PresetType 71 | """Preset type from :py:class:`PresetType`""" 72 | 73 | 74 | NoPreset = Preset( 75 | idx=None, 76 | a_src=VPath(''), 77 | a_src_cut=VPath(''), 78 | a_enc_cut=VPath(''), 79 | chapter=VPath(''), 80 | preset_type=PresetType.NO_PRESET 81 | ) 82 | """ 83 | Special Preset that won't 
do anything 84 | """ 85 | 86 | PresetBD = Preset( 87 | idx=core.lsmas.LWLibavSource, 88 | a_src=VPath('{work_filename:s}_track_{track_number:s}.wav'), 89 | a_src_cut=VPath('{work_filename:s}_cut_track_{track_number:s}.wav'), 90 | a_enc_cut=None, 91 | chapter=None, 92 | preset_type=PresetType.VIDEO 93 | ) 94 | """ 95 | Preset for BD encode. 96 | The indexer is core.lsmas.LWLibavSource and audio sources are .wav 97 | """ 98 | 99 | PresetBDWAV64 = Preset( 100 | idx=core.lsmas.LWLibavSource, 101 | a_src=VPath('{work_filename:s}_track_{track_number:s}.w64'), 102 | a_src_cut=VPath('{work_filename:s}_cut_track_{track_number:s}.w64'), 103 | a_enc_cut=None, 104 | chapter=None, 105 | preset_type=PresetType.VIDEO 106 | ) 107 | """ 108 | Preset for BD encode. 109 | The indexer is core.lsmas.LWLibavSource and audio sources are .w64 110 | """ 111 | 112 | PresetWEB = Preset( 113 | idx=core.ffms2.Source, 114 | a_src=None, 115 | a_src_cut=None, 116 | a_enc_cut=VPath(''), 117 | chapter=None, 118 | preset_type=PresetType.VIDEO 119 | ) 120 | """ 121 | Preset for WEB encode. 122 | The indexer is core.ffms2.Source and a_enc_cut is blocked. 123 | """ 124 | 125 | PresetAAC = Preset( 126 | idx=None, 127 | a_src=VPath('{work_filename:s}_track_{track_number:s}.aac'), 128 | a_src_cut=VPath('{work_filename:s}_cut_track_{track_number:s}.aac'), 129 | a_enc_cut=VPath('{work_filename:s}_cut_enc_track_{track_number:s}.m4a'), 130 | chapter=None, 131 | preset_type=PresetType.AUDIO 132 | ) 133 | """ 134 | Preset for AAC encode. 135 | """ 136 | 137 | PresetOpus = Preset( 138 | idx=None, 139 | a_src=VPath('{work_filename:s}_track_{track_number:s}.opus'), 140 | a_src_cut=VPath('{work_filename:s}_cut_track_{track_number:s}.opus'), 141 | a_enc_cut=VPath('{work_filename:s}_cut_enc_track_{track_number:s}.opus'), 142 | chapter=None, 143 | preset_type=PresetType.AUDIO 144 | ) 145 | """ 146 | Preset for Opus encode. 
147 | """ 148 | 149 | 150 | PresetEAC3 = Preset( 151 | idx=None, 152 | a_src=VPath('{work_filename:s}_track_{track_number:s}.eac3'), 153 | a_src_cut=VPath('{work_filename:s}_cut_track_{track_number:s}.eac3'), 154 | a_enc_cut=VPath('{work_filename:s}_cut_enc_track_{track_number:s}.eac3'), 155 | chapter=None, 156 | preset_type=PresetType.AUDIO 157 | ) 158 | """ 159 | Preset for EAC3 encode. 160 | """ 161 | 162 | PresetFLAC = Preset( 163 | idx=None, 164 | a_src=VPath('{work_filename:s}_track_{track_number:s}.flac'), 165 | a_src_cut=VPath('{work_filename:s}_cut_track_{track_number:s}.flac'), 166 | a_enc_cut=VPath('{work_filename:s}_cut_enc_track_{track_number:s}.flac'), 167 | chapter=None, 168 | preset_type=PresetType.AUDIO 169 | ) 170 | """ 171 | Preset for FLAC encode. 172 | """ 173 | 174 | PresetChapOGM = Preset( 175 | idx=None, 176 | a_src=None, 177 | a_src_cut=None, 178 | a_enc_cut=None, 179 | chapter=VPath('chapters/{name:s}.txt'), 180 | preset_type=PresetType.CHAPTER 181 | ) 182 | """ 183 | Preset for OGM based chapters. 184 | """ 185 | 186 | PresetChapXML = Preset( 187 | idx=None, 188 | a_src=None, 189 | a_src_cut=None, 190 | a_enc_cut=None, 191 | chapter=VPath('chapters/{name:s}.xml'), 192 | preset_type=PresetType.CHAPTER 193 | ) 194 | """ 195 | Preset for XML based chapters. 196 | """ 197 | 198 | 199 | class FileInfo: 200 | """FileInfo object. 
class FileInfo:
    """FileInfo object. This is the first thing you should initialise."""
    path: VPath
    """Path of the video file"""
    path_without_ext: VPath
    """Path of the video file without the extension"""
    work_filename: str
    """Work directory filename"""

    idx: Optional[VPSIdx]
    """Vapoursynth Indexer"""
    preset: List[Preset]
    """Preset(s) used"""

    name: str
    """Name of the script"""

    workdir: VPath
    """Work directory"""

    a_src: Optional[VPath]
    """Audio source path"""
    a_src_cut: Optional[VPath]
    """Audio source trimmed/cut path"""
    a_enc_cut: Optional[VPath]
    """Audio source encoded (and trimmed) path"""
    _chapter: Optional[VPath]

    clip: vs.VideoNode
    """VideoNode object loaded by the indexer"""
    clip_cut: vs.VideoNode
    """Clip trimmed"""

    name_clip_output: VPath
    """Clip output path name"""
    name_file_final: VPath
    """Final file output path"""

    _num_prop: bool = False
    _trims_or_dfs: List[Union[Trim, DuplicateFrame]] | Trim | None

    @logger.catch
    def __init__(
        self, path: AnyPath, /,
        trims_or_dfs: List[Union[Trim, DuplicateFrame]] | Trim | None = None, *,
        idx: Optional[VPSIdx] = None,
        preset: Preset | Sequence[Preset] = (PresetBD, PresetBDWAV64),
        workdir: AnyPath = VPath().cwd()
    ) -> None:
        """
        Helper which allows to store the data related to your file to be encoded

        :param path:            Path to your source file.
        :param trims_or_dfs:    Adjust the clip length by trimming or duplicating frames. Python slicing. Defaults to None
        :param idx:             Indexer used to index the video track, defaults to None
        :param preset:          Preset used to fill idx, a_src, a_src_cut, a_enc_cut and chapter attributes,
                                defaults to (:py:data:`.PresetBD`, :py:data:`.PresetBDWAV64`)
        :param workdir:         Work directory. Defaults to the current directory where the script is launched.
        """
        self.workdir = VPath(workdir).resolve()

        self.path = VPath(path)
        self.path_without_ext = self.path.with_suffix('')
        self.work_filename = self.path.stem

        self.idx = idx

        self.name = VPath(sys.argv[0]).stem

        self.a_src, self.a_src_cut, self.a_enc_cut, self._chapter = (None, ) * 4
        if isinstance(preset, Preset):
            self.preset = [preset]
        else:
            # Sort by type so video presets fill `idx` before audio/chapter ones
            self.preset = sorted(preset, key=lambda p: p.preset_type)
        for p in self.preset:
            self._fill_preset(p)

        if self.idx:
            self.clip = self.idx(str(path))
            self.trims_or_dfs = trims_or_dfs

        self.name_clip_output = self.workdir / VPath(self.name)
        self.name_file_final = VPath(self.name + '.mkv')

        self.__post_init__()

    def __post_init__(self) -> None:
        # Hook for subclasses; intentionally empty here
        ...

    def __str__(self) -> str:
        # Public attributes plus the property-backed values
        dico = {k: v for k, v in self.__dict__.items() if not k.startswith('_')}
        dico['chapter'] = self.chapter
        dico['trims_or_dfs'] = self.trims_or_dfs
        dico['media_info'] = self.media_info
        dico['num_prop'] = self.num_prop
        return pformat(dico, width=200, sort_dicts=False)

    def _fill_preset(self, p: Preset) -> None:
        """Fill the attributes that are still unset from the given preset."""
        if self.idx is None:
            self.idx = p.idx

        if self.a_src is None and p.a_src is not None:
            if p.a_src == VPath():
                # An empty VPath blocks the attribute without formatting it
                self.a_src = VPath()
            else:
                self.a_src = self.workdir / p.a_src.format(
                    work_filename=self.work_filename, track_number='{track_number}'
                )

        if self.a_src_cut is None and p.a_src_cut is not None:
            if p.a_src_cut == VPath():
                self.a_src_cut = VPath()
            else:
                self.a_src_cut = self.workdir / p.a_src_cut.format(
                    work_filename=self.work_filename, track_number='{track_number}'
                )

        if self.a_enc_cut is None and p.a_enc_cut is not None:
            if p.a_enc_cut == VPath():
                self.a_enc_cut = VPath()
            else:
                self.a_enc_cut = self.workdir / p.a_enc_cut.format(
                    work_filename=self.work_filename, track_number='{track_number}'
                )

        if self.chapter is None and p.chapter is not None:
            self._chapter = self.workdir / p.chapter.format(name=self.name)

    def set_name_clip_output_ext(self, extension: str, /) -> None:
        """
        Set the extension of :attr:`FileInfo.name_clip_output`

        :param extension:       Extension in string format, eg. ".265"
        """
        self.name_clip_output = self.name_clip_output.with_suffix(extension)

    @property
    def chapter(self) -> Optional[VPath]:
        """
        Chapter file path

        :setter: Set the chapter path
        """
        return self._chapter

    @chapter.setter
    def chapter(self, chap: Optional[VPath]) -> None:
        if chap and chap.suffix not in {'.txt', '.xml'}:
            logger.warning(f'{self.__class__.__name__}: Chapter extension "{chap.suffix}" is not recognised!')
        self._chapter = chap

    @property
    def trims_or_dfs(self) -> List[Union[Trim, DuplicateFrame]] | Trim | None:
        """
        Trims or DuplicateFrame objects of the current FileInfo

        :setter: Set trims or duplicate frames
        """
        return self._trims_or_dfs

    @trims_or_dfs.setter
    def trims_or_dfs(self, x: List[Union[Trim, DuplicateFrame]] | Trim | None) -> None:
        from vardefunc.util import adjust_clip_frames
        self._trims_or_dfs = x
        if x:
            self.clip_cut = adjust_clip_frames(self.clip, x)
        else:
            self.clip_cut = self.clip

    @property
    def media_info(self) -> MediaInfo:
        """Get the MediaInfo of the video file loaded"""
        return MediaInfo.parse(self.path)
``FileInfoFrameNumber`` to the frame properties of :attr:`FileInfo.clip` and :attr:`FileInfo.clip_cut` 382 | """ 383 | return self._num_prop 384 | 385 | @num_prop.setter 386 | def num_prop(self, x: bool) -> None: 387 | self._num_prop = x 388 | if x: 389 | def _add_frame_num(n: int, f: vs.VideoFrame) -> vs.VideoFrame: 390 | fout = f.copy() 391 | fout.props['FileInfoFrameNumber'] = n 392 | return fout 393 | 394 | self.clip = core.std.ModifyFrame(self.clip, self.clip, _add_frame_num) 395 | self.trims_or_dfs = self._trims_or_dfs 396 | else: 397 | self.clip, self.clip_cut = [ 398 | c.std.RemoveFrameProps('FileInfoFrameNumber') for c in [self.clip, self.clip_cut] 399 | ] 400 | 401 | 402 | class FileInfo2(FileInfo): 403 | """Second version of FileInfo adding audio support""" 404 | 405 | audios: List[vs.AudioNode] 406 | """List of AudioNode indexed by BestAudioSource in the file""" 407 | 408 | audios_cut: List[vs.AudioNode] 409 | """List of AudioNode cut with the specified trims""" 410 | 411 | @logger.catch 412 | def __post_init__(self) -> None: 413 | from vardefunc.util import adjust_audio_frames 414 | self.audios = [] 415 | self.audios_cut = [] 416 | 417 | track = 0 418 | num_error = 0 419 | while num_error < 2: 420 | try: 421 | audio = core.bas.Source(str(self.path), track=track) 422 | except vs.Error: 423 | num_error += 1 424 | else: 425 | self.audios.append(audio) 426 | num_error = 0 427 | track += 1 428 | 429 | if self.trims_or_dfs: 430 | for audio in self.audios: 431 | self.audios_cut.append( 432 | adjust_audio_frames(audio, self.trims_or_dfs, ref_fps=self.clip.fps) 433 | ) 434 | else: 435 | self.audios_cut = self.audios.copy() 436 | 437 | @property 438 | def trims_or_dfs(self) -> List[Union[Trim, DuplicateFrame]] | Trim | None: 439 | """ 440 | Trims or DuplicateFrame objects of the current FileInfo 441 | 442 | :setter: Set trims or duplicate frames 443 | """ 444 | return self._trims_or_dfs 445 | 446 | @trims_or_dfs.setter 447 | def trims_or_dfs(self, x: 
List[Union[Trim, DuplicateFrame]] | Trim | None) -> None: 448 | from vardefunc.util import adjust_clip_frames 449 | self._trims_or_dfs = x 450 | if x: 451 | self.clip_cut = adjust_clip_frames(self.clip, x) 452 | else: 453 | self.clip_cut = self.clip 454 | self.__post_init__() 455 | 456 | @property 457 | def audio(self) -> vs.AudioNode: 458 | """ 459 | Return the first AudioNode track of the file. 460 | 461 | :return: AudioNode 462 | """ 463 | return self.audios[0] 464 | 465 | @property 466 | def audio_cut(self) -> vs.AudioNode: 467 | """ 468 | Return the first trimmed AudioNode track of the file. 469 | 470 | :return: AudioNode 471 | """ 472 | return self.audios_cut[0] 473 | 474 | @logger.catch 475 | def write_a_src(self, index: int, offset: int = -1) -> None: 476 | """ 477 | Using `audio_async_render` write the AudioNodes of the file 478 | as a WAV file to `a_src` path 479 | """ 480 | if not self.a_src: 481 | raise ValueError(f'{self.__class__.__name__}: no a_src VPath found!') 482 | with self.a_src.set_track(index).open('wb') as binary: 483 | audio_async_render( 484 | self.audios[index + offset], binary, 485 | progress=f'Writing a_src to {self.a_src.set_track(index).resolve().to_str()}' 486 | ) 487 | 488 | @logger.catch 489 | def write_a_src_cut(self, index: int, offset: int = -1) -> None: 490 | """ 491 | Using `audio_async_render` write the AudioNodes of the file 492 | as a WAV file to `a_src_cut` path 493 | """ 494 | if not self.a_src_cut: 495 | raise ValueError(f'{self.__class__.__name__}: no a_src_cut VPath found!') 496 | with self.a_src_cut.set_track(index).open('wb') as binary: 497 | audio_async_render( 498 | self.audios_cut[index + offset], binary, 499 | progress=f'Writing a_src_cut to {self.a_src_cut.set_track(index).resolve().to_str()}' 500 | ) 501 | 502 | 503 | class _File(NamedTuple): 504 | file: VPath 505 | chapter: Optional[VPath] 506 | 507 | 508 | _FileInfoType = TypeVar('_FileInfoType', bound=FileInfo) 509 | 510 | 511 | class BlurayShow: 512 | 
"""Helper class for batching shows""" 513 | 514 | _files: List[_File] 515 | 516 | _file_info_args: Dict[str, Any] 517 | _file_ncops: List[_File] 518 | _file_nceds: List[_File] 519 | 520 | def __init__(self, episodes: Dict[VPath, List[VPath]], global_trims: List[Trim | DuplicateFrame] | Trim | None = None, *, 521 | idx: Optional[VPSIdx] = None, preset: Preset | Sequence[Preset] = [PresetBD, PresetBDWAV64], 522 | lang: Lang = UNDEFINED, fps: Fraction = Fraction(24000, 1001)) -> None: 523 | """ 524 | :param episodes: A dictionnary of episodes. 525 | Keys are the path of each bdmv folder. 526 | Values are the episodes inside the current bdmv folder key. 527 | :param global_trims: Adjust the clips length by trimming or duplicating frames. Python slicing. Defaults to None 528 | :param idx: Indexer used to index the video track, defaults to None 529 | :param preset: Preset used to fill idx, a_src, a_src_cut, a_enc_cut and chapter attributes, 530 | defaults to :py:data:`.PresetGeneric` 531 | :param lang: Chapters language, defaults to UNDEFINED 532 | """ 533 | self._file_info_args = dict(trims_or_dfs=global_trims, idx=idx, preset=preset) 534 | self._files = [] 535 | 536 | for path, eps in episodes.items(): 537 | chap_folder = path / 'chapters' 538 | chap_folder.mkdir(parents=True, exist_ok=True) 539 | chaps = sorted(chap_folder.glob('*')) 540 | 541 | MplsReader(path, lang).write_playlist(chap_folder) 542 | chaps = sorted(chap_folder.glob('*')) 543 | 544 | for ep in eps: 545 | chap_sel: Optional[VPath] = None 546 | for chap in chaps: 547 | if chap.stem.split('_')[1] == ep.stem: 548 | chap_sel = chap 549 | if isinstance(global_trims, tuple) and (trim := global_trims[0]): 550 | MatroskaXMLChapters(chap).shift_times(- trim, fps) 551 | break 552 | self._files.append(_File(path / ep, chap_sel)) 553 | 554 | self._file_ncops = [] 555 | self._file_nceds = [] 556 | 557 | def register_ncops(self, *path: VPath) -> None: 558 | """ 559 | Add NCOP paths to the class 560 | """ 561 | 
self._file_ncops.extend(_File(p, None) for p in path) 562 | 563 | def register_nceds(self, *path: VPath) -> None: 564 | """ 565 | Add NCED paths to the class 566 | """ 567 | self._file_nceds.extend(_File(p, None) for p in path) 568 | 569 | def ncops(self, /, file_info_t: Type[_FileInfoType]) -> List[_FileInfoType]: 570 | """ 571 | Get all the NCOPs 572 | 573 | :return: List of FileInfo 574 | """ 575 | return [ 576 | self.ncop(i, start_from=0, file_info_t=file_info_t) 577 | for i in range(len(self._file_ncops)) 578 | ] 579 | 580 | def ncop(self, num: int, /, file_info_t: Type[_FileInfoType], *, start_from: int = 1) -> _FileInfoType: 581 | """ 582 | Get a specified NCOP 583 | 584 | :param num: Numero of the NCOP 585 | :param start_from: Indexing starting value, defaults to 1 586 | :return: FileInfo object 587 | """ 588 | ncop = self._file_ncops[num - start_from] 589 | ncop_info = file_info_t(ncop.file, **self._file_info_args) 590 | return ncop_info 591 | 592 | def nceds(self, /, file_info_t: Type[_FileInfoType]) -> List[_FileInfoType]: 593 | """ 594 | Get all the NCEDs 595 | 596 | :return: List of FileInfo 597 | """ 598 | return [ 599 | self.nced(i, start_from=0, file_info_t=file_info_t) 600 | for i in range(len(self._file_nceds)) 601 | ] 602 | 603 | def nced(self, num: int, /, file_info_t: Type[_FileInfoType], *, start_from: int = 1) -> _FileInfoType: 604 | """ 605 | Get a specified NCED 606 | 607 | :param num: Numero of the NCED 608 | :param start_from: Indexing starting value, defaults to 1 609 | :return: FileInfo object 610 | """ 611 | nced = self._file_nceds[num - start_from] 612 | nced_info = file_info_t(nced.file, **self._file_info_args) 613 | return nced_info 614 | 615 | def episodes(self, /, file_info_t: Type[_FileInfoType]) -> List[_FileInfoType]: 616 | """ 617 | Get all the episodes 618 | 619 | :return: List of FileInfo 620 | """ 621 | return [ 622 | self.episode(i, start_from=0, file_info_t=file_info_t) 623 | for i in range(len(self._files)) 624 | ] 625 | 
626 | def episode(self, num: int, /, file_info_t: Type[_FileInfoType], *, start_from: int = 1) -> _FileInfoType: 627 | """ 628 | Get a specified episode 629 | 630 | :param num: Numero of the episode 631 | :param start_from: Indexing starting value, defaults to 1 632 | :return: FileInfo object 633 | """ 634 | file = self._files[num - start_from] 635 | file_info = file_info_t(file.file, **self._file_info_args) 636 | file_info.chapter = file.chapter 637 | return file_info 638 | -------------------------------------------------------------------------------- /vardautomation/exception.py: -------------------------------------------------------------------------------- 1 | """Logger module""" 2 | 3 | 4 | class FileError(OSError): 5 | ... 6 | 7 | 8 | class VSFormatError(ValueError): 9 | ... 10 | 11 | 12 | class VSSubsamplingError(VSFormatError): 13 | ... 14 | 15 | 16 | class VSColourRangeError(ValueError): 17 | ... 18 | -------------------------------------------------------------------------------- /vardautomation/language.py: -------------------------------------------------------------------------------- 1 | """Language module""" 2 | 3 | from __future__ import annotations 4 | 5 | __all__ = [ 6 | 'Lang', 7 | 'FRENCH', 'ENGLISH', 'JAPANESE', 'UNDEFINED' 8 | ] 9 | 10 | 11 | from pprint import pformat 12 | from typing import Optional 13 | 14 | from langcodes import Language 15 | 16 | from .utils import recursive_dict 17 | 18 | 19 | class Lang: 20 | """Basic language class""" 21 | 22 | name: str 23 | """Name of the language""" 24 | ietf: str 25 | """IETF BCP 47 language code""" 26 | iso639: str 27 | """ISO-639 language code""" 28 | 29 | def __init__(self, language: Language, *, iso639_variant: str = 'B') -> None: 30 | """ 31 | :param language: Language class of the package langcodes 32 | :param iso639_variant: Optional variant to get the 'bibliographic' code instead, defaults to 'B' 33 | """ 34 | self.name = language.autonym() 35 | self.ietf = str(language) 36 | self.iso639 
= language.to_alpha3(variant=iso639_variant) 37 | 38 | def __str__(self) -> str: 39 | return pformat(recursive_dict(self), indent=4, width=200, sort_dicts=False) 40 | 41 | @classmethod 42 | def make(cls, ietf: Optional[str]) -> Lang: 43 | """ 44 | Make a new Lang based on IETF 45 | 46 | :param ietf: IETF BCP 47 language code 47 | :return: A new Lang object 48 | """ 49 | return cls(Language.make(ietf)) 50 | 51 | 52 | FRENCH = Lang.make('fr') 53 | """French Lang object""" 54 | 55 | ENGLISH = Lang.make('en') 56 | """English Lang object""" 57 | 58 | JAPANESE = Lang.make('ja') 59 | """Japanese Lang object""" 60 | 61 | UNDEFINED = Lang.make(None) 62 | """Undefined Lang object""" 63 | -------------------------------------------------------------------------------- /vardautomation/logo.txt: -------------------------------------------------------------------------------- 1 | +––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––+ 2 | | _ _ _ _ | 3 | | | | | | | | (_) | 4 | | __ __ __ _ _ __ __| | __ _ _ _ | |_ ___ _ __ ___ __ _ | |_ _ ___ _ __ | 5 | | \ \ / // _` || '__|/ _` | / _` || | | || __|/ _ \ | '_ ` _ \ / _` || __|| | / _ \ | '_ \ | 6 | | \ V /| (_| || | | (_| || (_| || |_| || |_| (_) || | | | | || (_| || |_ | || (_) || | | | | 7 | | \_/ \__,_||_| \__,_| \__,_| \__,_| \__|\___/ |_| |_| |_| \__,_| \__||_| \___/ |_| |_| | 8 | | | 9 | +––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––+ -------------------------------------------------------------------------------- /vardautomation/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Ichunjo/vardautomation/abd5f487c3bfb2a4e939a3d3dadbb405e30d729e/vardautomation/py.typed -------------------------------------------------------------------------------- /vardautomation/render.py: -------------------------------------------------------------------------------- 1 | """Node 
rendering helpers""" 2 | 3 | __all__ = [ 4 | 'clip_async_render', 5 | 'WaveHeader', 'audio_async_render' 6 | ] 7 | 8 | # pylint: disable=no-member 9 | 10 | import struct 11 | 12 | from enum import IntEnum 13 | from typing import BinaryIO, Callable, Dict, List, Optional, TextIO, Tuple, overload 14 | 15 | import numpy as np 16 | import vapoursynth as vs 17 | 18 | from rich.progress import BarColumn, Progress, ProgressColumn, Task, TextColumn, TimeRemainingColumn 19 | from rich.text import Text 20 | 21 | from ._logging import logger 22 | from .utils import Properties 23 | 24 | 25 | class FPSColumn(ProgressColumn): 26 | def render(self, task: Task) -> Text: 27 | return Text(f"{task.speed or 0:.02f} fps") 28 | 29 | 30 | def get_render_progress() -> Progress: 31 | return Progress( 32 | TextColumn("{task.description}"), 33 | BarColumn(), 34 | TextColumn("{task.completed}/{task.total}"), 35 | TextColumn("{task.percentage:>3.02f}%"), 36 | FPSColumn(), 37 | TimeRemainingColumn(), 38 | ) 39 | 40 | 41 | RenderCallback = Callable[[int, vs.VideoFrame], None] 42 | 43 | 44 | @overload 45 | def clip_async_render(clip: vs.VideoNode, # type: ignore [misc] 46 | outfile: Optional[BinaryIO] = None, 47 | timecodes: None = ..., 48 | progress: Optional[str] = "Rendering clip...", 49 | callback: RenderCallback | List[RenderCallback] | None = None) -> None: 50 | ... 51 | 52 | 53 | @overload 54 | def clip_async_render(clip: vs.VideoNode, 55 | outfile: Optional[BinaryIO] = None, 56 | timecodes: TextIO = ..., 57 | progress: Optional[str] = "Rendering clip...", 58 | callback: RenderCallback | List[RenderCallback] | None = None) -> List[float]: 59 | ... 
60 | 61 | 62 | @logger.catch 63 | def clip_async_render(clip: vs.VideoNode, # noqa: C901 64 | outfile: Optional[BinaryIO] = None, 65 | timecodes: TextIO | None = None, 66 | progress: Optional[str] = "Rendering clip...", 67 | callback: RenderCallback | List[RenderCallback] | None = None) -> None | List[float]: 68 | """ 69 | Render a clip by requesting frames asynchronously using clip.frames, 70 | providing for callback with frame number and frame object. 71 | 72 | This is mostly a re-implementation of VideoNode.output, but a little bit slower since it's pure python. 73 | You only really need this when you want to render a clip while operating on each frame in order 74 | or you want timecodes without using vspipe. 75 | 76 | Original function borrowed from lvsfunc.render.clip_async_render. 77 | 78 | :param clip: Clip to render. 79 | :param outfile: Y4MPEG render output BinaryIO handle. If None, no Y4M output is performed. 80 | Use ``sys.stdout.buffer`` for stdout. (Default: None) 81 | :param timecodes: Timecode v2 file TextIO handle. If None, timecodes will not be written. 82 | :param progress: String to use for render progress display. 83 | If empty or ``None``, no progress display. 84 | :param callback: Single or list of callbacks to be preformed. The callbacks are called 85 | when each sequential frame is output, not when each frame is done. 86 | 87 | :return: List of timecodes from rendered clip. 
88 | """ 89 | cbl = [] if callback is None else callback if isinstance(callback, list) else [callback] 90 | 91 | if progress: 92 | p = get_render_progress() 93 | task = p.add_task(progress, total=clip.num_frames) 94 | p.start() 95 | 96 | def _progress_cb(n: int, f: vs.VideoFrame) -> None: 97 | p.update(task, advance=1) 98 | 99 | cbl.append(_progress_cb) 100 | 101 | if outfile: 102 | if clip.format is None: 103 | raise ValueError("clip_async_render: 'Cannot render a variable format clip to y4m!'") 104 | if clip.format.color_family not in (vs.YUV, vs.GRAY): 105 | raise ValueError("clip_async_render: 'Can only render YUV and GRAY clips to y4m!'") 106 | if clip.format.color_family == vs.GRAY: 107 | y4mformat = "mono" 108 | else: 109 | try: 110 | formats: Dict[Tuple[int, int], str] = { 111 | (1, 1): "420", 112 | (1, 0): "422", 113 | (0, 0): "444", 114 | (2, 2): "410", 115 | (2, 0): "411", 116 | (0, 1): "440", 117 | } 118 | y4mformat = formats[(clip.format.subsampling_w, clip.format.subsampling_h)] 119 | except KeyError as key_err: 120 | raise ValueError("clip_async_render: 'What have you done'") from key_err 121 | 122 | y4mformat = f"{y4mformat}p{clip.format.bits_per_sample}" if clip.format.bits_per_sample > 8 else y4mformat 123 | header = f"YUV4MPEG2 C{y4mformat} W{clip.width} H{clip.height} F{clip.fps.numerator}:{clip.fps.denominator} Ip A0:0\n" 124 | outfile.write(header.encode("utf-8")) 125 | 126 | if timecodes: 127 | timecodes.write("# timestamp format v2\n") 128 | 129 | tc_list = [0.0] 130 | 131 | try: 132 | for n, f in enumerate(clip.frames(close=True)): 133 | for cb in cbl: 134 | cb(n, f) 135 | if timecodes: 136 | _write_timecodes(f, timecodes, tc_list) 137 | if outfile: 138 | _finish_frame_video(f, outfile) 139 | except KeyboardInterrupt as keyb_err: 140 | logger.error('', keyb_err) 141 | finally: 142 | if progress: 143 | p.stop() # type: ignore[pylance] 144 | 145 | return tc_list if timecodes else None 146 | 147 | 148 | def _finish_frame_video(frame: 
vs.VideoFrame, outfile: BinaryIO) -> None: 149 | outfile.write("FRAME\n".encode("utf-8")) 150 | for plane in frame: # type: ignore [attr-defined] 151 | outfile.write(plane) 152 | 153 | 154 | def _write_timecodes(frame: vs.VideoFrame, timecodes: TextIO, tc_list: List[float]) -> None: 155 | tc = tc_list[-1] + Properties.get_prop(frame, '_DurationNum', int) / Properties.get_prop(frame, '_DurationDen', int) 156 | tc_list.append(tc) 157 | timecodes.write(f"{round(tc * 1000):d}\n") 158 | 159 | 160 | class WaveFormat(IntEnum): 161 | """ 162 | WAVE form wFormatTag IDs 163 | Complete list is in mmreg.h in Windows 10 SDK. 164 | """ 165 | PCM = 0x0001 166 | IEEE_FLOAT = 0x0003 167 | EXTENSIBLE = 0xFFFE 168 | 169 | 170 | class WaveHeader(IntEnum): 171 | """ 172 | Wave headers 173 | """ 174 | WAVE = 0 175 | WAVE64 = 1 176 | AUTO = 2 177 | 178 | 179 | WAVE_RIFF_TAG = b'RIFF' 180 | WAVE_WAVE_TAG = b'WAVE' 181 | WAVE_FMT_TAG = b'fmt ' 182 | WAVE_DATA_TAG = b'data' 183 | 184 | WAVE64_RIFF_UUID = (0x72, 0x69, 0x66, 0x66, 0x2E, 0x91, 0xCF, 0x11, 0xA5, 0xD6, 0x28, 0xDB, 0x04, 0xC1, 0x00, 0x00) 185 | WAVE64_WAVE_UUID = (0x77, 0x61, 0x76, 0x65, 0xF3, 0xAC, 0xD3, 0x11, 0x8C, 0xD1, 0x00, 0xC0, 0x4F, 0x8E, 0xDB, 0x8A) 186 | WAVE64_FMT_UUID = (0x66, 0x6D, 0x74, 0x20, 0xF3, 0xAC, 0xD3, 0x11, 0x8C, 0xD1, 0x00, 0xC0, 0x4F, 0x8E, 0xDB, 0x8A) 187 | WAVE64_DATA_UUID = (0x64, 0x61, 0x74, 0x61, 0xF3, 0xAC, 0xD3, 0x11, 0x8C, 0xD1, 0x00, 0xC0, 0x4F, 0x8E, 0xDB, 0x8A) 188 | WAVE_FMT_EXTENSIBLE_SUBFORMAT = ( 189 | (WaveFormat.PCM, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xAA, 0x00, 0x38, 0x9B, 0x71), 190 | (WaveFormat.IEEE_FLOAT, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xAA, 0x00, 0x38, 0x9B, 0x71) 191 | ) 192 | 193 | 194 | @logger.catch 195 | def audio_async_render(audio: vs.AudioNode, 196 | outfile: BinaryIO, 197 | header: WaveHeader = WaveHeader.AUTO, 198 | progress: Optional[str] = "Rendering audio...") -> None: 199 | """ 200 | Render an audio by requesting 
frames asynchronously using audio.frames. 201 | 202 | Implementation-like of VideoNode.output for an AudioNode that isn't in the Cython side yet. 203 | 204 | :param audio: Audio to render. 205 | :param outfile: Render output BinaryIO handle. 206 | :param header: Kind of Wave header. 207 | WaveHeader.AUTO adds a Wave64 header if the audio 208 | 209 | * Has more than 2 channels 210 | * Has a bitdepth > 16 211 | * Has more than 44100 samples 212 | 213 | :param progress: String to use for render progress display. 214 | If empty or ``None``, no progress display. 215 | """ 216 | if progress: 217 | p = get_render_progress() 218 | task = p.add_task(progress, total=audio.num_frames) 219 | p.start() 220 | 221 | bytes_per_output_sample = (audio.bits_per_sample + 7) // 8 222 | block_align = audio.num_channels * bytes_per_output_sample 223 | bytes_per_second = audio.sample_rate * block_align 224 | data_size = audio.num_samples * block_align 225 | 226 | if header == WaveHeader.AUTO: 227 | conditions = (audio.num_channels > 2, audio.bits_per_sample > 16, audio.num_samples > 44100) 228 | header_func, use_w64 = (_w64_header, WaveHeader.WAVE64) if any(conditions) else (_wav_header, WaveHeader.WAVE) 229 | else: 230 | use_w64 = header 231 | header_func = (_wav_header, _w64_header)[header] 232 | 233 | outfile.write(header_func(audio, bytes_per_second, block_align, data_size)) 234 | 235 | for f in audio.frames(close=True): 236 | if progress: 237 | p.update(task, advance=1) # type: ignore[pylance-strict] 238 | _finish_frame_audio(f, outfile, audio.bits_per_sample == 24) 239 | # Determine file size and place the value at the correct position 240 | # at the beginning of the file 241 | size = outfile.tell() 242 | if use_w64: 243 | outfile.seek(16) 244 | outfile.write(struct.pack(' bytes: 254 | header = WAVE_RIFF_TAG 255 | # Add 4 bytes for the length later 256 | header += b'\x00\x00\x00\x00' 257 | header += WAVE_WAVE_TAG 258 | 259 | header += WAVE_FMT_TAG 260 | format_tag = 
WaveFormat.IEEE_FLOAT if audio.sample_type == vs.FLOAT else WaveFormat.PCM 261 | 262 | fmt_chunk_data = struct.pack( 263 | ' 0xFFFFFFFE: 270 | raise ValueError('Data exceeds wave file size limit') 271 | 272 | header += WAVE_DATA_TAG 273 | header += struct.pack(' bytes: 278 | # RIFF-GUID 279 | header = bytes(WAVE64_RIFF_UUID) 280 | # Add 8 bytes for the length later 281 | header += b'\x00\x00\x00\x00\x00\x00\x00\x00' 282 | # WAVE-GUID 283 | header += bytes(WAVE64_WAVE_UUID) 284 | # FMT-GUID 285 | fmt_guid = bytes(WAVE64_FMT_UUID) 286 | header += fmt_guid 287 | 288 | # We only support WAVEFORMATEXTENSIBLE for WAVE64 header 289 | format_tag = WaveFormat.EXTENSIBLE 290 | 291 | # cb_size should be 22 for WAVEFORMATEXTENSIBLE with PCM 292 | cb_size = 22 293 | fmt_chunk_data = struct.pack( 294 | ' None: 315 | # For some reason f[i] is faster than list(f) or just passing f to stack 316 | data = np.stack([frame[i] for i in range(frame.num_channels)], axis=1) # type: ignore[var-annotated] 317 | 318 | if _24bit: 319 | if data.ndim == 1: 320 | # Convert to a 2D array with a single column 321 | data.shape += (1, ) 322 | # Data values are stored in 32 bits so we must convert them to 24 bits 323 | # Then by shifting first 0 bits, then 8, then 16, the resulting output is 24 bit little-endian. 
324 | data = ((data // 2 ** 8).reshape(data.shape + (1, )) >> np.array([0, 8, 16], np.uint32)) # type: ignore[pylance-strict] 325 | outfile.write(data.ravel().astype(np.uint8).tobytes()) 326 | else: 327 | outfile.write(data.ravel().view(np.int8).tobytes()) 328 | 329 | 330 | class SceneChangeMode(IntEnum): 331 | WWXD = 11 332 | SCXVID = 22 333 | MV = 44 334 | 335 | 336 | def find_scene_changes( # noqa: C901 337 | clip: vs.VideoNode, mode: int | SceneChangeMode = SceneChangeMode.WWXD, *, 338 | scxvid_use_slices: bool = False, 339 | mv_vectors: Optional[vs.VideoNode] = None, 340 | mv_thscd1: Optional[int] = None, mv_thscd2: Optional[int] = None, 341 | ) -> List[int]: 342 | """ 343 | Generate a list of scene changes (keyframes). 344 | 345 | Dependencies: 346 | 347 | * vapoursynth-wwxd 348 | * vapoursynth-scxvid (Optional: scxvid mode) 349 | 350 | :param clip: Clip to search for scene changes. Will be rendered in its entirety. 351 | :param mode: Scene change detection mode: 352 | 353 | * WWXD: Use wwxd 354 | * SCXVID: Use scxvid 355 | * WWXD_SCXVID_UNION: Union of wwxd and sxcvid (must be detected by at least one) 356 | * WWXD_SCXVID_INTERSECTION: Intersection of wwxd and scxvid (must be detected by both) 357 | 358 | :return: List of scene changes. 
359 | """ 360 | frames: List[int] = [] 361 | props: List[str] = [] 362 | clip = clip.resize.Bilinear(640, 360, format=vs.YUV420P8) 363 | SCM = SceneChangeMode 364 | wwxd_unions = {SCM.WWXD | SCM.SCXVID, SCM.WWXD | SCM.MV, SCM.WWXD | SCM.SCXVID | SCM.MV} 365 | wwxd_inters = {SCM.WWXD & SCM.SCXVID, SCM.WWXD & SCM.MV, SCM.WWXD & SCM.SCXVID & SCM.MV} 366 | scxvid_unions = {SCM.SCXVID | SCM.WWXD, SCM.SCXVID | SCM.MV, SCM.SCXVID | SCM.WWXD | SCM.MV} 367 | scxvid_inters = {SCM.SCXVID & SCM.WWXD, SCM.SCXVID & SCM.MV, SCM.SCXVID & SCM.WWXD & SCM.MV} 368 | mv_unions = {SCM.MV | SCM.WWXD, SCM.MV | SCM.SCXVID, SCM.MV | SCM.WWXD | SCM.SCXVID} 369 | mv_inters = {SCM.MV & SCM.WWXD, SCM.MV & SCM.SCXVID, SCM.MV & SCM.WWXD & SCM.SCXVID} 370 | 371 | # SCXVID and mv share the same prop 372 | # https://github.com/dubhater/vapoursynth-scxvid/issues/3 373 | if mode in {SCM.WWXD} | wwxd_unions | wwxd_inters: 374 | clip = clip.wwxd.WWXD() 375 | props.append('Scenechange') 376 | if mode in {SCM.SCXVID} | scxvid_unions | scxvid_inters: 377 | clip = clip.scxvid.Scxvid(use_slices=scxvid_use_slices) 378 | props.append('_SceneChangePrev') 379 | if mode in {SCM.MV} | mv_unions | mv_inters: 380 | if not mv_vectors: 381 | mv_vectors = clip.mv.Super().mv.Analyse() 382 | clip = clip.mv.SCDetection(mv_vectors, mv_thscd1, mv_thscd2) 383 | props.append('_SceneChangePrev') 384 | 385 | def _cb(n: int, f: vs.VideoFrame) -> None: 386 | match mode: 387 | case SCM.WWXD | SCM.SCXVID | SCM.MV: 388 | if Properties.get_prop(f, props[0], int): 389 | frames.append(n) 390 | case _ if mode in wwxd_unions | scxvid_unions | mv_unions: 391 | if any(Properties.get_prop(f, p, int) for p in props): 392 | frames.append(n) 393 | case _ if mode in wwxd_inters | scxvid_inters | mv_inters: 394 | if all(Properties.get_prop(f, p, int) for p in props): 395 | frames.append(n) 396 | case _: 397 | pass 398 | 399 | clip_async_render(clip, progress="Detecting scene changes...", callback=_cb) 400 | 401 | return sorted(frames) 402 | 
-------------------------------------------------------------------------------- /vardautomation/tooling/__init__.py: -------------------------------------------------------------------------------- 1 | """Tooling module""" 2 | # flake8: noqa 3 | from .abstract import * 4 | from .audio import * 5 | from .base import * 6 | from .misc import * 7 | from .mux import * 8 | from .video import * 9 | 10 | __all__ = [ 11 | 'Tool', 'BasicTool', 12 | 'AudioExtracter', 'MKVAudioExtracter', 'Eac3toAudioExtracter', 'FFmpegAudioExtracter', 13 | 14 | 'AudioEncoder', 'BitrateMode', 'QAACEncoder', 'OpusEncoder', 'FDKAACEncoder', 'FlacCompressionLevel', 'FlacEncoder', 15 | 'PassthroughAudioEncoder', 16 | 17 | 'AudioCutter', 'ScipyCutter', 'EztrimCutter', 'SoxCutter', 'PassthroughCutter', 18 | 'VideoEncoder', 'VideoLanEncoder', 'X265', 'X264', 'LosslessEncoder', 'NVEncCLossless', 'FFV1', 19 | 'progress_update_func', 20 | 21 | 'make_qpfile', 'Qpfile', 'KeyframesFile', 'get_keyframes', 'get_vs_core', 22 | 23 | 'Track', 'MediaTrack', 'VideoTrack', 'AudioTrack', 'SubtitleTrack', 'ChaptersTrack', 24 | 'SplitMode', 25 | 'MatroskaFile', 26 | 27 | 'SubProcessAsync' 28 | ] 29 | -------------------------------------------------------------------------------- /vardautomation/tooling/abstract.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | __all__ = ['Tool'] 4 | 5 | import re 6 | import subprocess 7 | 8 | from abc import ABC, abstractmethod 9 | from typing import Any, Dict, List, NoReturn 10 | 11 | from .._logging import logger 12 | from ..vpathlib import VPath 13 | from ..vtypes import AnyPath 14 | 15 | 16 | class Tool(ABC): 17 | """ 18 | Abstract Tool interface.\n 19 | Most of the tools inherit from it. 
20 | """ 21 | 22 | binary: VPath 23 | """Binary path""" 24 | 25 | params: List[str] 26 | """Settings normalised and parsed""" 27 | 28 | def __init__(self, binary: AnyPath, settings: AnyPath | List[str] | Dict[str, Any], *, check_binary: bool = True) -> None: 29 | """ 30 | :: 31 | 32 | # This 33 | >>> cat settings 34 | -o {clip_output:s} - --y4m --preset slower --crf 51 35 | 36 | # is equivalent to this: 37 | settings: List[str] = ['-o', '{clip_output:s}', '-', '--y4m', '--preset', 'slower', '--crf', '51'] 38 | 39 | # and is equivalent to this: 40 | settings: Dict[str, Any] = { 41 | '-o': '{clip_output:s}', 42 | '-': None, 43 | '--y4m': None, 44 | '--preset': 'slower', 45 | '--crf': 51 46 | } 47 | 48 | :param binary: Path to your binary file. 49 | :param settings: Path to your settings file or list of string or a dict containing your settings 50 | Special variable names can be specified and are replaced at runtime. 51 | Supported variable names are defined in :py:func:`set_variable` docstring. 52 | :param check_binary: Check binary's availability. 
53 | """ 54 | self.binary = VPath(binary) 55 | 56 | if isinstance(settings, dict): 57 | for k, v in settings.items(): 58 | self.params.extend([k] + ([str(v)] if v else [])) 59 | elif isinstance(settings, list): 60 | self.params = settings.copy() 61 | else: 62 | try: 63 | with open(settings, 'r', encoding='utf-8') as sttgs: 64 | params_re = re.split(r'[\n\s]\s*', sttgs.read()) 65 | except FileNotFoundError as file_err: 66 | logger.critical(f'{self.__class__.__name__}: settings file not found', file_err) 67 | self.params = [p for p in params_re if isinstance(p, str)] 68 | 69 | if check_binary: 70 | self._check_binary() 71 | self.params.insert(0, self.binary.to_str()) 72 | 73 | super().__init__() 74 | 75 | @abstractmethod 76 | def run(self) -> None | NoReturn: 77 | """Tooling chain""" 78 | 79 | @abstractmethod 80 | def set_variable(self) -> Dict[str, Any]: 81 | """ 82 | Set variables in the settings\n 83 | """ 84 | 85 | @property 86 | def _quiet(self) -> bool: 87 | return logger.level >= logger.info.no 88 | 89 | @logger.catch 90 | def _update_settings(self) -> None: 91 | set_vars = self.set_variable() 92 | for i, p in enumerate(self.params): 93 | if not re.findall(r'(?<=(? 
None: 99 | try: 100 | subprocess.call(self.binary.to_str(), stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) 101 | except FileNotFoundError as file_not_found: 102 | logger.critical(f'{self.__class__.__name__}: "{self.binary.to_str()}" was not found!', file_not_found) 103 | -------------------------------------------------------------------------------- /vardautomation/tooling/base.py: -------------------------------------------------------------------------------- 1 | 2 | __all__ = ['BasicTool'] 3 | 4 | import subprocess 5 | 6 | from typing import Any, Dict, List 7 | 8 | from .._logging import logger 9 | from ..config import FileInfo 10 | from ..utils import copy_docstring_from 11 | from ..vtypes import AnyPath 12 | from .abstract import Tool 13 | 14 | 15 | class BasicTool(Tool): 16 | """BasicTool interface.""" 17 | 18 | file: FileInfo | None 19 | """FileInfo object.""" 20 | 21 | def __init__(self, binary: AnyPath, settings: AnyPath | List[str] | Dict[str, Any], /, 22 | file: FileInfo | None = None, check_binary: bool = True) -> None: 23 | """ 24 | Helper allowing the use of CLI programs for basic tasks like video or audio track extraction. 25 | 26 | :param binary: See :py:attr:`Tool.binary` 27 | :param settings: See :py:attr:`Tool.settings` 28 | :param file: Not used in BasicTool implementation, defaults to None 29 | :param check_binary: Check binary's availability. 
30 | """ 31 | self.file = file 32 | super().__init__(binary, settings, check_binary=check_binary) 33 | 34 | def run(self) -> None: 35 | self._update_settings() 36 | self._do_tooling() 37 | 38 | @copy_docstring_from(Tool.set_variable, 'o+t') 39 | def set_variable(self) -> Dict[str, Any]: 40 | """No variable are replaced there.""" 41 | return {} 42 | 43 | def _do_tooling(self) -> None: 44 | logger.info(f'{self.binary.to_str()} command: ' + ' '.join(self.params)) 45 | with logger.catch_ctx(): 46 | subprocess.run(self.params, check=True, text=True, encoding='utf-8') 47 | -------------------------------------------------------------------------------- /vardautomation/tooling/misc.py: -------------------------------------------------------------------------------- 1 | 2 | __all__ = [ 3 | 'Qpfile', 'make_qpfile', 4 | 'KeyframesFile', 'get_keyframes', 5 | 'get_vs_core', 'SubProcessAsync' 6 | ] 7 | 8 | import asyncio 9 | import inspect 10 | import os 11 | 12 | from fractions import Fraction 13 | from itertools import accumulate 14 | from typing import Iterable, List, NamedTuple, Optional, Union 15 | 16 | import psutil 17 | import vapoursynth as vs 18 | 19 | from pytimeconv import Convert 20 | 21 | from .._logging import logger 22 | from ..binary_path import BinaryPath 23 | from ..render import SceneChangeMode as SCM 24 | from ..render import find_scene_changes 25 | from ..vpathlib import VPath 26 | from ..vtypes import AnyPath 27 | from .base import BasicTool 28 | 29 | 30 | class Qpfile(NamedTuple): 31 | """Simple namedtuple for a qpfile""" 32 | 33 | path: VPath 34 | """Qpfile path""" 35 | 36 | frames: Optional[List[int]] = None 37 | """List of keyframes""" 38 | 39 | 40 | def make_qpfile(clip: vs.VideoNode, path: Optional[AnyPath] = None, /, 41 | overwrite: bool = True, mode: Union[int, SCM] = SCM.WWXD | SCM.SCXVID) -> Qpfile: 42 | """ 43 | Convenience function for making a qpfile 44 | 45 | :param clip: Source clip 46 | :param path: Path where the qpfile will be written. 
47 | Default to the name of the script that run this function with the ".log" extension 48 | :param overwrite: If True, will overwrite the file 49 | :param mode: Scene change mode, defaults to SCM.WWXD_SCXVID_UNION 50 | :return: A Qpfile 51 | """ 52 | path = VPath(inspect.stack()[-1].filename).with_suffix('.log') if not path else VPath(path) 53 | 54 | if not overwrite and path.exists(): 55 | logger.critical(f'make_qpfile: a qpfile already exists at "{path.resolve().to_str()}"') 56 | 57 | num_threads = vs.core.num_threads 58 | if (oscpu := os.cpu_count()) is not None: 59 | vs.core.num_threads = oscpu 60 | scenes = find_scene_changes(clip, mode) 61 | vs.core.num_threads = num_threads 62 | 63 | with path.open('w', encoding='utf-8') as file: 64 | file.writelines(f'{s} K\n' for s in scenes) 65 | return Qpfile(path, scenes) 66 | 67 | 68 | class KeyframesFile(NamedTuple): 69 | """Simple namedtuple for a keyframes file""" 70 | 71 | path: VPath 72 | """Keyframe file path""" 73 | 74 | frames: List[int] 75 | """List of keyframes""" 76 | 77 | 78 | def get_keyframes(path: AnyPath) -> KeyframesFile: 79 | """ 80 | Get the keyframes of a video using ffmsindex 81 | 82 | :param path: Path of the video 83 | :return: A KeyframesFile 84 | """ 85 | logger.debug(path) 86 | path = VPath(path) 87 | 88 | idx_file = path.parent / 'index.ffindex' 89 | kf_file = idx_file.with_suffix(idx_file.suffix + '_track00.kf.txt') 90 | 91 | BasicTool(BinaryPath.ffmsindex, ['-p', '-k', '-f', path.to_str(), idx_file.to_str()]).run() 92 | idx_file.rm() 93 | 94 | with kf_file.open('r', encoding='utf-8') as kfio: 95 | file = KeyframesFile( 96 | kf_file, 97 | [int(kf) for kf in kfio.read().splitlines()[2:]] 98 | ) 99 | return file 100 | 101 | 102 | def get_vs_core(threads: Optional[Iterable[int]] = None, max_cache_size: Optional[int] = None) -> vs.Core: 103 | """ 104 | Get the VapourSynth singleton core. 
Optionaly, set the number of threads used 105 | and the maximum cache size 106 | 107 | :param threads: An iteratable of thread numbers, defaults to None. 108 | :param max_cache_size: Set the upper framebuffer cache size after which memory is aggressively freed. 109 | The value is in megabytes, defaults to None. 110 | :return: Vapoursynth Core. 111 | """ 112 | core = vs.core.core 113 | 114 | if threads is not None: 115 | threads = list(threads) 116 | core.num_threads = len(threads) 117 | p_handle = psutil.Process() 118 | p_handle.cpu_affinity(threads) 119 | 120 | if max_cache_size is not None: 121 | core.max_cache_size = max_cache_size 122 | 123 | return core 124 | 125 | 126 | def make_tcfile(clips: Iterable[vs.VideoNode], path: Optional[AnyPath] = None, precision: int = 6) -> VPath: 127 | """ 128 | Convenience function for making a tcfile 129 | 130 | :param clips: Source clips 131 | :param path: tcfile path 132 | :param precision: Precision of fps 133 | :return: tcfile path 134 | """ 135 | num_frames, fpss, times = list[int](), list[Fraction](), list[float]() 136 | 137 | for clip in clips: 138 | num_frames.append(clip.num_frames) 139 | fpss.append(clip.fps) 140 | times.append(Convert.f2seconds(clip.num_frames, clip.fps)) 141 | 142 | start_frames = accumulate(num_frames[:-1], lambda x, y: x + y, initial=0) 143 | end_frames = accumulate(num_frames[1:], lambda x, y: x + y, initial=num_frames[0] - 1) 144 | 145 | path = VPath(inspect.stack()[-1].filename).with_suffix('.tcfile') if not path else VPath(path) 146 | 147 | with path.open('w', encoding='utf-8') as file: 148 | file.write('# timestamp format v1\n') 149 | file.write(f'assume {round(sum(num_frames) / sum(times), precision)}\n') # type: ignore[arg-type] 150 | file.writelines( 151 | f'{s},{e},{round(float(fps), precision)}\n' 152 | for s, e, fps in zip(start_frames, end_frames, fpss) 153 | ) 154 | 155 | return path 156 | 157 | 158 | class SubProcessAsync: 159 | __slots__ = ('sem', ) 160 | 161 | sem: 
asyncio.Semaphore 162 | 163 | @logger.catch 164 | def __init__(self, cmds: List[str], /, *, nb_cpus: Optional[int] = os.cpu_count()) -> None: 165 | if nb_cpus: 166 | self.sem = asyncio.Semaphore(nb_cpus) 167 | else: 168 | raise ValueError(f'{self.__class__.__name__}: no CPU found!') 169 | 170 | loop = asyncio.get_event_loop() 171 | try: 172 | loop.run_until_complete(self._processing(cmds)) 173 | finally: 174 | loop.run_until_complete(loop.shutdown_asyncgens()) 175 | loop.close() 176 | 177 | async def _processing(self, all_cmds: List[str]) -> None: 178 | await asyncio.gather( 179 | *(asyncio.ensure_future(self._safe_processing(cmd)) for cmd in all_cmds) 180 | ) 181 | 182 | async def _safe_processing(self, cmd: str) -> None: 183 | logger.debug(cmd) 184 | async with self.sem: 185 | return await self._run_cmd(cmd) 186 | 187 | @staticmethod 188 | async def _run_cmd(cmd: str) -> None: 189 | proc = await asyncio.create_subprocess_shell(cmd) 190 | logger.debug(cmd) 191 | await proc.communicate() 192 | -------------------------------------------------------------------------------- /vardautomation/tooling/mux.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | # pylint: disable=inconsistent-return-statements 4 | # pylint: disable=keyword-arg-before-vararg 5 | 6 | __all__ = [ 7 | 'Track', 'MediaTrack', 'VideoTrack', 'AudioTrack', 'SubtitleTrack', 'ChaptersTrack', 8 | 'SplitMode', 9 | 'MatroskaFile' 10 | ] 11 | 12 | from abc import ABC, abstractmethod 13 | from enum import Enum 14 | from os import PathLike 15 | from pprint import pformat 16 | from typing import Iterable, List, Literal, MutableSequence, NoReturn, Optional, Sequence, Tuple, overload, cast 17 | 18 | from ..binary_path import BinaryPath 19 | from ..config import FileInfo 20 | from ..language import UNDEFINED, Lang 21 | from ..utils import recursive_dict 22 | from ..vpathlib import CleanupSet, VPath 23 | from ..vtypes import AnyPath 24 
from .base import BasicTool


class _AbstractTrack(Sequence[str], ABC):
    # Accumulated mkvmerge arguments describing this track
    _cmd: List[str]

    @abstractmethod
    def __init__(self) -> None:
        ...

    @overload
    def __getitem__(self, index: int) -> str:
        ...

    @overload
    def __getitem__(self, index: slice) -> Sequence[str]:
        ...

    def __getitem__(self, index: int | slice) -> str | Sequence[str]:
        return cast(Sequence[str], self._cmd.__getitem__(index))

    def __len__(self) -> int:
        return self._cmd.__len__()

    def __str__(self) -> str:
        return pformat(recursive_dict(self.__dict__), indent=1, width=80, sort_dicts=True)


class Track(_AbstractTrack):
    """Standard Track interface for to be passed to mkvmerge"""

    path: VPath
    """VPath to the file"""

    opts: Tuple[str, ...]
    """Additional options for this track"""

    def __init__(self, path: AnyPath, *opts: str) -> None:
        """
        Register a new track

        :param path:    Path to the file
        :param opts:    Additional options
        """
        self.path = VPath(path)
        self.opts = opts
        # NOTE(review): arguments are stored path-first then options reversed;
        # presumably the final command line is assembled by reversing this
        # list -- confirm against MatroskaFile before changing the order.
        self._cmd = [self.path.to_str()]
        self._cmd.extend(reversed(self.opts))


class _LanguageTrack(Track):
    lang: Lang
    """Language of the track"""

    def __init__(self, path: AnyPath, lang: Lang | str = UNDEFINED, *opts: str) -> None:
        # Accept either a ready-made Lang or an IETF string
        self.lang = Lang.make(lang) if isinstance(lang, str) else lang
        super().__init__(path, *opts)


class MediaTrack(_LanguageTrack):
    """Interface for medias track based to be passed to mkvmerge"""

    name: Optional[str]
    """Name of the track"""

    lang: Lang
    """Language of the track"""

    def __init__(self, path: AnyPath, name: Optional[str] = None, lang: Lang | str = UNDEFINED, tid: int = 0, /, *opts: str) -> None:
        """
        Register a new track

        :param path:    Path to the file
        :param name:    Name of the track
        :param lang:    Language of the track
        :param tid:     Track ID
        :param opts:    Additional options
        """
        super().__init__(path, lang, *opts)
        self.name = name
        if self.name:
            self._cmd.extend([f'{tid}:' + self.name, '--track-name'])
        self._cmd.extend([f'{tid}:' + self.lang.iso639, '--language'])


class VideoTrack(MediaTrack):
    ...


class AudioTrack(MediaTrack):
    ...


class SubtitleTrack(MediaTrack):
    ...


class ChaptersTrack(_LanguageTrack):
    """Interface for chapters track based to be passed to mkvmerge"""

    charset: Optional[str]
    """Character set that is used for the conversion to UTF-8 for simple chapter files."""

    def __init__(self, path: AnyPath, lang: Lang | str = UNDEFINED, charset: Optional[str] = None, /, *opts: str) -> None:
        """
        Register a new chapters track

        :param path:    Path to the file
        :param lang:    Language of the track
        :param charset: Character set that is used for the conversion to UTF-8 for simple chapter files
        """
        super().__init__(path, lang, *opts)
        self.charset = charset
        self._cmd.insert(1, '--chapters')
        if self.charset:
            self._cmd.extend([self.charset, '--chapter-charset'])
        self._cmd.extend([self.lang.iso639, '--chapter-language'])


class _AbstractMatroskaFile(MutableSequence[Track]):
    _output: VPath
    _tracks: List[Track]

    @abstractmethod
    def __init__(self) -> None:
        ...

    @overload
    def __getitem__(self, index: int) -> Track:
        ...

    @overload
    def __getitem__(self, index: slice) -> MutableSequence[Track]:
        ...

    def __getitem__(self, index: int | slice) -> Track | MutableSequence[Track]:
        return self._tracks.__getitem__(index)

    @overload
    def __setitem__(self, index: int, value: Track) -> None:
        ...

    @overload
    def __setitem__(self, index: slice, value: Iterable[Track]) -> None:
        ...

    def __setitem__(self, index: int | slice, value: Track | Iterable[Track]) -> None:
        return self._tracks.__setitem__(index, value)  # type: ignore

    def __delitem__(self, index: int | slice) -> None:
        return self._tracks.__delitem__(index)

    def __len__(self) -> int:
        return self._tracks.__len__()

    def insert(self, index: int, value: Track) -> None:
        return self._tracks.insert(index, value)


class SplitMode(str, Enum):
    """MKVMerge split modes"""
    SIZE = 'size'
    """Split by size"""

    DURATION = 'duration'
    """Split by duration"""

    TIMESTAMPS = 'timestamps'
    """Split by timestamps"""

    PARTS = 'parts'
    """Keep specific parts by specifying timestamp ranges while discarding others"""

    PARTS_FRAMES = 'parts-frames'
    """Keep specific parts by specifying frame/field number ranges while discarding others"""

    FRAMES = 'frames'
    """Split by frames"""

    CHAPTERS = 'chapters'
    """Split by chapters"""


class MatroskaFile(_AbstractMatroskaFile):
    """Matroska file interface"""

    global_opts: Tuple[str, ...]
211 | """Global options and other options that affect the whole process""" 212 | 213 | def __init__(self, output: AnyPath, tracks: AnyPath | Track | Iterable[AnyPath | Track] | None = None, /, *global_opts: str) -> None: 214 | """ 215 | Register a new matroska file to be merged/splitted/appended 216 | 217 | :param output: Output path 218 | :param tracks: A path or an iterable of path/Track 219 | :param global_opts: Global options 220 | """ 221 | self._output = VPath(output) 222 | if not tracks: 223 | self._tracks = [] 224 | elif isinstance(tracks, Track): 225 | self._tracks = [tracks] 226 | elif isinstance(tracks, (str, PathLike)): 227 | self._tracks = [Track(tracks)] 228 | else: 229 | self._tracks = [track if isinstance(track, Track) else Track(track) for track in tracks] 230 | self.global_opts = global_opts 231 | 232 | @property 233 | def command(self) -> List[str]: 234 | """Get the mkvmerge command""" 235 | cmd = list[str]() 236 | for track in reversed(self._tracks): 237 | cmd.extend(track) 238 | cmd.extend(reversed(self.global_opts)) 239 | cmd.extend([self._output.to_str(), '-o']) 240 | cmd.reverse() 241 | return cmd 242 | 243 | @classmethod 244 | def autotrack(cls, file: FileInfo, lang: Lang | None = None) -> MatroskaFile: 245 | """ 246 | Automatically get the tracks from a FileInfo object and make a MatroskaFile from it 247 | 248 | :param file: FileInfo object 249 | :return: MatroskaFile object 250 | """ 251 | streams: List[AnyPath | Track] = [file.name_clip_output] 252 | i = 1 253 | while True: 254 | if file.a_enc_cut is not None and file.a_enc_cut.set_track(i).exists(): 255 | streams.append(file.a_enc_cut.set_track(i)) 256 | elif file.a_src_cut is not None and file.a_src_cut.set_track(i).exists(): 257 | streams.append(file.a_src_cut.set_track(i)) 258 | elif file.a_src is not None and file.a_src.set_track(i).exists(): 259 | streams.append(file.a_src.set_track(i)) 260 | else: 261 | break 262 | i += 1 263 | 264 | if file.chapter and file.chapter.exists(): 265 
| streams.append(ChaptersTrack(file.chapter)) 266 | 267 | mkv = cls(file.name_file_final, streams) 268 | mkv.track_lang = lang 269 | 270 | return mkv 271 | 272 | @staticmethod 273 | def automux(file: FileInfo) -> None: 274 | """ 275 | Call ``MatroskaFile.autotrack`` and mux it. 276 | 277 | :param file: FileInfo object 278 | """ 279 | MatroskaFile.autotrack(file).mux(return_workfiles=False) 280 | 281 | @property 282 | def track_lang(self) -> List[Lang | None] | Lang | None: 283 | """ 284 | Lang(s) of the tracks of the current MatroskaFile object 285 | 286 | :setter: Change the Lang of the tracks 287 | """ 288 | if len(self._tracks) > 1: 289 | return [track.lang if isinstance(track, MediaTrack) else None for track in self._tracks] 290 | return track.lang if isinstance(track := self._tracks[0], MediaTrack) else None 291 | 292 | @track_lang.setter 293 | def track_lang(self, langs: List[Lang | None] | Lang | None) -> None: 294 | if langs is None: 295 | return None 296 | 297 | if isinstance(langs, Lang): 298 | for track in self._tracks: 299 | if isinstance(track, MediaTrack): 300 | track.lang = langs 301 | return None 302 | 303 | nlangs = langs[:len(self._tracks)] 304 | nlangs += [langs[-1]] * (len(self._tracks) - len(langs)) 305 | for track, nlang in zip(self._tracks, nlangs): 306 | if isinstance(track, MediaTrack) and nlang: 307 | track.lang = nlang 308 | 309 | @overload 310 | def mux(self, return_workfiles: Literal[True] = ...) -> CleanupSet: 311 | ... 312 | 313 | @overload 314 | def mux(self, return_workfiles: Literal[False]) -> None: 315 | ... 
316 | 317 | def mux(self, return_workfiles: bool = True) -> CleanupSet | None: 318 | """ 319 | Launch a merge command 320 | 321 | :return: Return worksfiles if True 322 | """ 323 | BasicTool(BinaryPath.mkvmerge, self.command).run() 324 | 325 | if return_workfiles: 326 | return CleanupSet(t.path for t in self._tracks) 327 | return None 328 | 329 | def split(self, mode: SplitMode, param: str) -> None: 330 | """ 331 | Split function ruled by "mode" 332 | 333 | :param mode: Split mode 334 | :param param: Full command after the mode 335 | """ 336 | cmd = self.command 337 | cmd.extend(['--split', mode.value + ':' + param]) 338 | BasicTool(BinaryPath.mkvmerge, cmd).run() 339 | 340 | def split_size(self, size: str) -> None: 341 | """ 342 | Split the output file after a given size 343 | 344 | :param size: d[k|m|g] 345 | """ 346 | self.split(SplitMode.SIZE, size) 347 | 348 | def split_duration(self, duration: str) -> None: 349 | """ 350 | Split the output file after a given duration 351 | 352 | :param duration: HH:MM:SS.nnnnnnnnn|ds 353 | """ 354 | self.split(SplitMode.DURATION, duration) 355 | 356 | def split_timestamps(self, timestamps: Iterable[str]) -> None: 357 | """ 358 | Split the output file after specific timestamps 359 | 360 | :param timestamps: A[,B[,C...]] 361 | """ 362 | self.split(SplitMode.TIMESTAMPS, ','.join(timestamps)) 363 | 364 | def split_parts(self, parts: List[Tuple[str | None, str | None]]) -> None: 365 | """ 366 | Keep specific parts by specifying timestamp ranges while discarding others 367 | 368 | :param parts: start1-end1[,[+]start2-end2[,[+]start3-end3...]] 369 | """ 370 | nparts = list[str]() 371 | for part in parts: 372 | s, e = part 373 | if not s: 374 | s = '' 375 | if not e: 376 | e = '' 377 | pr = s + '-' + e 378 | nparts.append(pr) 379 | self.split(SplitMode.PARTS, ','.join(nparts)) 380 | 381 | def split_parts_frames(self, parts: List[Tuple[int | None, int | None]]) -> None: 382 | """ 383 | Keep specific parts by specifying frame/field 
number ranges while discarding others 384 | 385 | :param parts: start1-end1[,[+]start2-end2[,[+]start3-end3...]] 386 | """ 387 | nparts = list[str]() 388 | for part in parts: 389 | s, e = part 390 | ss = '' if not s else str(s) 391 | ee = '' if not e else str(e) 392 | pr = ss + '-' + ee 393 | nparts.append(pr) 394 | 395 | self.split(SplitMode.PARTS_FRAMES, ','.join(nparts)) 396 | 397 | def split_frames(self, frames: int | Iterable[int]) -> None: 398 | """ 399 | Split after specific frames/fields 400 | 401 | :param frames: A[,B[,C...]] 402 | """ 403 | self.split(SplitMode.FRAMES, str(frames) if isinstance(frames, int) else ','.join(map(str, frames))) 404 | 405 | def split_chapters(self, indices: Literal['all'] | Iterable[int]) -> None: 406 | """ 407 | Split before specific chapters 408 | 409 | :param indices: "all" or A[,B[,C...]] 410 | """ 411 | if isinstance(indices, str): 412 | return self.split(SplitMode.CHAPTERS, indices) 413 | self.split(SplitMode.CHAPTERS, ','.join(map(str, indices))) 414 | 415 | def append_to(self, files: Iterable[AnyPath], ids: Iterable[Tuple[int, int, int, int]] | None = None) -> None: 416 | """ 417 | Enable append mode 418 | 419 | :param files: Files to be appended 420 | :param ids: Controls to which track another track is appended. 
421 | """ 422 | cmd = self.command 423 | cmd.append('[') 424 | cmd.extend(map(str, files)) 425 | cmd.append(']') 426 | if ids: 427 | cmd.append('--append-to') 428 | cmd.append(','.join(':'.join(map(str, id_)) for id_ in ids)) 429 | BasicTool(BinaryPath.mkvmerge, cmd).run() 430 | 431 | def add_timestamps(self, path: AnyPath, id_: int = 0) -> None: 432 | """ 433 | Add timestamps global command 434 | 435 | :param path: Timecode path 436 | :param id_: [description], defaults to 0 437 | """ 438 | self.global_opts = ('--timestamps', f'{id_}:' + str(path)) + self.global_opts 439 | 440 | def add_attachments(self) -> NoReturn: 441 | raise NotImplementedError 442 | -------------------------------------------------------------------------------- /vardautomation/utils.py: -------------------------------------------------------------------------------- 1 | """Properties and helpers functions""" 2 | import subprocess 3 | 4 | from functools import wraps 5 | from types import FunctionType 6 | from typing import Any, Callable, Dict, Iterable, List, MutableMapping, Tuple, Type, TypeVar, cast 7 | 8 | import vapoursynth as vs 9 | 10 | from ._logging import logger 11 | from .exception import VSColourRangeError, VSSubsamplingError 12 | from .vtypes import AnyPath, T 13 | 14 | core = vs.core 15 | 16 | 17 | class Properties: 18 | """Collection of methods to get some properties from the parameters and/or the clip""" 19 | 20 | @logger.catch 21 | @classmethod 22 | def get_colour_range(cls, params: List[str], clip: vs.VideoNode) -> Tuple[int, int]: 23 | """ 24 | Get the luma colour range specified in the params. 25 | Fallback to the clip properties. 26 | 27 | :param params: Settings of the encoder. 
28 | :param clip: Source 29 | :return: A tuple of min_luma and max_luma value 30 | """ 31 | bits = cls.get_depth(clip) 32 | 33 | def _get_props(clip: vs.VideoNode) -> MutableMapping[str, Any]: 34 | with clip.get_frame(0) as frame: 35 | return frame.props.copy() 36 | 37 | if '--range' in params: 38 | rng_param = params[params.index('--range') + 1] 39 | if rng_param == 'limited': 40 | min_luma = 16 << (bits - 8) 41 | max_luma = 235 << (bits - 8) 42 | elif rng_param == 'full': 43 | min_luma = 0 44 | max_luma = (1 << bits) - 1 45 | else: 46 | raise VSColourRangeError(f'{cls.__name__}: Wrong range in parameters!') 47 | elif '_ColorRange' in (props := _get_props(clip)): 48 | color_rng = props['_ColorRange'] 49 | if color_rng == 1: 50 | min_luma = 16 << (bits - 8) 51 | max_luma = 235 << (bits - 8) 52 | elif color_rng == 0: 53 | min_luma = 0 54 | max_luma = (1 << bits) - 1 55 | else: 56 | raise VSColourRangeError(f'{cls.__name__}: Wrong "_ColorRange" prop in the clip!') 57 | else: 58 | raise VSColourRangeError(f'{cls.__name__}: Cannot guess the color range!') 59 | 60 | return min_luma, max_luma 61 | 62 | @staticmethod 63 | def get_depth(clip: vs.VideoNode, /) -> int: 64 | """ 65 | Returns the bit depth of a VideoNode as an integer. 
66 | 67 | :param clip: Source clip 68 | :return: Bitdepth 69 | """ 70 | assert clip.format 71 | return clip.format.bits_per_sample 72 | 73 | @staticmethod 74 | @logger.catch 75 | def get_csp(clip: vs.VideoNode) -> str: 76 | """ 77 | Get the colourspace a the given clip based on its format 78 | 79 | :param clip: Source clip 80 | :return: Colourspace suitable for x264 81 | """ 82 | def _get_csp_subsampled(format_clip: vs.VideoFormat) -> str: 83 | sub_w, sub_h = format_clip.subsampling_w, format_clip.subsampling_h 84 | csp_yuv_subs: Dict[Tuple[int, int], str] = {(0, 0): 'i444', (1, 0): 'i422', (1, 1): 'i420'} 85 | try: 86 | return csp_yuv_subs[(sub_w, sub_h)] 87 | except KeyError as k_err: 88 | raise VSSubsamplingError(f'{Properties.__name__}: wrong subsampling "{(sub_w, sub_h)}"') from k_err 89 | 90 | assert clip.format 91 | 92 | csp_avc: dict[vs.ColorFamily, str] = { 93 | vs.GRAY: 'i400', 94 | vs.YUV: _get_csp_subsampled(clip.format), 95 | vs.RGB: 'rgb' 96 | } 97 | return csp_avc[clip.format.color_family] 98 | 99 | @staticmethod 100 | def get_encoder_name(path: AnyPath) -> str: 101 | """ 102 | Get the encoder name from the file's tags 103 | 104 | :param path: File path 105 | :return: Encoder name 106 | """ 107 | ffprobe_args = ['ffprobe', '-loglevel', 'quiet', '-show_entries', 'format_tags=encoder', 108 | '-print_format', 'default=nokey=1:noprint_wrappers=1', str(path)] 109 | with logger.catch_ctx(): 110 | return subprocess.check_output(ffprobe_args, shell=True, encoding='utf-8') 111 | 112 | @staticmethod 113 | def get_matrix_name(frame: vs.VideoFrame, key: str) -> str: 114 | """ 115 | Gets FrameProp ``prop`` from frame ``frame`` with expected type ``t`` 116 | and then returns a corresponding string. 117 | This is necessary because x264 does not accept integers for the matrix/primaries/transfer. 
118 | 119 | For a full list of accepted matrices, please check 120 | http://www.chaneru.com/Roku/HLS/X264_Settings.htm#colormatrix 121 | 122 | :param frame: Frame containing props 123 | :param key: Prop to get. Must be _Matrix, _Primaries, or _Transfer! 124 | :param t: Type of prop 125 | 126 | :return: string signalling the clip's matrix 127 | """ 128 | if key.lower() in ('matrix', 'transfer', 'primaries'): 129 | key = "_" + key.capitalize() 130 | 131 | try: 132 | prop = frame.props[key] 133 | except KeyError as key_err: 134 | logger.critical(f"get_matrix_names: 'Key {key} not present in props'", key_err) 135 | 136 | if not isinstance(prop, int): 137 | with logger.catch_ctx(): 138 | raise ValueError(f"get_matrix_names: 'Key {key} did not contain expected type: " 139 | f"Expected int got {type(prop)}'") 140 | 141 | match prop: 142 | case 0: return 'GBR' 143 | case 1: return 'bt709' 144 | case 2: return 'undef' 145 | case 5: return 'bt470m' 146 | case 6: return 'smpte170m' 147 | case 7: return 'smpte240m' 148 | case 9: raise ValueError("get_matrix_names: 'x264 does not support BT2020 yet!'") 149 | case _: raise ValueError("get_matrix_names: 'Invalid matrix passed!'") 150 | 151 | @staticmethod 152 | def get_prop(frame: vs.VideoFrame, key: str, t: Type[T]) -> T: 153 | """ 154 | Gets FrameProp ``prop`` from frame ``frame`` with expected type ``t`` 155 | to satisfy the type checker. 156 | Function borrowed from lvsfunc. 
157 | 158 | :param frame: Frame containing props 159 | :param key: Prop to get 160 | :param t: Type of prop 161 | 162 | :return: frame.prop[key] 163 | """ 164 | try: 165 | prop = frame.props[key] 166 | except KeyError as key_err: 167 | logger.critical(f"get_prop: 'Key {key} not present in props'", key_err) 168 | 169 | if not isinstance(prop, t): 170 | with logger.catch_ctx(): 171 | raise ValueError(f"get_prop: 'Key {key} did not contain expected type: Expected {t} got {type(prop)}'") 172 | 173 | return prop 174 | 175 | 176 | def recursive_dict(obj: object) -> Dict[str, Any] | str: 177 | # pylint: disable=no-else-return 178 | if hasattr(obj, '__dict__') and obj.__dict__: 179 | return {k: recursive_dict(v) for k, v in obj.__dict__.items()} 180 | else: 181 | if isinstance(obj, vs.VideoNode): 182 | return repr(obj) 183 | else: 184 | return str(obj) 185 | 186 | 187 | F = TypeVar('F', bound=Callable[..., Any]) 188 | 189 | 190 | def copy_docstring_from(original: Callable[..., Any], mode: str = 'o') -> Callable[[F], F]: 191 | """ 192 | :param original: Original function 193 | :param mode: Copy mode. 
Can be 'o+t', 't+o', 'o', defaults to 'o'
    """
    # NOTE(review): wraps(original) here copies original's metadata onto the
    # decorator closure `wrapper` itself, not onto `target` — looks unintended
    # since the docstring is assigned explicitly below; confirm.
    @wraps(original)
    def wrapper(target: F) -> F:
        # Normalise missing docstrings so concatenation below cannot fail
        if target.__doc__ is None:
            target.__doc__ = ''
        if original.__doc__ is None:
            original.__doc__ = ''

        if mode == 'o':
            target.__doc__ = original.__doc__
        elif mode == 'o+t':
            target.__doc__ = original.__doc__ + target.__doc__
        elif mode == 't+o':
            target.__doc__ += original.__doc__
        else:
            with logger.catch_ctx():
                raise ValueError('copy_docstring_from: Wrong mode!')
        return target

    return wrapper


def modify_docstring(edit_func: Callable[[str], str], /) -> Callable[[F], F]:
    """Decorator factory: pass the target's docstring through ``edit_func``."""

    def _wrapper(target: F) -> F:
        if not target.__doc__:
            logger.debug(f'modify_docstring: missing docstring in {target}')
            target.__doc__ = ''
        target.__doc__ = edit_func(target.__doc__)
        return target

    return _wrapper


# pylint: disable=unused-argument
def modify_docstring_for(fn_name: str | Iterable[str], edit_func: Callable[[str], str], /) -> Callable[[Type[T]], Type[T]]:
    """Class decorator factory: apply ``edit_func`` to the docstrings of the named methods.

    Each method is rebuilt as a fresh FunctionType so the edit does not leak to
    other classes sharing the same function object.
    """

    def _wrapper(target: Type[T]) -> Type[T]:
        nonlocal fn_name
        with logger.catch_ctx():
            if isinstance(fn_name, str):
                fn_name = [fn_name]

            for fnn in fn_name:
                func = cast(FunctionType, getattr(target, fnn))
                # Clone the function before mutating its docstring
                func_c = FunctionType(func.__code__, func.__globals__, fnn, func.__defaults__, func.__closure__)
                func_c = modify_docstring(edit_func)(func_c)
                func_c = wraps(target)(func_c)
                setattr(target, fnn, func_c)

        return target

    return _wrapper
--------------------------------------------------------------------------------
/vardautomation/vpathlib.py:
--------------------------------------------------------------------------------
"""pathlib.Path inheritance"""

from __future__ import annotations

__all__ = ['VPath']

import os
import shutil

from pathlib import Path
from types import TracebackType
from typing import Any, Callable, Iterable, List, Optional, Protocol, Tuple, Type

from ._logging import logger
from .vtypes import AbstractMutableSet, AnyPath


class _Flavour(Protocol):
    # Structural type for pathlib's private per-OS flavour object
    sep: str
    altsep: str


_ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
_OptExcInfo = _ExcInfo | Tuple[None, None, None]  # type: ignore[operator]


class VPath(Path):
    """Modified version of pathlib.Path"""
    # pylint: disable=no-member
    # NOTE(review): relies on the private pathlib ``_flavour`` attribute, which
    # changed in CPython 3.12 — confirm the supported Python versions.
    _flavour: _Flavour = type(Path())._flavour  # type: ignore[attr-defined]

    @logger.catch
    def format(self, *args: Any, **kwargs: Any) -> VPath:
        """
        :return: Formatted version of `vpath`,
            using substitutions from args and kwargs.
            The substitutions are identified by braces ('{' and '}')
        """
        return VPath(self.to_str().format(*args, **kwargs))

    @logger.catch
    def set_track(self, track_number: int, /) -> VPath:
        """
        Set the track number by replacing the substitution "{track_number}"
        by the track_number specified

        :param track_number:        Track number
        :return:                    Formatted VPath
        """
        return self.format(track_number=track_number)

    def to_str(self) -> str:
        """
        :return: String representation of the path, suitable for
            passing to system calls.
        """
        return str(self)

    def append_stem(self, stem: str) -> VPath:
        """
        Append ``stem`` at the end of the VPath stem

        :param stem:                Stem to add
        :return:                    New VPath with the stem appended
        """
        return self.with_stem(self.stem + stem)

    def append_suffix(self, suffix: str) -> VPath:
        """
        Append ``stem`` at the end of the VPath suffix.
        Stolen from pathlib3x

        :param suffix:              Suffix to add. It has to start with '.'
        :return:                    New VPath with the file suffix appended
        """
        f = self._flavour
        # A suffix must not contain a path separator
        if f.sep in suffix or f.altsep and f.altsep in suffix:
            raise ValueError(f'Invalid suffix {suffix}')
        # A suffix must start with '.' and be more than a lone dot
        if suffix and not suffix.startswith('.') or suffix == '.':
            raise ValueError(f'Invalid suffix {suffix}')
        name = self.name
        if not name:
            raise ValueError(f'{self} has an empty name')
        name = name + suffix
        return self._from_parsed_parts(self._drv, self._root, self._parts[:-1] + [name])  # type: ignore[attr-defined, no-any-return]

    @logger.catch
    def copy(self, target: AnyPath, *, follow_symlinks: bool = True) -> None:
        """
        Wraps shutil.copy. Stolen from pathlib3x.

        https://docs.python.org/3/library/shutil.html#shutil.copy

        :param target:              See Python official documentation
        :param follow_symlinks:     See Python official documentation
        """
        shutil.copy(self, target, follow_symlinks=follow_symlinks)

    @logger.catch
    def copy2(self, target: AnyPath, follow_symlinks: bool = True) -> None:
        """
        Wraps shutil.copy2. Stolen from pathlib3x.

        https://docs.python.org/3/library/shutil.html#shutil.copy2

        :param target:              See Python official documentation
        :param follow_symlinks:     See Python official documentation
        """
        shutil.copy2(self, target, follow_symlinks=follow_symlinks)

    @logger.catch
    def copyfile(self, target: VPath, follow_symlinks: bool = True) -> None:
        """
        Wraps shutil.copyfile. Stolen from pathlib3x.

        https://docs.python.org/3/library/shutil.html#shutil.copyfile

        :param target:              See Python official documentation
        :param follow_symlinks:     See Python official documentation
        """
        shutil.copyfile(self, target, follow_symlinks=follow_symlinks)

    @logger.catch
    def copymode(self, target: AnyPath, follow_symlinks: bool = True) -> None:
        """
        Wraps shutil.copymode. Stolen from pathlib3x.

        https://docs.python.org/3/library/shutil.html#shutil.copymode

        :param target:              See Python official documentation
        :param follow_symlinks:     See Python official documentation
        """
        shutil.copymode(self, target, follow_symlinks=follow_symlinks)

    @logger.catch
    def copystat(self, target: AnyPath, follow_symlinks: bool = True) -> None:
        """
        Wraps shutil.copystat. Stolen from pathlib3x.

        https://docs.python.org/3/library/shutil.html#shutil.copystat

        :param target:              See Python official documentation
        :param follow_symlinks:     See Python official documentation
        """
        shutil.copystat(self, target, follow_symlinks=follow_symlinks)

    @logger.catch
    def copytree(
        self, target: AnyPath, symlinks: bool = False,
        ignore: Optional[Callable[[AnyPath, List[str]], Iterable[str]]] = None,
        copy_function: Callable[[AnyPath, AnyPath], Any] = shutil.copy2,
        ignore_dangling_symlinks: bool = True, dirs_exist_ok: bool = False
    ) -> None:
        """
        Wraps shutil.copytree. Stolen from pathlib3x.

        https://docs.python.org/3/library/shutil.html#shutil.copytree

        :param target:                      See Python official documentation
        :param symlinks:                    See Python official documentation
        :param ignore:                      See Python official documentation
        :param copy_function:               See Python official documentation
        :param ignore_dangling_symlinks:    See Python official documentation
        :param dirs_exist_ok:               See Python official documentation
        """
        shutil.copytree(self, target, symlinks, ignore, copy_function, ignore_dangling_symlinks, dirs_exist_ok)

    @logger.catch
    def rmtree(self, ignore_errors: bool = False,
               onerror: Optional[Callable[[Callable[..., Any], str, _OptExcInfo], Any]] = None) -> None:
        """
        Wraps shutil.rmtree. Stolen from pathlib3x.

        https://docs.python.org/3/library/shutil.html#shutil.rmtree

        :param ignore_errors:       See Python official documentation
        :param onerror:             See Python official documentation
        """
        shutil.rmtree(self, ignore_errors, onerror)

    @logger.catch
    def rm(self, ignore_errors: bool = False) -> None:
        """
        Wraps os.remove.

        :param ignore_errors:       Ignore errors emitted by os.remove
        """
        if ignore_errors:
            try:
                os.remove(self)
            except OSError:
                pass
        else:
            try:
                os.remove(self)
            except FileNotFoundError as file_err:
                logger.critical('This file doesn\'t exist', file_err)
            except IsADirectoryError as dir_err:
                logger.critical(f'{self} is a directory.
Use ``rmtree`` instead', dir_err)


class CleanupSet(AbstractMutableSet[VPath]):
    """Set of work files that can be deleted in one go."""
    # pylint: disable=arguments-differ
    def clear(self, *, ignore_errors: bool = True) -> None:
        """
        Clear the set and delete files

        :param ignore_errors:       Ignore errors emitted by os.remove
        """
        for path in self:
            path.rm(ignore_errors)
        return super().clear()

    def add(self, value: AnyPath) -> None:
        """
        Add a generic path to this set and convert it to a VPath
        This has no effect if the path is already present

        :param value:               A path
        """
        return super().add(VPath(value))

    def discard(self, value: VPath) -> None:
        """
        Remove a VPath from this set if it is a member.
        If the VPath is not a member, do nothing

        :param value:               A VPath
        """
        return super().discard(value)

    def update(self, *s: Iterable[AnyPath]) -> None:
        """
        Update the set with the union of itself and others

        :param s:                   Iterable of path
        """
        return super().update((VPath(p) for iterable in s for p in iterable))
--------------------------------------------------------------------------------
/vardautomation/vtypes.py:
--------------------------------------------------------------------------------
"""Shared type aliases and typing helpers for vardautomation."""

__all__ = ['AnyPath', 'DuplicateFrame', 'Element', 'Trim', 'UpdateFunc', 'VPSIdx', 'ElementTree']

from abc import ABC
from os import PathLike
from typing import (
    Any, Callable, Iterable, Iterator, List, Mapping, MutableSet, Optional, ParamSpec, Set, TypeVar,
    Union, cast
)

from lxml import etree
from vapoursynth import VideoNode
from vardefunc.types import DuplicateFrame, Trim

# Generic type variables shared across the package
T = TypeVar('T')
F = TypeVar('F', bound=Callable[..., Any])
P = ParamSpec('P')

AnyPath = PathLike[str] | str
"""Represents a PathLike"""

Element = etree._Element  # type: ignore[pylance-strict]

UpdateFunc = Callable[[int, int], None]
"""An update function type suitable for ``vapoursynth.VideoNode.output``"""

VPSIdx = Callable[[str], VideoNode]
"""Vapoursynth function indexer"""


class ElementTree(etree._ElementTree):  # type: ignore
    """lxml ElementTree whose ``xpath`` is typed to return a list of Elements."""

    def xpath(self, _path: Union[str, bytes],  # type: ignore
              namespaces: Optional[Mapping[str, str]] = None,
              extensions: Any = None, smart_strings: bool = True,
              **_variables: Any) -> List[Element]:
        xpathobject = super().xpath(
            _path, namespaces=namespaces, extensions=extensions,
            smart_strings=smart_strings, **_variables
        )
        # lxml's return type is a loose union; narrow it for callers
        return cast(List[Element], xpathobject)


class AbstractMutableSet(MutableSet[T], ABC):
    """MutableSet backed by a plain ``set``, adding an ``update`` method."""

    # Name-mangled slot: stored as _AbstractMutableSet__data on instances
    __slots__ = ('__data', )
    __data: Set[T]

    def __init__(self, __iterable: Optional[Iterable[T]] = None) -> None:
        self.__data = set(__iterable) if __iterable is not None else set()
        super().__init__()

    def __str__(self) -> str:
        return self.__data.__str__()

    def __repr__(self) -> str:
        return self.__data.__repr__()

    def __contains__(self, x: object) -> bool:
        return self.__data.__contains__(x)

    def __iter__(self) -> Iterator[T]:
        return self.__data.__iter__()

    def __len__(self) -> int:
        return self.__data.__len__()

    def add(self, value: T) -> None:
        return self.__data.add(value)

    def discard(self, value: T) -> None:
        return self.__data.discard(value)

    def update(self, *s: Iterable[T]) -> None:
        # Not part of MutableSet's ABC; delegates to set.update
        return self.__data.update(*s)
--------------------------------------------------------------------------------