├── README.md
├── k7sfunc.py
├── nircmd_portable.exe
├── portable_config
│   ├── input_uosc.conf
│   ├── mpv.conf
│   ├── script-opts
│   │   └── uosc_lang.conf
│   ├── scripts
│   │   ├── Autovsr.lua
│   │   ├── Set-refresh-rate.lua
│   │   ├── SmartCopyPaste.lua
│   │   └── SmartSkip.lua
│   └── vs
│       ├── MEMC_RIFE_NV_HQ.vpy
│       ├── MEMC_RIFE_NV_LQ.vpy
│       └── MEMC_RIFE_NV_MQ.vpy
├── vs-plugins
│   └── models
│       └── rife_v2
│           ├── rife_v4.50.onnx
│           ├── rife_v4.51.onnx
│           └── rife_v4.52.onnx
└── vsmlrt.py
/README.md:
--------------------------------------------------------------------------------
# MPV Lazy EN

English translation for [MPV_lazy](https://github.com/hooke007/MPV_lazy) focused on NVIDIA RTX video enhancement features (RTX 2060 Super or better).
Only tested on Windows.

![MPV Lazy Interface](https://i.imgur.com/BXDBdwI.png)

## Features

- RIFE frame interpolation with NVIDIA GPU acceleration
- NVIDIA Video Super Resolution (auto-scaling)
- Automatic display refresh rate switching
- Multiple quality presets (LQ/MQ/HQ)

## Full Installation

1. **Base Installation**
   - Download from [MPV Lazy Releases](https://github.com/hooke007/MPV_lazy/releases/tag/20240406):
     - `hooke007.mpv-lazy-20240406.exe`
     - `mpv-lazy-20240406-vsMega.7z`

2. **MPV Update**
   - Replace the files with the latest [MPV WinBuild](https://github.com/shinchiro/mpv-winbuild-cmake/releases) (`mpv-x86_64-v3-xxx.7z`)

3. **English Translation and Tweaks**
   - Overwrite with the files from [MPV Lazy EN](https://github.com/vadash/mpv-lazy-en/archive/refs/heads/main.zip)

To update to a newer mpv-lazy release, just repeat step 3.

## Quick Start

1. Install as described above
2. Open any video file and press `Shift + 2/3/4` for RIFE + NVIDIA SuperRes (LQ/MQ/HQ)
3. Wait for the model to build (first time only)

## Configuration

### RIFE Settings
File: `portable_config/vs/MEMC_RIFE_NV_{LQ|MQ|HQ}.vpy`

```python
# Pre-downscale height (adjust for your GPU)
H_Pre = 1080 # RTX 2060 Super
# H_Pre = 1440 # RTX 3070 or better

# Model Selection
Model = 450 # Fast
#Model = 451 # Balanced
#Model = 452 # Quality
```

### Auto Features

#### RIFE Interpolation
Add to `mpv.conf`:
```
### Extra
ontop = yes
window-scale=0.33
vf=vapoursynth="~~/vs/MEMC_RIFE_NV_MQ.vpy"
sub-visibility=no
```

#### Display Refresh Rate
Edit values in `portable_config/scripts/Set-refresh-rate.lua`:
- START_HZ (default: 48) - refresh rate applied when mpv starts
- EXIT_HZ (default: 75) - refresh rate restored when mpv exits

The script also hardcodes the display resolution (RESOLUTION_WIDTH/RESOLUTION_HEIGHT, default 2560x1440); adjust these to match your monitor.

Requires [NirCmd](https://www.nirsoft.net/utils/nircmd.zip) (included as `nircmd_portable.exe`)

## Model Performance

Benchmarked FPS (fastest to slowest):
1. RIFE 4.6 - 112 fps (oldest)
2. RIFE 4.15 lite - 100 fps (recommended)
3. RIFE 4.26 - 86 fps
4. RIFE 4.25 - 84 fps
5. RIFE 4.18 - 75 fps

Recommended: RIFE 4.25, 4.18, or 4.15 lite

## Updating Models

1. Download V2 `.onnx` files from [AmusementClub Models](https://github.com/AmusementClub/vs-mlrt/releases/tag/external-models)
2. Place in `mpv-lazy\vs-plugins\models\rife_v2\`:
   - `450.onnx` - LQ preset (4.9)
   - `451.onnx` - MQ preset (4.15 lite)
   - `452.onnx` - HQ preset (4.25)
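
If you want to sanity-check the result before launching mpv, a minimal helper like the one below can confirm the three preset files are in place. This is a hypothetical convenience script, not something shipped with MPV_lazy; the script name `check_rife_models.py` and the `MODEL_DIR` path are assumptions, so adjust the path to your own install location.

```python
# check_rife_models.py - hypothetical helper, not part of MPV_lazy
from pathlib import Path

# Adjust this to the vs-plugins model directory of your mpv-lazy install.
MODEL_DIR = Path(r"mpv-lazy\vs-plugins\models\rife_v2")

# Expected file names per the presets above (LQ / MQ / HQ).
EXPECTED = ["450.onnx", "451.onnx", "452.onnx"]

missing = [name for name in EXPECTED if not (MODEL_DIR / name).is_file()]
if missing:
    print("Missing model files:", ", ".join(missing))
else:
    print("All RIFE model files found in", MODEL_DIR)
```
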
## Support

Model discussions: [SVP Team Forum](https://www.svp-team.com/forum/viewtopic.php?id=6281)

--------------------------------------------------------------------------------
/nircmd_portable.exe:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vadash/mpv-lazy-en/65794dd72db059cf98eab45cf2b242928a04ed67/nircmd_portable.exe
--------------------------------------------------------------------------------
/portable_config/input_uosc.conf:
--------------------------------------------------------------------------------
# script-binding uosc/open-file #! Load > ※ File Browser
# script-binding uosc/load-subtitles #! Load > ※ Import Subtitle Track

# script-binding uosc/playlist #! Navigate > ※ Playlist
# script-binding uosc/editions #! Navigate > ※ Editions List
# script-binding uosc/chapters #! Navigate > ※ Chapters List
# script-binding uosc/video #! Navigate > ※ Video Tracks List
# script-binding uosc/audio #! Navigate > ※ Audio Tracks List
# script-binding uosc/subtitles #! Navigate > ※ Subtitle Tracks List
# playlist-shuffle #! Navigate > Shuffle Playlist

, frame-back-step #! Play > Previous Frame
. frame-step #! Play > Next Frame
l ab-loop #! Play > Set/Clear AB Loop Points
[ no-osd cycle-values speed 2 1.5 1.2 1 ; script-message-to uosc flash-elements speed #! Play > Toggle Speed -
] no-osd cycle-values speed 1 1.2 1.5 2 ; script-message-to uosc flash-elements speed #! Play > Toggle Speed +
{ no-osd add speed -0.1 ; script-message-to uosc flash-elements speed #! Play > Toggle Speed -0.1
} no-osd add speed 0.1 ; script-message-to uosc flash-elements speed #! Play > Toggle Speed +0.1
- no-osd add volume -1 ; script-message-to uosc flash-elements volume #! Play > Volume -
= no-osd add volume 1 ; script-message-to uosc flash-elements volume #! Play > Volume +
# ignore #! Play > ---
c add audio-delay -0.1 #! Play > Audio Delay -100ms
v add audio-delay 0.1 #! Play > Audio Delay +100ms
z add sub-delay -0.1 #! Play > Subtitle Delay -100ms
x add sub-delay +0.1 #! Play > Subtitle Delay +100ms
# ignore #! Play > ---
Shift+BS set audio-delay 0 ; set sub-delay 0 #! Play > Reset Audio & Subtitle Sync

# ignore #! ---
! vf clr "" #! VF Filter > Clear (Shift + 1)
# #! VF Filter > ---
@ vf set vapoursynth="~~/vs/MEMC_RIFE_NV_LQ.vpy" #! VF Filter > Interpolation RIFE LQ (Shift + 2)
SHARP vf set vapoursynth="~~/vs/MEMC_RIFE_NV_MQ.vpy" #! VF Filter > Interpolation RIFE MQ (Shift + 3)
$ vf set vapoursynth="~~/vs/MEMC_RIFE_NV_HQ.vpy" #! VF Filter > Interpolation RIFE HQ (Shift + 4)
# ignore #! VF Filter > ---

# cycle-values hwdec "auto" "auto-copy" "no" #! Video > Toggle Decode Mode
# cycle deband #! Video > Toggle Deband State
# cycle deinterlace #! Video > Toggle Deinterlace State
# cycle icc-profile-auto #! Video > Toggle Auto Color Correction
# cycle correct-pts #! Video > Toggle Timestamp Parsing Mode
1 add contrast -1 #! Video > EQ > Contrast -
2 add contrast 1 #! Video > EQ > Contrast +
3 add brightness -1 #! Video > EQ > Brightness -
4 add brightness 1 #! Video > EQ > Brightness +
5 add gamma -1 #!
Video > EQ > Gamma - 47 | 6 add gamma 1 #! Video > EQ > Gamma + 48 | 7 add saturation -1 #! Video > EQ > Saturation - 49 | 8 add saturation 1 #! Video > EQ > Saturation + 50 | 9 add hue -1 #! Video > EQ > Hue - 51 | 0 add hue 1 #! Video > EQ > Hue + 52 | # ignore #! Video > EQ > --- 53 | Ctrl+BS set contrast 0 ; set brightness 0 ; set gamma 0 ; set saturation 0 ; set hue 0 #! Video > EQ > Reset All 54 | 55 | Ctrl+i script-binding uosc/keybinds #! Tools > ※ Keybinds List 56 | I script-binding display-stats-toggle #! Tools > Permanent Display Stats 57 | ` script-binding console/enable #! Tools > Show Console 58 | Ctrl+r script-message-to save_global_props clean_data #! Tools > Clear Recorded Properties 59 | # cycle border #! Tools > Toggle Window Border 60 | # cycle ontop #! Tools > Toggle Window On Top 61 | # script-binding uosc/audio-device #! Tools > ※ Audio Output Devices List 62 | # script-binding uosc/stream-quality #! Tools > ※ Stream Quality 63 | # script-binding uosc/show-in-directory #! Tools > ※ Open Current File Directory 64 | # script-binding uosc/open-config-directory #! Tools > ※ Open Config Directory 65 | 66 | # ignore #! --- 67 | # stop #! Stop 68 | Ctrl+q quit #! Quit mpv 69 | 70 | ### ====================== Unmapped Menu ====================== 71 | 72 | POWER quit 73 | PLAY set pause no 74 | PAUSE set pause yes 75 | PLAYPAUSE cycle pause 76 | STOP stop 77 | REWIND seek -30 78 | FORWARD seek 30 79 | PREV playlist-prev 80 | NEXT playlist-next 81 | 82 | MBTN_BACK playlist-prev 83 | MBTN_FORWARD playlist-next 84 | MBTN_LEFT ignore 85 | MBTN_LEFT_DBL cycle fullscreen 86 | MBTN_MID script-binding uosc/menu 87 | MBTN_RIGHT context-menu 88 | MBTN_RIGHT_DBL ignore 89 | 90 | WHEEL_DOWN no-osd add volume -1 ; script-message-to uosc flash-elements volume 91 | WHEEL_UP no-osd add volume 1 ; script-message-to uosc flash-elements volume 92 | 93 | ESC set fullscreen no 94 | ENTER set fullscreen yes 95 | Alt+ENTER cycle fullscreen 96 | SPACE cycle pause 97 | UP set pause yes ; seek -1 exact 98 | DOWN set pause yes ; seek 1 exact 99 | LEFT seek -5 100 | RIGHT seek 5 101 | Ctrl+UP playlist-prev 102 | Ctrl+DOWN playlist-next 103 | Ctrl+Alt+UP script-binding uosc/prev 104 | Ctrl+Alt+DOWN script-binding uosc/next 105 | 106 | # Resize window to 33%, 50%, and 100% 107 | Alt+1 set window-scale 0.33 108 | Alt+2 set window-scale 0.5 109 | Alt+3 set window-scale 1.0 110 | 111 | # Toggle subtitle visibility 112 | v cycle sub-visibility 113 | -------------------------------------------------------------------------------- /portable_config/mpv.conf: -------------------------------------------------------------------------------- 1 | ### Portable installation configuration 2 | input-conf = "~~/input_uosc.conf" 3 | include = "~~/profiles.conf" 4 | include = "~~/script-opts.conf" 5 | use-filedir-conf = yes 6 | 7 | ### Basic Settings 8 | vo = gpu-next 9 | hwdec = auto-copy 10 | hwdec-codecs = h264,hevc,vp8,vp9,av1 11 | 12 | ### Functionality 13 | idle = yes 14 | input-builtin-bindings = no 15 | hr-seek-framedrop = no 16 | save-position-on-quit = yes 17 | watch-later-options = start,vid,aid,sid 18 | keep-open = yes 19 | autofit-smaller = 50%x50% 20 | keepaspect-window = no 21 | hidpi-window-scale = no 22 | 23 | ### Performance 24 | demuxer-max-bytes = 1024MiB 25 | demuxer-max-back-bytes = 512MiB 26 | icc-cache-dir = "~~/_cache/icc" 27 | gpu-shader-cache-dir = "~~/_cache/shader" 28 | watch-later-dir = "~~/_cache/watch_later" 29 | 30 | ### OSD (On-Screen Display) 31 | osd-on-seek = msg 32 | osd-bar-w = 100 33 
| osd-bar-h = 1.5 34 | osd-bar-align-y = -1 35 | osd-color = "#672168" 36 | osd-border-color = "#FFFFFF" 37 | osd-border-size = 1 38 | osd-font-size = 40 39 | osd-fractions = yes 40 | osd-playing-msg = "${filename}" 41 | osd-duration = 2000 42 | osd-playing-msg-duration = 3000 43 | 44 | ### Audio 45 | ao = wasapi 46 | volume = 100 47 | volume-max = 130 48 | audio-channels = stereo 49 | audio-file-auto = fuzzy 50 | alang = eng,en,rus,ru 51 | 52 | ### Video 53 | video-sync = display-resample 54 | video-sync-max-video-change = 5 55 | interpolation = yes 56 | cscale = bilinear 57 | scale = spline36 58 | dscale = bicubic 59 | dither-depth = no 60 | deinterlace = auto 61 | hdr-peak-percentile = 99.99 62 | 63 | ### Shaders and Filters 64 | osc = no 65 | glsl-shaders-append = "~~/shaders/AMD_FSR_EASU_rgb_RT.glsl" 66 | 67 | ### Subtitles 68 | sub-auto = fuzzy 69 | sub-file-paths = sub;subtitles 70 | slang = eng,en,rus,ru 71 | subs-fallback = yes 72 | sub-font = "LXGW WenKai Mono" 73 | sub-font-size = 44 74 | sub-border-size = 4 75 | sub-blur = 1 76 | sub-color = "#FFD766" 77 | sub-use-margins = yes 78 | sub-ass-vsfilter-blur-compat = no 79 | sub-ass-force-margins = yes 80 | 81 | ### GSync 82 | fullscreen = yes 83 | video-sync = display-resample 84 | interpolation = yes 85 | tscale = oversample 86 | opengl-swapinterval = 1 87 | d3d11-sync-interval = 1 88 | vulkan-swap-mode = immediate 89 | 90 | ### Extra 91 | ontop = yes 92 | -------------------------------------------------------------------------------- /portable_config/script-opts/uosc_lang.conf: -------------------------------------------------------------------------------- 1 | 2 | ## context menu default 3 | _cm_load=Load 4 | _cm_file_browser=File Browser 5 | _cm_import_sid=Import SID 6 | _cm_navigation=Navigation 7 | _cm_playlist=Playlist 8 | _cm_edition_list=Edition-list 9 | _cm_chapter_list=Chapter-list 10 | _cm_vid_list=VID-list 11 | _cm_aid_list=AID-list 12 | _cm_sid_list=SID-list 13 | _cm_playlist_shuffle=Playlist Shuffle 14 | _cm_ushot=uScreenshot 15 | _cm_video=VIDEO 16 | _cm_decoding_api=hwdec cycle 17 | _cm_deband_toggle=deband toggle 18 | _cm_deint_toggle=deint toggle 19 | _cm_icc_toggle=icc auto toggle 20 | _cm_corpts_toggle=correct pts toggle 21 | _cm_tools=TOOLS 22 | _cm_keybinding=key-bindings 23 | _cm_stats_toggle=stats toggle 24 | _cm_console_on=console on 25 | _cm_border_toggle=border toggle 26 | _cm_ontop_toggle=ontop toggle 27 | _cm_audio_device=audio device 28 | _cm_stream_quality=Stream Quality 29 | _cm_show_file_dir=show file dir 30 | _cm_show_config_dir=show config dir 31 | _cm_stop=Stop 32 | _cm_quit=Quit 33 | 34 | ## no_border_title 35 | _border_title=No File 36 | 37 | ## track_loaders sub_menu 38 | _sid_menu=subtitle track 39 | _aid_menu=audio track 40 | _vid_menu=video track 41 | _import_id_menu=import 42 | 43 | _menu_search=type & Ctrl+ENTER to search 44 | _menu_search2=type to search 45 | 46 | _input_empty=empty input-bindings 47 | 48 | _sid_submenu_title=sid list 49 | _aid_submenu_title=aid list 50 | _vid_submenu_title=vid list 51 | _playlist_submenu_title=playlist 52 | _chapter_list_submenu_title=chapter list 53 | _chapter_list_submenu_item_title=unnamed chapter 54 | _edition_list_submenu_title=edition list 55 | _edition_list_submenu_item_title=edition 56 | _stream_quality_submenu_title=stream quality list 57 | _audio_device_submenu_title=audio device list 58 | _audio_device_submenu_item_title=Autoselect device 59 | 60 | _dlsub_download=download 61 | _dlsub_searchol=search online 62 | 
_dlsub_invalid_response=invalid response json 63 | _dlsub_process_exit=process exit code 64 | _dlsub_unknown_err=unknown error 65 | _dlsub_err=error 66 | _dlsub_fin=subtitles loaded & applied 67 | _dlsub_remain=remaining downloads today 68 | _dlsub_reset=reset in 69 | _dlsub_foreign=foreign parts only 70 | _dlsub_hearing=hearing impaired 71 | _dlsub_result0=no result 72 | _dlsub_page_prev=prev page 73 | _dlsub_page_next=next page 74 | _dlsub_2search=Ctrl+ENTER to search 75 | _dlsub_enter_query=enter query 76 | 77 | _submenu_import=import 78 | _submenu_load_file=load file 79 | _submenu_id_disabled=disabled 80 | _submenu_id_hint=channel(s) 81 | _submenu_id_forced=forced 82 | _submenu_id_default=default 83 | _submenu_id_external=external 84 | _submenu_id_title=track 85 | _submenu_file_browser_item_hint=driver list 86 | _submenu_file_browser_item_hint2=parent dir 87 | _submenu_file_browser_item2_hint=driver 88 | _submenu_file_browser_title=driver list 89 | 90 | ## built-in_shortcut 91 | _button01=MENU 92 | _button02=SID 93 | _button03=AID 94 | _button04=AU-DEVICE 95 | _button05=VID 96 | _button06=PLAYLIST 97 | _button07=CHAPTER 98 | _button08=EDITION 99 | _button09=STREAM-QTY 100 | _button10=LOAD 101 | _button11=PLAYLIST/BROWSER 102 | _button12=uPREV 103 | _button13=uNEXT 104 | _button14=uFIRST 105 | _button15=uLAST 106 | _button16=LOOP-PLAYLIST 107 | _button17=LOOP_FILE 108 | _button18=uSHUFFLE-PLAY 109 | _button19=FULLSCREEN 110 | 111 | _button_ext01=Play/Pause 112 | _button_ext02=Play/Pause 113 | _button_ext03=list-prev 114 | _button_ext04=list-next 115 | _button_ext05=Border 116 | _button_ext06=Ontop 117 | _button_ext07=Hwdec 118 | _button_ext08=Unscaled 119 | _button_ext09=Deband 120 | _button_ext10=Deint 121 | _button_ext11=Ushot 122 | _button_ext12=Stats 123 | _button_ext13=Thumbnail 124 | -------------------------------------------------------------------------------- /portable_config/scripts/Autovsr.lua: -------------------------------------------------------------------------------- 1 | -- Description: This script enables or disables NVIDIA's DLSS Super Resolution (VSR) in MPV. 2 | -- Credits: https://gist.github.com/azumukupoe/a56a78d71cf26a8f682dc06407e615db 3 | local mp = require 'mp' 4 | local autovsr_enabled = false 5 | 6 | local function autovsr() 7 | local display_width = mp.get_property_native("display-width") 8 | local video_width = mp.get_property_native("width") 9 | local display_height = mp.get_property_native("display-height") 10 | local video_height = mp.get_property_native("height") 11 | 12 | if video_width and display_width and video_height and display_height then 13 | local scale = math.max(display_width, display_height) / math.max(video_width, video_height) 14 | scale = scale - scale % 0.1 -- 15 | 16 | local vf = mp.get_property("vf") or "" 17 | if string.match(vf, "@vsr") then 18 | mp.command("vf remove @vsr") 19 | end 20 | 21 | if scale > 1 then 22 | mp.command("vf append @vsr:d3d11vpp:scaling-mode=nvidia:scale=" .. 
scale) 23 | end 24 | end 25 | end 26 | 27 | local function activate() 28 | autovsr_enabled = not autovsr_enabled 29 | 30 | if autovsr_enabled then 31 | autovsr() 32 | mp.observe_property("video-params/pixelformat", "native", autovsr) 33 | mp.observe_property("vf", "native", autovsr) 34 | mp.osd_message("RTX ON") 35 | else 36 | mp.command("vf remove @vsr") 37 | mp.unobserve_property(autovsr) 38 | mp.osd_message("RTX OFF") 39 | end 40 | end 41 | 42 | -- Automatically activate VSR on startup 43 | activate() 44 | 45 | mp.add_key_binding("ctrl+shift+u", "autovsr", activate) 46 | -------------------------------------------------------------------------------- /portable_config/scripts/Set-refresh-rate.lua: -------------------------------------------------------------------------------- 1 | -- mpv Lua script to set display refresh rate 2 | -- version 1.0 3 | -- by vadash 4 | local mp = require "mp" 5 | local utils = require "mp.utils" 6 | 7 | -- Constants 8 | local NIRCMD_PATH = "nircmd_portable.exe" -- Specify the full executable name if needed 9 | local RESOLUTION_WIDTH = 2560 -- Hardcoded resolution width 10 | local RESOLUTION_HEIGHT = 1440 -- Hardcoded resolution height 11 | local BITS_PER_PIXEL = 32 -- Hardcoded bits per pixel 12 | local START_HZ = 48 -- Refresh rate when MPV starts 13 | local EXIT_HZ = 75 -- Refresh rate when MPV exits 14 | 15 | -- Function to set the hardcoded resolution and refresh rate 16 | local function set_display_hz(hz) 17 | -- Build a table of arguments rather than a single string 18 | local args = { 19 | NIRCMD_PATH, 20 | "setdisplay", 21 | tostring(RESOLUTION_WIDTH), 22 | tostring(RESOLUTION_HEIGHT), 23 | tostring(BITS_PER_PIXEL), 24 | tostring(hz) 25 | } 26 | local result = utils.subprocess({ args = args, cancellable = false }) 27 | if result.status ~= 0 then 28 | mp.msg.error("Failed to set refresh rate to " .. hz) 29 | else 30 | mp.msg.info(string.format("Display set to %dx%d@%dHz", RESOLUTION_WIDTH, RESOLUTION_HEIGHT, hz)) 31 | end 32 | end 33 | 34 | -- Set to START_HZ when the player starts 35 | set_display_hz(START_HZ) 36 | 37 | -- Set to EXIT_HZ when the player exits 38 | mp.register_event("shutdown", function() 39 | set_display_hz(EXIT_HZ) 40 | end) 41 | -------------------------------------------------------------------------------- /portable_config/scripts/SmartCopyPaste.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (c) 2022, Eisa AlAwadhi 2 | -- License: BSD 2-Clause License 3 | -- Creator: Eisa AlAwadhi 4 | -- Project: SmartCopyPaste 5 | -- Version: 3.1 6 | 7 | local o = { 8 | ---------------------------USER CUSTOMIZATION SETTINGS--------------------------- 9 | --These settings are for users to manually change some options. 10 | --Changes are recommended to be made in the script-opts directory. 11 | 12 | -----Script Settings---- 13 | device = 'auto', --'auto' is for automatic device detection, or manually change to: 'windows' or 'mac' or 'linux' 14 | linux_copy = 'xclip -silent -selection clipboard -in', --copy command that will be used in Linux. OR write a different command 15 | linux_paste = 'xclip -selection clipboard -o', --paste command that will be used in Linux. OR write a different command 16 | mac_copy = 'pbcopy', --copy command that will be used in MAC. OR write a different command 17 | mac_paste = 'pbpaste', --paste command that will be used in MAC. OR write a different command 18 | windows_copy = 'powershell', --'powershell' is for using windows powershell to copy. 
OR write the copy command, e.g: ' clip' 19 | windows_paste = 'powershell', --'powershell' is for using windows powershell to paste. OR write the paste command 20 | resume_offset = -0.65, --change to 0 so item resumes from the exact position, or decrease the value so that it gives you a little preview before loading the resume point 21 | osd_messages = true, --true is for displaying osd messages when actions occur. Change to false will disable all osd messages generated from this script 22 | time_seperator = ' 🕒 ', --Time seperator that will be shown before the saved time in osd messages 23 | prefer_filename_over_title = 'local', --Prefers to copy filename over filetitle. Select between 'local', 'protocols', 'all', and 'none'. 'local' prefer filenames for videos that are not protocols. 'protocols' will prefer filenames for protocols only. 'all' will prefer filename over filetitle for both protocols and not protocols videos. 'none' will always use filetitle instead of filename 24 | copy_time_method = 'all', --Option to copy time with video, 'none' for disabled, 'all' to copy time for all videos, 'protocols' for copying time only for protocols, 'specifics' to copy time only for websites defined below, 'local' to copy time for videos that are not protocols 25 | specific_time_attributes=[[ 26 | [ ["twitter", "?t=", ""], ["twitch", "?t=", "s"], ["youtube", "&t=", "s"] ] 27 | ]], --The time attributes which will be added when copying protocols of specific websites from this list. Additional attributes can be added following the same format. 28 | protocols_time_attribute = '&t=', --The text that will be copied before the seek time when copying a protocol video from mpv 29 | local_time_attribute = '&time=', --The text that will be copied before the seek time when copying a local video from mpv 30 | pastable_time_attributes=[[ 31 | [" | time="] 32 | ]], --The time attributes that can be pasted for resume, specific_time_attributes, protocols_time_attribute, local_time_attribute are automatically added 33 | copy_keybind=[[ 34 | ["ctrl+c", "ctrl+C", "meta+c", "meta+C"] 35 | ]], --Keybind that will be used to copy 36 | running_paste_behavior = 'playlist', --The priority of paste behavior when a video is running. select between 'playlist', 'timestamp', 'force'. 37 | paste_keybind=[[ 38 | ["ctrl+v", "ctrl+V", "meta+v", "meta+V"] 39 | ]], --Keybind that will be used to paste 40 | copy_specific_behavior = 'path', --Copy behavior when using copy_specific_keybind. select between 'title', 'path', 'timestamp', 'path×tamp'. 41 | copy_specific_keybind=[[ 42 | ["ctrl+alt+c", "ctrl+alt+C", "meta+alt+c", "meta+alt+C"] 43 | ]], --Keybind that will be used to copy based on the copy behavior specified 44 | paste_specific_behavior = 'playlist', --Paste behavior when using paste_specific_keybind. select between 'playlist', 'timestamp', 'force'. 45 | paste_specific_keybind=[[ 46 | ["ctrl+alt+v", "ctrl+alt+V", "meta+alt+v", "meta+alt+V"] 47 | ]], --Keybind that will be used to paste based on the paste behavior specified 48 | paste_protocols=[[ 49 | ["https?://", "magnet:", "rtmp:"] 50 | ]], --add above (after a comma) any protocol you want paste to work with; e.g: ,'ftp://'. Or set it as "" by deleting all defined protocols to make paste works with any protocol. 
51 | paste_extensions=[[ 52 | ["ac3", "a52", "eac3", "mlp", "dts", "dts-hd", "dtshd", "true-hd", "thd", "truehd", "thd+ac3", "tta", "pcm", "wav", "aiff", "aif", "aifc", "amr", "awb", "au", "snd", "lpcm", "yuv", "y4m", "ape", "wv", "shn", "m2ts", "m2t", "mts", "mtv", "ts", "tsv", "tsa", "tts", "trp", "adts", "adt", "mpa", "m1a", "m2a", "mp1", "mp2", "mp3", "mpeg", "mpg", "mpe", "mpeg2", "m1v", "m2v", "mp2v", "mpv", "mpv2", "mod", "tod", "vob", "vro", "evob", "evo", "mpeg4", "m4v", "mp4", "mp4v", "mpg4", "m4a", "aac", "h264", "avc", "x264", "264", "hevc", "h265", "x265", "265", "flac", "oga", "ogg", "opus", "spx", "ogv", "ogm", "ogx", "mkv", "mk3d", "mka", "webm", "weba", "avi", "vfw", "divx", "3iv", "xvid", "nut", "flic", "fli", "flc", "nsv", "gxf", "mxf", "wma", "wm", "wmv", "asf", "dvr-ms", "dvr", "wtv", "dv", "hdv", "flv","f4v", "f4a", "qt", "mov", "hdmov", "rm", "rmvb", "ra", "ram", "3ga", "3ga2", "3gpp", "3gp", "3gp2", "3g2", "ay", "gbs", "gym", "hes", "kss", "nsf", "nsfe", "sap", "spc", "vgm", "vgz", "m3u", "m3u8", "pls", "cue", 53 | "ase", "art", "bmp", "blp", "cd5", "cit", "cpt", "cr2", "cut", "dds", "dib", "djvu", "egt", "exif", "gif", "gpl", "grf", "icns", "ico", "iff", "jng", "jpeg", "jpg", "jfif", "jp2", "jps", "lbm", "max", "miff", "mng", "msp", "nitf", "ota", "pbm", "pc1", "pc2", "pc3", "pcf", "pcx", "pdn", "pgm", "PI1", "PI2", "PI3", "pict", "pct", "pnm", "pns", "ppm", "psb", "psd", "pdd", "psp", "px", "pxm", "pxr", "qfx", "raw", "rle", "sct", "sgi", "rgb", "int", "bw", "tga", "tiff", "tif", "vtf", "xbm", "xcf", "xpm", "3dv", "amf", "ai", "awg", "cgm", "cdr", "cmx", "dxf", "e2d", "egt", "eps", "fs", "gbr", "odg", "svg", "stl", "vrml", "x3d", "sxd", "v2d", "vnd", "wmf", "emf", "art", "xar", "png", "webp", "jxr", "hdp", "wdp", "cur", "ecw", "iff", "lbm", "liff", "nrrd", "pam", "pcx", "pgf", "sgi", "rgb", "rgba", "bw", "int", "inta", "sid", "ras", "sun", "tga", 54 | "torrent"] 55 | ]], --add above (after a comma) any extension you want paste to work with; e.g: ,'pdf'. Or set it as "" by deleting all defined extension to make paste works with any extension. 56 | paste_subtitles=[[ 57 | ["aqt", "gsub", "jss", "sub", "ttxt", "pjs", "psb", "rt", "smi", "slt", "ssf", "srt", "ssa", "ass", "usf", "idx", "vtt"] 58 | ]], --add above (after a comma) any extension you want paste to attempt to add as a subtitle file, e.g.:'txt'. Or set it as "" by deleting all defined extension to make paste attempt to add any subtitle. 59 | 60 | -----Time Format Settings----- 61 | --in the first parameter, you can define from the available styles: default, hms, hms-full, timestamp, timestamp-concise "default" to show in HH:MM:SS.sss format. "hms" to show in 1h 2m 3.4s format. "hms-full" is the same as hms but keeps the hours and minutes persistent when they are 0. "timestamp" to show the total time as timestamp 123456.700 format. "timestamp-concise" shows the total time in 123456.7 format (shows and hides decimals depending on availability). 62 | --in the second parameter, you can define whether to show milliseconds, round them or truncate them. Available options: 'truncate' to remove the milliseconds and keep the seconds. 0 to remove the milliseconds and round the seconds. 1 or above is the amount of milliseconds to display. The default value is 3 milliseconds. 63 | --in the third parameter you can define the seperator between hour:minute:second. "default" style is automatically set to ":", "hms", "hms-full" are automatically set to " ". You can define your own. 
Some examples: ["default", 3, "-"],["hms-full", 5, "."],["hms", "truncate", ":"],["timestamp-concise"],["timestamp", 0],["timestamp", "truncate"],["timestamp", 5] 64 | copy_time_format=[[ 65 | ["timestamp-concise"] 66 | ]], 67 | osd_time_format=[[ 68 | ["default", "truncate"] 69 | ]], 70 | 71 | ---------------------------END OF USER CUSTOMIZATION SETTINGS--------------------------- 72 | } 73 | 74 | (require 'mp.options').read_options(o) 75 | local utils = require 'mp.utils' 76 | local msg = require 'mp.msg' 77 | 78 | o.copy_keybind = utils.parse_json(o.copy_keybind) 79 | o.paste_keybind = utils.parse_json(o.paste_keybind) 80 | o.copy_specific_keybind = utils.parse_json(o.copy_specific_keybind) 81 | o.paste_specific_keybind = utils.parse_json(o.paste_specific_keybind) 82 | o.paste_protocols = utils.parse_json(o.paste_protocols) 83 | o.paste_extensions = utils.parse_json(o.paste_extensions) 84 | o.paste_subtitles = utils.parse_json(o.paste_subtitles) 85 | o.specific_time_attributes = utils.parse_json(o.specific_time_attributes) 86 | o.pastable_time_attributes = utils.parse_json(o.pastable_time_attributes) 87 | o.copy_time_format = utils.parse_json(o.copy_time_format) 88 | o.osd_time_format = utils.parse_json(o.osd_time_format) 89 | 90 | local protocols = {'https?:', 'magnet:', 'rtmps?:', 'smb:', 'ftps?:', 'sftp:'} 91 | local seekTime = 0 92 | local clip, clip_time, clip_file, filePath, fileTitle 93 | local clipboard_pasted = false 94 | 95 | function has_value(tab, val) 96 | for index, value in ipairs(tab) do 97 | if value == val then 98 | return true 99 | end 100 | end 101 | 102 | return false 103 | end 104 | 105 | table.insert(o.pastable_time_attributes, o.protocols_time_attribute) 106 | table.insert(o.pastable_time_attributes, o.local_time_attribute) 107 | for i = 1, #o.specific_time_attributes do 108 | if not has_value(o.pastable_time_attributes, o.specific_time_attributes[i][2]) then 109 | table.insert(o.pastable_time_attributes, o.specific_time_attributes[i][2]) 110 | end 111 | end 112 | 113 | if not o.device or o.device == 'auto' then 114 | if os.getenv('windir') ~= nil then 115 | o.device = 'windows' 116 | elseif os.execute '[ -d "/Applications" ]' == 0 and os.execute '[ -d "/Library" ]' == 0 or os.execute '[ -d "/Applications" ]' == true and os.execute '[ -d "/Library" ]' == true then 117 | o.device = 'mac' 118 | else 119 | o.device = 'linux' 120 | end 121 | end 122 | 123 | function starts_protocol(tab, val) 124 | for index, value in ipairs(tab) do 125 | if (val:find(value) == 1) then 126 | return true 127 | end 128 | end 129 | return false 130 | end 131 | 132 | function contain_value(tab, val) 133 | if not tab then return end 134 | if not val then return end 135 | 136 | for index, value in ipairs(tab) do 137 | if value.match(string.lower(val), string.lower(value)) then 138 | return true 139 | end 140 | end 141 | 142 | return false 143 | end 144 | 145 | function file_exists(name) 146 | local f = io.open(name, "r") 147 | if f ~= nil then io.close(f) return true else return false end 148 | end 149 | 150 | function format_time(seconds, sep, decimals, style) 151 | local function divmod (a, b) 152 | return math.floor(a / b), a % b 153 | end 154 | decimals = decimals == nil and 3 or decimals 155 | 156 | local s = seconds 157 | local h, s = divmod(s, 60*60) 158 | local m, s = divmod(s, 60) 159 | 160 | if decimals == 'truncate' then 161 | s = math.floor(s) 162 | decimals = 0 163 | if style == 'timestamp' then 164 | seconds = math.floor(seconds) 165 | end 166 | end 167 | 168 | if not style or 
style == '' or style == 'default' then 169 | local second_format = string.format("%%0%d.%df", 2+(decimals > 0 and decimals+1 or 0), decimals) 170 | sep = sep and sep or ":" 171 | return string.format("%02d"..sep.."%02d"..sep..second_format, h, m, s) 172 | elseif style == 'hms' or style == 'hms-full' then 173 | sep = sep ~= nil and sep or " " 174 | if style == 'hms-full' or h > 0 then 175 | return string.format("%dh"..sep.."%dm"..sep.."%." .. tostring(decimals) .. "fs", h, m, s) 176 | elseif m > 0 then 177 | return string.format("%dm"..sep.."%." .. tostring(decimals) .. "fs", m, s) 178 | else 179 | return string.format("%." .. tostring(decimals) .. "fs", s) 180 | end 181 | elseif style == 'timestamp' then 182 | return string.format("%." .. tostring(decimals) .. "f", seconds) 183 | elseif style == 'timestamp-concise' then 184 | return seconds 185 | end 186 | end 187 | 188 | function get_path() 189 | local path = mp.get_property('path') 190 | if not path then return end 191 | 192 | local title = mp.get_property('media-title'):gsub("\"", "") 193 | 194 | if starts_protocol(protocols, path) and o.prefer_filename_over_title == 'protocols' then 195 | title = mp.get_property('filename'):gsub("\"", "") 196 | elseif not starts_protocol(protocols, path) and o.prefer_filename_over_title == 'local' then 197 | title = mp.get_property('filename'):gsub("\"", "") 198 | elseif o.prefer_filename_over_title == 'all' then 199 | title = mp.get_property('filename'):gsub("\"", "") 200 | end 201 | 202 | return path, title 203 | end 204 | 205 | function bind_keys(keys, name, func, opts) 206 | if not keys then 207 | mp.add_forced_key_binding(keys, name, func, opts) 208 | return 209 | end 210 | 211 | for i = 1, #keys do 212 | if i == 1 then 213 | mp.add_forced_key_binding(keys[i], name, func, opts) 214 | else 215 | mp.add_forced_key_binding(keys[i], name .. i, func, opts) 216 | end 217 | end 218 | end 219 | 220 | function handleres(res, args) 221 | if not res.error and res.status == 0 then 222 | return res.stdout 223 | else 224 | msg.error("There was an error getting "..o.device.." 
clipboard: ") 225 | msg.error(" Status: "..(res.status or "")) 226 | msg.error(" Error: "..(res.error or "")) 227 | msg.error(" stdout: "..(res.stdout or "")) 228 | msg.error("args: "..utils.to_string(args)) 229 | return '' 230 | end 231 | end 232 | 233 | function os.capture(cmd) 234 | local f = assert(io.popen(cmd, 'r')) 235 | local s = assert(f:read('*a')) 236 | f:close() 237 | return s 238 | end 239 | 240 | function make_raw(s) 241 | if not s then return end 242 | s = string.gsub(s, '^%s+', '') 243 | s = string.gsub(s, '%s+$', '') 244 | s = string.gsub(s, '[\n\r]+', ' ') 245 | return s 246 | end 247 | 248 | function get_extension(path) 249 | if not path then return end 250 | 251 | match = string.match(path, '%.([^%.]+)$' ) 252 | if match == nil then 253 | return 'nomatch' 254 | else 255 | return match 256 | end 257 | end 258 | 259 | 260 | function get_specific_attribute(target_path) 261 | local pre_attribute = '' 262 | local after_attribute = '' 263 | if not starts_protocol(protocols, target_path) then 264 | pre_attribute = o.local_time_attribute 265 | elseif starts_protocol(protocols, target_path) then 266 | pre_attribute = o.protocols_time_attribute 267 | for i = 1, #o.specific_time_attributes do 268 | if contain_value({o.specific_time_attributes[i][1]}, target_path) then 269 | pre_attribute = o.specific_time_attributes[i][2] 270 | after_attribute = o.specific_time_attributes[i][3] 271 | break 272 | end 273 | end 274 | end 275 | return pre_attribute, after_attribute 276 | end 277 | 278 | function get_time_attribute(target_path) 279 | local pre_attribute = '' 280 | for i = 1, #o.pastable_time_attributes do 281 | if contain_value({o.pastable_time_attributes[i]}, target_path) then 282 | pre_attribute = o.pastable_time_attributes[i] 283 | break 284 | end 285 | end 286 | return pre_attribute 287 | end 288 | 289 | 290 | function get_clipboard() 291 | local clipboard 292 | if o.device == 'linux' then 293 | clipboard = os.capture(o.linux_paste) 294 | return clipboard 295 | elseif o.device == 'windows' then 296 | if o.windows_paste == 'powershell' then 297 | local args = { 298 | 'powershell', '-NoProfile', '-Command', [[& { 299 | Trap { 300 | Write-Error -ErrorRecord $_ 301 | Exit 1 302 | } 303 | $clip = Get-Clipboard -Raw -Format Text -TextFormatType UnicodeText 304 | if (-not $clip) { 305 | $clip = Get-Clipboard -Raw -Format FileDropList 306 | } 307 | $u8clip = [System.Text.Encoding]::UTF8.GetBytes($clip) 308 | [Console]::OpenStandardOutput().Write($u8clip, 0, $u8clip.Length) 309 | }]] 310 | } 311 | return handleres(utils.subprocess({ args = args, cancellable = false }), args) 312 | else 313 | clipboard = os.capture(o.windows_paste) 314 | return clipboard 315 | end 316 | elseif o.device == 'mac' then 317 | clipboard = os.capture(o.mac_paste) 318 | return clipboard 319 | end 320 | return '' 321 | end 322 | 323 | 324 | function set_clipboard(text) 325 | local pipe 326 | if o.device == 'linux' then 327 | pipe = io.popen(o.linux_copy, 'w') 328 | pipe:write(text) 329 | pipe:close() 330 | elseif o.device == 'windows' then 331 | if o.windows_copy == 'powershell' then 332 | local res = utils.subprocess({ args = { 333 | 'powershell', '-NoProfile', '-Command', string.format([[& { 334 | Trap { 335 | Write-Error -ErrorRecord $_ 336 | Exit 1 337 | } 338 | Add-Type -AssemblyName PresentationCore 339 | [System.Windows.Clipboard]::SetText('%s') 340 | }]], text) 341 | } }) 342 | else 343 | pipe = io.popen(o.windows_copy,'w') 344 | pipe:write(text) 345 | pipe:close() 346 | end 347 | elseif o.device == 'mac' 
then 348 | pipe = io.popen(o.mac_copy,'w') 349 | pipe:write(text) 350 | pipe:close() 351 | end 352 | return '' 353 | end 354 | 355 | function parse_clipboard(text) 356 | if not text then return end 357 | 358 | local clip, clip_file, clip_time, pre_attribute 359 | local clip_table = {} 360 | clip = text 361 | 362 | for c in clip:gmatch("[^\n\r+]+") do 363 | local c_pre_attribute, c_clip_file, c_clip_time, c_clip_extension 364 | c = make_raw(c) 365 | 366 | c_pre_attribute = get_time_attribute(c) 367 | if string.match(c, '(.*)'..c_pre_attribute) then 368 | c_clip_file = string.match(c, '(.*)'..c_pre_attribute) 369 | c_clip_time = tonumber(string.match(c, c_pre_attribute..'(%d*%.?%d*)')) 370 | elseif string.match(c, '^\"(.*)\"$') then 371 | c_clip_file = string.match(c, '^\"(.*)\"$') 372 | else 373 | c_clip_file = c 374 | end 375 | 376 | c_clip_extension = get_extension(c_clip_file) 377 | 378 | table.insert(clip_table, {c_clip_file, c_clip_time, c_clip_extension}) 379 | end 380 | 381 | clip = make_raw(clip) 382 | pre_attribute = get_time_attribute(clip) 383 | 384 | if string.match(clip, '(.*)'..pre_attribute) then 385 | clip_file = string.match(clip, '(.*)'..pre_attribute) 386 | clip_time = tonumber(string.match(clip, pre_attribute..'(%d*%.?%d*)')) 387 | elseif string.match(clip, '^\"(.*)\"$') then 388 | clip_file = string.match(clip, '^\"(.*)\"$') 389 | else 390 | clip_file = clip 391 | end 392 | 393 | return clip, clip_file, clip_time, clip_table 394 | end 395 | 396 | function copy() 397 | if filePath ~= nil then 398 | if o.copy_time_method == 'none' or copy_time_method == '' then 399 | copy_specific('path') 400 | return 401 | elseif o.copy_time_method == 'protocols' and not starts_protocol(protocols, filePath) then 402 | copy_specific('path') 403 | return 404 | elseif o.copy_time_method == 'local' and starts_protocol(protocols, filePath) then 405 | copy_specific('path') 406 | return 407 | elseif o.copy_time_method == 'specifics' then 408 | if not starts_protocol(protocols, filePath) then 409 | copy_specific('path') 410 | return 411 | else 412 | for i = 1, #o.specific_time_attributes do 413 | if contain_value({o.specific_time_attributes[i][1]}, filePath) then 414 | copy_specific('path×tamp') 415 | return 416 | end 417 | end 418 | copy_specific('path') 419 | return 420 | end 421 | else 422 | copy_specific('path×tamp') 423 | return 424 | end 425 | else 426 | if o.osd_messages == true then 427 | mp.osd_message('Failed to Copy\nNo Video Found') 428 | end 429 | msg.info('Failed to copy, no video found') 430 | end 431 | end 432 | 433 | 434 | function copy_specific(action) 435 | if not action then return end 436 | 437 | if filePath == nil then 438 | if o.osd_messages == true then 439 | mp.osd_message('Failed to Copy\nNo Video Found') 440 | end 441 | msg.info("Failed to copy, no video found") 442 | return 443 | else 444 | if action == 'title' then 445 | if o.osd_messages == true then 446 | mp.osd_message("Copied:\n"..fileTitle) 447 | end 448 | set_clipboard(fileTitle) 449 | msg.info("Copied the below into clipboard:\n"..fileTitle) 450 | end 451 | if action == 'path' then 452 | if o.osd_messages == true then 453 | mp.osd_message("Copied:\n"..filePath) 454 | end 455 | set_clipboard(filePath) 456 | msg.info("Copied the below into clipboard:\n"..filePath) 457 | end 458 | if action == 'timestamp' then 459 | local pre_attribute, after_attribute = get_specific_attribute(filePath) 460 | local video_time = mp.get_property_number('time-pos') 461 | if o.osd_messages == true then 462 | 
mp.osd_message("Copied"..o.time_seperator..format_time(video_time, o.osd_time_format[3], o.osd_time_format[2], o.osd_time_format[1])) 463 | end 464 | set_clipboard(pre_attribute..format_time(video_time, o.copy_time_format[3], o.copy_time_format[2], o.copy_time_format[1])..after_attribute) 465 | msg.info('Copied the below into clipboard:\n'..pre_attribute..format_time(video_time, o.copy_time_format[3], o.copy_time_format[2], o.copy_time_format[1])..after_attribute) 466 | end 467 | if action == 'path×tamp' then 468 | local pre_attribute, after_attribute = get_specific_attribute(filePath) 469 | local video_time = mp.get_property_number('time-pos') 470 | if o.osd_messages == true then 471 | mp.osd_message("Copied:\n" .. fileTitle .. o.time_seperator .. format_time(video_time, o.osd_time_format[3], o.osd_time_format[2], o.osd_time_format[1])) 472 | end 473 | set_clipboard(filePath..pre_attribute..format_time(video_time, o.copy_time_format[3], o.copy_time_format[2], o.copy_time_format[1])..after_attribute) 474 | msg.info('Copied the below into clipboard:\n'..filePath..pre_attribute..format_time(video_time, o.copy_time_format[3], o.copy_time_format[2], o.copy_time_format[1])..after_attribute) 475 | end 476 | end 477 | end 478 | 479 | function trigger_paste_action(action) 480 | if not action then return end 481 | 482 | if action == 'load-file' then 483 | filePath = clip_file 484 | if o.osd_messages == true then 485 | if clip_time ~= nil then 486 | mp.osd_message("Pasted:\n"..clip_file .. o.time_seperator .. format_time(clip_time, o.osd_time_format[3], o.osd_time_format[2], o.osd_time_format[1])) 487 | else 488 | mp.osd_message("Pasted:\n"..clip_file) 489 | end 490 | end 491 | mp.commandv('loadfile', clip_file) 492 | clipboard_pasted = true 493 | 494 | if clip_time ~= nil then 495 | msg.info("Pasted the below file into mpv:\n"..clip_file .. format_time(clip_time)) 496 | else 497 | msg.info("Pasted the below file into mpv:\n"..clip_file) 498 | end 499 | end 500 | 501 | if action == 'load-subtitle' then 502 | if o.osd_messages == true then 503 | mp.osd_message("Pasted Subtitle:\n"..clip_file) 504 | end 505 | mp.commandv('sub-add', clip_file, 'select') 506 | msg.info("Pasted the below subtitle into mpv:\n"..clip_file) 507 | end 508 | 509 | if action == 'file-seek' then 510 | local video_duration = mp.get_property_number('duration') 511 | seekTime = clip_time + o.resume_offset 512 | 513 | if seekTime > video_duration then 514 | if o.osd_messages == true then 515 | mp.osd_message('Time Paste Exceeds Video Length' .. o.time_seperator .. format_time(clip_time, o.osd_time_format[3], o.osd_time_format[2], o.osd_time_format[1])) 516 | end 517 | msg.info("The time pasted exceeds the video length:\n"..format_time(clip_time)) 518 | return 519 | end 520 | 521 | if (seekTime < 0) then 522 | seekTime = 0 523 | end 524 | 525 | if o.osd_messages == true then 526 | mp.osd_message('Resumed to Pasted Time' .. o.time_seperator .. format_time(clip_time, o.osd_time_format[3], o.osd_time_format[2], o.osd_time_format[1])) 527 | end 528 | mp.commandv('seek', seekTime, 'absolute', 'exact') 529 | msg.info("Resumed to the pasted time" .. o.time_seperator .. 
format_time(clip_time)) 530 | end 531 | 532 | if action == 'add-playlist' then 533 | if o.osd_messages == true then 534 | mp.osd_message('Pasted Into Playlist:\n'..clip_file) 535 | end 536 | mp.commandv('loadfile', clip_file, 'append-play') 537 | msg.info("Pasted the below into playlist:\n"..clip_file) 538 | end 539 | 540 | if action == 'error-subtitle' then 541 | if o.osd_messages == true then 542 | mp.osd_message('Subtitle Paste Requires Running Video:\n'..clip_file) 543 | end 544 | msg.info('Subtitles can only be pasted if a video is running:\n'..clip_file) 545 | end 546 | 547 | if action == 'error-unsupported' then 548 | if o.osd_messages == true then 549 | mp.osd_message('Paste of this item is unsupported possibly due to configuration:\n'..clip) 550 | end 551 | msg.info('Failed to paste into mpv, pasted item shown below is unsupported possibly due to configuration:\n'..clip) 552 | end 553 | 554 | if action == 'error-missing' then 555 | if o.osd_messages == true then 556 | mp.osd_message('File Doesn\'t Exist:\n' .. clip_file) 557 | end 558 | msg.info('The file below doesn\'t seem to exist:\n' .. clip_file) 559 | end 560 | 561 | if action == 'error-time' then 562 | if o.osd_messages == true then 563 | if clip_time ~= nil then 564 | mp.osd_message('Time Paste Requires Running Video' .. o.time_seperator .. format_time(clip_time, o.osd_time_format[3], o.osd_time_format[2], o.osd_time_format[1])) 565 | else 566 | mp.osd_message('Time Paste Requires Running Video') 567 | end 568 | end 569 | 570 | if clip_time ~= nil then 571 | msg.info('Time can only be pasted if a video is running:\n'.. format_time(clip_time)) 572 | else 573 | msg.info('Time can only be pasted if a video is running') 574 | end 575 | end 576 | 577 | if action == 'error-missingtime' then 578 | if o.osd_messages == true then 579 | mp.osd_message('Clipboard does not contain time for seeking:\n'..clip) 580 | end 581 | msg.info("Clipboard does not contain the time attribute and time for seeking:\n"..clip) 582 | end 583 | 584 | if action == 'error-samefile' then 585 | if o.osd_messages == true then 586 | mp.osd_message('Pasted file is already running:\n'..clip) 587 | end 588 | msg.info("Pasted file shown below is already running:\n"..clip) 589 | end 590 | 591 | if action == 'error-unknown' then 592 | if o.osd_messages == true then 593 | mp.osd_message('Paste was ignored due to an error:\n'..clip) 594 | end 595 | msg.info('Paste was ignored due to an error:\n'..clip) 596 | end 597 | 598 | end 599 | 600 | function multipaste() 601 | if #clip_table < 2 then return msg.warn('Single paste should be called instead of multipaste') end 602 | local file_ignored_total = 0 603 | local file_subtitle_total = 0 604 | local triggered_multipaste = {} 605 | 606 | if filePath == nil then 607 | for i=1, #clip_table do 608 | if file_exists(clip_table[i][1]) and has_value(o.paste_extensions, clip_table[i][3]) 609 | or starts_protocol(o.paste_protocols, clip_table[i][1]) then 610 | filePath = clip_table[i][1] 611 | mp.commandv('loadfile', clip_table[i][1]) 612 | clipboard_pasted = true 613 | table.remove(clip_table, i) 614 | triggered_multipaste[1] = true 615 | break 616 | end 617 | end 618 | end 619 | 620 | if filePath ~= nil then 621 | for i=1, #clip_table do 622 | if file_exists(clip_table[i][1]) and has_value(o.paste_extensions, clip_table[i][3]) 623 | or starts_protocol(o.paste_protocols, clip_table[i][1]) then 624 | mp.commandv('loadfile', clip_table[i][1], 'append-play') 625 | triggered_multipaste[2] = true 626 | elseif 
file_exists(clip_table[i][1]) and has_value(o.paste_subtitles, clip_table[i][3]) then 627 | mp.commandv('sub-add', clip_table[i][1]) 628 | file_subtitle_total = file_subtitle_total + 1 629 | elseif not has_value(o.paste_extensions, clip_table[i][3]) and not has_value(o.paste_subtitles, clip_table[i][3]) then 630 | msg.warn('The below was ignored since it is unsupported possibly due to configuration:\n'..clip_table[i][1]) 631 | file_ignored_total = file_ignored_total + 1 632 | elseif not file_exists(clip_table[i][1]) then 633 | msg.warn('The below doesn\'t seem to exist:\n' .. clip_table[i][1]) 634 | file_ignored_total = file_ignored_total + 1 635 | else 636 | msg.warn('The below was ignored due to an error:\n' .. clip_table[i][1]) 637 | file_ignored_total = file_ignored_total + 1 638 | end 639 | end 640 | end 641 | 642 | local osd_msg = '' 643 | if triggered_multipaste[1] == true then 644 | if osd_msg ~= '' then osd_msg = osd_msg..'\n' end 645 | osd_msg = osd_msg..'Pasted: '..filePath 646 | end 647 | if file_subtitle_total > 0 then 648 | if osd_msg ~= '' then osd_msg = osd_msg..'\n' end 649 | osd_msg = osd_msg..'Added '..file_subtitle_total..' Subtitle/s' 650 | end 651 | if triggered_multipaste[2] == true then 652 | if osd_msg ~= '' then osd_msg = osd_msg..'\n' end 653 | osd_msg = osd_msg..'Added Into Playlist '..#clip_table - file_ignored_total - file_subtitle_total..' item/s' 654 | end 655 | if file_ignored_total > 0 then 656 | if osd_msg ~= '' then osd_msg = osd_msg..'\n' end 657 | osd_msg = osd_msg..'Ignored '..file_ignored_total.. ' Item/s' 658 | end 659 | 660 | if osd_msg == '' then 661 | osd_msg = 'Pasted Items Ignored or Unable To Append Into Video:\n'..clip 662 | end 663 | 664 | if o.osd_messages == true then 665 | mp.osd_message(osd_msg) 666 | end 667 | msg.info(osd_msg) 668 | end 669 | 670 | 671 | function paste() 672 | if o.osd_messages == true then 673 | mp.osd_message("Pasting...") 674 | end 675 | msg.info("Pasting...") 676 | 677 | clip = get_clipboard(clip) 678 | if not clip then msg.error('Error: clip is null' .. 
clip) return end 679 | clip, clip_file, clip_time, clip_table = parse_clipboard(clip) 680 | 681 | if #clip_table > 1 then 682 | multipaste() 683 | else 684 | local currentVideoExtension = string.lower(get_extension(clip_file)) 685 | if filePath == nil then 686 | if file_exists(clip_file) and has_value(o.paste_extensions, currentVideoExtension) 687 | or starts_protocol(o.paste_protocols, clip_file) then 688 | trigger_paste_action('load-file') 689 | elseif file_exists(clip_file) and has_value(o.paste_subtitles, currentVideoExtension) then 690 | trigger_paste_action('error-subtitle') 691 | elseif not has_value(o.paste_extensions, currentVideoExtension) and not has_value(o.paste_subtitles, currentVideoExtension) then 692 | trigger_paste_action('error-unsupported') 693 | elseif not file_exists(clip_file) then 694 | trigger_paste_action('error-missing') 695 | else 696 | trigger_paste_action('error-unknown') 697 | end 698 | else 699 | if file_exists(clip_file) and has_value(o.paste_subtitles, currentVideoExtension) then 700 | trigger_paste_action('load-subtitle') 701 | elseif o.running_paste_behavior == 'playlist' then 702 | if filePath ~= clip_file and file_exists(clip_file) and has_value(o.paste_extensions, currentVideoExtension) 703 | or filePath ~= clip_file and starts_protocol(o.paste_protocols, clip_file) 704 | or filePath == clip_file and file_exists(clip_file) and has_value(o.paste_extensions, currentVideoExtension) and clip_time == nil 705 | or filePath == clip_file and starts_protocol(o.paste_protocols, clip_file) and clip_time == nil then 706 | trigger_paste_action('add-playlist') 707 | elseif clip_time ~= nil then 708 | trigger_paste_action('file-seek') 709 | elseif not has_value(o.paste_extensions, currentVideoExtension) and not has_value(o.paste_subtitles, currentVideoExtension) then 710 | trigger_paste_action('error-unsupported') 711 | elseif not file_exists(clip_file) then 712 | trigger_paste_action('error-missing') 713 | else 714 | trigger_paste_action('error-unknown') 715 | end 716 | elseif o.running_paste_behavior == 'timestamp' then 717 | if clip_time ~= nil then 718 | trigger_paste_action('file-seek') 719 | elseif file_exists(clip_file) and has_value(o.paste_extensions, currentVideoExtension) 720 | or starts_protocol(o.paste_protocols, clip_file) then 721 | trigger_paste_action('add-playlist') 722 | elseif not has_value(o.paste_extensions, currentVideoExtension) and not has_value(o.paste_subtitles, currentVideoExtension) then 723 | trigger_paste_action('error-unsupported') 724 | elseif not file_exists(clip_file) then 725 | trigger_paste_action('error-missing') 726 | else 727 | trigger_paste_action('error-unknown') 728 | end 729 | elseif o.running_paste_behavior == 'force' then 730 | if filePath ~= clip_file and file_exists(clip_file) and has_value(o.paste_extensions, currentVideoExtension) 731 | or filePath ~= clip_file and starts_protocol(o.paste_protocols, clip_file) then 732 | trigger_paste_action('load-file') 733 | elseif clip_time ~= nil then 734 | trigger_paste_action('file-seek') 735 | elseif file_exists(clip_file) and filePath == clip_file 736 | or filePath == clip_file and starts_protocol(o.paste_protocols, clip_file) then 737 | trigger_paste_action('add-playlist') 738 | elseif not has_value(o.paste_extensions, currentVideoExtension) and not has_value(o.paste_subtitles, currentVideoExtension) then 739 | trigger_paste_action('error-unsupported') 740 | elseif not file_exists(clip_file) then 741 | trigger_paste_action('error-missing') 742 | else 743 | 
trigger_paste_action('error-unknown') 744 | end 745 | end 746 | end 747 | end 748 | end 749 | 750 | 751 | function paste_specific(action) 752 | if not action then return end 753 | 754 | if o.osd_messages == true then 755 | mp.osd_message("Pasting...") 756 | end 757 | msg.info("Pasting...") 758 | 759 | clip = get_clipboard(clip) 760 | if not clip then msg.error('Error: clip is null' .. clip) return end 761 | clip, clip_file, clip_time, clip_table = parse_clipboard(clip) 762 | 763 | if #clip_table > 1 then 764 | multipaste() 765 | else 766 | local currentVideoExtension = string.lower(get_extension(clip_file)) 767 | if action == 'playlist' then 768 | if file_exists(clip_file) and has_value(o.paste_extensions, currentVideoExtension) 769 | or starts_protocol(o.paste_protocols, clip_file) then 770 | trigger_paste_action('add-playlist') 771 | elseif not has_value(o.paste_extensions, currentVideoExtension) and not has_value(o.paste_subtitles, currentVideoExtension) then 772 | trigger_paste_action('error-unsupported') 773 | elseif not file_exists(clip_file) then 774 | trigger_paste_action('error-missing') 775 | else 776 | trigger_paste_action('error-unknown') 777 | end 778 | end 779 | 780 | if action == 'timestamp' then 781 | if filePath == nil then 782 | trigger_paste_action('error-time') 783 | elseif clip_time ~= nil then 784 | trigger_paste_action('file-seek') 785 | elseif clip_time == nil then 786 | trigger_paste_action('error-missingtime') 787 | elseif not has_value(o.paste_extensions, currentVideoExtension) and not has_value(o.paste_subtitles, currentVideoExtension) then 788 | trigger_paste_action('error-unsupported') 789 | elseif not file_exists(clip_file) then 790 | trigger_paste_action('error-missing') 791 | else 792 | trigger_paste_action('error-unknown') 793 | end 794 | end 795 | 796 | if action == 'force' then 797 | if filePath ~= clip_file and file_exists(clip_file) and has_value(o.paste_extensions, currentVideoExtension) 798 | or filePath ~= clip_file and starts_protocol(o.paste_protocols, clip_file) then 799 | trigger_paste_action('load-file') 800 | elseif file_exists(clip_file) and filePath == clip_file 801 | or filePath == clip_file and starts_protocol(o.paste_protocols, clip_file) then 802 | trigger_paste_action('error-samefile') 803 | elseif not has_value(o.paste_extensions, currentVideoExtension) and not has_value(o.paste_subtitles, currentVideoExtension) then 804 | trigger_paste_action('error-unsupported') 805 | elseif not file_exists(clip_file) then 806 | trigger_paste_action('error-missing') 807 | else 808 | trigger_paste_action('error-unknown') 809 | end 810 | end 811 | end 812 | end 813 | 814 | mp.register_event('file-loaded', function() 815 | filePath, fileTitle = get_path() 816 | if clipboard_pasted == true then 817 | clip = get_clipboard(clip) 818 | if not clip then msg.error('Error: clip is null' .. 
clip) return end 819 | clip, clip_file, clip_time, clip_table = parse_clipboard(clip) 820 | 821 | if #clip_table > 1 then 822 | for i=1, #clip_table do 823 | if file_exists(clip_table[i][1]) and has_value(o.paste_extensions, clip_table[i][3]) 824 | or starts_protocol(o.paste_protocols, clip_table[i][1]) then 825 | clip_file = clip_table[i][1] 826 | clip_time = clip_table[i][2] 827 | break 828 | end 829 | end 830 | end 831 | 832 | if filePath == clip_file and clip_time ~= nil then 833 | local video_duration = mp.get_property_number('duration') 834 | seekTime = clip_time + o.resume_offset 835 | 836 | if seekTime > video_duration then 837 | if o.osd_messages == true then 838 | mp.osd_message('Time Paste Exceeds Video Length' .. o.time_seperator .. format_time(clip_time, o.osd_time_format[3], o.osd_time_format[2], o.osd_time_format[1])) 839 | end 840 | msg.info("The time pasted exceeds the video length:\n"..format_time(clip_time)) 841 | return 842 | end 843 | 844 | if seekTime < 0 then 845 | seekTime = 0 846 | end 847 | 848 | mp.commandv('seek', seekTime, 'absolute', 'exact') 849 | clipboard_pasted = false 850 | end 851 | end 852 | end) 853 | 854 | bind_keys(o.copy_keybind, 'copy', copy) 855 | bind_keys(o.copy_specific_keybind, 'copy-specific', function()copy_specific(o.copy_specific_behavior)end) 856 | bind_keys(o.paste_keybind, 'paste', paste) 857 | bind_keys(o.paste_specific_keybind, 'paste-specific', function()paste_specific(o.paste_specific_behavior)end) 858 | -------------------------------------------------------------------------------- /portable_config/scripts/SmartSkip.lua: -------------------------------------------------------------------------------- 1 | -- Copyright (c) 2023, Eisa AlAwadhi 2 | -- License: BSD 2-Clause License 3 | -- Creator: Eisa AlAwadhi 4 | -- Project: SmartSkip 5 | -- Version: 1.2 6 | -- Date: 23-09-2023 7 | 8 | -- Related forked projects: 9 | -- https://github.com/detuur/mpv-scripts/blob/master/skiptosilence.lua 10 | -- https://raw.githubusercontent.com/mpv-player/mpv/master/TOOLS/lua/autoload.lua 11 | -- https://github.com/mar04/chapters_for_mpv 12 | -- https://github.com/po5/chapterskip/blob/master/chapterskip.lua 13 | 14 | local o = { 15 | -----Silence Skip Settings----- 16 | silence_audio_level = -40, 17 | silence_duration = 0.65, 18 | ignore_silence_duration=5, 19 | min_skip_duration = 0, 20 | max_skip_duration = 130, 21 | keybind_twice_cancel_skip = true, 22 | silence_skip_to_end = "playlist-next", 23 | add_chapter_on_skip = true, 24 | force_mute_on_skip = false, 25 | -----Smart Skip Settings----- 26 | last_chapter_skip_behavior=[[ [ ["no-chapters", "silence-skip"], ["internal-chapters", "playlist-next"], ["external-chapters", "silence-skip"] ] ]], 27 | smart_next_proceed_countdown = true, 28 | smart_prev_cancel_countdown = true, 29 | -----Chapters Settings----- 30 | external_chapters_autoload = true, 31 | modified_chapters_autosave=[[ ["no-chapters", "external-chapters"] ]], 32 | global_chapters = true, 33 | global_chapters_path = "/:dir%mpvconf%/chapters", 34 | hash_global_chapters = true, 35 | add_chapter_ask_title = false, 36 | add_chapter_pause_for_input = false, 37 | add_chapter_placeholder_title = "Chapter ", 38 | -----Auto-Skip Settings----- 39 | autoskip_chapter = true, 40 | autoskip_countdown = 3, 41 | autoskip_countdown_bulk = false, 42 | autoskip_countdown_graceful = false, 43 | skip_once = false, 44 | categories=[[ [ ["internal-chapters", "prologue>Prologue/^Intro; opening>^OP/ OP$/^Opening; ending>^ED/ ED$/^Ending; preview>Preview$"], 
["external-chapters", "idx->0/2"] ] ]], 45 | skip=[[ [ ["internal-chapters", "toggle;toggle_idx;opening;ending;preview"], ["external-chapters", "toggle;toggle_idx"] ] ]], 46 | -----Autoload Settings----- 47 | autoload_playlist = true, 48 | autoload_max_entries = 5000, 49 | autoload_max_dir_stack = 20, 50 | ignore_hidden = true, 51 | same_type = false, 52 | directory_mode = "auto", 53 | images = true, 54 | videos = true, 55 | audio = true, 56 | additional_image_exts = "", 57 | additional_video_exts = "", 58 | additional_audio_exts = "", 59 | -----OSD Messages Settings----- 60 | osd_duration = 2500, 61 | seek_osd = "osd-msg-bar", 62 | chapter_osd = "osd-msg-bar", 63 | autoskip_osd = "osd-msg-bar", 64 | playlist_osd = true, 65 | osd_msg = true, 66 | -----Keybind Settings----- 67 | toggle_autoload_keybind=[[ [""] ]], 68 | toggle_autoskip_keybind=[[ ["ctrl+."] ]], 69 | toggle_category_autoskip_keybind=[[ ["alt+."] ]], 70 | cancel_autoskip_countdown_keybind=[[ ["esc", "n"] ]], 71 | proceed_autoskip_countdown_keybind=[[ ["enter", "y"] ]], 72 | add_chapter_keybind=[[ ["n"] ]], 73 | remove_chapter_keybind=[[ ["alt+n"] ]], 74 | edit_chapter_keybind=[[ [""] ]], 75 | write_chapters_keybind=[[ ["ctrl+n"] ]], 76 | bake_chapters_keybind=[[ [""] ]], 77 | chapter_next_keybind=[[ ["ctrl+right"] ]], 78 | chapter_prev_keybind=[[ ["ctrl+left"] ]], 79 | smart_next_keybind=[[ [">"] ]], 80 | smart_prev_keybind=[[ ["<"] ]], 81 | silence_skip_keybind=[[ ["?"] ]], 82 | } 83 | 84 | local mp = require 'mp' 85 | local msg = require 'mp.msg' 86 | local utils = require 'mp.utils' 87 | local options = require 'mp.options' 88 | options.read_options(o, nil, function(list) 89 | split_option_exts(list.additional_video_exts, list.additional_audio_exts, list.additional_image_exts) 90 | if list.videos or list.additional_video_exts or 91 | list.audio or list.additional_audio_exts or 92 | list.images or list.additional_image_exts then 93 | create_extensions() 94 | end 95 | if list.directory_mode then 96 | validate_directory_mode() 97 | end 98 | end) 99 | 100 | if o.add_chapter_on_skip ~= false and o.add_chapter_on_skip ~= true then o.add_chapter_on_skip = utils.parse_json(o.add_chapter_on_skip) end 101 | if o.modified_chapters_autosave ~= false and o.modified_chapters_autosave ~= true then o.modified_chapters_autosave = utils.parse_json(o.modified_chapters_autosave) end 102 | o.last_chapter_skip_behavior = utils.parse_json(o.last_chapter_skip_behavior) 103 | if utils.parse_json(o.skip) ~= nil then o.skip = utils.parse_json(o.skip) end 104 | if utils.parse_json(o.categories) ~= nil then o.categories = utils.parse_json(o.categories) end 105 | if o.skip_once ~= false and o.skip_once ~= true then o.skip_once = utils.parse_json(o.skip_once) end 106 | 107 | if o.global_chapters_path:match('/:dir%%mpvconf%%') then --1.2# add variables for specifying path via user-config 108 | o.global_chapters_path = o.global_chapters_path:gsub('/:dir%%mpvconf%%', mp.find_config_file('.')) 109 | elseif o.global_chapters_path:match('/:dir%%script%%') then 110 | o.global_chapters_path = o.global_chapters_path:gsub('/:dir%%script%%', debug.getinfo(1).source:match('@?(.*/)')) 111 | elseif o.global_chapters_path:match('/:var%%(.*)%%') then 112 | local os_variable = o.global_chapters_path:match('/:var%%(.*)%%') 113 | o.global_chapters_path = o.global_chapters_path:gsub('/:var%%(.*)%%', os.getenv(os_variable)) 114 | end 115 | 116 | o.toggle_autoload_keybind = utils.parse_json(o.toggle_autoload_keybind) 117 | o.toggle_autoskip_keybind = 
utils.parse_json(o.toggle_autoskip_keybind) 118 | o.cancel_autoskip_countdown_keybind = utils.parse_json(o.cancel_autoskip_countdown_keybind) 119 | o.proceed_autoskip_countdown_keybind = utils.parse_json(o.proceed_autoskip_countdown_keybind) 120 | o.toggle_category_autoskip_keybind = utils.parse_json(o.toggle_category_autoskip_keybind) 121 | o.add_chapter_keybind = utils.parse_json(o.add_chapter_keybind) 122 | o.remove_chapter_keybind = utils.parse_json(o.remove_chapter_keybind) 123 | o.write_chapters_keybind = utils.parse_json(o.write_chapters_keybind) 124 | o.edit_chapter_keybind = utils.parse_json(o.edit_chapter_keybind) 125 | o.bake_chapters_keybind = utils.parse_json(o.bake_chapters_keybind) 126 | o.chapter_prev_keybind = utils.parse_json(o.chapter_prev_keybind) 127 | o.chapter_next_keybind = utils.parse_json(o.chapter_next_keybind) 128 | o.smart_prev_keybind = utils.parse_json(o.smart_prev_keybind) 129 | o.smart_next_keybind = utils.parse_json(o.smart_next_keybind) 130 | o.silence_skip_keybind = utils.parse_json(o.silence_skip_keybind) 131 | 132 | package.path = mp.command_native({"expand-path", "~~/script-modules/?.lua;"}) .. package.path 133 | local user_input_module, input = pcall(require, "user-input-module") 134 | 135 | if o.osd_duration == -1 then o.osd_duration = (mp.get_property_number('osd-duration') or 1000) end 136 | local speed_state = 1 137 | local pause_state = false 138 | local mute_state = false 139 | local sub_state = nil 140 | local secondary_sub_state = nil 141 | local vid_state = nil 142 | local skip_flag = false 143 | local window_state = nil 144 | local force_silence_skip = false 145 | local initial_skip_time = 0 146 | local initial_chapter_count = 0 147 | local chapter_state = 'no-chapters' 148 | local file_length = 0 149 | local keep_open_state = "yes" 150 | if mp.get_property("config") ~= "no" then keep_open_state = mp.get_property("keep-open") end 151 | local osd_duration_default = (mp.get_property_number('osd-duration') or 1000) 152 | local autoload_playlist = o.autoload_playlist 153 | local autoskip_chapter = o.autoskip_chapter 154 | local playlist_osd = false 155 | local autoskip_playlist_osd = false 156 | local g_playlist_pos = 0 157 | local g_opt_categories = o.categories 158 | local g_opt_skip_once = false 159 | o.autoskip_countdown = math.floor(o.autoskip_countdown) 160 | local g_autoskip_countdown = o.autoskip_countdown 161 | local g_autoskip_countdown_flag = false 162 | local categories = { 163 | toggle = "", 164 | toggle_idx = "", 165 | } 166 | local autoskip_osd = o.autoskip_osd 167 | if o.autoskip_osd == 'osd-msg-bar' then autoskip_osd = 'osd-bar' end 168 | if o.autoskip_osd == 'osd-msg' then autoskip_osd = 'no-osd' end 169 | 170 | 171 | -- utility functions -- 172 | function has_value(tab, val, array2d) 173 | if not tab then return msg.error('check value passed') end 174 | if not val then return msg.error('check value passed') end 175 | if not array2d then 176 | for index, value in ipairs(tab) do 177 | if string.lower(value) == string.lower(val) then 178 | return true 179 | end 180 | end 181 | end 182 | if array2d then 183 | for i=1, #tab do 184 | if tab[i] and string.lower(tab[i][array2d]) == string.lower(val) then 185 | return true 186 | end 187 | end 188 | end 189 | 190 | return false 191 | end 192 | 193 | function esc_string(str) 194 | return str:gsub("([%p])", "%%%1") 195 | end 196 | 197 | function prompt_msg(text, duration) 198 | if not text then return end 199 | if not duration then duration = o.osd_duration end 200 | if o.osd_msg then 
mp.commandv("show-text", text, duration) end 201 | msg.info(text) 202 | end 203 | 204 | function bind_keys(keys, name, func, opts) 205 | if not keys then 206 | mp.add_forced_key_binding(keys, name, func, opts) 207 | return 208 | end 209 | 210 | for i = 1, #keys do 211 | if i == 1 then 212 | mp.add_forced_key_binding(keys[i], name, func, opts) 213 | else 214 | mp.add_forced_key_binding(keys[i], name .. i, func, opts) 215 | end 216 | end 217 | end 218 | 219 | function unbind_keys(keys, name) 220 | if not keys then 221 | mp.remove_key_binding(name) 222 | return 223 | end 224 | 225 | for i = 1, #keys do 226 | if i == 1 then 227 | mp.remove_key_binding(name) 228 | else 229 | mp.remove_key_binding(name .. i) 230 | end 231 | end 232 | end 233 | 234 | -- skip-silence utility functions -- 235 | function restoreProp(timepos,pause) 236 | if not timepos then timepos = mp.get_property_number("time-pos") end 237 | if not pause then pause = pause_state end 238 | 239 | mp.set_property("vid", vid_state) 240 | mp.set_property("force-window", window_state) 241 | mp.set_property_bool("mute", mute_state) 242 | mp.set_property("speed", speed_state) 243 | mp.unobserve_property(foundSilence) 244 | mp.command("no-osd af remove @skiptosilence") 245 | mp.set_property_bool("pause", pause) 246 | mp.set_property_number("time-pos", timepos) 247 | mp.set_property("sub-visibility", sub_state) 248 | mp.set_property("secondary-sub-visibility", secondary_sub_state) 249 | timer:kill() 250 | skip_flag = false 251 | end 252 | 253 | function handleMinMaxDuration(timepos) 254 | if not skip_flag then return end 255 | if not timepos then timepos = mp.get_property_number("time-pos") end 256 | 257 | skip_duration = timepos - initial_skip_time 258 | if o.min_skip_duration > 0 and skip_duration <= o.min_skip_duration then 259 | restoreProp(initial_skip_time) 260 | prompt_msg('Skipping Cancelled\nSilence less than minimum') 261 | return true 262 | end 263 | if o.max_skip_duration > 0 and skip_duration >= o.max_skip_duration then 264 | restoreProp(initial_skip_time) 265 | prompt_msg('Skipping Cancelled\nSilence is more than configured maximum') 266 | return true 267 | end 268 | return false 269 | end 270 | 271 | function setKeepOpenState() 272 | if o.silence_skip_to_end == "playlist-next" then 273 | mp.set_property("keep-open", "yes") 274 | else 275 | mp.set_property("keep-open", "always") 276 | end 277 | end 278 | 279 | function eofHandler(name, val) 280 | if val and skip_flag then 281 | if o.silence_skip_to_end == 'playlist-next' then 282 | restoreProp((mp.get_property_native('duration') or 0)) 283 | if mp.get_property_native('playlist-playing-pos')+1 == mp.get_property_native('playlist-count') then 284 | prompt_msg('Skipped to end at ' .. mp.get_property_osd('duration')) 285 | else 286 | mp.commandv("playlist-next") 287 | end 288 | elseif o.silence_skip_to_end == 'cancel' then 289 | prompt_msg('Skipping Cancelled\nSilence not detected') 290 | restoreProp(initial_skip_time) 291 | elseif o.silence_skip_to_end == 'pause' then 292 | prompt_msg('Skipped to end at ' .. 
mp.get_property_osd('duration')) 293 | restoreProp((mp.get_property_native('duration') or 0), true) 294 | end 295 | end 296 | end 297 | 298 | -- smart-skip main code -- 299 | function smartNext() 300 | if g_autoskip_countdown_flag and o.smart_next_proceed_countdown then proceed_autoskip(true) return end 301 | local next_action = "silence-skip" 302 | local chapters_count = (mp.get_property_number('chapters') or 0) 303 | local chapter = (mp.get_property_number('chapter') or 0) 304 | local current_playlist = (mp.get_property_native('playlist-playing-pos')+1 or 0) 305 | local total_playlist = (mp.get_property_native('playlist-count') or 0) 306 | 307 | if chapter+2 <= chapters_count then 308 | next_action = 'chapter-next' 309 | elseif chapter+2 > chapters_count and (initial_chapter_count == 0 or chapters_count == 0 or force_silence_skip) then 310 | if chapters_count == 0 then force_silence_skip = true end 311 | next_action = 'silence-skip' 312 | elseif chapter+1 >= chapters_count then 313 | for i = 1, #o.last_chapter_skip_behavior do 314 | if o.last_chapter_skip_behavior[i] and o.last_chapter_skip_behavior[i][1] == chapter_state then 315 | next_action = o.last_chapter_skip_behavior[i][2] 316 | break 317 | end 318 | end 319 | end 320 | 321 | if next_action == 'playlist-next' and current_playlist == total_playlist then 322 | next_action = 'chapter-next' 323 | end 324 | 325 | if next_action == 'silence-skip' then 326 | silenceSkip() 327 | end 328 | if next_action == 'chapter-next' then 329 | mp.set_property('osd-duration', o.osd_duration) 330 | mp.commandv(o.chapter_osd, 'add', 'chapter', 1) 331 | mp.add_timeout(0.07, function () mp.set_property('osd-duration', osd_duration_default) end) 332 | end 333 | if next_action == 'playlist-next' then 334 | mp.command('playlist_next') 335 | end 336 | end 337 | 338 | function smartPrev() 339 | if skip_flag then restoreProp(initial_skip_time) return end 340 | if g_autoskip_countdown_flag and o.smart_prev_cancel_countdown then kill_chapterskip_countdown('osd') return end 341 | local chapters_count = (mp.get_property_number('chapters') or 0) 342 | local chapter = (mp.get_property_number('chapter') or 0) 343 | local timepos = (mp.get_property_native("time-pos") or 0) 344 | 345 | if chapter-1 < 0 and timepos > 1 and chapters_count == 0 then 346 | mp.commandv('seek', 0, 'absolute', 'exact') 347 | 348 | mp.set_property('osd-duration', o.osd_duration) 349 | mp.commandv(o.seek_osd, "show-progress") 350 | mp.add_timeout(0.07, function () mp.set_property('osd-duration', osd_duration_default) end) 351 | elseif chapter-1 < 0 and timepos < 1 then 352 | mp.command('playlist_prev') 353 | elseif chapter-1 <= chapters_count then 354 | mp.set_property('osd-duration', o.osd_duration) 355 | mp.commandv(o.chapter_osd, 'add', 'chapter', -1) 356 | mp.add_timeout(0.07, function () mp.set_property('osd-duration', osd_duration_default) end) 357 | end 358 | end 359 | 360 | -- chapter-next/prev main code -- 361 | function chapterSeek(direction) 362 | if skip_flag and direction == -1 then restoreProp(initial_skip_time) return end 363 | 364 | local chapters_count = (mp.get_property_number('chapters') or 0) 365 | local chapter = (mp.get_property_number('chapter') or 0) 366 | local timepos = (mp.get_property_native("time-pos") or 0) 367 | 368 | if chapter+direction < 0 and timepos > 1 and chapters_count == 0 then 369 | mp.commandv('seek', 0, 'absolute', 'exact') 370 | 371 | mp.set_property('osd-duration', o.osd_duration) 372 | mp.commandv(o.seek_osd, "show-progress") 373 | 
mp.add_timeout(0.07, function () mp.set_property('osd-duration', osd_duration_default) end) 374 | elseif chapter+direction < 0 and timepos < 1 then 375 | mp.command('playlist_prev') 376 | elseif chapter+direction >= chapters_count then 377 | mp.command('playlist_next') 378 | else 379 | mp.set_property('osd-duration', o.osd_duration) 380 | mp.commandv(o.chapter_osd, 'add', 'chapter', direction) 381 | mp.add_timeout(0.07, function () mp.set_property('osd-duration', osd_duration_default) end) 382 | end 383 | end 384 | 385 | -- silence skip main code -- 386 | function silenceSkip(action) 387 | if skip_flag then if o.keybind_twice_cancel_skip then restoreProp(initial_skip_time) end return end 388 | initial_skip_time = (mp.get_property_native("time-pos") or 0) 389 | if math.floor(initial_skip_time) == math.floor(mp.get_property_native('duration') or 0) then return end 390 | local width = mp.get_property_native("osd-width") 391 | local height = mp.get_property_native("osd-height") 392 | mp.set_property_native("geometry", ("%dx%d"):format(width, height)) 393 | mp.commandv(o.seek_osd, "show-progress") 394 | 395 | mp.command( 396 | "no-osd af add @skiptosilence:lavfi=[silencedetect=noise=" .. 397 | o.silence_audio_level .. "dB:d=" .. o.silence_duration .. "]" 398 | ) 399 | 400 | mp.observe_property("af-metadata/skiptosilence", "string", foundSilence) 401 | 402 | sub_state = mp.get_property("sub-visibility") 403 | mp.set_property("sub-visibility", "no") 404 | secondary_sub_state = mp.get_property("secondary-sub-visibility") 405 | mp.set_property("secondary-sub-visibility", "no") 406 | window_state = mp.get_property("force-window") 407 | mp.set_property("force-window", "yes") 408 | vid_state = mp.get_property("vid") 409 | mp.set_property("vid", "no") 410 | mute_state = mp.get_property_native("mute") 411 | if o.force_mute_on_skip then 412 | mp.set_property_bool("mute", true) 413 | end 414 | pause_state = mp.get_property_native("pause") 415 | mp.set_property_bool("pause", false) 416 | speed_state = mp.get_property_native("speed") 417 | mp.set_property("speed", 100) 418 | setKeepOpenState() 419 | skip_flag = true 420 | 421 | timer = mp.add_periodic_timer(0.5, function() 422 | local video_time = (mp.get_property_native("time-pos") or 0) 423 | handleMinMaxDuration(video_time) 424 | if skip_flag then mp.commandv(o.seek_osd, "show-progress") end 425 | end) 426 | end 427 | 428 | function foundSilence(name, value) 429 | if value == "{}" or value == nil then 430 | return 431 | end 432 | 433 | timecode = tonumber(string.match(value, "%d+%.?%d+")) 434 | if timecode == nil or timecode < initial_skip_time + o.ignore_silence_duration then 435 | return 436 | end 437 | 438 | if handleMinMaxDuration(timecode) then return end 439 | 440 | restoreProp(timecode) 441 | 442 | mp.add_timeout(0.05, function() prompt_msg('Skipped to silence 🕒 ' .. 
mp.get_property_osd("time-pos")) end) 443 | if o.add_chapter_on_skip == true or has_value(o.add_chapter_on_skip, chapter_state) then 444 | mp.add_timeout(0.05, add_chapter) 445 | end 446 | skip_flag = false 447 | end 448 | 449 | -- modified fork of chapters_for_mpv -- 450 | --[[ 451 | Copyright (c) 2023 Mariusz Libera 452 | 453 | Permission is hereby granted, free of charge, to any person obtaining a copy 454 | of this software and associated documentation files (the "Software"), to deal 455 | in the Software without restriction, including without limitation the rights 456 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 457 | copies of the Software, and to permit persons to whom the Software is 458 | furnished to do so, subject to the following conditions: 459 | 460 | The above copyright notice and this permission notice shall be included in all 461 | copies or substantial portions of the Software. 462 | 463 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 464 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 465 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 466 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 467 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 468 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 469 | SOFTWARE. 470 | --]] 471 | 472 | 473 | -- to debug run mpv with arg: --msg-level=SmartSkip=debug 474 | -- to test o.run mpv with arg: --script-opts=SmartSkip-OPTION=VALUE 475 | 476 | 477 | 478 | local chapters_modified = false 479 | 480 | msg.debug("options:", utils.to_string(options)) 481 | 482 | 483 | -- CHAPTER MANIPULATION -------------------------------------------------------- 484 | 485 | 486 | function change_title_callback(user_input, err, chapter_index) 487 | if user_input == nil or err ~= nil then 488 | msg.warn("no chapter title provided:", err) 489 | return 490 | end 491 | 492 | local chapter_list = mp.get_property_native("chapter-list") 493 | 494 | if chapter_index > mp.get_property_number("chapter-list/count") then 495 | msg.warn("can't set chapter title") 496 | return 497 | end 498 | 499 | chapter_list[chapter_index].title = user_input 500 | 501 | mp.set_property_native("chapter-list", chapter_list) 502 | chapters_modified = true 503 | end 504 | 505 | 506 | function edit_chapter() 507 | local mpv_chapter_index = mp.get_property_number("chapter") 508 | local chapter_list = mp.get_property_native("chapter-list") 509 | 510 | if mpv_chapter_index == nil or mpv_chapter_index == -1 then 511 | msg.verbose("no chapter selected, nothing to edit") 512 | return 513 | end 514 | 515 | if not user_input_module then 516 | msg.error("no mpv-user-input, can't get user input, install: https://github.com/CogentRedTester/mpv-user-input") 517 | return 518 | end 519 | input.get_user_input(change_title_callback, { 520 | request_text = "title of the chapter:", 521 | default_input = chapter_list[mpv_chapter_index + 1].title, 522 | cursor_pos = #(chapter_list[mpv_chapter_index + 1].title) + 1, 523 | }, mpv_chapter_index + 1) 524 | 525 | if o.add_chapter_pause_for_input then 526 | mp.set_property_bool("pause", true) 527 | mp.osd_message(" ", 0.1) 528 | end 529 | end 530 | 531 | 532 | function add_chapter(timepos) 533 | if not timepos then timepos = mp.get_property_number("time-pos") end 534 | local chapter_list = mp.get_property_native("chapter-list") 535 | 536 | if #chapter_list > 0 then 
537 | for i = 1, #chapter_list do 538 | if math.floor(chapter_list[i].time) == math.floor(timepos) then 539 | msg.debug("failed to add chapter, chapter exists in same position") 540 | return 541 | end 542 | end 543 | end 544 | 545 | local chapter_index = (mp.get_property_number("chapter") or -1) + 2 546 | 547 | table.insert(chapter_list, chapter_index, {title = "", time = timepos}) 548 | 549 | msg.debug("inserting new chapter at ", chapter_index, " chapter_", " time: ", timepos) 550 | 551 | mp.set_property_native("chapter-list", chapter_list) 552 | chapters_modified = true 553 | 554 | if o.add_chapter_ask_title then 555 | if not user_input_module then 556 | msg.error("no mpv-user-input, can't get user input, install: https://github.com/CogentRedTester/mpv-user-input") 557 | return 558 | end 559 | -- ask user for chapter title 560 | input.get_user_input(change_title_callback, { 561 | request_text = "title of the chapter:", 562 | default_input = o.add_chapter_placeholder_title .. chapter_index, 563 | cursor_pos = #(o.add_chapter_placeholder_title .. chapter_index) + 1, 564 | }, chapter_index) 565 | 566 | if o.add_chapter_pause_for_input then 567 | mp.set_property_bool("pause", true) 568 | -- FIXME: for whatever reason osd gets hidden when we pause the 569 | -- playback like that, workaround to make input prompt appear 570 | -- right away without requiring mouse or keyboard action 571 | mp.osd_message(" ", 0.1) 572 | end 573 | end 574 | end 575 | 576 | 577 | function remove_chapter() 578 | local chapter_count = mp.get_property_number("chapter-list/count") 579 | 580 | if chapter_count < 1 then 581 | msg.verbose("no chapters to remove") 582 | return 583 | end 584 | 585 | local chapter_list = mp.get_property_native("chapter-list") 586 | local current_chapter = mp.get_property_number("chapter") + 1 587 | 588 | table.remove(chapter_list, current_chapter) 589 | msg.debug("removing chapter", current_chapter) 590 | 591 | mp.set_property_native("chapter-list", chapter_list) 592 | chapters_modified = true 593 | end 594 | 595 | 596 | -- UTILITY FUNCTIONS ----------------------------------------------------------- 597 | 598 | 599 | function detect_os() 600 | if package.config:sub(1,1) == "/" then 601 | return "unix" 602 | else 603 | return "windows" 604 | end 605 | end 606 | 607 | 608 | -- for unix use only 609 | -- returns a table of command path and varargs, or nil if command was not found 610 | function command_exists(command, ...) 611 | msg.debug("looking for command:", command) 612 | -- msg.debug("args:", ) 613 | local process = mp.command_native({ 614 | name = "subprocess", 615 | capture_stdout = true, 616 | capture_stderr = true, 617 | playback_only = false, 618 | args = {"sh", "-c", "command -v -- " .. 
command} 619 | }) 620 | 621 | if process.status == 0 then 622 | local command_path = process.stdout:gsub("\n", "") 623 | msg.debug("command found:", command_path) 624 | return {command_path, ...} 625 | else 626 | msg.debug("command not found:", command) 627 | return nil 628 | end 629 | end 630 | 631 | function mkdir(path) 632 | local args = nil 633 | 634 | if detect_os() == "unix" then 635 | args = {"mkdir", "-p", "--", path} 636 | else 637 | args = {"powershell", "-NoProfile", "-Command", "mkdir", path} 638 | end 639 | 640 | local process = mp.command_native({ 641 | name = 'subprocess', 642 | playback_only = false, 643 | capture_stdout = true, 644 | capture_stderr = true, 645 | args = args, 646 | }) 647 | 648 | if process.status == 0 then 649 | msg.debug("mkdir success:", path) 650 | return true 651 | else 652 | msg.error("mkdir failure:", path) 653 | return false 654 | end 655 | end 656 | 657 | 658 | -- returns md5 hash of the full path of the current media file 659 | function hash() 660 | local path = mp.get_property("path") 661 | if path == nil then 662 | msg.debug("something is wrong with the path, can't get full_path, can't hash it") 663 | return 664 | end 665 | 666 | msg.debug("hashing:", path) 667 | 668 | local cmd = { 669 | name = 'subprocess', 670 | capture_stdout = true, 671 | playback_only = false, 672 | } 673 | local args = nil 674 | 675 | if detect_os() == "unix" then 676 | local md5 = command_exists("md5sum") or command_exists("md5") or command_exists("openssl", "md5 | cut -d ' ' -f 2") 677 | if md5 == nil then 678 | msg.warn("no md5 command found, can't generate hash") 679 | return 680 | end 681 | md5 = table.concat(md5, " ") 682 | cmd["stdin_data"] = path 683 | args = {"sh", "-c", md5 .. " | cut -d ' ' -f 1 | tr '[:lower:]' '[:upper:]'" } 684 | else --windows 685 | -- https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.utility/get-filehash?view=powershell-7.3 686 | local hash_command ="$s = [System.IO.MemoryStream]::new(); $w = [System.IO.StreamWriter]::new($s); $w.write(\"" .. path .. "\"); $w.Flush(); $s.Position = 0; Get-FileHash -Algorithm MD5 -InputStream $s | Select-Object -ExpandProperty Hash" 687 | args = {"powershell", "-NoProfile", "-Command", hash_command} 688 | end 689 | cmd["args"] = args 690 | msg.debug("hash cmd:", utils.to_string(cmd)) 691 | local process = mp.command_native(cmd) 692 | 693 | if process.status == 0 then 694 | local hash = process.stdout:gsub("%s+", "") 695 | msg.debug("hash:", hash) 696 | return hash 697 | else 698 | msg.warn("hash function failed") 699 | return 700 | end 701 | end 702 | 703 | function construct_ffmetadata() 704 | local path = mp.get_property("path") 705 | if path == nil then 706 | msg.debug("something is wrong with the path, can't get full_path") 707 | return 708 | end 709 | 710 | local chapter_count = mp.get_property_number("chapter-list/count") 711 | local all_chapters = mp.get_property_native("chapter-list") 712 | 713 | local ffmetadata = ";FFMETADATA1\n;file=" .. path 714 | 715 | for i, c in ipairs(all_chapters) do 716 | local c_title = c.title 717 | local c_start = c.time * 1000000000 718 | local c_end 719 | 720 | if i < chapter_count then 721 | c_end = all_chapters[i+1].time * 1000000000 722 | else 723 | c_end = (mp.get_property_number("duration") or c.time) * 1000000000 724 | end 725 | 726 | msg.debug(i, "c_title", c_title, "c_start:", c_start, "c_end", c_end) 727 | 728 | ffmetadata = ffmetadata .. "\n[CHAPTER]\nSTART=" .. c_start .. "\nEND=" .. c_end .. "\ntitle=" .. 
c_title 729 | end 730 | 731 | return ffmetadata 732 | end 733 | 734 | 735 | -- FILE IO --------------------------------------------------------------------- 736 | 737 | 738 | -- args: 739 | -- osd - if true, display an osd message 740 | -- force -- if true write chapters file even if there are no changes 741 | -- on success returns path of the chapters file, nil on failure 742 | function write_chapters(...) 743 | local chapters_count = (mp.get_property_number('chapters') or 0) 744 | local osd, force = ... 745 | if not force and not chapters_modified then 746 | msg.debug("nothing to write") 747 | return 748 | end 749 | if initial_chapter_count == 0 and chapters_count == 0 then return end 750 | 751 | 752 | -- figure out the directory 753 | local chapters_dir 754 | if o.global_chapters then 755 | local dir = utils.file_info(o.global_chapters_path) 756 | if dir then 757 | if dir.is_dir then 758 | msg.debug("o.global_chapters_path exists:", o.global_chapters_path) 759 | chapters_dir = o.global_chapters_path 760 | else 761 | msg.error("o.global_chapters_path is not a directory") 762 | return 763 | end 764 | else 765 | msg.verbose("o.global_chapters_path doesn't exists:", o.global_chapters_path) 766 | if mkdir(o.global_chapters_path) then 767 | chapters_dir = o.global_chapters_path 768 | else 769 | return 770 | end 771 | end 772 | else 773 | chapters_dir = utils.split_path(mp.get_property("path")) 774 | end 775 | 776 | -- and the name 777 | local name = mp.get_property("filename") 778 | if o.hash_global_chapters and o.global_chapters then 779 | name = hash() 780 | if name == nil then 781 | msg.warn("hash function failed, fallback to filename") 782 | name = mp.get_property("filename") 783 | end 784 | end 785 | 786 | local chapters_file_path = utils.join_path(chapters_dir, name .. ".ffmetadata") 787 | --1.09#HERE I SHOULD ADD SOME SORT OF DELETE FUNCTION IN CASE CHAPTER COUNT IS 0 788 | msg.debug("opening for writing:", chapters_file_path) 789 | local chapters_file = io.open(chapters_file_path, "w") 790 | if chapters_file == nil then 791 | msg.error("could not open chapter file for writing") 792 | return 793 | end 794 | 795 | local success, error = chapters_file:write(construct_ffmetadata()) 796 | chapters_file:close() 797 | 798 | if success then 799 | if osd then 800 | prompt_msg('Chapters written to:' .. chapters_file_path) 801 | end 802 | return chapters_file_path 803 | else 804 | msg.error("error writing chapters file:", error) 805 | return 806 | end 807 | end 808 | 809 | function load_chapters() 810 | local path = mp.get_property("path") 811 | local expected_chapters_file = utils.join_path(utils.split_path(path), mp.get_property("filename") .. ".ffmetadata") 812 | 813 | msg.debug("looking for:", expected_chapters_file) 814 | 815 | local file = utils.file_info(expected_chapters_file) 816 | 817 | if file then 818 | msg.debug("found in the local directory, loading..") 819 | mp.set_property("file-local-options/chapters-file", expected_chapters_file) 820 | chapter_state = 'external-chapters' 821 | return 822 | end 823 | 824 | if not o.global_chapters then 825 | msg.debug("not in local, global chapters not enabled, aborting search") 826 | return 827 | end 828 | 829 | msg.debug("looking in the global directory") 830 | 831 | if o.hash_global_chapters then 832 | local hashed_path = hash() 833 | if hashed_path then 834 | expected_chapters_file = utils.join_path(o.global_chapters_path, hashed_path .. 
".ffmetadata") 835 | else 836 | msg.debug("hash function failed, fallback to path") 837 | expected_chapters_file = utils.join_path(o.global_chapters_path, mp.get_property("filename") .. ".ffmetadata") 838 | end 839 | else 840 | expected_chapters_file = utils.join_path(o.global_chapters_path, mp.get_property("filename") .. ".ffmetadata") 841 | end 842 | 843 | msg.debug("looking for:", expected_chapters_file) 844 | 845 | file = utils.file_info(expected_chapters_file) 846 | 847 | if file then 848 | msg.debug("found in the global directory, loading..") 849 | mp.set_property("file-local-options/chapters-file", expected_chapters_file) 850 | chapter_state = 'external-chapters' 851 | return 852 | end 853 | 854 | msg.debug("chapters file not found") 855 | end 856 | 857 | 858 | function bake_chapters() 859 | if mp.get_property_number("chapter-list/count") == 0 then 860 | msg.verbose("no chapters present") 861 | return 862 | end 863 | 864 | local chapters_file_path = write_chapters(false, true) 865 | if not chapters_file_path then 866 | msg.error("no chapters file") 867 | return 868 | end 869 | 870 | local filename = mp.get_property("filename") 871 | local output_name 872 | 873 | -- extract file extension 874 | local reverse_dot_index = filename:reverse():find(".", 1, true) 875 | if reverse_dot_index == nil then 876 | msg.warning("file has no extension, fallback to .mkv") 877 | output_name = filename .. ".chapters.mkv" 878 | else 879 | local dot_index = #filename + 1 - reverse_dot_index 880 | local ext = filename:sub(dot_index + 1) 881 | msg.debug("ext:", ext) 882 | if ext ~= "mkv" and ext ~= "mp4" and ext ~= "webm" then 883 | msg.debug("fallback to .mkv") 884 | ext = "mkv" 885 | end 886 | output_name = filename:sub(1, dot_index) .. "chapters." .. ext 887 | end 888 | 889 | local file_path = mp.get_property("path") 890 | local output_path = utils.join_path(utils.split_path(file_path), output_name) 891 | 892 | local args = {"ffmpeg", "-y", "-i", file_path, "-i", chapters_file_path, "-map_metadata", "1", "-codec", "copy", output_path} 893 | 894 | msg.debug("args:", utils.to_string(args)) 895 | 896 | local process = mp.command_native({ 897 | name = 'subprocess', 898 | playback_only = false, 899 | capture_stdout = true, 900 | capture_stderr = true, 901 | args = args 902 | }) 903 | 904 | if process.status == 0 then 905 | prompt_msg('file written to ' .. 
output_path) 906 | else 907 | msg.error("failed to write file:\n", process.stderr) 908 | end 909 | end 910 | 911 | --modified fork of autoload script-- 912 | function toggle_autoload() 913 | if autoload_playlist == true then 914 | prompt_msg('○ Auto-Load Disabled') 915 | autoload_playlist = false 916 | elseif autoload_playlist == false then 917 | prompt_msg('● Auto-Load Enabled') 918 | autoload_playlist = true 919 | end 920 | if autoload_playlist then find_and_add_entries() end 921 | end 922 | 923 | function Set (t) 924 | local set = {} 925 | for _, v in pairs(t) do set[v] = true end 926 | return set 927 | end 928 | 929 | function SetUnion (a,b) 930 | for k in pairs(b) do a[k] = true end 931 | return a 932 | end 933 | 934 | function Split (s) 935 | local set = {} 936 | for v in string.gmatch(s, '([^,]+)') do set[v] = true end 937 | return set 938 | end 939 | 940 | EXTENSIONS_VIDEO = Set { 941 | '3g2', '3gp', 'avi', 'flv', 'm2ts', 'm4v', 'mj2', 'mkv', 'mov', 942 | 'mp4', 'mpeg', 'mpg', 'ogv', 'rmvb', 'webm', 'wmv', 'y4m' 943 | } 944 | 945 | EXTENSIONS_AUDIO = Set { 946 | 'aiff', 'ape', 'au', 'flac', 'm4a', 'mka', 'mp3', 'oga', 'ogg', 947 | 'ogm', 'opus', 'wav', 'wma' 948 | } 949 | 950 | EXTENSIONS_IMAGES = Set { 951 | 'avif', 'bmp', 'gif', 'j2k', 'jp2', 'jpeg', 'jpg', 'jxl', 'png', 952 | 'svg', 'tga', 'tif', 'tiff', 'webp' 953 | } 954 | 955 | function split_option_exts(video, audio, image) 956 | if video then o.additional_video_exts = Split(o.additional_video_exts) end 957 | if audio then o.additional_audio_exts = Split(o.additional_audio_exts) end 958 | if image then o.additional_image_exts = Split(o.additional_image_exts) end 959 | end 960 | split_option_exts(true, true, true) 961 | 962 | function create_extensions() 963 | EXTENSIONS = {} 964 | if o.videos then SetUnion(SetUnion(EXTENSIONS, EXTENSIONS_VIDEO), o.additional_video_exts) end 965 | if o.audio then SetUnion(SetUnion(EXTENSIONS, EXTENSIONS_AUDIO), o.additional_audio_exts) end 966 | if o.images then SetUnion(SetUnion(EXTENSIONS, EXTENSIONS_IMAGES), o.additional_image_exts) end 967 | end 968 | create_extensions() 969 | 970 | function validate_directory_mode() 971 | if o.directory_mode ~= "recursive" and o.directory_mode ~= "lazy" and o.directory_mode ~= "ignore" then 972 | o.directory_mode = nil 973 | end 974 | end 975 | validate_directory_mode() 976 | 977 | function add_files(files) 978 | local oldcount = mp.get_property_number("playlist-count", 1) 979 | for i = 1, #files do 980 | mp.commandv("loadfile", files[i][1], "append") 981 | mp.commandv("playlist-move", oldcount + i - 1, files[i][2]) 982 | end 983 | end 984 | 985 | function get_extension(path) 986 | match = string.match(path, "%.([^%.]+)$" ) 987 | if match == nil then 988 | return "nomatch" 989 | else 990 | return match 991 | end 992 | end 993 | 994 | table.filter = function(t, iter) 995 | for i = #t, 1, -1 do 996 | if not iter(t[i]) then 997 | table.remove(t, i) 998 | end 999 | end 1000 | end 1001 | 1002 | table.append = function(t1, t2) 1003 | local t1_size = #t1 1004 | for i = 1, #t2 do 1005 | t1[t1_size + i] = t2[i] 1006 | end 1007 | end 1008 | 1009 | -- alphanum sorting for humans in Lua 1010 | -- http://notebook.kulchenko.com/algorithms/alphanumeric-natural-sorting-for-humans-in-lua 1011 | 1012 | function alphanumsort(filenames) 1013 | local function padnum(n, d) 1014 | return #d > 0 and ("%03d%s%.12f"):format(#n, n, tonumber(d) / (10 ^ #d)) 1015 | or ("%03d%s"):format(#n, n) 1016 | end 1017 | 1018 | local tuples = {} 1019 | for i, f in ipairs(filenames) do 1020 | 
tuples[i] = {f:lower():gsub("0*(%d+)%.?(%d*)", padnum), f} 1021 | end 1022 | table.sort(tuples, function(a, b) 1023 | return a[1] == b[1] and #b[2] < #a[2] or a[1] < b[1] 1024 | end) 1025 | for i, tuple in ipairs(tuples) do filenames[i] = tuple[2] end 1026 | return filenames 1027 | end 1028 | 1029 | local autoloaded = nil 1030 | local added_entries = {} 1031 | local autoloaded_dir = nil 1032 | 1033 | function scan_dir(path, current_file, dir_mode, separator, dir_depth, total_files, extensions) 1034 | if dir_depth == o.autoload_max_dir_stack then 1035 | return 1036 | end 1037 | msg.trace("scanning: " .. path) 1038 | local files = utils.readdir(path, "files") or {} 1039 | local dirs = dir_mode ~= "ignore" and utils.readdir(path, "dirs") or {} 1040 | local prefix = path == "." and "" or path 1041 | table.filter(files, function (v) 1042 | -- The current file could be a hidden file, ignoring it doesn't load other 1043 | -- files from the current directory. 1044 | if (o.ignore_hidden and not (prefix .. v == current_file) and string.match(v, "^%.")) then 1045 | return false 1046 | end 1047 | local ext = get_extension(v) 1048 | if ext == nil then 1049 | return false 1050 | end 1051 | return extensions[string.lower(ext)] 1052 | end) 1053 | table.filter(dirs, function(d) 1054 | return not ((o.ignore_hidden and string.match(d, "^%."))) 1055 | end) 1056 | alphanumsort(files) 1057 | alphanumsort(dirs) 1058 | 1059 | for i, file in ipairs(files) do 1060 | files[i] = prefix .. file 1061 | end 1062 | 1063 | table.append(total_files, files) 1064 | if dir_mode == "recursive" then 1065 | for _, dir in ipairs(dirs) do 1066 | scan_dir(prefix .. dir .. separator, current_file, dir_mode, 1067 | separator, dir_depth + 1, total_files, extensions) 1068 | end 1069 | else 1070 | for i, dir in ipairs(dirs) do 1071 | dirs[i] = prefix .. 
dir 1072 | end 1073 | table.append(total_files, dirs) 1074 | end 1075 | end 1076 | 1077 | function find_and_add_entries() 1078 | local path = mp.get_property("path", "") 1079 | local dir, filename = utils.split_path(path) 1080 | msg.trace(("dir: %s, filename: %s"):format(dir, filename)) 1081 | if not autoload_playlist then 1082 | msg.verbose("stopping: autoload_playlist is disabled") 1083 | return 1084 | elseif #dir == 0 then 1085 | msg.verbose("stopping: not a local path") 1086 | return 1087 | end 1088 | 1089 | local pl_count = mp.get_property_number("playlist-count", 1) 1090 | this_ext = get_extension(filename) 1091 | -- check if this is a manually made playlist 1092 | if (pl_count > 1 and autoloaded == nil) or 1093 | (pl_count == 1 and EXTENSIONS[string.lower(this_ext)] == nil) then 1094 | msg.verbose("stopping: manually made playlist") 1095 | return 1096 | else 1097 | if pl_count == 1 then 1098 | autoloaded = true 1099 | autoloaded_dir = dir 1100 | added_entries = {} 1101 | end 1102 | end 1103 | 1104 | local extensions = {} 1105 | if o.same_type then 1106 | if EXTENSIONS_VIDEO[string.lower(this_ext)] ~= nil then 1107 | extensions = EXTENSIONS_VIDEO 1108 | elseif EXTENSIONS_AUDIO[string.lower(this_ext)] ~= nil then 1109 | extensions = EXTENSIONS_AUDIO 1110 | else 1111 | extensions = EXTENSIONS_IMAGES 1112 | end 1113 | else 1114 | extensions = EXTENSIONS 1115 | end 1116 | 1117 | local pl = mp.get_property_native("playlist", {}) 1118 | local pl_current = mp.get_property_number("playlist-pos-1", 1) 1119 | msg.trace(("playlist-pos-1: %s, playlist: %s"):format(pl_current, 1120 | utils.to_string(pl))) 1121 | 1122 | local files = {} 1123 | do 1124 | local dir_mode = o.directory_mode or mp.get_property("directory-mode", "lazy") 1125 | local separator = mp.get_property_native("platform") == "windows" and "\\" or "/" 1126 | scan_dir(autoloaded_dir, path, dir_mode, separator, 0, files, extensions) 1127 | end 1128 | 1129 | if next(files) == nil then 1130 | msg.verbose("no other files or directories in directory") 1131 | return 1132 | end 1133 | 1134 | -- Find the current pl entry (dir+"/"+filename) in the sorted dir list 1135 | local current 1136 | for i = 1, #files do 1137 | if files[i] == path then 1138 | current = i 1139 | break 1140 | end 1141 | end 1142 | if current == nil then 1143 | return 1144 | end 1145 | msg.trace("current file position in files: "..current) 1146 | 1147 | -- treat already existing playlist entries, independent of how they got added 1148 | -- as if they got added by autoload 1149 | for _, entry in ipairs(pl) do 1150 | added_entries[entry.filename] = true 1151 | end 1152 | 1153 | local append = {[-1] = {}, [1] = {}} 1154 | for direction = -1, 1, 2 do -- 2 iterations, with direction = -1 and +1 1155 | for i = 1, o.autoload_max_entries do 1156 | local pos = current + i * direction 1157 | local file = files[pos] 1158 | if file == nil or file[1] == "." then 1159 | break 1160 | end 1161 | 1162 | -- skip files that are/were already in the playlist 1163 | if not added_entries[file] then 1164 | if direction == -1 then 1165 | msg.info("Prepending " .. file) 1166 | table.insert(append[-1], 1, {file, pl_current + i * direction + 1}) 1167 | else 1168 | msg.info("Adding " .. 
file) 1169 | if pl_count > 1 then 1170 | table.insert(append[1], {file, pl_current + i * direction - 1}) 1171 | else 1172 | mp.commandv("loadfile", file, "append") 1173 | end 1174 | end 1175 | end 1176 | added_entries[file] = true 1177 | end 1178 | if pl_count == 1 and direction == -1 and #append[-1] > 0 then 1179 | for i = 1, #append[-1] do 1180 | mp.commandv("loadfile", append[-1][i][1], "append") 1181 | end 1182 | mp.commandv("playlist-move", 0, current) 1183 | end 1184 | end 1185 | 1186 | if pl_count > 1 then 1187 | add_files(append[1]) 1188 | add_files(append[-1]) 1189 | end 1190 | end 1191 | 1192 | --modified fork of chapterskip.lua-- 1193 | 1194 | function matches(i, title) 1195 | local opt_skip = o.skip 1196 | if type(o.skip) == 'table' then 1197 | for i=1, #o.skip do 1198 | if o.skip[i] and o.skip[i][1] == chapter_state then 1199 | opt_skip = o.skip[i][2] 1200 | break 1201 | end 1202 | end 1203 | end 1204 | 1205 | for category in string.gmatch(opt_skip, " *([^;]*[^; ]) *") do 1206 | if categories[category:lower()] then 1207 | if category:lower() == "idx-" or category:lower() == "toggle_idx" then 1208 | for pattern in string.gmatch(categories[category:lower()], "([^/]+)") do 1209 | if tonumber(pattern) == i then 1210 | return true 1211 | end 1212 | end 1213 | else 1214 | if title then 1215 | for pattern in string.gmatch(categories[category:lower()], "([^/]+)") do 1216 | if string.match(title, pattern) then 1217 | return true 1218 | end 1219 | end 1220 | end 1221 | end 1222 | end 1223 | end 1224 | end 1225 | 1226 | local skipped = {} 1227 | local parsed = {} 1228 | 1229 | function prep_chapterskip_var() 1230 | if chapter_state == 'no-chapters' then return end 1231 | g_opt_categories = o.categories 1232 | 1233 | g_opt_skip_once = false 1234 | if o.skip_once == true or o.skip_once == false then 1235 | g_opt_skip_once = o.skip_once 1236 | elseif has_value(o.skip_once, chapter_state) then 1237 | g_opt_skip_once = true; 1238 | end 1239 | 1240 | if type(o.categories) == 'table' then 1241 | for i=1, #o.categories do 1242 | if o.categories[i] and o.categories[i][1] == chapter_state then 1243 | g_opt_categories = o.categories[i][2] 1244 | break 1245 | end 1246 | end 1247 | end 1248 | 1249 | for category in string.gmatch(g_opt_categories, "([^;]+)") do 1250 | local name, patterns = string.match(category, " *([^+>]*[^+> ]) *[+>](.*)") 1251 | if name then 1252 | categories[name:lower()] = patterns 1253 | elseif not parsed[category] then 1254 | mp.msg.warn("Improper category definition: " .. 
category) 1255 | end 1256 | parsed[category] = true 1257 | end 1258 | end 1259 | 1260 | function start_chapterskip_countdown(text, duration) 1261 | g_autoskip_countdown_flag = true 1262 | g_autoskip_countdown = g_autoskip_countdown - 1 1263 | 1264 | if o.autoskip_countdown_graceful and (g_autoskip_countdown <= 0) then kill_chapterskip_countdown(); mp.osd_message('',0) return end 1265 | 1266 | if (g_autoskip_countdown < 0) then kill_chapterskip_countdown(); mp.osd_message('',0) return end 1267 | 1268 | text = text:gsub("%%countdown%%", g_autoskip_countdown) 1269 | prompt_msg(text, 2000) 1270 | end 1271 | 1272 | function kill_chapterskip_countdown(action) 1273 | if not g_autoskip_countdown_flag then return end 1274 | if action == 'osd' and o.autoskip_osd ~= 'no-osd' then 1275 | prompt_msg('○ Auto-Skip Cancelled') 1276 | end 1277 | if g_autoskip_timer ~= nil then 1278 | g_autoskip_timer:kill() 1279 | end 1280 | unbind_keys(o.cancel_autoskip_countdown_keybind, 'cancel-autoskip-countdown') 1281 | unbind_keys(o.proceed_autoskip_countdown_keybind, 'proceed-autoskip-countdown') 1282 | g_autoskip_countdown = o.autoskip_countdown 1283 | g_autoskip_countdown_flag = false 1284 | end 1285 | 1286 | function chapterskip(_, current, countdown) 1287 | if chapter_state == 'no-chapters' then return end 1288 | if not autoskip_chapter then return end 1289 | if g_autoskip_countdown_flag then kill_chapterskip_countdown('osd') end 1290 | if not countdown then countdown = o.autoskip_countdown end 1291 | 1292 | local chapters = mp.get_property_native("chapter-list") 1293 | local skip = false 1294 | local consecutive_i = 0 1295 | 1296 | for i=0, #chapters do 1297 | if (not g_opt_skip_once or not skipped[i]) and i == 0 and chapters[i+1] and matches(i, chapters[i+1].title) then 1298 | if i == current + 1 or skip == i - 1 then 1299 | if skip then 1300 | skipped[skip] = true 1301 | end 1302 | skip = i 1303 | consecutive_i = consecutive_i+1 1304 | end 1305 | elseif (not g_opt_skip_once or not skipped[i]) and chapters[i] and matches(i, chapters[i].title) then 1306 | if i == current + 1 or skip == i - 1 then 1307 | if skip then 1308 | skipped[skip] = true 1309 | end 1310 | skip = i 1311 | consecutive_i = consecutive_i+1 1312 | end 1313 | elseif skip and countdown <= 0 then 1314 | mp.set_property('osd-duration', o.osd_duration) 1315 | mp.commandv(autoskip_osd, "show-progress") 1316 | mp.add_timeout(0.07, function () mp.set_property('osd-duration', osd_duration_default) end) 1317 | 1318 | if o.autoskip_osd == 'osd-msg-bar' or o.autoskip_osd == 'osd-msg' then 1319 | if consecutive_i > 1 then 1320 | local autoskip_osd_string = '' 1321 | for j=consecutive_i, 1, -1 do 1322 | local chapter_title = '' 1323 | if chapters[i-j] then chapter_title = chapters[i-j].title end 1324 | autoskip_osd_string=(autoskip_osd_string..'\n ➤ Chapter ('..i-j..') '..chapter_title) 1325 | end 1326 | prompt_msg('● Auto-Skip'..autoskip_osd_string) 1327 | else 1328 | prompt_msg('➤ Auto-Skip: Chapter '.. 
mp.command_native({'expand-text', '${chapter}'})) 1329 | end 1330 | end 1331 | mp.set_property("time-pos", chapters[i].time) 1332 | skipped[skip] = true 1333 | return 1334 | elseif skip and countdown > 0 then 1335 | g_autoskip_countdown_flag = true 1336 | bind_keys(o.cancel_autoskip_countdown_keybind, "cancel-autoskip-countdown", function() kill_chapterskip_countdown('osd') return end) 1337 | 1338 | local autoskip_osd_string = '' 1339 | local autoskip_graceful_osd = '' 1340 | if o.autoskip_countdown_graceful then autoskip_graceful_osd = 'Press Keybind to:\n' end 1341 | if o.autoskip_osd == 'osd-msg-bar' or o.autoskip_osd == 'osd-msg' then 1342 | if consecutive_i > 1 and o.autoskip_countdown_bulk then 1343 | local autoskip_osd_string = '' 1344 | for j=consecutive_i, 1, -1 do 1345 | local chapter_title = '' 1346 | if chapters[i-j] then chapter_title = chapters[i-j].title end 1347 | autoskip_osd_string=(autoskip_osd_string..'\n ▷ Chapter ('..i-j..') '..chapter_title) 1348 | end 1349 | prompt_msg(autoskip_graceful_osd..'○ Auto-Skip'..' in "'..o.autoskip_countdown..'"'..autoskip_osd_string, 2000) 1350 | g_autoskip_timer = mp.add_periodic_timer(1, function () 1351 | start_chapterskip_countdown(autoskip_graceful_osd..'○ Auto-Skip'..' in "%countdown%"'..autoskip_osd_string, 2000) 1352 | end) 1353 | else 1354 | prompt_msg(autoskip_graceful_osd..'▷ Auto-Skip in "'..o.autoskip_countdown..'": Chapter '.. mp.command_native({'expand-text', '${chapter}'}), 2000) 1355 | g_autoskip_timer = mp.add_periodic_timer(1, function () 1356 | start_chapterskip_countdown(autoskip_graceful_osd..'▷ Auto-Skip in "%countdown%": Chapter '.. mp.command_native({'expand-text', '${chapter}'}), 2000) 1357 | end) 1358 | end 1359 | end 1360 | function proceed_autoskip(force) 1361 | if not g_autoskip_countdown_flag then kill_chapterskip_countdown() return end 1362 | if g_autoskip_countdown > 1 and not force then return end 1363 | 1364 | mp.set_property('osd-duration', o.osd_duration) 1365 | mp.commandv(autoskip_osd, "show-progress") 1366 | mp.add_timeout(0.07, function () mp.set_property('osd-duration', osd_duration_default) end) 1367 | if o.autoskip_osd == 'osd-msg-bar' or o.autoskip_osd == 'osd-msg' then 1368 | if consecutive_i > 1 and o.autoskip_countdown_bulk then 1369 | local autoskip_osd_string = '' 1370 | for j=consecutive_i, 1, -1 do 1371 | local chapter_title = '' 1372 | if chapters[i-j] then chapter_title = chapters[i-j].title end 1373 | autoskip_osd_string=(autoskip_osd_string..'\n ➤ Chapter ('..i-j..') '..chapter_title) 1374 | end 1375 | prompt_msg('● Auto-Skip'..autoskip_osd_string) 1376 | else 1377 | prompt_msg('➤ Auto-Skip: Chapter '.. 
mp.command_native({'expand-text', '${chapter}'})) 1378 | end 1379 | end 1380 | if consecutive_i > 1 and o.autoskip_countdown_bulk then 1381 | mp.set_property("time-pos", chapters[i].time) 1382 | else 1383 | mp.commandv('no-osd', 'add', 'chapter', 1) 1384 | end 1385 | skipped[skip] = true 1386 | kill_chapterskip_countdown() 1387 | end 1388 | bind_keys(o.proceed_autoskip_countdown_keybind, "proceed-autoskip-countdown", function() proceed_autoskip(true) return end) 1389 | if o.autoskip_countdown_graceful then return end 1390 | mp.add_timeout(countdown, proceed_autoskip) 1391 | return 1392 | end 1393 | end 1394 | if skip and countdown <= 0 then 1395 | if mp.get_property_native("playlist-count") == mp.get_property_native("playlist-pos-1") then 1396 | return mp.set_property("time-pos", mp.get_property_native("duration")) 1397 | end 1398 | mp.commandv("playlist-next") 1399 | if o.autoskip_osd ~= 'no-osd' then autoskip_playlist_osd = true end 1400 | elseif skip and countdown > 0 then 1401 | g_autoskip_countdown_flag = true 1402 | bind_keys(o.cancel_autoskip_countdown_keybind, "cancel-autoskip-countdown", function() kill_chapterskip_countdown('osd') return end) 1403 | 1404 | if o.autoskip_osd == 'osd-msg-bar' or o.autoskip_osd == 'osd-msg' then 1405 | local autoskip_graceful_osd = '' 1406 | if o.autoskip_countdown_graceful then autoskip_graceful_osd = 'Press Keybind to:\n' end 1407 | if consecutive_i > 1 and o.autoskip_countdown_bulk then 1408 | local i = (mp.get_property_number('chapters')+1 or 0) 1409 | local autoskip_osd_string = '' 1410 | for j=consecutive_i, 1, -1 do 1411 | local chapter_title = '' 1412 | if chapters[i-j] then chapter_title = chapters[i-j].title end 1413 | autoskip_osd_string=(autoskip_osd_string..'\n ▷ Chapter ('..i-j..') '..chapter_title) 1414 | end 1415 | prompt_msg(autoskip_graceful_osd..'○ Auto-Skip'..' in "'..o.autoskip_countdown..'"'..autoskip_osd_string, 2000) 1416 | g_autoskip_timer = mp.add_periodic_timer(1, function () 1417 | start_chapterskip_countdown(autoskip_graceful_osd..'○ Auto-Skip'..' in "%countdown%"'..autoskip_osd_string, 2000) 1418 | end) 1419 | else 1420 | prompt_msg(autoskip_graceful_osd..'▷ Auto-Skip in "'..o.autoskip_countdown..'": Chapter '.. mp.command_native({'expand-text', '${chapter}'}), 2000) 1421 | g_autoskip_timer = mp.add_periodic_timer(1, function () 1422 | start_chapterskip_countdown(autoskip_graceful_osd..'▷ Auto-Skip in "%countdown%": Chapter '.. 
mp.command_native({'expand-text', '${chapter}'}), 2000) 1423 | end) 1424 | end 1425 | end 1426 | function proceed_autoskip(force) 1427 | if not g_autoskip_countdown_flag then return end 1428 | if g_autoskip_countdown > 1 and not force then return end 1429 | 1430 | mp.set_property('osd-duration', o.osd_duration) 1431 | mp.commandv(autoskip_osd, "show-progress") 1432 | mp.add_timeout(0.07, function () mp.set_property('osd-duration', osd_duration_default) end) 1433 | if consecutive_i > 1 and o.autoskip_countdown_bulk then 1434 | if mp.get_property_native("playlist-count") == mp.get_property_native("playlist-pos-1") then 1435 | return mp.set_property("time-pos", mp.get_property_native("duration")) 1436 | end 1437 | mp.commandv("playlist-next") 1438 | else 1439 | local current_chapter = (mp.get_property_number("chapter") + 1 or 0) 1440 | local chapters_count = (mp.get_property_number('chapters') or 0) 1441 | 1442 | if current_chapter == chapters_count then 1443 | if mp.get_property_native("playlist-count") == mp.get_property_native("playlist-pos-1") then 1444 | return mp.set_property("time-pos", mp.get_property_native("duration")) 1445 | end 1446 | mp.commandv("playlist-next") 1447 | else 1448 | mp.commandv('no-osd', 'add', 'chapter', 1) 1449 | end 1450 | end 1451 | if o.autoskip_osd ~= 'no-osd' then autoskip_playlist_osd = true end 1452 | kill_chapterskip_countdown() 1453 | end 1454 | bind_keys(o.proceed_autoskip_countdown_keybind, "proceed-autoskip-countdown", function() proceed_autoskip(true) return end) 1455 | if o.autoskip_countdown_graceful then return end 1456 | mp.add_timeout(countdown, proceed_autoskip) 1457 | end 1458 | end 1459 | 1460 | function toggle_autoskip() 1461 | if autoskip_chapter == true then 1462 | prompt_msg('○ Auto-Skip Disabled') 1463 | autoskip_chapter = false 1464 | if g_autoskip_countdown_flag then kill_chapterskip_countdown() end 1465 | elseif autoskip_chapter == false then 1466 | prompt_msg('● Auto-Skip Enabled') 1467 | autoskip_chapter = true 1468 | end 1469 | end 1470 | 1471 | function toggle_category_autoskip() 1472 | if chapter_state == 'no-chapters' then return end 1473 | if not mp.get_property_number("chapter") then return end 1474 | local chapters = mp.get_property_native("chapter-list") 1475 | local current_chapter = (mp.get_property_number("chapter") + 1 or 0) 1476 | 1477 | local chapter_title = tostring(current_chapter) 1478 | if current_chapter > 0 and chapters[current_chapter].title and chapters[current_chapter].title ~= '' then 1479 | chapter_title = chapters[current_chapter].title 1480 | end 1481 | 1482 | local found_i = 0 1483 | if matches(current_chapter, chapter_title) then 1484 | for category in string.gmatch(g_opt_categories, "([^;]+)") do 1485 | local name, patterns = string.match(category, " *([^+>]*[^+> ]) *[+>](.*)") 1486 | 1487 | for pattern in string.gmatch(patterns, "([^/]+)") do 1488 | if string.match(chapter_title:lower(), pattern:lower()) then 1489 | g_opt_categories = g_opt_categories:gsub(esc_string(pattern)..'/?', "") 1490 | found_i = found_i + 1 1491 | end 1492 | end 1493 | end 1494 | 1495 | for category in string.gmatch(g_opt_categories, "([^;]+)") do 1496 | local name, patterns = string.match(category, " *([^+>]*[^+> ]) *[+>](.*)") 1497 | if name then 1498 | categories[name:lower()] = patterns 1499 | elseif not parsed[category] then 1500 | mp.msg.warn("Improper category definition: " .. 
category) 1501 | end 1502 | parsed[category] = true 1503 | end 1504 | 1505 | if type(o.categories) == 'table' then 1506 | for i=1, #o.categories do 1507 | if o.categories[i] and o.categories[i][1] == chapter_state then 1508 | o.categories[i][2] = g_opt_categories 1509 | break 1510 | end 1511 | end 1512 | else 1513 | o.categories = g_opt_categories 1514 | end 1515 | end 1516 | if current_chapter > 0 and chapters[current_chapter].title and chapters[current_chapter].title ~= '' then 1517 | if found_i > 0 or string.match(categories.toggle, esc_string(chapter_title)) then 1518 | prompt_msg('○ Removed from Auto-Skip\n ▷ Chapter: '..chapter_title) 1519 | categories.toggle = categories.toggle:gsub(esc_string("^"..chapter_title.."/"), "") 1520 | if g_autoskip_countdown_flag then kill_chapterskip_countdown() end 1521 | else 1522 | prompt_msg('● Added to Auto-Skip\n ➔ Chapter: '..chapter_title) 1523 | categories.toggle = categories.toggle.."^"..chapter_title.."/" 1524 | end 1525 | else 1526 | if found_i > 0 or string.match(categories.toggle_idx, esc_string(chapter_title)) then 1527 | prompt_msg('○ Removed from Auto-Skip\n ▷ Chapter: '..chapter_title) 1528 | categories.toggle_idx = categories.toggle_idx:gsub(esc_string(chapter_title.."/"), "") 1529 | if g_autoskip_countdown_flag then kill_chapterskip_countdown() end 1530 | else 1531 | prompt_msg('● Added to Auto-Skip\n ➔ Chapter: '..chapter_title) 1532 | categories.toggle_idx = categories.toggle_idx..chapter_title.."/" 1533 | end 1534 | end 1535 | end 1536 | 1537 | -- HOOKS -------------------------------------------------------------------- 1538 | if user_input_module then mp.add_hook("on_unload", 50, function () input.cancel_user_input() end) end -- chapters.lua 1539 | mp.register_event("start-file", find_and_add_entries) -- autoload.lua 1540 | mp.observe_property("chapter", "number", chapterskip) -- chapterskip.lua 1541 | 1542 | -- smart skip events / properties / hooks -- 1543 | 1544 | mp.register_event('file-loaded', function() 1545 | file_length = (mp.get_property_native('duration') or 0) 1546 | if o.playlist_osd and g_playlist_pos > 0 then playlist_osd = true end 1547 | if playlist_osd and not autoskip_playlist_osd then 1548 | prompt_msg('['..mp.command_native({'expand-text', '${playlist-pos-1}'})..'/'..mp.command_native({'expand-text', '${playlist-count}'})..'] '..mp.command_native({'expand-text', '${filename}'})) 1549 | end 1550 | if autoskip_playlist_osd then 1551 | prompt_msg('➤ Auto-Skip\n['..mp.command_native({'expand-text', '${playlist-pos-1}'})..'/'..mp.command_native({'expand-text', '${playlist-count}'})..'] '..mp.command_native({'expand-text', '${filename}'})) 1552 | end 1553 | playlist_osd = false 1554 | autoskip_playlist_osd = false 1555 | force_silence_skip = false 1556 | skipped = {} 1557 | initial_chapter_count = mp.get_property_number("chapter-list/count") 1558 | if initial_chapter_count > 0 and chapter_state ~= 'external-chapters' then chapter_state = 'internal-chapters' end 1559 | prep_chapterskip_var() 1560 | end) 1561 | 1562 | mp.add_hook("on_load", 50, function() 1563 | if o.external_chapters_autoload then load_chapters() end 1564 | end) 1565 | 1566 | mp.observe_property('pause', 'bool', function(name, value) 1567 | if value and skip_flag then 1568 | restoreProp(initial_skip_time, true) 1569 | end 1570 | if g_autoskip_countdown_flag then kill_chapterskip_countdown('osd') end 1571 | end) 1572 | 1573 | mp.add_hook('on_unload', 9, function() 1574 | if o.modified_chapters_autosave == true or 
has_value(o.modified_chapters_autosave, chapter_state) then write_chapters(false) end 1575 | mp.set_property("keep-open", keep_open_state) 1576 | chapter_state = 'no-chapters' 1577 | g_playlist_pos = (mp.get_property_native('playlist-playing-pos')+1 or 0) 1578 | kill_chapterskip_countdown() 1579 | end) 1580 | 1581 | mp.register_event('seek', function() 1582 | if g_autoskip_countdown_flag then kill_chapterskip_countdown('osd') end 1583 | end) 1584 | 1585 | mp.observe_property('eof-reached', 'bool', eofHandler) 1586 | 1587 | -- BINDINGS -------------------------------------------------------------------- 1588 | 1589 | bind_keys(o.toggle_autoload_keybind, 'toggle-autoload', toggle_autoload) 1590 | bind_keys(o.toggle_autoskip_keybind, "toggle-autoskip", toggle_autoskip) 1591 | bind_keys(o.toggle_category_autoskip_keybind, "toggle-category-autoskip", toggle_category_autoskip) 1592 | bind_keys(o.add_chapter_keybind, "add-chapter", add_chapter) 1593 | bind_keys(o.remove_chapter_keybind, "remove-chapter", remove_chapter) 1594 | bind_keys(o.write_chapters_keybind, "write-chapters", function () write_chapters(true) end) 1595 | bind_keys(o.edit_chapter_keybind, "edit-chapter", edit_chapter) 1596 | bind_keys(o.bake_chapters_keybind, "bake-chapters", bake_chapters) 1597 | bind_keys(o.chapter_prev_keybind, "chapter-prev", function() chapterSeek(-1) end) 1598 | bind_keys(o.chapter_next_keybind, "chapter-next", function() chapterSeek(1) end) 1599 | bind_keys(o.smart_prev_keybind, "smart-prev", smartPrev) 1600 | bind_keys(o.smart_next_keybind, "smart-next", smartNext) 1601 | bind_keys(o.silence_skip_keybind, "silence-skip", silenceSkip) 1602 | -------------------------------------------------------------------------------- /portable_config/vs/MEMC_RIFE_NV_HQ.vpy: -------------------------------------------------------------------------------- 1 | ### https://github.com/hooke007/MPV_lazy/wiki/3_K7sfunc 2 | ### RIFE (v4+) Frame interpolation, high performance requirement, NVIDIA exclusive 3 | 4 | import vapoursynth as vs 5 | from vapoursynth import core 6 | import k7sfunc as k7f 7 | 8 | clip = video_in 9 | 10 | ############ 11 | # User options # 12 | ############ 13 | 14 | H_Pre = 1080 ## Integer, pre-downscale source height (fill in your display height) 15 | Lt_D2k = False ## Whether to interpolate frames for sources exceeding DCI2K resolution 16 | Model = 452 ## <450|451|452> Model used. 
450 is rife 4.6, 451 is rife 4.15 lite, 452 is rife 4.18 17 | Ext_Proc = False ## Whether to use external padding/cropping processing 18 | T_Tta = False ## Whether to use ensemble model 19 | Fps_Num = 2 ## Integer, Fps_Num/Fps_Den value, i.e., frame rate multiplier 20 | Fps_Den = 1 ## Integer 21 | Sc_Mode = 1 ## <0|1|2> Scene change detection mode, 0 to disable 22 | Gpu = 0 ## GPU index used, 0 is the first in order 23 | Gpu_T = 2 ## <1|2|3> Number of GPU threads 24 | St_Eng = False ## Whether to use static engine (requires preprocessing for different resolutions); dynamic engine adapts to different resolutions (QVGA→DCI4K) 25 | Ws_Size = 0 ## Integer, constrain VRAM (MiB), minimum for static engine is 128 (dynamic engine automatically doubles), set below this value to maximize utilization 26 | Lk_Fmt = False ## Lock pixel format to yuv420p8 27 | 28 | ret = k7f.FPS_CTRL(clip, fps_in=container_fps, fps_ret=True) 29 | clip = k7f.FMT_CTRL(clip, h_max=H_Pre, fmt_pix=1 if Lk_Fmt else 0) 30 | clip = k7f.RIFE_NV(clip, lt_d2k=Lt_D2k, model=Model, ext_proc=Ext_Proc, t_tta=T_Tta, fps_in=container_fps, fps_num=Fps_Num, fps_den=Fps_Den, sc_mode=Sc_Mode, gpu=Gpu, gpu_t=Gpu_T, st_eng=St_Eng, ws_size=Ws_Size) 31 | 32 | clip.set_output() 33 | -------------------------------------------------------------------------------- /portable_config/vs/MEMC_RIFE_NV_LQ.vpy: -------------------------------------------------------------------------------- 1 | ### https://github.com/hooke007/MPV_lazy/wiki/3_K7sfunc 2 | ### RIFE (v4+) Frame interpolation, high performance requirement, NVIDIA exclusive 3 | 4 | import vapoursynth as vs 5 | from vapoursynth import core 6 | import k7sfunc as k7f 7 | 8 | clip = video_in 9 | 10 | ############ 11 | # User options # 12 | ############ 13 | 14 | H_Pre = 1080 ## Integer, pre-downscale source height (fill in your display height) 15 | Lt_D2k = False ## Whether to interpolate frames for sources exceeding DCI2K resolution 16 | Model = 450 ## <450|451|452> Model used. 
450 is rife 4.6, 451 is rife 4.15 lite, 452 is rife 4.18 17 | Ext_Proc = False ## Whether to use external padding/cropping processing 18 | T_Tta = False ## Whether to use ensemble model 19 | Fps_Num = 2 ## Integer, Fps_Num/Fps_Den value, i.e., frame rate multiplier 20 | Fps_Den = 1 ## Integer 21 | Sc_Mode = 1 ## <0|1|2> Scene change detection mode, 0 to disable 22 | Gpu = 0 ## GPU index used, 0 is the first in order 23 | Gpu_T = 2 ## <1|2|3> Number of GPU threads 24 | St_Eng = False ## Whether to use static engine (requires preprocessing for different resolutions); dynamic engine adapts to different resolutions (QVGA→DCI4K) 25 | Ws_Size = 0 ## Integer, constrain VRAM (MiB), minimum for static engine is 128 (dynamic engine automatically doubles), set below this value to maximize utilization 26 | Lk_Fmt = False ## Lock pixel format to yuv420p8 27 | 28 | ret = k7f.FPS_CTRL(clip, fps_in=container_fps, fps_ret=True) 29 | clip = k7f.FMT_CTRL(clip, h_max=H_Pre, fmt_pix=1 if Lk_Fmt else 0) 30 | clip = k7f.RIFE_NV(clip, lt_d2k=Lt_D2k, model=Model, ext_proc=Ext_Proc, t_tta=T_Tta, fps_in=container_fps, fps_num=Fps_Num, fps_den=Fps_Den, sc_mode=Sc_Mode, gpu=Gpu, gpu_t=Gpu_T, st_eng=St_Eng, ws_size=Ws_Size) 31 | 32 | clip.set_output() 33 | -------------------------------------------------------------------------------- /portable_config/vs/MEMC_RIFE_NV_MQ.vpy: -------------------------------------------------------------------------------- 1 | ### https://github.com/hooke007/MPV_lazy/wiki/3_K7sfunc 2 | ### RIFE (v4+) Frame interpolation, high performance requirement, NVIDIA exclusive 3 | 4 | import vapoursynth as vs 5 | from vapoursynth import core 6 | import k7sfunc as k7f 7 | 8 | clip = video_in 9 | 10 | ############ 11 | # User options # 12 | ############ 13 | 14 | H_Pre = 1080 ## Integer, pre-downscale source height (fill in your display height) 15 | Lt_D2k = False ## Whether to interpolate frames for sources exceeding DCI2K resolution 16 | Model = 451 ## <450|451|452> Model used. 
450 is rife 4.6, 451 is rife 4.15 lite, 452 is rife 4.18 17 | Ext_Proc = False ## Whether to use external padding/cropping processing 18 | T_Tta = False ## Whether to use ensemble model 19 | Fps_Num = 2 ## Integer, Fps_Num/Fps_Den value, i.e., frame rate multiplier 20 | Fps_Den = 1 ## Integer 21 | Sc_Mode = 1 ## <0|1|2> Scene change detection mode, 0 to disable 22 | Gpu = 0 ## GPU index used, 0 is the first in order 23 | Gpu_T = 2 ## <1|2|3> Number of GPU threads 24 | St_Eng = False ## Whether to use static engine (requires preprocessing for different resolutions); dynamic engine adapts to different resolutions (QVGA→DCI4K) 25 | Ws_Size = 0 ## Integer, constrain VRAM (MiB), minimum for static engine is 128 (dynamic engine automatically doubles), set below this value to maximize utilization 26 | Lk_Fmt = False ## Lock pixel format to yuv420p8 27 | 28 | ret = k7f.FPS_CTRL(clip, fps_in=container_fps, fps_ret=True) 29 | clip = k7f.FMT_CTRL(clip, h_max=H_Pre, fmt_pix=1 if Lk_Fmt else 0) 30 | clip = k7f.RIFE_NV(clip, lt_d2k=Lt_D2k, model=Model, ext_proc=Ext_Proc, t_tta=T_Tta, fps_in=container_fps, fps_num=Fps_Num, fps_den=Fps_Den, sc_mode=Sc_Mode, gpu=Gpu, gpu_t=Gpu_T, st_eng=St_Eng, ws_size=Ws_Size) 31 | 32 | clip.set_output() 33 | -------------------------------------------------------------------------------- /vs-plugins/models/rife_v2/rife_v4.50.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vadash/mpv-lazy-en/65794dd72db059cf98eab45cf2b242928a04ed67/vs-plugins/models/rife_v2/rife_v4.50.onnx -------------------------------------------------------------------------------- /vs-plugins/models/rife_v2/rife_v4.51.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vadash/mpv-lazy-en/65794dd72db059cf98eab45cf2b242928a04ed67/vs-plugins/models/rife_v2/rife_v4.51.onnx -------------------------------------------------------------------------------- /vs-plugins/models/rife_v2/rife_v4.52.onnx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vadash/mpv-lazy-en/65794dd72db059cf98eab45cf2b242928a04ed67/vs-plugins/models/rife_v2/rife_v4.52.onnx -------------------------------------------------------------------------------- /vsmlrt.py: -------------------------------------------------------------------------------- 1 | __version__ = "3.20.4" 2 | 3 | __all__ = [ 4 | "Backend", "BackendV2", 5 | "Waifu2x", "Waifu2xModel", 6 | "DPIR", "DPIRModel", 7 | "RealESRGAN", "RealESRGANModel", 8 | "RealESRGANv2", "RealESRGANv2Model", 9 | "CUGAN", 10 | "RIFE", "RIFEModel", "RIFEMerge", 11 | "SAFA", "SAFAModel", "SAFAAdaptiveMode", 12 | "inference" 13 | ] 14 | 15 | import copy 16 | from dataclasses import dataclass, field 17 | import enum 18 | from fractions import Fraction 19 | import math 20 | import os 21 | import os.path 22 | import platform 23 | import subprocess 24 | import sys 25 | import tempfile 26 | import time 27 | import typing 28 | import zlib 29 | 30 | import vapoursynth as vs 31 | from vapoursynth import core 32 | 33 | 34 | def get_plugins_path() -> str: 35 | path = b"" 36 | 37 | try: 38 | path = core.ov.Version()["path"] 39 | except AttributeError: 40 | try: 41 | path = core.ort.Version()["path"] 42 | except AttributeError: 43 | try: 44 | path = core.ncnn.Version()["path"] 45 | except AttributeError: 46 | try: 47 | path = core.trt.Version()["path"] 48 | except AttributeError: 49 | path = 
core.migx.Version()["path"] 50 | 51 | assert path != b"" 52 | 53 | return os.path.dirname(path).decode() 54 | 55 | plugins_path: str = get_plugins_path() 56 | trtexec_path: str = os.path.join(plugins_path, "vsmlrt-cuda", "trtexec") 57 | migraphx_driver_path: str = os.path.join(plugins_path, "vsmlrt-hip", "migraphx-driver") 58 | models_path: str = os.path.join(plugins_path, "models") 59 | 60 | 61 | class Backend: 62 | @dataclass(frozen=False) 63 | class ORT_CPU: 64 | """ backend for cpus """ 65 | 66 | num_streams: int = 1 67 | verbosity: int = 2 68 | fp16: bool = False 69 | fp16_blacklist_ops: typing.Optional[typing.Sequence[str]] = None 70 | 71 | # internal backend attributes 72 | supports_onnx_serialization: bool = True 73 | 74 | @dataclass(frozen=False) 75 | class ORT_CUDA: 76 | """ backend for nvidia gpus 77 | 78 | basic performance tuning: 79 | set fp16 = True (on RTX GPUs) 80 | """ 81 | 82 | device_id: int = 0 83 | cudnn_benchmark: bool = True 84 | num_streams: int = 1 85 | verbosity: int = 2 86 | fp16: bool = False 87 | use_cuda_graph: bool = False # preview, not supported by all models 88 | fp16_blacklist_ops: typing.Optional[typing.Sequence[str]] = None 89 | 90 | # internal backend attributes 91 | supports_onnx_serialization: bool = True 92 | 93 | @dataclass(frozen=False) 94 | class OV_CPU: 95 | """ backend for x86 cpus 96 | 97 | basic performance tuning: 98 | set bf16 = True (on Zen4) 99 | increase num_streams 100 | """ 101 | 102 | fp16: bool = False 103 | num_streams: typing.Union[int, str] = 1 104 | bind_thread: bool = True 105 | fp16_blacklist_ops: typing.Optional[typing.Sequence[str]] = None 106 | bf16: bool = False 107 | num_threads: int = 0 108 | 109 | # internal backend attributes 110 | supports_onnx_serialization: bool = True 111 | 112 | @dataclass(frozen=False) 113 | class TRT: 114 | """ backend for nvidia gpus 115 | 116 | basic performance tuning: 117 | set fp16 = True (on RTX GPUs) 118 | increase num_streams 119 | increase workspace 120 | set use_cuda_graph = True 121 | """ 122 | 123 | max_shapes: typing.Optional[typing.Tuple[int, int]] = None 124 | opt_shapes: typing.Optional[typing.Tuple[int, int]] = None 125 | fp16: bool = False 126 | device_id: int = 0 127 | workspace: typing.Optional[int] = None 128 | verbose: bool = False 129 | use_cuda_graph: bool = False 130 | num_streams: int = 1 131 | use_cublas: bool = False # cuBLAS + cuBLASLt 132 | static_shape: bool = True 133 | tf32: bool = False 134 | log: bool = True 135 | 136 | # as of TensorRT 8.4, it can be turned off without performance penalty in most cases 137 | use_cudnn: bool = False # changed to False since vsmlrt.vpy 3.16 138 | use_edge_mask_convolutions: bool = True 139 | use_jit_convolutions: bool = True 140 | heuristic: bool = False # only supported on Ampere+ with TensorRT 8.5+ 141 | output_format: int = 0 # 0: fp32, 1: fp16 142 | min_shapes: typing.Tuple[int, int] = (0, 0) 143 | faster_dynamic_shapes: bool = True 144 | force_fp16: bool = False 145 | builder_optimization_level: int = 3 146 | max_aux_streams: typing.Optional[int] = None 147 | short_path: typing.Optional[bool] = None # True on Windows by default, False otherwise 148 | bf16: bool = False 149 | custom_env: typing.Dict[str, str] = field(default_factory=lambda: {}) 150 | custom_args: typing.List[str] = field(default_factory=lambda: []) 151 | 152 | # internal backend attributes 153 | supports_onnx_serialization: bool = False 154 | 155 | @dataclass(frozen=False) 156 | class OV_GPU: 157 | """ backend for nvidia gpus 158 | 159 | basic performance 
tuning: 160 | set fp16 = True 161 | increase num_streams 162 | """ 163 | 164 | fp16: bool = False 165 | num_streams: typing.Union[int, str] = 1 166 | device_id: int = 0 167 | fp16_blacklist_ops: typing.Optional[typing.Sequence[str]] = None 168 | 169 | # internal backend attributes 170 | supports_onnx_serialization: bool = True 171 | 172 | @dataclass(frozen=False) 173 | class NCNN_VK: 174 | """ backend for vulkan devices 175 | 176 | basic performance tuning: 177 | set fp16 = True (on modern GPUs) 178 | increase num_streams 179 | """ 180 | 181 | fp16: bool = False 182 | device_id: int = 0 183 | num_streams: int = 1 184 | 185 | # internal backend attributes 186 | supports_onnx_serialization: bool = True 187 | 188 | @dataclass(frozen=False) 189 | class ORT_DML: 190 | """ backend for directml (d3d12) devices """ 191 | 192 | device_id: int = 0 193 | num_streams: int = 1 194 | verbosity: int = 2 195 | fp16: bool = False 196 | fp16_blacklist_ops: typing.Optional[typing.Sequence[str]] = None 197 | 198 | # internal backend attributes 199 | supports_onnx_serialization: bool = True 200 | 201 | @dataclass(frozen=False) 202 | class MIGX: 203 | """ backend for amd gpus 204 | 205 | basic performance tuning: 206 | set fp16 = True 207 | """ 208 | 209 | device_id: int = 0 210 | fp16: bool = False 211 | opt_shapes: typing.Optional[typing.Tuple[int, int]] = None 212 | fast_math: bool = True 213 | exhaustive_tune: bool = False 214 | 215 | short_path: typing.Optional[bool] = None # True on Windows by default, False otherwise 216 | custom_env: typing.Dict[str, str] = field(default_factory=lambda: {}) 217 | custom_args: typing.List[str] = field(default_factory=lambda: []) 218 | 219 | # internal backend attributes 220 | supports_onnx_serialization: bool = False 221 | 222 | @dataclass(frozen=False) 223 | class OV_NPU: 224 | """ backend for intel npus 225 | """ 226 | 227 | # internal backend attributes 228 | supports_onnx_serialization: bool = True 229 | 230 | 231 | backendT = typing.Union[ 232 | Backend.OV_CPU, 233 | Backend.ORT_CPU, 234 | Backend.ORT_CUDA, 235 | Backend.TRT, 236 | Backend.OV_GPU, 237 | Backend.NCNN_VK, 238 | Backend.ORT_DML, 239 | Backend.MIGX, 240 | Backend.OV_NPU, 241 | ] 242 | 243 | 244 | fallback_backend: typing.Optional[backendT] = None 245 | 246 | 247 | @enum.unique 248 | class Waifu2xModel(enum.IntEnum): 249 | anime_style_art = 0 250 | anime_style_art_rgb = 1 251 | photo = 2 252 | upconv_7_anime_style_art_rgb = 3 253 | upconv_7_photo = 4 254 | upresnet10 = 5 255 | cunet = 6 256 | swin_unet_art = 7 257 | swin_unet_photo = 8 # 20230329 258 | swin_unet_photo_v2 = 9 # 20230407 259 | swin_unet_art_scan = 10 # 20230504 260 | 261 | 262 | def Waifu2x( 263 | clip: vs.VideoNode, 264 | noise: typing.Literal[-1, 0, 1, 2, 3] = -1, 265 | scale: typing.Literal[1, 2, 4] = 2, 266 | tiles: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 267 | tilesize: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 268 | overlap: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 269 | model: Waifu2xModel = Waifu2xModel.cunet, 270 | backend: backendT = Backend.OV_CPU(), 271 | preprocess: bool = True 272 | ) -> vs.VideoNode: 273 | 274 | func_name = "vsmlrt.Waifu2x" 275 | 276 | if not isinstance(clip, vs.VideoNode): 277 | raise TypeError(f'{func_name}: "clip" must be a clip!') 278 | 279 | if clip.format.sample_type != vs.FLOAT or clip.format.bits_per_sample not in [16, 32]: 280 | raise ValueError(f"{func_name}: only constant format 16/32 bit float input supported") 281 | 
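    # An illustrative preparation sketch (assumption, not part of the original file;
    # "src" stands for any YUV source clip): the checks above and below expect a
    # constant-format 16/32-bit float clip, RGB for most models (GRAY for
    # anime_style_art), e.g.
    #
    #   rgb = core.resize.Bicubic(src, format=vs.RGBS, matrix_in_s="709")
    #   out = Waifu2x(rgb, noise=1, scale=2, model=Waifu2xModel.cunet,
    #                 backend=Backend.TRT(fp16=True))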
282 | if not isinstance(noise, int) or noise not in range(-1, 4): 283 | raise ValueError(f'{func_name}: "noise" must be -1, 0, 1, 2, or 3') 284 | 285 | if not isinstance(scale, int) or scale not in (1, 2, 4): 286 | raise ValueError(f'{func_name}: "scale" must be 1, 2 or 4') 287 | 288 | if not isinstance(model, int) or model not in Waifu2xModel.__members__.values(): 289 | raise ValueError(f'{func_name}: invalid "model"') 290 | 291 | if model == 0 and noise == 0: 292 | raise ValueError( 293 | f'{func_name}: "anime_style_art" model' 294 | ' does not support noise reduction level 0' 295 | ) 296 | 297 | if model in range(7) and scale not in (1, 2): 298 | raise ValueError(f'{func_name}: "scale" must be 1 or 2') 299 | 300 | if model == 0: 301 | if clip.format.color_family != vs.GRAY: 302 | raise ValueError(f'{func_name}: "clip" must be of GRAY color family') 303 | elif clip.format.color_family != vs.RGB: 304 | raise ValueError(f'{func_name}: "clip" must be of RGB color family') 305 | 306 | if overlap is None: 307 | overlap_w = overlap_h = [8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4][model] 308 | elif isinstance(overlap, int): 309 | overlap_w = overlap_h = overlap 310 | else: 311 | overlap_w, overlap_h = overlap 312 | 313 | if model == 6: 314 | multiple = 4 315 | else: 316 | multiple = 1 317 | 318 | width, height = clip.width, clip.height 319 | if preprocess and model in (0, 1, 2): 320 | # emulating cv2.resize(interpolation=cv2.INTER_CUBIC) 321 | clip = core.resize.Bicubic( 322 | clip, 323 | width * 2, height * 2, 324 | filter_param_a=0, filter_param_b=0.75 325 | ) 326 | 327 | (tile_w, tile_h), (overlap_w, overlap_h) = calc_tilesize( 328 | tiles=tiles, tilesize=tilesize, 329 | width=clip.width, height=clip.height, 330 | multiple=multiple, 331 | overlap_w=overlap_w, overlap_h=overlap_h 332 | ) 333 | 334 | if tile_w % multiple != 0 or tile_h % multiple != 0: 335 | raise ValueError( 336 | f'{func_name}: tile size must be divisible by {multiple} ({tile_w}, {tile_h})' 337 | ) 338 | 339 | backend = init_backend( 340 | backend=backend, 341 | trt_opt_shapes=(tile_w, tile_h) 342 | ) 343 | 344 | folder_path = os.path.join( 345 | models_path, 346 | "waifu2x", 347 | tuple(Waifu2xModel.__members__)[model] 348 | ) 349 | 350 | if model in (0, 1, 2): 351 | if noise == -1: 352 | model_name = "scale2.0x_model.onnx" 353 | else: 354 | model_name = f"noise{noise}_model.onnx" 355 | elif model in (3, 4, 5): 356 | if noise == -1: 357 | model_name = "scale2.0x_model.onnx" 358 | else: 359 | model_name = f"noise{noise}_scale2.0x_model.onnx" 360 | elif model == 6: 361 | if scale == 1: 362 | scale_name = "" 363 | else: 364 | scale_name = "scale2.0x_" 365 | 366 | if noise == -1: 367 | model_name = "scale2.0x_model.onnx" 368 | else: 369 | model_name = f"noise{noise}_{scale_name}model.onnx" 370 | elif model == 7: 371 | if scale == 1: 372 | scale_name = "" 373 | elif scale == 2: 374 | scale_name = "scale2x" 375 | elif scale == 4: 376 | scale_name = "scale4x" 377 | 378 | if noise == -1: 379 | if scale == 1: 380 | raise ValueError("swin_unet model for \"noise == -1\" and \"scale == 1\" does not exist") 381 | 382 | model_name = f"{scale_name}.onnx" 383 | else: 384 | if scale == 1: 385 | model_name = f"noise{noise}.onnx" 386 | else: 387 | model_name = f"noise{noise}_{scale_name}.onnx" 388 | elif model in (8, 9, 10): 389 | scale_name = "scale4x" 390 | if noise == -1: 391 | model_name = f"{scale_name}.onnx" 392 | else: 393 | model_name = f"noise{noise}_{scale_name}.onnx" 394 | else: 395 | raise ValueError(f"{func_name}: inavlid model {model}") 
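    # Worked example of the name selection above (illustrative only): with
    # model=Waifu2xModel.cunet (6), noise=1 and scale=2, scale_name is "scale2.0x_",
    # so model_name becomes "noise1_scale2.0x_model.onnx", loaded from
    # <models_path>/waifu2x/cunet by the os.path.join below.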
396 | 397 | network_path = os.path.join(folder_path, model_name) 398 | 399 | clip = inference_with_fallback( 400 | clips=[clip], network_path=network_path, 401 | overlap=(overlap_w, overlap_h), tilesize=(tile_w, tile_h), 402 | backend=backend 403 | ) 404 | 405 | if model in range(8) and scale == 1 and clip.width // width == 2: 406 | # emulating cv2.resize(interpolation=cv2.INTER_CUBIC) 407 | # cr: @AkarinVS 408 | 409 | clip = fmtc_resample( 410 | clip, scale=0.5, 411 | kernel="impulse", impulse=[-0.1875, 1.375, -0.1875], 412 | kovrspl=2 413 | ) 414 | 415 | elif model in (8, 9, 10) and scale != 4: 416 | clip = core.resize.Bicubic( 417 | clip, clip.width * scale // 4, clip.height * scale // 4, 418 | filter_param_a=0, filter_param_b=0.5 419 | ) 420 | 421 | return clip 422 | 423 | 424 | @enum.unique 425 | class DPIRModel(enum.IntEnum): 426 | drunet_gray = 0 427 | drunet_color = 1 428 | drunet_deblocking_grayscale = 2 429 | drunet_deblocking_color = 3 430 | 431 | 432 | def DPIR( 433 | clip: vs.VideoNode, 434 | strength: typing.Optional[typing.Union[typing.SupportsFloat, vs.VideoNode]], 435 | tiles: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 436 | tilesize: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 437 | overlap: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 438 | model: DPIRModel = DPIRModel.drunet_gray, 439 | backend: backendT = Backend.OV_CPU() 440 | ) -> vs.VideoNode: 441 | 442 | func_name = "vsmlrt.DPIR" 443 | 444 | if not isinstance(clip, vs.VideoNode): 445 | raise TypeError(f'{func_name}: "clip" must be a clip!') 446 | 447 | if clip.format.sample_type != vs.FLOAT or clip.format.bits_per_sample not in [16, 32]: 448 | raise ValueError(f"{func_name}: only constant format 16/32 bit float input supported") 449 | 450 | if not isinstance(model, int) or model not in DPIRModel.__members__.values(): 451 | raise ValueError(f'{func_name}: invalid "model"') 452 | 453 | if model in [0, 2] and clip.format.color_family != vs.GRAY: 454 | raise ValueError(f'{func_name}: "clip" must be of GRAY color family') 455 | elif model in [1, 3] and clip.format.color_family != vs.RGB: 456 | raise ValueError(f'{func_name}: "clip" must be of RGB color family') 457 | 458 | if strength is None: 459 | strength = 5.0 460 | 461 | gray_format = vs.GRAYS if clip.format.bits_per_sample == 32 else vs.GRAYH 462 | 463 | if isinstance(strength, vs.VideoNode): 464 | strength = typing.cast(vs.VideoNode, strength) 465 | if strength.format.color_family != vs.GRAY: 466 | raise ValueError(f'{func_name}: "strength" must be of GRAY color family') 467 | if strength.width != clip.width or strength.height != clip.height: 468 | raise ValueError(f'{func_name}: "strength" must be of the same size as "clip"') 469 | if strength.num_frames != clip.num_frames: 470 | raise ValueError(f'{func_name}: "strength" must be of the same length as "clip"') 471 | 472 | strength = core.std.Expr(strength, "x 255 /", format=gray_format) 473 | else: 474 | try: 475 | strength = float(strength) 476 | except TypeError as e: 477 | raise TypeError(f'{func_name}: "strength" must be a float or a clip') from e 478 | 479 | strength = core.std.BlankClip(clip, format=gray_format, color=strength / 255, keep=True) 480 | 481 | if overlap is None: 482 | overlap_w = overlap_h = 0 483 | elif isinstance(overlap, int): 484 | overlap_w = overlap_h = overlap 485 | else: 486 | overlap_w, overlap_h = overlap 487 | 488 | multiple = 8 489 | 490 | (tile_w, tile_h), (overlap_w, overlap_h) = calc_tilesize( 491 | 
tiles=tiles, tilesize=tilesize, 492 | width=clip.width, height=clip.height, 493 | multiple=multiple, 494 | overlap_w=overlap_w, overlap_h=overlap_h 495 | ) 496 | 497 | if tile_w % multiple != 0 or tile_h % multiple != 0: 498 | raise ValueError( 499 | f'{func_name}: tile size must be divisible by {multiple} ({tile_w}, {tile_h})' 500 | ) 501 | 502 | backend = init_backend( 503 | backend=backend, 504 | trt_opt_shapes=(tile_w, tile_h) 505 | ) 506 | 507 | network_path = os.path.join( 508 | models_path, 509 | "dpir", 510 | f"{tuple(DPIRModel.__members__)[model]}.onnx" 511 | ) 512 | 513 | clip = inference_with_fallback( 514 | clips=[clip, strength], network_path=network_path, 515 | overlap=(overlap_w, overlap_h), tilesize=(tile_w, tile_h), 516 | backend=backend 517 | ) 518 | 519 | return clip 520 | 521 | 522 | @enum.unique 523 | class RealESRGANModel(enum.IntEnum): 524 | # v2 525 | animevideo_xsx2 = 0 526 | animevideo_xsx4 = 1 527 | # v3 528 | animevideov3 = 2 # 4x 529 | # contributed: janaiV2(2x) https://github.com/the-database/mpv-upscale-2x_animejanai/releases/tag/2.0.0 maintainer: hooke007 530 | animejanaiV2L1 = 5005 531 | animejanaiV2L2 = 5006 532 | animejanaiV2L3 = 5007 533 | # contributed: janaiV3-hd(2x) https://github.com/the-database/mpv-upscale-2x_animejanai/releases/tag/3.0.0 maintainer: hooke007 534 | animejanaiV3_HD_L1 = 5008 535 | animejanaiV3_HD_L2 = 5009 536 | animejanaiV3_HD_L3 = 5010 537 | 538 | RealESRGANv2Model = RealESRGANModel 539 | 540 | 541 | def RealESRGAN( 542 | clip: vs.VideoNode, 543 | tiles: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 544 | tilesize: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 545 | overlap: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 546 | model: RealESRGANv2Model = RealESRGANv2Model.animevideo_xsx2, 547 | backend: backendT = Backend.OV_CPU(), 548 | scale: typing.Optional[float] = None 549 | ) -> vs.VideoNode: 550 | 551 | func_name = "vsmlrt.RealESRGAN" 552 | 553 | if not isinstance(clip, vs.VideoNode): 554 | raise TypeError(f'{func_name}: "clip" must be a clip!') 555 | 556 | if clip.format.sample_type != vs.FLOAT or clip.format.bits_per_sample not in [16, 32]: 557 | raise ValueError(f"{func_name}: only constant format 16/32 bit float input supported") 558 | 559 | if clip.format.color_family != vs.RGB: 560 | raise ValueError(f'{func_name}: "clip" must be of RGB color family') 561 | 562 | if not isinstance(model, int) or model not in RealESRGANv2Model.__members__.values(): 563 | raise ValueError(f'{func_name}: invalid "model"') 564 | 565 | if overlap is None: 566 | overlap_w = overlap_h = 8 567 | elif isinstance(overlap, int): 568 | overlap_w = overlap_h = overlap 569 | else: 570 | overlap_w, overlap_h = overlap 571 | 572 | multiple = 1 573 | 574 | (tile_w, tile_h), (overlap_w, overlap_h) = calc_tilesize( 575 | tiles=tiles, tilesize=tilesize, 576 | width=clip.width, height=clip.height, 577 | multiple=multiple, 578 | overlap_w=overlap_w, overlap_h=overlap_h 579 | ) 580 | 581 | backend = init_backend( 582 | backend=backend, 583 | trt_opt_shapes=(tile_w, tile_h) 584 | ) 585 | 586 | if model in [0, 1]: 587 | network_path = os.path.join( 588 | models_path, 589 | "RealESRGANv2", 590 | f"RealESRGANv2-{tuple(RealESRGANv2Model.__members__)[model]}.onnx".replace('_', '-') 591 | ) 592 | elif model == 2: 593 | network_path = os.path.join( 594 | models_path, 595 | "RealESRGANv2", 596 | "realesr-animevideov3.onnx" 597 | ) 598 | elif model in [5005, 5006, 5007, 5008, 5009, 5010]: 599 | network_path 
= os.path.join( 600 | models_path, 601 | "RealESRGANv2", 602 | f"{RealESRGANv2Model(model).name}.onnx".replace('_', '-') 603 | ) 604 | 605 | clip_org = clip 606 | clip = inference_with_fallback( 607 | clips=[clip], network_path=network_path, 608 | overlap=(overlap_w, overlap_h), tilesize=(tile_w, tile_h), 609 | backend=backend 610 | ) 611 | 612 | if scale is not None: 613 | scale_h = clip.width // clip_org.width 614 | scale_v = clip.height // clip_org.height 615 | 616 | assert scale_h == scale_v 617 | 618 | if scale != scale_h: 619 | rescale = scale / scale_h 620 | 621 | if rescale > 1: 622 | clip = core.resize.Lanczos(clip, int(clip_org.width * scale), int(clip_org.height * scale), filter_param_a=4) 623 | else: 624 | clip = fmtc_resample(clip, scale=rescale, kernel="lanczos", taps=4, fh=1/rescale, fv=1/rescale) 625 | 626 | return clip 627 | 628 | RealESRGANv2 = RealESRGAN 629 | 630 | 631 | def CUGAN( 632 | clip: vs.VideoNode, 633 | noise: typing.Literal[-1, 0, 1, 2, 3] = -1, 634 | scale: typing.Literal[2, 3, 4] = 2, 635 | tiles: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 636 | tilesize: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 637 | overlap: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 638 | backend: backendT = Backend.OV_CPU(), 639 | alpha: float = 1.0, 640 | version: typing.Literal[1, 2] = 1, # 1: legacy, 2: pro 641 | conformance: bool = True # currently specifies dynamic range compression for cugan-pro 642 | ) -> vs.VideoNode: 643 | """ 644 | denoising strength: 0 < -1 < 1 < 2 < 3 645 | 646 | version: (1 or 2) 647 | 1 -> legacy, 648 | 2 -> pro (only models for "noise" in [-1, 0, 3] and "scale" in [2, 3] are published currently) 649 | """ 650 | 651 | func_name = "vsmlrt.CUGAN" 652 | 653 | if not isinstance(clip, vs.VideoNode): 654 | raise TypeError(f'{func_name}: "clip" must be a clip!') 655 | 656 | if clip.format.sample_type != vs.FLOAT or clip.format.bits_per_sample not in [16, 32]: 657 | raise ValueError(f"{func_name}: only constant format 16/32 bit float input supported") 658 | 659 | if not isinstance(noise, int) or noise not in range(-1, 4): 660 | raise ValueError(f'{func_name}: "noise" must be -1, 0, 1, 2, or 3') 661 | 662 | if not isinstance(scale, int) or scale not in (2, 3, 4): 663 | raise ValueError(f'{func_name}: "scale" must be 2, 3 or 4') 664 | 665 | if scale != 2 and noise in [1, 2]: 666 | raise ValueError( 667 | f'{func_name}: "scale={scale}" model' 668 | f' does not support noise reduction level {noise}' 669 | ) 670 | 671 | if clip.format.color_family != vs.RGB: 672 | raise ValueError(f'{func_name}: "clip" must be of RGB color family') 673 | 674 | if overlap is None: 675 | overlap_w = overlap_h = 4 676 | elif isinstance(overlap, int): 677 | overlap_w = overlap_h = overlap 678 | else: 679 | overlap_w, overlap_h = overlap 680 | 681 | multiple = 2 682 | 683 | (tile_w, tile_h), (overlap_w, overlap_h) = calc_tilesize( 684 | tiles=tiles, tilesize=tilesize, 685 | width=clip.width, height=clip.height, 686 | multiple=multiple, 687 | overlap_w=overlap_w, overlap_h=overlap_h 688 | ) 689 | 690 | if tile_w % multiple != 0 or tile_h % multiple != 0: 691 | raise ValueError( 692 | f'{func_name}: tile size must be divisible by {multiple} ({tile_w}, {tile_h})' 693 | ) 694 | 695 | backend = init_backend( 696 | backend=backend, 697 | trt_opt_shapes=(tile_w, tile_h) 698 | ) 699 | 700 | folder_path = os.path.join(models_path, "cugan") 701 | 702 | if version == 1: 703 | if noise == -1: 704 | model_name = 
f"up{scale}x-latest-no-denoise.onnx" 705 | elif noise == 0: 706 | model_name = f"up{scale}x-latest-conservative.onnx" 707 | else: 708 | model_name = f"up{scale}x-latest-denoise{noise}x.onnx" 709 | elif version == 2: 710 | if noise == -1: 711 | model_name = f"pro-no-denoise3x-up{scale}x.onnx" 712 | elif noise == 0: 713 | model_name = f"pro-conservative-up{scale}x.onnx" 714 | else: 715 | model_name = f"pro-denoise{noise}x-up{scale}x.onnx" 716 | else: 717 | raise ValueError(f'{func_name}: unknown version ({version}), must be 1 (legacy) or 2 (pro)') 718 | 719 | network_path = os.path.join(folder_path, model_name) 720 | 721 | # https://github.com/bilibili/ailab/blob/978f3be762183d7fa79525f29a43e65afb995f6b/Real-CUGAN/upcunet_v3.py#L207 722 | # mutates network_path 723 | if alpha != 1.0: 724 | alpha = float(alpha) 725 | 726 | import numpy as np 727 | import onnx 728 | from onnx import numpy_helper 729 | 730 | model = onnx.load(network_path) 731 | 732 | for idx, node in reversed(list(enumerate(model.graph.node))): 733 | if node.op_type == "ConvTranspose": 734 | break 735 | 736 | upstream_name = node.input[0] 737 | downstream_name = node.input[0] + "_mul" 738 | node.input[0] = downstream_name 739 | 740 | alpha_array = np.array(alpha, dtype=np.float32) 741 | alpha_tensor = numpy_helper.from_array(alpha_array) 742 | alpha_constant = onnx.helper.make_node( 743 | "Constant", 744 | inputs=[], 745 | outputs=["alpha"], 746 | value=alpha_tensor 747 | ) 748 | model.graph.node.insert(idx, alpha_constant) 749 | 750 | mul_node = onnx.helper.make_node( 751 | "Mul", 752 | inputs=[upstream_name, "alpha"], 753 | outputs=[downstream_name] 754 | ) 755 | model.graph.node.insert(idx+1, mul_node) 756 | 757 | if backend.supports_onnx_serialization: 758 | if conformance and version == 2: 759 | clip = core.std.Expr(clip, "x 0.7 * 0.15 +") 760 | 761 | clip = inference_with_fallback( 762 | clips=[clip], network_path=model.SerializeToString(), 763 | overlap=(overlap_w, overlap_h), tilesize=(tile_w, tile_h), 764 | backend=backend, path_is_serialization=True 765 | ) 766 | 767 | if conformance and version == 2: 768 | clip = core.std.Expr(clip, "x 0.15 - 0.7 /") 769 | 770 | return clip 771 | 772 | network_path = f"{network_path}_alpha{alpha!r}.onnx" 773 | onnx.save(model, network_path) 774 | 775 | # https://github.com/bilibili/ailab/blob/e102bef22384c629f82552dbec3d6b5bab125639/Real-CUGAN/upcunet_v3.py#L1275-L1276 776 | if conformance and version == 2: 777 | clip = core.std.Expr(clip, "x 0.7 * 0.15 +") 778 | 779 | clip = inference_with_fallback( 780 | clips=[clip], network_path=network_path, 781 | overlap=(overlap_w, overlap_h), tilesize=(tile_w, tile_h), 782 | backend=backend 783 | ) 784 | 785 | # https://github.com/bilibili/ailab/blob/e102bef22384c629f82552dbec3d6b5bab125639/Real-CUGAN/upcunet_v3.py#L269 786 | if conformance and version == 2: 787 | clip = core.std.Expr(clip, "x 0.15 - 0.7 /") 788 | 789 | return clip 790 | 791 | 792 | def get_rife_input(clip: vs.VideoNode) -> typing.List[vs.VideoNode]: 793 | assert clip.format.sample_type == vs.FLOAT 794 | gray_format = vs.GRAYS if clip.format.bits_per_sample == 32 else vs.GRAYH 795 | 796 | 797 | if (hasattr(core, 'akarin') and 798 | b"width" in core.akarin.Version()["expr_features"] and 799 | b"height" in core.akarin.Version()["expr_features"] 800 | ): 801 | if b"fp16" in core.akarin.Version()["expr_features"]: 802 | empty = clip.std.BlankClip(format=gray_format, length=1) 803 | else: 804 | empty = clip.std.BlankClip(format=vs.GRAYS, length=1) 805 | 806 | horizontal = 
bits_as(core.akarin.Expr(empty, 'X 2 * width 1 - / 1 -'), clip) 807 | vertical = bits_as(core.akarin.Expr(empty, 'Y 2 * height 1 - / 1 -'), clip) 808 | else: 809 | empty = clip.std.BlankClip(format=vs.GRAYS, length=1) 810 | 811 | from functools import partial 812 | 813 | def meshgrid_core(n: int, f: vs.VideoFrame, horizontal: bool) -> vs.VideoFrame: 814 | fout = f.copy() 815 | 816 | is_api4 = hasattr(vs, "__api_version__") and vs.__api_version__.api_major == 4 817 | if is_api4: 818 | mem_view = fout[0] 819 | else: 820 | mem_view = fout.get_write_array(0) 821 | 822 | height, width = mem_view.shape 823 | 824 | if horizontal: 825 | for i in range(height): 826 | for j in range(width): 827 | mem_view[i, j] = 2 * j / (width - 1) - 1 828 | else: 829 | for i in range(height): 830 | for j in range(width): 831 | mem_view[i, j] = 2 * i / (height - 1) - 1 832 | 833 | return fout 834 | 835 | horizontal = bits_as(core.std.ModifyFrame(empty, empty, partial(meshgrid_core, horizontal=True)), clip) 836 | vertical = bits_as(core.std.ModifyFrame(empty, empty, partial(meshgrid_core, horizontal=False)), clip) 837 | 838 | horizontal = horizontal.std.Loop(clip.num_frames) 839 | vertical = vertical.std.Loop(clip.num_frames) 840 | 841 | multiplier_h = clip.std.BlankClip(format=gray_format, color=2/(clip.width-1), keep=True) 842 | 843 | multiplier_w = clip.std.BlankClip(format=gray_format, color=2/(clip.height-1), keep=True) 844 | 845 | return [horizontal, vertical, multiplier_h, multiplier_w] 846 | 847 | 848 | @enum.unique 849 | class RIFEModel(enum.IntEnum): 850 | """ 851 | Starting from RIFE v4.12 lite, this interface does not provide forward compatiblity in enum values. 852 | """ 853 | 854 | v4_0 = 40 855 | v4_2 = 42 856 | v4_3 = 43 857 | v4_4 = 44 858 | v4_5 = 45 859 | v4_6 = 46 860 | v4_7 = 47 861 | v4_8 = 48 862 | v4_9 = 49 863 | v4_10 = 410 864 | v4_11 = 411 865 | v4_12 = 412 866 | v4_12_lite = 4121 867 | v4_13 = 413 868 | v4_13_lite = 4131 869 | v4_14 = 414 870 | v4_14_lite = 4141 871 | v4_15 = 415 872 | v4_15_lite = 4151 873 | v4_16_lite = 4161 874 | v4_50 = 450 875 | v4_51 = 451 876 | v4_52 = 452 877 | 878 | 879 | def RIFEMerge( 880 | clipa: vs.VideoNode, 881 | clipb: vs.VideoNode, 882 | mask: vs.VideoNode, 883 | scale: float = 1.0, 884 | tiles: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 885 | tilesize: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 886 | overlap: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 887 | model: RIFEModel = RIFEModel.v4_50, 888 | backend: backendT = Backend.OV_CPU(), 889 | ensemble: bool = False, 890 | _implementation: typing.Optional[typing.Literal[1, 2]] = None 891 | ) -> vs.VideoNode: 892 | """ temporal MaskedMerge-like interface for the RIFE model 893 | 894 | Its semantics is similar to core.std.MaskedMerge(clipa, clipb, mask, first_plane=True), 895 | except that it merges the two clips in the time domain and you specify the "mask" based 896 | on the time point of the resulting clip (range (0,1)) between the two clips. 
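
    A minimal usage sketch (illustrative, not from the original source; assumes
    32-bit float RGBS inputs): a constant mask of 0.5 requests the frame halfway
    between clipa and clipb:

        mid = RIFEMerge(
            clipa, clipb,
            mask=clipa.std.BlankClip(format=vs.GRAYS, color=0.5),
            backend=Backend.TRT(fp16=True)
        )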
897 | """ 898 | 899 | func_name = "vsmlrt.RIFEMerge" 900 | 901 | for clip in (clipa, clipb, mask): 902 | if not isinstance(clip, vs.VideoNode): 903 | raise TypeError(f'{func_name}: clip must be a clip!') 904 | 905 | if clip.format.sample_type != vs.FLOAT or clip.format.bits_per_sample not in [16, 32]: 906 | raise ValueError(f"{func_name}: only constant format 16/32 bit float input supported") 907 | 908 | for clip in (clipa, clipb): 909 | if clip.format.color_family != vs.RGB: 910 | raise ValueError(f'{func_name}: "clipa" / "clipb" must be of RGB color family') 911 | 912 | if clip.width != mask.width or clip.height != mask.height: 913 | raise ValueError(f'{func_name}: video dimensions mismatch') 914 | 915 | if clip.num_frames != mask.num_frames: 916 | raise ValueError(f'{func_name}: number of frames mismatch') 917 | 918 | if mask.format.color_family != vs.GRAY: 919 | raise ValueError(f'{func_name}: "mask" must be of GRAY color family') 920 | 921 | if tiles is not None or tilesize is not None or overlap is not None: 922 | raise ValueError(f'{func_name}: tiling is not supported') 923 | 924 | if overlap is None: 925 | overlap_w = overlap_h = 0 926 | elif isinstance(overlap, int): 927 | overlap_w = overlap_h = overlap 928 | else: 929 | overlap_w, overlap_h = overlap 930 | 931 | multiple_frac = 32 / Fraction(scale) 932 | if multiple_frac.denominator != 1: 933 | raise ValueError(f'{func_name}: (32 / Fraction(scale)) must be an integer') 934 | multiple = int(multiple_frac.numerator) 935 | scale = float(Fraction(scale)) 936 | 937 | model_major = int(str(int(model))[0]) 938 | model_minor = int(str(int(model))[1:3]) 939 | lite = "_lite" if len(str(int(model))) >= 4 else "" 940 | version = f"v{model_major}.{model_minor}{lite}{'_ensemble' if ensemble else ''}" 941 | 942 | if (model_major, model_minor) >= (4, 7) and scale != 1.0: 943 | raise ValueError("not supported") 944 | 945 | network_path = os.path.join( 946 | models_path, 947 | "rife_v2", 948 | f"rife_{version}.onnx" 949 | ) 950 | if _implementation == 2 and os.path.exists(network_path) and scale == 1.0: 951 | implementation_version = 2 952 | multiple = 1 # v2 implements internal padding 953 | clips = [clipa, clipb, mask] 954 | else: 955 | implementation_version = 1 956 | 957 | network_path = os.path.join( 958 | models_path, 959 | "rife", 960 | f"rife_{version}.onnx" 961 | ) 962 | 963 | clips = [clipa, clipb, mask, *get_rife_input(clipa)] 964 | 965 | (tile_w, tile_h), (overlap_w, overlap_h) = calc_tilesize( 966 | tiles=tiles, tilesize=tilesize, 967 | width=clip.width, height=clip.height, 968 | multiple=multiple, 969 | overlap_w=overlap_w, overlap_h=overlap_h 970 | ) 971 | 972 | if tile_w % multiple != 0 or tile_h % multiple != 0: 973 | raise ValueError( 974 | f'{func_name}: tile size must be divisible by {multiple} ({tile_w}, {tile_h})' 975 | ) 976 | 977 | backend = init_backend( 978 | backend=backend, 979 | trt_opt_shapes=(tile_w, tile_h) 980 | ) 981 | 982 | if implementation_version == 2: 983 | if isinstance(backend, Backend.TRT): 984 | # https://github.com/AmusementClub/vs-mlrt/issues/66#issuecomment-1791986979 985 | if (4, 0) <= (model_major, model_minor): 986 | if backend.force_fp16: 987 | backend.force_fp16 = False 988 | backend.fp16 = True 989 | 990 | backend.custom_args.extend([ 991 | "--precisionConstraints=obey", 992 | "--layerPrecisions=" + ( 993 | "/Cast_2:fp32,/Cast_3:fp32,/Cast_5:fp32,/Cast_7:fp32," 994 | "/Reciprocal:fp32,/Reciprocal_1:fp32," 995 | "/Mul:fp32,/Mul_1:fp32,/Mul_8:fp32,/Mul_10:fp32," 996 | "/Sub_5:fp32,/Sub_6:fp32," 
997 | # generated by TensorRT's onnx parser 998 | "ONNXTRT_Broadcast_236:fp32,ONNXTRT_Broadcast_238:fp32," 999 | "ONNXTRT_Broadcast_273:fp32,ONNXTRT_Broadcast_275:fp32," 1000 | # TensorRT 9.0 or later 1001 | "ONNXTRT_Broadcast_*:fp32" 1002 | ) 1003 | ]) 1004 | 1005 | if scale == 1.0: 1006 | return inference_with_fallback( 1007 | clips=clips, network_path=network_path, 1008 | overlap=(overlap_w, overlap_h), tilesize=(tile_w, tile_h), 1009 | backend=backend 1010 | ) 1011 | elif ensemble or implementation_version != 1: 1012 | raise ValueError(f'{func_name}: currently not supported') 1013 | else: 1014 | import onnx 1015 | from onnx.numpy_helper import from_array, to_array 1016 | 1017 | onnx_model = onnx.load(network_path) 1018 | 1019 | resize_counter = 0 1020 | for i in range(len(onnx_model.graph.node)): 1021 | node = onnx_model.graph.node[i] 1022 | if len(node.output) == 1 and node.op_type == "Constant" and node.output[0].startswith("onnx::Resize"): 1023 | resize_counter += 1 1024 | 1025 | array = to_array(node.attribute[0].t).copy() 1026 | if resize_counter % 3 == 2: 1027 | array[2:4] /= scale 1028 | else: 1029 | array[2:4] *= scale 1030 | onnx_model.graph.node[i].attribute[0].t.raw_data = from_array(array).raw_data 1031 | 1032 | if resize_counter != 11: 1033 | raise ValueError("invalid rife model") 1034 | 1035 | multiplier_counter = 0 1036 | for i in range(len(onnx_model.graph.node)): 1037 | node = onnx_model.graph.node[i] 1038 | if len(node.output) == 1 and node.op_type == "Constant" and node.output[0].startswith("onnx::Mul"): 1039 | multiplier_counter += 1 1040 | 1041 | array = to_array(node.attribute[0].t).copy() 1042 | if multiplier_counter % 2 == 1: 1043 | array /= scale 1044 | else: 1045 | array *= scale 1046 | onnx_model.graph.node[i].attribute[0].t.raw_data = from_array(array).raw_data 1047 | 1048 | if multiplier_counter != 7: 1049 | raise ValueError("invalid rife model") 1050 | 1051 | if backend.supports_onnx_serialization: 1052 | return inference_with_fallback( 1053 | clips=clips, network_path=onnx_model.SerializeToString(), 1054 | overlap=(overlap_w, overlap_h), tilesize=(tile_w, tile_h), 1055 | backend=backend, path_is_serialization=True 1056 | ) 1057 | else: 1058 | network_path = f"{network_path}_scale{scale!r}.onnx" 1059 | onnx.save(onnx_model, network_path) 1060 | 1061 | return inference_with_fallback( 1062 | clips=clips, network_path=network_path, 1063 | overlap=(overlap_w, overlap_h), tilesize=(tile_w, tile_h), 1064 | backend=backend 1065 | ) 1066 | 1067 | 1068 | def RIFE( 1069 | clip: vs.VideoNode, 1070 | multi: typing.Union[int, Fraction] = 2, 1071 | scale: float = 1.0, 1072 | tiles: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 1073 | tilesize: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 1074 | overlap: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 1075 | model: RIFEModel = RIFEModel.v4_4, 1076 | backend: backendT = Backend.OV_CPU(), 1077 | ensemble: bool = False, 1078 | video_player: bool = False, 1079 | _implementation: typing.Optional[typing.Literal[1, 2]] = None 1080 | ) -> vs.VideoNode: 1081 | """ RIFE: Real-Time Intermediate Flow Estimation for Video Frame Interpolation 1082 | 1083 | multi, scale is based on vs-rife. 1084 | 1085 | For the best results, you need to perform scene detection on the input clip 1086 | (e.g. misc.SCDetect, mv.SCDetection) before passing it to RIFE. 
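
    A minimal sketch of such a pipeline (illustrative only; core.misc.SCDetect is
    one of the scene-detection filters named above):

        clip = core.misc.SCDetect(clip, threshold=0.14)
        clip = RIFE(clip, multi=2, backend=Backend.TRT(fp16=True))
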
1087 | Also note that the quality of result is strongly dependent on high quality 1088 | scene detection and you might need to tweak the scene detection parameters 1089 | and/or filter to achieve the best quality. 1090 | 1091 | Args: 1092 | multi: Multiple of the frame counts, can be a fractions.Fraction. 1093 | Default: 2. 1094 | 1095 | scale: Controls the process resolution for optical flow model. 1096 | 32 / fractions.Fraction(scale) must be an integer. 1097 | scale=0.5 is recommended for 4K video. 1098 | 1099 | _implementation: (None, 1 or 2, experimental and maybe removed in the future) 1100 | Switch between different onnx implementation. 1101 | Implmementation will be selected based on internal heuristic if it is None. 1102 | """ 1103 | 1104 | func_name = "vsmlrt.RIFE" 1105 | 1106 | if not isinstance(clip, vs.VideoNode): 1107 | raise TypeError(f'{func_name}: "clip" must be a clip!') 1108 | 1109 | if clip.format.sample_type != vs.FLOAT or clip.format.bits_per_sample not in [16, 32]: 1110 | raise ValueError(f"{func_name}: only constant format 16/32 bit float input supported") 1111 | 1112 | if clip.format.color_family != vs.RGB: 1113 | raise ValueError(f'{func_name}: "clip" must be of RGB color family') 1114 | 1115 | if not isinstance(multi, (int, Fraction)): 1116 | raise TypeError(f'{func_name}: "multi" must be an integer or a fractions.Fraction!') 1117 | 1118 | if tiles is not None or tilesize is not None or overlap is not None: 1119 | raise ValueError(f'{func_name}: tiling is not supported') 1120 | 1121 | gray_format = vs.GRAYS if clip.format.bits_per_sample == 32 else vs.GRAYH 1122 | 1123 | if int(multi) == multi: 1124 | multi = int(multi) 1125 | 1126 | if multi < 2: 1127 | raise ValueError(f'{func_name}: RIFE: multi must be at least 2') 1128 | 1129 | initial = core.std.Interleave([clip] * (multi - 1)) 1130 | 1131 | terminal = clip.std.DuplicateFrames(frames=clip.num_frames - 1).std.Trim(first=1) 1132 | terminal = core.std.Interleave([terminal] * (multi - 1)) 1133 | 1134 | timepoint = core.std.Interleave([ 1135 | clip.std.BlankClip(format=gray_format, color=i/multi, length=1) 1136 | for i in range(1, multi) 1137 | ]).std.Loop(clip.num_frames) 1138 | 1139 | output0 = RIFEMerge( 1140 | clipa=initial, clipb=terminal, mask=timepoint, 1141 | scale=scale, tiles=tiles, tilesize=tilesize, overlap=overlap, 1142 | model=model, backend=backend, ensemble=ensemble, 1143 | _implementation=_implementation 1144 | ) 1145 | 1146 | clip = bits_as(clip, output0) 1147 | initial = core.std.Interleave([clip] * (multi - 1)) 1148 | 1149 | if hasattr(core, 'akarin') and hasattr(core.akarin, 'Select'): 1150 | output = core.akarin.Select([output0, initial], initial, 'x._SceneChangeNext 1 0 ?') 1151 | else: 1152 | def handler(n: int, f: vs.VideoFrame) -> vs.VideoNode: 1153 | if f.props.get('_SceneChangeNext'): 1154 | return initial 1155 | return output0 1156 | output = core.std.FrameEval(output0, handler, initial) 1157 | 1158 | if multi == 2: 1159 | res = core.std.Interleave([clip, output]) 1160 | else: 1161 | res = core.std.Interleave([ 1162 | clip, 1163 | *(output.std.SelectEvery(cycle=multi-1, offsets=i) for i in range(multi - 1)) 1164 | ]) 1165 | 1166 | if clip.fps_num != 0 and clip.fps_den != 0: 1167 | return res.std.AssumeFPS(fpsnum = clip.fps_num * multi, fpsden = clip.fps_den) 1168 | else: 1169 | return res 1170 | else: 1171 | if not hasattr(core, 'akarin') or \ 1172 | not hasattr(core.akarin, 'PropExpr') or \ 1173 | not hasattr(core.akarin, 'PickFrames'): 1174 | raise RuntimeError( 1175 | 'fractional 
multi requires plugin akarin ' 1176 | '(https://github.com/AkarinVS/vapoursynth-plugin/releases)' 1177 | ', version v0.96g or later.') 1178 | 1179 | if clip.fps_num == 0 or clip.fps_den == 0: 1180 | src_fps = Fraction(1) 1181 | else: 1182 | src_fps = clip.fps 1183 | 1184 | dst_fps = src_fps * multi 1185 | src_frames = clip.num_frames 1186 | dst_frames = min(int(src_frames * multi), 2 ** 31 - 1) 1187 | 1188 | duration_rel = src_fps / dst_fps 1189 | dst_duration = duration_rel.numerator 1190 | src_duration = duration_rel.denominator 1191 | 1192 | # https://github.com/AmusementClub/vs-mlrt/issues/59#issuecomment-1842649342 1193 | if video_player: 1194 | temp = core.std.BlankClip(clip, length=dst_frames, keep=True) 1195 | 1196 | def left_func(n: int) -> vs.VideoNode: 1197 | return clip[dst_duration * n // src_duration] 1198 | left_clip = core.std.FrameEval(temp, left_func) 1199 | 1200 | def right_func(n: int) -> vs.VideoNode: 1201 | # no out of range access because of function filter_sc 1202 | return clip[dst_duration * n // src_duration + 1] 1203 | right_clip = core.std.FrameEval(temp, right_func) 1204 | 1205 | temp_gray = core.std.BlankClip(temp, format=gray_format, keep=True) 1206 | def timepoint_func(n: int) -> vs.VideoNode: 1207 | current_time = dst_duration * n 1208 | left_index = current_time // src_duration 1209 | left_time = src_duration * left_index 1210 | tp = (current_time - left_time) / src_duration 1211 | return temp_gray.std.BlankClip(color=tp, keep=True) 1212 | tp_clip = core.std.FrameEval(temp_gray, timepoint_func) 1213 | 1214 | output0 = RIFEMerge( 1215 | clipa=left_clip, clipb=right_clip, mask=tp_clip, 1216 | scale=scale, tiles=tiles, tilesize=tilesize, overlap=overlap, 1217 | model=model, backend=backend, ensemble=ensemble, 1218 | _implementation=_implementation 1219 | ) 1220 | 1221 | left0 = bits_as(left_clip, output0) 1222 | 1223 | def filter_sc(n: int, f: vs.VideoFrame) -> vs.VideoNode: 1224 | current_time = dst_duration * n 1225 | left_index = current_time // src_duration 1226 | if ( 1227 | current_time % src_duration == 0 or 1228 | left_index + 1 >= src_frames or 1229 | f.props.get("_SceneChangeNext", False) 1230 | ): 1231 | return left0 1232 | else: 1233 | return output0 1234 | 1235 | res = core.std.FrameEval(output0, filter_sc, left0) 1236 | else: 1237 | if not hasattr(core, 'akarin') or \ 1238 | not hasattr(core.akarin, 'PropExpr') or \ 1239 | not hasattr(core.akarin, 'PickFrames'): 1240 | raise RuntimeError( 1241 | 'fractional multi requires plugin akarin ' 1242 | '(https://github.com/AkarinVS/vapoursynth-plugin/releases)' 1243 | ', version v0.96g or later.') 1244 | 1245 | left_indices = [] 1246 | right_indices = [] 1247 | timepoints = [] 1248 | output_indices = [] 1249 | 1250 | for i in range(dst_frames): 1251 | current_time = dst_duration * i 1252 | if current_time % src_duration == 0: 1253 | output_indices.append(current_time // src_duration) 1254 | else: 1255 | left_index = current_time // src_duration 1256 | if left_index + 1 >= src_frames: 1257 | # approximate last frame with last frame of source 1258 | output_indices.append(src_frames - 1) 1259 | break 1260 | output_indices.append(src_frames + len(timepoints)) 1261 | left_indices.append(left_index) 1262 | right_indices.append(left_index + 1) 1263 | left_time = src_duration * left_index 1264 | tp = (current_time - left_time) / src_duration 1265 | timepoints.append(tp) 1266 | 1267 | left_clip = core.akarin.PickFrames(clip, left_indices) 1268 | right_clip = core.akarin.PickFrames(clip, right_indices) 1269 | 
tp_clip = core.std.BlankClip(clip, format=gray_format, length=len(timepoints)) 1270 | tp_clip = tp_clip.akarin.PropExpr(lambda: dict(_tp=timepoints)).akarin.Expr('x._tp') 1271 | 1272 | output0 = RIFEMerge( 1273 | clipa=left_clip, clipb=right_clip, mask=tp_clip, 1274 | scale=scale, tiles=tiles, tilesize=tilesize, overlap=overlap, 1275 | model=model, backend=backend, ensemble=ensemble, 1276 | _implementation=_implementation 1277 | ) 1278 | 1279 | clip0 = bits_as(clip, output0) 1280 | left0 = bits_as(left_clip, output0) 1281 | output = core.akarin.Select([output0, left0], left0, 'x._SceneChangeNext 1 0 ?') 1282 | res = core.akarin.PickFrames(clip0 + output, output_indices) 1283 | 1284 | if clip.fps_num != 0 and clip.fps_den != 0: 1285 | return res.std.AssumeFPS(fpsnum = dst_fps.numerator, fpsden = dst_fps.denominator) 1286 | else: 1287 | return res 1288 | 1289 | 1290 | @enum.unique 1291 | class SAFAModel(enum.IntEnum): 1292 | v0_1 = 1 1293 | v0_2 = 2 1294 | v0_3 = 3 1295 | v0_4 = 4 1296 | 1297 | 1298 | @enum.unique 1299 | class SAFAAdaptiveMode(enum.IntEnum): 1300 | non_adaptive = 0 # non-adaptive 1301 | adaptive1x = 1 # use adaptive path only at 1x scale 1302 | adaptive = 2 # use adaptive path at 1x, 1/2x and 1/4x scales, proposed algorithm 1303 | 1304 | 1305 | def SAFA( 1306 | clip: vs.VideoNode, 1307 | tiles: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 1308 | tilesize: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 1309 | overlap: typing.Optional[typing.Union[int, typing.Tuple[int, int]]] = None, 1310 | model: SAFAModel = SAFAModel.v0_1, 1311 | adaptive: SAFAAdaptiveMode = SAFAAdaptiveMode.non_adaptive, 1312 | backend: backendT = Backend.OV_CPU(), 1313 | ) -> vs.VideoNode: 1314 | """ SAFA: Scale-Adaptive Feature Aggregation for Efficient Space-Time Video Super-Resolution 1315 | """ 1316 | 1317 | func_name = "vsmlrt.SAFA" 1318 | 1319 | if not isinstance(clip, vs.VideoNode): 1320 | raise TypeError(f'{func_name}: "clip" must be a clip!') 1321 | 1322 | if clip.format.sample_type != vs.FLOAT or clip.format.bits_per_sample not in [16, 32]: 1323 | raise ValueError(f"{func_name}: only constant format 16/32 bit float input supported") 1324 | 1325 | if clip.format.color_family != vs.RGB: 1326 | raise ValueError(f'{func_name}: "clip" must be of RGB color family') 1327 | 1328 | if clip.num_frames == 1: 1329 | raise ValueError(f'{func_name}: "clip" too short!') 1330 | 1331 | if overlap is None: 1332 | overlap_w = overlap_h = 16 1333 | elif isinstance(overlap, int): 1334 | overlap_w = overlap_h = overlap 1335 | else: 1336 | overlap_w, overlap_h = overlap 1337 | 1338 | # unknown crash 1339 | if model <= 2: 1340 | multiple = 8 1341 | else: 1342 | multiple = 16 1343 | 1344 | (tile_w, tile_h), (overlap_w, overlap_h) = calc_tilesize( 1345 | tiles=tiles, tilesize=tilesize, 1346 | width=clip.width, height=clip.height, 1347 | multiple=multiple, 1348 | overlap_w=overlap_w, overlap_h=overlap_h 1349 | ) 1350 | 1351 | backend = init_backend( 1352 | backend=backend, 1353 | trt_opt_shapes=(tile_w, tile_h) 1354 | ) 1355 | 1356 | _adaptive = SAFAAdaptiveMode(adaptive) 1357 | 1358 | if isinstance(backend, Backend.TRT): 1359 | if backend.force_fp16: 1360 | backend.force_fp16 = False 1361 | backend.fp16 = True 1362 | 1363 | cast1, cast2 = [(218, 255), (266, 303), (254, 291)][_adaptive] 1364 | 1365 | backend.custom_args.extend([ 1366 | "--precisionConstraints=obey", 1367 | "--layerPrecisions=" + ( 1368 | "/Div_2:fp32,/Div_3:fp32,/Div_4:fp32,/Div_5:fp32,/Div_6:fp32,/Div_7:fp32," 
1369 | "/Cast_7:fp32,/Cast_8:fp32,/Cast_10:fp32,/Cast_11:fp32," 1370 | f"Cast_{cast1}:fp32,Cast_{cast2}:fp32," 1371 | "/Sub_3:fp32,/Sub_4:fp32" 1372 | ) 1373 | ]) 1374 | 1375 | model_version = SAFAModel(model).name.replace('_', '.') 1376 | adaptive_string = _adaptive.name 1377 | 1378 | network_path = os.path.join( 1379 | models_path, 1380 | "safa", 1381 | f"safa_{model_version}_{adaptive_string}.onnx" 1382 | ) 1383 | 1384 | clip_org = clip 1385 | 1386 | clips = [clip[::2], clip[1::2]] 1387 | if clips[0].num_frames != clips[1].num_frames: 1388 | clips[1] = core.std.Splice([clips[1], clip[-1]]) 1389 | 1390 | clip2x = inference_with_fallback( 1391 | clips=clips, network_path=network_path, 1392 | overlap=(overlap_w, overlap_h), tilesize=(tile_w, tile_h), 1393 | backend=backend 1394 | ) 1395 | 1396 | up = core.std.Crop(clip2x, bottom=clip2x.height // 2) 1397 | down = core.std.Crop(clip2x, top=clip2x.height // 2) 1398 | clip = core.std.Interleave([up, down]) 1399 | 1400 | if clip.num_frames != clip_org.num_frames: 1401 | clip = clip[:-1] 1402 | 1403 | return clip 1404 | 1405 | 1406 | def get_engine_path( 1407 | network_path: str, 1408 | min_shapes: typing.Tuple[int, int], 1409 | opt_shapes: typing.Tuple[int, int], 1410 | max_shapes: typing.Tuple[int, int], 1411 | workspace: typing.Optional[int], 1412 | fp16: bool, 1413 | device_id: int, 1414 | use_cublas: bool, 1415 | static_shape: bool, 1416 | tf32: bool, 1417 | use_cudnn: bool, 1418 | input_format: int, 1419 | output_format: int, 1420 | builder_optimization_level: int, 1421 | max_aux_streams: typing.Optional[int], 1422 | short_path: typing.Optional[bool], 1423 | bf16: bool 1424 | ) -> str: 1425 | 1426 | with open(network_path, "rb") as file: 1427 | checksum = zlib.adler32(file.read()) 1428 | 1429 | trt_version = core.trt.Version()["tensorrt_version"].decode() 1430 | 1431 | try: 1432 | device_name = core.trt.DeviceProperties(device_id)["name"].decode() 1433 | device_name = device_name.replace(' ', '-') 1434 | except AttributeError: 1435 | device_name = f"device{device_id}" 1436 | 1437 | if static_shape: 1438 | shape_str = f"{opt_shapes[0]}x{opt_shapes[1]}" 1439 | else: 1440 | shape_str = ( 1441 | f"min{min_shapes[0]}x{min_shapes[1]}" 1442 | f"_opt{opt_shapes[0]}x{opt_shapes[1]}" 1443 | f"_max{max_shapes[0]}x{max_shapes[1]}" 1444 | ) 1445 | 1446 | identity = ( 1447 | shape_str + 1448 | ("_fp16" if fp16 else "") + 1449 | ("_tf32" if tf32 else "") + 1450 | ("_bf16" if bf16 else "") + 1451 | (f"_workspace{workspace}" if workspace is not None else "") + 1452 | f"_opt{builder_optimization_level}" + 1453 | (f"_max-aux-streams{max_aux_streams}" if max_aux_streams is not None else "") + 1454 | f"_trt-{trt_version}" + 1455 | ("_cublas" if use_cublas else "") + 1456 | ("_cudnn" if use_cudnn else "") + 1457 | "_I-" + ("fp32" if input_format == 0 else "fp16") + 1458 | "_O-" + ("fp32" if output_format == 0 else "fp16") + 1459 | f"_{device_name}" + 1460 | f"_{checksum:x}" 1461 | ) 1462 | 1463 | if short_path or (short_path is None and platform.system() == "Windows"): 1464 | dirname, basename = os.path.split(network_path) 1465 | return os.path.join(dirname, f"{zlib.crc32((basename + identity).encode()):x}.engine") 1466 | else: 1467 | return f"{network_path}.{identity}.engine" 1468 | 1469 | 1470 | def trtexec( 1471 | network_path: str, 1472 | channels: int, 1473 | opt_shapes: typing.Tuple[int, int], 1474 | max_shapes: typing.Tuple[int, int], 1475 | fp16: bool, 1476 | device_id: int, 1477 | workspace: typing.Optional[int] = None, 1478 | verbose: bool = False, 
1479 | use_cuda_graph: bool = False, 1480 | use_cublas: bool = False, 1481 | static_shape: bool = True, 1482 | tf32: bool = False, 1483 | log: bool = False, 1484 | use_cudnn: bool = True, 1485 | use_edge_mask_convolutions: bool = True, 1486 | use_jit_convolutions: bool = True, 1487 | heuristic: bool = False, 1488 | input_name: str = "input", 1489 | input_format: int = 0, 1490 | output_format: int = 0, 1491 | min_shapes: typing.Tuple[int, int] = (0, 0), 1492 | faster_dynamic_shapes: bool = True, 1493 | force_fp16: bool = False, 1494 | builder_optimization_level: int = 3, 1495 | max_aux_streams: typing.Optional[int] = None, 1496 | short_path: typing.Optional[bool] = None, 1497 | bf16: bool = False, 1498 | custom_env: typing.Dict[str, str] = {}, 1499 | custom_args: typing.List[str] = [] 1500 | ) -> str: 1501 | 1502 | # tensort runtime version, e.g. 8401 => 8.4.1 1503 | trt_version = int(core.trt.Version()["tensorrt_version"]) 1504 | 1505 | if isinstance(opt_shapes, int): 1506 | opt_shapes = (opt_shapes, opt_shapes) 1507 | 1508 | if isinstance(max_shapes, int): 1509 | max_shapes = (max_shapes, max_shapes) 1510 | 1511 | if force_fp16: 1512 | fp16 = True 1513 | tf32 = False 1514 | bf16 = False 1515 | 1516 | engine_path = get_engine_path( 1517 | network_path=network_path, 1518 | min_shapes=min_shapes, 1519 | opt_shapes=opt_shapes, 1520 | max_shapes=max_shapes, 1521 | workspace=workspace, 1522 | fp16=fp16, 1523 | device_id=device_id, 1524 | use_cublas=use_cublas, 1525 | static_shape=static_shape, 1526 | tf32=tf32, 1527 | use_cudnn=use_cudnn, 1528 | input_format=input_format, 1529 | output_format=output_format, 1530 | builder_optimization_level=builder_optimization_level, 1531 | max_aux_streams=max_aux_streams, 1532 | short_path=short_path, 1533 | bf16=bf16, 1534 | ) 1535 | 1536 | if os.access(engine_path, mode=os.R_OK): 1537 | return engine_path 1538 | 1539 | alter_engine_path = os.path.join( 1540 | tempfile.gettempdir(), 1541 | os.path.splitdrive(engine_path)[1][1:] 1542 | ) 1543 | 1544 | if os.access(alter_engine_path, mode=os.R_OK): 1545 | return alter_engine_path 1546 | 1547 | try: 1548 | # test writability 1549 | with open(engine_path, "w") as f: 1550 | pass 1551 | os.remove(engine_path) 1552 | except PermissionError: 1553 | print(f"{engine_path} not writable", file=sys.stderr) 1554 | engine_path = alter_engine_path 1555 | dirname = os.path.dirname(engine_path) 1556 | if not os.path.exists(dirname): 1557 | os.makedirs(dirname) 1558 | print(f"change engine path to {engine_path}", file=sys.stderr) 1559 | 1560 | args = [ 1561 | trtexec_path, 1562 | f"--onnx={network_path}", 1563 | f"--timingCacheFile={engine_path}.cache", 1564 | f"--device={device_id}", 1565 | f"--saveEngine={engine_path}" 1566 | ] 1567 | 1568 | if workspace is not None: 1569 | if trt_version >= 8400: 1570 | args.append(f"--memPoolSize=workspace:{workspace}") 1571 | else: 1572 | args.append(f"--workspace{workspace}") 1573 | 1574 | if static_shape: 1575 | args.append(f"--shapes={input_name}:1x{channels}x{opt_shapes[1]}x{opt_shapes[0]}") 1576 | else: 1577 | args.extend([ 1578 | f"--minShapes=input:1x{channels}x{min_shapes[1]}x{min_shapes[0]}", 1579 | f"--optShapes=input:1x{channels}x{opt_shapes[1]}x{opt_shapes[0]}", 1580 | f"--maxShapes=input:1x{channels}x{max_shapes[1]}x{max_shapes[0]}" 1581 | ]) 1582 | 1583 | if fp16: 1584 | args.append("--fp16") 1585 | 1586 | if verbose: 1587 | args.append("--verbose") 1588 | 1589 | preview_features = [] 1590 | if (use_cublas or use_cudnn) and trt_version >= 8600: 1591 | 
preview_features.append("-disableExternalTacticSourcesForCore0805") 1592 | 1593 | if preview_features and trt_version >= 8500: 1594 | args.append(f"--preview={','.join(preview_features)}") 1595 | 1596 | tactic_sources = [] 1597 | 1598 | if use_cublas: 1599 | tactic_sources.extend(["+CUBLAS", "+CUBLAS_LT"]) 1600 | else: 1601 | tactic_sources.extend(["-CUBLAS", "-CUBLAS_LT"]) 1602 | 1603 | if use_cudnn: 1604 | tactic_sources.append("+CUDNN") 1605 | else: 1606 | tactic_sources.append("-CUDNN") 1607 | 1608 | if trt_version >= 8401: 1609 | if use_edge_mask_convolutions: 1610 | tactic_sources.append("+EDGE_MASK_CONVOLUTIONS") 1611 | else: 1612 | tactic_sources.append("-EDGE_MASK_CONVOLUTIONS") 1613 | 1614 | if trt_version >= 8500: 1615 | if use_jit_convolutions: 1616 | tactic_sources.append("+JIT_CONVOLUTIONS") 1617 | else: 1618 | tactic_sources.append("-JIT_CONVOLUTIONS") 1619 | 1620 | args.append(f"--tacticSources={','.join(tactic_sources)}") 1621 | 1622 | if use_cuda_graph: 1623 | args.extend(( 1624 | "--useCudaGraph", 1625 | "--noDataTransfers" 1626 | )) 1627 | else: 1628 | if trt_version >= 8600: 1629 | args.append("--skipInference") 1630 | else: 1631 | args.append("--buildOnly") 1632 | 1633 | if not tf32: 1634 | args.append("--noTF32") 1635 | 1636 | if heuristic and trt_version >= 8500 and core.trt.DeviceProperties(device_id)["major"] >= 8: 1637 | if trt_version < 8600: 1638 | args.append("--heuristic") 1639 | else: 1640 | builder_optimization_level = 2 1641 | 1642 | args.extend([ 1643 | "--inputIOFormats=fp32:chw" if input_format == 0 else "--inputIOFormats=fp16:chw", 1644 | "--outputIOFormats=fp32:chw" if output_format == 0 else "--outputIOFormats=fp16:chw" 1645 | ]) 1646 | 1647 | if faster_dynamic_shapes and not static_shape and 8500 <= trt_version < 8600: 1648 | args.append("--preview=+fasterDynamicShapes0805") 1649 | 1650 | if force_fp16: 1651 | if trt_version >= 8401: 1652 | args.extend([ 1653 | "--layerPrecisions=*:fp16", 1654 | "--layerOutputTypes=*:fp16", 1655 | "--precisionConstraints=obey" 1656 | ]) 1657 | else: 1658 | raise ValueError('"force_fp16" is not available') 1659 | 1660 | if trt_version >= 8600: 1661 | args.append(f"--builderOptimizationLevel={builder_optimization_level}") 1662 | 1663 | if max_aux_streams is not None: 1664 | args.append(f"--maxAuxStreams={max_aux_streams}") 1665 | 1666 | if trt_version >= 9000: 1667 | if bf16: 1668 | args.append("--bf16") 1669 | 1670 | args.extend(custom_args) 1671 | 1672 | if log: 1673 | env_key = "TRTEXEC_LOG_FILE" 1674 | prev_env_value = os.environ.get(env_key) 1675 | 1676 | if prev_env_value is not None and len(prev_env_value) > 0: 1677 | # env_key has been set, no extra action 1678 | env = {env_key: prev_env_value, "CUDA_MODULE_LOADING": "LAZY"} 1679 | env.update(**custom_env) 1680 | subprocess.run(args, env=env, check=True, stdout=sys.stderr) 1681 | else: 1682 | time_str = time.strftime('%y%m%d_%H%M%S', time.localtime()) 1683 | 1684 | log_filename = os.path.join( 1685 | tempfile.gettempdir(), 1686 | f"trtexec_{time_str}.log" 1687 | ) 1688 | 1689 | env = {env_key: log_filename, "CUDA_MODULE_LOADING": "LAZY"} 1690 | env.update(**custom_env) 1691 | 1692 | completed_process = subprocess.run(args, env=env, check=False, stdout=sys.stderr) 1693 | 1694 | if completed_process.returncode == 0: 1695 | try: 1696 | os.remove(log_filename) 1697 | except FileNotFoundError: 1698 | # maybe the official trtexec is used? 
1699 | pass 1700 | else: 1701 | if os.path.exists(log_filename): 1702 | raise RuntimeError(f"trtexec execution fails, log has been written to {log_filename}") 1703 | else: 1704 | raise RuntimeError(f"trtexec execution fails but no log is found") 1705 | else: 1706 | env = {"CUDA_MODULE_LOADING": "LAZY"} 1707 | env.update(**custom_env) 1708 | subprocess.run(args, env=env, check=True, stdout=sys.stderr) 1709 | 1710 | return engine_path 1711 | 1712 | 1713 | def get_mxr_path( 1714 | network_path: str, 1715 | opt_shapes: typing.Tuple[int, int], 1716 | fp16: bool, 1717 | fast_math: bool, 1718 | exhaustive_tune: bool, 1719 | device_id: int, 1720 | short_path: typing.Optional[bool] 1721 | ) -> str: 1722 | 1723 | with open(network_path, "rb") as file: 1724 | checksum = zlib.adler32(file.read()) 1725 | 1726 | migx_version = core.migx.Version()["migraphx_version_build"].decode() 1727 | 1728 | try: 1729 | device_name = core.migx.DeviceProperties(device_id)["name"].decode() 1730 | device_name = device_name.replace(' ', '-') 1731 | except AttributeError: 1732 | device_name = f"device{device_id}" 1733 | 1734 | shape_str = f"{opt_shapes[0]}x{opt_shapes[1]}" 1735 | 1736 | identity = ( 1737 | shape_str + 1738 | ("_fp16" if fp16 else "") + 1739 | ("_fast" if fast_math else "") + 1740 | ("_exhaustive" if exhaustive_tune else "") + 1741 | f"_migx-{migx_version}" + 1742 | f"_{device_name}" + 1743 | f"_{checksum:x}" 1744 | ) 1745 | 1746 | if short_path or (short_path is None and platform.system() == "Windows"): 1747 | dirname, basename = os.path.split(network_path) 1748 | return os.path.join(dirname, f"{zlib.crc32((basename + identity).encode()):x}.mxr") 1749 | else: 1750 | return f"{network_path}.{identity}.mxr" 1751 | 1752 | 1753 | def migraphx_driver( 1754 | network_path: str, 1755 | channels: int, 1756 | opt_shapes: typing.Tuple[int, int], 1757 | fp16: bool, 1758 | fast_math: bool, 1759 | exhaustive_tune: bool, 1760 | device_id: int, 1761 | input_name: str = "input", 1762 | short_path: typing.Optional[bool] = None, 1763 | custom_env: typing.Dict[str, str] = {}, 1764 | custom_args: typing.List[str] = [] 1765 | ) -> str: 1766 | 1767 | if isinstance(opt_shapes, int): 1768 | opt_shapes = (opt_shapes, opt_shapes) 1769 | 1770 | mxr_path = get_mxr_path( 1771 | network_path=network_path, 1772 | opt_shapes=opt_shapes, 1773 | fp16=fp16, 1774 | fast_math=fast_math, 1775 | exhaustive_tune=exhaustive_tune, 1776 | device_id=device_id, 1777 | short_path=short_path 1778 | ) 1779 | 1780 | if os.access(mxr_path, mode=os.R_OK): 1781 | return mxr_path 1782 | 1783 | alter_mxr_path = os.path.join( 1784 | tempfile.gettempdir(), 1785 | os.path.splitdrive(mxr_path)[1][1:] 1786 | ) 1787 | 1788 | if os.access(alter_mxr_path, mode=os.R_OK): 1789 | return alter_mxr_path 1790 | 1791 | try: 1792 | # test writability 1793 | with open(mxr_path, "w") as f: 1794 | pass 1795 | os.remove(mxr_path) 1796 | except PermissionError: 1797 | print(f"{mxr_path} not writable", file=sys.stderr) 1798 | mxr_path = alter_mxr_path 1799 | dirname = os.path.dirname(mxr_path) 1800 | if not os.path.exists(dirname): 1801 | os.makedirs(dirname) 1802 | print(f"change mxr path to {mxr_path}", file=sys.stderr) 1803 | 1804 | if device_id != 0: 1805 | raise ValueError('"device_id" must be 0') 1806 | 1807 | args = [ 1808 | migraphx_driver_path, 1809 | "compile", 1810 | "--onnx", f"{network_path}", 1811 | "--gpu", 1812 | # f"--device={device_id}", 1813 | "--optimize", 1814 | "--binary", 1815 | "--output", f"{mxr_path}" 1816 | ] 1817 | 1818 | args.extend(["--input-dim", 
f"@{input_name}", "1", f"{channels}", f"{opt_shapes[1]}", f"{opt_shapes[0]}"]) 1819 | 1820 | if fp16: 1821 | args.append("--fp16") 1822 | 1823 | if not fast_math: 1824 | args.append("--disable-fast-math") 1825 | 1826 | if exhaustive_tune: 1827 | args.append("--exhaustive-tune") 1828 | 1829 | args.extend(custom_args) 1830 | 1831 | subprocess.run(args, env=custom_env, check=True, stdout=sys.stderr) 1832 | 1833 | return mxr_path 1834 | 1835 | 1836 | def calc_size(width: int, tiles: int, overlap: int, multiple: int = 1) -> int: 1837 | return math.ceil((width + 2 * overlap * (tiles - 1)) / (tiles * multiple)) * multiple 1838 | 1839 | 1840 | def calc_tilesize( 1841 | tiles: typing.Optional[typing.Union[int, typing.Tuple[int, int]]], 1842 | tilesize: typing.Optional[typing.Union[int, typing.Tuple[int, int]]], 1843 | width: int, 1844 | height: int, 1845 | multiple: int, 1846 | overlap_w: int, 1847 | overlap_h: int 1848 | ) -> typing.Tuple[typing.Tuple[int, int], typing.Tuple[int, int]]: 1849 | 1850 | if tilesize is None: 1851 | if tiles is None: 1852 | overlap_w = 0 1853 | overlap_h = 0 1854 | tile_w = width 1855 | tile_h = height 1856 | elif isinstance(tiles, int): 1857 | tile_w = calc_size(width, tiles, overlap_w, multiple) 1858 | tile_h = calc_size(height, tiles, overlap_h, multiple) 1859 | else: 1860 | tile_w = calc_size(width, tiles[0], overlap_w, multiple) 1861 | tile_h = calc_size(height, tiles[1], overlap_h, multiple) 1862 | elif isinstance(tilesize, int): 1863 | tile_w = tilesize 1864 | tile_h = tilesize 1865 | else: 1866 | tile_w, tile_h = tilesize 1867 | 1868 | return (tile_w, tile_h), (overlap_w, overlap_h) 1869 | 1870 | 1871 | def init_backend( 1872 | backend: backendT, 1873 | trt_opt_shapes: typing.Tuple[int, int] 1874 | ) -> backendT: 1875 | 1876 | if backend is Backend.ORT_CPU: # type: ignore 1877 | backend = Backend.ORT_CPU() 1878 | elif backend is Backend.ORT_CUDA: # type: ignore 1879 | backend = Backend.ORT_CUDA() 1880 | elif backend is Backend.OV_CPU: # type: ignore 1881 | backend = Backend.OV_CPU() 1882 | elif backend is Backend.TRT: # type: ignore 1883 | backend = Backend.TRT() 1884 | elif backend is Backend.OV_GPU: # type: ignore 1885 | backend = Backend.OV_GPU() 1886 | elif backend is Backend.NCNN_VK: # type: ignore 1887 | backend = Backend.NCNN_VK() 1888 | elif backend is Backend.ORT_DML: # type: ignore 1889 | backend = Backend.ORT_DML() 1890 | elif backend is Backend.MIGX: # type: ignore 1891 | backend = Backend.MIGX() 1892 | elif backend is Backend.OV_NPU: 1893 | backend = Backend.OV_NPU() 1894 | 1895 | backend = copy.deepcopy(backend) 1896 | 1897 | if isinstance(backend, Backend.TRT): 1898 | if backend.opt_shapes is None: 1899 | backend.opt_shapes = trt_opt_shapes 1900 | 1901 | if backend.max_shapes is None: 1902 | backend.max_shapes = backend.opt_shapes 1903 | elif isinstance(backend, Backend.MIGX): 1904 | if backend.opt_shapes is None: 1905 | backend.opt_shapes = trt_opt_shapes 1906 | 1907 | return backend 1908 | 1909 | 1910 | def _inference( 1911 | clips: typing.List[vs.VideoNode], 1912 | network_path: typing.Union[bytes, str], 1913 | overlap: typing.Tuple[int, int], 1914 | tilesize: typing.Tuple[int, int], 1915 | backend: backendT, 1916 | path_is_serialization: bool = False, 1917 | input_name: str = "input" 1918 | ) -> vs.VideoNode: 1919 | 1920 | if not path_is_serialization: 1921 | network_path = typing.cast(str, network_path) 1922 | if not os.path.exists(network_path): 1923 | raise RuntimeError( 1924 | f'"{network_path}" not found, ' 1925 | "built-in models can be 
found at" 1926 | "https://github.com/AmusementClub/vs-mlrt/releases/tag/model-20211209, " 1927 | "https://github.com/AmusementClub/vs-mlrt/releases/tag/model-20220923 and " 1928 | "https://github.com/AmusementClub/vs-mlrt/releases/tag/external-models" 1929 | ) 1930 | 1931 | if isinstance(backend, Backend.ORT_CPU): 1932 | clip = core.ort.Model( 1933 | clips, network_path, 1934 | overlap=overlap, tilesize=tilesize, 1935 | provider="CPU", builtin=False, 1936 | num_streams=backend.num_streams, 1937 | verbosity=backend.verbosity, 1938 | fp16=backend.fp16, 1939 | path_is_serialization=path_is_serialization, 1940 | fp16_blacklist_ops=backend.fp16_blacklist_ops 1941 | ) 1942 | elif isinstance(backend, Backend.ORT_DML): 1943 | clip = core.ort.Model( 1944 | clips, network_path, 1945 | overlap=overlap, tilesize=tilesize, 1946 | provider="DML", builtin=False, 1947 | device_id=backend.device_id, 1948 | num_streams=backend.num_streams, 1949 | verbosity=backend.verbosity, 1950 | fp16=backend.fp16, 1951 | path_is_serialization=path_is_serialization, 1952 | fp16_blacklist_ops=backend.fp16_blacklist_ops 1953 | ) 1954 | elif isinstance(backend, Backend.ORT_CUDA): 1955 | clip = core.ort.Model( 1956 | clips, network_path, 1957 | overlap=overlap, tilesize=tilesize, 1958 | provider="CUDA", builtin=False, 1959 | device_id=backend.device_id, 1960 | num_streams=backend.num_streams, 1961 | verbosity=backend.verbosity, 1962 | cudnn_benchmark=backend.cudnn_benchmark, 1963 | fp16=backend.fp16, 1964 | path_is_serialization=path_is_serialization, 1965 | use_cuda_graph=backend.use_cuda_graph, 1966 | fp16_blacklist_ops=backend.fp16_blacklist_ops 1967 | ) 1968 | elif isinstance(backend, Backend.OV_CPU): 1969 | version = tuple(map(int, core.ov.Version().get("openvino_version", b"0.0.0").split(b'-')[0].split(b'.'))) 1970 | 1971 | if version >= (2024, 0, 0): 1972 | config_dict = dict( 1973 | NUM_STREAMS=backend.num_streams, 1974 | INFERENCE_NUM_THREADS=backend.num_threads, 1975 | ENABLE_CPU_PINNING="YES" if backend.bind_thread else "NO" 1976 | ) 1977 | if backend.fp16: 1978 | config_dict["INFERENCE_PRECISION_HINT"] = "f16" 1979 | elif backend.bf16: 1980 | config_dict["INFERENCE_PRECISION_HINT"] = "bf16" 1981 | else: 1982 | config_dict["INFERENCE_PRECISION_HINT"] = "f32" 1983 | 1984 | config = lambda: config_dict 1985 | else: 1986 | config = lambda: dict( 1987 | CPU_THROUGHPUT_STREAMS=backend.num_streams, 1988 | CPU_BIND_THREAD="YES" if backend.bind_thread else "NO", 1989 | CPU_THREADS_NUM=backend.num_threads, 1990 | ENFORCE_BF16="YES" if backend.bf16 else "NO" 1991 | ) 1992 | 1993 | clip = core.ov.Model( 1994 | clips, network_path, 1995 | overlap=overlap, tilesize=tilesize, 1996 | device="CPU", builtin=False, 1997 | fp16=False, # use ov's internal quantization 1998 | config=config, 1999 | path_is_serialization=path_is_serialization, 2000 | fp16_blacklist_ops=backend.fp16_blacklist_ops # disabled since fp16 = False 2001 | ) 2002 | elif isinstance(backend, Backend.OV_GPU): 2003 | version = tuple(map(int, core.ov.Version().get("openvino_version", b"0.0.0").split(b'-')[0].split(b'.'))) 2004 | 2005 | if version >= (2024, 0, 0): 2006 | config_dict = dict( 2007 | NUM_STREAMS=backend.num_streams, 2008 | ) 2009 | if backend.fp16: 2010 | config_dict["INFERENCE_PRECISION_HINT"] = "f16" 2011 | else: 2012 | config_dict["INFERENCE_PRECISION_HINT"] = "f32" 2013 | 2014 | config = lambda: config_dict 2015 | else: 2016 | config = lambda: dict( 2017 | GPU_THROUGHPUT_STREAMS=backend.num_streams 2018 | ) 2019 | 2020 | clip = core.ov.Model( 2021 | 
clips, network_path, 2022 | overlap=overlap, tilesize=tilesize, 2023 | device=f"GPU.{backend.device_id}", builtin=False, 2024 | fp16=False, # use ov's internal quantization 2025 | config=config, 2026 | path_is_serialization=path_is_serialization, 2027 | fp16_blacklist_ops=backend.fp16_blacklist_ops 2028 | ) 2029 | elif isinstance(backend, Backend.TRT): 2030 | if path_is_serialization: 2031 | raise ValueError('"path_is_serialization" must be False for trt backend') 2032 | 2033 | network_path = typing.cast(str, network_path) 2034 | 2035 | channels = sum(clip.format.num_planes for clip in clips) 2036 | 2037 | opt_shapes = backend.opt_shapes if backend.opt_shapes is not None else tilesize 2038 | max_shapes = backend.max_shapes if backend.max_shapes is not None else tilesize 2039 | 2040 | engine_path = trtexec( 2041 | network_path, 2042 | channels=channels, 2043 | opt_shapes=opt_shapes, 2044 | max_shapes=max_shapes, 2045 | fp16=backend.fp16, 2046 | device_id=backend.device_id, 2047 | workspace=backend.workspace, 2048 | verbose=backend.verbose, 2049 | use_cuda_graph=backend.use_cuda_graph, 2050 | use_cublas=backend.use_cublas, 2051 | static_shape=backend.static_shape, 2052 | tf32=backend.tf32, 2053 | log=backend.log, 2054 | use_cudnn=backend.use_cudnn, 2055 | use_edge_mask_convolutions=backend.use_edge_mask_convolutions, 2056 | use_jit_convolutions=backend.use_jit_convolutions, 2057 | heuristic=backend.heuristic, 2058 | input_name=input_name, 2059 | input_format=clips[0].format.bits_per_sample == 16, 2060 | output_format=backend.output_format, 2061 | min_shapes=backend.min_shapes, 2062 | faster_dynamic_shapes=backend.faster_dynamic_shapes, 2063 | force_fp16=backend.force_fp16, 2064 | builder_optimization_level=backend.builder_optimization_level, 2065 | max_aux_streams=backend.max_aux_streams, 2066 | short_path=backend.short_path, 2067 | bf16=backend.bf16, 2068 | custom_env=backend.custom_env, 2069 | custom_args=backend.custom_args 2070 | ) 2071 | clip = core.trt.Model( 2072 | clips, engine_path, 2073 | overlap=overlap, 2074 | tilesize=tilesize, 2075 | device_id=backend.device_id, 2076 | use_cuda_graph=backend.use_cuda_graph, 2077 | num_streams=backend.num_streams, 2078 | verbosity=4 if backend.verbose else 2 2079 | ) 2080 | elif isinstance(backend, Backend.NCNN_VK): 2081 | clip = core.ncnn.Model( 2082 | clips, network_path, 2083 | overlap=overlap, tilesize=tilesize, 2084 | device_id=backend.device_id, 2085 | num_streams=backend.num_streams, 2086 | builtin=False, 2087 | fp16=backend.fp16, 2088 | path_is_serialization=path_is_serialization, 2089 | ) 2090 | elif isinstance(backend, Backend.MIGX): 2091 | if path_is_serialization: 2092 | raise ValueError('"path_is_serialization" must be False for migx backend') 2093 | 2094 | network_path = typing.cast(str, network_path) 2095 | 2096 | channels = sum(clip.format.num_planes for clip in clips) 2097 | 2098 | opt_shapes = backend.opt_shapes if backend.opt_shapes is not None else tilesize 2099 | 2100 | mxr_path = migraphx_driver( 2101 | network_path, 2102 | channels=channels, 2103 | opt_shapes=opt_shapes, 2104 | fp16=backend.fp16, 2105 | fast_math=backend.fast_math, 2106 | exhaustive_tune=backend.exhaustive_tune, 2107 | device_id=backend.device_id, 2108 | input_name=input_name, 2109 | short_path=backend.short_path, 2110 | custom_env=backend.custom_env, 2111 | custom_args=backend.custom_args 2112 | ) 2113 | clip = core.migx.Model( 2114 | clips, mxr_path, 2115 | overlap=overlap, 2116 | tilesize=tilesize, 2117 | device_id=backend.device_id 2118 | ) 2119 | elif 
isinstance(backend, Backend.OV_NPU): 2120 | clip = core.ov.Model( 2121 | clips, network_path, 2122 | overlap=overlap, tilesize=tilesize, 2123 | device="NPU", builtin=False, 2124 | fp16=False, # use ov's internal quantization 2125 | path_is_serialization=path_is_serialization, 2126 | ) 2127 | else: 2128 | raise TypeError(f'unknown backend {backend}') 2129 | 2130 | return clip 2131 | 2132 | 2133 | def inference_with_fallback( 2134 | clips: typing.List[vs.VideoNode], 2135 | network_path: typing.Union[bytes, str], 2136 | overlap: typing.Tuple[int, int], 2137 | tilesize: typing.Tuple[int, int], 2138 | backend: backendT, 2139 | path_is_serialization: bool = False, 2140 | input_name: str = "input" 2141 | ) -> vs.VideoNode: 2142 | 2143 | try: 2144 | return _inference( 2145 | clips=clips, network_path=network_path, 2146 | overlap=overlap, tilesize=tilesize, 2147 | backend=backend, 2148 | path_is_serialization=path_is_serialization, 2149 | input_name=input_name 2150 | ) 2151 | except Exception as e: 2152 | if fallback_backend is not None: 2153 | import logging 2154 | logger = logging.getLogger("vsmlrt") 2155 | logger.warning(f'"{backend}" fails, trying fallback backend "{fallback_backend}"') 2156 | 2157 | return _inference( 2158 | clips=clips, network_path=network_path, 2159 | overlap=overlap, tilesize=tilesize, 2160 | backend=fallback_backend, 2161 | path_is_serialization=path_is_serialization, 2162 | input_name=input_name 2163 | ) 2164 | else: 2165 | raise e 2166 | 2167 | 2168 | def inference( 2169 | clips: typing.Union[vs.VideoNode, typing.List[vs.VideoNode]], 2170 | network_path: str, 2171 | overlap: typing.Tuple[int, int] = (0, 0), 2172 | tilesize: typing.Optional[typing.Tuple[int, int]] = None, 2173 | backend: backendT = Backend.OV_CPU(), 2174 | input_name: typing.Optional[str] = "input" 2175 | ) -> vs.VideoNode: 2176 | 2177 | if isinstance(clips, vs.VideoNode): 2178 | clips = typing.cast(vs.VideoNode, clips) 2179 | clips = [clips] 2180 | 2181 | if tilesize is None: 2182 | tilesize = (clips[0].width, clips[0].height) 2183 | 2184 | backend = init_backend(backend=backend, trt_opt_shapes=tilesize) 2185 | 2186 | if input_name is None: 2187 | input_name = get_input_name(network_path) 2188 | 2189 | return inference_with_fallback( 2190 | clips=clips, 2191 | network_path=network_path, 2192 | overlap=overlap, 2193 | tilesize=tilesize, 2194 | backend=backend, 2195 | path_is_serialization=False, 2196 | input_name=input_name 2197 | ) 2198 | 2199 | 2200 | def get_input_name(network_path: str) -> str: 2201 | import onnx 2202 | model = onnx.load(network_path) 2203 | return model.graph.input[0].name 2204 | 2205 | 2206 | def bits_as(clip: vs.VideoNode, target: vs.VideoNode) -> vs.VideoNode: 2207 | if clip.format.bits_per_sample == target.format.bits_per_sample: 2208 | return clip 2209 | else: 2210 | is_api4 = hasattr(vs, "__api_version__") and vs.__api_version__.api_major == 4 2211 | query_video_format = core.query_video_format if is_api4 else core.register_format 2212 | format = query_video_format( 2213 | color_family=clip.format.color_family, 2214 | sample_type=clip.format.sample_type, 2215 | bits_per_sample=target.format.bits_per_sample, 2216 | subsampling_w=clip.format.subsampling_w, 2217 | subsampling_h=clip.format.subsampling_h 2218 | ) 2219 | return clip.resize.Point(format=format) 2220 | 2221 | 2222 | class BackendV2: 2223 | """ simplified backend interfaces with keyword-only arguments 2224 | 2225 | More exposed arguments may be added for each backend, 2226 | but existing ones will always function in a 
forward compatible way. 2227 | """ 2228 | 2229 | @staticmethod 2230 | def TRT(*, 2231 | num_streams: int = 1, 2232 | fp16: bool = False, 2233 | tf32: bool = False, 2234 | output_format: int = 0, # 0: fp32, 1: fp16 2235 | workspace: typing.Optional[int] = None, 2236 | use_cuda_graph: bool = False, 2237 | static_shape: bool = True, 2238 | min_shapes: typing.Tuple[int, int] = (0, 0), 2239 | opt_shapes: typing.Optional[typing.Tuple[int, int]] = None, 2240 | max_shapes: typing.Optional[typing.Tuple[int, int]] = None, 2241 | force_fp16: bool = False, 2242 | use_cublas: bool = False, 2243 | use_cudnn: bool = False, 2244 | device_id: int = 0, 2245 | **kwargs 2246 | ) -> Backend.TRT: 2247 | 2248 | return Backend.TRT( 2249 | num_streams=num_streams, 2250 | fp16=fp16, force_fp16=force_fp16, tf32=tf32, output_format=output_format, 2251 | workspace=workspace, use_cuda_graph=use_cuda_graph, 2252 | static_shape=static_shape, 2253 | min_shapes=min_shapes, opt_shapes=opt_shapes, max_shapes=max_shapes, 2254 | use_cublas=use_cublas, use_cudnn=use_cudnn, 2255 | device_id=device_id, 2256 | **kwargs 2257 | ) 2258 | 2259 | @staticmethod 2260 | def NCNN_VK(*, 2261 | num_streams: int = 1, 2262 | fp16: bool = False, 2263 | device_id: int = 0, 2264 | **kwargs 2265 | ) -> Backend.NCNN_VK: 2266 | return Backend.NCNN_VK( 2267 | num_streams=num_streams, 2268 | fp16=fp16, 2269 | device_id=device_id, 2270 | **kwargs 2271 | ) 2272 | 2273 | @staticmethod 2274 | def ORT_CUDA(*, 2275 | num_streams: int = 1, 2276 | fp16: bool = False, 2277 | cudnn_benchmark: bool = True, 2278 | device_id: int = 0, 2279 | **kwargs 2280 | ) -> Backend.ORT_CUDA: 2281 | return Backend.ORT_CUDA( 2282 | num_streams=num_streams, 2283 | fp16=fp16, 2284 | cudnn_benchmark=cudnn_benchmark, 2285 | device_id=device_id, 2286 | **kwargs 2287 | ) 2288 | 2289 | @staticmethod 2290 | def OV_CPU(*, 2291 | num_streams: typing.Union[int, str] = 1, 2292 | bf16: bool = False, 2293 | bind_thread: bool = True, 2294 | num_threads: int = 0, 2295 | **kwargs 2296 | ) -> Backend.OV_CPU: 2297 | return Backend.OV_CPU( 2298 | num_streams=num_streams, 2299 | bf16=bf16, 2300 | bind_thread=bind_thread, 2301 | num_threads=num_threads, 2302 | **kwargs 2303 | ) 2304 | 2305 | @staticmethod 2306 | def ORT_CPU(*, 2307 | num_streams: int = 1, 2308 | **kwargs 2309 | ) -> Backend.ORT_CPU: 2310 | return Backend.ORT_CPU( 2311 | num_streams=num_streams, 2312 | **kwargs 2313 | ) 2314 | 2315 | @staticmethod 2316 | def OV_GPU(*, 2317 | num_streams: typing.Union[int, str] = 1, 2318 | fp16: bool = False, 2319 | device_id: int = 0, 2320 | **kwargs 2321 | ) -> Backend.OV_GPU: 2322 | return Backend.OV_GPU( 2323 | num_streams=num_streams, 2324 | fp16=fp16, 2325 | device_id=device_id, 2326 | **kwargs 2327 | ) 2328 | 2329 | @staticmethod 2330 | def ORT_DML(*, 2331 | device_id: int = 0, 2332 | num_streams: int = 1, 2333 | fp16: bool = False, 2334 | **kwargs 2335 | ) -> Backend.ORT_DML: 2336 | return Backend.ORT_DML( 2337 | device_id=device_id, 2338 | num_streams=num_streams, 2339 | fp16=fp16, 2340 | **kwargs 2341 | ) 2342 | 2343 | @staticmethod 2344 | def MIGX(*, 2345 | fp16: bool = False, 2346 | opt_shapes: typing.Optional[typing.Tuple[int, int]] = None, 2347 | **kwargs 2348 | ) -> Backend.MIGX: 2349 | 2350 | return Backend.MIGX( 2351 | fp16=fp16, 2352 | opt_shapes=opt_shapes 2353 | **kwargs 2354 | ) 2355 | 2356 | @staticmethod 2357 | def OV_NPU(**kwargs 2358 | ) -> Backend.OV_NPU: 2359 | return Backend.OV_NPU( 2360 | **kwargs 2361 | ) 2362 | 2363 | 2364 | def fmtc_resample(clip: vs.VideoNode, 
**kwargs) -> vs.VideoNode: 2365 | clip_org = clip 2366 | 2367 | if clip.format.sample_type == vs.FLOAT and clip.format.bits_per_sample != 32: 2368 | format = clip.format.replace(core=core, bits_per_sample=32) 2369 | clip = core.resize.Point(clip, format=format.id) 2370 | 2371 | clip = core.fmtc.resample(clip, **kwargs) 2372 | 2373 | if clip.format.bits_per_sample != clip_org.format.bits_per_sample: 2374 | clip = core.resize.Point(clip, format=clip_org.format.id) 2375 | 2376 | return clip 2377 | --------------------------------------------------------------------------------
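
For orientation, here is a minimal, hedged sketch of how the generic entry point in `vsmlrt.py` above is meant to be driven from a VapourSynth script: `inference()` resolves the backend through `init_backend()`, builds or reuses a cached TensorRT engine via `trtexec()`/`get_engine_path()` when a TRT backend is selected, and finally dispatches to the matching `core.*.Model` filter in `_inference()`. The ONNX path and the backend settings below are illustrative assumptions, not files or values shipped with this repo.

```python
# Hedged usage sketch for vsmlrt.inference() / BackendV2 (assumes the vs-mlrt
# plugins are installed and vsmlrt.py is importable; "my_model.onnx" is a placeholder).
import vapoursynth as vs
from vsmlrt import inference, BackendV2

core = vs.core

# Models expect a constant-format float RGB clip; a BlankClip stands in for real input.
clip = core.std.BlankClip(format=vs.RGBS, width=1920, height=1080, length=240)

# BackendV2.TRT is the keyword-only wrapper around Backend.TRT defined above;
# fp16 and use_cuda_graph are forwarded to trtexec() and core.trt.Model.
backend = BackendV2.TRT(num_streams=2, fp16=True, use_cuda_graph=True)

# tilesize defaults to the full frame, so overlap stays (0, 0) here; the first
# run writes a .engine file next to the ONNX (or to the temp dir if that
# location is not writable) and later runs reuse it.
out = inference(clip, "my_model.onnx", backend=backend)
out.set_output()
```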
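
A similarly hedged sketch for the `SAFA()` wrapper above: it pairs even and odd frames, runs the model once per pair, then crops the vertically stacked output back into two frames and interleaves them, so the result keeps the input frame count. The clip dimensions, tile count, and backend choice are illustrative.

```python
# Hedged usage sketch for vsmlrt.SAFA (assumes the safa_*.onnx models are
# present under models_path/safa as expected by the function above).
import vapoursynth as vs
from vsmlrt import SAFA, SAFAModel, SAFAAdaptiveMode, BackendV2

core = vs.core

clip = core.std.BlankClip(format=vs.RGBS, width=1280, height=720, length=48)

# With tiles=2 and the default overlap of 16, calc_size() gives
# ceil((1280 + 2*16*(2-1)) / (2*8)) * 8 = 656 pixels per tile horizontally
# (models v0_1/v0_2 round to a multiple of 8, later ones to 16).
sr = SAFA(
    clip,
    tiles=2,
    model=SAFAModel.v0_2,
    adaptive=SAFAAdaptiveMode.adaptive,
    backend=BackendV2.TRT(fp16=True),
)
sr.set_output()
```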
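
Finally, the `fmtc_resample()` helper that closes the file evidently works around fmtconv not accepting half-float input: the clip is promoted to 32-bit float, resampled, and converted back to its original bit depth. A hedged example, assuming the fmtconv plugin is loaded; `w`, `h`, and `kernel` are ordinary fmtconv `resample` parameters, not something defined by this module.

```python
# Hedged sketch for the fmtc_resample() wrapper.
import vapoursynth as vs
from vsmlrt import fmtc_resample

core = vs.core

half = core.std.BlankClip(format=vs.RGBH, width=1920, height=1080)   # 16-bit float
small = fmtc_resample(half, w=1280, h=720, kernel="spline36")

# The wrapper restores the original bit depth, so the result is 16-bit float again.
assert small.format.bits_per_sample == 16
```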