├── README.md ├── xvs ├── config.py ├── __init__.py ├── deband.py ├── mask.py ├── dehalo.py ├── deinterlace.py ├── aa.py ├── metrics.py ├── props.py ├── enhance.py ├── denoise.py ├── utils.py ├── scale.py └── other.py └── .gitignore /README.md: -------------------------------------------------------------------------------- 1 | # xvs 2 | --- 3 | For old verion,check legacy branch. 4 | 5 | New version did not fully test yet. 6 | -------------------------------------------------------------------------------- /xvs/config.py: -------------------------------------------------------------------------------- 1 | import vapoursynth as vs 2 | core=vs.core 3 | 4 | import functools 5 | 6 | import nnedi3_resample as nnrs 7 | nnedi3_resample=nnrs.nnedi3_resample 8 | if hasattr(core,"znedi3") and "mode" in nnrs.nnedi3_resample.__code__.co_varnames: 9 | nnedi3_resample=functools.partial(nnrs.nnedi3_resample,mode="znedi3") 10 | 11 | Expr=core.akarin.Expr if hasattr(core,"akarin") else core.std.Expr -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | #for test 107 | test/ -------------------------------------------------------------------------------- /xvs/__init__.py: -------------------------------------------------------------------------------- 1 | from .config import * 2 | 3 | from .utils import ( 4 | getplane, 5 | getY, 6 | getU, 7 | getV, 8 | extractPlanes, 9 | showY, 10 | showU, 11 | showV, 12 | showUV, 13 | inpand, 14 | expand, 15 | mt_deflate_multi, 16 | mt_inflate_multi, 17 | mt_inpand_multi, 18 | mt_expand_multi, 19 | LimitFilter, 20 | nnedi3, 21 | eedi3, 22 | nlm 23 | ) 24 | 25 | from .scale import ( 26 | rescale, 27 | rescalef, 28 | multirescale, 29 | MRkernelgen, 30 | dpidDown 31 | ) 32 | 33 | from .denoise import ( 34 | bm3d, 35 | SPresso, 36 | STPresso, 37 | STPressoMC, 38 | FluxsmoothTMC 39 | ) 40 | 41 | from .enhance import ( 42 | SCSharpen, 43 | FastLineDarkenMOD, 44 | 
mwenhance, 45 | mwcfix, 46 | xsUSM, 47 | SharpenDetail, 48 | ssharp 49 | ) 50 | 51 | from .dehalo import ( 52 | EdgeCleaner, 53 | abcxyz, 54 | LazyDering, 55 | SADering 56 | ) 57 | 58 | from .deband import ( 59 | SAdeband, 60 | lbdeband 61 | ) 62 | 63 | from .deinterlace import ( 64 | ivtc, 65 | ivtc_t, 66 | FIFP 67 | ) 68 | 69 | from .aa import ( 70 | daa, 71 | mwaa, 72 | XSAA, 73 | drAA 74 | ) 75 | 76 | from .props import ( 77 | props2csv, 78 | csv2props, 79 | ssim2csv, 80 | GMSD2csv, 81 | getPictType, 82 | statsinfo2csv 83 | ) 84 | 85 | from .metrics import ( 86 | SSIM, 87 | GMSD, 88 | getsharpness 89 | ) 90 | 91 | from .mask import ( 92 | mwdbmask, 93 | mwlmask, 94 | creditmask 95 | ) 96 | 97 | from .other import ( 98 | splicev1, 99 | mvfrc, 100 | textsub, 101 | vfrtocfr, 102 | Overlaymod, 103 | InterFrame, 104 | xTonemap, 105 | readmpls, 106 | LDMerge, 107 | lowbitdepth_sim 108 | ) 109 | 110 | __version__ = "20251128" -------------------------------------------------------------------------------- /xvs/deband.py: -------------------------------------------------------------------------------- 1 | from .utils import * 2 | from .other import lowbitdepth_sim 3 | 4 | @deprecated("No maintenance.") 5 | def SAdeband( 6 | src: vs.VideoNode, 7 | thr: int = 128, 8 | f3kdb_arg: dict = {}, 9 | smoothmask: int = 0, 10 | Sdbclip: vs.VideoNode | None = None, 11 | Smask: vs.VideoNode | None = None, 12 | tvrange:bool = True, 13 | ) -> vs.VideoNode: 14 | """ 15 | Simple Adaptive Debanding 16 | ------------------------------------- 17 | thr: only pixel less then will be processed(only affect luma),default is 128 and vaule is based on 8bit 18 | f3kdb_arg:use a dict to set parameters of f3kdb 19 | smoothmask: -1: don't smooth the mask; 0: use removegrain mode11; 20 | 1: use removegrain mode20; 2: use removegrain mode19 21 | a list :use Convolution,and this list will be the matrix 22 | default is 0 23 | Sdbclip: input and use your own debanded clip,must be 16bit 24 | Smask:input 
and use your own mask clip,must be 16bit,and 0 means don't process 25 | tvrange: set false if your clip is pcrange 26 | """ 27 | clip = src if src.format.bits_per_sample==16 else core.fmtc.bitdepth(src,bits=16) 28 | db = core.f3kdb.Deband(clip,output_depth=16,**f3kdb_arg) if Sdbclip is None else Sdbclip 29 | expr = "x {thr} > 0 65535 x {low} - 65535 * {thr} {low} - / - ?".format(thr=scale(thr),low=scale(16) if tvrange else 0) 30 | mask = core.std.Expr(clip,[expr,'','']) 31 | if smoothmask==-1: 32 | mask = mask 33 | elif smoothmask==0: 34 | mask = core.rgvs.RemoveGrain(mask,[11,0,0]) 35 | elif smoothmask==1: 36 | mask = core.rgvs.RemoveGrain(mask,[20,0,0]) 37 | elif smoothmask==2: 38 | mask = core.rgvs.RemoveGrain(mask,[19,0,0]) 39 | elif isinstance(smoothmask,list): 40 | mask = core.std.Convolution(mask,matrix=smoothmask,planes=[0]) 41 | else: 42 | raise TypeError("") 43 | merge = core.std.MaskedMerge(clip, db, mask, planes=0) 44 | return core.std.ShufflePlanes([merge,db],[0,1,2], colorfamily=vs.YUV) 45 | 46 | @deprecated("Strange idea,not really useful.") 47 | def lbdeband( 48 | clip: vs.VideoNode, 49 | dbit: int = 6, 50 | ) -> vs.VideoNode: 51 | """ 52 | low bitdepth deband 53 | deband for flat area with heavy details through round to low bitdepth,limitfilter and f3kdb 54 | only procress luma when YUV,no direct support for RGB. 55 | You need use trim,mask or other way you can to protect the area without heavy banding. 
56 | """ 57 | 58 | if clip.format.color_family==vs.RGB: 59 | raise TypeError("RGB is unsupported") 60 | isGary=clip.format.color_family==vs.GRAY 61 | clip=core.fmtc.bitdepth(clip,bits=16) 62 | luma=clip if isGary else getY(clip) 63 | down=lowbitdepth_sim(luma,dbit,dither=1).f3kdb.Deband(31, 64, 0, 0, 0, 0, output_depth=16) 64 | deband=LimitFilter(down, luma, thr=0.2, elast=8.0).f3kdb.Deband(31, 64, 0, 0, 0, 0, output_depth=16).f3kdb.Deband(15, 64, 0, 0, 0, 0, output_depth=16) 65 | if isGary: 66 | return deband 67 | else: 68 | return core.std.ShufflePlanes([deband,clip], [0,1,2], vs.YUV) 69 | 70 | -------------------------------------------------------------------------------- /xvs/mask.py: -------------------------------------------------------------------------------- 1 | from .utils import * 2 | 3 | #from old havsfunc. 4 | @deprecated("Use VapourSynth-EdgeMasks instead.") 5 | def AvsPrewitt( 6 | clip: vs.VideoNode, 7 | planes: PlanesType = None, 8 | ) -> vs.VideoNode: 9 | if not isinstance(clip, vs.VideoNode): 10 | raise vs.Error('AvsPrewitt: this is not a clip') 11 | 12 | if planes is None: 13 | planes = list(range(clip.format.num_planes)) 14 | elif isinstance(planes, int): 15 | planes = [planes] 16 | 17 | return Expr([ 18 | clip.std.Convolution(matrix=[1, 1, 0, 1, 0, -1, 0, -1, -1], planes=planes, saturate=False), 19 | clip.std.Convolution(matrix=[1, 1, 1, 0, 0, 0, -1, -1, -1], planes=planes, saturate=False), 20 | clip.std.Convolution(matrix=[1, 0, -1, 1, 0, -1, 1, 0, -1], planes=planes, saturate=False), 21 | clip.std.Convolution(matrix=[0, -1, -1, 1, 0, -1, 1, 1, 0], planes=planes, saturate=False) 22 | ], 23 | expr=['x y max z max a max' if i in planes else '' for i in range(clip.format.num_planes)]) 24 | 25 | def creditmask( 26 | clip: vs.VideoNode, 27 | nclip: vs.VideoNode, 28 | mode: int = 0, 29 | ) -> vs.VideoNode: 30 | """ 31 | use non-credit clip to create mask for credit area 32 | 255(8bit) means credit 33 | output will be 16bit 34 | #### 35 | mode: 0: 
only use Y to create mask ; 1: use all planes to create mask 36 | only affect the yuv input 37 | """ 38 | 39 | clip = core.fmtc.bitdepth(clip,bits=16) 40 | nclip = core.fmtc.bitdepth(nclip,bits=16) 41 | fid=clip.format.id 42 | def Graymask(src,nc): 43 | dif = Expr([src,nc],["x y - abs 2560 > 65535 0 ?",'','']).rgvs.RemoveGrain(4) 44 | mask= core.std.Inflate(dif).rgvs.RemoveGrain(11).rgvs.RemoveGrain(11) 45 | return mask 46 | if clip.format.color_family==vs.RGB: 47 | raise TypeError("RGB is unsupported") 48 | isYUV=clip.format.color_family==vs.YUV 49 | if not isYUV: 50 | mask = Graymask(clip,nclip) 51 | return mask 52 | else: 53 | if mode==0: 54 | mask=Graymask(getY(clip),getY(nclip)) 55 | mask=core.std.ShufflePlanes(mask,[0,0,0], colorfamily=vs.YUV) 56 | elif mode==1: 57 | clip=clip.resize.Bicubic(format=vs.YUV444P16) 58 | nclip=nclip.resize.Bicubic(format=vs.YUV444P16) 59 | maskY=Graymask(getY(clip),getY(nclip)) 60 | maskU=Graymask(getU(clip),getU(nclip)) 61 | maskV=Graymask(getV(clip),getV(nclip)) 62 | mask=Expr([maskY,maskU,maskV],"x y max z max") 63 | mask=core.std.ShufflePlanes(mask,[0,0,0], colorfamily=vs.YUV) 64 | else: 65 | raise ValueError("mode must be 0 or 1") 66 | return mask.resize.Bicubic(format=fid) 67 | 68 | def mwlmask( 69 | clip: vs.VideoNode, 70 | l1: int = 80, 71 | h1: int = 96, 72 | h2: int | None = None, 73 | l2: int | None = None, 74 | ) -> vs.VideoNode: 75 | """ 76 | luma mask 77 | Steal from other one's script. Most likely written by mawen1250. 78 | """ 79 | sbitPS = clip.format.bits_per_sample 80 | black = 0 81 | white = (1 << sbitPS) - 1 82 | l1 = l1 << (sbitPS - 8) 83 | h1 = h1 << (sbitPS - 8) 84 | if h2 is None: h2 = white 85 | else: h2 = h2 << (sbitPS - 8) 86 | if l2 is None: l2 = white 87 | else: l2 = l2 << (sbitPS - 8) 88 | 89 | if h2 >= white: 90 | expr = f'{white}' 91 | else: 92 | slope2=white / (h2 - l2) 93 | expr = f'x {h2} <= {white} x {l2} < x {l2} - {slope2} * {black} ? ?' 
94 | slope1=white / (h1 - l1) 95 | expr = f'x {l1} <= {black} x {h1} < x {l1} - {slope1} * ' + expr + ' ? ?' 96 | 97 | clip = getplane(clip, 0) 98 | clip = clip.rgvs.RemoveGrain(4) 99 | clip = Expr(expr) 100 | return clip 101 | 102 | def mwdbmask( 103 | clip: vs.VideoNode, 104 | chroma: bool = True, 105 | sigma: float = 2.5, 106 | t_h: float = 1.0, 107 | t_l: float = 0.5, 108 | yuv444: bool | None = None, 109 | cs_h: int = 0, 110 | cs_v: int = 0, 111 | lmask: vs.VideoNode | None = None, 112 | sigma2: float = 2.5, 113 | t_h2: float = 3.0, 114 | t_l2: float = 1.5, 115 | ) -> vs.VideoNode: 116 | """ 117 | deband mask 118 | Steal from other one's script. Most likely written by mawen1250. 119 | """ 120 | ## clip properties 121 | bits=clip.format.bits_per_sample 122 | yuv420 = clip.format.subsampling_w == 1 and clip.format.subsampling_h == 1 123 | sw = clip.width 124 | sh = clip.height 125 | if yuv444 is None: 126 | yuv444 = not yuv420 127 | ## Canny edge detector 128 | emask = clip.tcanny.TCanny(sigma=sigma, t_h=t_h, t_l=t_l, planes=[0,1,2] if chroma else [0]) 129 | if lmask is not None: 130 | emask2 = clip.tcanny.TCanny(sigma2=sigma2,t_h=t_h2, t_l=t_l2, planes=[0,1,2] if chroma else [0]) 131 | emask = core.std.MaskedMerge(emask, emask2, lmask, [0,1,2] if chroma else [0], True) 132 | ## apply morphologic filters and merge mask planes 133 | emaskY = getY(emask) 134 | if chroma: 135 | emaskC = Expr([getU(emask),getV(emask)],"x y max") 136 | if yuv420: 137 | emaskC = mt_inpand_multi(mt_expand_multi(emaskC, 'losange', sw=3, sh=3), 'rectangle', sw=3, sh=3) 138 | emaskC = emaskC.fmtc.resample(sw, sh, 0.25 - cs_h / 2, 0 - cs_v / 2, kernel='bilinear', fulls=True).fmtc.bitdepth(bits=bits) 139 | else: 140 | emaskY = Expr([emaskY, emaskC],"x y max") 141 | emaskY = mt_inpand_multi(mt_expand_multi(emaskY, 'losange', sw=5, sh=5), 'rectangle', sw=2, sh=2) 142 | if chroma and yuv420: 143 | dbmask = Expr([emaskY, emaskC],"x y max") 144 | else: 145 | dbmask = emaskY 146 | ## convert to 
final mask, all the planes of which are the same 147 | if yuv444: 148 | dbmask = mt_inflate_multi(dbmask, radius=2) 149 | dbmaskC = dbmask 150 | else: 151 | dbmaskC = dbmask.fmtc.resample(sw // 2, sh // 2, -0.5, 0, kernel='bilinear').fmtc.bitdepth(bits=bits) 152 | dbmask = mt_inflate_multi(dbmask, radius=2) 153 | dbmask = core.std.ShufflePlanes([dbmask, dbmaskC, dbmaskC], [0,0,0], vs.YUV) 154 | return dbmask 155 | 156 | 157 | 158 | -------------------------------------------------------------------------------- /xvs/dehalo.py: -------------------------------------------------------------------------------- 1 | from .utils import * 2 | 3 | #from muvsfunc 4 | def abcxyz( 5 | clp: vs.VideoNode, 6 | rad: float = 3.0, 7 | ss: float = 1.5 8 | ) -> vs.VideoNode: 9 | """Avisynth's abcxyz() 10 | 11 | Reduces halo artifacts that can occur when sharpening. 12 | 13 | Author: Didée (http://avisynth.nl/images/Abcxyz_MT2.avsi) 14 | 15 | Only the first plane (luma) will be processed. 16 | 17 | Args: 18 | clp: Input clip. 19 | 20 | rad: (float) Radius for halo removal. Default is 3.0. 21 | 22 | ss: (float) Radius for supersampling / ss=1.0 -> no supersampling. Range: 1.0 - ???. Default is 1.5 23 | 24 | """ 25 | 26 | funcName = 'abcxyz' 27 | 28 | if not isinstance(clp, vs.VideoNode) or clp.format.color_family not in [vs.GRAY, vs.YUV]: 29 | raise TypeError(funcName + ': \"clp\" must be a Gray or YUV clip!') 30 | 31 | ox = clp.width 32 | oy = clp.height 33 | 34 | isGray = clp.format.color_family == vs.GRAY 35 | bits = clp.format.bits_per_sample 36 | 37 | if not isGray: 38 | clp_src = clp 39 | clp = getY(clp) 40 | 41 | x = core.resize.Bicubic(clp, m4(ox/rad), m4(oy/rad), filter_param_a=1/3, filter_param_b=1/3).resize.Bicubic(ox, oy, filter_param_a=1, filter_param_b=0) 42 | y = Expr([clp, x], ['x {a} + y < x {a} + x {b} - y > x {b} - y ? ? 
x y - abs * x {c} x y - abs - * + {c} /'.format( 43 | a=scale(8, bits), b=scale(24, bits), c=scale(32, bits))]) 44 | 45 | z1 = core.rgvs.Repair(clp, y, [1]) 46 | 47 | if ss != 1.: 48 | maxbig = core.std.Maximum(y).resize.Bicubic(m4(ox*ss), m4(oy*ss), filter_param_a=1/3, filter_param_b=1/3) 49 | minbig = core.std.Minimum(y).resize.Bicubic(m4(ox*ss), m4(oy*ss), filter_param_a=1/3, filter_param_b=1/3) 50 | z2 = core.resize.Lanczos(clp, m4(ox*ss), m4(oy*ss)) 51 | z2 = Expr([z2, maxbig, minbig], ['x y min z max']).resize.Lanczos(ox, oy) 52 | z1 = z2 # for simplicity 53 | 54 | if not isGray: 55 | z1 = core.std.ShufflePlanes([z1, clp_src], list(range(clp_src.format.num_planes)), clp_src.format.color_family) 56 | 57 | return z1 58 | 59 | #modified from old havsfunc 60 | def EdgeCleaner( 61 | c: vs.VideoNode, 62 | strength: int = 10, 63 | rep: bool = True, 64 | rmode: int = 17, 65 | smode: int = 0, 66 | hot: bool = False, 67 | ) -> vs.VideoNode: 68 | ''' 69 | EdgeCleaner v1.04 70 | A simple edge cleaning and weak dehaloing function. 71 | 72 | Parameters: 73 | c: Clip to process. 74 | 75 | strength: Specifies edge denoising strength. 76 | 77 | rep: Activates Repair for the aWarpSharped clip. 78 | 79 | rmode: Specifies the Repair mode. 80 | 1 is very mild and good for halos, 81 | 16 and 18 are good for edge structure preserval on strong settings but keep more halos and edge noise, 82 | 17 is similar to 16 but keeps much less haloing, other modes are not recommended. 83 | 84 | smode: Specifies what method will be used for finding small particles, ie stars. 0 is disabled, 1 uses RemoveGrain. 85 | 86 | hot: Specifies whether removal of hot pixels should take place. 
87 | ''' 88 | if not isinstance(c, vs.VideoNode): 89 | raise vs.Error('EdgeCleaner: this is not a clip') 90 | 91 | if c.format.color_family == vs.RGB: 92 | raise vs.Error('EdgeCleaner: RGB format is not supported') 93 | 94 | if hasattr(core,"edgemasks"): 95 | Prewitt=core.edgemasks.Prewitt 96 | else: 97 | from .mask import AvsPrewitt as Prewitt 98 | 99 | bits = c.format.bits_per_sample 100 | peak = (1 << bits) - 1 101 | 102 | if c.format.color_family != vs.GRAY: 103 | c_orig = c 104 | c = getY(c) 105 | else: 106 | c_orig = None 107 | 108 | if smode > 0: 109 | strength += 4 110 | 111 | main = Padding(c, 6, 6, 6, 6).warp.AWarpSharp2(blur=1, depth=cround(strength / 2)).std.Crop(6, 6, 6, 6) 112 | if rep: 113 | main = core.rgvs.Repair(main, c, mode=rmode) 114 | mask = Prewitt(mask) 115 | mask = Expr(mask,expr=[f'x {scale(4, peak)} < 0 x {scale(32, peak)} > {peak} x ? ?']) 116 | mask= core.std.InvertMask(mask).std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1]) 117 | 118 | final = core.std.MaskedMerge(c, main, mask) 119 | if hot: 120 | final = core.rgvs.Repair(final, c, mode=2) 121 | if smode > 0: 122 | clean = c.rgvs.RemoveGrain(mode=17) 123 | diff = core.std.MakeDiff(c, clean) 124 | mask = Prewitt(diff.std.Levels(min_in=scale(40, peak), max_in=scale(168, peak), gamma=0.35).rgvs.RemoveGrain(mode=7)) 125 | mask=Expr(mask,f'x {scale(4, peak)} < 0 x {scale(16, peak)} > {peak} x ? 
?') 126 | final = core.std.MaskedMerge(final, c, mask) 127 | 128 | if c_orig is not None: 129 | final = core.std.ShufflePlanes([final, c_orig], planes=[0, 1, 2], colorfamily=c_orig.format.color_family) 130 | return final 131 | 132 | @deprecated("No maintenance.") 133 | def LazyDering( 134 | src: vs.VideoNode, 135 | depth: int = 32, 136 | diff: int = 8, 137 | thr: int = 32, 138 | ) -> vs.VideoNode: 139 | """ 140 | LazyDering 141 | ----------------------------- 142 | port from avs script by Leak&RazorbladeByte 143 | LazyDering tries to clean up slight ringing around edges by applying aWarpSharp2 only to areas where the difference is small enough so detail isn't destroyed. 144 | LazyDering it's a modified version of aWarpSharpDering. 145 | """ 146 | bit = src.format.bits_per_sample 147 | Y = getplane(src,0) 148 | sharped = core.warp.AWarpSharp2(Y,depth=depth) 149 | diff_mask = Expr([Y,sharped], "x y - x y > *").std.Levels(0,scale(diff,bit),0.5,scale(255,bit),0) 150 | luma_mask = core.std.Deflate(Y).std.Levels(scale(16,bit),scale(16+thr,bit),0.5,0,scale(255,bit)) 151 | masks = Expr([luma_mask,diff_mask], "x y * {} /".format(scale(255,bit))).std.Deflate() 152 | merge = core.std.MaskedMerge(Y,sharped, mask=masks) 153 | return core.std.ShufflePlanes([merge,src],[0,1,2], colorfamily=vs.YUV) 154 | 155 | @deprecated("No maintenance.") 156 | def SADering( 157 | src:vs.VideoNode, 158 | ring_r: int = 2, 159 | warp_arg: dict = {}, 160 | warpclip: vs.VideoNode | None = None, 161 | edge_r: int = 2, 162 | show_mode: int = 0, 163 | ) -> vs.VideoNode: 164 | """ 165 | Simple Awarpsharp2 Dering 166 | --------------------------------------- 167 | ring_r: (int)range of ring,higher means more area around edge be set as ring in ringmask,deflaut is 2.Sugest use the smallest value which can dering well 168 | warp_arg: (dict)set your own args for AWarpSharp2,should be dict. e.g. 
169 | warpclip: (clip)aim to allow you input a own warped clip instead internal warped clip,but I think a rightly blurred clip may also useful 170 | edge_r: (int)if the non-ring area between nearly edge can't be preserved well,try increase it's value 171 | show_mode: 0 :output the result 1: edgemask 2: ringmask 3: warped clip 172 | """ 173 | arg={'depth':16,'type':0} 174 | w_a = {**arg,**warp_arg} 175 | isGray = src.format.color_family == vs.GRAY 176 | clip = src if isGray else getplane(src,0) 177 | ####warp 178 | warp = core.warp.AWarpSharp2(clip,**w_a) if warpclip is None else warpclip 179 | ####mask 180 | edgemask = core.tcanny.TCanny(clip,mode=0, op=1) 181 | edgemask = expand(edgemask,cycle=edge_r) 182 | edgemask = core.std.Deflate(edgemask) 183 | edgemask = inpand(edgemask,cycle=edge_r) 184 | # 185 | mask = expand(edgemask,cycle=ring_r+1) 186 | mask = inpand(mask,cycle=1) 187 | # 188 | ringmask = Expr([edgemask,mask], ["y x -"]) 189 | #### 190 | merge = core.std.MaskedMerge(clip, warp, ringmask) 191 | last = merge if isGray else core.std.ShufflePlanes([merge,src],[0,1,2], colorfamily=vs.YUV) 192 | #### 193 | if show_mode==0: 194 | return last 195 | elif show_mode==1: 196 | return edgemask 197 | elif show_mode==2: 198 | return ringmask 199 | elif show_mode==3: 200 | return warp 201 | else: 202 | raise vs.Error("show_mode should in [0,1,2,3]") 203 | 204 | -------------------------------------------------------------------------------- /xvs/deinterlace.py: -------------------------------------------------------------------------------- 1 | from .utils import * 2 | 3 | def FIFP( 4 | src: vs.VideoNode, 5 | mode: int = 0, 6 | tff: bool = True, 7 | mi: int = 40, 8 | blockx: int = 16, 9 | blocky: int = 16, 10 | cthresh: int = 8, 11 | chroma: bool = False, 12 | metric: int = 1, 13 | tc: bool = True, 14 | _pass: int = 1, 15 | opencl: int = False, 16 | device: int = -1, 17 | ) -> vs.VideoNode: 18 | """ 19 | Fix Interlanced Frames in Progressive video 20 | 
--------------------------------------- 21 | analyze setting: 22 | vfm_mode: set the mode of vfm,default:5 23 | mi: set the mi of vfm,default:40 24 | cthresh: set the cthresh of vfm,default:8 25 | blockx,blocky:set the blockx/blocky of vfm,default:16 26 | --------------------------------------- 27 | deinterlace args: 28 | opencl: if True,use nnedi3cl;else use znedi3 29 | device: set device for nnedi3cl 30 | tff: true means Top field first,False means Bottom field first,default:True 31 | --------------------------------------- 32 | mode args: 33 | mode = 0: 34 | interlaced frames will be deinterlaced in the same fps 35 | mode = 1: 36 | interlaced frames will be deinterlaced in double fps,and output timecodes to create a vfr video 37 | need 2 pass 38 | _pass: 39 | 1:analyze pass 40 | 2:encode pass,can output timecodes 41 | tc:if True,will output timecodes,suggest set True only when finally encode,default:True 42 | --------------------------------------- 43 | notice: 44 | analyze.csv will be created when mode=1,_pass=1,you can check and revise it,then use in pass 2 45 | """ 46 | clip = src if src.format.bits_per_sample==8 else core.fmtc.bitdepth(src,bits=8,dmode=8) 47 | order = 1 if tff else 0 48 | 49 | dect = core.tdm.IsCombed(clip,cthresh=cthresh,blockx=blockx,blocky=blocky,chroma=chroma,mi=mi,metric=metric) 50 | 51 | if mode==0: 52 | deinterlace = core.nnedi3cl.NNEDI3CL(src, field=order,device=device) if opencl else core.znedi3.nnedi3(src, field=order) 53 | ### 54 | def postprocess(n, f, clip, de): 55 | if f.props['_Combed'] == 1: 56 | return de 57 | else: 58 | return clip 59 | ### 60 | last=core.std.FrameEval(src, functools.partial(postprocess, clip=src, de=deinterlace), prop_src=dect) 61 | last=core.std.Cache(last, make_linear=True) 62 | return last 63 | elif mode==1: 64 | if _pass==1: 65 | t = open("analyze.csv",'w') 66 | t.write("frame,combed\n") 67 | def analyze(n, f, clip): 68 | t.write(str(n)+","+str(f.props['_Combed'])+"\n") 69 | return clip 70 | t.close() 
71 | last=core.std.FrameEval(dect, functools.partial(analyze, clip=dect),prop_src=dect) 72 | last=core.std.Cache(last, make_linear=True) 73 | return last 74 | elif _pass==2: 75 | lenlst=len(src) 76 | num=src.fps_num 77 | den=src.fps_den 78 | c=open("analyze.csv","r") 79 | tmp=c.read().split("\n")[1:lenlst] 80 | lst=[None]*len(tmp) 81 | for i in tmp: 82 | i=i.split(",") 83 | lst[int(i[0])]=int(i[1]) 84 | c.close() 85 | lenlst=len(lst) 86 | 87 | #tc 88 | if tc: 89 | c=open("timecodes.txt","w") 90 | c.write("# timecode format v2\n0\n") 91 | b=1000/num*den 92 | for i in range(lenlst): 93 | if lst[i]==0: 94 | c.write(str(int((i+1)*b))+"\n") 95 | elif lst[i]==1: 96 | c.write(str(int((i+0.5)*b))+"\n"+str(int((i+1)*b))+"\n") 97 | else: 98 | raise ValueError("") 99 | c.close() 100 | 101 | #deinterlace 102 | deinterlace = core.nnedi3cl.NNEDI3CL(src, field=order+2,device=device) if opencl else core.znedi3.nnedi3(src, field=order+2) 103 | src= core.std.Interleave([src,src]) 104 | def postprocess(n,clip, de): 105 | if lst[n//2]==0: 106 | return clip 107 | else: 108 | return de 109 | dl=core.std.FrameEval(src, functools.partial(postprocess, clip=src, de=deinterlace)) 110 | dl=core.std.Cache(dl, make_linear=True) 111 | tlist=[] 112 | for i in range(lenlst): 113 | if lst[i]==0: 114 | tlist.append(2*i) 115 | else: 116 | tlist.append(2*i) 117 | tlist.append(2*i+1) 118 | last = core.std.SelectEvery(dl,lenlst*2,tlist) 119 | return last#core.std.AssumeFPS(last,src) 120 | else: 121 | ValueError("pass must be 1 or 2") 122 | else: 123 | raise ValueError("mode must be 0 or 1") 124 | 125 | def ivtc( 126 | src: vs.VideoNode, 127 | order:int = 1, 128 | field:int = 2, 129 | mode:int = 1, 130 | mchroma:bool = True, 131 | cthresh:int = 9, 132 | mi:int = 80, 133 | vfm_chroma:bool = True, 134 | vfm_block: List[int] = [16,16], 135 | y0:int = 16, 136 | y1:int = 16, 137 | micmatch:int = 1, 138 | cycle:int = 5, 139 | vd_chroma: bool = True, 140 | dupthresh: float = 1.1, 141 | scthresh: float = 15, 142 
| vd_block: List[int] = [32,32], 143 | pp: bool = True, 144 | nsize: int = 0, 145 | nns: int = 1, 146 | qual: int = 1, 147 | etype: int = 0, 148 | pscrn: int = 2, 149 | opencl: bool = False, 150 | device: int = -1, 151 | ) -> vs.VideoNode: 152 | """ 153 | warp function for vivtc with a simple post-process use nnedi3 or user-defined filter. 154 | """ 155 | src8=core.fmtc.bitdepth(src,bits=8) 156 | src16=core.fmtc.bitdepth(src,bits=16) 157 | 158 | def selector(n,f,match,di): 159 | if f.props["_Combed"]>0: 160 | return di 161 | else: 162 | return match 163 | 164 | match=core.vivtc.VFM(src8,order=order,field=field,mode=mode,mchroma=mchroma,cthresh=cthresh,mi=mi,chroma=vfm_chroma,blockx=vfm_block[0],blocky=vfm_block[1],y0=y0,y1=y1,scthresh=scthresh,micmatch=micmatch,clip2=src16) 165 | 166 | if callable(pp): 167 | di=pp(match) 168 | match=core.std.FrameEval(match,functools.partial(selector,match=match,di=di),prop_src=match) 169 | elif pp: 170 | di=nnedi3(match,field=order,nsize=nsize,nns=nns,qual=qual,etype=etype,pscrn=pscrn,mode="nnedi3cl" if opencl else "znedi3",device=device) 171 | match=core.std.FrameEval(match,functools.partial(selector,match=match,di=di),prop_src=match) 172 | 173 | return core.vivtc.VDecimate(match,cycle=cycle,chroma=vd_chroma,dupthresh=dupthresh,scthresh=scthresh,blockx=vd_block[0],blocky=vd_block[1]) 174 | 175 | def ivtc_t( 176 | src: vs.VideoNode, 177 | order: int = 1, 178 | field: int = -1, 179 | mode: int = 1, 180 | slow: int = 1, 181 | mchroma: bool = True, 182 | y0: int = 16, 183 | y1: int = 16, 184 | scthresh: float = 12.0, 185 | ubsco: bool = True, 186 | micmatching: int = 1, 187 | mmsco: bool = True, 188 | cthresh: int = 9, 189 | tfm_chroma: bool = True, 190 | tfm_block: list[int] = [16,16], 191 | mi: int = 80, 192 | metric: int = 0, 193 | mthresh: int = 5, 194 | td_mode: int = 0, 195 | cycleR: int = 1, 196 | cycle: int = 5, 197 | rate: float = 24000/1001 , 198 | hybrid: int = 0, 199 | vfrdec: int = 1, 200 | dupThresh: float | None = 
None, 201 | vidThresh: float | None = None, 202 | sceneThresh: float = 15, 203 | vidDetect: int = 3, 204 | conCycle: int = None, 205 | conCycleTP: int = None, 206 | nt: int = 0, 207 | vd_block: list[int] = [32,32], 208 | tcfv1: bool = True, 209 | se: bool = False, 210 | vd_chroma: bool = True, 211 | noblend: bool = True, 212 | maxndl: int = None, 213 | m2PA: bool = False, 214 | denoise: bool = False, 215 | ssd: bool = False, 216 | sdlim: int = 0, 217 | pp: int = -1, 218 | nsize: int = 0, 219 | nns: int = 1, 220 | qual: int = 1, 221 | etype: int = 0, 222 | pscrn: int = 2, 223 | opencl: bool = False, 224 | device: int = -1, 225 | ) -> vs.VideoNode: 226 | """ 227 | warp function for tivtc with a simple post-process use nnedi3 or user-defined filter. 228 | use pp=-1(default) to use nnedi3 or pp>0 to use tfm internal post-process. 229 | """ 230 | 231 | def selector(n,f,match,di): 232 | if f.props["_Combed"]>0: 233 | return di 234 | else: 235 | return match 236 | 237 | match=core.tivtc.TFM(src,order=order,field=field,mode=mode,PP=pp if isinstance(pp,int) and pp>0 else 0,slow=slow,mChroma=mchroma,cthresh=cthresh,MI=mi,chroma=tfm_chroma,blockx=tfm_block[0],blocky=tfm_block[1],y0=y0,y1=y1,mthresh=mthresh,scthresh=scthresh,micmatching=micmatching,metric=metric,mmsco=mmsco,ubsco=ubsco) 238 | 239 | if callable(pp): 240 | di=pp(match) 241 | match=core.std.FrameEval(match,functools.partial(selector,match=match,di=di),prop_src=match) 242 | elif pp==-1: 243 | di=nnedi3(match,field=order,nsize=nsize,nns=nns,qual=qual,etype=etype,pscrn=pscrn,mode="nnedi3cl" if opencl else "znedi3",device=device) 244 | match=core.std.FrameEval(match,functools.partial(selector,match=match,di=di),prop_src=match) 245 | 246 | return 
core.tivtc.TDecimate(match,mode=td_mode,cycleR=cycleR,cycle=cycle,rate=rate,dupThresh=dupThresh,vidDetect=vidDetect,vidThresh=vidThresh,sceneThresh=sceneThresh,hybrid=hybrid,conCycle=conCycle,conCycleTP=conCycleTP,nt=nt,blockx=vd_block[0],blocky=vd_block[1],tcfv1=tcfv1,se=se,chroma=vd_chroma,noblend=noblend,maxndl=maxndl,m2PA=m2PA,denoise=denoise,ssd=ssd,sdlim=sdlim,vfrDec=vfrdec) -------------------------------------------------------------------------------- /xvs/aa.py: -------------------------------------------------------------------------------- 1 | from .utils import * 2 | from .other import LDMerge 3 | from .enhance import ( 4 | FastLineDarkenMOD, 5 | xsUSM, 6 | mwenhance 7 | ) 8 | from .dehalo import abcxyz 9 | 10 | #modfied from old havsunc 11 | def daa( 12 | c: vs.VideoNode, 13 | nsize: int | None = None, 14 | nns: int | None = None, 15 | qual: int | None = None, 16 | pscrn: int | None = None, 17 | int16_prescreener: bool | None = None, 18 | int16_predictor: bool | None = None, 19 | exp: int | None = None, 20 | nnedi3_mode: str = "znedi3", 21 | device: int | None = None, 22 | ) -> vs.VideoNode: 23 | ''' 24 | Anti-aliasing with contra-sharpening by Didée. 25 | 26 | It averages two independent interpolations, where each interpolation set works between odd-distanced pixels. 27 | This on its own provides sufficient amount of blurring. Enough blurring that the script uses a contra-sharpening step to counteract the blurring. 
28 | ''' 29 | if not isinstance(c, vs.VideoNode): 30 | raise vs.Error('daa: this is not a clip') 31 | 32 | nn=nnedi3(c,field=3,nsize=nsize, nns=nns, qual=qual, pscrn=pscrn,int16_prescreener=int16_prescreener, int16_predictor=int16_predictor, exp=exp,device=device,mode=nnedi3_mode) 33 | dbl = core.std.Merge(nn[::2], nn[1::2]) 34 | dblD = core.std.MakeDiff(c, dbl) 35 | shrpD = core.std.MakeDiff(dbl, dbl.std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1] if c.width > 1100 else [1, 2, 1, 2, 4, 2, 1, 2, 1])) 36 | DD = core.rgvs.Repair(shrpD, dblD, mode=13) 37 | return core.std.MergeDiff(dbl, DD) 38 | 39 | def XSAA( 40 | src: vs.VideoNode, 41 | nsize: int | None = None, 42 | nns: int | None = 2, 43 | qual: int | None = None, 44 | aamode: int = -1, 45 | maskmode: int = 1, 46 | nnedi3_mode: str = "znedi3", 47 | device: int = -1, 48 | linedarken: bool = False, 49 | preaa: int = 0, 50 | ) -> vs.VideoNode: 51 | """ 52 | xyx98's simple aa function 53 | only process luma 54 | #### 55 | nsize,nns,qual: nnedi3 args 56 | aamode: decide how to aa. 
0: merge two deinterlacing fleid ; 1: enlarge video and downscale 57 | maskmode: 0:no mask ; 1: use the clip before AA to create mask ; 2: use AAed clip to create mask; a video clip: use it as mask 58 | opencl: if True: use nnedi3cl; else use znedi3 59 | device:choose device for nnedi3cl 60 | linedarken: choose whether use FastLineDarkenMOD 61 | preaa: 0: no pre-AA ; 62 | 1: using LDMerge in muvsfunc for pre-AA,use orginal clip when masked merge with AAed clip 63 | 2: using LDMerge in muvsfunc for pre-AA,use pre-AAed clip when masked merge with AAed clip 64 | """ 65 | 66 | w=src.width 67 | h=src.height 68 | if aamode==-1: 69 | enlarge = False if h>=720 else True 70 | elif aamode in (0,1): 71 | enlarge = bool(aamode) 72 | else: 73 | raise ValueError("") 74 | 75 | if src.format.color_family==vs.RGB: 76 | raise TypeError("RGB is unsupported") 77 | isYUV=src.format.color_family==vs.YUV 78 | Yclip = getY(src) 79 | 80 | if preaa in (1,2): 81 | horizontal = core.std.Convolution(Yclip, matrix=[1, 2, 7, 2, 1],mode='h') 82 | vertical = core.std.Convolution(Yclip, matrix=[1, 2, 7, 2, 1],mode='v') 83 | clip = LDMerge(horizontal, vertical, Yclip, mrad=1) 84 | elif preaa ==0: 85 | clip=Yclip 86 | else: 87 | raise ValueError("") 88 | if enlarge: 89 | if nsize is None: 90 | nsize = 1 91 | if qual is None: 92 | qual = 2 93 | aa=nnedi3(clip,dh=True,field=1,nsize=nsize,nns=nns,qual=qual,device=device,mode=nnedi3_mode) 94 | last=aa.resize.Spline36(w,h) 95 | t=last 96 | else: 97 | if nsize is None: 98 | nsize = 3 99 | if qual is None: 100 | qual = 1 101 | aa=nnedi3(clip,dh=False,field=3,nsize=nsize,nns=nns,qual=qual,device=device,mode=nnedi3_mode) 102 | last=core.std.Merge(aa[0::2], aa[1::2]) 103 | if linedarken: 104 | last = FastLineDarkenMOD(last, strength=48, protection=5, luma_cap=191, threshold=5, thinning=0) 105 | 106 | 107 | if maskmode==1: 108 | mask=clip.tcanny.TCanny(sigma=1.5, t_h=20.0, t_l=8.0) 109 | mask=mt_expand_multi(mask, 'losange', planes=[0], sw=1, sh=1) 110 | if 
preaa==1: 111 | clip=Yclip 112 | last=core.std.MaskedMerge(clip, last, mask) 113 | elif maskmode==2: 114 | mask=last.tcanny.TCanny(sigma=1.5, t_h=20.0, t_l=8.0) 115 | mask=mt_expand_multi(mask, 'losange', planes=[0], sw=1, sh=1) 116 | if preaa==1: 117 | clip=Yclip 118 | last=core.std.MaskedMerge(clip, last, mask) 119 | elif isinstance(maskmode,vs.VideoNode): 120 | if preaa==1: 121 | clip=Yclip 122 | last=core.std.MaskedMerge(clip, last, maskmode) 123 | if isYUV: 124 | last = core.std.ShufflePlanes([last,src],[0,1,2], colorfamily=vs.YUV) 125 | return last 126 | 127 | def mwaa( 128 | clip: vs.VideoNode, 129 | aa_y: bool = True, 130 | aa_c: bool = False, 131 | cs_h: int = 0, 132 | cs_v: int = 0, 133 | aa_cmask: bool = True, 134 | kernel_y: int = 2, 135 | kernel_c: int = 1, 136 | show: bool = False, 137 | nnedi3_mode: str = "znedi3", 138 | device: int = 0, 139 | ) -> vs.VideoNode: 140 | """ 141 | Anti-Aliasing function 142 | Steal from other one's script. Most likely written by mawen1250. 143 | """ 144 | if clip.format.bits_per_sample != 16: 145 | raise vs.Error('mwaa: Only 16bit supported') 146 | 147 | ## internal functions 148 | def aa_kernel_vertical(clip): 149 | clip_blk = clip.std.BlankClip(height=clip.height * 2) 150 | clip_y = getplane(clip, 0) 151 | if kernel_y == 2: 152 | clip_y = clip_y.eedi2.EEDI2(field=1) 153 | else: 154 | clip_y = nnedi3(clip_y,field=1, dh=True,device=device,mode=nnedi3_mode) 155 | clip_u = getplane(clip, 1) 156 | clip_v = getplane(clip, 2) 157 | if kernel_c == 2: 158 | clip_u = clip_u.eedi2.EEDI2(field=1) 159 | clip_v = clip_v.eedi2.EEDI2(field=1) 160 | else: 161 | clip_u = nnedi3(clip_u,field=1, dh=True,device=device,mode=nnedi3_mode) 162 | clip_v = nnedi3(clip_v,field=1, dh=True,device=device,mode=nnedi3_mode) 163 | return core.std.ShufflePlanes([clip_y if aa_y else clip_blk, clip_u if aa_c else clip_blk, clip_v if aa_c else clip_blk], [0,0,0] if aa_c else [0,1,2], vs.YUV) 164 | 165 | def aa_resample_vercial(clip, height, 
chroma_shift=0): 166 | return clip.fmtc.resample(h=height, sx=0, sy=[-0.5, -0.5 * (1 << clip.format.subsampling_h) - chroma_shift * 2], kernel=["spline36", "bicubic"], a1=0, a2=0.5, planes=[3 if aa_y else 1,3 if aa_c else 1,3 if aa_c else 1]) 167 | 168 | ## parameters 169 | aa_cmask = aa_c and aa_cmask 170 | 171 | ## kernel 172 | aa = aa_resample_vercial(aa_kernel_vertical(clip.std.Transpose()), clip.width, cs_h) 173 | aa = aa_resample_vercial(aa_kernel_vertical(aa.std.Transpose()), clip.height, cs_v) 174 | 175 | ## mask 176 | aamask = clip.tcanny.TCanny(sigma=1.5, t_h=20.0, t_l=8.0, planes=[0]) 177 | aamask = mt_expand_multi(aamask, 'losange', planes=[0], sw=1, sh=1) 178 | 179 | ## merge 180 | if aa_y: 181 | if aa_c: 182 | if aa_cmask: 183 | aa_merge = core.std.MaskedMerge(clip, aa, aamask, [0,1,2], True) 184 | else: 185 | aa_merge = core.std.MaskedMerge(clip, aa, aamask, [0], False) 186 | aa_merge = core.std.ShufflePlanes([aa_merge, aa], [0,1,2], vs.YUV) 187 | else: 188 | aa_merge = core.std.MaskedMerge(clip, aa, aamask, [0], False) 189 | else: 190 | if aa_c: 191 | if aa_cmask: 192 | aa_merge = core.std.MaskedMerge(clip, aa, aamask, [1,2], True) 193 | else: 194 | aa_merge = core.std.ShufflePlanes([clip, aa], [0,1,2], vs.YUV) 195 | else: 196 | aa_merge = clip 197 | 198 | ## output 199 | return aamask if show else aa_merge 200 | 201 | def drAA( 202 | src: vs.VideoNode, 203 | drf: int = 0.5, 204 | lraa:bool = True, 205 | nnedi3_mode: str = "znedi3", 206 | device: int = -1, 207 | pp: bool = True, 208 | ) -> vs.VideoNode: 209 | """ 210 | down resolution Anti-Aliasing for anime with heavy Aliasing 211 | only process luma 212 | ####### 213 | drf:set down resolution factor,default is 0.5,range:0.5-1 214 | lraa:enable XSAA after down resolution,default is True 215 | opencl:use nnedi3cl and TcannyCL,default is False,means using znedi3 and Tcanny 216 | device:select device for opencl 217 | pp:enable post-process,sharpen、linedarken、dering,default is True 218 | """ 219 | 
src=src.fmtc.bitdepth(bits=16) 220 | w=src.width 221 | h=src.height 222 | if src.format.color_family==vs.RGB: 223 | raise TypeError("RGB is unsupported") 224 | isYUV=src.format.color_family==vs.YUV 225 | Y = getY(src) 226 | if not 0.5<=drf<=1: 227 | raise ValueError("down resolution factor(drf) must between 0.5 and 1") 228 | 229 | ##aa 230 | aaY=core.resize.Bicubic(Y,int(w*drf),int(h*drf)) 231 | if lraa: 232 | aaY=XSAA(aaY,aamode=0,preaa=2,maskmode=0,nsize=3,nnedi3_mode=nnedi3_mode,device=device) 233 | if nnedi3_mode=="nnedi3cl": 234 | aaY=nnedi3(aaY,field=1,dh=True,dw=True,nsize=3,nns=1,device=device,mode=nnedi3_mode) 235 | else: 236 | aaY=nnedi3(aaY,field=1,dh=True,nsize=3,nns=1,device=device,mode=nnedi3_mode).std.Transpose() 237 | aaY=nnedi3(aaY,field=1,dh=True,nsize=3,nns=1,device=device,mode=nnedi3_mode).std.Transpose() 238 | if int(w*drf)*2!=w or int(h*drf)*2!=h: 239 | aaY=core.resize.Spline36(aaY,w,h) 240 | 241 | ##mask 242 | mask1=Expr([aaY,Y],["x y - abs 16 * 12288 < 0 65535 ?"]).rgvs.RemoveGrain(3).rgvs.RemoveGrain(11) 243 | mask2=core.tcanny.TCanny(Y, sigma=0, mode=1,op=1,gmmax=30) 244 | mask2=Expr(mask2,"x 14000 < 0 65535 ?").rgvs.RemoveGrain(3).rgvs.RemoveGrain(11) 245 | mask=Expr([mask1,mask2],"x y min 256 < 0 65535 ?").rgvs.RemoveGrain(11) 246 | 247 | #pp 248 | if pp: 249 | aaY=FastLineDarkenMOD(aaY,strength=96, protection=3, luma_cap=200, threshold=5, thinning=24) 250 | aaY=xsUSM(aaY) 251 | aaY=abcxyz(aaY) 252 | #aaY=SADring(aaY) 253 | 254 | low=core.rgvs.RemoveGrain(aaY,11) 255 | hi=core.std.MakeDiff(aaY, low) 256 | en=mwenhance(hi,chroma=False,Strength=2.5) 257 | hi=LimitFilter(en,hi, thr=0.3, elast=8, brighten_thr=0.15) 258 | aaY=core.std.MergeDiff(aaY, hi) 259 | #merge 260 | Ylast=core.std.MaskedMerge(Y, aaY, mask) 261 | if isYUV: 262 | last= core.std.ShufflePlanes([Ylast,src], [0,1,2], vs.YUV) 263 | else: 264 | last= Ylast 265 | return last -------------------------------------------------------------------------------- /xvs/metrics.py: 
-------------------------------------------------------------------------------- 1 | from .utils import * 2 | 3 | def _IQA_downsample(clip: vs.VideoNode) -> vs.VideoNode: 4 | """Downsampler for image quality assessment model. 5 | 6 | The “clip” is first filtered by a 2x2 average filter, and then down-sampled by a factor of 2. 7 | """ 8 | 9 | return core.std.Convolution(clip, [1, 1, 0, 1, 1, 0, 0, 0, 0]).resize.Point(clip.width // 2, clip.height // 2, src_left=-1, src_top=-1) 10 | 11 | #modified from muvsfunc 12 | def SSIM( 13 | clip1: vs.VideoNode, 14 | clip2: vs.VideoNode, 15 | plane: int | None = None, 16 | downsample: bool = True, 17 | k1: float = 0.01, 18 | k2: float = 0.03, 19 | fun: VSFuncType | float | None = None, 20 | dynamic_range: int = 1, 21 | show_map: bool = False 22 | ) -> vs.VideoNode: 23 | """Structural SIMilarity Index Calculator 24 | 25 | The Structural SIMilarity (SSIM) index is a method for measuring the similarity between two images. 26 | It is based on the hypothesis that the HVS is highly adapted for extracting structural information, 27 | which compares local patterns of pixel intensities that have been normalized for luminance and contrast. 28 | 29 | The mean SSIM (MSSIM) index value of the distorted image will be stored as frame property 'PlaneSSIM' in the output clip. 30 | 31 | The value of SSIM measures the structural similarity in an image. 32 | The higher the SSIM score, the higher the image perceptual quality. 33 | If "clip1" == "clip2", SSIM = 1. 34 | 35 | All the internal calculations are done at 32-bit float, only one channel of the image will be processed. 36 | 37 | Args: 38 | clip1: The distorted clip, will be copied to output if "show_map" is False. 39 | 40 | clip2: Reference clip, must be of the same format and dimension as the "clip1". 41 | 42 | plane: (int) Specify which plane to be processed. Default is None. 
43 | 44 | downsample: (bool) Whether to average the clips over local 2x2 window and downsample by a factor of 2 before calculation. 45 | Default is True. 46 | 47 | k1, k2: (float) Constants in the SSIM index formula. 48 | According to the paper, the performance of the SSIM index algorithm is fairly insensitive to variations of these values. 49 | Default are 0.01 and 0.03. 50 | 51 | fun: (function or float) The function of how the clips are filtered. 52 | If it is None, it will be set to a gaussian filter whose standard deviation is 1.5. 53 | Note that the size of gaussian kernel is different from the one in MATLAB. 54 | If it is a float, it specifies the standard deviation of the gaussian filter. (sigma in core.tcanny.TCanny) 55 | According to the paper, the quality map calculated from gaussian filter exhibits a locally isotropic property, 56 | which prevents the present of undesirable “blocking” artifacts in the resulting SSIM index map. 57 | Default is None. 58 | 59 | dynamic_range: (float) Dynamic range of the internal float point clip. Default is 1. 60 | 61 | show_map: (bool) Whether to return SSIM index map. If not, "clip1" will be returned. Default is False. 62 | 63 | Ref: 64 | [1] Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. 65 | IEEE transactions on image processing, 13(4), 600-612. 66 | [2] https://ece.uwaterloo.ca/~z70wang/research/ssim/. 
67 | 68 | """ 69 | 70 | funcName = 'SSIM' 71 | 72 | if not isinstance(clip1, vs.VideoNode): 73 | raise TypeError(funcName + ': \"clip1\" must be a clip!') 74 | if not isinstance(clip2, vs.VideoNode): 75 | raise TypeError(funcName + ': \"clip2\" must be a clip!') 76 | 77 | if clip1.format.id != clip2.format.id: 78 | raise ValueError(funcName + ': \"clip1\" and \"clip2\" must be of the same format!') 79 | if clip1.width != clip2.width or clip1.height != clip2.height: 80 | raise ValueError(funcName + ': \"clip1\" and \"clip2\" must be of the same width and height!') 81 | 82 | c1 = (k1 * dynamic_range) ** 2 83 | c2 = (k2 * dynamic_range) ** 2 84 | 85 | if fun is None: 86 | fun = functools.partial(core.tcanny.TCanny, sigma=1.5, mode=-1) 87 | elif isinstance(fun, (int, float)): 88 | fun = functools.partial(core.tcanny.TCanny, sigma=fun, mode=-1) 89 | elif not callable(fun): 90 | raise TypeError(funcName + ': \"fun\" must be a function or a float!') 91 | 92 | # Store the "clip1" 93 | clip1_src = clip1 94 | 95 | # Convert to float type grayscale image 96 | clip1 = getplane(clip1, plane) 97 | clip2 = getplane(clip2, plane) 98 | clip1 = core.fmtc.bitdepth(clip1,bits=32) 99 | clip2 = core.fmtc.bitdepth(clip2,bits=32) 100 | 101 | # Filtered by a 2x2 average filter and then down-sampled by a factor of 2 102 | if downsample: 103 | clip1 = _IQA_downsample(clip1) 104 | clip2 = _IQA_downsample(clip2) 105 | 106 | # Core algorithm 107 | mu1 = fun(clip1) 108 | mu2 = fun(clip2) 109 | mu1_sq = Expr([mu1], ['x dup *']) 110 | mu2_sq = Expr([mu2], ['x dup *']) 111 | mu1_mu2 = Expr([mu1, mu2], ['x y *']) 112 | sigma1_sq_pls_mu1_sq = fun(Expr([clip1], ['x dup *'])) 113 | sigma2_sq_pls_mu2_sq = fun(Expr([clip2], ['x dup *'])) 114 | sigma12_pls_mu1_mu2 = fun(Expr([clip1, clip2], ['x y *'])) 115 | 116 | if c1 > 0 and c2 > 0: 117 | expr = '2 x * {c1} + 2 y x - * {c2} + * z a + {c1} + b c - d e - + {c2} + * /'.format(c1=c1, c2=c2) 118 | expr_clips = [mu1_mu2, sigma12_pls_mu1_mu2, mu1_sq, mu2_sq, 
sigma1_sq_pls_mu1_sq, mu1_sq, sigma2_sq_pls_mu2_sq, mu2_sq] 119 | ssim_map = Expr(expr_clips, [expr]) 120 | else: 121 | denominator1 = Expr([mu1_sq, mu2_sq], ['x y + {c1} +'.format(c1=c1)]) 122 | denominator2 = Expr([sigma1_sq_pls_mu1_sq, mu1_sq, sigma2_sq_pls_mu2_sq, mu2_sq], ['x y - z a - + {c2} +'.format(c2=c2)]) 123 | 124 | numerator1_expr = '2 z * {c1} +'.format(c1=c1) 125 | numerator2_expr = '2 a z - * {c2} +'.format(c2=c2) 126 | expr = 'x y * 0 > {numerator1} {numerator2} * x y * / x 0 = not y 0 = and {numerator1} x / {i} ? ?'.format(numerator1=numerator1_expr, 127 | numerator2=numerator2_expr, i=1) 128 | ssim_map = Expr([denominator1, denominator2, mu1_mu2, sigma12_pls_mu1_mu2], [expr]) 129 | 130 | # The following code is modified from mvf.PlaneStatistics(), which is used to compute the mean of the SSIM index map as MSSIM 131 | map_mean = core.std.PlaneStats(ssim_map, plane=0, prop='PlaneStats') 132 | 133 | def _PlaneSSIMTransfer(n: int, f: List[vs.VideoFrame]) -> vs.VideoFrame: 134 | fout = f[0].copy() 135 | fout.props['PlaneSSIM'] = f[1].props['PlaneStatsAverage'] 136 | return fout 137 | 138 | output_clip = ssim_map if show_map else clip1_src 139 | output_clip = core.std.ModifyFrame(output_clip, [output_clip, map_mean], selector=_PlaneSSIMTransfer) 140 | 141 | return output_clip 142 | 143 | #modified from muvsfunc 144 | def GMSD( 145 | clip1: vs.VideoNode, 146 | clip2: vs.VideoNode, 147 | plane: int | None = None, 148 | downsample: bool = True, 149 | c: float = 0.0026, 150 | show_map: bool = False 151 | ) -> vs.VideoNode: 152 | """Gradient Magnitude Similarity Deviation Calculator 153 | 154 | GMSD is a new effective and efficient image quality assessment (IQA) model, which utilizes the pixel-wise gradient magnitude similarity (GMS) 155 | between the reference and distorted images combined with standard deviation of the GMS map to predict perceptual image quality. 
156 | 157 | The distortion degree of the distorted image will be stored as frame property 'PlaneGMSD' in the output clip. 158 | 159 | The value of GMSD reflects the range of distortion severities in an image. 160 | The lowerer the GMSD score, the higher the image perceptual quality. 161 | If "clip1" == "clip2", GMSD = 0. 162 | 163 | All the internal calculations are done at 32-bit float, only one channel of the image will be processed. 164 | 165 | Args: 166 | clip1: The distorted clip, will be copied to output if "show_map" is False. 167 | 168 | clip2: Reference clip, must be of the same format and dimension as the "clip1". 169 | 170 | plane: (int) Specify which plane to be processed. Default is None. 171 | 172 | downsample: (bool) Whether to average the clips over local 2x2 window and downsample by a factor of 2 before calculation. 173 | Default is True. 174 | 175 | c: (float) A positive constant that supplies numerical stability. 176 | According to the paper, for all the test databases, GMSD shows similar preference to the value of c. 177 | Default is 0.0026. 178 | 179 | show_map: (bool) Whether to return GMS map. If not, "clip1" will be returned. Default is False. 180 | 181 | Ref: 182 | [1] Xue, W., Zhang, L., Mou, X., & Bovik, A. C. (2014). Gradient magnitude similarity deviation: 183 | A highly efficient perceptual image quality index. IEEE Transactions on Image Processing, 23(2), 684-695. 184 | [2] http://www4.comp.polyu.edu.hk/~cslzhang/IQA/GMSD/GMSD.htm. 
185 | 186 | """ 187 | 188 | funcName = 'GMSD' 189 | 190 | if not isinstance(clip1, vs.VideoNode): 191 | raise TypeError(funcName + ': \"clip1\" must be a clip!') 192 | if not isinstance(clip2, vs.VideoNode): 193 | raise TypeError(funcName + ': \"clip2\" must be a clip!') 194 | 195 | if clip1.format.id != clip2.format.id: 196 | raise ValueError(funcName + ': \"clip1\" and \"clip2\" must be of the same format!') 197 | if clip1.width != clip2.width or clip1.height != clip2.height: 198 | raise ValueError(funcName + ': \"clip1\" and \"clip2\" must be of the same width and height!') 199 | 200 | # Store the "clip1" 201 | clip1_src = clip1 202 | 203 | # Convert to float type grayscale image 204 | clip1 = getplane(clip1, plane) 205 | clip2 = getplane(clip2, plane) 206 | clip1 = core.fmtc.bitdepth(clip1,bits=32) 207 | clip2 = core.fmtc.bitdepth(clip2,bits=32) 208 | 209 | # Filtered by a 2x2 average filter and then down-sampled by a factor of 2, as in the implementation of SSIM 210 | if downsample: 211 | clip1 = _IQA_downsample(clip1) 212 | clip2 = _IQA_downsample(clip2) 213 | 214 | # Calculate gradients based on Prewitt filter 215 | clip1_dx = core.std.Convolution(clip1, [1, 0, -1, 1, 0, -1, 1, 0, -1], divisor=1, saturate=False) 216 | clip1_dy = core.std.Convolution(clip1, [1, 1, 1, 0, 0, 0, -1, -1, -1], divisor=1, saturate=False) 217 | clip1_grad_squared = Expr([clip1_dx, clip1_dy], ['x dup * y dup * +']) 218 | 219 | clip2_dx = core.std.Convolution(clip2, [1, 0, -1, 1, 0, -1, 1, 0, -1], divisor=1, saturate=False) 220 | clip2_dy = core.std.Convolution(clip2, [1, 1, 1, 0, 0, 0, -1, -1, -1], divisor=1, saturate=False) 221 | clip2_grad_squared = Expr([clip2_dx, clip2_dy], ['x dup * y dup * +']) 222 | 223 | # Compute the gradient magnitude similarity (GMS) map 224 | quality_map = Expr([clip1_grad_squared, clip2_grad_squared], ['2 x y * sqrt * {c} + x y + {c} + /'.format(c=c)]) 225 | 226 | # The following code is modified from mvf.PlaneStatistics(), which is used to compute the 
standard deviation of the GMS map as GMSD 227 | map_mean = core.std.PlaneStats(quality_map, plane=0, prop='PlaneStats') 228 | 229 | def _PlaneSDFrame(n: int, f: vs.VideoFrame, clip: vs.VideoNode, core: vs.Core) -> vs.VideoNode: 230 | mean = f.props['PlaneStatsAverage'] 231 | expr = "x {mean} - dup *".format(mean=mean) 232 | return Expr(clip, expr) 233 | if hasattr(core,"akarin"): 234 | mean = "y.PlaneStatsAverage" 235 | SDclip = core.akarin.Expr([quality_map, map_mean], "x {mean} - dup *".format(mean=mean)) 236 | else: 237 | SDclip = core.std.FrameEval(quality_map, functools.partial(_PlaneSDFrame, clip=quality_map, core=core), map_mean) 238 | 239 | SDclip = core.std.PlaneStats(SDclip, plane=0, prop='PlaneStats') 240 | 241 | def _PlaneGMSDTransfer(n: int, f: List[vs.VideoFrame]) -> vs.VideoFrame: 242 | fout = f[0].copy() 243 | fout.props['PlaneGMSD'] = math.sqrt(f[1].props['PlaneStatsAverage']) # type: ignore 244 | return fout 245 | output_clip = quality_map if show_map else clip1_src 246 | output_clip = core.std.ModifyFrame(output_clip, [output_clip, SDclip], selector=_PlaneGMSDTransfer) 247 | 248 | return output_clip 249 | 250 | 251 | def getsharpness( 252 | clip: vs.VideoNode, 253 | show: bool = False, 254 | usePropExpr: bool = False, 255 | ) -> vs.VideoNode: 256 | luma=getY(clip).fmtc.bitdepth(bits=16) 257 | blur=core.rgvs.RemoveGrain(luma, 20) 258 | dif=Expr([luma,blur],[f"x y - 65535 / 2 pow 65535 *"]) 259 | dif=core.std.PlaneStats(dif) 260 | 261 | if hasattr(core,"akarin") and hasattr(core.akarin,"PropExpr") and usePropExpr: 262 | last=core.akarin.PropExpr([clip,dif],lambda: dict(sharpness='y.PlaneStatsAverage 65535 *')) 263 | else: 264 | def calc(n,f): 265 | fout=f[1].copy() 266 | fout.props["sharpness"]=f[0].props["PlaneStatsAverage"]*65535 267 | return fout 268 | 269 | last=core.std.ModifyFrame(clip,[dif,clip],calc) 270 | return core.text.FrameProps(last,"sharpness",scale=2) if show else last 271 | 272 | 273 | 
-------------------------------------------------------------------------------- /xvs/props.py: -------------------------------------------------------------------------------- 1 | from .utils import * 2 | from .metrics import SSIM,GMSD 3 | 4 | def statsinfo2csv( 5 | clip: vs.VideoNode, 6 | plane: PlanesType = None, 7 | Max: bool = True, 8 | Min: bool = True, 9 | Avg: bool = False, 10 | bits: int = 8, 11 | namebase: str | None = None, 12 | ) -> vs.VideoNode: 13 | """ 14 | write PlaneStats(Max,Min,Avg) to csv 15 | """ 16 | 17 | cbits=clip.format.bits_per_sample 18 | cfamily=clip.format.color_family 19 | ######### 20 | def info(clip,t,p): 21 | statsclip=core.std.PlaneStats(clip, plane=p) 22 | txt = open(t,'w') 23 | ############# 24 | head="n" 25 | head+=",Max" if Max else "" 26 | head+=",Min" if Min else "" 27 | head+=",Avg" if Avg else "" 28 | head+="\n" 29 | txt.write(head) 30 | ############# 31 | def write(n, f, clip, core,Max,Min,Avg,bits): 32 | ma = int(round(f.props.PlaneStatsMax*(1< vs.VideoNode: 83 | """ 84 | Calculate SSIM and write to a csv 85 | Args: 86 | clip1: The distorted clip, will be copied to output. 87 | 88 | clip2: Reference clip, must be of the same format and dimension as the "clip1". 89 | 90 | file: output file name 91 | 92 | planes: (int/int[]) Specify which planes to be processed. Default is None. 93 | 94 | downsample: (bool) Whether to average the clips over local 2x2 window and downsample by a factor of 2 before calculation. 95 | Default is True. 96 | 97 | k1, k2: (int) Constants in the SSIM index formula. 98 | According to the paper, the performance of the SSIM index algorithm is fairly insensitive to variations of these values. 99 | Default are 0.01 and 0.03. 100 | 101 | fun: (function or float) The function of how the clips are filtered. 102 | If it is None, it will be set to a gaussian filter whose standard deviation is 1.5. Note that the size of gaussian kernel is different from the one in MATLAB. 
103 | If it is a float, it specifies the standard deviation of the gaussian filter. (sigma in core.tcanny.TCanny) 104 | According to the paper, the quality map calculated from gaussian filter exhibits a locally isotropic property, 105 | which prevents the present of undesirable “blocking” artifacts in the resulting SSIM index map. 106 | Default is None. 107 | 108 | dynamic_range: (float) Dynamic range of the internal float point clip. Default is 1. 109 | """ 110 | isYUV=clip1.format.color_family==vs.YUV 111 | isRGB=clip1.format.color_family==vs.RGB 112 | isGRAY=clip1.format.color_family==vs.GRAY 113 | if isinstance(planes,int): 114 | planes=[planes] 115 | if isGRAY: 116 | clip=SSIM(clip1, clip2, plane=0, downsample=downsample, k1=k1, k2=k2, fun=fun, dynamic_range=dynamic_range, show_map=False) 117 | txt = open(file,'w') 118 | txt.write("n,gary\n") 119 | def tocsv(n, f, clip ,core): 120 | txt.write(str(n)+","+str(f.props.PlaneSSIM)+"\n") 121 | return clip 122 | txt.close() 123 | last=core.std.FrameEval(clip,functools.partial(tocsv, clip=clip,core=core),prop_src=clip) 124 | elif isYUV: 125 | if planes is None: 126 | planes=[0,1,2] 127 | Y=SSIM(getY(clip1),getY(clip2),plane=0, downsample=downsample, k1=k1, k2=k2, fun=fun, dynamic_range=dynamic_range, show_map=False) if 0 in planes else getY(clip1) 128 | U=SSIM(getU(clip1),getU(clip2),plane=0, downsample=downsample, k1=k1, k2=k2, fun=fun, dynamic_range=dynamic_range, show_map=False) if 1 in planes else getU(clip1) 129 | V=SSIM(getV(clip1),getV(clip2),plane=0, downsample=downsample, k1=k1, k2=k2, fun=fun, dynamic_range=dynamic_range, show_map=False) if 2 in planes else getV(clip1) 130 | txt = open(file,'w') 131 | head="n" 132 | head+=",Y" if 0 in planes else "" 133 | head+=",U" if 1 in planes else "" 134 | head+=",V" if 2 in planes else "" 135 | txt.write(head+"\n") 136 | def tocsv(n,f,clip,core): 137 | line=str(n) 138 | line+=(","+str(f[0].props.PlaneSSIM)) if 0 in planes else "" 139 | 
line+=(","+str(f[1].props.PlaneSSIM)) if 1 in planes else "" 140 | line+=(","+str(f[2].props.PlaneSSIM)) if 2 in planes else "" 141 | txt.write(line+"\n") 142 | return clip 143 | txt.close() 144 | last=core.std.FrameEval(clip1,functools.partial(tocsv, clip=clip1,core=core),prop_src=[Y,U,V]) 145 | elif isRGB: 146 | if planes is None: 147 | planes=[0,1,2] 148 | R=SSIM(getY(clip1),getY(clip2),plane=0, downsample=downsample, k1=k1, k2=k2, fun=fun, dynamic_range=dynamic_range, show_map=False) if 0 in planes else getY(clip1) 149 | G=SSIM(getU(clip1),getU(clip2),plane=0, downsample=downsample, k1=k1, k2=k2, fun=fun, dynamic_range=dynamic_range, show_map=False) if 1 in planes else getU(clip1) 150 | B=SSIM(getV(clip1),getV(clip2),plane=0, downsample=downsample, k1=k1, k2=k2, fun=fun, dynamic_range=dynamic_range, show_map=False) if 2 in planes else getV(clip1) 151 | txt = open(file,'w') 152 | head="n" 153 | head+=",R" if 0 in planes else "" 154 | head+=",G" if 1 in planes else "" 155 | head+=",B" if 2 in planes else "" 156 | txt.write(head+"\n") 157 | def tocsv(n,f,clip,core): 158 | line=str(n) 159 | line+=(","+str(f[0].props.PlaneSSIM)) if 0 in planes else "" 160 | line+=(","+str(f[1].props.PlaneSSIM)) if 1 in planes else "" 161 | line+=(","+str(f[2].props.PlaneSSIM)) if 2 in planes else "" 162 | txt.write(line+"\n") 163 | return clip 164 | txt.close() 165 | last=core.std.FrameEval(clip1,functools.partial(tocsv, clip=clip1,core=core),prop_src=[R,G,B]) 166 | else: 167 | raise TypeError("unsupport format") 168 | return last 169 | 170 | def GMSD2csv( 171 | clip1: vs.VideoNode, 172 | clip2: vs.VideoNode, 173 | file: str = "GMSD.csv", 174 | planes: PlanesType = None, 175 | downsample: bool = True, 176 | c: float = 0.0026, 177 | ) -> vs.VideoNode: 178 | """ 179 | Calculate GMSD and write to a csv 180 | 181 | GMSD is a new effective and efficient image quality assessment (IQA) model, which utilizes the pixel-wise gradient magnitude similarity (GMS) 182 | between the reference and 
distorted images combined with standard deviation of the GMS map to predict perceptual image quality. 183 | 184 | The distortion degree of the distorted image will be stored as frame property 'PlaneGMSD' in the output clip. 185 | 186 | The value of GMSD reflects the range of distortion severities in an image. 187 | The lowerer the GMSD score, the higher the image perceptual quality. 188 | If "clip1" == "clip2", GMSD = 0. 189 | 190 | Args: 191 | clip1: The distorted clip, will be copied to output. 192 | 193 | clip2: Reference clip, must be of the same format and dimension as the "clip1". 194 | 195 | file: output file name 196 | 197 | planes: (int/int[]) Specify which planes to be processed. Default is None. 198 | 199 | downsample: (bool) Whether to average the clips over local 2x2 window and downsample by a factor of 2 before calculation. 200 | Default is True. 201 | 202 | c: (float) A positive constant that supplies numerical stability. 203 | According to the paper, for all the test databases, GMSD shows similar preference to the value of c. 204 | Default is 0.0026. 205 | 206 | depth_args: (dict) Additional arguments passed to mvf.Depth() in the form of keyword arguments. 207 | Default is {}. 
208 | """ 209 | isYUV=clip1.format.color_family==vs.YUV 210 | isRGB=clip1.format.color_family==vs.RGB 211 | isGRAY=clip1.format.color_family==vs.GRAY 212 | if isinstance(planes,int): 213 | planes=[planes] 214 | if isGRAY: 215 | clip=GMSD(clip1, clip2, plane=0, downsample=downsample, c=c,show_map=False) 216 | txt = open(file,'w') 217 | txt.write("n,gary\n") 218 | def tocsv(n, f, clip ,core): 219 | txt.write(str(n)+","+str(f.props.PlaneGMSD)+"\n") 220 | return clip 221 | txt.close() 222 | last=core.std.FrameEval(clip,functools.partial(tocsv, clip=clip,core=core),prop_src=clip) 223 | elif isYUV: 224 | if planes is None: 225 | planes=[0,1,2] 226 | Y=GMSD(getY(clip1),getY(clip2),plane=0, downsample=downsample, c=c, show_map=False) if 0 in planes else getY(clip1) 227 | U=GMSD(getU(clip1),getU(clip2),plane=0, downsample=downsample, c=c, show_map=False) if 1 in planes else getU(clip1) 228 | V=GMSD(getV(clip1),getV(clip2),plane=0, downsample=downsample, c=c, show_map=False) if 2 in planes else getV(clip1) 229 | txt = open(file,'w') 230 | head="n" 231 | head+=",Y" if 0 in planes else "" 232 | head+=",U" if 1 in planes else "" 233 | head+=",V" if 2 in planes else "" 234 | txt.write(head+"\n") 235 | def tocsv(n,f,clip,core): 236 | line=str(n) 237 | line+=(","+str(f[0].props.PlaneGMSD)) if 0 in planes else "" 238 | line+=(","+str(f[1].props.PlaneGMSD)) if 1 in planes else "" 239 | line+=(","+str(f[2].props.PlaneGMSD)) if 2 in planes else "" 240 | txt.write(line+"\n") 241 | return clip 242 | txt.close() 243 | last=core.std.FrameEval(clip1,functools.partial(tocsv, clip=clip1,core=core),prop_src=[Y,U,V]) 244 | elif isRGB: 245 | if planes is None: 246 | planes=[0,1,2] 247 | R=GMSD(getY(clip1),getY(clip2),plane=0, downsample=downsample, c=c, show_map=False) if 0 in planes else getY(clip1) 248 | G=GMSD(getU(clip1),getU(clip2),plane=0, downsample=downsample, c=c, show_map=False) if 1 in planes else getU(clip1) 249 | B=GMSD(getV(clip1),getV(clip2),plane=0, downsample=downsample, c=c, 
show_map=False) if 2 in planes else getV(clip1) 250 | txt = open(file,'w') 251 | head="n" 252 | head+=",R" if 0 in planes else "" 253 | head+=",G" if 1 in planes else "" 254 | head+=",B" if 2 in planes else "" 255 | txt.write(head+"\n") 256 | def tocsv(n,f,clip,core): 257 | line=str(n) 258 | line+=(","+str(f[0].props.PlaneGMSD)) if 0 in planes else "" 259 | line+=(","+str(f[1].props.PlaneGMSD)) if 1 in planes else "" 260 | line+=(","+str(f[2].props.PlaneGMSD)) if 2 in planes else "" 261 | txt.write(line+"\n") 262 | return clip 263 | txt.close() 264 | last=core.std.FrameEval(clip1,functools.partial(tocsv, clip=clip1,core=core),prop_src=[R,G,B]) 265 | else: 266 | raise TypeError("unsupport format") 267 | return last 268 | 269 | def csv2props( 270 | clip: vs.VideoNode, 271 | file: str, 272 | sep: str = "\t", 273 | props: list[list] | None = None, 274 | rawfilter: Callable | None = None, 275 | strict: bool = False, 276 | charset: str = "utf-8", 277 | ) -> vs.VideoNode: 278 | """ 279 | csv file must contain a column use "n" as title to log frame number,all values should be int >=0. 280 | props: 281 | should be a list contain a list with "title of values in csv","prop name you want set","fuction you want use to process raw value" ,"default when value not in csv" in order. 282 | If leave props unset,use all titles in csv. 283 | If leave "prop name you want set" None,it will same as "title of values in csv" 284 | If leave "fuction you want use to process raw value" None,will use raw value as an string. 285 | If leave "default when value not in csv" None,will use a empty string as default value. 286 | 287 | Notice:Default value only for frame number not in csv,will not affect line with missing value and sep. 288 | rawfilter: 289 | overwrite fuction use to process raw value when "fuction you want use to process raw value" unset. 290 | strict: 291 | If True, will throw an exception instead of using default value when frame number not in csv. 
And also force "n column" only contain frame number in clip strictly. 292 | If False,it also ignore same frame number in csv and use the last one.(maybe change in future) 293 | """ 294 | rawfilter=rawfilter if (rawfilter is not None and callable(rawfilter)) else lambda x:x 295 | with open(file,encoding=charset) as file: 296 | lines=[line.split(sep) for line in file.read().split("\n") if len(line.split(sep))>1] 297 | 298 | if "n" not in lines[0]: 299 | raise ValueError("""csv file must contain a column use "n" as title to log frame number,all values should be int >=0.""") 300 | 301 | if len(set(lines[0])) != len(lines[0]): 302 | raise ValueError("csv should not have columns with same title") 303 | 304 | titles=lines[0][:] 305 | titles.remove("n") 306 | nindex=lines[0].index("n") 307 | 308 | if strict: 309 | nlist=sorted([int(line[nindex]) for line in lines[1:]]) 310 | if nlist!=list(range(len(clip))): 311 | raise ValueError(""" "n column" should only contain frame number in clip strictly. 
""") 312 | datas={} 313 | for line in lines[1:]: 314 | if len(line) vs.VideoNode: 370 | """ 371 | write props which you chosen to csv 372 | you can rewrite tostring function to process props before write to csv 373 | 374 | props: A list contain the name of props you want to write to csv 375 | titles:A list contain titles of props 376 | output:path of output file,default is info.csv 377 | sep:the separator you want use,default is tab 378 | charset:the charset of csv file you want use,default is utf-8 379 | tostring:should be a function to process props before write to csv 380 | """ 381 | file=open(output,"w",encoding=charset) 382 | file.write(sep.join(["n"]+titles)) 383 | 384 | tostring=tostring if callable(tostring) else lambda x: x.decode("utf-8") if isinstance(x,bytes) else str(x) 385 | 386 | 387 | def tocsv(n,f,clip): 388 | file.write("\n"+sep.join([str(n)]+[tostring(eval("f.props."+i,globals(),{'f':f})) for i in props])) 389 | 390 | return clip 391 | file.close() 392 | return core.std.FrameEval(clip, functools.partial(tocsv, clip=clip),prop_src=clip) 393 | 394 | def getPictType( 395 | clip: vs.VideoNode, 396 | txt: str | None = None, 397 | show: bool = True, 398 | ) -> vs.VideoNode: 399 | """ 400 | getPictType 401 | """ 402 | sclip=core.std.PlaneStats(clip, plane=0) 403 | log = txt is not None 404 | if log: 405 | t = open(txt,'w') 406 | def __type(n, f, clip, core): 407 | ptype = str(f.props._PictType)[2] 408 | if log: 409 | t.write(str(n)+","+ptype) 410 | t.write('\n') 411 | if show: 412 | return core.text.Text(clip, "PictType:"+ptype) 413 | else: 414 | return clip 415 | if log: 416 | t.close() 417 | last = core.std.FrameEval(clip, functools.partial(__type, clip=clip,core=core),prop_src=sclip) 418 | return last 419 | -------------------------------------------------------------------------------- /xvs/enhance.py: -------------------------------------------------------------------------------- 1 | from .utils import * 2 | from .metrics import getsharpness 3 | 
@deprecated("No maintenance.")
def xsUSM(
    src: vs.VideoNode,
    blur: int = 11,
    limit: float = 1,
    elast: float = 4,
    maskclip: vs.VideoNode | None = None,
    plane: PlanesType = [0],
) -> vs.VideoNode:
    """
    xsUSM: xyx98's simple unsharp mask
    -----------------------------------------------
    blur:     how the blur clip is produced. An int means RemoveGrain(mode=blur),
              a list means Convolution(matrix=blur), and a VideoNode is used as
              the blur clip directly.
    limit:    how the sharpened result is limited:
              =0  no limiting
              >0  LimitFilter with thr=limit
              <0  Repair with mode=-limit
    elast:    elast passed to LimitFilter (only meaningful when limit>0)
    maskclip: optional mask used to merge the result back towards the source
    plane:    plane index or list of plane indices to process, default [0] (Y only)
    """
    def _sharpen_one(c, mclip):
        # build the low-frequency reference
        if isinstance(blur, int):
            soft = core.rgvs.RemoveGrain(c, blur)
        elif isinstance(blur, list):
            soft = core.std.Convolution(c, matrix=blur, planes=0)
        else:
            soft = blur
        # add the high-frequency difference back -> unsharp mask
        sharp = core.std.MergeDiff(c, core.std.MakeDiff(c, soft, planes=0), planes=0)
        # limit the sharpening per the `limit` convention
        if limit == 0:
            limited = sharp
        elif limit > 0:
            limited = LimitFilter(sharp, c, thr=limit, elast=elast)
        else:
            limited = core.rgvs.Repair(sharp, c, -limit)
        if isinstance(mclip, vs.VideoNode):
            return core.std.MaskedMerge(limited, c, mclip, planes=0)
        return limited

    if src.format.color_family == vs.GRAY:
        return _sharpen_one(src, maskclip)

    targets = [plane] if isinstance(plane, int) else plane
    planes_out = []
    for idx in range(3):
        cur = getplane(src, idx)
        if idx in targets:
            cur = _sharpen_one(cur, None if maskclip is None else getplane(maskclip, idx))
        planes_out.append(cur)
    return core.std.ShufflePlanes(planes_out, [0, 0, 0], vs.YUV)
@deprecated("No maintenance.")
def SharpenDetail(
    src: vs.VideoNode,
    limit: float = 4,
    thr: int = 32,
) -> vs.VideoNode:
    """
    SharpenDetail
    ------------------------------------
    idea comes from: https://forum.doom9.org/showthread.php?t=163598
    Not a direct port: there is no asharp in VapourSynth, so the sharpener
    is replaced by xsUSM.

    src:   YUV or GRAY clip; only luma is processed.
    limit: limit passed through to xsUSM.
    thr:   slope of the detail mask ("x y - thr *").

    Raises TypeError for color families other than YUV/GRAY.
    """
    cFormat = src.format.color_family
    depth = src.format.bits_per_sample
    if cFormat == vs.YUV:
        clip = getplane(src, 0)
    elif cFormat == vs.GRAY:
        clip = src
    else:
        # BUGFIX: previously raised TypeError("") with an empty message
        raise TypeError("SharpenDetail: only YUV and GRAY color families are supported")
    # NOTE(review): the depth/8 scaling looks wrong for high bit depths
    # (128*16/8 = 256 at 16bit, while the neutral value there is 32768).
    # Kept unchanged to preserve the original behavior — confirm before
    # relying on this with >8bit input.
    thr = thr*depth/8
    bia = 128*depth/8
    blur = core.rgvs.RemoveGrain(clip, 19)
    # detail mask: amplified difference between source and blur, offset by bia
    mask = Expr([clip, blur], "x y - "+str(thr)+" * "+str(bia)+" +")
    mask = core.rgvs.RemoveGrain(mask, 2)
    mask = inpand(mask, mode="both")
    mask = core.std.Deflate(mask)
    sharp = xsUSM(clip, blur=[1]*25, limit=limit, maskclip=None)
    last = core.std.MaskedMerge(sharp, clip, mask, planes=0)
    if cFormat == vs.YUV:
        last = core.std.ShufflePlanes([last, src], [0, 1, 2], vs.YUV)
    return last
#modify from old havsfunc
def FastLineDarkenMOD(
    c: vs.VideoNode,
    strength: float = 48,
    protection: float = 5,
    luma_cap: float = 191,
    threshold: float = 4,
    thinning: float = 0,
) -> vs.VideoNode:
    """
    ##############################
    # FastLineDarken 1.4x MT MOD #
    ##############################
    Written by Vectrangle (http://forum.doom9.org/showthread.php?t=82125)
    Didée: - Speed Boost, Updated: 11th May 2007
    Dogway - added protection option. 12-May-2011

    Parameters are:
    strength (integer)  - Line darkening amount, 0-256. Default 48. Represents the _maximum_ amount
                          that the luma will be reduced by, weaker lines will be reduced by
                          proportionately less.
    protection (integer)- Prevents the darkest lines from being darkened. Protection acts as a threshold.
                          Values range from 0 (no prot) to ~50 (protect everything)
    luma_cap (integer)  - value from 0 (black) to 255 (white), used to stop the darkening
                          determination from being 'blinded' by bright pixels, and to stop grey
                          lines on white backgrounds being darkened. Any pixels brighter than
                          luma_cap are treated as only being as bright as luma_cap. Lowering
                          luma_cap tends to reduce line darkening. 255 disables capping. Default 191.
    threshold (integer) - any pixels that were going to be darkened by an amount less than
                          threshold will not be touched. setting this to 0 will disable it, setting
                          it to 4 (default) is recommended, since often a lot of random pixels are
                          marked for very slight darkening and a threshold of about 4 should fix
                          them. Note if you set threshold too high, some lines will not be darkened
    thinning (integer)  - optional line thinning amount, 0-256. Setting this to 0 will disable it,
                          which is gives a _big_ speed increase. Note that thinning the lines will
                          inherently darken the remaining pixels in each line a little. Default 0.
    """
    if not isinstance(c, vs.VideoNode):
        raise vs.Error('FastLineDarkenMOD: this is not a clip')

    if c.format.color_family == vs.RGB:
        raise vs.Error('FastLineDarkenMOD: RGB format is not supported')

    # peak sample value: full-range max for integer formats, 1.0 for float
    peak = (1 << c.format.bits_per_sample) - 1 if c.format.sample_type == vs.INTEGER else 1.0

    # only luma is processed; the original clip is kept so chroma can be
    # shuffled back in at the end
    if c.format.color_family != vs.GRAY:
        c_orig = c
        c = getplane(c, 0)
    else:
        c_orig = None

    ## parameters ##
    Str = strength / 128
    lum = scale(luma_cap, peak)   # luma cap scaled to the clip's bit depth
    thr = scale(threshold, peak)  # darkening threshold, same scaling
    thn = thinning / 16

    ## filtering ##
    # exin: morphological close-ish pass; protection shrinks the Maximum's
    # reach so the darkest lines are left alone
    exin = c.std.Maximum(threshold=peak / (protection + 1)).std.Minimum()
    # darken proportionally to the (capped, thresholded) local difference
    thick = Expr([c, exin], expr=[f'y {lum} < y {lum} ? x {thr} + > x y {lum} < y {lum} ? - 0 ? {Str} * x +'])
    if thinning <= 0:
        last = thick
    else:
        # diff holds the same capped difference offset around scale(127) so
        # it can be stored in an unsigned clip
        diff = Expr([c, exin], expr=[f'y {lum} < y {lum} ? x {thr} + > x y {lum} < y {lum} ? - 0 ? {scale(127, peak)} +'])
        # linemask selects where thinning should blend thin vs thick
        linemask = Expr(diff.std.Minimum(),expr=[f'x {scale(127, peak)} - {thn} * {peak} +']).std.Convolution(matrix=[1, 1, 1, 1, 1, 1, 1, 1, 1])
        thin = Expr([c.std.Maximum(), diff], expr=[f'x y {scale(127, peak)} - {Str} 1 + * +'])
        last = core.std.MaskedMerge(thin, thick, linemask)

    if c_orig is not None:
        last = core.std.ShufflePlanes([last, c_orig], planes=[0, 1, 2], colorfamily=c_orig.format.color_family)
    return last
def mwenhance(
    diffClip: vs.VideoNode,
    chroma: bool = False,
    Strength: float = 2.0,
    Szrp8: int = 8,
    Spwr: int = 4,
    SdmpLo: int = 4,
    SdmpHi: int = 48,
    Soft: int = 0,
    useExpr: bool = False,
) -> vs.VideoNode:
    """
    high frequency enhance
    Steal from other one's script. Most likely written by mawen1250.
    use it on your high frequency layer.

    add useExpr for using Expr instead of lut

    diffClip: a difference/high-frequency clip centered on the neutral value
    chroma:   also process the chroma planes
    Strength: overall enhancement strength; <=0 returns the input unchanged
    Szrp8/Spwr/SdmpLo/SdmpHi: curve-shaping constants (8bit-relative zero point,
              power, low/high damping)
    Soft:     0 disables softening; >=1 full RemoveGrain(19) soften; in (0,1)
              a weighted merge toward the softened clip
    useExpr:  build an Expr string instead of a per-plane LUT
    """
    # constant values for sharpening LUT
    if Strength<=0:
        return diffClip

    sbitPS = diffClip.format.bits_per_sample
    bpsMul8 = 1 << (sbitPS - 8)          # scale factor from 8bit to clip depth
    floor = 0
    ceil = (1 << sbitPS) - 1
    neutral = 1 << (sbitPS - 1)          # the "no difference" midpoint
    miSpwr = 1 / Spwr
    Szrp = Szrp8 * bpsMul8
    Szrp8Sqr = Szrp8 * Szrp8
    SzrpMulStrength = Szrp * Strength
    Szrp8SqrPlusSdmpLo = Szrp8Sqr + SdmpLo
    SdmpHiEqual0 = SdmpHi == 0
    Szrp8DivSdmpHiPower4Plus1 = 1 if SdmpHiEqual0 else (Szrp8 / SdmpHi) ** 4 + 1

    if useExpr:
        #generate expr
        # RPN sub-expressions mirror the variables of the LUT path below
        diff = f' x {neutral} - '
        absDiff = f' {diff} abs '
        diff8 = f' {diff} {bpsMul8} / '
        absDiff8 = f' {diff8} abs '
        diff8Sqr = f' {diff8} {diff8} * '
        signMul = f' {diff} 0 >= 1 -1 ? '

        res1=f' {absDiff} {Szrp} / {miSpwr} pow {SzrpMulStrength} * {signMul} * '
        res2=f' {diff8Sqr} {Szrp8SqrPlusSdmpLo} * {diff8Sqr} {SdmpLo} + {Szrp8Sqr} * / '
        res3=' 0 ' if SdmpHiEqual0 else f' {absDiff8} {SdmpHi} / 4 pow '

        enhanced=f' {res1} {res2} * {Szrp8DivSdmpHiPower4Plus1} * 1 {res3} + /'
        # clamp to the valid sample range around neutral
        enhanced=f' {ceil} {floor} {neutral} {enhanced} + max min '

        # pixels exactly at neutral are passed through untouched
        expr=f' x {neutral} = x {enhanced} ? '

        #apply expr
        if diffClip.format.num_planes==1:
            diffClip=Expr(diffClip,[expr])
        else:
            # empty string = copy plane unchanged when chroma is False
            diffClip=Expr(diffClip,[expr,expr if chroma else ''])

    else:
        # function to generate sharpening LUT
        def diffEhFunc(x):
            if x == neutral:
                return x

            diff = x - neutral
            absDiff = abs(diff)
            diff8 = diff / bpsMul8
            absDiff8 = abs(diff8)
            diff8Sqr = diff8 * diff8
            signMul = 1 if diff >= 0 else -1

            res1 = (absDiff / Szrp) ** miSpwr * SzrpMulStrength * signMul
            res2 = diff8Sqr * Szrp8SqrPlusSdmpLo / ((diff8Sqr + SdmpLo) * Szrp8Sqr)
            res3 = 0 if SdmpHiEqual0 else (absDiff8 / SdmpHi) ** 4
            enhanced = res1 * res2 * Szrp8DivSdmpHiPower4Plus1 / (1 + res3)

            return min(ceil, max(floor, round(neutral + enhanced)))
        # apply sharpening LUT
        diffClip = diffClip.std.Lut([0,1,2] if chroma else [0], function=diffEhFunc)

    #soften the result
    if Soft > 0:
        diffClipEhSoft = diffClip.rgvs.RemoveGrain([19, 19 if chroma else 0])
        diffClipEhSoft = diffClipEhSoft if Soft >= 1 else core.std.Merge(diffClip, diffClipEhSoft, [1 - Soft, Soft])
        # keep whichever sample is closer to neutral (limits the softening)
        limitDiffExpr=f' x {neutral} - abs y {neutral} - abs <= x y ? '
        diffClip = Expr([diffClip, diffClipEhSoft], [limitDiffExpr, limitDiffExpr if chroma else ''])
    # output
    return diffClip
def mwcfix(
    clip: vs.VideoNode,
    kernel: int = 1,
    restore: float = 5,
    a: int = 2,
    grad: int = 2,
    warp: int = 6,
    thresh: int = 96,
    blur: int = 3,
    repair: int = 1,
    cs_h: int = 0,
    cs_v: int = 0,
    nlm_mode: str = "nlm_ispc",
    nlm_device_type: str = "auto",
    nlm_device_id: int = 0,
    nnedi3_mode: str = "znedi3",
    nnedi3_device: int = -1,
) -> vs.VideoNode:
    """
    chroma restoration
    Steal from other one's script. Most likely written by mawen1250.
    replace nnedi3 with znedi3

    clip:        16bit YUV clip (other depths raise vs.Error)
    kernel:      chroma upsampling kernel: >=2 EEDI2, 1 nnedi3, <=0 neither
    restore:     nlm strength used to restore chroma against luma; 0 disables
    grad:        number of RemoveGrain(20) passes building the low-freq layer
    warp:        AWarp depth (0 disables edge warping)
    thresh/blur: ASobel threshold / ABlur radius for the warp mask
    repair:      >0 Repair(pp, original, repair); <0 Repair(original, pp, -repair)
    cs_h/cs_v:   extra horizontal/vertical chroma shift applied during resampling
    """
    if clip.format.bits_per_sample != 16:
        raise vs.Error('mwcfix: Only 16bit supported')

    # BUGFIX: extractPlanes was called with no argument, which raised a
    # TypeError on every invocation
    clip_y, clip_u, clip_v = extractPlanes(clip)

    cssw = clip.format.subsampling_w
    cssh = clip.format.subsampling_h

    # horizontal pass: upsample chroma width (working transposed) and
    # resample back with the requested shift
    if cs_h != 0 or cssw > 0:
        if cssw > 0 and kernel == 1:
            # nnedi3 path works in 8bit here (original behavior)
            clip_u = core.fmtc.bitdepth(clip_u, bits=8)
            clip_v = core.fmtc.bitdepth(clip_v, bits=8)
        clip_u = clip_u.std.Transpose()
        clip_v = clip_v.std.Transpose()
        field = 1
        for i in range(cssw):
            if kernel >= 2:
                clip_u = clip_u.eedi2.EEDI2(field=field)
                clip_v = clip_v.eedi2.EEDI2(field=field)
            elif kernel == 1:
                clip_u = nnedi3(clip_u, field=field, dh=True, mode=nnedi3_mode, device=nnedi3_device)
                clip_v = nnedi3(clip_v, field=field, dh=True, mode=nnedi3_mode, device=nnedi3_device)
        sy = -cs_h
        clip_u = clip_u.fmtc.resample(h=clip_y.width, sy=sy, center=False, kernel="bicubic", a1=0, a2=0.5)
        clip_v = clip_v.fmtc.resample(h=clip_y.width, sy=sy, center=False, kernel="bicubic", a1=0, a2=0.5)
        clip_u = clip_u.std.Transpose()
        clip_v = clip_v.std.Transpose()

    # vertical pass: same idea without transposing
    if cs_v != 0 or cssh > 0:
        if cssh > 0 and kernel == 1:
            clip_u = core.fmtc.bitdepth(clip_u, bits=8)
            clip_v = core.fmtc.bitdepth(clip_v, bits=8)
        field = 1
        # BUGFIX: this loop iterated subsampling_w; the vertical pass must
        # iterate the vertical subsampling count (cssh), matching the cssw
        # loop of the horizontal pass above
        for i in range(cssh):
            if kernel >= 2:
                clip_u = clip_u.eedi2.EEDI2(field=field)
                clip_v = clip_v.eedi2.EEDI2(field=field)
            elif kernel == 1:
                clip_u = nnedi3(clip_u, field=field, dh=True, mode=nnedi3_mode, device=nnedi3_device)
                clip_v = nnedi3(clip_v, field=field, dh=True, mode=nnedi3_mode, device=nnedi3_device)
            field = 0
        sy = (-0.5 if cssh > 0 and kernel > 0 else 0) - cs_v
        clip_u = clip_u.fmtc.resample(h=clip_y.height, sy=sy, center=True, kernel="bicubic", a1=0, a2=0.5)
        clip_v = clip_v.fmtc.resample(h=clip_y.height, sy=sy, center=True, kernel="bicubic", a1=0, a2=0.5)

    pp_u = clip_u
    pp_v = clip_v

    if restore > 0:
        # nlm with luma as rclip restores chroma detail; Repair keeps it sane
        rst_u = nlm(pp_u, d=0, a=a, s=1, h=restore, rclip=clip_y, device_type=nlm_device_type, device_id=nlm_device_id, mode=nlm_mode).rgvs.Repair(pp_u, 13)
        rst_v = nlm(pp_v, d=0, a=a, s=1, h=restore, rclip=clip_y, device_type=nlm_device_type, device_id=nlm_device_id, mode=nlm_mode).rgvs.Repair(pp_v, 13)
        low_u = rst_u
        low_v = rst_v
        for i in range(grad):
            low_u = low_u.rgvs.RemoveGrain(20)
            low_v = low_v.rgvs.RemoveGrain(20)
        # keep only the high-frequency part of the restoration
        pp_u = Expr([pp_u, rst_u, low_u], 'y z - x +')
        pp_v = Expr([pp_v, rst_v, low_v], 'y z - x +')

    if warp > 0:
        # warp chroma toward luma edges (8bit working space), then limit
        awarp_mask = core.fmtc.bitdepth(clip_y, bits=8).warp.ASobel(thresh).warp.ABlur(blur, 1)
        pp_u8 = core.fmtc.bitdepth(core.fmtc.bitdepth(pp_u, bits=8).warp.AWarp(awarp_mask, warp), bits=16)
        pp_v8 = core.fmtc.bitdepth(core.fmtc.bitdepth(pp_v, bits=8).warp.AWarp(awarp_mask, warp), bits=16)
        pp_u = LimitFilter(pp_u, pp_u8, thr=1.0, elast=2.0)
        pp_v = LimitFilter(pp_v, pp_v8, thr=1.0, elast=2.0)

    if repair > 0:
        pp_u = pp_u.rgvs.Repair(clip_u, repair)
        pp_v = pp_v.rgvs.Repair(clip_v, repair)
    elif repair < 0:
        pp_u = clip_u.rgvs.Repair(pp_u, -repair)
        pp_v = clip_v.rgvs.Repair(pp_v, -repair)

    final = core.std.ShufflePlanes([clip_y, pp_u, pp_v], [0, 0, 0], vs.YUV)
    final = final.fmtc.resample(csp=clip.format.id, kernel="bicubic", a1=0, a2=0.5)
    return final
@deprecated("Strange idea,not really useful.")
def ssharp(
    clip: vs.VideoNode,
    chroma: bool = True,
    mask: bool = False,
    compare: bool = False,
) -> vs.VideoNode:
    """
    slightly sharp through bicubic

    Upscales 2x with a sharp bicubic (a1=-1, a2=6), downscales back with
    Lanczos, then repairs and limits against the source.

    chroma:  also sharpen chroma (ignored for GRAY input)
    mask:    protect edges/large changes by merging back to the source
    compare: return src/sharp interleaved (with labels) instead of the result

    Output is always 16bit.
    """
    def _protect_mask(flt, ref):
        # identical mask previously duplicated in all three branches:
        # TCanny edge mask AND amplified-change mask, both thresholded
        m1 = Expr(core.tcanny.TCanny(ref, sigma=0.5, t_h=20.0, t_l=8.0, mode=1), "x 30000 < 0 x ?").rgvs.RemoveGrain(4)
        m1 = inpand(expand(m1, cycle=1), cycle=1)
        m2 = Expr([flt, ref], "x y - abs 96 *").rgvs.RemoveGrain(4)
        m2 = Expr(m2, "x 30000 < 0 x ?")
        return Expr([m1, m2], "x y min")

    isGRAY = clip.format.color_family == vs.GRAY
    src = clip.fmtc.bitdepth(bits=16)
    w = src.width
    h = src.height
    if chroma and not isGRAY:
        sha = core.fmtc.resample(src, w*2, h*2, kernel='bicubic', a1=-1, a2=6).resize.Lanczos(w, h)
        last = core.rgvs.Repair(sha, src, 13)
        last = LimitFilter(src, last, thr=1, thrc=0.5, elast=6, brighten_thr=0.5, planes=[0, 1, 2])
        if mask:
            # NOTE: the old code stored the mask clip in a local named `mask`,
            # shadowing the boolean parameter; the helper avoids that
            last = core.std.MaskedMerge(last, src, _protect_mask(last, src), [0, 1, 2])
    elif not chroma:
        srcy = getY(src)
        sha = core.fmtc.resample(srcy, w*2, h*2, kernel='bicubic', a1=-1, a2=6).resize.Lanczos(w, h)
        last = core.rgvs.Repair(sha, srcy, 13)
        last = LimitFilter(srcy, last, thr=1, elast=6, brighten_thr=0.5, planes=0)
        if mask:
            last = core.std.MaskedMerge(last, srcy, _protect_mask(last, srcy), 0)
        last = core.std.ShufflePlanes([last, src], [0, 1, 2], colorfamily=vs.YUV)
    elif isGRAY:
        sha = core.fmtc.resample(src, w*2, h*2, kernel='bicubic', a1=-1, a2=6).resize.Lanczos(w, h)
        last = core.rgvs.Repair(sha, src, 13)
        last = LimitFilter(src, last, thr=1, thrc=0.5, elast=6, brighten_thr=0.5)
        if mask:
            last = core.std.MaskedMerge(last, src, _protect_mask(last, src))
    if not compare:
        return last
    return core.std.Interleave([src.text.Text("src"), last.text.Text("sharp")])
def bm3d(
    clip: vs.VideoNode,
    sigma: float | Sequence[float] = [3, 3, 3],
    sigma2: float | Sequence[float] | None = None,
    preset: str = "fast",
    preset2: str | None = None,
    mode: str = "cpu",
    radius: int = 0,
    radius2: int | None = None,
    chroma: bool = False,
    fast: bool = True,
    block_step1: int | Sequence[int] | None = None,
    bm_range1: int | Sequence[int] | None = None,
    ps_num1: int | None = None,
    ps_range1: int | None = None,
    block_step2: int | Sequence[int] | None = None,
    bm_range2: int | Sequence[int] | None = None,
    ps_num2: int | None = None,
    ps_range2: int | None = None,
    extractor_exp: int = 0,
    device_id: int = 0,
    bm_error_s: str = "SSD",
    transform_2d_s: str = "DCT",
    transform_1d_s: str = "DCT",
    refine: int = 1,
    dmode: int = 0,
    v2: bool = False,
) -> vs.VideoNode:
    """
    warp function for bm3dcpu,bm3dcuda,bm3dcuda_rtc,and bm3dhip,similar to mvs.bm3d but only main function(without colorspace tranform)
    due to difference between bm3d and bm3d{cpu,cuda,cuda_rtc,hip},result will not match mvf.bm3d
    -------------------------------------------------------------------------
    preset,preset2: presets for the basic and final estimate. Supported: fast,lc,np,high
    sigma,sigma2:   denoise strength for the basic and final estimate (sigma2 defaults to sigma)
    radius,radius2: temporal radius for the basic and final estimate (radius2 defaults to radius)
    refine:         number of final-estimate passes
    v2:             use BM3Dv2 (internal aggregation; slightly faster for vbm3d)
    For other parameters, read the bm3d{cpu,cuda,cuda_rtc,hip} docs.
    """
    bits = clip.format.bits_per_sample
    clip = core.fmtc.bitdepth(clip, bits=32)
    if chroma is True and clip.format.id != vs.YUV444PS:
        raise ValueError("chroma=True only works on yuv444")

    if radius2 is None:
        radius2 = radius
    if sigma2 is None:
        sigma2 = sigma
    if preset2 is None:
        preset2 = preset

    isvbm3d = radius > 0
    isvbm3d2 = radius2 > 0

    if preset not in ("fast", "lc", "np", "high") or preset2 not in ("fast", "lc", "np", "high"):
        raise ValueError("preset and preset2 must be 'fast','lc','np',or'high'")

    parmas1 = {
        #block_step,bm_range, ps_num, ps_range
        "fast": [8, 9, 2, 4],
        "lc":   [6, 9, 2, 4],
        "np":   [4, 16, 2, 5],
        "high": [3, 16, 2, 7],
    }

    vparmas1 = {
        #block_step,bm_range, ps_num, ps_range
        "fast": [8, 7, 2, 4],
        "lc":   [6, 9, 2, 4],
        "np":   [4, 12, 2, 5],
        "high": [3, 16, 2, 7],
    }

    parmas2 = {
        #block_step,bm_range, ps_num, ps_range
        "fast": [7, 9, 2, 5],
        "lc":   [5, 9, 2, 5],
        "np":   [3, 16, 2, 6],
        "high": [2, 16, 2, 8],
    }

    vparmas2 = {
        #block_step,bm_range, ps_num, ps_range
        "fast": [7, 7, 2, 5],
        "lc":   [5, 9, 2, 5],
        "np":   [3, 12, 2, 6],
        "high": [2, 16, 2, 8],
    }

    p1 = vparmas1 if isvbm3d else parmas1
    p2 = vparmas2 if isvbm3d2 else parmas2

    block_step1 = p1[preset][0] if block_step1 is None else block_step1
    bm_range1 = p1[preset][1] if bm_range1 is None else bm_range1
    ps_num1 = p1[preset][2] if ps_num1 is None else ps_num1
    ps_range1 = p1[preset][3] if ps_range1 is None else ps_range1

    block_step2 = p2[preset2][0] if block_step2 is None else block_step2
    bm_range2 = p2[preset2][1] if bm_range2 is None else bm_range2
    ps_num2 = p2[preset2][2] if ps_num2 is None else ps_num2
    ps_range2 = p2[preset2][3] if ps_range2 is None else ps_range2

    # backend options shared by every call (previously repeated inline
    # in eight near-identical call sites)
    common = dict(mode=mode, chroma=chroma, fast=fast, extractor_exp=extractor_exp,
                  device_id=device_id, bm_error_s=bm_error_s,
                  transform_2d_s=transform_2d_s, transform_1d_s=transform_1d_s)
    basic = dict(sigma=sigma, radius=radius, block_step=block_step1,
                 bm_range=bm_range1, ps_num=ps_num1, ps_range=ps_range1, **common)
    # BUGFIX: the refinement stage previously passed radius=radius, so an
    # explicitly set radius2 was ignored (and VAggregate broke when
    # radius=0 but radius2>0); it now uses radius2 consistently.
    final = dict(sigma=sigma2, radius=radius2, block_step=block_step2,
                 bm_range=bm_range2, ps_num=ps_num2, ps_range=ps_range2, **common)

    if v2:
        # BM3Dv2 aggregates internally, so no VAggregate is needed
        flt = bm3dv2_core(clip, **basic)
        for _ in range(refine):
            flt = bm3dv2_core(clip, ref=flt, **final)
    else:
        flt = bm3d_core(clip, **basic)
        if isvbm3d:
            flt = core.bm3d.VAggregate(flt, radius=radius, sample=1)
        for _ in range(refine):
            flt = bm3d_core(clip, ref=flt, **final)
            if isvbm3d2:
                flt = core.bm3d.VAggregate(flt, radius=radius2, sample=1)

    return core.fmtc.bitdepth(flt, bits=bits, dmode=dmode)
def bm3d_core(
    clip: vs.VideoNode,
    ref: vs.VideoNode | None = None,
    mode: str = "cpu",
    sigma: float | Sequence[float] = 3.0,
    block_step: int | Sequence[int] = 8,
    bm_range: int | Sequence[int] = 9,
    radius: int = 0,
    ps_num: int = 2,
    ps_range: int = 4,
    chroma: bool = False,
    fast: bool = True,
    extractor_exp: int = 0,
    device_id: int = 0,
    bm_error_s: str = "SSD",
    transform_2d_s: str = "DCT",
    transform_1d_s: str = "DCT",
) -> vs.VideoNode:
    """Dispatch one BM3D estimate to the selected backend plugin (cpu/cuda/cuda_rtc/hip)."""
    if mode not in ("cpu", "cuda", "cuda_rtc", "hip"):
        raise ValueError("mode must be cpu,or cuda,or cuda_rtc,or hip!")

    # arguments accepted by every backend
    shared = dict(ref=ref, sigma=sigma, block_step=block_step, bm_range=bm_range,
                  radius=radius, ps_num=ps_num, ps_range=ps_range, chroma=chroma)
    if mode == "cpu":
        # bm3dcpu does not expose the GPU-only tuning options
        return core.bm3dcpu.BM3D(clip, **shared)

    shared.update(fast=fast, extractor_exp=extractor_exp, device_id=device_id)
    if mode == "cuda":
        return core.bm3dcuda.BM3D(clip, **shared)
    if mode == "cuda_rtc":
        # only the runtime-compiled backend takes the transform/error options
        return core.bm3dcuda_rtc.BM3D(clip, bm_error_s=bm_error_s,
                                      transform_2d_s=transform_2d_s,
                                      transform_1d_s=transform_1d_s, **shared)
    return core.bm3dhip.BM3D(clip, **shared)
def bm3dv2_core(
    clip: vs.VideoNode,
    ref: vs.VideoNode | None = None,
    mode: str = "cpu",
    sigma: float | Sequence[float] = 3.0,
    block_step: int | Sequence[int] = 8,
    bm_range: int | Sequence[int] = 9,
    radius: int = 0,
    ps_num: int = 2,
    ps_range: int = 4,
    chroma: bool = False,
    fast: bool = True,
    extractor_exp: int = 0,
    device_id: int = 0,
    bm_error_s: str = "SSD",
    transform_2d_s: str = "DCT",
    transform_1d_s: str = "DCT",
) -> vs.VideoNode:
    """Dispatch one BM3Dv2 call (internal temporal aggregation) to the selected backend."""
    if mode not in ("cpu", "cuda", "cuda_rtc", "hip"):
        raise ValueError("mode must be cpu,or cuda,or cuda_rtc,or hip!")

    base = dict(ref=ref, sigma=sigma, block_step=block_step, bm_range=bm_range,
                radius=radius, ps_num=ps_num, ps_range=ps_range, chroma=chroma)
    if mode == "cpu":
        # the cpu backend has no fast/extractor/device knobs
        return core.bm3dcpu.BM3Dv2(clip, **base)

    base.update(fast=fast, extractor_exp=extractor_exp, device_id=device_id)
    if mode == "cuda":
        return core.bm3dcuda.BM3Dv2(clip, **base)
    if mode == "cuda_rtc":
        # cuda_rtc additionally accepts the transform/error selections
        return core.bm3dcuda_rtc.BM3Dv2(clip, bm_error_s=bm_error_s,
                                        transform_2d_s=transform_2d_s,
                                        transform_1d_s=transform_1d_s, **base)
    return core.bm3dhip.BM3Dv2(clip, **base)
(Shift "back" towards original value) 222 | #################### 223 | STPresso is recommended for content up to 720p because 224 | "the spatial part might be a bit too narrow for 1080p encoding 225 | (since it's only a 3x3 kernel)". 226 | #################### 227 | Differences: 228 | high depth support 229 | automatically adjust parameters to fit into different depth 230 | you have less choice in RGmode 231 | """ 232 | depth = clip.format.bits_per_sample 233 | LIM1= round(limit*100.0/bias-1.0) if limit>0 else round(100.0/bias) 234 | LIM1 = scale(LIM1,depth) 235 | #(limit>0) ? round(limit*100.0/bias-1.0) : round(100.0/bias) 236 | LIM2 =1 if limit<0 else limit 237 | LIM2 = scale(LIM2,depth) 238 | #(limit<0) ? 1 : limit 239 | BIA = bias 240 | BK = scale(back,depth) 241 | TBIA = bias 242 | TLIM1 = round(tlimit*100.0/tbias-1.0) if tlimit>0 else round(100.0/tbias) 243 | TLIM1 = scale(TLIM1,depth) 244 | #(tlimit>0) ? string( round(tlimit*100.0/tbias-1.0) ) : string( round(100.0/tbias) ) 245 | TLIM2 = 1 if tlimit<0 else tlimit 246 | TLIM2 = scale(TLIM2,depth) 247 | #(tlimit<0) ? "1" : string(tlimit) 248 | bzz = core.rgvs.RemoveGrain(clip,RGmode) 249 | #### 250 | if limit < 0: 251 | expr = "x y - abs "+str(LIM1)+" < x x " +str(scale(1,depth))+ " x y - x y - abs / * - ?" 252 | texpr = "x y - abs "+str(TLIM1)+" < x x " +str(scale(1,depth))+ " x y - x y - abs / * - ?" 253 | else: 254 | expr = "x y - abs " +str(scale(1,depth))+ " < x x "+str(LIM1)+" + y < x "+str(LIM2)+" + x "+str(LIM1)+" - y > x "+str(LIM2)+" - " + "x " +str(scale(100,depth))+" "+str(BIA)+" - * y "+str(BIA)+" * + "+str(scale(100,depth))+" / ? ? ?" 255 | texpr = "x y - abs " +str(scale(1,depth))+ " < x x "+str(TLIM1)+" + y < x "+str(TLIM2)+" + x "+str(TLIM1)+" - y > x "+str(TLIM2)+" - " + "x " +str(scale(100,depth))+" "+str(TBIA)+" - * y "+str(TBIA)+" * + "+str(scale(100,depth))+" / ? ? ?" 
@deprecated("No maintenance")
def SPresso(
    clip: vs.VideoNode,
    limit: int = 2,
    bias: int = 25,
    RGmode: int = 4,
    limitC: int = 4,
    biasC: int = 50,
    RGmodeC: int = 0,
) -> vs.VideoNode:
    """
    SPresso (Spatial Pressdown) -- original AviSynth script by Didée.

    A purely spatial filter designed to achieve better compressibility
    without doing too much harm to the original detail.  Due to its 3x3
    kernel it works best on SD content and possibly 720p; it was not
    designed for 1080p processing.

    clip:    input clip.
    limit:   maximum change allowed for any given pixel (luma).
    bias:    "aggressivity"; 20 is very light, 33 is already quite strong.
    RGmode:  RemoveGrain mode for luma (4 is best in most cases; 19/20 may
             work better elsewhere); 0 copies luma from the input clip.
    limitC:  same as limit, for chroma.
    biasC:   same as bias, for chroma.
    RGmodeC: RemoveGrain mode for chroma; 0 (default) copies chroma from
             the input clip (set 4/19/20/... to process chroma).

    Differences from the AviSynth version: high bit depth support,
    parameters automatically adjusted to the clip's depth, fewer RGmode
    choices.
    """
    depth = clip.format.bits_per_sample
    LIM1 = round(limit*100.0/bias-1.0) if limit>0 else round(100.0/bias)
    LIM1 = scale(LIM1,depth)
    LIM2 = 1 if limit<0 else limit
    LIM2 = scale(LIM2,depth)
    BIA = bias
    # bugfix: the chroma thresholds previously tested the luma "limit"
    # instead of "limitC", so a negative limitC was silently ignored.
    LIM1c = round(limitC*100.0/biasC-1.0) if limitC>0 else round(100.0/biasC)
    LIM1c = scale(LIM1c,depth)
    LIM2c = 1 if limitC<0 else limitC
    LIM2c = scale(LIM2c,depth)
    BIAc = biasC
    ###
    if limit < 0:
        expr = "x y - abs "+str(LIM1)+" < x x " +str(scale(1,depth))+ " x y - x y - abs / * - ?"
    else:
        expr = "x y - abs " +str(scale(0,depth))+ " <= x x "+str(LIM1)+" + y < x "+str(LIM2)+" + x "+str(LIM1)+" - y > x "+str(LIM2)+" - " + "x " +str(scale(100,depth))+" "+str(BIA)+" - * y "+str(BIA)+" * + "+str(scale(100,depth))+" / ? ? ?"
    if limitC < 0:
        exprC = "x y - abs "+str(LIM1c)+" < x x " +str(scale(1,depth))+ " x y - x y - abs / * - ?"
    else:
        exprC = "x y - abs " +str(scale(0,depth))+ " <= x x "+str(LIM1c)+" + y < x "+str(LIM2c)+" + x "+str(LIM1c)+" - y > x "+str(LIM2c)+" - " + "x " +str(scale(100,depth))+" "+str(BIAc)+" - * y "+str(BIAc)+" * + "+str(scale(100,depth))+" / ? ? ?"
    ###
    rg = core.rgvs.RemoveGrain(clip,[RGmode,RGmodeC])
    Y = Expr([getplane(clip,0),getplane(rg,0)],expr)
    U = getplane(clip,1) if RGmodeC==0 else Expr([getplane(clip,1),getplane(rg,1)],exprC)
    V = getplane(clip,2) if RGmodeC==0 else Expr([getplane(clip,2),getplane(rg,2)],exprC)
    last = core.std.ShufflePlanes([Y,U,V],[0,0,0], colorfamily=vs.YUV)
    return last

@deprecated("No maintenance")
def STPressoMC(
    clip: vs.VideoNode,
    limit: int = 3,
    bias: int = 24,
    RGmode: int = 4,
    tthr: int = 12,
    tlimit: int = 3,
    tbias: int = 49,
    back: int = 1,
    s_p: dict | None = None,
    a_p: dict | None = None,
    c_p: dict | None = None,
) -> vs.VideoNode:
    """
    STPressoMC: STPresso with a motion-compensated temporal pass
    (FluxsmoothTMC instead of plain flux.SmoothT).

    limit/bias/RGmode:   spatial part, see STPresso.
    tthr/tlimit/tbias:   temporal part thresholds.
    back:                shift "back" towards the original value.
    s_p/a_p/c_p:         extra keyword dicts forwarded to mv.Super /
                         mv.Analyse / mv.Compensate via FluxsmoothTMC.
                         (bugfix: previously mutable {} defaults.)
    """
    depth = clip.format.bits_per_sample
    LIM1 = round(limit*100.0/bias-1.0) if limit>0 else round(100.0/bias)
    LIM1 = scale(LIM1,depth)
    LIM2 = 1 if limit<0 else limit
    LIM2 = scale(LIM2,depth)
    BIA = bias
    BK = scale(back,depth)
    TBIA = bias
    TLIM1 = round(tlimit*100.0/tbias-1.0) if tlimit>0 else round(100.0/tbias)
    TLIM1 = scale(TLIM1,depth)
    TLIM2 = 1 if tlimit<0 else tlimit
    TLIM2 = scale(TLIM2,depth)
    bzz = core.rgvs.RemoveGrain(clip,RGmode)
    ####
    if limit < 0:
        expr = "x y - abs "+str(LIM1)+" < x x " +str(scale(1,depth))+ " x y - x y - abs / * - ?"
        texpr = "x y - abs "+str(TLIM1)+" < x x " +str(scale(1,depth))+ " x y - x y - abs / * - ?"
    else:
        expr = "x y - abs " +str(scale(1,depth))+ " < x x "+str(LIM1)+" + y < x "+str(LIM2)+" + x "+str(LIM1)+" - y > x "+str(LIM2)+" - " + "x " +str(scale(100,depth))+" "+str(BIA)+" - * y "+str(BIA)+" * + "+str(scale(100,depth))+" / ? ? ?"
        texpr = "x y - abs " +str(scale(1,depth))+ " < x x "+str(TLIM1)+" + y < x "+str(TLIM2)+" + x "+str(TLIM1)+" - y > x "+str(TLIM2)+" - " + "x " +str(scale(100,depth))+" "+str(TBIA)+" - * y "+str(TBIA)+" * + "+str(scale(100,depth))+" / ? ? ?"
    L=[]
    for i in range(0,3):
        C = core.std.ShufflePlanes(clip, i, colorfamily=vs.GRAY)
        B = core.std.ShufflePlanes(bzz, i, colorfamily=vs.GRAY)
        O = Expr([C,B],expr)
        L.append(O)
    if tthr!=0:
        st = FluxsmoothTMC(bzz,tthr,s_p,a_p,c_p,[0,1,2])
        diff = core.std.MakeDiff(bzz,st,[0,1,2])
        last = core.std.ShufflePlanes(L, [0,0,0], colorfamily=vs.YUV)
        diff2 = core.std.MakeDiff(last,diff,[0,1,2])
        for i in range(0,3):
            c=L[i]
            b=core.std.ShufflePlanes(diff2, i, colorfamily=vs.GRAY)
            L[i] = Expr([c,b],texpr)
    if back!=0:
        bexpr = "x "+str(BK)+" + y < x "+str(BK)+" + x "+str(BK)+" - y > x "+str(BK)+" - y ? ?"
        Y = core.std.ShufflePlanes(clip, 0, colorfamily=vs.GRAY)
        L[0] = Expr([L[0],Y],bexpr)
    output = core.std.ShufflePlanes(L, [0,0,0], colorfamily=vs.YUV)
    return output

@deprecated("No maintenance")
def FluxsmoothTMC(
    src: vs.VideoNode,
    tthr: int = 12,
    s_p: dict | None = None,
    a_p: dict | None = None,
    c_p: dict | None = None,
    planes: PlanesType = [0,1,2],
):
    """
    Motion-compensated flux.SmoothT.
    port from https://forum.doom9.org/showthread.php?s=d58237a359f5b1f2ea45591cceea5133&p=1572664#post1572664
    allow setting parameters for mvtools

    s_p/a_p/c_p: extra keyword dicts for mv.Super / mv.Analyse /
    mv.Compensate (bugfix: previously mutable {} defaults shared
    across calls).
    """
    super_p={"pel":2,"sharp":1,}
    analyse_p={"truemotion":False,"delta":1,"blksize":16,"overlap":8,}
    s = {**super_p, **(s_p or {})}
    a = {**analyse_p, **(a_p or {})}
    c = c_p or {}
    sup = core.mv.Super(src,**s)
    bv = core.mv.Analyse(sup, isb=True, **a)
    fv = core.mv.Analyse(sup, isb=False, **a)
    # sandwich the source between forward/backward compensated frames so
    # SmoothT's radius-1 temporal window is motion compensated
    bc = core.mv.Compensate(src,sup,bv,**c)
    fc = core.mv.Compensate(src,sup,fv,**c)
    il = core.std.Interleave([fc,src,bc])
    fs = core.flux.SmoothT(il, temporal_threshold=tthr, planes=planes)
    return core.std.SelectEvery(fs,3,1)
temporal_threshold=tthr, planes=planes) 430 | return core.std.SelectEvery(fs,3,1) -------------------------------------------------------------------------------- /xvs/utils.py: -------------------------------------------------------------------------------- 1 | import vapoursynth as vs 2 | import math 3 | import functools 4 | from typing import Callable, Optional, Sequence, Union, Any, List 5 | from warnings import deprecated 6 | 7 | 8 | core=vs.core 9 | from .config import nnedi3_resample,Expr 10 | 11 | PlanesType = Optional[Union[int, Sequence[int]]] 12 | VSFuncType = Union[vs.Func, Callable[..., vs.VideoNode]] 13 | 14 | #converting the values in one depth to another 15 | def scale(i: int,depth_out: int = 16,depth_in: int = 8) -> (int | float): 16 | return i*2**(depth_out-depth_in) 17 | 18 | #getplane in YUV 19 | def getplane(clip: vs.VideoNode,i: int) -> vs.VideoNode: 20 | return clip.std.ShufflePlanes(i, colorfamily=vs.GRAY) 21 | 22 | def getY(clip: vs.VideoNode) -> vs.VideoNode: 23 | return clip.std.ShufflePlanes(0, colorfamily=vs.GRAY) 24 | 25 | def getU(clip: vs.VideoNode) -> vs.VideoNode: 26 | return clip.std.ShufflePlanes(1, colorfamily=vs.GRAY) 27 | 28 | def getV(clip: vs.VideoNode) -> vs.VideoNode: 29 | return clip.std.ShufflePlanes(2, colorfamily=vs.GRAY) 30 | 31 | #extract all planes 32 | def extractPlanes(clip: vs.VideoNode) -> tuple[vs.VideoNode]: 33 | return tuple(clip.std.ShufflePlanes(x, colorfamily=vs.GRAY) for x in range(clip.format.num_planes)) 34 | 35 | #show plane in YUV(interger only) 36 | def showY(clip: vs.VideoNode) -> vs.VideoNode: 37 | return Expr(clip,["",str(scale(128,clip.format.bits_per_sample))]) 38 | 39 | def showU(clip: vs.VideoNode) -> vs.VideoNode: 40 | return Expr(clip,["0","",str(scale(128,clip.format.bits_per_sample))]) 41 | 42 | def showV(clip: vs.VideoNode) -> vs.VideoNode: 43 | return Expr(clip,["0",str(scale(128,clip.format.bits_per_sample)),""]) 44 | 45 | def showUV(clip: vs.VideoNode) -> vs.VideoNode: 46 | return 
# Coordinate masks shared by inpand()/expand(); layout matches the
# "coordinates" argument of std.Minimum/std.Maximum.
_XXPAND_COORDS = {
    "square":[1,1,1,1,1,1,1,1],
    "horizontal":[0,0,0,1,1,0,0,0],
    "vertical":[0,1,0,0,0,0,1,0],
    "both":[0,1,0,1,1,0,1,0],
}

def inpand(
    clip:vs.VideoNode,
    planes: int | Sequence[int] = 0,
    thr: float | None = None,
    mode: str = "square",
    cycle: int = 1
) -> vs.VideoNode:
    """Erode the clip by running std.Minimum `cycle` times with the shape given by `mode`."""
    if not (cd:=_XXPAND_COORDS.get(mode)):
        raise TypeError("unknown mode")
    for i in range(cycle):
        clip = core.std.Minimum(clip,planes,thr,cd)
    return clip

def expand(
    clip:vs.VideoNode,
    planes: int | Sequence[int] = 0,
    thr: float | None = None,
    mode: str = "square",
    cycle: int = 1
) -> vs.VideoNode:
    """Dilate the clip by running std.Maximum `cycle` times with the shape given by `mode`."""
    if not (cd:=_XXPAND_COORDS.get(mode)):
        raise TypeError("unknown mode")
    for i in range(cycle):
        clip = core.std.Maximum(clip,planes,thr,cd)
    return clip

def getCSS(w:int,h:int) -> str:
    """Map (subsampling_w, subsampling_h) to the usual chroma subsampling name."""
    css={
        (1,1):"420",
        (1,0):"422",
        (0,0):"444",
        (2,2):"410",
        (2,0):"411",
        (0,1):"440"}
    sub=(w,h)
    if css.get(sub) is None:
        raise ValueError('Unknown subsampling')
    else:
        return css[sub]

def clip2css(clip: vs.VideoNode) -> str:
    """Return the chroma subsampling name ("420", "422", ...) of a clip."""
    return getCSS(clip.format.subsampling_w,clip.format.subsampling_h)

def nnedi3(
    clip: vs.VideoNode,
    field: int,
    dh: bool | None = False,
    dw: bool | None = False,
    planes: PlanesType = None,
    nsize: int | None = 6,
    nns: int | None = 1,
    qual: int | None = 1,
    etype: int | None = 0,
    pscrn: int | None = 2,
    exp: int | None = 0,
    mode: str = "znedi3",
    device: int | None = -1,
    list_device: bool | None = False,
    info: bool | None = False,
    int16_predictor: bool | None = True,
    int16_prescreener: bool | None = True,
) -> vs.VideoNode:
    """
    Unified wrapper around the nnedi3 plugin family.

    mode selects the backend: 'nnedi3', 'znedi3', 'nnedi3cl' or 'sneedif'.
    Note: dw is only honoured by the OpenCL backends (nnedi3cl/sneedif);
    int16_predictor/int16_prescreener only by the CPU backends;
    device/list_device/info only by the OpenCL backends.
    """
    mode=mode.lower()
    if mode=="nnedi3":
        return core.nnedi3.nnedi3(clip,field=field, dh=dh, planes=planes, nsize=nsize, nns=nns, qual=qual, etype=etype, pscrn=pscrn,exp=exp,int16_predictor=int16_predictor,int16_prescreener=int16_prescreener)
    elif mode=="znedi3":
        return core.znedi3.nnedi3(clip,field=field, dh=dh, planes=planes, nsize=nsize, nns=nns, qual=qual, etype=etype, pscrn=pscrn,exp=exp,int16_predictor=int16_predictor,int16_prescreener=int16_prescreener)
    elif mode=="nnedi3cl":
        return core.nnedi3cl.NNEDI3CL(clip,field=field, dh=dh, dw=dw, planes=planes, nsize=nsize, nns=nns, qual=qual, etype=etype, pscrn=pscrn,device=device,list_device=list_device,info=info)
    elif mode=="sneedif":
        return core.sneedif.NNEDI3(clip,field=field, dh=dh, dw=dw, planes=planes, nsize=nsize, nns=nns, qual=qual, etype=etype, pscrn=pscrn,device=device)
    else:
        raise ValueError("Unknown mode,mode must in ['nnedi3','nnedi3cl','znedi3','sneedif']")

def eedi3(
    clip: vs.VideoNode,
    field: int,
    dh: bool | None = None,
    planes: PlanesType = None,
    alpha: float | None = None,
    beta: float | None = None,
    gamma: float | None = None,
    nrad: int | None = None,
    mdis: int | None = None,
    hp: int | None = None,
    ucubic: int | None = None,
    cost3: int | None = None,
    vcheck: int | None = None,
    vthresh0: float | None = None,
    vthresh1: float | None = None,
    vthresh2: float | None = None,
    sclip: vs.VideoNode | None = None,
    mclip: vs.VideoNode | None = None,
    mode: str = "eedi3m",
    device: int | None = None,
    list_device: bool | None = None,
    info: bool | None = None,
    opt: int | None = None,
) -> vs.VideoNode:
    """
    Unified wrapper around the eedi3 plugin family.

    mode selects the backend: 'eedi3m', 'eedi3cl' (EEDI3CL in the eedi3m
    plugin) or the classic 'eedi3'.  mclip/opt are ignored by classic
    eedi3; device/list_device/info only apply to eedi3cl.
    """
    mode=mode.lower()
    if mode=="eedi3m":
        return clip.eedi3m.EEDI3(field, dh, planes, alpha, beta, gamma, nrad, mdis, hp, ucubic, cost3, vcheck, vthresh0, vthresh1, vthresh2, sclip, mclip, opt)
    elif mode=="eedi3cl":
        return clip.eedi3m.EEDI3CL(field, dh, planes, alpha, beta, gamma, nrad, mdis, hp, ucubic, cost3, vcheck, vthresh0, vthresh1, vthresh2, sclip, mclip, opt, device, list_device, info)
    elif mode=="eedi3":
        return clip.eedi3.eedi3(field, dh, planes, alpha, beta, gamma, nrad, mdis, hp, ucubic, cost3, vcheck, vthresh0, vthresh1, vthresh2, sclip)
    else:
        # bugfix: the mode list was quoted as one string ('eedi3m,eedi3,eedi3cl')
        raise ValueError("Unknown mode,mode must in ['eedi3m','eedi3','eedi3cl']")

def nlm(
    clip: vs.VideoNode,
    d: int | None = 1,
    a: int | None = 2,
    s: int | None = 4,
    h: float | None = 12,
    channels: str | None = 'auto',
    wmode: int | None = 0,
    wref: float | None = 1.0,
    rclip: vs.VideoNode | None = None,
    device_type: str | None = "auto",
    device_id: int | None = 0,
    ocl_x: int | None = 0,
    ocl_y: int | None = 0,
    ocl_r: int | None = 0,
    info: bool | None = False,
    num_streams: int | None = 1,
    mode: str | None = "nlm_ispc",
) -> vs.VideoNode:
    """
    Unified wrapper around the non-local-means plugin family.

    mode selects the backend: 'knlmeanscl' (alias 'knl'), 'nlm_ispc'
    (alias 'nlm') or 'nlm_cuda'.  The ocl_* and device_type parameters
    only apply to KNLMeansCL; num_streams only to nlm_cuda.
    """
    mode=mode.lower()
    if mode=="knl" or mode=="knlmeanscl":
        return core.knlm.KNLMeansCL(clip,d=d,a=a,s=s,h=h,channels=channels,wmode=wmode,wref=wref,rclip=rclip,device_type=device_type,device_id=device_id,ocl_x=ocl_x,ocl_y=ocl_y,ocl_r=ocl_r,info=info)
    elif mode=="nlm" or mode=="nlm_ispc":
        return core.nlm_ispc.NLMeans(clip,d=d,a=a,s=s,h=h,channels=channels,wmode=wmode,wref=wref,rclip=rclip)
    elif mode=="nlm_cuda":
        return core.nlm_cuda.NLMeans(clip,d=d,a=a,s=s,h=h,channels=channels,wmode=wmode,wref=wref,rclip=rclip,device_id=device_id,num_streams=num_streams)
    else:
        raise ValueError("Unknown mode,mode must in ['knlmeanscl','nlm_ispc','nlm_cuda']")

#copy from havsfunc
def mt_expand_multi(
    src: vs.VideoNode,
    mode: str | None = 'rectangle',
    planes: PlanesType = None,
    sw: int = 1,
    sh: int = 1
) -> vs.VideoNode:
    """
    mt_expand_multi
    mt_inpand_multi

    Calls mt_expand or mt_inpand multiple times in order to grow or shrink
    the mask from the desired width and height.

    Parameters:
    - sw   : Growing/shrinking shape width. 0 is allowed. Default: 1
    - sh   : Growing/shrinking shape height. 0 is allowed. Default: 1
    - mode : "rectangle" (default), "ellipse" or "losange". Replaces the
             mt_xxpand mode. Ellipses are actually combinations of
             rectangles and losanges and look more like octogons.
             Losanges are truncated (not scaled) when sw and sh are not
             equal.
    Other parameters are the same as mt_xxpand.
    """
    if not isinstance(src, vs.VideoNode):
        raise vs.Error('mt_expand_multi: this is not a clip')

    if sw > 0 and sh > 0:
        mode_m = [0, 1, 0, 1, 1, 0, 1, 0] if mode == 'losange' or (mode == 'ellipse' and (sw % 3) != 1) else [1, 1, 1, 1, 1, 1, 1, 1]
    elif sw > 0:
        mode_m = [0, 0, 0, 1, 1, 0, 0, 0]
    elif sh > 0:
        mode_m = [0, 1, 0, 0, 0, 0, 1, 0]
    else:
        mode_m = None

    if mode_m is not None:
        src = mt_expand_multi(src.std.Maximum(planes=planes, coordinates=mode_m), mode=mode, planes=planes, sw=sw - 1, sh=sh - 1)
    return src
#copy from havsfunc
def mt_inpand_multi(
    src: vs.VideoNode,
    mode: str | None = 'rectangle',
    planes: PlanesType = None,
    sw: int = 1,
    sh: int = 1
) -> vs.VideoNode:
    """
    mt_expand_multi
    mt_inpand_multi

    Calls mt_expand or mt_inpand multiple times in order to grow or shrink
    the mask from the desired width and height.

    Parameters:
    - sw   : Growing/shrinking shape width. 0 is allowed. Default: 1
    - sh   : Growing/shrinking shape height. 0 is allowed. Default: 1
    - mode : "rectangle" (default), "ellipse" or "losange". Replaces the
             mt_xxpand mode. Ellipses are actually combinations of
             rectangles and losanges and look more like octogons.
             Losanges are truncated (not scaled) when sw and sh are not
             equal.
    Other parameters are the same as mt_xxpand.
    """
    if not isinstance(src, vs.VideoNode):
        raise vs.Error('mt_inpand_multi: this is not a clip')

    if sw > 0 and sh > 0:
        mode_m = [0, 1, 0, 1, 1, 0, 1, 0] if mode == 'losange' or (mode == 'ellipse' and (sw % 3) != 1) else [1, 1, 1, 1, 1, 1, 1, 1]
    elif sw > 0:
        mode_m = [0, 0, 0, 1, 1, 0, 0, 0]
    elif sh > 0:
        mode_m = [0, 1, 0, 0, 0, 0, 1, 0]
    else:
        mode_m = None

    if mode_m is not None:
        src = mt_inpand_multi(src.std.Minimum(planes=planes, coordinates=mode_m), mode=mode, planes=planes, sw=sw - 1, sh=sh - 1)
    return src

def mt_inflate_multi(
    src: vs.VideoNode,
    planes: PlanesType = None,
    radius: int = 1,
) -> vs.VideoNode:
    """Apply std.Inflate `radius` times."""
    if not isinstance(src, vs.VideoNode):
        raise vs.Error('mt_inflate_multi: this is not a clip')

    for i in range(radius):
        src = core.std.Inflate(src, planes=planes)
    return src

def mt_deflate_multi(
    src: vs.VideoNode,
    planes: PlanesType = None,
    radius: int = 1,
) -> vs.VideoNode:
    """Apply std.Deflate `radius` times."""
    if not isinstance(src, vs.VideoNode):
        raise vs.Error('mt_deflate_multi: this is not a clip')

    for i in range(radius):
        src = core.std.Deflate(src, planes=planes)
    return src

def LimitFilter(
    flt: vs.VideoNode,
    src: vs.VideoNode,
    ref: vs.VideoNode | None = None,
    thr: float | None = None,
    elast: float | None = None,
    brighten_thr: float | None = None,
    thrc: float | None = None,
    planes: PlanesType = None,
    use_vszip: bool | None = None,
) -> vs.VideoNode:
    """
    Limit the difference of filtering while avoiding artifacts (post-processor).

    Similar to the AviSynth function Dither_limit_dif16() and HQDeringmod_limit_dif16().
    Commonly used cases: de-banding, de-ringing, de-noising, sharpening,
    combining a high precision source with low precision filtering:
    LimitFilter(src, flt, thr=1.0, elast=2.0)

    From mvsfunc; only the Expr implementation is kept.  If vszip is
    installed, vszip.LimitFilter will be used.

    Algorithm for the Y/R/G/B plane (for chroma, replace "thr" and
    "brighten_thr" with "thrc"):
        dif = flt - src
        dif_ref = flt - ref
        dif_abs = abs(dif_ref)
        thr_1 = brighten_thr if (dif > 0) else thr
        thr_2 = thr_1 * elast
        if dif_abs <= thr_1:   final = flt
        elif dif_abs >= thr_2: final = src
        else:                  final = src + dif * (thr_2 - dif_abs) / (thr_2 - thr_1)

    flt:          filtered clip (YUV/RGB/Gray; 8-16 bit integer or 16/32 bit float).
    src:          source clip, same format and dimensions as flt.
    ref:          optional reference clip used to compute the weight; default src.
    thr:          threshold (8-bit scale) to limit the filtering diff; default 1.0.
    elast:        elasticity of the soft threshold; default 2.0.
    brighten_thr: threshold for diffs that brighten the image; default thr.
    thrc:         threshold for chroma planes; default thr.
    planes:       planes to process; unprocessed planes are copied from flt.
    use_vszip:    force (True) / forbid (False) the vszip implementation;
                  default None auto-detects.
    """
    # input clip
    if not isinstance(flt, vs.VideoNode):
        raise vs.Error('LimitFilter:"flt" must be a clip!')
    if not isinstance(src, vs.VideoNode):
        raise vs.Error('LimitFilter:"src" must be a clip!')
    if ref is not None and not isinstance(ref, vs.VideoNode):
        raise vs.Error('LimitFilter:"ref" must be a clip!')

    # Get properties of input clip
    sFormat = flt.format
    if sFormat.id != src.format.id:
        raise vs.Error('LimitFilter:"flt" and "src" must be of the same format!')
    if flt.width != src.width or flt.height != src.height:
        raise vs.Error('LimitFilter:"flt" and "src" must be of the same width and height!')

    if ref is not None:
        if sFormat.id != ref.format.id:
            raise vs.Error('LimitFilter:"flt" and "ref" must be of the same format!')
        if flt.width != ref.width or flt.height != ref.height:
            raise vs.Error('LimitFilter:"flt" and "ref" must be of the same width and height!')

    sColorFamily = sFormat.color_family
    sIsYUV = sColorFamily == vs.YUV

    sSType = sFormat.sample_type
    sbitPS = sFormat.bits_per_sample
    sNumPlanes = sFormat.num_planes

    # Parameters
    if thr is None:
        thr = 1.0
    elif isinstance(thr, int) or isinstance(thr, float):
        if thr < 0:
            raise vs.Error('valid range of "thr" is [0, +inf)')
    else:
        raise vs.Error('"thr" must be an int or a float!')

    if elast is None:
        elast = 2.0
    elif isinstance(elast, int) or isinstance(elast, float):
        if elast < 1:
            raise vs.Error('valid range of "elast" is [1, +inf)')
    else:
        raise vs.Error('"elast" must be an int or a float!')

    if brighten_thr is None:
        brighten_thr = thr
    elif isinstance(brighten_thr, int) or isinstance(brighten_thr, float):
        if brighten_thr < 0:
            raise vs.Error('valid range of "brighten_thr" is [0, +inf)')
    else:
        raise vs.Error('"brighten_thr" must be an int or a float!')

    if thrc is None:
        thrc = thr
    elif isinstance(thrc, int) or isinstance(thrc, float):
        if thrc < 0:
            raise vs.Error('valid range of "thrc" is [0, +inf)')
    else:
        raise vs.Error('"thrc" must be an int or a float!')

    if use_vszip is None:
        # bugfix: an explicit use_vszip=True used to be overwritten to False;
        # only auto-detect when the caller did not decide.
        use_vszip = hasattr(core,"vszip") and hasattr(core.vszip,"LimitFilter")

    # planes
    process = [0,0,0]

    if planes is None:
        process = [1,1,1]
    elif isinstance(planes, int):
        if planes < 0 or planes >= 3:
            raise vs.Error('valid range of "planes" is [0, 3)!')
        process[planes] = 1
    elif isinstance(planes, Sequence):
        for p in planes:
            if not isinstance(p, int):
                raise vs.Error('"planes" must be a (sequence of) int!')
            elif p < 0 or p >= 3:
                raise vs.Error('valid range of "planes" is [0, 3)!')
            process[p] = 1
    else:
        raise vs.Error('"planes" must be a (sequence of) int!')

    if use_vszip:
        return core.vszip.LimitFilter(flt=flt,src=src,ref=ref,dark_thr=[thr,thrc],bright_thr=[brighten_thr,thrc],elast=elast,planes=planes)
    else:
        # Process: trivial short-circuits first
        if thr <= 0 and brighten_thr <= 0:
            if sIsYUV:
                if thrc <= 0:
                    return src
            else:
                return src
        if thr >= 255 and brighten_thr >= 255:
            if sIsYUV:
                if thrc >= 255:
                    return flt
            else:
                return flt

        valueRange = (1 << sbitPS) - 1 if sSType == vs.INTEGER else 1
        limitExprY = _limit_filter_expr(ref is not None, thr, elast, brighten_thr, valueRange)
        limitExprC = _limit_filter_expr(ref is not None, thrc, elast, thrc, valueRange)
        expr = []
        for i in range(sNumPlanes):
            if process[i]:
                # chroma planes of YUV use the chroma expression
                if i > 0 and (sIsYUV):
                    expr.append(limitExprC)
                else:
                    expr.append(limitExprY)
            else:
                expr.append("")

        if ref is None:
            clip = Expr([flt, src], expr)
        else:
            clip = Expr([flt, src, ref], expr)
        return clip
321 | It acts as a post-processor, and is very useful to limit the difference of filtering while avoiding artifacts. 322 | Commonly used cases: 323 | de-banding 324 | de-ringing 325 | de-noising 326 | sharpening 327 | combining high precision source with low precision filtering: mvf.LimitFilter(src, flt, thr=1.0, elast=2.0) 328 | ############################################################################################################################ 329 | From mvsfunc, and only Expr implementation left,if vszip installed,will use vszip.LimitFilter. 330 | ############################################################################################################################ 331 | Algorithm for Y/R/G/B plane (for chroma, replace "thr" and "brighten_thr" with "thrc") 332 | dif = flt - src 333 | dif_ref = flt - ref 334 | dif_abs = abs(dif_ref) 335 | thr_1 = brighten_thr if (dif > 0) else thr 336 | thr_2 = thr_1 * elast 337 | 338 | if dif_abs <= thr_1: 339 | final = flt 340 | elif dif_abs >= thr_2: 341 | final = src 342 | else: 343 | final = src + dif * (thr_2 - dif_abs) / (thr_2 - thr_1) 344 | ############################################################################################################################ 345 | Basic parameters 346 | flt {clip}: filtered clip, to compute the filtering diff 347 | can be of YUV/RGB/Gray color family, can be of 8-16 bit integer or 16/32 bit float 348 | src {clip}: source clip, to apply the filtering diff 349 | must be of the same format and dimension as "flt" 350 | ref {clip} (optional): reference clip, to compute the weight to be applied on filtering diff 351 | must be of the same format and dimension as "flt" 352 | default: None (use "src") 353 | thr {float}: threshold (8-bit scale) to limit filtering diff 354 | default: 1.0 355 | elast {float}: elasticity of the soft threshold 356 | default: 2.0 357 | planes {int[]}: specify which planes to process 358 | unprocessed planes will be copied from "flt" 359 | default: all 
planes will be processed, [0,1,2] for YUV/RGB input, [0] for Gray input 360 | ############################################################################################################################ 361 | Advanced parameters 362 | brighten_thr {float}: threshold (8-bit scale) for filtering diff that brightening the image (Y/R/G/B plane) 363 | set a value different from "thr" is useful to limit the overshoot/undershoot/blurring introduced in sharpening/de-ringing 364 | default is the same as "thr" 365 | thrc {float}: threshold (8-bit scale) for chroma (U/V/Co/Cg plane) 366 | default is the same as "thr" 367 | """ 368 | # input clip 369 | if not isinstance(flt, vs.VideoNode): 370 | raise vs.Error('LimitFilter:"flt" must be a clip!') 371 | if not isinstance(src, vs.VideoNode): 372 | raise vs.Error('LimitFilter:"src" must be a clip!') 373 | if ref is not None and not isinstance(ref, vs.VideoNode): 374 | raise vs.Error('LimitFilter:"ref" must be a clip!') 375 | 376 | # Get properties of input clip 377 | sFormat = flt.format 378 | if sFormat.id != src.format.id: 379 | raise vs.Error('LimitFilter:"flt" and "src" must be of the same format!') 380 | if flt.width != src.width or flt.height != src.height: 381 | raise vs.Error('LimitFilter:"flt" and "src" must be of the same width and height!') 382 | 383 | if ref is not None: 384 | if sFormat.id != ref.format.id: 385 | raise vs.Error('LimitFilter:"flt" and "ref" must be of the same format!') 386 | if flt.width != ref.width or flt.height != ref.height: 387 | raise vs.Error('LimitFilter:"flt" and "ref" must be of the same width and height!') 388 | 389 | sColorFamily = sFormat.color_family 390 | #CheckColorFamily(sColorFamily) 391 | sIsYUV = sColorFamily == vs.YUV 392 | 393 | sSType = sFormat.sample_type 394 | sbitPS = sFormat.bits_per_sample 395 | sNumPlanes = sFormat.num_planes 396 | 397 | # Parameters 398 | if thr is None: 399 | thr = 1.0 400 | elif isinstance(thr, int) or isinstance(thr, float): 401 | if thr < 0: 402 | 
raise vs.Error('valid range of "thr" is [0, +inf)') 403 | else: 404 | raise vs.Error('"thr" must be an int or a float!') 405 | 406 | if elast is None: 407 | elast = 2.0 408 | elif isinstance(elast, int) or isinstance(elast, float): 409 | if elast < 1: 410 | raise vs.Error('valid range of "elast" is [1, +inf)') 411 | else: 412 | raise vs.Error('"elast" must be an int or a float!') 413 | 414 | if brighten_thr is None: 415 | brighten_thr = thr 416 | elif isinstance(brighten_thr, int) or isinstance(brighten_thr, float): 417 | if brighten_thr < 0: 418 | raise vs.Error('valid range of "brighten_thr" is [0, +inf)') 419 | else: 420 | raise vs.Error('"brighten_thr" must be an int or a float!') 421 | 422 | if thrc is None: 423 | thrc = thr 424 | elif isinstance(thrc, int) or isinstance(thrc, float): 425 | if thrc < 0: 426 | raise vs.Error('valid range of "thrc" is [0, +inf)') 427 | else: 428 | raise vs.Error('"thrc" must be an int or a float!') 429 | 430 | if use_vszip is None and hasattr(core,"vszip") and hasattr(core.vszip,"LimitFilter"): 431 | use_vszip=True 432 | else: 433 | use_vszip=False 434 | 435 | # planes 436 | process = [0,0,0] 437 | 438 | if planes is None: 439 | process = [1,1,1] 440 | elif isinstance(planes, int): 441 | if planes < 0 or planes >= 3: 442 | raise vs.Error(f'valid range of "planes" is [0, 3)!') 443 | process[planes] = 1 444 | elif isinstance(planes, Sequence): 445 | for p in planes: 446 | if not isinstance(p, int): 447 | raise vs.Error('"planes" must be a (sequence of) int!') 448 | elif p < 0 or p >= 3: 449 | raise vs.Error(f'valid range of "planes" is [0, 3)!') 450 | process[p] = 1 451 | else: 452 | raise vs.Error('"planes" must be a (sequence of) int!') 453 | 454 | if use_vszip: 455 | return core.vszip.LimitFilter(flt=flt,src=src,ref=ref,dark_thr=[thr,thrc],bright_thr=[brighten_thr,thrc],elast=elast,planes=planes) 456 | else: 457 | # Process 458 | if thr <= 0 and brighten_thr <= 0: 459 | if sIsYUV: 460 | if thrc <= 0: 461 | return src 462 | 
else: 463 | return src 464 | if thr >= 255 and brighten_thr >= 255: 465 | if sIsYUV: 466 | if thrc >= 255: 467 | return flt 468 | else: 469 | return flt 470 | 471 | valueRange = (1 << sbitPS) - 1 if sSType == vs.INTEGER else 1 472 | limitExprY = _limit_filter_expr(ref is not None, thr, elast, brighten_thr, valueRange) 473 | limitExprC = _limit_filter_expr(ref is not None, thrc, elast, thrc, valueRange) 474 | expr = [] 475 | for i in range(sNumPlanes): 476 | if process[i]: 477 | if i > 0 and (sIsYUV): 478 | expr.append(limitExprC) 479 | else: 480 | expr.append(limitExprY) 481 | else: 482 | expr.append("") 483 | 484 | if ref is None: 485 | clip = Expr([flt, src], expr) 486 | else: 487 | clip = Expr([flt, src, ref], expr) 488 | return clip 489 | 490 | def _limit_filter_expr(defref, thr, elast, largen_thr, value_range): 491 | flt = " x " 492 | src = " y " 493 | ref = " z " if defref else src 494 | 495 | dif = f" {flt} {src} - " 496 | dif_ref = f" {flt} {ref} - " 497 | dif_abs = dif_ref + " abs " 498 | 499 | thr = thr * value_range / 255 500 | largen_thr = largen_thr * value_range / 255 501 | 502 | if thr <= 0 and largen_thr <= 0: 503 | limitExpr = f" {src} " 504 | elif thr >= value_range and largen_thr >= value_range: 505 | limitExpr = "" 506 | else: 507 | if thr <= 0: 508 | limitExpr = f" {src} " 509 | elif thr >= value_range: 510 | limitExpr = f" {flt} " 511 | elif elast <= 1: 512 | limitExpr = f" {dif_abs} {thr} <= {flt} {src} ? " 513 | else: 514 | thr_1 = thr 515 | thr_2 = thr * elast 516 | thr_slope = 1 / (thr_2 - thr_1) 517 | # final = src + dif * (thr_2 - dif_abs) / (thr_2 - thr_1) 518 | limitExpr = f" {src} {dif} {thr_2} {dif_abs} - * {thr_slope} * + " 519 | limitExpr = f" {dif_abs} {thr_1} <= {flt} {dif_abs} {thr_2} >= {src} " + limitExpr + " ? ? 
def cround(x: float) -> int:
    """Round half away from zero (AviSynth-style rounding)."""
    return math.floor(x + 0.5) if x > 0 else math.ceil(x - 0.5)

def m4(x: int) -> int:
    """Round x to a multiple of 4, with a floor of 16 (mod4 resolution helper)."""
    return 16 if x < 16 else cround(x / 4) * 4

def scale(value: int, peak: int) -> (int | float):
    """
    Scale an 8-bit value to the given peak (havsfunc-style); peak == 1
    means float range and returns value / 255.

    NOTE(review): this redefines the earlier scale(i, depth_out, depth_in)
    helper in this module with different semantics (peak-based instead of a
    bit-depth shift); importers of this module silently get this later
    definition -- TODO confirm which one the denoise expressions expect.
    """
    return cround(value * peak / 255) if peak != 1 else value / 255

def Padding(
    clip: vs.VideoNode,
    left: int = 0,
    right: int = 0,
    top: int = 0,
    bottom: int = 0,
) -> vs.VideoNode:
    """Pad the clip by replicating edge pixels via resize.Point."""
    if not isinstance(clip, vs.VideoNode):
        raise vs.Error('Padding: this is not a clip')

    if left < 0 or right < 0 or top < 0 or bottom < 0:
        raise vs.Error('Padding: border size to pad must not be negative')

    width = clip.width + left + right
    height = clip.height + top + bottom

    return clip.resize.Point(width, height, src_left=-left, src_top=-top, src_width=width, src_height=height)

def rescale(
    src: vs.VideoNode,
    kernel: str,
    w: int | None = None,
    h: int | None = None,
    mask: bool | vs.VideoNode = True,
    mask_dif_pix: float = 2,
    show: str = "result",
    postfilter_descaled: VSFuncType | None = None,
    mthr: Sequence[int] = [2,2],
    maskpp: VSFuncType | None = None,
    **args,
) -> vs.VideoNode:
    """
    descale to target resolution and upscale by nnedi3_resample with optional mask and postfilter.
    -----------------------------------------------------------
    kernel: must in ["Debilinear","Debicubic","Delanczos","Despline16","Despline36","Despline64"]
    w,h: target resolution for descale
    mask: True to enable internal mask, or input your clip as mask.
    mask_dif_pix: protection strength; the threshold of difference to consider as
                  native high resolution, calculated under 8bit.
    mthr: [expand,inpand] times for mask
    maskpp: self-defined function replacing the internal expand/inpand processing of the mask.
    postfilter_descaled: self-defined postfilter for the descaled clip.
    show: "result" (default) | "descale" | "mask" | "both"
    #################################
    b,c,taps: parameters for kernel
    nsize,nns,qual,etype,pscrn,exp,sigmoid: parameters for nnedi3_resample
    """
    if src.format.color_family not in [vs.YUV,vs.GRAY]:
        raise ValueError("input clip should be YUV or GRAY!")

    src_h,src_w=src.height,src.width
    if w is None and h is None:
        w,h=1280,720
    elif w is None:
        w=int(h*src_w/src_h)
    elif h is None:
        # bugfix: a bare "else" used to recompute h even when the caller
        # supplied both w and h (same logic as multirescale's getwh).
        h=int(w*src_h/src_w)

    if w>=src_w or h>=src_h:
        raise ValueError("w,h should less than input resolution")

    kernel=kernel.strip().capitalize()
    if kernel not in ["Debilinear","Debicubic","Delanczos","Despline16","Despline36","Despline64"]:
        raise ValueError("unsupport kernel")

    src=core.fmtc.bitdepth(src,bits=16)
    luma=getY(src)

    taps=args.get("taps")
    b,c=args.get("b"),args.get("c")
    nsize=3 if args.get("nsize") is None else args.get("nsize")#keep behavior before
    nns=args.get("nns")
    qual=2 if args.get("qual") is None else args.get("qual")#keep behavior before
    etype=args.get("etype")
    pscrn=args.get("pscrn")
    exp=args.get("exp")
    sigmoid=args.get("sigmoid")

    luma_rescale,mask,luma_de=MRcore(luma,kernel[2:],w,h,mask=mask,mask_dif_pix=mask_dif_pix,postfilter_descaled=postfilter_descaled,mthr=mthr,taps=taps,b=b,c=c,multiple=1,maskpp=maskpp,show="both",nsize=nsize,nns=nns,qual=qual,etype=etype,pscrn=pscrn,exp=exp,sigmoid=sigmoid)

    if show=="descale":
        return luma_de
    elif show=="mask":
        return mask
    elif show=="both":
        return luma_de,mask

    if src.format.color_family==vs.GRAY:
        return luma_rescale
    else:
        return core.std.ShufflePlanes([luma_rescale,src],[0,1,2],vs.YUV)
def rescalef(
    src: vs.VideoNode,
    kernel: str,
    w: float | None = None,
    h: float | None = None,
    bh: float | None = None,
    bw: float | None = None,
    mask: bool = True,
    mask_dif_pix: float = 2,
    show: str = "result",
    postfilter_descaled: VSFuncType | None = None,
    mthr: Sequence[int] = [2,2],
    maskpp: VSFuncType | None = None,
    selective: bool = False,
    upper: float = 0.0001,
    lower: float = 0.00001,
    **args
) -> vs.VideoNode:
    """
    Fractional-resolution descale + nnedi3_resample upscale (refer to GetFnative).

    Same parameters as rescale, plus:
    bh,bw: base height/width used for the fractional descale (bh defaults to
           the source height; bw is forwarded as-is to MRcoref).
    selective: blend the rescale and the source per frame based on the
           frame's "diff" property: above `upper` use the source, below
           `lower` use the rescale, linear blend in between (needs akarin).
    """
    if src.format.color_family not in [vs.YUV,vs.GRAY]:
        raise ValueError("input clip should be YUV or GRAY!")

    src_h,src_w=src.height,src.width
    if w is None and h is None:
        w,h=1280,720
    elif w is None:
        w=int(h*src_w/src_h)
    elif h is None:
        # bugfix: a bare "else" used to recompute h even when the caller
        # supplied both w and h (same logic as multirescale's getwh).
        h=int(w*src_h/src_w)

    # (a second, identical w/h check that followed the bh default was
    # redundant and has been removed)
    if w>=src_w or h>=src_h:
        raise ValueError("w,h should less than input resolution")

    if bh is None:
        bh=src_h

    kernel=kernel.strip().capitalize()
    if kernel not in ["Debilinear","Debicubic","Delanczos","Despline16","Despline36","Despline64"]:
        raise ValueError("unsupport kernel")

    src=core.fmtc.bitdepth(src,bits=16)
    luma=getY(src)

    taps=args.get("taps")
    b,c=args.get("b"),args.get("c")
    nsize=3 if args.get("nsize") is None else args.get("nsize")#keep behavior before
    nns=args.get("nns")
    qual=2 if args.get("qual") is None else args.get("qual")#keep behavior before
    etype=args.get("etype")
    pscrn=args.get("pscrn")
    exp=args.get("exp")
    sigmoid=args.get("sigmoid")

    luma_rescale,mask,luma_de=MRcoref(luma,kernel[2:],w,h,bh,bw,mask=mask,mask_dif_pix=mask_dif_pix,postfilter_descaled=postfilter_descaled,mthr=mthr,taps=taps,b=b,c=c,multiple=1,maskpp=maskpp,show="both",nsize=nsize,nns=nns,qual=qual,etype=etype,pscrn=pscrn,exp=exp,sigmoid=sigmoid)

    if selective:
        base=upper-lower
        #x: rescale  y: src ; reads the per-frame "diff" prop, hence akarin.Expr
        expr=f"x.diff {upper} > y x.diff {lower} < x {upper} x.diff - {base} / y * x.diff {lower} - {base} / x * + ? ?"
        luma_rescale=core.akarin.Expr([luma_rescale,luma], expr)

    if show=="descale":
        return luma_de
    elif show=="mask":
        return mask
    elif show=="both":
        return luma_de,mask

    if src.format.color_family==vs.GRAY:
        return luma_rescale
    else:
        return core.std.ShufflePlanes([luma_rescale,src],[0,1,2],vs.YUV)
def multirescale(
    clip: vs.VideoNode,
    kernels: Sequence[dict],
    w: int | None = None,
    h: int | None = None,
    mask: bool = True,
    mask_dif_pix: float = 2.5,
    postfilter_descaled: VSFuncType | None = None,
    mthr: Sequence[int] = [2,2],
    maskpp: VSFuncType | None = None,
    selective_disable: bool = False,
    disable_thr: float = 0.00001,
    showinfo: bool = False,
    save: str | None = None,
    load: str | None = None,
    kindex: bool = False,
    **args,
) -> vs.VideoNode:
    """
    Try several descale kernels per frame and pick the one with the lowest
    descale error ("diff" frame prop set by MRcore/MRcoref).

    kernels: list of per-kernel option dicts (see MRkernelgen); per-kernel keys
             override the global defaults given to this function.
    selective_disable/disable_thr: fall back to the source when even the best
             kernel's diff exceeds the threshold (exact comparison lost in this
             copy of the source — see NOTE below).
    save/load: write / read a per-frame selection list for manual overrides.
    kindex: attach the chosen kernel index as frame prop 'kindex'
            (and 'nkindex' for the override when `load` is used).
    """
    clip=core.fmtc.bitdepth(clip,bits=16)
    luma=getY(clip)
    src_h,src_w=clip.height,clip.width
    def getwh(w,h):
        # Fill in the missing dimension from the source aspect ratio.
        if w is None and h is None:
            w,h=1280,720
        elif w is None:
            w=int(h*src_w/src_h)
        elif h is None:
            h=int(w*src_h/src_w)

        if w>=src_w or h>=src_h:
            raise ValueError("w,h should less than input resolution")
        return w,h

    w,h=getwh(w,h)

    info_gobal=f"gobal:\nresolution:{w}x{h}\tmask:{mask}\tmask_dif_pix:{mask_dif_pix}\tpostfilter_descaled:{'yes' if callable(postfilter_descaled) else 'no'}\nselective_disable:{selective_disable}\tdisable_thr:{disable_thr:f}\nmthr:{str(mthr)}\tmaskpp:{'yes' if callable(maskpp) else 'no'}\nextra:{str(args)}"
    rescales=[]
    total=len(kernels)
    # Build one rescaled candidate clip per kernel entry; each i is a dict of
    # per-kernel overrides falling back to the function-level defaults.
    for i in kernels:
        fmode=False if i.get("fmode") is None else i.get("fmode")
        k=i["k"][2:]
        kb,kc,ktaps=i.get("b"),i.get("c"),i.get("taps")
        kw,kh=i.get("w"),i.get("h")
        if kw is not None or kh is not None:
            kw,kh=getwh(kw,kh)
        else:
            kw,kh=w,h
        kmask=mask if i.get("mask") is None else i.get("mask")
        kmdp=mask_dif_pix if i.get("mask_dif_pix") is None else i.get("mask_dif_pix")
        kmthr=mthr if i.get("mthr") is None else i.get("mthr")
        kpp=postfilter_descaled if i.get("postfilter_descaled") is None else i.get("postfilter_descaled")
        multiple=1 if i.get("multiple") is None else i.get("multiple")
        kmaskpp=maskpp if i.get("maskpp") is None else i.get("maskpp")


        if not fmode:
            # integer-resolution descale
            rescales.append(MRcore(luma,kernel=k,w=kw,h=kh,mask=kmask,mask_dif_pix=kmdp,postfilter_descaled=kpp,mthr=kmthr,taps=ktaps,b=kb,c=kc,multiple=multiple,maskpp=kmaskpp,**args))
        else:
            # fractional-resolution descale (MRcoref with base height/width)
            kbh=src_h if i.get("bh") is None else i.get("bh")
            kbw=i.get("bw")
            rescales.append(MRcoref(luma,kernel=k,w=kw,h=kh,bh=kbh,bw=kbw,mask=kmask,mask_dif_pix=kmdp,postfilter_descaled=kpp,mthr=kmthr,taps=ktaps,b=kb,c=kc,multiple=multiple,maskpp=kmaskpp,**args))

    if load is not None:
        # Override list: "n<TAB>select<TAB>diffs..." per line, header skipped.
        with open(load,"r",encoding="utf-8") as file:
            plist=file.read().split('\n')

        slist={}
        for line in plist[1:]:
            tmp=line.split("\t")
            if len(tmp)>=2:
                slist[int(tmp[0])]={'select':int(tmp[1]),'diff':[float(i) for i in tmp[2:]]}

    if save is not None and save!=load:
        saves=open(save,"w",encoding="utf-8")
        saves.write("n\tselect\t"+"\t".join([str(i) for i in range(len(kernels))])+"\n")

    def selector(n,f,src,clips):
        # Per-frame callback: pick the candidate with the lowest "diff" prop.
        kernels_info=[]
        diffs=[]
        if len(f)==1:
            f=[f]
        index,mindiff=0,f[0].props["diff"]
        for i in range(total):
            tmpdiff=f[i].props["diff"]
            diffs.append(tmpdiff)
            kernels_info.append(f"kernel {i}:\t{kernels[i]}\n{tmpdiff:.10f}")
            # NOTE(review): this copy of the source is corrupted here — several
            # lines are missing between the append above and the condition
            # below (presumably the best-kernel update `if tmpdiff<mindiff:`,
            # the info/info_short/usesrc initialisation, and the
            # `if selective_disable and mindiff>disable_thr:` test).
            # Restore from upstream xvs before relying on this function.
            if tmpdiffdisable_thr:
                usesrc=True
                last=src
                info+="source"
                info_short=f"{n}\t-1\t"+info_short+"\n"
                index=-1
            else:
                last=clips[index]
                info+=kernels_info[index]
                info_short=f"{n}\t{index}\t"+info_short+"\n"

        if load is not None:
            # Apply the manual per-frame override from the loaded list.
            info+="--------------------\noverwrite info:\n"
            if slist.get(n) is not None:
                newindex=slist[n]["select"]
                if newindex==-1:
                    if usesrc:
                        info+="same as current"
                    else:
                        last=src
                        info+="source"
                else:
                    if newindex==index:
                        info+="same as current"
                    else:
                        last=clips[newindex]
                        info+=kernels_info[newindex]
            else:
                newindex=-2
                info+="skip"

        if showinfo:
            last=core.text.Text(last,info.replace("\t"," "))
        if save is not None:
            saves.write(info_short)
        if kindex:
            last=core.std.SetFrameProp(last,'kindex',intval=index)
        if load:
            last=core.std.SetFrameProp(last,'nkindex',intval=newindex)
        return last
    # NOTE(review): `saves` only exists when save is not None and save!=load,
    # yet it is closed unconditionally here — and closing it at script-eval
    # time means selector() later writes to a closed file. Looks buggy in the
    # original; confirm against upstream before changing.
    saves.close()

    last=core.std.FrameEval(luma,functools.partial(selector,src=luma,clips=rescales),prop_src=rescales)
    if clip.format.color_family==vs.GRAY:
        return last
    else:
        return core.std.ShufflePlanes([last,clip],[0,1,2],vs.YUV)
288 | return core.std.ShufflePlanes([last,clip],[0,1,2],vs.YUV) 289 | 290 | def resize_core( 291 | kernel: str, 292 | taps: int=3, 293 | b: float=0, 294 | c: float=0 295 | ) -> VSFuncType: 296 | kernel=kernel.capitalize() 297 | if kernel in ["Bilinear","Spline16","Spline36","Spline64"]: 298 | return eval(f"core.resize.{kernel}") 299 | elif kernel == "Bicubic": 300 | return functools.partial(core.resize.Bicubic,filter_param_a=b,filter_param_b=c) 301 | elif kernel == "Lanczos": 302 | return functools.partial(core.resize.Lanczos,filter_param_a=taps) 303 | 304 | def MRcore( 305 | clip: vs.VideoNode, 306 | kernel: str, 307 | w: int, 308 | h: int, 309 | mask: bool | vs.VideoNode = True, 310 | mask_dif_pix: float = 2, 311 | postfilter_descaled: VSFuncType | None = None, 312 | mthr:Sequence[int] = [2,2], 313 | taps: int = 3, 314 | b: float = 0, 315 | c: float = 0.5, 316 | multiple: float = 1, 317 | maskpp: VSFuncType | None = None, 318 | show: str = "result", 319 | **args 320 | ) -> vs.VideoNode: 321 | src_w,src_h=clip.width,clip.height 322 | clip32=core.fmtc.bitdepth(clip,bits=32) 323 | descaled=core.descale.Descale(clip32,width=w,height=h,kernel=kernel.lower(),taps=taps,b=b,c=c) 324 | upscaled=resize_core(kernel.capitalize(),taps,b,c)(descaled,src_w,src_h) 325 | diff=Expr([clip32,upscaled],"x y - abs dup 0.015 > swap 0 ?").std.PlaneStats() 326 | 327 | def calc(n,f): 328 | fout=f[1].copy() 329 | fout.props["diff"]=f[0].props["PlaneStatsAverage"]*multiple 330 | return fout 331 | 332 | if postfilter_descaled is None: 333 | pass 334 | elif callable(postfilter_descaled): 335 | descaled=postfilter_descaled(descaled) 336 | else: 337 | raise ValueError("postfilter_descaled must be a function") 338 | 339 | nsize=3 if args.get("nsize") is None else args.get("nsize") 340 | nns=args.get("nns") 341 | qual=2 if args.get("qual") is None else args.get("qual") 342 | etype=args.get("etype") 343 | pscrn=args.get("pscrn") 344 | exp=args.get("exp") 345 | sigmoid=args.get("sigmoid") 346 | 347 
| rescale=nnedi3_resample(descaled,src_w,src_h,nsize=nsize,nns=nns,qual=qual,etype=etype,pscrn=pscrn,exp=exp,sigmoid=sigmoid).fmtc.bitdepth(bits=16) 348 | 349 | if mask is True: 350 | mask=Expr([clip,upscaled.fmtc.bitdepth(bits=16,dmode=1)],"x y - abs").std.Binarize(mask_dif_pix*256) 351 | if callable(maskpp): 352 | mask=maskpp(mask) 353 | else: 354 | mask=expand(mask,cycle=mthr[0]) 355 | mask=inpand(mask,cycle=mthr[1]) 356 | rescale=core.std.MaskedMerge(rescale,clip,mask) 357 | elif isinstance(mask,vs.VideoNode): 358 | if mask.width!=src_w or mask.height!=src_h or mask.format.color_family!=vs.GRAY: 359 | raise ValueError("mask should have same resolution as source,and should be GRAY") 360 | mask=core.fmtc.bitdepth(mask,bits=16,dmode=1) 361 | rescale=core.std.MaskedMerge(rescale,clip,mask) 362 | else: 363 | mask=core.std.BlankClip(rescale) 364 | 365 | if show.lower()=="result": 366 | return core.std.ModifyFrame(rescale,[diff,rescale],calc) 367 | elif show.lower()=="mask" and mask: 368 | return core.std.ModifyFrame(mask,[diff,mask],calc) 369 | elif show.lower()=="descale": 370 | return descaled #after postfilter_descaled 371 | elif show.lower()=="both": #result,mask,descaled 372 | return core.std.ModifyFrame(rescale,[diff,rescale],calc),core.std.ModifyFrame(mask,[diff,mask],calc),descaled 373 | 374 | def MRcoref( 375 | clip: vs.VideoNode, 376 | kernel: str, 377 | w: float, 378 | h: float, 379 | bh: int, 380 | bw: int | None =None, 381 | mask: bool | vs.VideoNode = True, 382 | mask_dif_pix: float = 2, 383 | postfilter_descaled: VSFuncType | None = None, 384 | mthr: Sequence[int] = [2,2], 385 | taps: int = 3, 386 | b: float = 0, 387 | c: float = 0.5, 388 | multiple: float = 1, 389 | maskpp: VSFuncType | None = None, 390 | show: str = "result", 391 | **args 392 | ) -> vs.VideoNode: 393 | 394 | src_w,src_h=clip.width,clip.height 395 | cargs=cropping_args(src_w,src_h,h,bh,bw) 396 | clip32=core.fmtc.bitdepth(clip,bits=32) 397 | 
descaled=core.descale.Descale(clip32,kernel=kernel.lower(),taps=taps,b=b,c=c,**cargs.descale_gen()) 398 | upscaled=resize_core(kernel.capitalize(),taps,b,c)(descaled,**cargs.resize_gen()) 399 | diff=Expr([clip32,upscaled],"x y - abs dup 0.015 > swap 0 ?").std.Crop(10, 10, 10, 10).std.PlaneStats() 400 | def calc(n,f): 401 | fout=f[1].copy() 402 | fout.props["diff"]=f[0].props["PlaneStatsAverage"]*multiple 403 | return fout 404 | 405 | if postfilter_descaled is None: 406 | pass 407 | elif callable(postfilter_descaled): 408 | descaled=postfilter_descaled(descaled) 409 | else: 410 | raise ValueError("postfilter_descaled must be a function") 411 | 412 | nsize=3 if args.get("nsize") is None else args.get("nsize") 413 | nns=args.get("nns") 414 | qual=2 if args.get("qual") is None else args.get("qual") 415 | etype=args.get("etype") 416 | pscrn=args.get("pscrn") 417 | exp=args.get("exp") 418 | sigmoid=args.get("sigmoid") 419 | 420 | rescale=nnedi3_resample(descaled,nsize=nsize,nns=nns,qual=qual,etype=etype,pscrn=pscrn,exp=exp,sigmoid=sigmoid,**cargs.nnrs_gen()).fmtc.bitdepth(bits=16) 421 | 422 | if mask is True: 423 | mask=Expr([clip,upscaled.fmtc.bitdepth(bits=16,dmode=1)],"x y - abs").std.Binarize(mask_dif_pix*256) 424 | if callable(maskpp): 425 | mask=maskpp(mask) 426 | else: 427 | mask=expand(mask,cycle=mthr[0]) 428 | mask=inpand(mask,cycle=mthr[1]) 429 | rescale=core.std.MaskedMerge(rescale,clip,mask) 430 | elif isinstance(mask,vs.VideoNode): 431 | if mask.width!=src_w or mask.height!=src_h or mask.format.color_family!=vs.GRAY: 432 | raise ValueError("mask should have same resolution as source,and should be GRAY") 433 | mask=core.fmtc.bitdepth(mask,bits=16,dmode=1) 434 | rescale=core.std.MaskedMerge(rescale,clip,mask) 435 | else: 436 | mask=core.std.BlankClip(rescale) 437 | 438 | if show.lower()=="result": 439 | return core.std.ModifyFrame(rescale,[diff,rescale],calc) 440 | elif show.lower()=="mask" and mask: 441 | return core.std.ModifyFrame(mask,[diff,mask],calc) 442 
| elif show.lower()=="descale": 443 | return descaled #after postfilter_descaled 444 | elif show.lower()=="both": #result,mask,descaled 445 | return core.std.ModifyFrame(rescale,[diff,rescale],calc),core.std.ModifyFrame(mask,[diff,mask],calc),descaled 446 | 447 | def MRkernelgen(k,w=None,h=None,b=None,c=None,taps=None,mask=None,mask_dif_pix=None,mthr=None,pp=None,multiple=None,maskpp=None,fmode=None) -> dict: 448 | l=locals() 449 | tmp={} 450 | for i in l.keys(): 451 | if l[i] is not None: 452 | tmp[i]=l[i] 453 | return tmp 454 | 455 | @deprecated("Not real needed function,no maintenance.") 456 | def dpidDown( 457 | src: vs.VideoNode, 458 | width: int | None = None, 459 | height: int | None = None, 460 | Lambda: float | None = 1.0, 461 | matrix_in: str | None = None, 462 | matrix: str | None = None, 463 | transfer_in: str = "709", 464 | transfer: str | None = None, 465 | primaries_in: str | None = None, 466 | primaries: str | None = None, 467 | css: str | None = None, 468 | depth: int = 16, 469 | dither_type: str = "error_diffusion", 470 | range_in:str = None, 471 | range_out:str = None, 472 | ) -> vs.VideoNode: 473 | """ 474 | dpidDown 475 | -------------------------------- 476 | use dpid as kernel in Gamma-aware resize,only downscale. 
477 | need CUDA-Enabled GPU 478 | """ 479 | def M(a,b): 480 | if a <= 1024 and b <= 576: 481 | return "170m" 482 | elif a <= 2048 and b <= 1536: 483 | return "709" 484 | else : 485 | return "2020ncl" 486 | ############## 487 | if width is None: 488 | width = src.width 489 | if height is None: 490 | height = src.height 491 | if width>src.width or height > src.height: 492 | raise ValueError("") 493 | isRGB=src.format.color_family==vs.RGB 494 | if transfer is None: 495 | transfer=transfer_in 496 | if matrix is None: 497 | matrix=M(width,height) 498 | if isRGB: 499 | matrix_in="rgb" 500 | if css is not None: 501 | css=str(css).lower() 502 | if css == "444" or css == "4:4:4": 503 | css = "11" 504 | elif css == "440" or css == "4:4:0": 505 | css = "12" 506 | elif css == "422" or css == "4:2:2": 507 | css = "21" 508 | elif css == "420" or css == "4:2:0": 509 | css = "22" 510 | elif css == "411" or css == "4:1:1": 511 | css = "41" 512 | elif css == "410" or css == "4:1:0": 513 | css = "42" 514 | if css not in ["11","12","21","22","41","42","rgb"]: 515 | raise ValueError("") 516 | if range_in is None: 517 | range_in="full" if isRGB else "limited" 518 | if range_out is None: 519 | if css is None: 520 | range_out=range_in 521 | elif isRGB and css=="rgb": 522 | range_out=range_in 523 | elif not isRGB and css!="rgb": 524 | range_out=range_in 525 | elif isRGB and css!="rgb": 526 | range_out="limited" 527 | else: 528 | range_out="full" 529 | range_in=range_in.lower() 530 | range_out=range_out.lower() 531 | if range_in=="tv": 532 | range_in="limited" 533 | if range_in=="pc": 534 | range_in="full" 535 | if range_in not in ["limited","full"]: 536 | raise ValueError("") 537 | if range_out=="tv": 538 | range_out="limited" 539 | if range_out=="pc": 540 | range_out="full" 541 | if range_out not in ["limited","full"]: 542 | raise ValueError("") 543 | 
class cropping_args:
    """
    Helper that turns a fractional descale target into the width/height and
    src_left/src_top/src_width/src_height arguments required by Descale,
    core.resize and nnedi3_resample (rewritten from getfnative's
    descale_cropping_args).
    """

    def __init__(self, width: int, height: int, src_height: float, base_height: int, base_width=None, mode: str = 'wh'):
        assert base_height >= src_height
        self.mode = mode
        self.width = width
        self.height = height
        self.base_height = base_height
        # derive base_width from the clip aspect ratio when not given
        self.base_width = base_width if base_width is not None else self.getw(base_height)
        self.src_height = src_height
        self.src_width = src_height * width / height
        # shrink the base canvas symmetrically until it just contains the
        # fractional source dimensions
        self.cropped_width = self.base_width - 2 * math.floor((self.base_width - self.src_width) / 2)
        self.cropped_height = self.base_height - 2 * math.floor((self.base_height - self.src_height) / 2)

    def _merged(self, base, args_w, args_h):
        # Apply the width and/or height argument groups according to `mode`.
        if "w" in self.mode:
            base.update(args_w)
        if "h" in self.mode:
            base.update(args_h)
        return base

    def descale_gen(self):
        """Keyword arguments for core.descale.Descale."""
        return self._merged(
            {"width": self.width, "height": self.height},
            {"width": self.cropped_width, "src_width": self.src_width, "src_left": (self.cropped_width - self.src_width) / 2},
            {"height": self.cropped_height, "src_height": self.src_height, "src_top": (self.cropped_height - self.src_height) / 2},
        )

    def resize_gen(self):
        """Keyword arguments for the core.resize upscale back to full size."""
        return self._merged(
            {"width": self.width, "height": self.height},
            {"src_width": self.src_width, "src_left": (self.cropped_width - self.src_width) / 2},
            {"src_height": self.src_height, "src_top": (self.cropped_height - self.src_height) / 2},
        )

    def nnrs_gen(self):
        """Keyword arguments for nnedi3_resample (target_* naming)."""
        return self._merged(
            {"target_width": self.width, "target_height": self.height},
            {"src_width": self.src_width, "src_left": (self.cropped_width - self.src_width) / 2},
            {"src_height": self.src_height, "src_top": (self.cropped_height - self.src_height) / 2},
        )

    def getw(self, height: int):
        """Width matching `height` at the clip aspect ratio (evened when height is even)."""
        w = math.ceil(height * self.width / self.height)
        return w // 2 * 2 if height % 2 == 0 else w
len(clip) 17 | num_len = len(num) 18 | den_len = len(den) 19 | if clip_len > num_len: 20 | for i in range(num_len,clip_len): 21 | num.append(None) 22 | if clip_len > den_len: 23 | for i in range(den_len,clip_len): 24 | den.append(None) 25 | for i in range(0,clip_len): 26 | if num[i] is None: 27 | num[i] = clip[i].fps_num 28 | den[i] = clip[i].fps_den 29 | elif den[i] is None: 30 | if num[i] > 10000: 31 | den[i] = 1001 32 | else: 33 | den[i]=1 34 | fps=[] 35 | for i in range(0,clip_len): 36 | fps.append(float(num[i])/den[i]) 37 | fnum=[i.num_frames for i in clip] 38 | for i in range(1,clip_len): 39 | fnum[i]+=fnum[i-1] 40 | tc = open(tc_out,"w") 41 | tc.write("# timecode format v1\nassume "+str(fps[0])+"\n") 42 | for i in range(1,clip_len): 43 | tc.write(str(fnum[i-1])+","+str(fnum[i]-1)+","+str(fps[i])+"\n") 44 | tc.close() 45 | last = clip[0] 46 | for i in range(1,clip_len): 47 | last+=clip[i] 48 | last=core.std.AssumeFPS(last,fpsnum=num[0],fpsden=den[0]) 49 | return last 50 | 51 | def mvfrc( 52 | input: vs.VideoNode, 53 | it: int = 140, 54 | scp: int = 15, 55 | num: int = 60000, 56 | den: int = 1001, 57 | preset: str = 'fast', 58 | pel: int = 2, 59 | block: bool = True, 60 | flow_mask: int | None = None, 61 | block_mode: int | None = None, 62 | blksize: int = 8, 63 | blksizev: int = 8, 64 | search: int | None = None, 65 | truemotion: bool | None = True, 66 | searchparam: int = 2, 67 | overlap: int = 0, 68 | overlapv: int | None = None, 69 | dct: int = 0, 70 | blend: bool = True, 71 | badSAD: int = 10000, 72 | badrange: int = 24, 73 | divide: int = 0, 74 | ml: float = 100, 75 | Mblur: int = 15, 76 | ) -> vs.VideoNode: 77 | """ 78 | change fps by mvtools with motion interpolation 79 | it = thscd1 ; scp=thscd2/255*100 80 | """ 81 | funcName = 'mvfrc' 82 | if not isinstance(input, vs.VideoNode): 83 | raise TypeError(funcName + ': This is not a clip!') 84 | ############# 85 | if preset == 'fast': 86 | pnum=0 87 | elif preset == 'medium': 88 | pnum=1 89 | elif preset 
== 'slow': 90 | pnum=2 91 | else: 92 | raise TypeError(funcName + r":preset should be fast\ medium\slow'") 93 | overlapv = overlap 94 | ############# 95 | if search is None : search = [0,3,3][pnum] 96 | if block_mode is None : block_mode = [0,0,3][pnum] 97 | if flow_mask is None : flow_mask = [0,0,2][pnum] 98 | ############# 99 | analParams = { 100 | 'overlap' : overlap, 101 | 'overlapv':overlapv, 102 | 'search' : search, 103 | 'dct':dct, 104 | 'truemotion' : truemotion, 105 | 'blksize' : blksize, 106 | 'blksizev':blksizev, 107 | 'searchparam':searchparam, 108 | 'badsad':badSAD, 109 | 'badrange':badrange, 110 | 'divide':divide 111 | } 112 | ############ 113 | #block or flow Params 114 | bofp = { 115 | 'thscd1':it, 116 | 'thscd2':int(scp*255/100), 117 | 'blend':blend, 118 | 'num':num, 119 | 'den':den, 120 | 'ml' : ml, 121 | } 122 | ############ 123 | sup = core.mv.Super(input, pel=pel,sharp=2, rfilter=4) 124 | bvec = core.mv.Analyse(sup, isb=True, **analParams) 125 | fvec = core.mv.Analyse(sup, isb=False, **analParams) 126 | if input.fps_num/input.fps_den > num/den: 127 | input = core.mv.FlowBlur(input, sup, bvec, fvec, blur=Mblur) 128 | if block == True: 129 | clip = core.mv.BlockFPS(input, sup, bvec, fvec,**bofp,mode=block_mode) 130 | else: 131 | clip = core.mv.FlowFPS(input, sup, bvec, fvec,**bofp,mask=flow_mask) 132 | return clip 133 | 134 | @deprecated("No maintenance.") 135 | def textsub( 136 | input: vs.VideoNode, 137 | file: str, 138 | charset: str | None = None, 139 | fps: float | None = None, 140 | vfr: str | None = None, 141 | mod: bool = False, 142 | Matrix: str | None = None, 143 | ) -> vs.VideoNode: 144 | """ 145 | --------------------------- 146 | textsub 147 | --------------------------- 148 | Can support high bit and yuv444&yuv422,but not rgb 149 | Not recommended for yuv420p8 150 | It's a port from avs script—textsub16 by mawen1250,but have some differences 151 | --------------------------- 152 | input,file,charset,fps,vfr: same in vsfilter 153 | 
mod: Choose whether to use(vsfiler or vsfiltermod),deflaut is False,means use vsfilter 154 | """ 155 | def M(a,b): 156 | if a <= 1024 and b <= 576: 157 | return "601" 158 | elif a <= 2048 and b <= 1536: 159 | return "709" 160 | else : 161 | return "2020" 162 | 163 | funcName = "textsub16" 164 | width = input.width 165 | height = input.height 166 | bit = input.format.bits_per_sample 167 | U = getplane(mask,1) 168 | c_w = U.width 169 | c_h = U.height 170 | w = width/c_w 171 | h = height/c_h 172 | if w==1 and h==1: 173 | f = vs.YUV444P16 174 | s = 0 175 | elif w==2 and h==1: 176 | f = vs.YUV422P16 177 | s = 0.5 178 | elif w==2 and h==2: 179 | f = vs.YUV420P16 180 | s = 0.25 181 | else: 182 | TypeError(funcName + ': Only support YUV420、YUV422、YUV444') 183 | if Matrix is None: 184 | Matrix = M(width,height) 185 | ############## 186 | def vsmode(clip,file,charset,fps,vfr,mod): 187 | core = vs.core 188 | if mod == False: 189 | last = core.vsf.TextSub(clip,file,charset,fps,vfr) 190 | else: 191 | last = core.vsfm.TextSubMod(clip,file,charset,fps,vfr) 192 | return core.std.Cache(last, make_linear=True) 193 | 194 | def mskE(a,b,depth): 195 | core=vs.core 196 | expr="x y - abs 1 < 0 255 ?" 
197 | last = Expr([a,b], [expr]*3) 198 | return core.fmtc.bitdepth(last,bits=depth) 199 | ############## 200 | src8 = core.resize.Bilinear(input,format=vs.YUV420P8) 201 | sub8 = vsmode(src8,file,charset,fps,vfr,mod) 202 | mask = mskE(src8,sub8,bit) 203 | maskY = getplane(mask,0) 204 | maskU = getplane(mask,1) 205 | maskU = core.resize.Bilinear(maskU,width,height,src_left=s) 206 | maskV = getplane(mask,2) 207 | maskV = core.resize.Bilinear(maskV,width,height,src_left=s) 208 | mask = Expr([maskY,maskU,maskV],"x y max z max") 209 | mask = core.std.Inflate(mask) 210 | maskC = core.resize.Bilinear(mask,c_w,c_h,src_left=-s)#,src_width=c_w,src_height=c_h 211 | if w==1 and h==1: 212 | mask = core.std.ShufflePlanes(mask,[0,0,0], colorfamily=vs.YUV) 213 | else: 214 | mask = core.std.ShufflePlanes([mask,maskC],[0,0,0], colorfamily=vs.YUV) 215 | ################ 216 | rgb = core.resize.Bilinear(input,format=vs.RGB24,matrix_in_s=Matrix) 217 | sub = vsmode(rgb,file,charset,fps,vfr,mod) 218 | sub = core.resize.Bilinear(sub,format=f,matrix_s=Matrix) 219 | sub = core.fmtc.bitdepth(sub,bits=bit) 220 | last = core.std.MaskedMerge(input, sub, mask=mask, planes=[0,1,2]) 221 | return last 222 | 223 | @deprecated("No maintenance.") 224 | def vfrtocfr( 225 | clip: vs.VideoNode, 226 | tc: str, 227 | num: int, 228 | den: int = 1, 229 | blend: bool = False, 230 | ) -> vs.VideoNode: 231 | """ 232 | vfrtocfr 233 | -------------------------------- 234 | clip: input clip 235 | tc: input timecodes,only support tcv2 236 | num,den: output fps=num/den 237 | blend: True means blend the frames instead of delete or copy , default is False 238 | """ 239 | def tclist(f): 240 | A=open(f,"r") 241 | B=re.sub(r'(.\n)*# timecode format v2(.|\n)*\n0',r'0',A.read(),count=1) 242 | A.close() 243 | C=B.split() 244 | T=[] 245 | for i in C: 246 | T.append(int(float(i))) 247 | return T 248 | ################# 249 | vn = clip.num_frames 250 | vtc = tclist(tc) 251 | cn = int(vtc[-1]*num/den/1000) 252 | ctc = 
[int(1000*den/num)*i for i in range(0,cn+1)] 253 | cc = clip[0] 254 | for i in range(1,cn+1): 255 | for j in range(1,vn+1): 256 | if ctc[i]=vtc[j] and ctc[i](vtc[j+1]-ctc[i]) else clip[j] 261 | else: 262 | cl=core.std.Merge(clip[j-1],clip[j],weight=(ctc[i]-vtc[j])/(vtc[j+1]-vtc[j])) 263 | cc += cl 264 | last = core.std.AssumeFPS(cc,fpsnum=num,fpsden=den) 265 | return core.std.Cache(last, make_linear=True) 266 | 267 | @deprecated("No maintenance.") 268 | def Overlaymod( 269 | clipa: vs.VideoNode, 270 | clipb: vs.VideoNode, 271 | x: int = 0, 272 | y: int = 0, 273 | alpha: vs.VideoNode | None = None, 274 | aa: bool = False, 275 | ) -> vs.VideoNode: 276 | """ 277 | Overlaymod 278 | ------------------------- 279 | modified overlay by xyx98, 280 | orginal Overlay by holy in havsfunc 281 | ------------------------- 282 | difference: mask->alpha,please input the alpha clip read by imwri if needed 283 | aa: if is True,use daa in clipb and alpha clip 284 | """ 285 | if not (isinstance(clipa, vs.VideoNode) and isinstance(clipb, vs.VideoNode)): 286 | raise TypeError('Overlaymod: This is not a clip') 287 | if clipa.format.subsampling_w > 0 or clipa.format.subsampling_h > 0: 288 | clipa_src = clipa 289 | clipa = core.resize.Point(clipa, format=core.register_format(clipa.format.color_family, clipa.format.sample_type, clipa.format.bits_per_sample, 0, 0).id) 290 | else: 291 | clipa_src = None 292 | if clipb.format.id != clipa.format.id: 293 | clipb = core.resize.Point(clipb, format=clipa.format.id) 294 | mask = core.std.BlankClip(clipb, color=[(1 << clipb.format.bits_per_sample) - 1] * clipb.format.num_planes) 295 | if (alpha is not None) and (not isinstance(alpha, vs.VideoNode)): 296 | raise TypeError("Overlaymod: 'alpha' is not a clip") 297 | if mask.width != clipb.width or mask.height != clipb.height: 298 | raise TypeError("Overlaymod: 'alpha' must be the same dimension as 'clipb'") 299 | 300 | if aa: 301 | from .aa import daa 302 | clipb = daa(clipb) 303 | # Calculate padding 
    # NOTE(review): InterFrame's `def` line and parameter list are destroyed
    # in this copy of the source (the text between the end of Overlaymod and
    # `) -> vs.VideoNode:` is missing). The body references: Input, Preset,
    # Tuning, NewNum, NewDen, GPU, gpuid, OverrideAlgo, OverrideArea,
    # InputType, FrameDouble — restore the signature from upstream xvs.
    """
    adjusted InterFrame from havsfunc,support 10bit with new svp
    """
    if not isinstance(Input, vs.VideoNode):
        raise TypeError('InterFrame: This is not a clip')

    sw=Input.format.subsampling_w
    sh=Input.format.subsampling_h
    depth= Input.format.bits_per_sample
    # NOTE(review): this boolean looks suspicious (`and` where the message
    # implies "must be 4:2:0 AND 8/10 bit") — confirm against upstream.
    if not sw ==1 and not sh==1 and depth not in [8,10]:
        raise TypeError('InterFrame: input must be yuv420p8 or yuv420p10')
    # SVP analysis runs on an 8-bit copy; interpolation is applied to the
    # original-depth clip (oInput).
    oInput=Input
    Input = core.fmtc.bitdepth(Input,bits=8)
    # Validate inputs
    Preset = Preset.lower()
    Tuning = Tuning.lower()
    InputType = InputType.upper()
    if Preset not in ['medium', 'fast', 'faster', 'fastest']:
        raise ValueError("InterFrame: '{Preset}' is not a valid preset".format(Preset=Preset))
    if Tuning not in ['film', 'smooth', 'animation', 'weak']:
        raise ValueError("InterFrame: '{Tuning}' is not a valid tuning".format(Tuning=Tuning))
    if InputType not in ['2D', 'SBS', 'OU', 'HSBS', 'HOU']:
        raise ValueError("InterFrame: '{InputType}' is not a valid InputType".format(InputType=InputType))

    def InterFrameProcess(clip,oclip):
        # Build the three JSON-ish SVP option strings from Preset/Tuning,
        # then run Super -> Analyse -> SmoothFps.
        # Create SuperString
        if Preset in ['fast', 'faster', 'fastest']:
            SuperString = '{pel:1,'
        else:
            SuperString = '{'

        SuperString += 'gpu:1}' if GPU else 'gpu:0}'

        # Create VectorsString
        if Tuning == 'animation' or Preset == 'fastest':
            VectorsString = '{block:{w:32,'
        elif Preset in ['fast', 'faster'] or not GPU:
            VectorsString = '{block:{w:16,'
        else:
            VectorsString = '{block:{w:8,'

        if Tuning == 'animation' or Preset == 'fastest':
            VectorsString += 'overlap:0'
        elif Preset == 'faster' and GPU:
            VectorsString += 'overlap:1'
        else:
            VectorsString += 'overlap:2'

        if Tuning == 'animation':
            VectorsString += '},main:{search:{coarse:{type:2,'
        elif Preset == 'faster':
            VectorsString += '},main:{search:{coarse:{'
        else:
            VectorsString += '},main:{search:{distance:0,coarse:{'

        if Tuning == 'animation':
            VectorsString += 'distance:-6,satd:false},distance:0,'
        elif Tuning == 'weak':
            VectorsString += 'distance:-1,trymany:true,'
        else:
            VectorsString += 'distance:-10,'

        if Tuning == 'animation' or Preset in ['faster', 'fastest']:
            VectorsString += 'bad:{sad:2000}}}}}'
        elif Tuning == 'weak':
            VectorsString += 'bad:{sad:2000}}}},refine:[{thsad:250,search:{distance:-1,satd:true}}]}'
        else:
            VectorsString += 'bad:{sad:2000}}}},refine:[{thsad:250}]}'

        # Create SmoothString
        if NewNum is not None:
            SmoothString = '{rate:{num:' + repr(NewNum) + ',den:' + repr(NewDen) + ',abs:true},'
        elif clip.fps_num / clip.fps_den in [15, 25, 30] or FrameDouble:
            SmoothString = '{rate:{num:2,den:1,abs:false},'
        else:
            SmoothString = '{rate:{num:60000,den:1001,abs:true},'
        if GPU:
            SmoothString+= 'gpuid:'+repr(gpuid)+','
        if OverrideAlgo is not None:
            SmoothString += 'algo:' + repr(OverrideAlgo) + ',mask:{cover:80,'
        elif Tuning == 'animation':
            SmoothString += 'algo:2,mask:{'
        elif Tuning == 'smooth':
            SmoothString += 'algo:23,mask:{'
        else:
            SmoothString += 'algo:13,mask:{cover:80,'

        if OverrideArea is not None:
            SmoothString += 'area:{OverrideArea}'.format(OverrideArea=OverrideArea)
        elif Tuning == 'smooth':
            SmoothString += 'area:150'
        else:
            SmoothString += 'area:0'

        if Tuning == 'weak':
            SmoothString += ',area_sharp:1.2},scene:{blend:true,mode:0,limits:{blocks:50}}}'
        else:
            SmoothString += ',area_sharp:1.2},scene:{blend:true,mode:0}}'

        # Make interpolation vector clip
        Super = core.svp1.Super(clip, SuperString)
        Vectors = core.svp1.Analyse(Super['clip'], Super['data'], clip, VectorsString)

        # Put it together
        return core.svp2.SmoothFps(oclip, Super['clip'], Super['data'], Vectors['clip'], Vectors['data'], SmoothString)

    # Get either 1 or 2 clips depending on InputType
    if InputType == 'SBS':
        FirstEye = InterFrameProcess(core.std.CropRel(Input, right=Input.width // 2),
                                     core.std.CropRel(oInput, right=Input.width // 2))
        SecondEye = InterFrameProcess(core.std.CropRel(Input, left=Input.width // 2),
                                      core.std.CropRel(oInput, left=Input.width // 2))
        return core.std.StackHorizontal([FirstEye, SecondEye])
    elif InputType == 'OU':
        FirstEye = InterFrameProcess(core.std.CropRel(Input, bottom=Input.height // 2),
                                     core.std.CropRel(oInput, bottom=Input.height // 2))
        SecondEye = InterFrameProcess(core.std.CropRel(Input, top=Input.height // 2),
                                      core.std.CropRel(oInput, top=Input.height // 2))
        return core.std.StackVertical([FirstEye, SecondEye])
    elif InputType == 'HSBS':
        # half-width stereo: upscale each eye, interpolate, downscale back
        FirstEye = InterFrameProcess(core.std.CropRel(Input, right=Input.width // 2).resize.Spline36(Input.width, Input.height),
                                     core.std.CropRel(oInput, right=oInput.width // 2).resize.Spline36(oInput.width, oInput.height))
        SecondEye = InterFrameProcess(core.std.CropRel(Input, left=Input.width // 2).resize.Spline36(Input.width, Input.height),
                                      core.std.CropRel(oInput, left=oInput.width // 2).resize.Spline36(oInput.width, oInput.height))
        return core.std.StackHorizontal([core.resize.Spline36(FirstEye, Input.width // 2, Input.height),
                                         core.resize.Spline36(SecondEye, Input.width // 2, Input.height)])
    elif InputType == 'HOU':
        FirstEye = InterFrameProcess(core.std.CropRel(Input, bottom=Input.height // 2).resize.Spline36(Input.width, Input.height),
                                     core.std.CropRel(oInput, bottom=oInput.height // 2).resize.Spline36(oInput.width, oInput.height))
        SecondEye = InterFrameProcess(core.std.CropRel(Input, top=Input.height // 2).resize.Spline36(Input.width, Input.height),
                                      core.std.CropRel(oInput, top=oInput.height // 2).resize.Spline36(oInput.width, oInput.height))
        return core.std.StackVertical([core.resize.Spline36(FirstEye, Input.width, Input.height // 2),
                                       core.resize.Spline36(SecondEye, Input.width, Input.height // 2)])
    else:
        return InterFrameProcess(Input,oInput)
right=oInput.width // 2).resize.Spline36(oInput.width, oInput.height)) 470 | SecondEye = InterFrameProcess(core.std.CropRel(Input, left=Input.width // 2).resize.Spline36(Input.width, Input.height), 471 | core.std.CropRel(oInput, left=oInput.width // 2).resize.Spline36(oInput.width, oInput.height)) 472 | return core.std.StackHorizontal([core.resize.Spline36(FirstEye, Input.width // 2, Input.height), 473 | core.resize.Spline36(SecondEye, Input.width // 2, Input.height)]) 474 | elif InputType == 'HOU': 475 | FirstEye = InterFrameProcess(core.std.CropRel(Input, bottom=Input.height // 2).resize.Spline36(Input.width, Input.height), 476 | core.std.CropRel(oInput, bottom=oInput.height // 2).resize.Spline36(oInput.width, oInput.height)) 477 | SecondEye = InterFrameProcess(core.std.CropRel(Input, top=Input.height // 2).resize.Spline36(Input.width, Input.height), 478 | core.std.CropRel(oInput, top=oInput.height // 2).resize.Spline36(oInput.width, oInput.height)) 479 | return core.std.StackVertical([core.resize.Spline36(FirstEye, Input.width, Input.height // 2), 480 | core.resize.Spline36(SecondEye, Input.width, Input.height // 2)]) 481 | else: 482 | return InterFrameProcess(Input,oInput) 483 | 484 | @deprecated("Do not use!") 485 | def xTonemap( 486 | clip: vs.VideoNode, 487 | nominal_luminance: int = 400, 488 | exposure:float = 4.5, 489 | ) -> vs.VideoNode: 490 | """ 491 | The way I convert hdr to sdr when I rip 'Kimi No Na Wa'(UHDBD HK ver.). 
    I'm not sure it suits other UHDBDs.
    ###
    nominal_luminance: nominal luminance used when converting to linear RGBS
    exposure: exposure in Mobius, which does the tonemapping
    """

    fid=clip.format.id
    # HDR (BT.2020 ncl / ST 2084, limited range) -> linear-light RGBS, keeping 2020 primaries.
    clip=core.resize.Spline36(clip=clip, format=vs.RGBS,range_in_s="limited", matrix_in_s="2020ncl", primaries_in_s="2020", primaries_s="2020", transfer_in_s="st2084", transfer_s="linear",dither_type="none", nominal_luminance=nominal_luminance)
    # Tonemap in linear light.
    clip=core.tonemap.Mobius(clip,exposure=exposure)
    # Linear BT.2020 -> BT.709 SDR, back to the original format id.
    clip=core.resize.Spline36(clip, format=fid,matrix_s="709", primaries_in_s="2020", primaries_s="709", transfer_in_s="linear", transfer_s="709",dither_type="none")
    # Luma-only level adjustment; the constants (4096 = 16 << 8, 219, 235) look
    # tuned for 16-bit limited-range clips -- NOTE(review): confirm for other depths.
    clip=Expr(clip,["x 4096 - 219 * 235 / 4096 +",""])
    return clip

@deprecated("No maintenance.")
def readmpls(
    path: str,
    sfilter: str = 'ffms2',
    cache: int | str | None = None,
) -> vs.VideoNode:
    """Read a Blu-ray .mpls playlist and splice its component clips into one clip.

    path: path of the .mpls playlist file.
    sfilter: source filter; aliases for "ffms2" or "lsmas" are accepted.
    cache: ffms2 index-cache setting. None or 0 -> use a "cache" directory
        under the current working directory; a str -> use it as the cache
        directory; -1 -> let ffms2 use its default index location.
    """
    mpls = core.mpls.Read(path)
    if sfilter in ["ffms2","ffms","ff","f","ffvideosource"]:
        # Resolve the cache setting to either a directory path or False.
        if cache is None or cache==0:
            cache=os.path.join(os.getcwd(),'cache')
        elif isinstance(cache,str):
            pass
        elif cache==-1:
            cache=False
        else:
            raise ValueError('unknown cache setting')

        if cache:
            clips=[]
            for i in range(mpls['count']):
                clips.append(core.ffms2.Source(source=mpls['clip'][i], cachefile=os.path.join(cache,mpls['filename'][i].decode()+'.ffindex')))
        else:
            clips=[core.ffms2.Source(mpls['clip'][i]) for i in range(mpls['count'])]
    elif sfilter in ['lwi','l','lsmash','l-smash','lsmas','LWLibavSource']:
        clips=[core.lsmas.LWLibavSource(mpls['clip'][i]) for i in range(mpls['count'])]
    else:
        raise ValueError("unknown source filter")
    return core.std.Splice(clips)

#copy from muvsfunc
def LDMerge(
    flt_h: vs.VideoNode,
    flt_v: vs.VideoNode,
    src: vs.VideoNode,
    mrad: int = 0,
    show: bool = False,
    planes: PlanesType = None,
    convknl: int = 1,
    conv_div: int | None = None,
    calc_mode: int = 0,
    power: float = 1.0,
) -> vs.VideoNode:
    """Merges two filtered clips based on the gradient direction map from a source clip.

    Args:
        flt_h, flt_v: Two filtered clips.

        src: Source clip. Must be the same format as the filtered clips.

        mrad: (int) Expanding of gradient direction map. Default is 0.

        show: (bool) Whether to output gradient direction map. Default is False.

        planes: (int []) Whether to process the corresponding plane. By default, every plane will be processed.
            The unprocessed planes will be copied from the first clip, "flt_h".

        convknl: (0 or 1) Convolution kernel used to generate gradient direction map.
            0: Second order center difference in one direction and average in perpendicular direction
            1: First order center difference in one direction and weighted average in perpendicular direction.
            Default is 1.

        conv_div: (int) Divisor in convolution filter. Default is the max value in convolution kernel.

        calc_mode: (0 or 1) Method used to calculate the gradient direction map. Default is 0.

        power: (float) Power coefficient in "calc_mode=0".

    Example:
        # Fast anti-aliasing
        horizontal = core.std.Convolution(clip, matrix=[1, 4, 0, 4, 1], planes=[0], mode='h')
        vertical = core.std.Convolution(clip, matrix=[1, 4, 0, 4, 1], planes=[0], mode='v')
        blur_src = core.tcanny.TCanny(clip, mode=-1, planes=[0]) # Eliminate noise
        antialiasing = muf.LDMerge(horizontal, vertical, blur_src, mrad=1, planes=[0])

    """

    funcName = 'LDMerge'

    if not isinstance(src, vs.VideoNode):
        raise TypeError(funcName + ': \"src\" must be a clip!')

    if not isinstance(flt_h, vs.VideoNode):
        raise TypeError(funcName + ': \"flt_h\" must be a clip!')
    if src.format.id != flt_h.format.id:
        raise TypeError(funcName + ': \"flt_h\" must be of the same format as \"src\"!')
    if src.width != flt_h.width or src.height != flt_h.height:
        raise TypeError(funcName + ': \"flt_h\" must be of the same size as \"src\"!')

    if not isinstance(flt_v, vs.VideoNode):
        raise TypeError(funcName + ': \"flt_v\" must be a clip!')
    if src.format.id != flt_v.format.id:
        raise TypeError(funcName + ': \"flt_v\" must be of the same format as \"src\"!')
    if src.width != flt_v.width or src.height != flt_v.height:
        raise TypeError(funcName + ': \"flt_v\" must be of the same size as \"src\"!')

    if not isinstance(mrad, int):
        raise TypeError(funcName + '\"mrad\" must be an int!')

    # NOTE: despite the bool annotation, "show" is validated as an int in [0, 3].
    if not isinstance(show, int):
        raise TypeError(funcName + '\"show\" must be an int!')
    if show not in list(range(0, 4)):
        raise ValueError(funcName + '\"show\" must be in [0, 1, 2, 3]!')

    if planes is None:
        planes = list(range(flt_h.format.num_planes))
    elif isinstance(planes, int):
        planes = [planes]

    bits = flt_h.format.bits_per_sample

    if convknl == 0:
        convknl_h = [-1, -1, -1, 2, 2, 2, -1, -1, -1]
        convknl_v = [-1, 2, -1, -1, 2, -1, -1, 2, -1]
    else: # convknl == 1
        convknl_h = [-17, -61, -17, 0, 0, 0, 17, 61, 17]
        convknl_v = [-17, 0, 17, -61, 0, 61, -17, 0, 17]

    if conv_div is None:
        conv_div = max(convknl_h)

    # Directional gradient maps of the source clip.
    hmap = core.std.Convolution(src, matrix=convknl_h, saturate=False, planes=planes, divisor=conv_div)
    vmap = core.std.Convolution(src, matrix=convknl_v, saturate=False, planes=planes, divisor=conv_div)

    # Grow (mrad > 0) or shrink (mrad < 0) the gradient maps.
    if mrad > 0:
        hmap = mt_expand_multi(hmap, sw=0, sh=mrad, planes=planes)
        vmap = mt_expand_multi(vmap, sw=mrad, sh=0, planes=planes)
    elif mrad < 0:
        hmap = mt_inpand_multi(hmap, sw=0, sh=-mrad, planes=planes)
        vmap = mt_inpand_multi(vmap, sw=-mrad, sh=0, planes=planes)

    # Per-pixel merge weight from the two gradient maps (the 0.0001 terms
    # guard against division by zero); used as the mask for MaskedMerge.
    if calc_mode == 0:
        ldexpr = '{peak} 1 x 0.0001 + y 0.0001 + / {power} pow + /'.format(peak=(1 << bits) - 1, power=power)
    else:
        ldexpr = 'y 0.0001 + x 0.0001 + dup * y 0.0001 + dup * + sqrt / {peak} *'.format(peak=(1 << bits) - 1)
    ldmap = Expr([hmap, vmap], [(ldexpr if i in planes else '') for i in range(src.format.num_planes)])

    if show == 0:
        return core.std.MaskedMerge(flt_h, flt_v, ldmap, planes=planes)
    elif show == 1:
        return ldmap
    elif show == 2:
        return hmap
    elif show == 3:
        return vmap
    else:
        raise ValueError

#simplified from mvsfunc.Depth,only lowbitdepth part left.
def lowbitdepth_sim(
    clip: vs.VideoNode,
    depth: int,
    dither: int = 1,
    fulls: bool | None = None,
    fulld: bool | None = None,
) -> vs.VideoNode:
    """Simulate a low bit depth (1-7 bits) quantization, returned as an 8-bit clip.

    Simplified from mvsfunc.Depth; only the low-bit-depth part is kept.

    depth: bit depth to simulate; must satisfy 0 < depth < 8.
    dither: 1 -> plain requantization through Expr; any other value is
        forwarded to fmtc.bitdepth as its "dmode" dithering mode.
    fulls: whether the input is full range. Defaults to False for YUV/GRAY
        input, True otherwise.
    fulld: whether the output is full range. Defaults to fulls.

    Raises vs.Error if depth is out of range.
    """
    if not 0 < depth < 8:
        raise vs.Error("lowbitdepth_sim: depth should be positive integer and less than 8.")

    sFormat = clip.format

    sColorFamily = sFormat.color_family
    sIsYUV = sColorFamily == vs.YUV
    sIsGRAY = sColorFamily == vs.GRAY

    sbitPS = sFormat.bits_per_sample
    sSType = sFormat.sample_type

    # The simulated result is always stored as 8-bit integer.
    dSType = vs.INTEGER
    dbitPS = 8

    if fulls is None:
        # Assume limited range for YUV and Gray input.
        fulls = not (sIsYUV or sIsGRAY)
    if fulld is None:
        fulld = fulls

    # from mvsfunc
    def _quantization_parameters(sample=None, depth=None, full=None, chroma=None):
        """Return floor/neutral/ceil/range of a plane for the given sample type,
        bit depth, range and chroma flag."""
        qp = {}

        if sample is None:
            sample = vs.INTEGER
        if depth is None:
            depth = 8
        elif depth < 1:
            raise vs.Error('"depth" should not be less than 1!')
        if full is None:
            full = True
        if chroma is None:
            chroma = False

        lShift = depth - 8
        rShift = 8 - depth

        if sample == vs.INTEGER:
            if chroma:
                qp['floor'] = 0 if full else 16 << lShift if lShift >= 0 else 16 >> rShift
                qp['neutral'] = 128 << lShift if lShift >= 0 else 128 >> rShift
                qp['ceil'] = (1 << depth) - 1 if full else 240 << lShift if lShift >= 0 else 240 >> rShift
                qp['range'] = qp['ceil'] - qp['floor']
            else:
                qp['floor'] = 0 if full else 16 << lShift if lShift >= 0 else 16 >> rShift
                qp['neutral'] = qp['floor']
                qp['ceil'] = (1 << depth) - 1 if full else 235 << lShift if lShift >= 0 else 235 >> rShift
                qp['range'] = qp['ceil'] - qp['floor']
        elif sample == vs.FLOAT:
            if chroma:
                qp['floor'] = -0.5
                qp['neutral'] = 0.0
                qp['ceil'] = 0.5
                qp['range'] = qp['ceil'] - qp['floor']
            else:
                qp['floor'] = 0.0
                qp['neutral'] = qp['floor']
                qp['ceil'] = 1.0
                qp['range'] = qp['ceil'] - qp['floor']
        else:
            raise vs.Error('Unsupported "sample" specified!')

        return qp

    # simplified from mvsfunc
    def _quantization_conversion(clip, depths=None, depthd=None, sample=None, fulls=None, fulld=None,
                                 chroma=None, clamp=None, dbitPS=None, mode=None):
        """Requantize "clip" from depths/fulls to depthd/fulld with a single Expr.

        mode=1 additionally multiplies the result by 256 (an 8-bit value stored
        in a 16-bit container), which the dithering path below relies on.
        """
        # Get properties of input clip
        sFormat = clip.format

        sColorFamily = sFormat.color_family
        sIsYUV = sColorFamily == vs.YUV
        sIsGRAY = sColorFamily == vs.GRAY

        sbitPS = sFormat.bits_per_sample
        sSType = sFormat.sample_type

        if depths is None:
            depths = sbitPS

        if fulls is None:
            # If not set, assume limited range for YUV and Gray input
            fulls = False if sIsYUV or sIsGRAY else True

        if chroma is None:
            chroma = False
        elif not sIsGRAY:
            chroma = False

        # Get properties of output clip
        if sample is None:
            if depthd is None:
                dSType = sSType
                depthd = depths
            else:
                dSType = vs.FLOAT if dbitPS >= 32 else vs.INTEGER
        else:
            dSType = sample

        if fulld is None:
            fulld = fulls

        if clamp is None:
            clamp = dSType == vs.INTEGER

        if dbitPS is None:
            if depthd < 8:
                dbitPS = 8
            else:
                dbitPS = depthd

        if mode is None:
            mode = 0
        elif depthd >= 8:
            # mode=1 (x256 scale) only makes sense for sub-8-bit targets.
            mode = 0

        dFormat = core.query_video_format(sFormat.color_family, dSType, dbitPS, sFormat.subsampling_w, sFormat.subsampling_h)

        # Expression function
        def gen_expr(chroma, mode):
            if dSType == vs.INTEGER:
                exprLower = 0
                # Max value representable in the output container. Parenthesized:
                # binary "-" binds tighter than "<<", so the previous form
                # "1 << bits - 1" actually computed 2**(bits-1), not 2**bits - 1.
                exprUpper = (1 << dFormat.bytes_per_sample * 8) - 1
            else:
                exprLower = float('-inf')
                exprUpper = float('inf')

            sQP = _quantization_parameters(sSType, depths, fulls, chroma)
            dQP = _quantization_parameters(dSType, depthd, fulld, chroma)

            # Linear mapping: dst = src * gain + offset.
            gain = dQP['range'] / sQP['range']
            offset = dQP['neutral' if chroma else 'floor'] - sQP['neutral' if chroma else 'floor'] * gain

            if mode == 1:
                scale = 256
                gain = gain * scale
                offset = offset * scale
            else:
                scale = 1

            if gain != 1 or offset != 0 or clamp:
                expr = " x "
                if gain != 1: expr = expr + f" {gain} * "
                if offset != 0: expr = expr + f" {offset} + "
                if clamp:
                    # Only emit clamps that the storage type does not enforce already.
                    if dQP['floor'] * scale > exprLower: expr = expr + f" {dQP['floor'] * scale} max "
                    if dQP['ceil'] * scale < exprUpper: expr = expr + f" {dQP['ceil'] * scale} min "
            else:
                expr = ""

            return expr

        # Process
        Yexpr = gen_expr(False, mode)
        Cexpr = gen_expr(True, mode)

        if sIsYUV:
            expr = [Yexpr, Cexpr]
        elif sIsGRAY and chroma:
            expr = Cexpr
        else:
            expr = Yexpr

        clip = Expr(clip, expr, format=dFormat.id)

        # Output
        clip = core.std.SetFrameProp(clip, prop='_ColorRange', intval=0 if fulld else 1)
        return clip

    if dither == 1:
        # Quantize down to "depth" bits, then map back onto the 8-bit scale.
        clip = _quantization_conversion(clip, sbitPS, depth, vs.INTEGER, fulls, fulld, False, False, 8, 0)
        clip = _quantization_conversion(clip, depth, 8, vs.INTEGER, fulld, fulld, False, False, 8, 0)
        return clip
    else:
        full = fulld
        # Scale into a 16-bit container (mode=1 -> x256) so fmtc.bitdepth can
        # apply the requested dither mode while reducing back to 8 bit; that
        # intermediate is treated as limited range (fulls=fulld=False).
        clip = _quantization_conversion(clip, sbitPS, depth, vs.INTEGER, fulls, full, False, False, 16, 1)
        clip = core.fmtc.bitdepth(clip, bits=dbitPS, flt=dSType, fulls=False, fulld=False, dmode=dither)
        clip = core.std.SetFrameProp(clip, prop='_ColorRange', intval=1)

        clip = _quantization_conversion(clip, depth, 8, vs.INTEGER, full, full, False, False, 8, 0)
        return clip
--------------------------------------------------------------------------------