├── .dockerignore ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── docker-compose.yml ├── docker ├── ffmpeg │ ├── cuda.Dockerfile │ ├── gltransition.Dockerfile │ └── golang.Dockerfile ├── nginx │ ├── conf │ │ ├── nginx-nvenc.conf │ │ ├── nginx.conf │ │ ├── nginx_no-ffmpeg.conf │ │ └── nginx_rtmp_minimal_no-stats.conf │ ├── players │ │ ├── dash.html │ │ ├── hls.html │ │ ├── hls_hlsjs.html │ │ ├── rtmp.html │ │ └── rtmp_hls.html │ └── rtmp-alpine.yaml └── python │ └── 3.8-ubuntu20.04.Dockerfile ├── docs ├── afilters.md ├── avfilters.md ├── ffplay.md ├── sources.md └── vfilters.md ├── ffmpeg ├── __init__.py ├── _dag.py ├── _ffmpeg.py ├── _ffplay.py ├── _ffprobe.py ├── _node.py ├── _utils.py ├── constants.py ├── expression │ ├── __init__.py │ └── layout.py ├── filters │ ├── __init__.py │ ├── afilters.py │ ├── avfilters.py │ └── vfilters.py ├── nodes.py ├── settings.py ├── tools │ ├── __init__.py │ ├── atools.py │ ├── avtools.py │ ├── etools.py │ └── vtools.py └── transitions │ ├── __init__.py │ ├── _gltransition.py │ ├── _xfade.py │ └── glsl │ ├── Bounce.glsl │ ├── BowTieHorizontal.glsl │ ├── BowTieVertical.glsl │ ├── BowTieWithParameter.glsl │ ├── ButterflyWaveScrawler.glsl │ ├── CircleCrop.glsl │ ├── ColourDistance.glsl │ ├── CrazyParametricFun.glsl │ ├── CrossZoom.glsl │ ├── Directional.glsl │ ├── DoomScreenTransition.glsl │ ├── Dreamy.glsl │ ├── DreamyZoom.glsl │ ├── FilmBurn.glsl │ ├── GlitchDisplace.glsl │ ├── GlitchMemories.glsl │ ├── GridFlip.glsl │ ├── InvertedPageCurl.glsl │ ├── LeftRight.glsl │ ├── LinearBlur.glsl │ ├── Mosaic.glsl │ ├── PolkaDotsCurtain.glsl │ ├── Radial.glsl │ ├── SimpleZoom.glsl │ ├── StereoViewer.glsl │ ├── Swirl.glsl │ ├── TVStatic.glsl │ ├── TopBottom.glsl │ ├── WaterDrop.glsl │ ├── ZoomInCircles.glsl │ ├── angular.glsl │ ├── burn.glsl │ ├── cannabisleaf.glsl │ ├── circle.glsl │ ├── circleopen.glsl │ ├── colorphase.glsl │ ├── crosshatch.glsl │ ├── crosswarp.glsl │ ├── cube.glsl │ ├── directional-easing.glsl │ ├── 
directionalwarp.glsl │ ├── directionalwipe.glsl │ ├── displacement.glsl │ ├── doorway.glsl │ ├── fade.glsl │ ├── fadecolor.glsl │ ├── fadegrayscale.glsl │ ├── flyeye.glsl │ ├── heart.glsl │ ├── hexagonalize.glsl │ ├── kaleidoscope.glsl │ ├── luma.glsl │ ├── luminance_melt.glsl │ ├── morph.glsl │ ├── multiply_blend.glsl │ ├── perlin.glsl │ ├── pinwheel.glsl │ ├── pixelize.glsl │ ├── polar_function.glsl │ ├── randomNoisex.glsl │ ├── randomsquares.glsl │ ├── ripple.glsl │ ├── rotate_scale_fade.glsl │ ├── squareswire.glsl │ ├── squeeze.glsl │ ├── swap.glsl │ ├── tangentMotionBlur.glsl │ ├── undulatingBurnOut.glsl │ ├── wind.glsl │ ├── windowblinds.glsl │ ├── windowslice.glsl │ ├── wipeDown.glsl │ ├── wipeLeft.glsl │ ├── wipeRight.glsl │ └── wipeUp.glsl ├── requirements.txt └── setup.py /.dockerignore: -------------------------------------------------------------------------------- 1 | # 自定义文件 2 | upload.sh 3 | w_* 4 | replace 5 | *ttf 6 | fonts 7 | cc 8 | 9 | # IDE 文件 10 | .idea 11 | .vscode 12 | .vscode-test/ 13 | .vscodeignore 14 | 15 | # Go 相关 16 | vendor 17 | go.sum 18 | *.exe 19 | 20 | # Python 相关 21 | venv 22 | __pycache__ 23 | build 24 | dist 25 | *egg-info 26 | 27 | # JavaScript / TypeScript 28 | out 29 | node_modules 30 | *.vsix 31 | *.lock 32 | .yarnrc 33 | 34 | # 日志文件 35 | *.log 36 | logs 37 | 38 | # 存储文件 39 | uploads 40 | storage 41 | *.db 42 | testdata/* 43 | !testdata/*.py 44 | !testdata/*.sh 45 | _gsdata_ 46 | 47 | # Dropbox 48 | *.paper 49 | 50 | .gitignore 51 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # This is a contributed example of how to build ffmpeg-gl-transions using Docker 2 | # If you use Docker, this should get the job done 3 | # if you don't use Docker, you could still run the commands 4 | # manually and get the same result 5 | 6 | # docker build -t rustlekarl/ffmpeg-generator:latest . 
7 | FROM rustlekarl/ffmpeg-gltransition:latest 8 | 9 | MAINTAINER rustlekarl "rustlekarl@gmail.com" 10 | 11 | ENV DEBIAN_FRONTEND=noninteractive 12 | 13 | RUN echo "deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\n" >/etc/apt/sources.list 14 | RUN echo "[global]\nindex-url=http://mirrors.aliyun.com/pypi/simple/\n[install]\ntrusted-host=mirrors.aliyun.com" > /etc/pip.conf 15 | 16 | RUN apt-get update \ 17 | && apt-get install -y python3-pip python3-dev \ 18 | && cd /usr/local/bin \ 19 | && ln -s /usr/bin/python3 python \ 20 | && ln -s /usr/bin/pip3 pip \ 21 | && pip3 --no-cache-dir install --upgrade pip 22 | 23 | WORKDIR /root 24 | 25 | COPY requirements.txt . 26 | RUN pip install -r requirements.txt 27 | 28 | WORKDIR /generator 29 | 30 | COPY . 
/generator 31 | 32 | RUN (cd /generator; python run_examples.py) 33 | 34 | RUN rm -rf /generator/* && rm -rf /var/lib/apt/lists/* && apt-get -y purge 35 | 36 | # Overlay parent's ENTRYPOINT 37 | RUN echo "#!/bin/bash\nXvfb -ac :1 -screen 0 1280x1024x16 > /dev/null 2>&1" > /entrypoint.sh 38 | 39 | RUN chmod +x /entrypoint.sh 40 | 41 | ENTRYPOINT ["/entrypoint.sh"] 42 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2021 Rustle Karl 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: ; 2 | .SILENT: ; # no need for @ 3 | .ONESHELL: ; # recipes execute in same shell 4 | .NOTPARALLEL: ; # wait for target to finish 5 | .EXPORT_ALL_VARIABLES: ; # send all vars to shell 6 | 7 | VERSION = 1.0.5 8 | PACKAGE = ffmpeg-generator 9 | 10 | # While console windows in Windows 10 do support VT (Virtual Terminal) / ANSI 11 | # escape sequences in principle, support is turned OFF by default. 12 | # Set-ItemProperty HKCU:\Console VirtualTerminalLevel -Type DWORD 1 13 | # reg add HKCU\Console /v VirtualTerminalLevel /t REG_DWORD /d 1 14 | 15 | all: dep 16 | 17 | dep: 18 | pip install twine 19 | pip install -r requirements.txt 20 | 21 | setup: dep 22 | python setup.py sdist 23 | python setup.py bdist_wheel 24 | pip install dist/$(PACKAGE)-$(VERSION).tar.gz 25 | 26 | uninstall: 27 | pip uninstall -y $(PACKAGE) 28 | 29 | upload: setup 30 | twine upload dist/$(PACKAGE)-$(VERSION).tar.gz 31 | 32 | docker-build: 33 | docker build -t rustlekarl/ffmpeg-generator:latest . 34 | 35 | docker-exec: 36 | docker-compose up -d 37 | docker exec -it ffmpeg-generator_ffmpeg_1 bash 38 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.5' 2 | 3 | # docker exec -it ffmpeg-generator_ffmpeg_1 bash 4 | services: 5 | 6 | ffmpeg: 7 | build: . 
8 | image: rustlekarl/ffmpeg-generator 9 | restart: unless-stopped 10 | volumes: 11 | - ".:/generator" 12 | 13 | nginx-rtmp: 14 | image: alqutami/rtmp-hls:latest-alpine 15 | restart: unless-stopped 16 | ports: 17 | - "11935:1935" 18 | - "11936:8080" 19 | volumes: 20 | - "./docker/nginx/conf/nginx_no-ffmpeg.conf:/etc/nginx/nginx.conf" 21 | -------------------------------------------------------------------------------- /docker/ffmpeg/cuda.Dockerfile: -------------------------------------------------------------------------------- 1 | # docker build -t rustlekarl/ffmpeg-gpu:latest -t rustlekarl/ffmpeg-gpu:ubuntu20.04-cuda11.2.1 -f cuda.Dockerfile . 2 | # Docker on Windows unsupports cuda. 3 | 4 | FROM nvidia/cuda:11.2.1-base-ubuntu20.04 5 | 6 | MAINTAINER rustlekarl 7 | 8 | RUN echo "deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\n" >/etc/apt/sources.list 9 | 10 | RUN apt update && apt install -y ffmpeg 11 | 12 | WORKDIR /ffmpeg 13 | 14 | ARG NGINX_VERSION=1.18.0 15 | ARG NGINX_RTMP_VERSION=1.2.1 16 | ARG FFMPEG_VERSION=4.3.1 17 | 18 | 19 | ############################## 20 | # Build the NGINX-build image. 
21 | FROM ubuntu:18.04 as build-nginx 22 | ARG NGINX_VERSION 23 | ARG NGINX_RTMP_VERSION 24 | 25 | # Build dependencies. 26 | RUN apt update && apt install -y \ 27 | build-essential \ 28 | cmake \ 29 | ca-certificates \ 30 | curl \ 31 | gcc \ 32 | libc-dev \ 33 | make \ 34 | musl-dev \ 35 | openssl \ 36 | libssl-dev \ 37 | libpcre3 \ 38 | libpcre3-dev \ 39 | pkg-config \ 40 | zlib1g-dev \ 41 | wget 42 | 43 | # Get nginx source. 44 | RUN cd /tmp && \ 45 | wget https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz && \ 46 | tar zxf nginx-${NGINX_VERSION}.tar.gz && \ 47 | rm nginx-${NGINX_VERSION}.tar.gz 48 | 49 | # Get nginx-rtmp module. 50 | RUN cd /tmp && \ 51 | wget https://github.com/arut/nginx-rtmp-module/archive/v${NGINX_RTMP_VERSION}.tar.gz && \ 52 | tar zxf v${NGINX_RTMP_VERSION}.tar.gz && rm v${NGINX_RTMP_VERSION}.tar.gz 53 | 54 | # Compile nginx with nginx-rtmp module. 55 | RUN cd /tmp/nginx-${NGINX_VERSION} && \ 56 | ./configure \ 57 | --prefix=/usr/local/nginx \ 58 | --add-module=/tmp/nginx-rtmp-module-${NGINX_RTMP_VERSION} \ 59 | --conf-path=/etc/nginx/nginx.conf \ 60 | --with-threads \ 61 | --with-file-aio \ 62 | --with-http_ssl_module \ 63 | --with-debug \ 64 | --with-cc-opt="-Wimplicit-fallthrough=0" && \ 65 | cd /tmp/nginx-${NGINX_VERSION} && make && make install 66 | 67 | ############################### 68 | # Build the FFmpeg-build image. 69 | FROM nvidia/cuda:11.1-devel-ubuntu20.04 as build-ffmpeg 70 | 71 | ENV DEBIAN_FRONTEND=noninteractive 72 | ARG FFMPEG_VERSION 73 | ARG PREFIX=/usr/local 74 | ARG MAKEFLAGS="-j4" 75 | 76 | # FFmpeg build dependencies. 
77 | RUN apt update && apt install -y \ 78 | build-essential \ 79 | coreutils \ 80 | cmake \ 81 | libx264-dev \ 82 | libx265-dev \ 83 | libc6 \ 84 | libc6-dev \ 85 | libfreetype6-dev \ 86 | libfdk-aac-dev \ 87 | libmp3lame-dev \ 88 | libogg-dev \ 89 | libass9 \ 90 | libass-dev \ 91 | libnuma1 \ 92 | libnuma-dev \ 93 | libopus-dev \ 94 | librtmp-dev \ 95 | libvpx-dev \ 96 | libvorbis-dev \ 97 | libwebp-dev \ 98 | libtheora-dev \ 99 | libtool \ 100 | libssl-dev \ 101 | pkg-config \ 102 | wget \ 103 | yasm \ 104 | git 105 | 106 | # Clone and install ffnvcodec 107 | RUN cd /tmp && git clone https://git.videolan.org/git/ffmpeg/nv-codec-headers.git && \ 108 | cd nv-codec-headers && make install 109 | 110 | # Get FFmpeg source. 111 | RUN cd /tmp/ && \ 112 | wget http://ffmpeg.org/releases/ffmpeg-${FFMPEG_VERSION}.tar.gz && \ 113 | tar zxf ffmpeg-${FFMPEG_VERSION}.tar.gz && rm ffmpeg-${FFMPEG_VERSION}.tar.gz 114 | 115 | # Compile ffmpeg. 116 | RUN cd /tmp/ffmpeg-${FFMPEG_VERSION} && \ 117 | ./configure \ 118 | --prefix=${PREFIX} \ 119 | --enable-version3 \ 120 | --enable-gpl \ 121 | --enable-nonfree \ 122 | --enable-small \ 123 | --enable-libfdk-aac \ 124 | --enable-openssl \ 125 | --enable-libnpp \ 126 | --enable-cuda \ 127 | --enable-cuvid \ 128 | --enable-nvenc \ 129 | --enable-libnpp \ 130 | --disable-debug \ 131 | --disable-doc \ 132 | --disable-ffplay \ 133 | --extra-cflags=-I/usr/local/cuda/include \ 134 | --extra-ldflags=-L/usr/local/cuda/lib64 \ 135 | --extra-libs="-lpthread -lm" && \ 136 | make && make install && make distclean 137 | 138 | # Cleanup. 139 | RUN rm -rf /var/cache/* /tmp/* 140 | 141 | ########################## 142 | # Build the release image. 143 | FROM nvidia/cuda:11.1-runtime-ubuntu20.04 144 | LABEL MAINTAINER Alfred Gutierrez 145 | 146 | ENV DEBIAN_FRONTEND=noninteractive 147 | ENV NVIDIA_DRIVER_VERSION=455 148 | ENV NVIDIA_VISIBLE_DEVICES all 149 | ENV NVIDIA_DRIVER_CAPABILITIES compute,video,utility 150 | 151 | # Set default ports. 
152 | ENV HTTP_PORT 80 153 | ENV HTTPS_PORT 443 154 | ENV RTMP_PORT 1935 155 | 156 | # Set default options. 157 | ENV SINGLE_STREAM "" 158 | ENV MAX_MUXING_QUEUE_SIZE "" 159 | ENV ANALYZEDURATION "" 160 | 161 | RUN apt update && apt install -y --no-install-recommends \ 162 | ca-certificates \ 163 | curl \ 164 | gettext \ 165 | libpcre3-dev \ 166 | libnvidia-decode-${NVIDIA_DRIVER_VERSION} \ 167 | libnvidia-encode-${NVIDIA_DRIVER_VERSION} \ 168 | libtheora0 \ 169 | openssl \ 170 | rtmpdump 171 | 172 | COPY --from=build-nginx /usr/local/nginx /usr/local/nginx 173 | COPY --from=build-nginx /etc/nginx /etc/nginx 174 | COPY --from=build-ffmpeg /usr/local /usr/local 175 | COPY --from=build-ffmpeg /usr/lib/x86_64-linux-gnu/libfdk-aac.so.1 /usr/lib/x86_64-linux-gnu/libfdk-aac.so.1 176 | 177 | # Add NGINX path, config and static files. 178 | ENV PATH "${PATH}:/usr/local/nginx/sbin" 179 | RUN mkdir -p /opt/data && mkdir /www 180 | ADD nginx-cuda.conf /etc/nginx/nginx.conf.template 181 | ADD entrypoint.cuda.sh /opt/entrypoint.sh 182 | RUN chmod gu+x /opt/entrypoint.sh 183 | ADD static /www/static 184 | 185 | EXPOSE 1935 186 | EXPOSE 80 187 | 188 | CMD /opt/entrypoint.sh 189 | -------------------------------------------------------------------------------- /docker/ffmpeg/gltransition.Dockerfile: -------------------------------------------------------------------------------- 1 | # This is a contributed example of how to build ffmpeg-gl-transions using Docker 2 | # If you use Docker, this should get the job done 3 | # if you don't use Docker, you could still run the commands 4 | # manually and get the same result 5 | 6 | # docker build -t rustlekarl/ffmpeg-gltransition:n4.3.2-20210303 -t rustlekarl/ffmpeg-gltransition:latest -f docker/ffmpeg/gltransition.Dockerfile . 
7 | FROM ubuntu:20.04 8 | 9 | MAINTAINER rustlekarl "rustlekarl@gmail.com" 10 | 11 | ENV FFMPEG_VERSION "n4.3.2" 12 | 13 | # everything is relative to /build 14 | WORKDIR /build 15 | 16 | # enable contrib/non-free 17 | RUN echo "deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\n" >/etc/apt/sources.list 18 | 19 | ARG DEBIAN_FRONTEND=noninteractive 20 | 21 | RUN export TZ=Asia/Shanghai 22 | 23 | # update anything needed 24 | RUN apt-get -y update && apt-get -y upgrade 25 | 26 | # need dep 27 | RUN apt-get -y install git \ 28 | apt-utils \ 29 | autoconf \ 30 | automake \ 31 | build-essential \ 32 | cmake \ 33 | g++ \ 34 | gcc \ 35 | git-core \ 36 | libass-dev \ 37 | libfdk-aac-dev \ 38 | libfreetype6-dev \ 39 | libglew-dev \ 40 | libglfw3-dev \ 41 | libglu1-mesa-dev \ 42 | libgnutls28-dev \ 43 | libmp3lame-dev \ 44 | libopus-dev \ 45 | libsdl2-dev \ 46 | libtheora-dev \ 47 | libtool \ 48 | libva-dev \ 49 | libvdpau-dev \ 50 | libvorbis-dev \ 51 | libvpx-dev \ 52 | libx264-dev \ 53 | libx265-dev \ 54 | libxcb-shm0-dev \ 55 | libxcb-xfixes0-dev \ 56 | libxcb1-dev \ 57 | libxvidcore-dev \ 58 | make \ 59 | nasm \ 60 | pkg-config \ 61 | 
texinfo \ 62 | wget \ 63 | xorg-dev \ 64 | yasm \ 65 | zlib1g-dev \ 66 | gperf \ 67 | libglew2.1 68 | 69 | # get ffmpeg sources 70 | RUN (cd /build; git clone -b "$FFMPEG_VERSION" https://gitee.com/fujiawei/FFmpeg.git ffmpeg) 71 | 72 | # get ffmpeg-gl-transition modifications 73 | # this pulls from the original master for standalone use 74 | # but you could modify to copy from your clone/repository 75 | RUN (cd /build; git clone https://gitee.com/fujiawei/ffmpeg-gl-transition.git; cd ffmpeg-gl-transition; git clone https://gitee.com/fujiawei/gl-transitions.git; cd /build/ffmpeg; git apply /build/ffmpeg-gl-transition/ffmpeg.diff; grep -v "define GL_TRANSITION_USING_EGL" /build/ffmpeg-gl-transition/vf_gltransition.c > /build/ffmpeg/libavfilter/vf_gltransition.c) 76 | 77 | RUN (cd /build; git clone https://gitee.com/fujiawei/libass.git) 78 | 79 | RUN (cd /build; git clone https://gitee.com/fujiawei/mirror.git) 80 | 81 | RUN (cd /build; mv /build/mirror/freetype-2.10.4.tar.xz /build/freetype-2.10.4.tar.xz; tar -xf freetype-2.10.4.tar.xz; cd freetype-2.10.4; ./configure --prefix=/usr --enable-freetype-config --disable-static; make; make install) 82 | 83 | RUN (cd /build; mv /build/mirror/fribidi-1.0.9.tar.xz /build/fribidi-1.0.9.tar.xz; tar -xf fribidi-1.0.9.tar.xz; cd fribidi-1.0.9; ./configure --prefix=/usr; make; make install) 84 | 85 | RUN (cd /build; mv /build/mirror/nasm-2.15.05.tar.xz /build/nasm-2.15.05.tar.xz; tar -xf nasm-2.15.05.tar.xz; cd nasm-2.15.05; ./configure --prefix=/usr; make; make install) 86 | 87 | RUN (cd /build; mv /build/mirror/fontconfig-2.13.1.tar.bz2 /build/fontconfig-2.13.1.tar.bz2; tar -xf fontconfig-2.13.1.tar.bz2; cd fontconfig-2.13.1; rm -f src/fcobjshash.h; ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --disable-docs --docdir=/usr/share/doc/fontconfig-2.13.1; make; make install) 88 | 89 | RUN (cd /build/libass; sh autogen.sh; ./configure --prefix=/usr --disable-static; make; make install) 90 | 91 | ENV 
PKG_CONFIG_PATH=/usr/local/ass/lib/pkgconfig:$PKG_CONFIG_PATH 92 | 93 | RUN (cd /build; git clone --depth=1 https://gitee.com/fujiawei/SVT-AV1; cd SVT-AV1; cd Build; cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE=Release; make install) 94 | 95 | # RUN (cd /build; git clone https://gitee.com/fujiawei/x264.git; cd x264; ./configure --prefix=/usr --enable-static --enable-shared; make && make install) 96 | 97 | # configure/compile/install ffmpeg 98 | RUN (cd /build/ffmpeg; ./configure --enable-gnutls --enable-gpl --enable-libass --enable-libfdk-aac --enable-libfreetype --enable-libmp3lame --enable-libopus --enable-libtheora --enable-libvorbis --enable-libvpx --enable-libx264 --enable-libx265 --enable-libxvid --enable-nonfree --enable-opengl --enable-filter=gltransition --extra-libs='-lGLEW -lglfw -ldl') 99 | 100 | # the -j speeds up compilation, but if your container host is limited on resources, you may need to remove it to force a non-parallel build to avoid memory usage issues 101 | RUN (cd /build/ffmpeg; make -j && make install) 102 | 103 | # needed for running it 104 | RUN apt-get -y install xvfb 105 | 106 | # try the demo 107 | RUN (cd ffmpeg-gl-transition; ln -s /usr/local/bin/ffmpeg .) 108 | RUN (cd ffmpeg-gl-transition; xvfb-run --auto-servernum -s '+iglx -screen 0 1920x1080x24' bash concat.sh) 109 | # result would be in out.mp4 in that directory 110 | 111 | #COPY testdata /build/testdata 112 | # 113 | #RUN (cd /build/testdata; ln -s /usr/local/bin/ffmpeg .) 
114 | #RUN (cd /build/testdata; bash test_drawtext.sh; bash test_libx264.sh) 115 | #RUN (cd /build/testdata; xvfb-run --auto-servernum -s '+iglx -screen 0 1920x1080x24' bash test_gltransition.sh) 116 | 117 | RUN rm -rf /build 118 | RUN rm -rf /var/lib/apt/lists/* && apt-get -y purge 119 | 120 | WORKDIR /root 121 | 122 | # drop you into a shell to look around 123 | # modify as needed for actual use 124 | RUN echo "#!/bin/bash\nnohup Xvfb -ac :1 -screen 0 1280x1024x16 > /dev/null 2>&1 &\n/bin/bash" > /entrypoint.sh 125 | 126 | RUN chmod +x /entrypoint.sh 127 | 128 | ENV DISPLAY=:1 129 | 130 | ENTRYPOINT ["/entrypoint.sh"] 131 | -------------------------------------------------------------------------------- /docker/ffmpeg/golang.Dockerfile: -------------------------------------------------------------------------------- 1 | # docker build -t rustlekarl/ffmpeg-golang:latest -t rustlekarl/ffmpeg-golang:ubuntu-focal -f golang.Dockerfile . 2 | 3 | FROM lsiobase/ffmpeg:bin as binstage 4 | FROM lsiobase/ubuntu:focal 5 | 6 | MAINTAINER rustlekarl 7 | 8 | # Add files from binstage 9 | COPY --from=binstage / / 10 | 11 | ARG DEBIAN_FRONTEND=noninteractive 12 | 13 | # hardware env 14 | ENV \ 15 | LIBVA_DRIVERS_PATH="/usr/lib/x86_64-linux-gnu/dri" \ 16 | NVIDIA_DRIVER_CAPABILITIES="compute,video,utility" \ 17 | NVIDIA_VISIBLE_DEVICES="all" 18 | 19 | ENV TZ="Asia/Shanghai" 20 | 21 | RUN echo "deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-proposed main 
restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\n" >/etc/apt/sources.list 22 | 23 | # update anything needed 24 | RUN apt-get -y update && apt-get -y upgrade 25 | 26 | # need dep 27 | RUN \ 28 | echo "**** install runtime ****" && \ 29 | apt-get install -y \ 30 | i965-va-driver \ 31 | libexpat1 \ 32 | libgl1-mesa-dri \ 33 | libglib2.0-0 \ 34 | libgomp1 \ 35 | libharfbuzz0b \ 36 | libv4l-0 \ 37 | libx11-6 \ 38 | libxcb1 \ 39 | libxext6 \ 40 | libxml2 41 | 42 | # golang 43 | RUN apt-get -y install golang-go make 44 | 45 | RUN go env -w GOPROXY=https://goproxy.cn,direct && go env -w GOSUMDB=off && go env -w GO111MODULE=on 46 | 47 | RUN \ 48 | echo "**** clean up ****" && \ 49 | rm -rf \ 50 | /var/lib/apt/lists/* \ 51 | /var/tmp/* 52 | 53 | # Set up project directory 54 | WORKDIR "/ffmpeg" 55 | 56 | CMD /bin/bash 57 | -------------------------------------------------------------------------------- /docker/nginx/conf/nginx-nvenc.conf: -------------------------------------------------------------------------------- 1 | worker_processes auto; 2 | #error_log logs/error.log; 3 | 4 | events { 5 | worker_connections 1024; 6 | } 7 | 8 | # RTMP configuration 9 | rtmp { 10 | server { 11 | listen 1935; # Listen on standard RTMP port 12 | chunk_size 4000; 13 | # ping 30s; 14 | # notify_method get; 15 | 16 | # This application is to accept incoming stream 17 | application live { 18 | live on; # Allows live input 19 | 20 | # for each received stream, transcode for adaptive streaming 21 | # This single ffmpeg command takes the input and transforms 22 | # the source into 4 different streams with different bitrates 23 | # and qualities. # these settings respect the aspect ratio. 
24 | exec_push /app/ffmpeg/bin/ffmpeg -async 1 -vsync -1 -hwaccel cuvid -c:v h264_cuvid -i rtmp://localhost:1935/$app/$name 25 | -c:v h264_nvenc -c:a aac -b:v 256k -b:a 64k -vf "scale_npp=480:trunc(ow/a/2)*2" -zerolatency 1 -f flv rtmp://localhost:1935/show/$name_low 26 | -c:v h264_nvenc -c:a aac -b:v 768k -b:a 128k -vf "scale_npp=720:trunc(ow/a/2)*2" -zerolatency 1 -f flv rtmp://localhost:1935/show/$name_mid 27 | -c:v h264_nvenc -c:a aac -b:v 1024k -b:a 128k -vf "scale_npp=960:trunc(ow/a/2)*2" -zerolatency 1 -f flv rtmp://localhost:1935/show/$name_high 28 | -c:v h264_nvenc -c:a aac -b:v 1920k -b:a 128k -vf "scale_npp=1280:trunc(ow/a/2)*2" -zerolatency 1 -f flv rtmp://localhost:1935/show/$name_hd720 29 | -c copy -f flv rtmp://localhost:1935/show/$name_src; 30 | } 31 | 32 | # This is the HLS application 33 | application show { 34 | live on; # Allows live input from above application 35 | deny play all; # disable consuming the stream from nginx as rtmp 36 | 37 | hls on; # Enable HTTP Live Streaming 38 | hls_fragment 3; 39 | hls_playlist_length 20; 40 | hls_path /mnt/hls/; # hls fragments path 41 | # Instruct clients to adjust resolution according to bandwidth 42 | hls_variant _src BANDWIDTH=4096000; # Source bitrate, source resolution 43 | hls_variant _hd720 BANDWIDTH=2048000; # High bitrate, HD 720p resolution 44 | hls_variant _high BANDWIDTH=1152000; # High bitrate, higher-than-SD resolution 45 | hls_variant _mid BANDWIDTH=448000; # Medium bitrate, SD resolution 46 | hls_variant _low BANDWIDTH=288000; # Low bitrate, sub-SD resolution 47 | 48 | # MPEG-DASH 49 | dash on; 50 | dash_path /mnt/dash/; # dash fragments path 51 | dash_fragment 3; 52 | dash_playlist_length 20; 53 | } 54 | } 55 | } 56 | 57 | 58 | http { 59 | sendfile off; 60 | tcp_nopush on; 61 | directio 512; 62 | # aio on; 63 | 64 | # HTTP server required to serve the player and HLS fragments 65 | server { 66 | listen 8080; 67 | 68 | # Serve HLS fragments 69 | location /hls { 70 | types { 71 | 
application/vnd.apple.mpegurl m3u8; 72 | video/mp2t ts; 73 | } 74 | 75 | root /mnt; 76 | 77 | add_header Cache-Control no-cache; # Disable cache 78 | 79 | # CORS setup 80 | add_header 'Access-Control-Allow-Origin' '*' always; 81 | add_header 'Access-Control-Expose-Headers' 'Content-Length'; 82 | 83 | # allow CORS preflight requests 84 | if ($request_method = 'OPTIONS') { 85 | add_header 'Access-Control-Allow-Origin' '*'; 86 | add_header 'Access-Control-Max-Age' 1728000; 87 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 88 | add_header 'Content-Length' 0; 89 | return 204; 90 | } 91 | } 92 | 93 | # Serve DASH fragments 94 | location /dash { 95 | types { 96 | application/dash+xml mpd; 97 | video/mp4 mp4; 98 | } 99 | 100 | root /mnt; 101 | 102 | add_header Cache-Control no-cache; # Disable cache 103 | 104 | 105 | # CORS setup 106 | add_header 'Access-Control-Allow-Origin' '*' always; 107 | add_header 'Access-Control-Expose-Headers' 'Content-Length'; 108 | 109 | # Allow CORS preflight requests 110 | if ($request_method = 'OPTIONS') { 111 | add_header 'Access-Control-Allow-Origin' '*'; 112 | add_header 'Access-Control-Max-Age' 1728000; 113 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 114 | add_header 'Content-Length' 0; 115 | return 204; 116 | } 117 | } 118 | 119 | # This URL provides RTMP statistics in XML 120 | location /stat { 121 | rtmp_stat all; 122 | rtmp_stat_stylesheet stat.xsl; # Use stat.xsl stylesheet 123 | } 124 | 125 | location /stat.xsl { 126 | # XML stylesheet to view RTMP stats. 
127 | root /app/nginx/html; 128 | } 129 | 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /docker/nginx/conf/nginx.conf: -------------------------------------------------------------------------------- 1 | worker_processes auto; 2 | #error_log logs/error.log; 3 | 4 | events { 5 | worker_connections 1024; 6 | } 7 | 8 | # RTMP configuration 9 | rtmp { 10 | server { 11 | listen 1935; # Listen on standard RTMP port 12 | chunk_size 4000; 13 | # ping 30s; 14 | # notify_method get; 15 | 16 | # This application is to accept incoming stream 17 | application live { 18 | live on; # Allows live input 19 | 20 | # for each received stream, transcode for adaptive streaming 21 | # This single ffmpeg command takes the input and transforms 22 | # the source into 4 different streams with different bitrates 23 | # and qualities. # these settings respect the aspect ratio. 24 | exec_push /usr/local/bin/ffmpeg -i rtmp://localhost:1935/$app/$name -async 1 -vsync -1 25 | -c:v libx264 -c:a aac -b:v 256k -b:a 64k -vf "scale=480:trunc(ow/a/2)*2" -tune zerolatency -preset superfast -crf 23 -f flv rtmp://localhost:1935/show/$name_low 26 | -c:v libx264 -c:a aac -b:v 768k -b:a 128k -vf "scale=720:trunc(ow/a/2)*2" -tune zerolatency -preset superfast -crf 23 -f flv rtmp://localhost:1935/show/$name_mid 27 | -c:v libx264 -c:a aac -b:v 1024k -b:a 128k -vf "scale=960:trunc(ow/a/2)*2" -tune zerolatency -preset superfast -crf 23 -f flv rtmp://localhost:1935/show/$name_high 28 | -c:v libx264 -c:a aac -b:v 1920k -b:a 128k -vf "scale=1280:trunc(ow/a/2)*2" -tune zerolatency -preset superfast -crf 23 -f flv rtmp://localhost:1935/show/$name_hd720 29 | -c copy -f flv rtmp://localhost:1935/show/$name_src; 30 | } 31 | 32 | # This is the HLS application 33 | application show { 34 | live on; # Allows live input from above application 35 | deny play all; # disable consuming the stream from nginx as rtmp 36 | 37 | hls on; # Enable HTTP Live Streaming 38 | 
hls_fragment 3; 39 | hls_playlist_length 20; 40 | hls_path /mnt/hls/; # hls fragments path 41 | # Instruct clients to adjust resolution according to bandwidth 42 | hls_variant _src BANDWIDTH=4096000; # Source bitrate, source resolution 43 | hls_variant _hd720 BANDWIDTH=2048000; # High bitrate, HD 720p resolution 44 | hls_variant _high BANDWIDTH=1152000; # High bitrate, higher-than-SD resolution 45 | hls_variant _mid BANDWIDTH=448000; # Medium bitrate, SD resolution 46 | hls_variant _low BANDWIDTH=288000; # Low bitrate, sub-SD resolution 47 | 48 | # MPEG-DASH 49 | dash on; 50 | dash_path /mnt/dash/; # dash fragments path 51 | dash_fragment 3; 52 | dash_playlist_length 20; 53 | } 54 | } 55 | } 56 | 57 | 58 | http { 59 | sendfile off; 60 | tcp_nopush on; 61 | directio 512; 62 | # aio on; 63 | 64 | # HTTP server required to serve the player and HLS fragments 65 | server { 66 | listen 8080; 67 | 68 | # Serve HLS fragments 69 | location /hls { 70 | types { 71 | application/vnd.apple.mpegurl m3u8; 72 | video/mp2t ts; 73 | } 74 | 75 | root /mnt; 76 | 77 | add_header Cache-Control no-cache; # Disable cache 78 | 79 | # CORS setup 80 | add_header 'Access-Control-Allow-Origin' '*' always; 81 | add_header 'Access-Control-Expose-Headers' 'Content-Length'; 82 | 83 | # allow CORS preflight requests 84 | if ($request_method = 'OPTIONS') { 85 | add_header 'Access-Control-Allow-Origin' '*'; 86 | add_header 'Access-Control-Max-Age' 1728000; 87 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 88 | add_header 'Content-Length' 0; 89 | return 204; 90 | } 91 | } 92 | 93 | # Serve DASH fragments 94 | location /dash { 95 | types { 96 | application/dash+xml mpd; 97 | video/mp4 mp4; 98 | } 99 | 100 | root /mnt; 101 | 102 | add_header Cache-Control no-cache; # Disable cache 103 | 104 | 105 | # CORS setup 106 | add_header 'Access-Control-Allow-Origin' '*' always; 107 | add_header 'Access-Control-Expose-Headers' 'Content-Length'; 108 | 109 | # Allow CORS preflight requests 110 | if 
($request_method = 'OPTIONS') { 111 | add_header 'Access-Control-Allow-Origin' '*'; 112 | add_header 'Access-Control-Max-Age' 1728000; 113 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 114 | add_header 'Content-Length' 0; 115 | return 204; 116 | } 117 | } 118 | 119 | # This URL provides RTMP statistics in XML 120 | location /stat { 121 | rtmp_stat all; 122 | rtmp_stat_stylesheet stat.xsl; # Use stat.xsl stylesheet 123 | } 124 | 125 | location /stat.xsl { 126 | # XML stylesheet to view RTMP stats. 127 | root /usr/local/nginx/html; 128 | } 129 | 130 | } 131 | } -------------------------------------------------------------------------------- /docker/nginx/conf/nginx_no-ffmpeg.conf: -------------------------------------------------------------------------------- 1 | worker_processes auto; 2 | #error_log logs/error.log; 3 | 4 | events { 5 | worker_connections 1024; 6 | } 7 | 8 | # RTMP configuration 9 | rtmp { 10 | server { 11 | listen 1935; # Listen on standard RTMP port 12 | chunk_size 4000; 13 | # ping 30s; 14 | # notify_method get; 15 | 16 | # This application is to accept incoming stream 17 | application live { 18 | live on; # Allows live input 19 | push rtmp://localhost:1935/show; 20 | } 21 | 22 | # This is the HLS application 23 | application show { 24 | live on; # Allows live input from above application 25 | deny play all; # disable consuming the stream from nginx as rtmp 26 | 27 | hls on; # Enable HTTP Live Streaming 28 | hls_fragment 3; 29 | hls_playlist_length 10; 30 | hls_path /mnt/hls/; # hls fragments path 31 | 32 | # MPEG-DASH 33 | dash on; 34 | dash_path /mnt/dash/; # dash fragments path 35 | dash_fragment 3; 36 | dash_playlist_length 10; 37 | } 38 | } 39 | } 40 | 41 | 42 | http { 43 | sendfile off; 44 | tcp_nopush on; 45 | directio 512; 46 | # aio on; 47 | 48 | # HTTP server required to serve the player and HLS fragments 49 | server { 50 | listen 8080; 51 | 52 | # Serve HLS fragments 53 | location /hls { 54 | types { 55 | 
application/vnd.apple.mpegurl m3u8; 56 | video/mp2t ts; 57 | } 58 | 59 | root /mnt; 60 | 61 | add_header Cache-Control no-cache; # Disable cache 62 | 63 | # CORS setup 64 | add_header 'Access-Control-Allow-Origin' '*' always; 65 | add_header 'Access-Control-Expose-Headers' 'Content-Length'; 66 | 67 | # allow CORS preflight requests 68 | if ($request_method = 'OPTIONS') { 69 | add_header 'Access-Control-Allow-Origin' '*'; 70 | add_header 'Access-Control-Max-Age' 1728000; 71 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 72 | add_header 'Content-Length' 0; 73 | return 204; 74 | } 75 | } 76 | 77 | # Serve DASH fragments 78 | location /dash { 79 | types { 80 | application/dash+xml mpd; 81 | video/mp4 mp4; 82 | } 83 | 84 | root /mnt; 85 | 86 | add_header Cache-Control no-cache; # Disable cache 87 | 88 | 89 | # CORS setup 90 | add_header 'Access-Control-Allow-Origin' '*' always; 91 | add_header 'Access-Control-Expose-Headers' 'Content-Length'; 92 | 93 | # Allow CORS preflight requests 94 | if ($request_method = 'OPTIONS') { 95 | add_header 'Access-Control-Allow-Origin' '*'; 96 | add_header 'Access-Control-Max-Age' 1728000; 97 | add_header 'Content-Type' 'text/plain charset=UTF-8'; 98 | add_header 'Content-Length' 0; 99 | return 204; 100 | } 101 | } 102 | 103 | # This URL provides RTMP statistics in XML 104 | location /stat { 105 | rtmp_stat all; 106 | rtmp_stat_stylesheet stat.xsl; # Use stat.xsl stylesheet 107 | } 108 | 109 | location /stat.xsl { 110 | # XML stylesheet to view RTMP stats. 
111 | root /usr/local/nginx/html; 112 | } 113 | 114 | } 115 | } -------------------------------------------------------------------------------- /docker/nginx/conf/nginx_rtmp_minimal_no-stats.conf: -------------------------------------------------------------------------------- 1 | worker_processes auto; 2 | rtmp_auto_push on; 3 | events {} 4 | rtmp { 5 | server { 6 | listen 1935; 7 | listen [::]:1935; 8 | 9 | application live { 10 | live on; 11 | record off; 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /docker/nginx/players/dash.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | DASH Live Streaming 6 | 7 | 8 | 9 | 10 |

DASH Player

11 | 14 | 17 | 18 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /docker/nginx/players/hls.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | HLS Live Streaming 6 | 7 | 8 | 9 | 10 |

HLS Player

11 | 14 | 17 | 18 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /docker/nginx/players/hls_hlsjs.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | HLS streaming 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 |

HLS Player (using hls.js)

17 | 18 |
19 |
20 | 21 |
22 |
23 | 24 | 35 | 36 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /docker/nginx/players/rtmp.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | RTMP Live Streaming 6 | Live Streaming 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 |

RTMP Player

15 | 19 | 20 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /docker/nginx/players/rtmp_hls.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Live Streaming 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 |

RTMP Player

15 | 19 | 20 |

HLS Player

21 | 25 | 26 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /docker/nginx/rtmp-alpine.yaml: -------------------------------------------------------------------------------- 1 | # docker run -d -p 1935:1935 -p 8080:8080 -v custom.conf:/etc/nginx/nginx.conf alqutami/rtmp-hls:latest-alpine 2 | 3 | # docker-compose -f rtmp-alpine.yaml up 4 | version: "3.5" 5 | 6 | services: 7 | nginx-rtmp: 8 | image: alqutami/rtmp-hls:latest-alpine 9 | restart: always 10 | ports: 11 | - "1935:1935" 12 | - "8080:8080" 13 | volumes: 14 | - "./conf/nginx_no-ffmpeg.conf:/etc/nginx/nginx.conf" 15 | -------------------------------------------------------------------------------- /docker/python/3.8-ubuntu20.04.Dockerfile: -------------------------------------------------------------------------------- 1 | # Docker file for a slim Ubuntu:20.04-based Python3.8 image 2 | # docker build -t rustlekarl/python:3.8-ubuntu20.04 -t rustlekarl/python:latest -f docker/python/3.8-ubuntu20.04.Dockerfile . 
3 | FROM ubuntu:20.04 4 | 5 | MAINTAINER rustlekarl "rustlekarl@gmail.com" 6 | 7 | ENV DEBIAN_FRONTEND=noninteractive 8 | 9 | RUN echo "deb http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-security main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-updates main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-proposed main restricted universe multiverse\ndeb http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\ndeb-src http://mirrors.aliyun.com/ubuntu/ focal-backports main restricted universe multiverse\n" >/etc/apt/sources.list 10 | RUN echo "[global]\nindex-url=http://mirrors.aliyun.com/pypi/simple/\n[install]\ntrusted-host=mirrors.aliyun.com" > /etc/pip.conf 11 | 12 | RUN apt-get update \ 13 | && apt-get install -y python3-pip python3-dev \ 14 | && cd /usr/local/bin \ 15 | && ln -s /usr/bin/python3 python \ 16 | && ln -s /usr/bin/pip3 pip \ 17 | && pip3 --no-cache-dir install --upgrade pip 18 | 19 | RUN rm -rf /var/lib/apt/lists/* && apt-get -y purge 20 | 21 | # drop you into a shell to look around 22 | # modify as needed for actual use 23 | RUN echo "#!/bin/bash\n/bin/bash" > /entrypoint.sh 24 | 25 | RUN chmod +x /entrypoint.sh 26 | 27 | ENTRYPOINT ["/entrypoint.sh"] 28 | -------------------------------------------------------------------------------- /docs/ffplay.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | # FFplay 常用命令 9 | 10 | ## 播放控制 11 | 12 | | 选项 | 说明 | 13 | | ------------ | 
------------ | 14 | | q,ESC | 退出播放 | 15 | | f | 全屏切换 | 16 | | p,SPC | 暂停 | 17 | | m | 静音播放 | 18 | | 9,0 | 9 减少音量,0 增加音量 | 19 | | a | 循环切换音频流 | 20 | | v | 循环切换视频流 | 21 | | t | 循环切换字幕流 | 22 | | c | 循环切换节目 | 23 | | w | 循环切换过滤器或显示模式 | 24 | | s | 逐帧播放 | 25 | | left/right | 向后/向前拖动 10 秒 | 26 | | down/up | 向后/向前拖动 1 分钟 | 27 | | 鼠标右键单击 | 拖动与显示宽度对应百分比的文件进行播放 | 28 | | 鼠标左键双击 | 全屏切换 | 29 | 30 | ## 命令选项 31 | 32 | | 主要选项 | 说明 | 33 | | -------------- | -------------- | 34 | | -x | 强制显示宽带 | 35 | | -y height | 强制显示高度 | 36 | | -video_size | 帧尺寸设置显示帧存储(WxH 格式),仅适用于类似原始 YUV 等没有包含帧大小(WxH)的视频,如果设备不支持该分辨率则报错 | 37 | | -pixel_format | 格式设置像素格式 | 38 | | -fs | 以全屏模式启动 | 39 | | -an | 禁止音频(不播放声音) | 40 | | -vn | 禁止视频(不播放视频) | 41 | | -sn | 禁用字幕(不显示字幕) | 42 | | -ss pos | 根据设置的秒进行定位拖动 | 43 | | -t duration | 设置播放视频/音频长度 | 44 | | -bytes | 按字节进行定位拖动(0=off 1=on -1=auto) | 45 | | -seek_interval | 自定义左/右键定位拖动间隔(以秒为单位),默认10s | 46 | | -nodisp | 关闭图形化显示窗口,视频将不显示 | 47 | | -noborder | 无边框窗口 | 48 | | -volume | 设置起始音量,range[0,100] | 49 | | -f | 强制使用设置的格式进行解析,比如 `-f s16le` | 50 | | -window_title | 设置窗口标题(默认为输入文件名) | 51 | | -loop | 设置播放循环次数 | 52 | | -showmode | 设置显示模式,0 显示视频,1 显示音频波形,2 显示音频频谱,缺省值为 0,如果视频不存在则自动选择 2 | 53 | | -vf | 设置视频滤镜 | 54 | | -af | 设置音频滤镜 | 55 | 56 | ## 高级选项 57 | 58 | | 选项 | 说明 | 59 | | -------------- | -------------- | 60 | | -stats | 打印多个回放统计信息。包括显示流持续时间,编解码器参数,流中的当前位置,以及音频/视频同步差值。缺省值是自动开启,显示禁用指定-stats | 61 | | -fast | 非标准化规范的多媒体兼容优化 | 62 | | -genpts | 生产pts | 63 | | -sync | 同步类型,将主时钟设置为audio,video或external,默认是audio | 64 | | -ast | audio_stream_specifier 指定音频流索引,比如-ast 3,播放流索引为3的音频流 | 65 | | -vst | video_stream_specifier 指定视频流索引 | 66 | | -sst | subtitle_stream_specifier 指定字幕流索引 | 67 | | -autoexit | 视频播放完毕后退出 | 68 | | -exitonkeydown | 键盘按下任何键退出播放 | 69 | | -exitonmousedown | 鼠标按下任何键退出播放 | 70 | | -codec:media_specifier | 强制使用设置的多媒体解码器,a(音频),v(视频)和s(字幕),如 -codec:v h264_qsv | 71 | | -acodec | 强制使用设置的音频解码器进行音频解码 | 72 | | -vcodec | 强制使用设置的视频解码器进行视频解码 | 73 | | -scodec | 强制使用设置的字幕解码器进行字幕解码 | 74 | | 
-autorotate | 根据文件元数据自动旋转视频。值为0或1,默认为1 | 75 | | -framedrop | 如果视频不同步则丢弃视频帧,当主时钟非视频时钟时默认开启,若需禁用使用选项-noframedrop | 76 | | -inbuf | 不限制输入缓冲区大小,尽可能地从输入中读取尽可能多的数据。 | 77 | 78 | ## 过滤器 79 | 80 | > 似乎不支持复杂滤镜 81 | 82 | | 例子 | 命令 | 83 | | -------------- | -------------- | 84 | | 视频旋转 | ffplay -i test.mp4 -vf transpose=1 | 85 | | 视频反转 | ffplay test.mp4 -vf hflip, ffplay test.mp4 -vf vflip | 86 | | 视频旋转和反转 | ffplay test.mp4 -vf hflip,transpose=1 | 87 | | 音频变速播放 | ffplay -i test.mp4 -af atempo=2 | 88 | | 视频变速播放 | ffplay -i test.pm4 -vf setpts=PTS/2 | 89 | | 音视频同时变速播放 | ffplay -i test.mp4 -vf setpts=PTS/2 -af atempo=2 | 90 | 91 | ## 代码示例 92 | 93 | ```python 94 | from ffmpeg import ffplay_video 95 | from tests import data 96 | 97 | ffplay_video(data.V1, vf='transpose=1') 98 | ffplay_video(data.V1, vf='hflip') 99 | ffplay_video(data.V1, af='atempo=2') 100 | ffplay_video(data.V1, vf='setpts=PTS/2') 101 | ffplay_video(data.V1, vf='transpose=1,setpts=PTS/2', af='atempo=2') 102 | ``` 103 | 104 | ## 命令行示例 105 | 106 | ### 播放一个音频文件 107 | 108 | ```shell 109 | ffplay audio.aac 110 | ``` 111 | 112 | 这时候会弹出一个窗口,一边播放音频文件,一边将播放声音的语谱图画到该窗口上。针对该窗口的操作如下,点击窗口的任意一个位置,ffplay 会按照点击的位置计算出时间的进度,然后跳到这个时间点上继续播放;按下键盘上的右键会默认快进 10s,左键默认后退 10s,上键默认快进 1min,下键默认后退 1min;按 ESC 键就是退出播放进程;如果按 w 键则将绘制音频的波形图等。 113 | 114 | ### 播放一个视频文件 115 | 116 | ```shell 117 | ffplay video.mp4 118 | ``` 119 | 120 | 这时候会直接在新弹出的窗口上播放该视频,如果想要同时播放多个文件,那么只需要在多个命令行下同时执行 ffplay 就可以了,按 s 键则可以进入 frame-step 模式,即按 s 键一次就会播放下一帧图像。 121 | 122 | ### 从第 30 秒开始播放 10 秒 123 | 124 | ```shell 125 | # 从第 30 秒开始播放 10 秒 126 | ffplay -ss 30 -t 10 long.mp4 127 | ``` 128 | 129 | ### 循环播放 130 | 131 | ```shell 132 | ffplay video.mp4 -loop 10 133 | ``` 134 | 135 | ### 播放视频中的第一路音频流 136 | 137 | ```shell 138 | ffplay video.mkv -ast 1 139 | ``` 140 | 141 | ### 表示播放视频中的第一路视频流 142 | 143 | ```shell 144 | ffplay video.mkv -vst 1 145 | ``` 146 | 147 | ## 播放裸数据 148 | 149 | ### 播放 PCM 格式的音频 150 | 151 | ```shell 152 | ffplay song.pcm -f s16le -channels 2 -ar 4 153 | ``` 
154 | 155 | 格式(-f)、声道数(-channels)、采样率(-ar)必须设置正确。其中任何一项参数设置不正确,都不会得到正常的播放结果。 156 | 157 | WAV 格式的文件称为无压缩的格式,其实就是在 PCM 的头部添加 44 个字节,用于标识这个 PCM 的采样表示格式、声道数、采样率等信息,对于 WAV 格式音频文件,ffplay 可以直接播放,但是若让 ffplay 播放 PCM 裸数据的话,只要为其提供上述三个主要的信息,那么它就可以正确地播放了。 158 | 159 | ### 播放 YUV420P 格式的视频帧 160 | 161 | ```shell 162 | ffplay -f rawvideo -pixel_format yuv420p -s 480*480 texture.yuv 163 | ``` 164 | 165 | 格式(-f rawvideo代表原始格式)、表示格式(-pixel_format yuv420p)、宽高(-s 480*480)。 166 | 167 | ### 播放 RGB 的原始数据 168 | 169 | ```shell 170 | ffplay -f rawvideo -pixel_format rgb24 -s 480*480 texture.rgb 171 | ``` 172 | 173 | ## 音画同步 174 | 175 | FFplay 中音画同步的实现方式其实有三种:以音频为主时间轴作为同步源;以视频为主时间轴作为同步源;以外部时钟为主时间轴作为同步源。在 ffplay 中默认的对齐方式也是以音频为基准进行对齐的。 176 | 177 | 播放器接收到的视频帧或者音频帧,内部都会有时间戳(PTS 时钟)来标识它实际应该在什么时刻进行展示。实际的对齐策略如下:比较视频当前的播放时间和音频当前的播放时间,如果视频播放过快,则通过加大延迟或者重复播放来降低视频播放速度;如果视频播放慢了,则通过减小延迟或者丢帧来追赶音频播放的时间点。关键就在于音视频时间的比较以及延迟的计算,在比较的过程中会设置一个阈值(Threshold),若超过预设的阈值就应该做调整(丢帧渲染或者重复渲染)。 178 | 179 | ### 指定对齐方式 180 | 181 | ```shell 182 | # 以音频为主时间轴 183 | ffplay 32037.mp4 -sync audio 184 | 185 | # 以视频为主时间轴 186 | ffplay 32037.mp4 -sync video 187 | 188 | # 以外部时钟为主时间轴 189 | ffplay 32037.mp4 -sync ext 190 | ``` 191 | -------------------------------------------------------------------------------- /docs/sources.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | 音频源/视频源是作为输入的参数,不能当成滤镜处理。 9 | 10 | ## buffer 11 | 12 | > https://ffmpeg.org/ffmpeg-filters.html#buffer 13 | 14 | 缓冲视频帧,其可以作为滤镜链图的环节。 15 | 16 | ### 参数 17 | 18 | - video_size 指定视频尺寸 19 | - width 输入视频宽 20 | - height 输入视频高 21 | - pix_fmt 像素格式描述,可以是一个表征像素格式的号码或者名称 22 | - time_base 指定时间戳时基 23 | - frame_rate 指定帧率 24 | - pixel_aspect, sar 输入视频的像素长宽比 25 | - sws_param 指定一个可选参数用于在自动检测到输入视频大小或者格式变化时插入放缩滤镜。 26 | - hw_frames_ctx 当使用硬件像素格式时,这应该是对描述输入帧的 AVHWFramesContext 的引用。 27 | 28 | ### 示例 29 | 30 | 不懂,不知道有什么用,略。 31 | 32 | ## cellauto 33 | 34 | > https://ffmpeg.org/ffmpeg-filters.html#cellauto 35 | 36 | 创建由基本元胞自动机生成的模式。 37 | 38 | 
细胞自动发送器的初始状态可以通过 `filename` 选项和 `pattern` 选项的模式来定义,如果不知道则是随机初始状态。每个新帧中的一个新行视频充满了下一代细胞自动发生器的结果。当 `scroll` 选项被指定时,整个帧会被滚动填充。 39 | 40 | ### 参数 41 | 42 | - filename, f 用于读取细胞自动发生器初始状态的文件。在文件中第一行从行首开始每个非空字符被认为是活的细胞直到换行,更多行则被忽略 43 | - pattern, p 用于定义细胞自动发生器初始状态,从指定字符串开始作为起始行每个非空字符作为一个细胞直到换行(或者字符串结束),更多的行被忽略 44 | - rate, r 设置视频帧率,默认 25 45 | - random_fill_ratio, ratio 设置初始随机填充率,是浮点数,范围 0-1,默认 `1/PHI` 此选项在指定了初始文件或模式时被忽略 46 | - random_seed, seed 设置随机填充初始种子,必须是整数,范围 0-`UINT32_MAX`。不指定或显式指定为 -1,将尝试使用一个更好的随机种子 47 | - rule 设置细胞自动发生规则,是 0-255 间数,默认 110 48 | - size, s 设置输出视频尺寸,如果尺寸被设置,其宽必须匹配 `pattern` 字符串中最大行。如果 `filename` 和 `pattern` 都没有指定,则默认为 `320x518` 49 | - scroll 如果为 1,向上滚出已经填满的行。如果为 0,到最后一行后,新行将覆盖第一行,默认为 1 50 | - start_full, full 如果设置为 1,则需要完全填满后才输出第一帧,这时默认行为,设置为 0 则禁用 51 | - stitch 如果设置为 1,左和右连接在一起,这是默认行为,为 0 则禁用 52 | 53 | ### 示例 54 | 55 | ```python 56 | 57 | ``` 58 | 59 | ``` 60 | 61 | ``` 62 | 63 | ## coreimagesrc 64 | 65 | > https://ffmpeg.org/ffmpeg-filters.html#coreimagesrc 66 | 67 | ### 参数 68 | 69 | 70 | 71 | ### 示例 72 | 73 | - 从`pattern`读取初始化,输出为 200x400.cellauto=f=pattern:s=200x400 74 | - 随机化输出初始化,宽200个细胞,填充率为2/3:cellauto=ratio=2/3:s=200x200 75 | - 以规则18创建一个由单细胞开始,初始化宽度为100的源:cellauto=p=@:s=100x400:full=0:rule=18 76 | - 指定一个详细的初始模式:cellauto=p='@@ @ @@':s=100x400:full=0:rule=18 77 | 78 | ```python 79 | 80 | ``` 81 | 82 | ``` 83 | 84 | ``` 85 | 86 | ## gradients 87 | 88 | > https://ffmpeg.org/ffmpeg-filters.html#gradients 89 | 90 | ### 参数 91 | 92 | 93 | ### 示例 94 | 95 | ```python 96 | 97 | ``` 98 | 99 | ``` 100 | 101 | ``` 102 | 103 | ## mandelbrot 104 | 105 | > https://ffmpeg.org/ffmpeg-filters.html#mandelbrot 106 | 107 | 生成一个曼德尔勃特(Mandelbrot)集合分形,它逐渐从点(start_x,start_y)放大 108 | 109 | ### 参数 110 | 111 | - end_pts设置终端`pts`值,默认400. 112 | - end_scale设置终端缩放值,必须是浮点数,默认0.3. 113 | - inner设置内部着色模式,该算法用于绘制曼德布洛特分形内部区域.允许下面的值:black设置black模式. 114 | convergence设置时间收缩模式 115 | mincol设置基于点的颜色最接近的起源迭代 116 | period设置时间模式 默认为mincol. 
117 | - bailout设置bailout值,默认为10 118 | - maxiter设置最大迭代执行的渲染算法,默认7189. 119 | - outer设置外部着色模式,允许下面的值:iteration_count设置为迭代计算模式 120 | normalized_iteration_count设置为规范化的迭代计算模式 默认为normalized_iteration_count. 121 | - rate, r设置帧率,可以是表达式和每秒帧数,默认为25 122 | - size, s设置帧尺寸,语法同于`ffmpeg-utils`手册中的[视频尺寸](ffmpeg-doc-cn-07.md#视频尺寸分辨率)章节. 默认"640x480". 123 | - start_scale设置初始化放大值,默认为3.0. 124 | - start_x设置初始化点的x坐标,必须是-100 到100间的浮点数,默认为 -0.743643887037158704752191506114774. 125 | - start_y设置初始化点的y坐标,必须是-100 到100间的浮点数,默认为-0.131825904205311970493132056385139. 126 | 127 | ### 示例 128 | 129 | ```python 130 | 131 | ``` 132 | 133 | ``` 134 | 135 | ``` 136 | 137 | ## mptestsrc 138 | 139 | > https://ffmpeg.org/ffmpeg-filters.html#mptestsrc 140 | 141 | 生成各种测试模式,以作为MPlayer测试滤镜。生成视频是固定的256x256分辨率。 142 | 143 | ### 参数 144 | 145 | - rate, r指定帧率,是默认每秒帧数数字。也可以以`frame_rate_num/frame_rate_den`格式设定整数和浮点数以及帧频短语都是有效值,默认25 146 | - duration, d设置持续时间秒数,语法同于[持续时间](ffmpeg-doc-cn-07.md#持续时间)章节,如果不指定或者指定为负数,表示持续不断 147 | - test, t设置测试项的数字或者名称,允许下面的值:dc_luma 148 | 149 | 这个源用于在特定编码功能测试支持下面选项: 150 | 151 | dc_chroma 152 | freq_luma 153 | freq_chroma 154 | amp_luma 155 | amp_chroma 156 | cbp 157 | mv 158 | ring1 159 | ring2 160 | all默认为"all",表示都要测试例如: mptestsrc=t=dc_luma 161 | 将进行`dc_luma`测试 162 | 163 | ### 示例 164 | 165 | ```python 166 | 167 | ``` 168 | 169 | ``` 170 | 171 | ``` 172 | 173 | ## frei0r_src 174 | 175 | > https://ffmpeg.org/ffmpeg-filters.html#frei0r_src 176 | 177 | 提供一个frei0r源编译需要`frei0r`头以及配置项`--enable-frei0r`,接受如下参数: 178 | 179 | ### 参数 180 | 181 | - size生成视频大小。语法同于`ffmpeg-utils`手册中的[视频尺寸](ffmpeg-doc-cn-07.md#视频尺寸分辨率)章节. 
182 | - framerate设置帧率,值为数字字符串,或者`num/den`形式字符串或者帧率短语 183 | - filter_name这个名字frei0r源到负载。获得有关frei0r的更多信息以及如何设置参数,读取文档中的frei0r视频滤镜部分。 184 | - filter_params由’|’分隔的参数列表传递给`frei0r`源例如:要产生一个200x200分辨率,帧率为10,产生一个frei0r源用作`partik0l` frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay 185 | 186 | ### 示例 187 | 188 | ```python 189 | 190 | ``` 191 | 192 | ``` 193 | 194 | ``` 195 | 196 | ## life 197 | 198 | > https://ffmpeg.org/ffmpeg-filters.html#life 199 | 200 | 产生life模式这个源基于John Conway’s life游戏源输入一个网格、每个像素(代表细胞)可以有2个状态,活或者死。每个细胞有8个邻国水平、垂直或对角相邻。根据采用的规则发展网格,它指定邻居活细胞的数量会使细胞生存或出生,这里`rule`选项在下面介绍。这个源支持下面的选项: 201 | 202 | ### 参数 203 | 204 | - filename, f设置读取初始化网格的文件。在文件中每个非空字符代表存活的细胞,换行结束一行。如果没有指定则随机生成 205 | - rate, r设置视频帧率,默认25. 206 | - random_fill_ratio, ratio设置随机初始化随机网格填充率,值为0-1的浮点数,默认为`1/PHI`,在设置了`filename`时忽略 207 | - random_seed, seed设置随机种子,值为0 - `UINT32_MAX`如果设置为-1或者不设置,表示尽量用优化的种子 208 | - rule设置规则规则可以是指定代码的形式"SNS/BNB",这里`NS`和`NB`是0-8的数字序数,`NS`在一个存活细胞周围还存活的细胞数,`NB`指定周围要新生的细胞数,`s`和`b`分别是`S`和`B`的替代另外一个规则可以被描述为18位的整数。其中高段9位表示存活细胞周围存活细胞状态数,低段9位则为要新生的细胞状态数。例如数字6153=(12<<9)+9,表示细胞周围有12个存活细胞,新生为9的规则,其等效于"S23/B03".默认为"S23/B3",它是原始的Conway’s 游戏规则。如果它周围有2或者3个细胞将新生细胞,否则将死亡细胞 209 | - size, s设置输出视频分辨率,语法同于`ffmpeg-utils`手册中的[视频尺寸](ffmpeg-doc-cn-07.md#视频尺寸分辨率)章节当`filename`被设定,则默认会采用输入文件的最大行宽。如果设置了这个值则需与输入文件相匹配。如果没有设置`filename`则默认为 "320x240" (用于随机初始化模式). 
210 | - stitch如果设置为1,则左右网格边和上下网格边缝合在一起(连续面),默认为1 211 | - mold设置细胞分解速度。如果设置,则为死细胞将从 `death_color` 在`mold`步骤内转变为 `mold_color` 的速度。范围0-255。 212 | - life_color设置存活的细胞颜色 (或新生) 213 | - death_color设置死亡细胞颜色。如果`mold`被设置,则为死亡后第一个颜色 214 | - mold_color设置分解后颜色,作为绝对死亡或已被分解的细胞颜色前面3个颜色 215 | 216 | ### 示例 217 | 218 | - 从模板读取一个网格,分辨率为300x300:life=f=pattern:s=300x300 219 | - 填充率2/3的随机初始化,尺寸200x200, :life=ratio=2/3:s=200x200 220 | - 指定一个规则的随机初始化和生成:life=rule=S14/B34 221 | - 前面所有例子,且还伴有`mold`(分解)效果,在ffplay中播放:ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16 222 | 223 | ```python 224 | 225 | ``` 226 | 227 | ``` 228 | 229 | ``` 230 | 231 | ## allrgb, allyuv, color, haldclutsrc, nullsrc, pal75bars, pal100bars, rgbtestsrc, smptebars, smptehdbars, testsrc, testsrc2, yuvtestsrc 232 | 233 | - `allrgb` 源返回所有 RGB 颜色且大小为 4096x4096 的帧 234 | - `allyuv` 源返回所有 YUV 颜色且大小为 4096x4096 的帧 235 | - `color` 源提供一致的颜色输入 236 | - `haldclutsrc` 源提供哈尔德(Hald)CLUT 输入 237 | - `nullsrc` 源返回未处理的视频帧,作为滤镜中可以忽略的输入数据 238 | - `pal75bars` 源产生 75%的颜色级别生成颜色条图案 239 | - `pal100bars` 源产生 100%的颜色级别生成颜色条图案 240 | - `rgbtestsrc` 源产生 `RGB` 测试模板,用于检测对比 `RGB` 与 `BGR` 问题,可以看到一个红色、绿色和蓝色的从上到下条纹 241 | - `smptebars` 源产生颜色条模板 242 | - `smptehdbars` 源产生颜色条模板 243 | - `testsrc` 源产生测试视频模板,显示颜色模板和滚动的梯形以及时间戳 244 | - `testsrc2` 源产生测试视频模板,显示颜色模板和滚动的梯形以及时间戳 245 | - `yuvtestsrc` 源生成一个YUV测试模式,从上到下看到 y, cb 和 cr 的条纹。 246 | 247 | ### 参数 248 | 249 | - color, c 指定源颜色,仅作 `color` 源中有效 250 | - level 指定 Hald CLUT 的层次。仅在 `haldclutsrc` 有效。`level` 中的 `N` 用于生成一个 `N * N * N` 像素为单位矩阵用于三维查找表。每个组件都是编码在 1 /(N * N) 范围内 251 | - size, s 指定源视频尺寸。默认值为 `320x240`,这个选项在 `haldclutsrc` 中无效 252 | - rate, r 设置帧率,默认为 25 253 | - sar 设置样品长宽比(像素点长宽比) 254 | - duration, d 设置源视频持续时间,不设置或者设置为负数,表示持续存在。 255 | - decimals, n 设置屏幕时间戳的小数数字显示,仅在 `testsrc` 源有效显示的时间戳值将对应于原来的时间戳值乘以 `10 的 X 次方数 ` 的指定值。默认为 0。 256 | 257 | ### 示例 258 | 259 | #### 用 FFplay 播放 260 | 261 | ```python 262 | _ = run_ffplay("testsrc=size=400x300:rate=60", 
f="lavfi") 263 | ``` 264 | 265 | ``` 266 | ffplay -hide_banner -f lavfi testsrc=size=900x400:rate=60 267 | ``` 268 | 269 | ```python 270 | _ = run_ffplay("allrgb", f="lavfi") 271 | _ = run_ffplay("allyuv", f="lavfi") 272 | _ = run_ffplay("color=c=red@0.2:s=1600x900:r=10", f="lavfi") 273 | _ = run_ffplay("haldclutsrc", f="lavfi") 274 | _ = run_ffplay("pal75bars", f="lavfi") 275 | _ = run_ffplay("allyuv", f="lavfi") 276 | _ = run_ffplay("allyuv", f="lavfi") 277 | _ = run_ffplay("rgbtestsrc", f="lavfi") 278 | _ = run_ffplay("smptebars", f="lavfi") 279 | _ = run_ffplay("smptehdbars", f="lavfi") 280 | _ = run_ffplay("testsrc=size=400x300:rate=60", f="lavfi") 281 | _ = run_ffplay("testsrc2=s=1600x900", f="lavfi") 282 | _ = run_ffplay("yuvtestsrc=s=1600x900", f="lavfi") 283 | ``` 284 | 285 | #### 用 FFmpeg 预览 286 | 287 | 预览的播放进行了加速。 288 | 289 | ```python 290 | ffmpeg.input_source("testsrc", size="600x900", rate=60).output(preview=True).run() 291 | ``` 292 | 293 | ``` 294 | ffmpeg -f lavfi -i testsrc=size=600x900:rate=60 -f sdl preview -y -hide_banner 295 | ``` 296 | 297 | ## openclsrc 298 | 299 | > https://ffmpeg.org/ffmpeg-filters.html#openclsrc 300 | 301 | 302 | ### 参数 303 | 304 | 305 | ### 示例 306 | 307 | ```python 308 | 309 | ``` 310 | 311 | ``` 312 | 313 | ``` 314 | 315 | ## sierpinski 316 | 317 | > https://ffmpeg.org/ffmpeg-filters.html#sierpinski 318 | 319 | ### 参数 320 | 321 | 322 | 323 | ### 示例 324 | 325 | ```python 326 | 327 | ``` 328 | 329 | ``` 330 | 331 | ``` 332 | 333 | ## buffersink 334 | 335 | > https://ffmpeg.org/ffmpeg-filters.html#buffersink 336 | 337 | 缓冲视频帧,可作为滤镜链图中有效的结束点。这个槽主要用于编程使用,特别是通过`libavfilter/buffersink.h`的接口或选择操作系统它接受指向`AVABufferSinkContext`结构的指针,用于定义传入缓冲区的格式,作为不透明参数传递给`avfilter_init_filter`以初始化。 338 | 339 | ### 参数 340 | 341 | 342 | ### 示例 343 | 344 | ```python 345 | 346 | ``` 347 | 348 | ``` 349 | 350 | ``` 351 | 352 | ## nullsink 353 | 354 | > https://ffmpeg.org/ffmpeg-filters.html#nullsink 355 | 356 | Null(空)视频槽,绝对没有输入的视频。它主要用作模板以分析/调试工具。 
def run_ffmpeg(option: str = None, stdout=None, check=True, **kwargs) -> subprocess.CompletedProcess:
    '''Execute a raw ffmpeg command.

    Args:
        option: single flag appended as ``-{option}`` (e.g. ``'version'``).
        stdout: passed straight to ``subprocess.run`` (e.g. ``subprocess.PIPE``).
        check: raise ``CalledProcessError`` on a non-zero exit when True.
        **kwargs: converted to extra command-line arguments.

    Returns:
        The completed process object from ``subprocess.run``.
    '''
    command = ['ffmpeg', '-hide_banner']

    if option:
        command.append(f'-{option}')

    command.extend(convert_kwargs_to_cmd_line_args(kwargs))

    return subprocess.run(command, stdout=stdout, encoding='utf-8', check=check)


def _findstr(option, str_: str = None):
    '''Print the output of ``ffmpeg -{option}``; when *str_* is given, show
    only matching lines with the match highlighted in red.'''
    captured = run_ffmpeg(option, stdout=subprocess.PIPE).stdout

    if str_ is None:
        print(captured)
        return

    highlighted = [line.replace(str_, color.sredf(str_))
                   for line in captured.splitlines() if str_ in line]
    print('\n'.join(highlighted))
class FFmpeg(object):
    '''Thin static wrappers around ffmpeg's capability/introspection queries.'''

    @staticmethod
    def cuda():
        '''Show available hardware accelerators plus the CUDA codecs.'''
        FFmpeg.hwaccels()

        color.cyanln('Cuda Encoders:')
        FFmpeg.codecs(findstr='_nvenc')

        color.cyanln('Cuda Decoders:')
        FFmpeg.codecs(findstr='_cuvid')

    @staticmethod
    def version():
        '''Print the ffmpeg version banner.'''
        run_ffmpeg('version')

    @staticmethod
    def formats(findstr: str = None):
        '''List supported formats, optionally filtered by *findstr*.'''
        _findstr('formats', str_=findstr)

    @staticmethod
    def devices(findstr: str = None):
        '''List supported devices, optionally filtered by *findstr*.'''
        _findstr('devices', str_=findstr)

    @staticmethod
    def codecs(findstr: str = None):
        '''List supported codecs, optionally filtered by *findstr*.

        Examples:
            FFmpeg.codecs(find='_cuvid')
            FFmpeg.codecs(find='_nvenc')
        '''
        _findstr('codecs', str_=findstr)

    @staticmethod
    def decoders(findstr: str = None):
        '''List available decoders, optionally filtered by *findstr*.'''
        _findstr('decoders', str_=findstr)

    @staticmethod
    def encoders(findstr: str = None):
        '''List available encoders, optionally filtered by *findstr*.'''
        _findstr('encoders', str_=findstr)

    @staticmethod
    def bsfs():
        '''List available bitstream filters.'''
        run_ffmpeg('bsfs')

    @staticmethod
    def protocols(findstr: str = None):
        '''List supported protocols, optionally filtered by *findstr*.'''
        _findstr('protocols', str_=findstr)

    @staticmethod
    def filters(findstr: str = None):
        '''List available filters, optionally filtered by *findstr*.'''
        _findstr('filters', str_=findstr)

    @staticmethod
    def pix_fmts(findstr: str = None):
        '''List supported pixel formats, optionally filtered by *findstr*.'''
        _findstr('pix_fmts', str_=findstr)

    @staticmethod
    def layouts(findstr: str = None):
        '''List channel names/layouts, optionally filtered by *findstr*.'''
        _findstr('layouts', str_=findstr)

    @staticmethod
    def colors(findstr: str = None):
        '''List recognized color names, optionally filtered by *findstr*.'''
        _findstr('colors', str_=findstr)

    @staticmethod
    def hwaccels():
        '''List available hardware acceleration methods.'''
        run_ffmpeg('hwaccels')

    @staticmethod
    def help(filter: str):
        '''Show ffmpeg's built-in help page for the given filter name.'''
        run_ffmpeg(help='filter=' + filter)

    @staticmethod
    def list_devices(f='dshow', i='dummy'):
        '''Enumerate capture devices for the given input format (dshow by default).'''
        run_ffmpeg(check=False, list_devices=True, f=f, i=i)

    @staticmethod
    def list_options(f='dshow', i='dummy'):
        '''Show the options a capture device supports.

        Examples:
            ffmpeg -list_options true -f dshow -i video='USB2.0 PC CAMERA'
            ffmpeg -list_options true -f dshow -i audio='麦克风 (2- USB2.0 MIC)'
        '''
        run_ffmpeg(check=False, list_options=True, f=f, i=i)
# An Edge records one endpoint of a connection: the peer node, the label on
# that side of the connection, and the stream selector.
Edge = NamedTuple('Edge', [
    ('Node', 'DagNode'),
    ('Label', str),
    ('Selector', str),
])

# DagNodes are connected by edges. An edge connects two nodes with a label
# for each side.
DagEdge = NamedTuple('DagEdge', [
    ('DownstreamNode', 'DagNode'),   # downstream/child node
    ('DownstreamLabel', str),        # label on the incoming side of the downstream node
    ('UpstreamNode', 'DagNode'),     # upstream/parent node
    ('UpstreamLabel', str),          # label on the outgoing side of the upstream node
    ('Selector', str),
])
DagEdge.__doc__ = '''DagNodes are connected by edges. An edge
connects two nodes with a label for each side.'''
def get_incoming_edges(node: DagNode, incoming_edge_graph: Dict[str, Edge]) -> Tuple[DagEdge]:
    '''Expand *incoming_edge_graph* into DagEdge records arriving at *node*.

    Each (label, edge) pair describes one upstream connection: *label* is the
    label on the incoming side of *node*, while *edge* carries the upstream
    node, its outgoing label and the stream selector.
    '''
    return tuple(DagEdge(node, label, edge.Node, edge.Label, edge.Selector)
                 for label, edge in incoming_edge_graph.items())


def get_outgoing_edges(node: DagNode, outgoing_edge_graph: Dict[str, List[Edge]]) -> Tuple[DagEdge]:
    '''Expand *outgoing_edge_graph* into DagEdge records leaving *node*.

    Unlike the incoming graph, a single outgoing label may fan out to several
    downstream edges, hence the list values.
    '''
    return tuple(DagEdge(edge.Node, edge.Label, node, label, edge.Selector)
                 for label, edges in outgoing_edge_graph.items()
                 for edge in edges)


def topological_sort(nodes: List[DagNode]) -> Tuple[Tuple[DagNode], Dict[DagNode, Dict[str, List[Edge]]]]:
    '''Topologically sort the graph reachable upstream from *nodes*.

    NOTE nodes can be part of the nodes, but not all.

    DagNodes may have any number of incoming edges and any number of
    outgoing edges. DagNodes keep track only of their incoming edges, but
    the entire graph structure can be inferred by looking at the furthest
    downstream nodes and working backwards.

    Returns:
        A tuple of (sorted nodes, mapping of node -> outgoing edge graph).

    Raises:
        RuntimeError: if the graph contains a cycle (not a DAG).
    '''
    # FIX: work on a copy so the caller's list is not consumed — the previous
    # implementation used *nodes* itself as the work stack and emptied it.
    pending = list(nodes)

    outgoing_edge_graphs = defaultdict(lambda: defaultdict(list))
    dependent_count = defaultdict(int)
    outgoing_graph = defaultdict(list)
    visited_nodes = set()
    sorted_nodes = []

    # Walk upstream from the given nodes, converting the incoming-edge
    # representation into outgoing adjacency structures.
    while pending:
        node = pending.pop()
        if node not in visited_nodes:
            for edge in node.incoming_edges:
                outgoing_graph[edge.UpstreamNode].append(node)  # node == edge.DownstreamNode
                outgoing_edge_graphs[edge.UpstreamNode][edge.UpstreamLabel]. \
                    append(Edge(node, edge.DownstreamLabel, edge.Selector))
                pending.append(edge.UpstreamNode)
            visited_nodes.add(node)

    # Count how many upstream parents each node has (its in-degree).
    for downstream_nodes in outgoing_graph.values():
        for downstream in downstream_nodes:
            dependent_count[downstream] += 1

    # Kahn's algorithm: start from nodes with no unprocessed upstream parents.
    stack = [node for node in visited_nodes if dependent_count[node] == 0]

    while stack:
        node = stack.pop()
        sorted_nodes.append(node)

        for n in outgoing_graph[node]:
            dependent_count[n] -= 1
            if dependent_count[n] == 0:
                stack.append(n)

    # Any node left with a positive count is part of a cycle.
    if len(sorted_nodes) != len(visited_nodes):
        raise RuntimeError('This graph is not a DAG')

    return tuple(sorted_nodes), outgoing_edge_graphs
import constants, settings 10 | from ._node import Stream 11 | from ._utils import (convert_kwargs_string, drop_empty_dict_values, 12 | drop_empty_list_values) 13 | from .expression import generate_resolution 14 | from .nodes import (FilterableStream, InputNode, MergeOutputsNode, OutputNode, 15 | OutputStream, filterable) 16 | 17 | # http://ffmpeg.org/ffmpeg-all.html 18 | 19 | 20 | def input(source, video_device: str = None, audio_device: str = None, format: str = None, 21 | pixel_format=None, fps: int = None, start_position: float = None, duration: float = None, 22 | to_position: float = None, start_position_eof: float = None, stream_loop: int = None, 23 | frame_rate: int = None, width: int = None, height: int = None, vcodec: str = None, 24 | hwaccel: str = None, enable_cuda=True, **kwargs) -> FilterableStream: 25 | """https://ffmpeg.org/ffmpeg.html#Main-options""" 26 | 27 | if video_device: 28 | kwargs['source'] = "video=" + video_device 29 | elif audio_device: 30 | kwargs['source'] = "audio=" + audio_device 31 | elif source is None: 32 | raise ValueError("Must specify an input source") 33 | 34 | kwargs['source'] = str(source) 35 | 36 | if settings.CUDA_ENABLE and enable_cuda and \ 37 | Path(source).suffix not in constants.IMAGE_FORMATS: 38 | hwaccel = "cuda" 39 | if vcodec not in constants.CUDA_ENCODERS: 40 | vcodec = settings.DEFAULT_DECODER 41 | 42 | kwargs = drop_empty_dict_values(kwargs, hwaccel=hwaccel, vcodec=vcodec, 43 | f=format, pix_fmt=pixel_format, ss=start_position, 44 | t=duration, to=to_position, sseof=start_position_eof, 45 | stream_loop=stream_loop, r=fps, framerate=frame_rate, 46 | s=generate_resolution(width, height)) 47 | 48 | return InputNode(args=None, kwargs=kwargs).stream() 49 | 50 | 51 | def input_source(source: str, color: str = None, level: int = None, 52 | size: str = None, rate: int = None, sar: str = None, 53 | duration: float = None, decimals: bool = None) -> FilterableStream: 54 | 
"""https://ffmpeg.org/ffmpeg-filters.html#Video-Sources""" 55 | if source not in constants.VIDEO_SOURCES: 56 | raise ValueError("Here is currently available video sources: %s" % constants.VIDEO_SOURCES) 57 | 58 | args = convert_kwargs_string(color=color, level=level, size=size, rate=rate, 59 | sar=sar, duration=duration, decimals=decimals) 60 | 61 | if args: 62 | source = f"{source}={args}" 63 | 64 | return input(source, format="lavfi", enable_cuda=False) 65 | 66 | 67 | @filterable() 68 | def output(*streams_or_source, vn=False, an=False, ar=None, ab=None, ac=None, 69 | acodec=None, vcodec=None, codec: str = None, aq_scale=None, vq_scale=None, 70 | aspect=None, fps=None, format=None, pixel_format=None, video_bitrate=None, 71 | audio_bitrate=None, v_profile=None, preset=None, mov_flags=None, 72 | shortest=False, frame_size=None, v_frames: int = None, start_position: float = None, 73 | duration: float = None, video_filter: str = None, audio_filter: str = None, 74 | ignore_output=False, preview: bool = False, enable_cuda=True, 75 | args: list = None, **kwargs) -> OutputStream: 76 | if args is None: 77 | args = [] 78 | 79 | args = drop_empty_list_values(args, vn=vn, an=an, shortest=shortest) 80 | 81 | if ignore_output: 82 | kwargs['source'] = "NUL" if constants.WINDOWS else "/dev/null" 83 | format = "null" 84 | 85 | if preview: 86 | kwargs['source'] = "preview" 87 | format = "sdl" 88 | 89 | streams_or_source = list(streams_or_source) 90 | if not kwargs.get('source'): 91 | if not isinstance(streams_or_source[-1], (str, Path)): 92 | raise ValueError("Must specify an output source") 93 | kwargs['source'] = str(streams_or_source.pop(-1)) 94 | 95 | streams = streams_or_source 96 | 97 | if settings.CUDA_ENABLE and enable_cuda and not preview and \ 98 | Path(kwargs['source']).suffix not in constants.IMAGE_FORMATS: 99 | if vcodec not in constants.CUDA_ENCODERS: 100 | vcodec = settings.DEFAULT_ENCODER 101 | 102 | # codec over acodec/vcodec 103 | if codec is not None: 104 | 
acodec = None 105 | vcodec = None 106 | 107 | if video_bitrate is not None: 108 | kwargs['b:v'] = video_bitrate 109 | 110 | if audio_bitrate is not None: 111 | kwargs['b:a'] = audio_bitrate 112 | 113 | if v_profile is not None: 114 | kwargs['profile:v'] = v_profile 115 | 116 | kwargs = drop_empty_dict_values(kwargs, r=fps, ss=start_position, t=duration, 117 | aspect=aspect, f=format, pix_fmt=pixel_format, ar=ar, 118 | ab=ab, ac=ac, codec=codec, acodec=acodec, vcodec=vcodec, 119 | aq=aq_scale, vq=vq_scale, s=frame_size, vframes=v_frames, 120 | preset=preset, movflags=mov_flags, vf=video_filter, af=audio_filter) 121 | 122 | return OutputNode(streams, args=args, kwargs=kwargs).stream() 123 | 124 | 125 | def merge_outputs(*streams: Stream) -> OutputStream: 126 | """Include all given outputs in one ffmpeg command line.""" 127 | return MergeOutputsNode(streams).stream() 128 | -------------------------------------------------------------------------------- /ffmpeg/_ffplay.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.03.06 23:06:21 3 | LastEditors: Rustle Karl 4 | LastEditTime: 2021.06.25 09:45:47 5 | ''' 6 | import subprocess 7 | from pathlib import Path 8 | 9 | from pkgs import color 10 | 11 | from ._utils import convert_kwargs_to_cmd_line_args, join_cmd_args_seq 12 | 13 | __all__ = [ 14 | "ffplay_audio", 15 | "ffplay_video", 16 | "run_ffplay", 17 | ] 18 | 19 | 20 | def run_ffplay(source: str = None, print_cmd=True, **kwargs): 21 | """Run raw ffplay command.""" 22 | args = ["ffplay", "-hide_banner"] 23 | 24 | _kwargs = {} 25 | for k, v in kwargs.items(): 26 | if v is True: 27 | args.append(f"-{k}") 28 | elif v: 29 | _kwargs[k] = v 30 | 31 | args.extend(convert_kwargs_to_cmd_line_args(_kwargs, sort=False)) 32 | 33 | if source is not None: 34 | args.append(Path(source).as_posix()) 35 | 36 | if print_cmd: 37 | color.greenln(join_cmd_args_seq(args)) 38 | 39 | return subprocess.Popen(args) 40 | 41 | 42 | def 
ffplay_audio(source: str, f: str = None, channels: int = None, ar: int = None, 43 | ss: float = None, t: float = None, loop: int = None, vf: str = None): 44 | """ 45 | Examples: 46 | ffplay song.pcm -f s16le -channels 2 -ar 4 47 | '""" 48 | run_ffplay(source, f=f, channels=channels, ar=ar, ss=ss, t=t, loop=loop, vf=vf) 49 | 50 | 51 | def ffplay_video(source: str, x: int = None, y: int = None, video_size: str = None, 52 | pixel_format: str = None, fs: bool = False, an: bool = False, 53 | vn: bool = False, sn: bool = False, f: str = None, s: str = None, 54 | sync: str = None, ss: float = None, t: float = None, vf: str = None, 55 | af: str = None, seek_interval: int = None, window_title=None, 56 | show_mode=None, loop: int = None): 57 | """ 58 | Examples: 59 | ffplay -f rawvideo -pixel_format yuv420p -s 480*480 texture.yuv 60 | ffplay -f rawvideo -pixel_format rgb24 -s 480*480 texture.rgb 61 | 62 | ffplay video.mp4 -sync audio 63 | ffplay video.mp4 -sync video 64 | ffplay video.mp4 -sync ext 65 | '""" 66 | run_ffplay(source, x=x, y=y, video_size=video_size, pixel_format=pixel_format, 67 | fs=fs, an=an, vn=vn, sn=sn, f=f, s=s, sync=sync, ss=ss, t=t, vf=vf, 68 | af=af, seek_interval=seek_interval, window_title=window_title, 69 | showmode=show_mode, loop=loop) 70 | -------------------------------------------------------------------------------- /ffmpeg/_ffprobe.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.02-25 14:34:07 3 | LastEditors: Rustle Karl 4 | LastEditTime: 2021.06.18 17:21:29 5 | ''' 6 | import json 7 | import subprocess 8 | from pathlib import Path 9 | from typing import Dict, List, Union 10 | 11 | from ._utils import convert_kwargs_to_cmd_line_args, drop_empty_list_values 12 | from .constants import JSON_FORMAT 13 | from .nodes import FFmpegError 14 | 15 | __all__ = [ 16 | 'FFprobe', 17 | 'metadata', 18 | 'run_ffprobe', 19 | ] 20 | 21 | 22 | def run_ffprobe(source, *args: List, **kwargs: Dict): 23 
| '''https://ffmpeg.org/ffprobe-all.html''' 24 | args = ['ffprobe', '-hide_banner'] + list(args) + convert_kwargs_to_cmd_line_args(kwargs) + [source] 25 | 26 | proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) 27 | stdout, stderr = proc.communicate() 28 | if proc.returncode != 0: 29 | raise FFmpegError('ffprobe', stdout, stderr) 30 | 31 | if kwargs.get('print_format') == JSON_FORMAT: 32 | return json.loads(stdout) 33 | 34 | return stderr 35 | 36 | 37 | def metadata(filepath, show_format=False, show_streams=False, show_frames=False, 38 | show_packets=False, show_programs=False, print_format=None, 39 | timeout: float = None, **kwargs) -> Union[dict, str]: 40 | if timeout: 41 | kwargs['timeout'] = timeout * 1000 * 1000 # s 42 | 43 | if print_format == JSON_FORMAT: 44 | kwargs['print_format'] = JSON_FORMAT 45 | show_streams = True 46 | 47 | args = drop_empty_list_values([], show_format=show_format, 48 | show_streams=show_streams, show_frames=show_frames, 49 | show_packets=show_packets, show_programs=show_programs) 50 | 51 | return run_ffprobe(filepath, *args, **kwargs) 52 | 53 | 54 | class FFprobe(object): 55 | 56 | def __init__(self, source: Union[str, Path], show_format=False, 57 | show_streams=True, show_frames=False, show_packets=False, 58 | show_programs=False, print_format='json', timeout: float = None, **kwargs): 59 | self._source = source 60 | self._metadata = metadata(source, show_format=show_format, 61 | show_streams=show_streams, show_frames=show_frames, 62 | show_packets=show_packets, show_programs=show_programs, 63 | print_format=print_format, timeout=timeout, **kwargs) 64 | self._streams = self._metadata.get('streams', []) 65 | 66 | if len(self._streams) == 0: 67 | raise ValueError('This media file does not contain any streams.') 68 | 69 | for stream in self._streams: 70 | codec_type = stream.get('codec_type') 71 | if codec_type == 'video': 72 | self.__video = stream 73 | elif codec_type == 'audio': 74 | self.__audio = stream 
75 | 76 | @property 77 | def source(self): 78 | return self._source 79 | 80 | @property 81 | def metadata(self): 82 | return self._metadata 83 | 84 | @property 85 | def streams(self): 86 | return self._streams 87 | 88 | @property 89 | def video(self): 90 | return self.__video or {} 91 | 92 | @property 93 | def video_duration(self) -> float: 94 | return float(self.video.get('duration')) or 0 95 | 96 | @property 97 | def video_scale(self) -> List[int]: 98 | return self.video.get('width') or 0, self.video.get('height') or 0 99 | 100 | @property 101 | def video_frame_rate(self) -> float: 102 | return eval(self.video.get('r_frame_rate', 30)) 103 | 104 | @property 105 | def video_total_frames(self) -> int: 106 | '''video_total_frames is the number of frames as indicated 107 | in the file metadata - this may not always be accurate.''' 108 | return int(self.video.get('nb_frames')) or \ 109 | int(self.video.get('nb_read_frames')) or \ 110 | int(self.video_frame_rate * self.video_duration) or 0 111 | 112 | @property 113 | def video_tags(self) -> dict: 114 | return self.video.get('tags', {}) 115 | 116 | @property 117 | def video_codec(self) -> str: 118 | return self.video_tags.get('ENCODER') or \ 119 | self.video.get('codec_long_name') or \ 120 | self.video.get('codec_name') 121 | 122 | @property 123 | def audio(self): 124 | return self.__audio or {} 125 | 126 | @property 127 | def audio_duration(self) -> float: 128 | return float(self.audio.get('duration')) or 0 129 | 130 | def __str__(self): 131 | return '' % self.source 132 | 133 | def __dict__(self): 134 | return self.metadata 135 | -------------------------------------------------------------------------------- /ffmpeg/_utils.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.02.25 14:34:07 3 | Description: Omit 4 | LastEditors: Rustle Karl 5 | LastEditTime: 2021.05.04 23:37:10 6 | ''' 7 | from typing import Dict, Iterable, List, Union 8 | 9 | _backslash = '\\' 10 
| _empty_symbols = (None, '', [], {}) # exclude 0 11 | _filter_symbols = {"-filter_complex", "-vf", "-af", "-lavfi"} 12 | 13 | 14 | def drop_empty_dict_values(already: Dict, **kwargs) -> Dict: 15 | overlay = {k: v for k, v in kwargs.items() if v not in _empty_symbols} 16 | return {**already, **overlay} 17 | 18 | 19 | def drop_empty_list_values(already: list, **kwargs) -> list: 20 | already = list(already) 21 | for k, v in kwargs.items(): 22 | if v: 23 | already.append(f'-{k}') 24 | return already 25 | 26 | 27 | def convert_kwargs_string(**kwargs): 28 | return ':'.join([f'{k}={v}' for k, v in kwargs.items() if v not in _empty_symbols]) 29 | 30 | 31 | def escape(text: str, chars: str) -> str: 32 | """Helper function to escape uncomfortable characters.""" 33 | text = str(text) 34 | chars = list(set(chars)) 35 | 36 | if _backslash in chars: 37 | chars.remove(_backslash) 38 | chars.insert(0, _backslash) 39 | 40 | for char in chars: 41 | text = text.replace(char, _backslash + char) 42 | 43 | return text 44 | 45 | 46 | def convert_kwargs_to_cmd_line_args(kwargs: Dict, sort=True) -> List[str]: 47 | """Helper function to build command line arguments out of dict.""" 48 | args = [] 49 | keys = sorted(kwargs.keys()) if sort else kwargs.keys() 50 | 51 | for key in keys: 52 | v = kwargs[key] 53 | 54 | # list, tuple, map 55 | if isinstance(v, Iterable) and not isinstance(v, str): 56 | for value in v: 57 | args.append(f'-{key}') 58 | if value not in _empty_symbols: 59 | args.append(f'{value}') 60 | continue 61 | 62 | args.append(f'-{key}') 63 | 64 | if v not in _empty_symbols: 65 | args.append(f'{v}') 66 | 67 | return args 68 | 69 | 70 | def join_cmd_args_seq(args: List[str]) -> str: 71 | cmd_args_seq = list(args) 72 | 73 | for i in range(len(cmd_args_seq)): 74 | if cmd_args_seq[i] in _filter_symbols: 75 | cmd_args_seq[i + 1] = f'"{cmd_args_seq[i + 1]}"' 76 | elif ' ' in cmd_args_seq[i]: 77 | cmd_args_seq[i] = f'"{cmd_args_seq[i]}"' 78 | 79 | return " ".join(cmd_args_seq) 80 | 81 
| 82 | def string_to_seconds(clock: str) -> int: 83 | if isinstance(clock, (int, float)): 84 | return clock 85 | 86 | clock = [int(c) for c in clock.split(":")] 87 | if len(clock) == 0: 88 | hours, minutes, seconds = 0, 0, 0 89 | elif len(clock) == 1: 90 | hours, minutes, seconds = 0, 0, clock[0] 91 | elif len(clock) == 2: 92 | hours, minutes, seconds = 0, clock[0], clock[1] 93 | else: 94 | hours, minutes, seconds = clock[0], clock[1], clock[2] 95 | 96 | return hours * 60 * 60 + minutes * 60 + seconds 97 | 98 | 99 | def seconds_to_string(seconds: Union[float, int, str]) -> str: 100 | if isinstance(seconds, str): 101 | return seconds 102 | 103 | hours = seconds // (60 * 60) 104 | minutes = seconds % (60 * 60) // 60 105 | seconds -= hours * 60 * 60 + minutes * 60 106 | return f"{hours:02.0f}:{minutes:02.0f}:{seconds:02.03f}" 107 | 108 | 109 | if __name__ == "__main__": 110 | assert escape('a:b', ':') == 'a\:b' 111 | assert escape('a\\:b', ':\\') == 'a\\\\\\:b' 112 | assert (escape('a:b,c[d]e%{}f\'g\'h\\i', '\\\':,[]%') == 'a\\:b\\,c\\[d\\]e\\%{}f\\\'g\\\'h\\\\i') 113 | assert escape(123, ':\\') == '123' 114 | 115 | assert seconds_to_string(345.4246) == '00:05:45.425' 116 | -------------------------------------------------------------------------------- /ffmpeg/constants.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.02-24 14:58:57 3 | LastEditors: Rustle Karl 4 | LastEditTime: 2021.05.04 23:36:56 5 | ''' 6 | import sys 7 | 8 | LINUX = sys.platform == 'linux' 9 | WINDOWS = sys.platform == 'win32' 10 | 11 | # Video Source 12 | VIDEO_SOURCES = { 13 | 'allrgb', 'allyuv', 'color', 'haldclutsrc', 'nullsrc', 14 | 'pal75bars', 'pal100bars', 'rgbtestsrc', 'smptebars', 15 | 'smptehdbars', 'testsrc', 'testsrc2', 'yuvtestsrc' 16 | } 17 | 18 | # CUDA Encoders 19 | H264_NVENC = 'h264_nvenc' 20 | HEVC_NVENC = 'hevc_nvenc' 21 | CUDA_ENCODERS = {H264_NVENC, HEVC_NVENC} 22 | 23 | # CUDA Decoders 24 | H264_CUVID = 
'h264_cuvid' 25 | HEVC_CUVID = 'hevc_cuvid' 26 | MJPEG_CUVID = 'mjpeg_cuvid' 27 | MPEG1_CUVID = 'mpeg1_cuvid' 28 | MPEG2_CUVID = 'mpeg2_cuvid' 29 | MPEG4_CUVID = 'mpeg4_cuvid' 30 | VC1_CUVID = 'vc1_cuvid' 31 | VP8_CUVID = 'vp8_cuvid' 32 | VP9_CUVID = 'vp9_cuvid' 33 | 34 | # Expression 35 | REAL_TIME = '%{localtime:%Y-%m-%d %H-%M-%S}' 36 | 37 | # Format 38 | COPY = 'copy' 39 | RAW_VIDEO = 'rawvideo' 40 | S16LE = 's16le' 41 | 42 | # Pixel Format 43 | RGB24 = 'rgb24' 44 | PCM_S16LE = 'pcm_s16le' 45 | 46 | # PTS 47 | PTS_STARTPTS = 'PTS-STARTPTS' 48 | 49 | # Input/Output 50 | PIPE = 'pipe:' 51 | 52 | # Resolution 53 | HD = HD720 = '1280x720' 54 | FHD = HD1080 = '1920x1080' 55 | QHD = HD2K = HD1440 = '2560x1440' 56 | UHD = HD4K = HD2160 = '3840x2160' 57 | 58 | # Image Formats 59 | IMAGE_FORMATS = {'.bmp', '.gif', '.heif', '.jpeg', '.jpg', '.png', '.raw', '.tiff'} 60 | 61 | JSON_FORMAT = 'json' 62 | -------------------------------------------------------------------------------- /ffmpeg/expression/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.04.29 22:31 3 | Description: Omit 4 | LastEditors: Rustle Karl 5 | LastEditTime: 2021.04.30 09:31:00 6 | ''' 7 | import contextlib 8 | 9 | from .layout import generate_gird_layout 10 | 11 | __all__ = [ 12 | 'generate_gird_layout', 13 | 'generate_resolution', 14 | ] 15 | 16 | 17 | def generate_resolution(width, height) -> str: 18 | with contextlib.suppress(Exception): 19 | return f"{int(width)}x{int(height)}" 20 | -------------------------------------------------------------------------------- /ffmpeg/expression/layout.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.04.29 22:31 3 | Description: Omit 4 | LastEditors: Rustle Karl 5 | LastEditTime: 2021.04.29 22:31 6 | ''' 7 | 8 | # column x row 9 | GIRD_1x4 = '0_0|0_h0|0_h0+h1|0_h0+h1+h2' 10 | GIRD_2x2 = '0_0|0_h0|w0_0|w0_h0' 11 | GIRD_3x3 = 
'0_0|0_h0|0_h0+h1|w0_0|w0_h0|w0_h0+h1|w0+w3_0|w0+w3_h0|w0+w3_h0+h1' 12 | GIRD_4x4 = '0_0|0_h0|0_h0+h1|0_h0+h1+h2|w0_0|w0_h0|w0_h0+h1|w0_h0+h1+h2|' \ 13 | 'w0+w4_0|w0+w4_h0|w0+w4_h0+h1|w0+w4_h0+h1+h2|w0+w4+w8_0|' \ 14 | 'w0+w4+w8_h0|w0+w4+w8_h0+h1|w0+w4+w8_h0+h1+h2' 15 | 16 | 17 | def generate_gird_layout(column: int, row: int) -> str: 18 | layout = [] 19 | 20 | for position in range(column * row): 21 | _column, _row = divmod(position, row) 22 | co = ['w%d' % (i * column) for i in range(_column)] or ['0'] 23 | ro = ['h%d' % i for i in range(_row)] or ['0'] 24 | layout.append(f"{'+'.join(co)}_{'+'.join(ro)}") 25 | 26 | return '|'.join(layout) 27 | 28 | 29 | if __name__ == '__main__': 30 | print(generate_gird_layout(1, 4)) 31 | print(generate_gird_layout(4, 40)) 32 | -------------------------------------------------------------------------------- /ffmpeg/filters/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.04.25 9:36 3 | Description : Omit 4 | LastEditors: Rustle Karl 5 | LastEditTime: 2021.04.25 9:36 6 | ''' 7 | from . 
import afilters, vfilters, avfilters 8 | 9 | __all__ = [ 10 | 'afilters', 11 | 'avfilters', 12 | 'vfilters', 13 | ] 14 | -------------------------------------------------------------------------------- /ffmpeg/settings.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.04.24 23:34 3 | Description: Omit 4 | LastEditors: Rustle Karl 5 | LastEditTime: 2021.04.24 23:34 6 | ''' 7 | from .constants import H264_CUVID, H264_NVENC 8 | 9 | CUDA_ENABLE = True 10 | DEFAULT_ENCODER = H264_NVENC 11 | DEFAULT_DECODER = H264_CUVID 12 | -------------------------------------------------------------------------------- /ffmpeg/tools/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.04.25 10:21 3 | Description : Omit 4 | LastEditors: Rustle Karl 5 | LastEditTime: 2021.04.25 10:21 6 | ''' 7 | from . import atools, avtools, etools, vtools 8 | 9 | try: 10 | from .etools import view 11 | except ImportError as e: 12 | def view(): 13 | raise e 14 | 15 | __all__ = [ 16 | 'atools', 17 | 'avtools', 18 | 'etools', 19 | 'vtools', 20 | ] 21 | -------------------------------------------------------------------------------- /ffmpeg/tools/atools.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.02-28 19:35:09 3 | LastEditors: Rustle Karl 4 | LastEditTime: 2021.05.24 07:33:35 5 | ''' 6 | import re 7 | import subprocess 8 | import sys 9 | from pathlib import Path 10 | from typing import List, Tuple, Union 11 | 12 | from .._ffmpeg import input 13 | from ..constants import PCM_S16LE, S16LE 14 | 15 | __all__ = [ 16 | "convert_audio_to_raw_pcm", 17 | "detect_silence", 18 | ] 19 | 20 | 21 | def convert_audio_to_raw_pcm(src: Union[str, Path], dst: Union[str, Path] = None) -> bytes: 22 | raw, _ = input(src, enable_cuda=False). \ 23 | output(dst or "-", format=S16LE, acodec=PCM_S16LE, 24 | ac=1, ar="16k", enable_cuda=False). 
\ 25 | run(capture_stdout=dst is None) 26 | 27 | return raw 28 | 29 | 30 | def detect_silence(src, *, noise=-60, duration=2) -> List[Tuple[float, float]]: 31 | """Detect silence in an audio stream. 32 | 33 | This filter logs a message when it detects that the input audio volume is less or 34 | equal to a noise tolerance value for a duration greater or equal to the minimum 35 | detected noise duration. 36 | 37 | Args: 38 | noise, n: Set noise tolerance. Can be specified in dB (in case "dB" is appended to the 39 | specified value) or amplitude ratio. Default is -60dB, or 0.001. 40 | duration, d: Set silence duration until notification (default is 2 seconds). 41 | """ 42 | silence_start = re.compile(r'silence_start: ([0-9]+\.?[0-9]*)') 43 | silence_end = re.compile(r'silence_end: ([0-9]+\.?[0-9]*)') 44 | 45 | args = input(src).silencedetect(noise, duration).output("-", format="null").compile() 46 | process = subprocess.Popen(args, stderr=subprocess.PIPE) 47 | 48 | info = process.communicate()[1].decode("utf-8") 49 | if process.returncode != 0: 50 | sys.stderr.write(info) 51 | return [] 52 | 53 | return list(zip(map(float, silence_start.findall(info)), map(float, silence_end.findall(info)))) 54 | -------------------------------------------------------------------------------- /ffmpeg/tools/avtools.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.02-25 20:50:07 3 | LastEditors: Rustle Karl 4 | LastEditTime: 2021.05.04 23:34:46 5 | ''' 6 | import tempfile 7 | from pathlib import Path 8 | from typing import Dict, List, NamedTuple, Optional, Union 9 | 10 | from .. 
import FFprobe, constants 11 | from .._ffmpeg import input, merge_outputs, output 12 | from .._utils import seconds_to_string, string_to_seconds 13 | 14 | __all__ = [ 15 | "adjust_tempo", 16 | "concat_multiple_parts", 17 | "cut_into_multiple_parts", 18 | "merge_video_audio", 19 | "separate_audio_stream", 20 | "separate_video_stream", 21 | ] 22 | 23 | 24 | def adjust_tempo(src: Union[str, Path], dst: Union[str, Path], *, vtempo: float = 2, 25 | atempo: float = 2, acodec=None, vcodec=None, **kwargs): 26 | """Adjust audio and video playback speed. 27 | 28 | Args: 29 | vtempo: video current playback speed * vtempo, -1 mean no video 30 | atempo: audio current playback speed * atempo, -1 mean no audio 31 | """ 32 | _input = input(src) 33 | 34 | v_input = _input.video.setpts(f"{1 / vtempo}*PTS") 35 | a_input = _input.audio.atempo(atempo) 36 | 37 | if vtempo == -1 and atempo == -1: 38 | raise ValueError("`vtempo` and `atempo` cannot all be -1") 39 | 40 | elif vtempo == -1: 41 | a_input.output(dst, acodec=acodec, **kwargs).run() 42 | elif atempo == -1: 43 | v_input.output(dst, vcodec=vcodec, **kwargs).run() 44 | else: 45 | v_input.output(a_input, dst, acodec=acodec, vcodec=vcodec, **kwargs).run() 46 | 47 | 48 | def modify_metadata(src: Union[str, Path], dst: Union[str, Path], *, 49 | metadata: Dict[str, Union[str, int]], specifier: Optional[str] = None): 50 | """Set a metadata key/value pair. 51 | 52 | An optional specifier may be given to set metadata on streams, chapters or programs. 
def cut_into_multiple_parts(src: Union[str, Path], dst: Union[str, Path],
                            *, durations: List[Union[float, int, str]], vcodec="libx264",
                            enable_cuda=True, overwrite=True, accumulative=False):
    """Cut the video or audio into multiple parts.

    Negative numeric durations mark segments to skip; a trailing None means
    "until the end". With `accumulative=True`, values are absolute positions.

    Example:
        avutils.cut_into_multiple_parts("video.mp4", [10, 10, 10, None])
        avutils.cut_into_multiple_parts("music.mp3", [-10, 10, -10, None])

    Raises:
        ValueError: if `durations` is not a list/tuple of at least 2 values.
    """
    if not isinstance(durations, (list, tuple)):
        raise ValueError("`durations` must be a list or tuple, got %r" % type(durations))

    if len(durations) < 2:
        raise ValueError(f'Expected at least 2 duration values; got {len(durations)}')

    # BUG FIX: `dst / path.stem` below requires a Path, but callers may pass
    # a plain str (the annotation allows it) — coerce once up front.
    dst = Path(dst)
    path = Path(src)
    start_position = 0
    outs = []
    raw = input(src, enable_cuda=enable_cuda)

    for order, duration in enumerate(durations):
        # A negative value skips that stretch of the input.
        if isinstance(duration, (int, float)) and duration < 0:
            start_position -= duration
            continue

        if isinstance(duration, str):
            duration = string_to_seconds(duration)

        # Accumulative values are absolute end positions, convert to length.
        if isinstance(duration, (int, float)) and accumulative:
            duration -= start_position

        outs.append(raw.output(f"{dst / path.stem}_{order}{path.suffix}",
                               acodec="copy", vcodec=vcodec, enable_cuda=enable_cuda,
                               start_position=seconds_to_string(start_position), duration=duration))

        if duration is not None:
            start_position += duration

    merge_outputs(*outs).run(overwrite=overwrite)
outs.append(raw.output(f"{dst / path.stem}_{order}{path.suffix}", 154 | acodec="copy", vcodec=vcodec, enable_cuda=enable_cuda, 155 | start_position=start_position, duration=duration)) 156 | 157 | merge_outputs(*outs).run(overwrite=overwrite) 158 | 159 | 160 | def cut_one_part(src: Union[str, Path], dst: Union[str, Path], *, vcodec="libx264", 161 | enable_cuda=True, overwrite=True, start: Union[str, int, float] = None, 162 | end: Union[str, int, float] = None, duration: Union[int, float] = None, 163 | only_video=False, only_audio=False): 164 | '''Intercept a piece of audio or video from audio or video. 165 | Slower than `cut_into_multiple_parts`.''' 166 | if isinstance(start, (int, float)) and isinstance(end, (int, float)): 167 | end = start + duration if end == 0 or end < start else end 168 | 169 | av = input(src, enable_cuda=enable_cuda) 170 | a = av.audio.atrim(start=start, end=end, duration=duration).asetpts("PTS-STARTPTS") 171 | v = av.video.trim(start=start, end=end, duration=duration).setpts("PTS-STARTPTS") 172 | 173 | streams = [v, a] 174 | if only_video: 175 | streams = [v] 176 | elif only_audio: 177 | streams = [a] 178 | 179 | output(*streams, dst, vcodec=vcodec, enable_cuda=enable_cuda). 
\ 180 | run(overwrite=overwrite) 181 | 182 | 183 | def merge_video_audio(v_src: Union[str, Path], a_src: Union[str, Path], 184 | dst: Union[str, Path], vcodec="copy", acodec="copy"): 185 | v_input = input(v_src).video 186 | a_input = input(a_src).audio 187 | v_input.output(a_input, dst, acodec=acodec, vcodec=vcodec).run() 188 | 189 | 190 | def concat_multiple_parts(dst: Union[str, Path], *files: Union[str, Path], 191 | vcodec="copy", acodec="copy"): 192 | '''Splicing video or audio clips.''' 193 | concat = tempfile.mktemp() 194 | 195 | with open(concat, "w", encoding="utf-8") as fp: 196 | for file in files: 197 | fp.write("file '%s'\n" % Path(file).absolute().as_posix()) 198 | 199 | # https://stackoverflow.com/questions/38996925/ffmpeg-concat-unsafe-file-name/56029574 200 | input(concat, format="concat", safe=0).output(dst, acodec=acodec, vcodec=vcodec).run() 201 | 202 | Path(concat).unlink(missing_ok=True) 203 | 204 | 205 | def start_one_stream_loop(src: Union[str, Path], *, loop: int = -1, codec="copy", 206 | vcodec="copy", acodec="copy", format="mpegts", 207 | source_url: str = "udp://localhost:10240"): 208 | '''Push a video stream in a loop forever.''' 209 | input(src, stream_loop=loop, re=None) \ 210 | .output(source_url, codec=codec, vcodec=vcodec, 211 | acodec=acodec, format=format). \ 212 | run(capture_stdout=False, capture_stderr=False) 213 | 214 | 215 | def detect_source_stream(source_url: str, timeout: int = 3) -> dict: 216 | '''Detect whether is a stream source.''' 217 | return FFprobe(source_url, timeout=timeout).metadata 218 | 219 | 220 | def merge_m3u8_files(src: Union[Path, str], dst: Union[Path, str]): 221 | '''Merge m3u8 playlist together.''' 222 | input(src, protocol_whitelist='file,http,https,tcp,tls,crypto'). \ 223 | output(dst, codec=constants.COPY). 
'''
Date: 2021.06.25 8:54
Description : Additional, experimental, non-essential features
LastEditors: Rustle Karl
LastEditTime: 2021.06.25 09:24:48
'''
import contextlib
import os
import shutil
import signal
import socket
import tempfile
from pathlib import Path
from threading import Thread
from typing import Union

from .._dag import get_outgoing_edges, topological_sort
from .._ffmpeg import input
from .._node import get_stream_spec_nodes, streamable
from ..constants import LINUX, WINDOWS
from ..nodes import FilterNode, InputNode, OutputNode

try:
    import psutil
except ImportError as e:
    # Defer the ImportError until ScreenRecorder actually needs psutil.
    psutil = e

__all__ = [
    'ScreenRecorder',  # Record screen content
    'show_progress',  # Show progress bar
    'view',  # Draw a topology graph
]


# --------------------- Draw a topology graph ---------------------

@streamable()
def view(stream_spec, save_path=None, detail=False, show_labels=True, pipe=False):
    """Render the filter graph of ``stream_spec`` as a PNG via graphviz.

    Args:
        stream_spec: stream (or streams) whose node graph is drawn.
        save_path: base path for the rendered image; mutually exclusive
            with ``pipe``.  Defaults to a path in a fresh temp directory.
        detail: use each node's verbose description as its label.
        show_labels: annotate edges with stream labels and selectors.
        pipe: return the rendered image bytes instead of opening a viewer.

    Returns:
        The rendered bytes when ``pipe`` is true, otherwise ``stream_spec``
        itself so calls can be chained.
    """
    import graphviz

    if pipe and save_path is not None:
        # Fixed message: the conflicting parameter is `save_path`, not `source`.
        raise ValueError("Can't specify both `save_path` and `pipe`")
    elif not pipe and save_path is None:
        # tempfile.mktemp() is deprecated and race-prone; build the path
        # inside a freshly created private directory instead.
        save_path = os.path.join(tempfile.mkdtemp(), "graph")

    nodes = get_stream_spec_nodes(stream_spec)
    sorted_nodes, outgoing_edge_graphs = topological_sort(nodes)
    graph = graphviz.Digraph(format='png')
    graph.attr(rankdir='LR')

    for node in sorted_nodes:
        # Color-code by role: green=input, blue=output, yellow=filter.
        if isinstance(node, InputNode):
            color = '#99CC00'
        elif isinstance(node, OutputNode):
            color = '#99CCFF'
        elif isinstance(node, FilterNode):
            color = '#FFCC00'
        else:
            color = None

        label = node.detail if detail else node.brief

        graph.node(str(hash(node)), label, shape='box', style='filled', fillcolor=color)
        outgoing_edge_graph = outgoing_edge_graphs.get(node, {})

        for edge in get_outgoing_edges(node, outgoing_edge_graph):
            kwargs = {}
            upstream_label = edge.UpstreamLabel
            downstream_label = edge.DownstreamLabel
            selector = edge.Selector
            if show_labels and (upstream_label or downstream_label or selector):
                if upstream_label is None:
                    upstream_label = ''
                if selector is not None:
                    upstream_label += ":" + selector
                if downstream_label is None:
                    downstream_label = ''
                if upstream_label != '' and downstream_label != '':
                    middle = ' {} '.format('\u2192')  # Right Arrow
                else:
                    middle = ''
                kwargs['label'] = '{} {} {}'.format(upstream_label, middle, downstream_label)

            upstream_node_id = str(hash(edge.UpstreamNode))
            downstream_node_id = str(hash(edge.DownstreamNode))
            graph.edge(upstream_node_id, downstream_node_id, **kwargs)

    if pipe:
        return graph.pipe()

    graph.view(save_path, cleanup=True)

    return stream_spec


# -------------------- Record screen content --------------------

def record_screen_windows(dst: Union[str, Path], *, area="desktop", duration=None,
                          frame_rate=30, offset_x=0, offset_y=0, video_size="vga",
                          output_vcodec="libx264", output_acodec="libfaac",
                          output_format="flv", run=True, **output_kwargs):
    """Record the Windows desktop (gdigrab) to ``dst``.

    https://ffmpeg.org/ffmpeg-all.html#gdigrab

    When ``run`` is false the prepared command is returned instead of being
    executed, so callers (e.g. ScreenRecorder) can start it asynchronously.
    """
    command = input(area, format="gdigrab", frame_rate=frame_rate, offset_x=offset_x,
                    offset_y=offset_y, video_size=video_size, duration=duration). \
        output(dst, vcodec=output_vcodec, acodec=output_acodec,
               format=output_format, **output_kwargs)
    if run:
        command.run(capture_stdout=False, capture_stderr=False)
    else:
        return command


class ScreenRecorder(object):
    """Pausable/resumable screen recorder built on ffmpeg + psutil.

    Only implemented for Windows (gdigrab); raises NotImplementedError on
    other platforms.  Requires psutil; the deferred import error is
    re-raised here.
    """

    def __init__(self, dst: Union[str, Path], *, area="desktop",
                 frame_rate=30, offset_x=0, offset_y=0, video_size="vga",
                 duration=None, output_vcodec="libx264", output_acodec="libfaac",
                 output_format="flv", **output_kwargs):

        if isinstance(psutil, Exception):
            raise psutil  # psutil failed to import at module load time

        if WINDOWS:
            self.command = record_screen_windows(
                dst, area=area, frame_rate=frame_rate, duration=duration,
                offset_x=offset_x, offset_y=offset_y, video_size=video_size,
                output_vcodec=output_vcodec, output_acodec=output_acodec,
                output_format=output_format, run=False, **output_kwargs
            )
        elif LINUX:
            raise NotImplementedError
        else:
            raise NotImplementedError

        # psutil.Process of the running ffmpeg once start() is called.
        # (Previously annotated `psutil.Process`, which is unsafe when
        # psutil is an ImportError placeholder.)
        self.proc = None
        self.paused = False

    def start(self):
        """Start recording, or resume when currently paused."""
        if self.proc is None:
            _proc = self.command.run_async(quiet=True)
            self.proc = psutil.Process(_proc.pid)
        elif self.paused:
            self.proc.resume()
            self.paused = False

    def pause(self):
        """Suspend the ffmpeg process; no-op when not running or already paused."""
        if self.proc is None or self.paused:
            return
        self.proc.suspend()
        self.paused = True

    def resume(self):
        """Alias of start()."""
        self.start()

    def stop(self):
        """Ask ffmpeg to finish cleanly (CTRL_C_EVENT is Windows-only)."""
        if self.proc is None:
            return

        self.proc.send_signal(signal.CTRL_C_EVENT)


# ---------------------- Show progress bar ----------------------

"""
Process video and report and show progress bar.

This is an example of using the ffmpeg `-progress` option with a
unix-domain socket to report progress in the form of a progress bar.

The video processing simply consists of converting the video to
sepia colors, but the same pattern can be applied to other use cases.
"""
@contextlib.contextmanager
def open_temporary_directory() -> str:
    """Yield a freshly created temporary directory, removing it on exit."""
    temporary_directory = tempfile.mkdtemp()
    try:
        yield temporary_directory
    finally:
        shutil.rmtree(temporary_directory)


def accept(s: socket.socket, handler):
    """Accept one connection on *s* and stream ffmpeg ``-progress`` events.

    ffmpeg writes ``key=value`` lines (``frame=N``, ``fps=...``,
    ``progress=continue|end``) to the socket.  For ``frame`` lines the
    handler is called with the synthetic key ``"continueframe"`` and the
    number of frames processed since the previous event (an int); every
    other line is passed through as ``handler(key, value)`` with string
    values.

    Fix: the previous version used ``buffer.splitlines()``, which cannot
    tell a complete trailing line from a partial one (``b"a\\nb\\n"``
    splits to ``[b'a', b'b']``), so a complete last line was carried over
    and fused with the next recv() chunk — frame updates were lost unless
    they happened to fuse into ``progress=continueframe=N``.  Splitting on
    ``b"\\n"`` keeps an empty remainder after a complete line, so lines are
    never fused.
    """
    conn, _ = s.accept()

    buffer = b""
    previous = 0

    while more := conn.recv(4096):
        buffer += more
        # Keep the (possibly empty) trailing partial line in the buffer.
        *lines, buffer = buffer.split(b"\n")

        for line in lines:
            key, sep, value = line.decode().partition("=")
            if not sep:
                continue  # not a key=value line

            if key == "frame":
                try:
                    frame = int(value)
                except ValueError:
                    continue  # malformed frame count
                # Report the delta under the key show_progress() listens for.
                handler("continueframe", frame - previous)
                previous = frame
            else:
                handler(key, value)

    conn.close()


@contextlib.contextmanager
def watch_progress(handler):
    """Context manager for creating a unix-domain socket and listen for
    ffmpeg progress events.

    The socket domain is yielded from the context manager and the
    socket is closed when the context manager is exited.

    Args:
        handler: a function to be called when progress events are
            received; receives a ``key`` argument and ``value`` argument.
    """
    with open_temporary_directory() as temporary_d:
        unix_sock = os.path.join(temporary_d, "sock")
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

        with contextlib.closing(s):
            s.bind(unix_sock)
            s.listen(1)
            # Reader runs in the background; it exits when ffmpeg closes
            # its end of the connection.
            Thread(target=accept, args=(s, handler)).start()

            yield unix_sock


@contextlib.contextmanager
def show_progress(total_frames):
    """Create a unix-domain socket to watch progress and
    render tqdm progress bar.

    Yields the socket path to pass to ffmpeg's ``-progress`` option.
    """
    from tqdm import tqdm

    if not LINUX:
        # AF_UNIX progress sockets are only wired up for Linux here.
        raise OSError("Only supports Linux platform")

    with tqdm(total=total_frames, desc="Processing", unit="f") as bar:

        def handler(key, value):
            if key == "continueframe":
                bar.update(value)
            elif key == "progress" and value == "end":
                # Snap the bar to 100% on completion.
                bar.update(bar.total - bar.n)

        with watch_progress(handler) as unix_sock:
            yield unix_sock


# ======================= ffmpeg/tools/vtools.py =======================

'''
Date: 2021.03.01 19:46:08
LastEditors: Rustle Karl
LastEditTime: 2021.05.24 07:34:01
'''
import os
from pathlib import Path
from typing import Union

import numpy as np
import vfilters 13 | from .._ffmpeg import input 14 | from .._ffprobe import FFprobe 15 | from ..constants import PIPE, RAW_VIDEO, RGB24 16 | 17 | __all__ = [ 18 | "assemble_video_from_images", 19 | "compare_2_videos", 20 | "convert_video_to_np_array", 21 | "generate_video_thumbnail", 22 | "hstack_videos", 23 | "read_frame_as_jpeg", 24 | "side_by_side_2_videos", 25 | "timed_video_screenshot", 26 | "video_add_image_watermark", 27 | "video_add_text_watermark", 28 | "vstack_videos", 29 | ] 30 | 31 | 32 | # TODO 33 | def capture_x11_screen(dst: Union[str, Path], *, screen: str = None, 34 | duration: int = 3, frame_rate: int = 25): 35 | screen = os.environ["DISPLAY"] if screen is None else screen 36 | 37 | input(screen, duration=duration, format="x11grab", 38 | video_size="cif", framerate=frame_rate).output(dst).run() 39 | 40 | 41 | def capture_video_key_frame(src: Union[str, Path], dst: Union[str, Path]): 42 | if not os.path.isdir(dst): 43 | dst = os.path.dirname(dst) 44 | 45 | input(src).output(os.path.join(dst, Path(src).stem + "_key_frame_%d.png"), 46 | video_filter="select='eq(pict_type,PICT_TYPE_I)'", vsync="vfr").run() 47 | 48 | 49 | def timed_video_screenshot(src: Union[str, Path], dst: Union[str, Path], interval=3): 50 | os.makedirs(dst, exist_ok=True) 51 | input(src).output(os.path.join(dst, Path(src).stem + "_screenshot_%d.png"), 52 | video_filter=f"fps=1/{interval}").run() 53 | 54 | 55 | def flip_mirror_video(src: Union[str, Path], dst: Union[str, Path], *, 56 | horizontal=True, keep_audio=True, hwaccel: str = None, 57 | output_vcodec: str = None, **output_kwargs): 58 | input_v = input(src, hwaccel=hwaccel) 59 | 60 | if horizontal: 61 | stream = input_v.pad(w="2*iw").overlay(input_v.hflip(), x="w") 62 | else: 63 | stream = input_v.pad(h="2*ih").overlay(input_v.vflip(), y="h") 64 | 65 | if keep_audio: 66 | stream.output(input_v.audio, dst, acodec="copy", 67 | vcodec=output_vcodec, **output_kwargs).run() 68 | else: 69 | stream.output(dst, vcodec=output_vcodec, 
**output_kwargs).run() 70 | 71 | 72 | def compare_2_videos(v1: Union[str, Path], v2: Union[str, Path], 73 | dst: Union[str, Path], horizontal=True): 74 | if horizontal: 75 | hstack_videos(dst, v1, v2) # Fastest 76 | # input(v1).pad(w="2*iw").overlay(input(v2), x="w").output(dst).run() 77 | else: 78 | vstack_videos(dst, v1, v2) # Fastest 79 | # side_by_side_2_videos(v1, v2, dst, False) # Fast 80 | # input(v1).pad(h="2*ih").overlay(input(v2), y="h").output(dst).run() # Slowest 81 | 82 | 83 | def side_by_side_2_videos(v1: Union[str, Path], v2: Union[str, Path], 84 | dst: Union[str, Path], horizontal=True): 85 | vfilters.framepack(input(v1), input(v2), format="sbs" if horizontal else "tab").output(dst).run() 86 | 87 | 88 | def hstack_videos(dst: Union[str, Path], *videos: Union[str, Path]): 89 | vfilters.hstack(*list(map(input, videos)), inputs=len(videos), shortest=0).output(dst).run() 90 | 91 | 92 | def vstack_videos(dst: Union[str, Path], *videos: Union[str, Path]): 93 | vfilters.vstack(*list(map(input, videos)), inputs=len(videos), shortest=0).output(dst).run() 94 | 95 | 96 | def xstack_videos(*videos: Union[str, Path], dst: Union[str, Path], layout: str, fill: str = None): 97 | vfilters.xstack(*list(map(input, videos)), inputs=len(videos), 98 | layout=layout, shortest=0, fill=fill).output(dst).run() 99 | 100 | 101 | def concat_2_videos_with_gltransition(dst: Union[str, Path], *videos: Union[str, Path], 102 | offset: float = 0, duration: float = 0, source: Union[str, Path] = None): 103 | if len(videos) < 2: 104 | raise ValueError(f'Expected at least 2 videos; got {len(videos)}') 105 | 106 | in1, in2 = input(videos[0]), input(videos[1]) 107 | vfilters.gltransition(in1, in2, offset=offset, duration=duration, 108 | source=source).output(dst).run() 109 | 110 | 111 | def concat_2_videos_with_xfade(dst: Union[str, Path], *videos: Union[str, Path], 112 | transition: str = None, duration: float = None, 113 | offset: float = None, expr: str = None, 114 | hwaccel: str = 
None, output_vcodec: str = None): 115 | if len(videos) < 2: 116 | raise ValueError(f'Expected at least 2 videos; got {len(videos)}') 117 | 118 | in1, in2 = input(videos[0], hwaccel=hwaccel), input(videos[1], hwaccel=hwaccel) 119 | vfilters.xfade(in1, in2, transition=transition, duration=duration, offset=offset, expr=expr). \ 120 | output(dst, vcodec=output_vcodec).run() 121 | 122 | 123 | def video_add_image_watermark(v_src: Union[str, Path], i_src: Union[str, Path], 124 | dst: Union[str, Path], *, w: int = 0, h: int = 0, 125 | x: int = 0, y: int = 0, _eval='init', ): 126 | v_input = input(v_src) 127 | i_input = input(i_src).scale(w, h) 128 | v_input.overlay(i_input, x=x, y=y, eval=_eval).output(v_input.audio, dst, acodec="copy").run() 129 | 130 | 131 | def video_add_text_watermark(v_src, dst, *, text: str, x: int = 0, y: int = 0, 132 | fontsize: int = 24, fontfile: Union[str, Path] = None, 133 | keep_audio=True): 134 | v_input = input(v_src) 135 | stream = v_input.drawtext(text=text, x=x, y=y, fontsize=fontsize, fontfile=fontfile) 136 | 137 | if keep_audio: 138 | stream.output(v_input.audio, dst, acodec="copy").run() 139 | else: 140 | stream.output(dst).run() 141 | 142 | 143 | def video_add_ass_subtitle(v_src, s_src=None, dst=None, keep_audio=True): 144 | if not s_src: 145 | s_src = Path(v_src).with_suffix('.ass') 146 | assert s_src.exists() 147 | 148 | if not dst: 149 | path = Path(v_src) 150 | dst = path.with_name(path.stem + '_video_ass.mp4') 151 | 152 | v_input = input(v_src) 153 | stream = v_input.ass(filename=str(s_src)) 154 | 155 | if keep_audio: 156 | stream.output(v_input.audio, dst, acodec="copy").run() 157 | else: 158 | stream.output(dst).run() 159 | 160 | 161 | def assemble_video_from_images(glob_pattern, dst, *, pattern_type="glob", frame_rate=25): 162 | # https://stackoverflow.com/questions/31201164/ffmpeg-error-pattern-type-glob-was-selected-but-globbing-is-not-support-ed-by 163 | if pattern_type: 164 | input(glob_pattern, frame_rate=frame_rate, 
pattern_type=pattern_type).output(dst).run() 165 | else: 166 | input(glob_pattern, frame_rate=frame_rate).output(dst).run() 167 | 168 | 169 | def convert_video_to_np_array(src, *, width=0, height=0) -> np.ndarray: 170 | width_, height_ = FFprobe(src).video_scale 171 | stdout, _ = input(src, enable_cuda=False). \ 172 | output(PIPE, format=RAW_VIDEO, pixel_format=RGB24, enable_cuda=False).run() 173 | return np.frombuffer(stdout, np.uint8).reshape([-1, height or height_, width or width_, 3]) 174 | 175 | 176 | def read_frame_as_jpeg(src, frame=1) -> bytes: 177 | raw, _ = input(src, enable_cuda=False).select(f"gte(n, {frame})"). \ 178 | output(PIPE, vframes=1, format='image2', vcodec='mjpeg', enable_cuda=False). \ 179 | run(capture_stdout=True) 180 | return raw 181 | 182 | 183 | def generate_video_thumbnail(src, dst, *, start_position=1, width=-1, height=-1): 184 | input(src, start_position=start_position).scale(width, height).output(dst, vframes=1).run() 185 | -------------------------------------------------------------------------------- /ffmpeg/transitions/__init__.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.04.25 20:19:14 3 | LastEditors: Rustle Karl 4 | LastEditTime: 2021.04.25 20:20:09 5 | ''' 6 | from ._gltransition import All as GLTransitionAll 7 | from ._gltransition import GLTransition 8 | from ._xfade import All as XFadeAll 9 | from ._xfade import XFade 10 | 11 | __all__ = [ 12 | "GLTransition", 13 | "GLTransitionAll", 14 | "XFade", 15 | "XFadeAll", 16 | ] 17 | -------------------------------------------------------------------------------- /ffmpeg/transitions/_gltransition.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021.03.07 22:24:00 3 | LastEditors: Rustle Karl 4 | LastEditTime: 2021.03.07 22:28:36 5 | ''' 6 | from pathlib import Path 7 | 8 | _dst = Path(__file__).absolute().parent / "glsl" 9 | 10 | 11 | class GLTransition(object): 
    # One attribute per shader; names are CamelCase forms of the file names.
    Angular = _dst / "angular.glsl"
    Bounce = _dst / "Bounce.glsl"
    BowTieHorizontal = _dst / "BowTieHorizontal.glsl"
    BowTieVertical = _dst / "BowTieVertical.glsl"
    BowTieWithParameter = _dst / "BowTieWithParameter.glsl"
    Burn = _dst / "burn.glsl"
    ButterflyWaveScrawler = _dst / "ButterflyWaveScrawler.glsl"
    CannabisLeaf = _dst / "cannabisleaf.glsl"
    Circle = _dst / "circle.glsl"
    CircleCrop = _dst / "CircleCrop.glsl"
    CircleOpen = _dst / "circleopen.glsl"
    ColorPhase = _dst / "colorphase.glsl"
    ColourDistance = _dst / "ColourDistance.glsl"
    CrazyParametricFun = _dst / "CrazyParametricFun.glsl"
    Crosshatch = _dst / "crosshatch.glsl"
    CrossWarp = _dst / "crosswarp.glsl"
    CrossZoom = _dst / "CrossZoom.glsl"
    Cube = _dst / "cube.glsl"
    DirectionalEasing = _dst / "directional-easing.glsl"
    Directional = _dst / "Directional.glsl"
    DirectionalWarp = _dst / "directionalwarp.glsl"
    DirectionalWipe = _dst / "directionalwipe.glsl"
    Displacement = _dst / "displacement.glsl"
    DoomScreenTransition = _dst / "DoomScreenTransition.glsl"
    Doorway = _dst / "doorway.glsl"
    Dreamy = _dst / "Dreamy.glsl"
    DreamyZoom = _dst / "DreamyZoom.glsl"
    Fade = _dst / "fade.glsl"
    FadeColor = _dst / "fadecolor.glsl"
    FadeGrayscale = _dst / "fadegrayscale.glsl"
    FilmBurn = _dst / "FilmBurn.glsl"
    FlyEye = _dst / "flyeye.glsl"
    GlitchDisplace = _dst / "GlitchDisplace.glsl"
    GlitchMemories = _dst / "GlitchMemories.glsl"
    GridFlip = _dst / "GridFlip.glsl"
    Heart = _dst / "heart.glsl"
    Hexagonalize = _dst / "hexagonalize.glsl"
    InvertedPageCurl = _dst / "InvertedPageCurl.glsl"
    Kaleidoscope = _dst / "kaleidoscope.glsl"
    LeftRight = _dst / "LeftRight.glsl"
    LinearBlur = _dst / "LinearBlur.glsl"
    Luma = _dst / "luma.glsl"
    LuminanceMelt = _dst / "luminance_melt.glsl"
    Morph = _dst / "morph.glsl"
    Mosaic = _dst / "Mosaic.glsl"
    MultiplyBlend = _dst / "multiply_blend.glsl"
    Perlin = _dst / "perlin.glsl"
    Pinwheel = _dst / "pinwheel.glsl"
    Pixelize = _dst / "pixelize.glsl"
    PolarFunction = _dst / "polar_function.glsl"
    PolkaDotsCurtain = _dst / "PolkaDotsCurtain.glsl"
    Radial = _dst / "Radial.glsl"
    RandomNoisex = _dst / "randomNoisex.glsl"
    RandomSquares = _dst / "randomsquares.glsl"
    Ripple = _dst / "ripple.glsl"
    RotateScaleFade = _dst / "rotate_scale_fade.glsl"
    SimpleZoom = _dst / "SimpleZoom.glsl"
    SquaresWire = _dst / "squareswire.glsl"
    Squeeze = _dst / "squeeze.glsl"
    StereoViewer = _dst / "StereoViewer.glsl"
    Swap = _dst / "swap.glsl"
    Swirl = _dst / "Swirl.glsl"
    TangentMotionBlur = _dst / "tangentMotionBlur.glsl"
    TopBottom = _dst / "TopBottom.glsl"
    WaterDrop = _dst / "WaterDrop.glsl"
    Wind = _dst / "wind.glsl"
    WindowBlinds = _dst / "windowblinds.glsl"
    WindowSlice = _dst / "windowslice.glsl"
    WipeDown = _dst / "wipeDown.glsl"
    WipeLeft = _dst / "wipeLeft.glsl"
    WipeRight = _dst / "wipeRight.glsl"
    WipeUp = _dst / "wipeUp.glsl"
    ZoomInCircles = _dst / "ZoomInCircles.glsl"


# Every shader path defined on GLTransition (dunder names excluded).
All = [v for k, v in vars(GLTransition).items() if not k.endswith("__")]

if __name__ == '__main__':
    print(All)

# ======================= ffmpeg/transitions/_xfade.py =========================

'''
Date: 2021.03.07 21:50:15
LastEditors: Rustle Karl
LastEditTime: 2021.03.07 21:55:27
'''


class XFade(object):
    """Apply cross fade from one input video stream to another input video stream.
    The cross fade is applied for specified duration.
11 | 12 | https://ffmpeg.org/ffmpeg-filters.html#xfade 13 | """ 14 | Circleclose = "circleclose" 15 | Circlecrop = "circlecrop" 16 | Circleopen = "circleopen" 17 | Custom = "custom" 18 | Diagbl = "diagbl" 19 | Diagbr = "diagbr" 20 | Diagtl = "diagtl" 21 | Diagtr = "diagtr" 22 | Dissolve = "dissolve" 23 | Distance = "distance" 24 | Fade = "fade" 25 | Fadeblack = "fadeblack" 26 | Fadegrays = "fadegrays" 27 | Fadewhite = "fadewhite" 28 | Hblur = "hblur" 29 | Hlslice = "hlslice" 30 | Horzopen = "horzopen" 31 | Hrslice = "hrslice" 32 | Pixelize = "pixelize" 33 | Radial = "radial" 34 | Rectcrop = "rectcrop" 35 | Slidedown = "slidedown" 36 | Slideleft = "slideleft" 37 | Slideright = "slideright" 38 | Slideup = "slideup" 39 | Smoothdown = "smoothdown" 40 | Smoothleft = "smoothleft" 41 | Smoothright = "smoothright" 42 | Smoothup = "smoothup" 43 | Squeezeh = "squeezeh" 44 | Squeezev = "squeezev" 45 | Vdslice = "vdslice" 46 | Vertclose = "vertclose" 47 | Vertopen = "vertopen" 48 | Vuslice = "vuslice" 49 | Wipebl = "wipebl" 50 | Wipebr = "wipebr" 51 | Wipedown = "wipedown" 52 | Wipeleft = "wipeleft" 53 | Wiperight = "wiperight" 54 | Wipetl = "wipetl" 55 | Wipetr = "wipetr" 56 | Wipeup = "wipeup" 57 | Horzclose = "horzclose" 58 | 59 | 60 | All = [v for k, v in vars(XFade).items() if not k.endswith("__")] 61 | 62 | if __name__ == '__main__': 63 | print(All) 64 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/Bounce.glsl: -------------------------------------------------------------------------------- 1 | // Author: Adrian Purser 2 | // License: MIT 3 | 4 | uniform vec4 shadow_colour; // = vec4(0.,0.,0.,.6) 5 | uniform float shadow_height; // = 0.075 6 | uniform float bounces; // = 3.0 7 | 8 | const float PI = 3.14159265358; 9 | 10 | vec4 transition (vec2 uv) { 11 | float time = progress; 12 | float stime = sin(time * PI / 2.); 13 | float phase = time * PI * bounces; 14 | float y = (abs(cos(phase))) * (1.0 - stime); 15 | 
  // Vertical distance from the current pixel to the bouncing bottom edge.
  float d = uv.y - y;
  return mix(
    mix(
      getToColor(uv),
      shadow_colour,
      step(d, shadow_height) * (1. - mix(
        ((d / shadow_height) * shadow_colour.a) + (1.0 - shadow_colour.a),
        1.0,
        smoothstep(0.95, 1., progress) // fade-out the shadow at the end
      ))
    ),
    getFromColor(vec2(uv.x, uv.y + (1.0 - y))),
    step(d, 0.0)
  );
}

// ===== ffmpeg/transitions/glsl/BowTieHorizontal.glsl =====
// Author: huynx
// License: MIT

vec2 bottom_left = vec2(0.0, 1.0);
vec2 bottom_right = vec2(1.0, 1.0);
vec2 top_left = vec2(0.0, 0.0);
vec2 top_right = vec2(1.0, 0.0);
vec2 center = vec2(0.5, 0.5);

// Signed-area test: which side of segment p1->p2 the point p3 lies on.
float check(vec2 p1, vec2 p2, vec2 p3)
{
  return (p1.x - p3.x) * (p2.y - p3.y) - (p2.x - p3.x) * (p1.y - p3.y);
}

// Point-in-triangle via a consistent sign of the three edge tests.
bool PointInTriangle (vec2 pt, vec2 p1, vec2 p2, vec2 p3)
{
    bool b1, b2, b3;
    b1 = check(pt, p1, p2) < 0.0;
    b2 = check(pt, p2, p3) < 0.0;
    b3 = check(pt, p3, p1) < 0.0;
    return ((b1 == b2) && (b2 == b3));
}

// Left wedge of the bow tie; grows with progress.
bool in_left_triangle(vec2 p){
  vec2 vertex1, vertex2, vertex3;
  vertex1 = vec2(progress, 0.5);
  vertex2 = vec2(0.0, 0.5-progress);
  vertex3 = vec2(0.0, 0.5+progress);
  if (PointInTriangle(p, vertex1, vertex2, vertex3))
  {
    return true;
  }
  return false;
}

// Right wedge of the bow tie; mirror of in_left_triangle.
bool in_right_triangle(vec2 p){
  vec2 vertex1, vertex2, vertex3;
  vertex1 = vec2(1.0-progress, 0.5);
  vertex2 = vec2(1.0, 0.5-progress);
  vertex3 = vec2(1.0, 0.5+progress);
  if (PointInTriangle(p, vertex1, vertex2, vertex3))
  {
    return true;
  }
  return false;
}

// Distance-based anti-aliasing along the two slanted triangle edges.
float blur_edge(vec2 bot1, vec2 bot2, vec2 top, vec2 testPt)
{
  vec2 lineDir = bot1 - top;
  vec2 perpDir = vec2(lineDir.y, -lineDir.x);
  vec2 dirToPt1 = bot1 - testPt;
  float dist1 = abs(dot(normalize(perpDir), dirToPt1));

  lineDir = bot2 - top;
  perpDir = vec2(lineDir.y, -lineDir.x);
  dirToPt1 = bot2 - testPt;
  float min_dist = min(abs(dot(normalize(perpDir), dirToPt1)), dist1);

  if (min_dist < 0.005) {
    return min_dist / 0.005;
  }
  else {
    return 1.0;
  };
}


vec4 transition (vec2 uv) {
  if (in_left_triangle(uv))
  {
    if (progress < 0.1)
    {
      return getFromColor(uv);
    }
    if (uv.x < 0.5)
    {
      vec2 vertex1 = vec2(progress, 0.5);
      vec2 vertex2 = vec2(0.0, 0.5-progress);
      vec2 vertex3 = vec2(0.0, 0.5+progress);
      return mix(
        getFromColor(uv),
        getToColor(uv),
        blur_edge(vertex2, vertex3, vertex1, uv)
      );
    }
    else
    {
      if (progress > 0.0)
      {
        return getToColor(uv);
      }
      else
      {
        return getFromColor(uv);
      }
    }
  }
  else if (in_right_triangle(uv))
  {
    if (uv.x >= 0.5)
    {
      vec2 vertex1 = vec2(1.0-progress, 0.5);
      vec2 vertex2 = vec2(1.0, 0.5-progress);
      vec2 vertex3 = vec2(1.0, 0.5+progress);
      return mix(
        getFromColor(uv),
        getToColor(uv),
        blur_edge(vertex2, vertex3, vertex1, uv)
      );
    }
    else
    {
      return getFromColor(uv);
    }
  }
  else {
    return getFromColor(uv);
  }
}

// ===== ffmpeg/transitions/glsl/BowTieVertical.glsl =====
// Author: huynx
// License: MIT

// Signed-area test (same helper as the horizontal variant).
float check(vec2 p1, vec2 p2, vec2 p3)
{
  return (p1.x - p3.x) * (p2.y - p3.y) - (p2.x - p3.x) * (p1.y - p3.y);
}

bool PointInTriangle (vec2 pt, vec2 p1, vec2 p2, vec2 p3)
{
    bool b1, b2, b3;
    b1 = check(pt, p1, p2) < 0.0;
    b2 = check(pt, p2, p3) < 0.0;
    b3 =
        check(pt, p3, p1) < 0.0;
    return ((b1 == b2) && (b2 == b3));
}

// Top wedge of the vertical bow tie; grows with progress.
bool in_top_triangle(vec2 p){
  vec2 vertex1, vertex2, vertex3;
  vertex1 = vec2(0.5, progress);
  vertex2 = vec2(0.5-progress, 0.0);
  vertex3 = vec2(0.5+progress, 0.0);
  if (PointInTriangle(p, vertex1, vertex2, vertex3))
  {
    return true;
  }
  return false;
}

// Bottom wedge; mirror of in_top_triangle.
bool in_bottom_triangle(vec2 p){
  vec2 vertex1, vertex2, vertex3;
  vertex1 = vec2(0.5, 1.0 - progress);
  vertex2 = vec2(0.5-progress, 1.0);
  vertex3 = vec2(0.5+progress, 1.0);
  if (PointInTriangle(p, vertex1, vertex2, vertex3))
  {
    return true;
  }
  return false;
}

// Distance-based anti-aliasing along the two slanted triangle edges.
float blur_edge(vec2 bot1, vec2 bot2, vec2 top, vec2 testPt)
{
  vec2 lineDir = bot1 - top;
  vec2 perpDir = vec2(lineDir.y, -lineDir.x);
  vec2 dirToPt1 = bot1 - testPt;
  float dist1 = abs(dot(normalize(perpDir), dirToPt1));

  lineDir = bot2 - top;
  perpDir = vec2(lineDir.y, -lineDir.x);
  dirToPt1 = bot2 - testPt;
  float min_dist = min(abs(dot(normalize(perpDir), dirToPt1)), dist1);

  if (min_dist < 0.005) {
    return min_dist / 0.005;
  }
  else {
    return 1.0;
  };
}


vec4 transition (vec2 uv) {
  if (in_top_triangle(uv))
  {
    if (progress < 0.1)
    {
      return getFromColor(uv);
    }
    if (uv.y < 0.5)
    {
      vec2 vertex1 = vec2(0.5, progress);
      vec2 vertex2 = vec2(0.5-progress, 0.0);
      vec2 vertex3 = vec2(0.5+progress, 0.0);
      return mix(
        getFromColor(uv),
        getToColor(uv),
        blur_edge(vertex2, vertex3, vertex1, uv)
      );
    }
    else
    {
      if (progress > 0.0)
      {
        return getToColor(uv);
      }
      else
      {
        return getFromColor(uv);
      }
    }
  }
  else if (in_bottom_triangle(uv))
  {
    if (uv.y >= 0.5)
    {
      vec2 vertex1 = vec2(0.5, 1.0-progress);
      vec2 vertex2 = vec2(0.5-progress, 1.0);
      vec2 vertex3 = vec2(0.5+progress, 1.0);
      return mix(
        getFromColor(uv),
        getToColor(uv),
        blur_edge(vertex2, vertex3, vertex1, uv)
      );
    }
    else
    {
      return getFromColor(uv);
    }
  }
  else {
    return getFromColor(uv);
  }
}

// ===== ffmpeg/transitions/glsl/BowTieWithParameter.glsl =====
// Author:KMojek
// License: MIT

uniform float adjust; // = 0.5;
uniform bool reverse; // = false;

// Signed-area test: which side of segment p1->p2 the point p3 lies on.
float check(vec2 p1, vec2 p2, vec2 p3)
{
  return (p1.x - p3.x) * (p2.y - p3.y) - (p2.x - p3.x) * (p1.y - p3.y);
}

bool pointInTriangle(vec2 pt, vec2 p1, vec2 p2, vec2 p3)
{

  bool b1 = check(pt, p1, p2) < 0.0;
  bool b2 = check(pt, p2, p3) < 0.0;
  bool b3 = check(pt, p3, p1) < 0.0;
  return b1 == b2 && b2 == b3;
}

const float height = 0.5;

// First half: two triangles (tip at x = adjust) close in vertically.
vec4 transition_firstHalf( vec2 uv, float prog )
{
  if ( uv.y < 0.5 )
  {
    vec2 botLeft = vec2( -0., prog-height );
    vec2 botRight = vec2( 1., prog-height );
    vec2 tip = vec2( adjust, prog );
    if ( pointInTriangle( uv, botLeft, botRight, tip ) )
      return getToColor(uv);
  }
  else
  {
    vec2 topLeft = vec2( -0., 1.-prog+height );
    vec2 topRight = vec2( 1., 1.-prog+height );
    vec2 tip = vec2( adjust, 1.-prog );
    if ( pointInTriangle( uv, topLeft, topRight, tip ) )
      return getToColor( uv );
  }
  return getFromColor( uv );
}

// Second half: remaining wedges of the old frame sweep out horizontally.
vec4 transition_secondHalf( vec2 uv, float prog )
{
  if ( uv.x > adjust )
  {
    vec2 top = vec2( prog + height, 1. );
    vec2 bot = vec2( prog + height, -0.
 );
    vec2 tip = vec2( mix( adjust, 1.0, 2.0 * (prog - 0.5) ), 0.5 );
    if ( pointInTriangle( uv, top, bot, tip) )
      return getFromColor( uv );
  }
  else
  {
    vec2 top = vec2( 1.0-prog - height, 1. );
    vec2 bot = vec2( 1.0-prog - height, -0. );
    vec2 tip = vec2( mix( adjust, 0.0, 2.0 * (prog - 0.5) ), 0.5 );
    if ( pointInTriangle( uv, top, bot, tip) )
      return getFromColor( uv );
  }
  return getToColor( uv );
}

// Run the two halves in order, or swapped/mirrored when `reverse` is set.
vec4 transition (vec2 uv) {
  if ( reverse )
    return ( progress < 0.5 ) ? transition_secondHalf( uv, 1.-progress ) : transition_firstHalf( uv, 1.-progress );
  else
    return ( progress < 0.5 ) ? transition_firstHalf( uv, progress ) : transition_secondHalf( uv, progress );
}

// ===== ffmpeg/transitions/glsl/ButterflyWaveScrawler.glsl =====
// Author: mandubian
// License: MIT
uniform float amplitude; // = 1.0
uniform float waves; // = 30.0
uniform float colorSeparation; // = 0.3
float PI = 3.14159265358979323846264;
// Butterfly-curve displacement field around `center`.
float compute(vec2 p, float progress, vec2 center) {
  vec2 o = p*sin(progress * amplitude)-center;
  // horizontal vector
  vec2 h = vec2(1., 0.);
  // butterfly polar function (don't ask me why this one :))
  float theta = acos(dot(o, h)) * waves;
  return (exp(cos(theta)) - 2.*cos(4.*theta) + pow(sin((2.*theta - PI) / 24.), 5.)) / 10.;
}
// Displace both frames by the field; the old frame gets per-channel
// (chromatic) separation scaled by `colorSeparation`.
vec4 transition(vec2 uv) {
  vec2 p = uv.xy / vec2(1.0).xy;
  float inv = 1. - progress;
  vec2 dir = p - vec2(.5);
  float dist = length(dir);
  float disp = compute(p, progress, vec2(0.5, 0.5)) ;
  vec4 texTo = getToColor(p + inv*disp);
  vec4 texFrom = vec4(
    getFromColor(p + progress*disp*(1.0 - colorSeparation)).r,
    getFromColor(p + progress*disp).g,
    getFromColor(p + progress*disp*(1.0 + colorSeparation)).b,
    1.0);
  return texTo*progress + texFrom*inv;
}

// ===== ffmpeg/transitions/glsl/CircleCrop.glsl =====
// License: MIT
// Author: fkuteken
// ported by gre from https://gist.github.com/fkuteken/f63e3009c1143950dee9063c3b83fb88

uniform vec4 bgcolor; // = vec4(0.0, 0.0, 0.0, 1.0)

// Aspect-ratio correction and a cubic ease on the circle radius.
vec2 ratio2 = vec2(1.0, 1.0 / ratio);
float s = pow(2.0 * abs(progress - 0.5), 3.0);

vec4 transition(vec2 p) {
  float dist = length((vec2(p) - 0.5) * ratio2);
  return mix(
    progress < 0.5 ?
        getFromColor(p) : getToColor(p), // branching is ok here as we statically depend on progress uniform (branching won't change over pixels)
    bgcolor,
    step(s, dist)
  );
}

// ===== ffmpeg/transitions/glsl/ColourDistance.glsl =====
// License: MIT
// Author: P-Seebauer
// ported by gre from https://gist.github.com/P-Seebauer/2a5fa2f77c883dd661f9

uniform float power; // = 5.0

// Pixels whose from/to colors are already close (distance <= progress)
// switch early; everything blends toward the target as progress^power.
vec4 transition(vec2 p) {
  vec4 fTex = getFromColor(p);
  vec4 tTex = getToColor(p);
  float m = step(distance(fTex, tTex), progress);
  return mix(
    mix(fTex, tTex, m),
    tTex,
    pow(progress, power)
  );
}

// ===== ffmpeg/transitions/glsl/CrazyParametricFun.glsl =====
// Author: mandubian
// License: MIT

uniform float a; // = 4
uniform float b; // = 1
uniform float amplitude; // = 120
uniform float smoothness; // = 0.1

// Hypocycloid-style parametric offset, scaled by distance from center.
vec4 transition(vec2 uv) {
  vec2 p = uv.xy / vec2(1.0).xy;
  vec2 dir = p - vec2(.5);
  float dist = length(dir);
  float x = (a - b) * cos(progress) + b * cos(progress * ((a / b) - 1.));
  float y = (a - b) * sin(progress) - b * sin(progress * ((a / b) - 1.));
  vec2 offset = dir * vec2(sin(progress * dist * amplitude * x), sin(progress * dist * amplitude * y)) / smoothness;
  return mix(getFromColor(p + offset), getToColor(p), smoothstep(0.2, 1.0, progress));
}

// ===== ffmpeg/transitions/glsl/CrossZoom.glsl =====
// License: MIT
// Author: rectalogic
// ported by gre from https://gist.github.com/rectalogic/b86b90161503a0023231

// Converted from https://github.com/rectalogic/rendermix-basic-effects/blob/master/assets/com/rendermix/CrossZoom/CrossZoom.frag
// Which is based on https://github.com/evanw/glfx.js/blob/master/src/filters/blur/zoomblur.js
// With additional easing functions from https://github.com/rectalogic/rendermix-basic-effects/blob/master/assets/com/rendermix/Easing/Easing.glsllib

uniform float strength; // = 0.4

const float PI = 3.141592653589793;

float Linear_ease(in float begin, in float change, in float duration, in float time) {
  return change * time / duration + begin;
}

float Exponential_easeInOut(in float begin, in float change, in float duration, in float time) {
  if (time == 0.0)
    return begin;
  else if (time == duration)
    return begin + change;
  time = time / (duration / 2.0);
  if (time < 1.0)
    return change / 2.0 * pow(2.0, 10.0 * (time - 1.0)) + begin;
  return change / 2.0 * (-pow(2.0, -10.0 * (time - 1.0)) + 2.0) + begin;
}

float Sinusoidal_easeInOut(in float begin, in float change, in float duration, in float time) {
  return -change / 2.0 * (cos(PI * time / duration) - 1.0) + begin;
}

// Classic one-liner pseudo-random hash.
float rand (vec2 co) {
  return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453);
}

vec3 crossFade(in
vec2 uv, in float dissolve) { 37 | return mix(getFromColor(uv).rgb, getToColor(uv).rgb, dissolve); 38 | } 39 | 40 | vec4 transition(vec2 uv) { 41 | vec2 texCoord = uv.xy / vec2(1.0).xy; 42 | 43 | // Linear interpolate center across center half of the image 44 | vec2 center = vec2(Linear_ease(0.25, 0.5, 1.0, progress), 0.5); 45 | float dissolve = Exponential_easeInOut(0.0, 1.0, 1.0, progress); 46 | 47 | // Mirrored sinusoidal loop. 0->strength then strength->0 48 | float strength = Sinusoidal_easeInOut(0.0, strength, 0.5, progress); 49 | 50 | vec3 color = vec3(0.0); 51 | float total = 0.0; 52 | vec2 toCenter = center - texCoord; 53 | 54 | /* randomize the lookup values to hide the fixed number of samples */ 55 | float offset = rand(uv); 56 | 57 | for (float t = 0.0; t <= 40.0; t++) { 58 | float percent = (t + offset) / 40.0; 59 | float weight = 4.0 * (percent - percent * percent); 60 | color += crossFade(texCoord + toCenter * percent * strength, dissolve) * weight; 61 | total += weight; 62 | } 63 | return vec4(color / total, 1.0); 64 | } 65 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/Directional.glsl: -------------------------------------------------------------------------------- 1 | // Author: Gaëtan Renaudeau 2 | // License: MIT 3 | 4 | uniform vec2 direction; // = vec2(0.0, 1.0) 5 | 6 | vec4 transition (vec2 uv) { 7 | vec2 p = uv + progress * sign(direction); 8 | vec2 f = fract(p); 9 | return mix( 10 | getToColor(f), 11 | getFromColor(f), 12 | step(0.0, p.y) * step(p.y, 1.0) * step(0.0, p.x) * step(p.x, 1.0) 13 | ); 14 | } 15 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/DoomScreenTransition.glsl: -------------------------------------------------------------------------------- 1 | // Author: Zeh Fernando 2 | // License: MIT 3 | 4 | 5 | // Transition parameters -------- 6 | 7 | // Number of total bars/columns 8 | uniform int bars; // = 30 
9 | 10 | // Multiplier for speed ratio. 0 = no variation when going down, higher = some elements go much faster 11 | uniform float amplitude; // = 2 12 | 13 | // Further variations in speed. 0 = no noise, 1 = super noisy (ignore frequency) 14 | uniform float noise; // = 0.1 15 | 16 | // Speed variation horizontally. the bigger the value, the shorter the waves 17 | uniform float frequency; // = 0.5 18 | 19 | // How much the bars seem to "run" from the middle of the screen first (sticking to the sides). 0 = no drip, 1 = curved drip 20 | uniform float dripScale; // = 0.5 21 | 22 | 23 | // The code proper -------- 24 | 25 | float rand(int num) { 26 | return fract(mod(float(num) * 67123.313, 12.0) * sin(float(num) * 10.3) * cos(float(num))); 27 | } 28 | 29 | float wave(int num) { 30 | float fn = float(num) * frequency * 0.1 * float(bars); 31 | return cos(fn * 0.5) * cos(fn * 0.13) * sin((fn+10.0) * 0.3) / 2.0 + 0.5; 32 | } 33 | 34 | float drip(int num) { 35 | return sin(float(num) / float(bars - 1) * 3.141592) * dripScale; 36 | } 37 | 38 | float pos(int num) { 39 | return (noise == 0.0 ? wave(num) : mix(wave(num), rand(num), noise)) + (dripScale == 0.0 ? 
0.0 : drip(num)); 40 | } 41 | 42 | vec4 transition(vec2 uv) { 43 | int bar = int(uv.x * (float(bars))); 44 | float scale = 1.0 + pos(bar) * amplitude; 45 | float phase = progress * scale; 46 | float posY = uv.y / vec2(1.0).y; 47 | vec2 p; 48 | vec4 c; 49 | if (phase + posY < 1.0) { 50 | p = vec2(uv.x, uv.y + mix(0.0, vec2(1.0).y, phase)) / vec2(1.0).xy; 51 | c = getFromColor(p); 52 | } else { 53 | p = uv.xy / vec2(1.0).xy; 54 | c = getToColor(p); 55 | } 56 | 57 | // Finally, apply the color 58 | return c; 59 | } 60 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/Dreamy.glsl: -------------------------------------------------------------------------------- 1 | // Author: mikolalysenko 2 | // License: MIT 3 | 4 | vec2 offset(float progress, float x, float theta) { 5 | float phase = progress*progress + progress + theta; 6 | float shifty = 0.03*progress*cos(10.0*(progress+x)); 7 | return vec2(0, shifty); 8 | } 9 | vec4 transition(vec2 p) { 10 | return mix(getFromColor(p + offset(progress, p.x, 0.0)), getToColor(p + offset(1.0-progress, p.x, 3.14)), progress); 11 | } 12 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/DreamyZoom.glsl: -------------------------------------------------------------------------------- 1 | // Author: Zeh Fernando 2 | // License: MIT 3 | 4 | // Definitions -------- 5 | #define DEG2RAD 0.03926990816987241548078304229099 // 1/180*PI 6 | 7 | 8 | // Transition parameters -------- 9 | 10 | // In degrees 11 | uniform float rotation; // = 6 12 | 13 | // Multiplier 14 | uniform float scale; // = 1.2 15 | 16 | 17 | // The code proper -------- 18 | 19 | vec4 transition(vec2 uv) { 20 | // Massage parameters 21 | float phase = progress < 0.5 ? progress * 2.0 : (progress - 0.5) * 2.0; 22 | float angleOffset = progress < 0.5 ? mix(0.0, rotation * DEG2RAD, phase) : mix(-rotation * DEG2RAD, 0.0, phase); 23 | float newScale = progress < 0.5 ? 
mix(1.0, scale, phase) : mix(scale, 1.0, phase); 24 | 25 | vec2 center = vec2(0, 0); 26 | 27 | // Calculate the source point 28 | vec2 assumedCenter = vec2(0.5, 0.5); 29 | vec2 p = (uv.xy - vec2(0.5, 0.5)) / newScale * vec2(ratio, 1.0); 30 | 31 | // This can probably be optimized (with distance()) 32 | float angle = atan(p.y, p.x) + angleOffset; 33 | float dist = distance(center, p); 34 | p.x = cos(angle) * dist / ratio + 0.5; 35 | p.y = sin(angle) * dist + 0.5; 36 | vec4 c = progress < 0.5 ? getFromColor(p) : getToColor(p); 37 | 38 | // Finally, apply the color 39 | return c + (progress < 0.5 ? mix(0.0, 1.0, phase) : mix(1.0, 0.0, phase)); 40 | } 41 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/FilmBurn.glsl: -------------------------------------------------------------------------------- 1 | // author: Anastasia Dunbar 2 | // license: MIT 3 | uniform float Seed; // = 2.31 4 | float sigmoid(float x, float a) { 5 | float b = pow(x*2.,a)/2.; 6 | if (x > .5) { 7 | b = 1.-pow(2.-(x*2.),a)/2.; 8 | } 9 | return b; 10 | } 11 | float rand(float co){ 12 | return fract(sin((co*24.9898)+Seed)*43758.5453); 13 | } 14 | float rand(vec2 co){ 15 | return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); 16 | } 17 | float apow(float a,float b) { return pow(abs(a),b)*sign(b); } 18 | vec3 pow3(vec3 a,vec3 b) { return vec3(apow(a.r,b.r),apow(a.g,b.g),apow(a.b,b.b)); } 19 | float smooth_mix(float a,float b,float c) { return mix(a,b,sigmoid(c,2.)); } 20 | float random(vec2 co, float shft){ 21 | co += 10.; 22 | return smooth_mix(fract(sin(dot(co.xy ,vec2(12.9898+(floor(shft)*.5),78.233+Seed))) * 43758.5453),fract(sin(dot(co.xy ,vec2(12.9898+(floor(shft+1.)*.5),78.233+Seed))) * 43758.5453),fract(shft)); 23 | } 24 | float smooth_random(vec2 co, float shft) { 25 | return 
smooth_mix(smooth_mix(random(floor(co),shft),random(floor(co+vec2(1.,0.)),shft),fract(co.x)),smooth_mix(random(floor(co+vec2(0.,1.)),shft),random(floor(co+vec2(1.,1.)),shft),fract(co.x)),fract(co.y)); 26 | } 27 | vec4 texture(vec2 p) { 28 | return mix(getFromColor(p), getToColor(p), sigmoid(progress,10.)); 29 | } 30 | #define pi 3.14159265358979323 31 | #define clamps(x) clamp(x,0.,1.) 32 | 33 | vec4 transition(vec2 p) { 34 | vec3 f = vec3(0.); 35 | for (float i = 0.; i < 13.; i++) { 36 | f += sin(((p.x*rand(i)*6.)+(progress*8.))+rand(i+1.43))*sin(((p.y*rand(i+4.4)*6.)+(progress*6.))+rand(i+2.4)); 37 | f += 1.-clamps(length(p-vec2(smooth_random(vec2(progress*1.3),i+1.),smooth_random(vec2(progress*.5),i+6.25)))*mix(20.,70.,rand(i))); 38 | } 39 | f += 4.; 40 | f /= 11.; 41 | f = pow3(f*vec3(1.,0.7,0.6),vec3(1.,2.-sin(progress*pi),1.3)); 42 | f *= sin(progress*pi); 43 | 44 | p -= .5; 45 | p *= 1.+(smooth_random(vec2(progress*5.),6.3)*sin(progress*pi)*.05); 46 | p += .5; 47 | 48 | vec4 blurred_image = vec4(0.); 49 | float bluramount = sin(progress*pi)*.03; 50 | #define repeats 50. 
51 | for (float i = 0.; i < repeats; i++) { 52 | vec2 q = vec2(cos(degrees((i/repeats)*360.)),sin(degrees((i/repeats)*360.))) * (rand(vec2(i,p.x+p.y))+bluramount); 53 | vec2 uv2 = p+(q*bluramount); 54 | blurred_image += texture(uv2); 55 | } 56 | blurred_image /= repeats; 57 | 58 | return blurred_image+vec4(f,0.); 59 | } 60 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/GlitchDisplace.glsl: -------------------------------------------------------------------------------- 1 | // Author: Matt DesLauriers 2 | // License: MIT 3 | 4 | #ifdef GL_ES 5 | precision highp float; 6 | #endif 7 | 8 | float random(vec2 co) 9 | { 10 | float a = 12.9898; 11 | float b = 78.233; 12 | float c = 43758.5453; 13 | float dt= dot(co.xy ,vec2(a,b)); 14 | float sn= mod(dt,3.14); 15 | return fract(sin(sn) * c); 16 | } 17 | float voronoi( in vec2 x ) { 18 | vec2 p = floor( x ); 19 | vec2 f = fract( x ); 20 | float res = 8.0; 21 | for( float j=-1.; j<=1.; j++ ) 22 | for( float i=-1.; i<=1.; i++ ) { 23 | vec2 b = vec2( i, j ); 24 | vec2 r = b - f + random( p + b ); 25 | float d = dot( r, r ); 26 | res = min( res, d ); 27 | } 28 | return sqrt( res ); 29 | } 30 | 31 | vec2 displace(vec4 tex, vec2 texCoord, float dotDepth, float textureDepth, float strength) { 32 | float b = voronoi(.003 * texCoord + 2.0); 33 | float g = voronoi(0.2 * texCoord); 34 | float r = voronoi(texCoord - 1.0); 35 | vec4 dt = tex * 1.0; 36 | vec4 dis = dt * dotDepth + 1.0 - tex * textureDepth; 37 | 38 | dis.x = dis.x - 1.0 + textureDepth*dotDepth; 39 | dis.y = dis.y - 1.0 + textureDepth*dotDepth; 40 | dis.x *= strength; 41 | dis.y *= strength; 42 | vec2 res_uv = texCoord ; 43 | res_uv.x = res_uv.x + dis.x - 0.0; 44 | res_uv.y = res_uv.y + dis.y; 45 | return res_uv; 46 | } 47 | 48 | float ease1(float t) { 49 | return t == 0.0 || t == 1.0 50 | ? t 51 | : t < 0.5 52 | ? 
+0.5 * pow(2.0, (20.0 * t) - 10.0) 53 | : -0.5 * pow(2.0, 10.0 - (t * 20.0)) + 1.0; 54 | } 55 | float ease2(float t) { 56 | return t == 1.0 ? t : 1.0 - pow(2.0, -10.0 * t); 57 | } 58 | 59 | 60 | 61 | vec4 transition(vec2 uv) { 62 | vec2 p = uv.xy / vec2(1.0).xy; 63 | vec4 color1 = getFromColor(p); 64 | vec4 color2 = getToColor(p); 65 | vec2 disp = displace(color1, p, 0.33, 0.7, 1.0-ease1(progress)); 66 | vec2 disp2 = displace(color2, p, 0.33, 0.5, ease2(progress)); 67 | vec4 dColor1 = getToColor(disp); 68 | vec4 dColor2 = getFromColor(disp2); 69 | float val = ease1(progress); 70 | vec3 gray = vec3(dot(min(dColor2, dColor1).rgb, vec3(0.299, 0.587, 0.114))); 71 | dColor2 = vec4(gray, 1.0); 72 | dColor2 *= 2.0; 73 | color1 = mix(color1, dColor2, smoothstep(0.0, 0.5, progress)); 74 | color2 = mix(color2, dColor1, smoothstep(1.0, 0.5, progress)); 75 | return mix(color1, color2, val); 76 | //gl_FragColor = mix(gl_FragColor, dColor, smoothstep(0.0, 0.5, progress)); 77 | 78 | //gl_FragColor = mix(texture2D(from, p), texture2D(to, p), progress); 79 | } 80 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/GlitchMemories.glsl: -------------------------------------------------------------------------------- 1 | // author: Gunnar Roth 2 | // based on work from natewave 3 | // license: MIT 4 | vec4 transition(vec2 p) { 5 | vec2 block = floor(p.xy / vec2(16)); 6 | vec2 uv_noise = block / vec2(64); 7 | uv_noise += floor(vec2(progress) * vec2(1200.0, 3500.0)) / vec2(64); 8 | vec2 dist = progress > 0.0 ? 
(fract(uv_noise) - 0.5) * 0.3 *(1.0 -progress) : vec2(0.0); 9 | vec2 red = p + dist * 0.2; 10 | vec2 green = p + dist * .3; 11 | vec2 blue = p + dist * .5; 12 | 13 | return vec4(mix(getFromColor(red), getToColor(red), progress).r,mix(getFromColor(green), getToColor(green), progress).g,mix(getFromColor(blue), getToColor(blue), progress).b,1.0); 14 | } 15 | 16 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/GridFlip.glsl: -------------------------------------------------------------------------------- 1 | // License: MIT 2 | // Author: TimDonselaar 3 | // ported by gre from https://gist.github.com/TimDonselaar/9bcd1c4b5934ba60087bdb55c2ea92e5 4 | 5 | uniform ivec2 size; // = ivec2(4) 6 | uniform float pause; // = 0.1 7 | uniform float dividerWidth; // = 0.05 8 | uniform vec4 bgcolor; // = vec4(0.0, 0.0, 0.0, 1.0) 9 | uniform float randomness; // = 0.1 10 | 11 | float rand (vec2 co) { 12 | return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); 13 | } 14 | 15 | float getDelta(vec2 p) { 16 | vec2 rectanglePos = floor(vec2(size) * p); 17 | vec2 rectangleSize = vec2(1.0 / vec2(size).x, 1.0 / vec2(size).y); 18 | float top = rectangleSize.y * (rectanglePos.y + 1.0); 19 | float bottom = rectangleSize.y * rectanglePos.y; 20 | float left = rectangleSize.x * rectanglePos.x; 21 | float right = rectangleSize.x * (rectanglePos.x + 1.0); 22 | float minX = min(abs(p.x - left), abs(p.x - right)); 23 | float minY = min(abs(p.y - top), abs(p.y - bottom)); 24 | return min(minX, minY); 25 | } 26 | 27 | float getDividerSize() { 28 | vec2 rectangleSize = vec2(1.0 / vec2(size).x, 1.0 / vec2(size).y); 29 | return min(rectangleSize.x, rectangleSize.y) * dividerWidth; 30 | } 31 | 32 | vec4 transition(vec2 p) { 33 | if(progress < pause) { 34 | float currentProg = progress / pause; 35 | float a = 1.0; 36 | if(getDelta(p) < getDividerSize()) { 37 | a = 1.0 - currentProg; 38 | } 39 | return mix(bgcolor, getFromColor(p), a); 40 
| } 41 | else if(progress < 1.0 - pause){ 42 | if(getDelta(p) < getDividerSize()) { 43 | return bgcolor; 44 | } else { 45 | float currentProg = (progress - pause) / (1.0 - pause * 2.0); 46 | vec2 q = p; 47 | vec2 rectanglePos = floor(vec2(size) * q); 48 | 49 | float r = rand(rectanglePos) - randomness; 50 | float cp = smoothstep(0.0, 1.0 - r, currentProg); 51 | 52 | float rectangleSize = 1.0 / vec2(size).x; 53 | float delta = rectanglePos.x * rectangleSize; 54 | float offset = rectangleSize / 2.0 + delta; 55 | 56 | p.x = (p.x - offset)/abs(cp - 0.5)*0.5 + offset; 57 | vec4 a = getFromColor(p); 58 | vec4 b = getToColor(p); 59 | 60 | float s = step(abs(vec2(size).x * (q.x - delta) - 0.5), abs(cp - 0.5)); 61 | return mix(bgcolor, mix(b, a, step(cp, 0.5)), s); 62 | } 63 | } 64 | else { 65 | float currentProg = (progress - 1.0 + pause) / pause; 66 | float a = 1.0; 67 | if(getDelta(p) < getDividerSize()) { 68 | a = currentProg; 69 | } 70 | return mix(bgcolor, getToColor(p), a); 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/InvertedPageCurl.glsl: -------------------------------------------------------------------------------- 1 | // author: Hewlett-Packard 2 | // license: BSD 3 Clause 3 | // Adapted by Sergey Kosarevsky from: 4 | // http://rectalogic.github.io/webvfx/examples_2transition-shader-pagecurl_8html-example.html 5 | 6 | /* 7 | Copyright (c) 2010 Hewlett-Packard Development Company, L.P. All rights reserved. 8 | 9 | Redistribution and use in source and binary forms, with or without 10 | modification, are permitted provided that the following conditions are 11 | met: 12 | 13 | * Redistributions of source code must retain the above copyright 14 | notice, this list of conditions and the following disclaimer. 
15 | * Redistributions in binary form must reproduce the above 16 | copyright notice, this list of conditions and the following disclaimer 17 | in the documentation and/or other materials provided with the 18 | distribution. 19 | * Neither the name of Hewlett-Packard nor the names of its 20 | contributors may be used to endorse or promote products derived from 21 | this software without specific prior written permission. 22 | 23 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 26 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 27 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 28 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 29 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 30 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 31 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 32 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 33 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
34 | in vec2 texCoord; 35 | */ 36 | 37 | const float MIN_AMOUNT = -0.16; 38 | const float MAX_AMOUNT = 1.5; 39 | float amount = progress * (MAX_AMOUNT - MIN_AMOUNT) + MIN_AMOUNT; 40 | 41 | const float PI = 3.141592653589793; 42 | 43 | const float scale = 512.0; 44 | const float sharpness = 3.0; 45 | 46 | float cylinderCenter = amount; 47 | // 360 degrees * amount 48 | float cylinderAngle = 2.0 * PI * amount; 49 | 50 | const float cylinderRadius = 1.0 / PI / 2.0; 51 | 52 | vec3 hitPoint(float hitAngle, float yc, vec3 point, mat3 rrotation) 53 | { 54 | float hitPoint = hitAngle / (2.0 * PI); 55 | point.y = hitPoint; 56 | return rrotation * point; 57 | } 58 | 59 | vec4 antiAlias(vec4 color1, vec4 color2, float distanc) 60 | { 61 | distanc *= scale; 62 | if (distanc < 0.0) return color2; 63 | if (distanc > 2.0) return color1; 64 | float dd = pow(1.0 - distanc / 2.0, sharpness); 65 | return ((color2 - color1) * dd) + color1; 66 | } 67 | 68 | float distanceToEdge(vec3 point) 69 | { 70 | float dx = abs(point.x > 0.5 ? 1.0 - point.x : point.x); 71 | float dy = abs(point.y > 0.5 ? 
1.0 - point.y : point.y); 72 | if (point.x < 0.0) dx = -point.x; 73 | if (point.x > 1.0) dx = point.x - 1.0; 74 | if (point.y < 0.0) dy = -point.y; 75 | if (point.y > 1.0) dy = point.y - 1.0; 76 | if ((point.x < 0.0 || point.x > 1.0) && (point.y < 0.0 || point.y > 1.0)) return sqrt(dx * dx + dy * dy); 77 | return min(dx, dy); 78 | } 79 | 80 | vec4 seeThrough(float yc, vec2 p, mat3 rotation, mat3 rrotation) 81 | { 82 | float hitAngle = PI - (acos(yc / cylinderRadius) - cylinderAngle); 83 | vec3 point = hitPoint(hitAngle, yc, rotation * vec3(p, 1.0), rrotation); 84 | if (yc <= 0.0 && (point.x < 0.0 || point.y < 0.0 || point.x > 1.0 || point.y > 1.0)) 85 | { 86 | return getToColor(p); 87 | } 88 | 89 | if (yc > 0.0) return getFromColor(p); 90 | 91 | vec4 color = getFromColor(point.xy); 92 | vec4 tcolor = vec4(0.0); 93 | 94 | return antiAlias(color, tcolor, distanceToEdge(point)); 95 | } 96 | 97 | vec4 seeThroughWithShadow(float yc, vec2 p, vec3 point, mat3 rotation, mat3 rrotation) 98 | { 99 | float shadow = distanceToEdge(point) * 30.0; 100 | shadow = (1.0 - shadow) / 3.0; 101 | 102 | if (shadow < 0.0) shadow = 0.0; else shadow *= amount; 103 | 104 | vec4 shadowColor = seeThrough(yc, p, rotation, rrotation); 105 | shadowColor.r -= shadow; 106 | shadowColor.g -= shadow; 107 | shadowColor.b -= shadow; 108 | 109 | return shadowColor; 110 | } 111 | 112 | vec4 backside(float yc, vec3 point) 113 | { 114 | vec4 color = getFromColor(point.xy); 115 | float gray = (color.r + color.b + color.g) / 15.0; 116 | gray += (8.0 / 10.0) * (pow(1.0 - abs(yc / cylinderRadius), 2.0 / 10.0) / 2.0 + (5.0 / 10.0)); 117 | color.rgb = vec3(gray); 118 | return color; 119 | } 120 | 121 | vec4 behindSurface(vec2 p, float yc, vec3 point, mat3 rrotation) 122 | { 123 | float shado = (1.0 - ((-cylinderRadius - yc) / amount * 7.0)) / 6.0; 124 | shado *= 1.0 - abs(point.x - 0.5); 125 | 126 | yc = (-cylinderRadius - cylinderRadius - yc); 127 | 128 | float hitAngle = (acos(yc / cylinderRadius) + 
cylinderAngle) - PI; 129 | point = hitPoint(hitAngle, yc, point, rrotation); 130 | 131 | if (yc < 0.0 && point.x >= 0.0 && point.y >= 0.0 && point.x <= 1.0 && point.y <= 1.0 && (hitAngle < PI || amount > 0.5)) 132 | { 133 | shado = 1.0 - (sqrt(pow(point.x - 0.5, 2.0) + pow(point.y - 0.5, 2.0)) / (71.0 / 100.0)); 134 | shado *= pow(-yc / cylinderRadius, 3.0); 135 | shado *= 0.5; 136 | } 137 | else 138 | { 139 | shado = 0.0; 140 | } 141 | return vec4(getToColor(p).rgb - shado, 1.0); 142 | } 143 | 144 | vec4 transition(vec2 p) { 145 | 146 | const float angle = 100.0 * PI / 180.0; 147 | float c = cos(-angle); 148 | float s = sin(-angle); 149 | 150 | mat3 rotation = mat3( c, s, 0, 151 | -s, c, 0, 152 | -0.801, 0.8900, 1 153 | ); 154 | c = cos(angle); 155 | s = sin(angle); 156 | 157 | mat3 rrotation = mat3( c, s, 0, 158 | -s, c, 0, 159 | 0.98500, 0.985, 1 160 | ); 161 | 162 | vec3 point = rotation * vec3(p, 1.0); 163 | 164 | float yc = point.y - cylinderCenter; 165 | 166 | if (yc < -cylinderRadius) 167 | { 168 | // Behind surface 169 | return behindSurface(p,yc, point, rrotation); 170 | } 171 | 172 | if (yc > cylinderRadius) 173 | { 174 | // Flat surface 175 | return getFromColor(p); 176 | } 177 | 178 | float hitAngle = (acos(yc / cylinderRadius) + cylinderAngle) - PI; 179 | 180 | float hitAngleMod = mod(hitAngle, 2.0 * PI); 181 | if ((hitAngleMod > PI && amount < 0.5) || (hitAngleMod > PI/2.0 && amount < 0.0)) 182 | { 183 | return seeThrough(yc, p, rotation, rrotation); 184 | } 185 | 186 | point = hitPoint(hitAngle, yc, point, rrotation); 187 | 188 | if (point.x < 0.0 || point.y < 0.0 || point.x > 1.0 || point.y > 1.0) 189 | { 190 | return seeThroughWithShadow(yc, p, point, rotation, rrotation); 191 | } 192 | 193 | vec4 color = backside(yc, point); 194 | 195 | vec4 otherColor; 196 | if (yc < 0.0) 197 | { 198 | float shado = 1.0 - (sqrt(pow(point.x - 0.5, 2.0) + pow(point.y - 0.5, 2.0)) / 0.71); 199 | shado *= pow(-yc / cylinderRadius, 3.0); 200 | shado *= 0.5; 201 | 
otherColor = vec4(0.0, 0.0, 0.0, shado); 202 | } 203 | else 204 | { 205 | otherColor = getFromColor(p); 206 | } 207 | 208 | color = antiAlias(color, otherColor, cylinderRadius - abs(yc)); 209 | 210 | vec4 cl = seeThroughWithShadow(yc, p, point, rotation, rrotation); 211 | float dist = distanceToEdge(point); 212 | 213 | return antiAlias(color, cl, dist); 214 | } 215 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/LeftRight.glsl: -------------------------------------------------------------------------------- 1 | // Author:zhmy 2 | // License: MIT 3 | 4 | const vec4 black = vec4(0.0, 0.0, 0.0, 1.0); 5 | const vec2 boundMin = vec2(0.0, 0.0); 6 | const vec2 boundMax = vec2(1.0, 1.0); 7 | 8 | bool inBounds (vec2 p) { 9 | return all(lessThan(boundMin, p)) && all(lessThan(p, boundMax)); 10 | } 11 | 12 | vec4 transition (vec2 uv) { 13 | vec2 spfr,spto = vec2(-1.); 14 | 15 | float size = mix(1.0, 3.0, progress*0.2); 16 | spto = (uv + vec2(-0.5,-0.5))*vec2(size,size)+vec2(0.5,0.5); 17 | spfr = (uv - vec2(1.-progress, 0.0)); 18 | if(inBounds(spfr)){ 19 | return getToColor(spfr); 20 | }else if(inBounds(spto)){ 21 | return getFromColor(spto) * (1.0 - progress); 22 | } else{ 23 | return black; 24 | } 25 | } -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/LinearBlur.glsl: -------------------------------------------------------------------------------- 1 | // author: gre 2 | // license: MIT 3 | uniform float intensity; // = 0.1 4 | const int passes = 6; 5 | 6 | vec4 transition(vec2 uv) { 7 | vec4 c1 = vec4(0.0); 8 | vec4 c2 = vec4(0.0); 9 | 10 | float disp = intensity*(0.5-distance(0.5, progress)); 11 | for (int xi=0; xi.5) { 38 | return getToColor(mrp); 39 | } else { 40 | return getFromColor(mrp); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/PolkaDotsCurtain.glsl: 
-------------------------------------------------------------------------------- 1 | // author: bobylito 2 | // license: MIT 3 | const float SQRT_2 = 1.414213562373; 4 | uniform float dots;// = 20.0; 5 | uniform vec2 center;// = vec2(0, 0); 6 | 7 | vec4 transition(vec2 uv) { 8 | bool nextImage = distance(fract(uv * dots), vec2(0.5, 0.5)) < ( progress / distance(uv, center)); 9 | return nextImage ? getToColor(uv) : getFromColor(uv); 10 | } 11 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/Radial.glsl: -------------------------------------------------------------------------------- 1 | // License: MIT 2 | // Author: Xaychru 3 | // ported by gre from https://gist.github.com/Xaychru/ce1d48f0ce00bb379750 4 | 5 | uniform float smoothness; // = 1.0 6 | 7 | const float PI = 3.141592653589; 8 | 9 | vec4 transition(vec2 p) { 10 | vec2 rp = p*2.-1.; 11 | return mix( 12 | getToColor(p), 13 | getFromColor(p), 14 | smoothstep(0., smoothness, atan(rp.y,rp.x) - (progress-.5) * PI * 2.5) 15 | ); 16 | } 17 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/SimpleZoom.glsl: -------------------------------------------------------------------------------- 1 | // Author: 0gust1 2 | // License: MIT 3 | 4 | uniform float zoom_quickness; // = 0.8 5 | float nQuick = clamp(zoom_quickness,0.2,1.0); 6 | 7 | vec2 zoom(vec2 uv, float amount) { 8 | return 0.5 + ((uv - 0.5) * (1.0-amount)); 9 | } 10 | 11 | vec4 transition (vec2 uv) { 12 | return mix( 13 | getFromColor(zoom(uv, smoothstep(0.0, nQuick, progress))), 14 | getToColor(uv), 15 | smoothstep(nQuick-0.2, 1.0, progress) 16 | ); 17 | } -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/StereoViewer.glsl: -------------------------------------------------------------------------------- 1 | // Tunable parameters 2 | // How much to zoom (out) for the effect ~ 0.5 - 1.0 3 | 
uniform float zoom; // = 0.88 4 | // Corner radius as a fraction of the image height 5 | uniform float corner_radius; // = 0.22 6 | 7 | // author: Ted Schundler 8 | // license: BSD 2 Clause 9 | // Free for use and modification by anyone with credit 10 | 11 | // Copyright (c) 2016, Theodore K Schundler 12 | // All rights reserved. 13 | 14 | // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 15 | 16 | // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 17 | 18 | // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 19 | 20 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 21 | 22 | /////////////////////////////////////////////////////////////////////////////// 23 | // Stereo Viewer Toy Transition // 24 | // // 25 | // Inspired by ViewMaster / Image3D image viewer devices. // 26 | // This effect is similar to what you see when you press the device's lever. 
//
// There is a quick zoom in / out to make the transition 'valid' for GLSL.io
///////////////////////////////////////////////////////////////////////////////

const vec4 black = vec4(0.0, 0.0, 0.0, 1.0);
const vec2 c00 = vec2(0.0, 0.0); // the four corner points
const vec2 c01 = vec2(0.0, 1.0);
const vec2 c11 = vec2(1.0, 1.0);
const vec2 c10 = vec2(1.0, 0.0);

// Check if a point is within a given corner
bool in_corner(vec2 p, vec2 corner, vec2 radius) {
  // determine the direction we want to be filled
  vec2 axis = (c11 - corner) - corner;

  // warp the point so we are always testing the bottom left point with the
  // circle centered on the origin
  p = p - (corner + axis * radius);
  p *= axis / radius;
  return (p.x > 0.0 && p.y > -1.0) || (p.y > 0.0 && p.x > -1.0) || dot(p, p) < 1.0;
}

// Check all four corners
// return a float for v2 for anti-aliasing?
bool test_rounded_mask(vec2 p, vec2 corner_size) {
  return
    in_corner(p, c00, corner_size) &&
    in_corner(p, c01, corner_size) &&
    in_corner(p, c10, corner_size) &&
    in_corner(p, c11, corner_size);
}

// Screen blend mode - https://en.wikipedia.org/wiki/Blend_modes
// This more closely approximates what you see than linear blending
vec4 screen(vec4 a, vec4 b) {
  return 1.0 - (1.0 - a) * (1.0 -b);
}

// Given RGBA, find a value that when screened with itself
// will yield the original value.
vec4 unscreen(vec4 c) {
  return 1.0 - sqrt(1.0 - c);
}

// Grab a pixel, only if it isn't masked out by the rounded corners
vec4 sample_with_corners_from(vec2 p, vec2 corner_size) {
  p = (p - 0.5) / zoom + 0.5;
  if (!test_rounded_mask(p, corner_size)) {
    return black;
  }
  return unscreen(getFromColor(p));
}

vec4 sample_with_corners_to(vec2 p, vec2 corner_size) {
  p = (p - 0.5) / zoom + 0.5;
  if (!test_rounded_mask(p, corner_size)) {
    return black;
  }
  return unscreen(getToColor(p));
}

// special sampling used when zooming - extra zoom parameter and don't unscreen
vec4 simple_sample_with_corners_from(vec2 p, vec2 corner_size, float zoom_amt) {
  p = (p - 0.5) / (1.0 - zoom_amt + zoom * zoom_amt) + 0.5;
  if (!test_rounded_mask(p, corner_size)) {
    return black;
  }
  return getFromColor(p);
}

vec4 simple_sample_with_corners_to(vec2 p, vec2 corner_size, float zoom_amt) {
  p = (p - 0.5) / (1.0 - zoom_amt + zoom * zoom_amt) + 0.5;
  if (!test_rounded_mask(p, corner_size)) {
    return black;
  }
  return getToColor(p);
}

// Basic 2D affine transform matrix helpers
// These really shouldn't be used in a fragment shader - I should work out the
// the math for a translate & rotate function as a pair of dot products instead
// NOTE(review): rotate2d never uses its `ratio` parameter — upstream artifact,
// kept as-is to preserve behavior.

mat3 rotate2d(float angle, float ratio) {
  float s = sin(angle);
  float c = cos(angle);
  return mat3(
    c, s, 0.0,
    -s, c, 0.0,
    0.0, 0.0, 1.0);
}

mat3 translate2d(float x, float y) {
  return mat3(
    1.0, 0.0, 0,
    0.0, 1.0, 0,
    -x, -y, 1.0);
}

mat3 scale2d(float x, float y) {
  return mat3(
    x, 0.0, 0,
    0.0, y, 0,
    0, 0, 1.0);
}

// Split an image and rotate one up and one down along off screen pivot points
vec4 get_cross_rotated(vec3 p3, float angle, vec2 corner_size, float ratio) {
  angle = angle * angle; // easing
  angle /= 2.4; // works out to be a good number of radians

  mat3 center_and_scale = translate2d(-0.5, -0.5) * scale2d(1.0, ratio);
  mat3 unscale_and_uncenter = scale2d(1.0, 1.0/ratio) * translate2d(0.5,0.5);
  mat3 slide_left = translate2d(-2.0,0.0);
  mat3 slide_right = translate2d(2.0,0.0);
  mat3 rotate = rotate2d(angle, ratio);

  mat3 op_a = center_and_scale * slide_right * rotate * slide_left * unscale_and_uncenter;
  mat3 op_b = center_and_scale * slide_left * rotate * slide_right * unscale_and_uncenter;

  vec4 a = sample_with_corners_from((op_a * p3).xy, corner_size);
  vec4 b = sample_with_corners_from((op_b * p3).xy, corner_size);

  return screen(a, b);
}

// Image stays put, but this time move two masks
vec4 get_cross_masked(vec3 p3, float angle, vec2 corner_size, float ratio) {
  angle = 1.0 - angle;
  angle = angle * angle; // easing
  angle /= 2.4;

  vec4 img;

  mat3 center_and_scale = translate2d(-0.5, -0.5) * scale2d(1.0, ratio);
  mat3 unscale_and_uncenter = scale2d(1.0 / zoom, 1.0 / (zoom * ratio)) * translate2d(0.5,0.5);
  mat3 slide_left = translate2d(-2.0,0.0);
  mat3 slide_right = translate2d(2.0,0.0);
  mat3 rotate = rotate2d(angle, ratio);

  mat3 op_a = center_and_scale * slide_right * rotate * slide_left * unscale_and_uncenter;
  mat3 op_b = center_and_scale * slide_left * rotate * slide_right * unscale_and_uncenter;

  bool mask_a = test_rounded_mask((op_a * p3).xy, corner_size);
  bool mask_b = test_rounded_mask((op_b * p3).xy, corner_size);

  if (mask_a || mask_b) {
    img = sample_with_corners_to(p3.xy, corner_size);
    return screen(mask_a ? img : black, mask_b ? img : black);
  } else {
    return black;
  }
}

vec4 transition(vec2 uv) {
  float a;
  vec2 p=uv.xy/vec2(1.0).xy;
  vec3 p3 = vec3(p.xy, 1.0); // for 2D matrix transforms

  // corner is warped to represent to size after mapping to 1.0, 1.0
  vec2 corner_size = vec2(corner_radius / ratio, corner_radius);

  if (progress <= 0.0) {
    // 0.0: start with the base frame always
    return getFromColor(p);
  } else if (progress < 0.1) {
    // 0.0-0.1: zoom out and add rounded corners
    a = progress / 0.1;
    return simple_sample_with_corners_from(p, corner_size * a, a);
  } else if (progress < 0.48) {
    // 0.1-0.48: Split original image apart
    a = (progress - 0.1)/0.38;
    return get_cross_rotated(p3, a, corner_size, ratio);
  } else if (progress < 0.9) {
    // 0.48-0.52: black
    // 0.52 - 0.9: unmask new image
    return get_cross_masked(p3, (progress - 0.52)/0.38, corner_size, ratio);
  } else if (progress < 1.0) {
    // zoom out and add rounded corners
    a = (1.0 - progress) / 0.1;
    return simple_sample_with_corners_to(p, corner_size * a, a);
  } else {
    // 1.0 end with base frame
    return getToColor(p);
  }
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/Swirl.glsl:
--------------------------------------------------------------------------------
// License: MIT
// Author: Sergey Kosarevsky
// ( http://www.linderdaum.com )
// ported by gre from https://gist.github.com/corporateshark/cacfedb8cca0f5ce3f7c

vec4 transition(vec2 UV)
{
  float Radius = 1.0;

  float T = progress;

  UV -= vec2( 0.5, 0.5 );

  float Dist = length(UV);

  if ( Dist < Radius )
  {
    float Percent = (Radius - Dist) / Radius;
    float A = ( T <= 0.5 ) ?
mix( 0.0, 1.0, T/0.5 ) : mix( 1.0, 0.0, (T-0.5)/0.5 );
    float Theta = Percent * Percent * A * 8.0 * 3.14159;
    float S = sin( Theta );
    float C = cos( Theta );
    UV = vec2( dot(UV, vec2(C, -S)), dot(UV, vec2(S, C)) );
  }
  UV += vec2( 0.5, 0.5 );

  vec4 C0 = getFromColor(UV);
  vec4 C1 = getToColor(UV);

  return mix( C0, C1, T );
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/TVStatic.glsl:
--------------------------------------------------------------------------------
// author: Brandon Anzaldi
// license: MIT
uniform float offset; // = 0.05

// Pseudo-random noise function
// http://byteblacksmith.com/improvements-to-the-canonical-one-liner-glsl-rand-for-opengl-es-2-0/
highp float noise(vec2 co)
{
  highp float a = 12.9898;
  highp float b = 78.233;
  highp float c = 43758.5453;
  highp float dt= dot(co.xy * progress, vec2(a, b));
  highp float sn= mod(dt,3.14);
  return fract(sin(sn) * c);
}

vec4 transition(vec2 p) {
  if (progress < offset) {
    return getFromColor(p);
  } else if (progress > (1.0 - offset)) {
    return getToColor(p);
  } else {
    return vec4(vec3(noise(p)), 1.0);
  }
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/TopBottom.glsl:
--------------------------------------------------------------------------------
// Author: zhmy
// License: MIT

const vec4 black = vec4(0.0, 0.0, 0.0, 1.0);
const vec2 boundMin = vec2(0.0, 0.0);
const vec2 boundMax = vec2(1.0, 1.0);

bool inBounds (vec2 p) {
  return all(lessThan(boundMin, p)) && all(lessThan(p, boundMax));
}

vec4 transition (vec2 uv) {
  vec2 spfr,spto = vec2(-1.);
  float size = mix(1.0, 3.0, progress*0.2);
  spto = (uv + vec2(-0.5,-0.5))*vec2(size,size)+vec2(0.5,0.5);
  spfr = (uv + vec2(0.0, 1.0 - progress));
  if(inBounds(spfr)){
    return getToColor(spfr);
  } else if(inBounds(spto)){
    return getFromColor(spto) * (1.0 - progress);
  } else{
    return black;
  }
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/WaterDrop.glsl:
--------------------------------------------------------------------------------
// author: Paweł Płóciennik
// license: MIT
uniform float amplitude; // = 30
uniform float speed; // = 30

vec4 transition(vec2 p) {
  vec2 dir = p - vec2(.5);
  float dist = length(dir);

  if (dist > progress) {
    return mix(getFromColor( p), getToColor( p), progress);
  } else {
    vec2 offset = dir * sin(dist * amplitude - progress * speed);
    return mix(getFromColor( p + offset), getToColor( p), progress);
  }
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/ZoomInCircles.glsl:
--------------------------------------------------------------------------------
// License: MIT
// Author: dycm8009
// ported by gre from https://gist.github.com/dycm8009/948e99b1800e81ad909a

vec2 zoom(vec2 uv, float amount) {
  return 0.5 + ((uv - 0.5) * amount);
}

vec2 ratio2 = vec2(1.0, 1.0 / ratio);

vec4 transition(vec2 uv) {
  // TODO: some timing are hardcoded but should be one or many parameters
  // TODO: should also be able to configure how much circles
  // TODO: if() branching should be avoided when possible, prefer use of step() & other functions
  vec2 r = 2.0 * ((vec2(uv.xy) - 0.5) * ratio2);
  float pro = progress / 0.8;
  float z = pro * 0.2;
  float t = 0.0;
  if (pro > 1.0) {
    z = 0.2 + (pro - 1.0) * 5.;
    t = clamp((progress - 0.8) / 0.07, 0.0, 1.0);
  }
  if (length(r) < 0.5+z) {
    // uv = zoom(uv, 0.9 - 0.1 * pro);
  }
  else if (length(r) < 0.8+z*1.5) {
    uv = zoom(uv, 1.0 - 0.15 * pro);
    t = t * 0.5;
  }
  else if (length(r) < 1.2+z*2.5) {
    uv = zoom(uv, 1.0 - 0.2 * pro);
    t = t * 0.2;
  }
  else {
    uv = zoom(uv, 1.0 - 0.25 * pro);
  }
  return mix(getFromColor(uv), getToColor(uv), t);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/angular.glsl:
--------------------------------------------------------------------------------
// Author: Fernando Kuteken
// License: MIT

#define PI 3.141592653589

uniform float startingAngle; // = 90;

vec4 transition (vec2 uv) {

  float offset = startingAngle * PI / 180.0;
  float angle = atan(uv.y - 0.5, uv.x - 0.5) + offset;
  float normalizedAngle = (angle + PI) / (2.0 * PI);

  normalizedAngle = normalizedAngle - floor(normalizedAngle);

  return mix(
    getFromColor(uv),
    getToColor(uv),
    step(normalizedAngle, progress)
    );
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/burn.glsl:
--------------------------------------------------------------------------------
// author: gre
// License: MIT
uniform vec3 color /* = vec3(0.9, 0.4, 0.2) */;
vec4 transition (vec2 uv) {
  return mix(
    getFromColor(uv) + vec4(progress*color, 1.0),
    getToColor(uv) + vec4((1.0-progress)*color, 1.0),
    progress
  );
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/cannabisleaf.glsl:
--------------------------------------------------------------------------------
// Author: @Flexi23
// License: MIT

// inspired by http://www.wolframalpha.com/input/?i=cannabis+curve

vec4 transition (vec2 uv) {
  if(progress == 0.0){
    return getFromColor(uv);
  }
  vec2 leaf_uv = (uv - vec2(0.5))/10./pow(progress,3.5);
  leaf_uv.y += 0.35;
  float r = 0.18;
  float o = atan(leaf_uv.y, leaf_uv.x);
  return mix(getFromColor(uv), getToColor(uv), 1.-step(1. - length(leaf_uv)+r*(1.+sin(o))*(1.+0.9 * cos(8.*o))*(1.+0.1*cos(24.*o))*(0.9+0.05*cos(200.*o)), 1.));
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/circle.glsl:
--------------------------------------------------------------------------------
// Author: Fernando Kuteken
// License: MIT

uniform vec2 center; // = vec2(0.5, 0.5);
uniform vec3 backColor; // = vec3(0.1, 0.1, 0.1);

vec4 transition (vec2 uv) {

  float distance = length(uv - center);
  float radius = sqrt(8.0) * abs(progress - 0.5);

  if (distance > radius) {
    return vec4(backColor, 1.0);
  }
  else {
    if (progress < 0.5) return getFromColor(uv);
    else return getToColor(uv);
  }
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/circleopen.glsl:
--------------------------------------------------------------------------------
// author: gre
// License: MIT
uniform float smoothness; // = 0.3
uniform bool opening; // = true

const vec2 center = vec2(0.5, 0.5);
const float SQRT_2 = 1.414213562373;

vec4 transition (vec2 uv) {
  float x = opening ? progress : 1.-progress;
  float m = smoothstep(-smoothness, 0.0, SQRT_2*distance(center, uv) - x*(1.+smoothness));
  return mix(getFromColor(uv), getToColor(uv), opening ? 1.-m : m);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/colorphase.glsl:
--------------------------------------------------------------------------------
// Author: gre
// License: MIT

// Usage: fromStep and toStep must be in [0.0, 1.0] range
// and all(fromStep) must be < all(toStep)

uniform vec4 fromStep; // = vec4(0.0, 0.2, 0.4, 0.0)
uniform vec4 toStep; // = vec4(0.6, 0.8, 1.0, 1.0)

vec4 transition (vec2 uv) {
  vec4 a = getFromColor(uv);
  vec4 b = getToColor(uv);
  return mix(a, b, smoothstep(fromStep, toStep, vec4(progress)));
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/crosshatch.glsl:
--------------------------------------------------------------------------------
// License: MIT
// Author: pthrasher
// adapted by gre from https://gist.github.com/pthrasher/04fd9a7de4012cbb03f6

uniform vec2 center; // = vec2(0.5)
uniform float threshold; // = 3.0
uniform float fadeEdge; // = 0.1

float rand(vec2 co) {
  return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453);
}
vec4 transition(vec2 p) {
  float dist = distance(center, p) / threshold;
  float r = progress - min(rand(vec2(p.y, 0.0)), rand(vec2(0.0, p.x)));
  return mix(getFromColor(p), getToColor(p), mix(0.0, mix(step(dist, r), 1.0, smoothstep(1.0-fadeEdge, 1.0, progress)), smoothstep(0.0, fadeEdge, progress)));
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/crosswarp.glsl:
--------------------------------------------------------------------------------
// Author: Eke Péter <peterekepeter@gmail.com>
// License: MIT
vec4 transition(vec2 p) {
  float x = progress;
  x=smoothstep(.0,1.0,(x*2.0+p.x-1.0));
  return mix(getFromColor((p-.5)*(1.-x)+.5), getToColor((p-.5)*x+.5), x);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/cube.glsl:
--------------------------------------------------------------------------------
// Author: gre
// License: MIT
uniform float persp; // = 0.7
uniform float unzoom; // = 0.3
uniform float reflection; // = 0.4
uniform float floating; // = 3.0

vec2 project (vec2 p) {
  return p * vec2(1.0, -1.2) + vec2(0.0, -floating/100.);
}

bool inBounds (vec2 p) {
  return all(lessThan(vec2(0.0), p)) && all(lessThan(p, vec2(1.0)));
}

vec4 bgColor (vec2 p, vec2 pfr, vec2 pto) {
  vec4 c = vec4(0.0, 0.0, 0.0, 1.0);
  pfr = project(pfr);
  // FIXME avoid branching might help perf!
  if (inBounds(pfr)) {
    c += mix(vec4(0.0), getFromColor(pfr), reflection * mix(1.0, 0.0, pfr.y));
  }
  pto = project(pto);
  if (inBounds(pto)) {
    c += mix(vec4(0.0), getToColor(pto), reflection * mix(1.0, 0.0, pto.y));
  }
  return c;
}

// p : the position
// persp : the perspective in [ 0, 1 ]
// center : the xcenter in [0, 1] \ 0.5 excluded
vec2 xskew (vec2 p, float persp, float center) {
  float x = mix(p.x, 1.0-p.x, center);
  return (
    (
      vec2( x, (p.y - 0.5*(1.0-persp) * x) / (1.0+(persp-1.0)*x) )
      - vec2(0.5-distance(center, 0.5), 0.0)
    )
    * vec2(0.5 / distance(center, 0.5) * (center<0.5 ? 1.0 : -1.0), 1.0)
    + vec2(center<0.5 ? 0.0 : 1.0, 0.0)
  );
}

vec4 transition(vec2 op) {
  float uz = unzoom * 2.0*(0.5-distance(0.5, progress));
  vec2 p = -uz*0.5+(1.0+uz) * op;
  vec2 fromP = xskew(
    (p - vec2(progress, 0.0)) / vec2(1.0-progress, 1.0),
    1.0-mix(progress, 0.0, persp),
    0.0
  );
  vec2 toP = xskew(
    p / vec2(progress, 1.0),
    mix(pow(progress, 2.0), 1.0, persp),
    1.0
  );
  // FIXME avoid branching might help perf!
  if (inBounds(fromP)) {
    return getFromColor(fromP);
  }
  else if (inBounds(toP)) {
    return getToColor(toP);
  }
  return bgColor(op, fromP, toP);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/directional-easing.glsl:
--------------------------------------------------------------------------------
// Author: Max Plotnikov
// License: MIT

uniform vec2 direction; // = vec2(0.0, 1.0)

vec4 transition (vec2 uv) {
  float easing = sqrt((2.0 - progress) * progress);
  vec2 p = uv + easing * sign(direction);
  vec2 f = fract(p);
  return mix(
    getToColor(f),
    getFromColor(f),
    step(0.0, p.y) * step(p.y, 1.0) * step(0.0, p.x) * step(p.x, 1.0)
  );
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/directionalwarp.glsl:
--------------------------------------------------------------------------------
// Author: pschroen
// License: MIT

uniform vec2 direction; // = vec2(-1.0, 1.0)

const float smoothness = 0.5;
const vec2 center = vec2(0.5, 0.5);

vec4 transition (vec2 uv) {
  vec2 v = normalize(direction);
  v /= abs(v.x) + abs(v.y);
  float d = v.x * center.x + v.y * center.y;
  float m = 1.0 - smoothstep(-smoothness, 0.0, v.x * uv.x + v.y * uv.y - (d - 0.5 + progress * (1.0 + smoothness)));
  return mix(getFromColor((uv - 0.5) * (1.0 - m) + 0.5), getToColor((uv - 0.5) * m + 0.5), m);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/directionalwipe.glsl:
--------------------------------------------------------------------------------
// Author: gre
// License: MIT

uniform vec2 direction; // = vec2(1.0, -1.0)
uniform float smoothness; // = 0.5

const vec2 center = vec2(0.5, 0.5);

vec4 transition (vec2 uv) {
  vec2 v = normalize(direction);
  v /= abs(v.x)+abs(v.y);
  float d = v.x * center.x + v.y * center.y;
  float m =
    (1.0-step(progress, 0.0)) * // there is something wrong with our formula that makes m not equals 0.0 with progress is 0.0
    (1.0 - smoothstep(-smoothness, 0.0, v.x * uv.x + v.y * uv.y - (d-0.5+progress*(1.+smoothness))));
  return mix(getFromColor(uv), getToColor(uv), m);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/displacement.glsl:
--------------------------------------------------------------------------------
// Author: Travis Fischer
// License: MIT
//
// Adapted from a Codrops article by Robin Delaporte
// https://tympanus.net/Development/DistortionHoverEffect

uniform sampler2D displacementMap;

uniform float strength; // = 0.5

vec4 transition (vec2 uv) {
  float displacement = texture2D(displacementMap, uv).r * strength;

  vec2 uvFrom = vec2(uv.x + progress * displacement, uv.y);
  vec2 uvTo = vec2(uv.x - (1.0 - progress) * displacement, uv.y);

  return mix(
    getFromColor(uvFrom),
    getToColor(uvTo),
    progress
  );
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/doorway.glsl:
--------------------------------------------------------------------------------
// author: gre
// License: MIT
uniform float reflection; // = 0.4
uniform float perspective; // = 0.4
uniform float depth; // = 3

const vec4 black = vec4(0.0, 0.0, 0.0, 1.0);
const vec2 boundMin = vec2(0.0, 0.0);
const vec2 boundMax = vec2(1.0, 1.0);

bool inBounds (vec2 p) {
  return all(lessThan(boundMin, p)) && all(lessThan(p, boundMax));
}

vec2 project (vec2 p) {
  return p * vec2(1.0, -1.2) + vec2(0.0, -0.02);
}

vec4 bgColor (vec2 p, vec2 pto) {
  vec4 c = black;
  pto = project(pto);
  if (inBounds(pto)) {
    c += mix(black, getToColor(pto), reflection * mix(1.0, 0.0, pto.y));
  }
  return c;
}


vec4 transition (vec2 p) {
  vec2 pfr = vec2(-1.), pto = vec2(-1.);
  float middleSlit = 2.0 * abs(p.x-0.5) - progress;
  if (middleSlit > 0.0) {
    pfr = p + (p.x > 0.5 ? -1.0 : 1.0) * vec2(0.5*progress, 0.0);
    float d = 1.0/(1.0+perspective*progress*(1.0-middleSlit));
    pfr.y -= d/2.;
    pfr.y *= d;
    pfr.y += d/2.;
  }
  float size = mix(1.0, depth, 1.-progress);
  pto = (p + vec2(-0.5, -0.5)) * vec2(size, size) + vec2(0.5, 0.5);
  if (inBounds(pfr)) {
    return getFromColor(pfr);
  }
  else if (inBounds(pto)) {
    return getToColor(pto);
  }
  else {
    return bgColor(p, pto);
  }
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/fade.glsl:
--------------------------------------------------------------------------------
// author: gre
// license: MIT

vec4 transition (vec2 uv) {
  return mix(
    getFromColor(uv),
    getToColor(uv),
    progress
  );
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/fadecolor.glsl:
--------------------------------------------------------------------------------
// author: gre
// License: MIT
uniform vec3 color;// = vec3(0.0)
uniform float colorPhase; // = 0.4 ; // if 0.0, there is no black phase, if 0.9, the black phase is very important
vec4 transition (vec2 uv) {
  return mix(
    mix(vec4(color, 1.0), getFromColor(uv), smoothstep(1.0-colorPhase, 0.0, progress)),
    mix(vec4(color, 1.0), getToColor(uv), smoothstep( colorPhase, 1.0, progress)),
    progress);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/fadegrayscale.glsl:
--------------------------------------------------------------------------------
// Author: gre
// License: MIT

uniform float intensity; // = 0.3; // if 0.0, the image directly turn grayscale, if 0.9, the grayscale transition phase is very important

vec3 grayscale (vec3 color) {
  return vec3(0.2126*color.r + 0.7152*color.g + 0.0722*color.b);
}

vec4 transition (vec2 uv) {
  vec4 fc = getFromColor(uv);
  vec4 tc = getToColor(uv);
  return mix(
    mix(vec4(grayscale(fc.rgb), 1.0), fc, smoothstep(1.0-intensity, 0.0, progress)),
    mix(vec4(grayscale(tc.rgb), 1.0), tc, smoothstep( intensity, 1.0, progress)),
    progress);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/flyeye.glsl:
--------------------------------------------------------------------------------
// Author: gre
// License: MIT
uniform float size; // = 0.04
uniform float zoom; // = 50.0
uniform float colorSeparation; // = 0.3

vec4 transition(vec2 p) {
  float inv = 1. - progress;
  vec2 disp = size*vec2(cos(zoom*p.x), sin(zoom*p.y));
  vec4 texTo = getToColor(p + inv*disp);
  // per-channel offsets give a chromatic-aberration look on the "from" image
  vec4 texFrom = vec4(
    getFromColor(p + progress*disp*(1.0 - colorSeparation)).r,
    getFromColor(p + progress*disp).g,
    getFromColor(p + progress*disp*(1.0 + colorSeparation)).b,
    1.0);
  return texTo*progress + texFrom*inv;
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/heart.glsl:
--------------------------------------------------------------------------------
// Author: gre
// License: MIT

float inHeart (vec2 p, vec2 center, float size) {
  if (size==0.0) return 0.0;
  vec2 o = (p-center)/(1.6*size);
  float a = o.x*o.x+o.y*o.y-0.3;
  return step(a*a*a, o.x*o.x*o.y*o.y*o.y);
}
vec4 transition (vec2 uv) {
  return mix(
    getFromColor(uv),
    getToColor(uv),
    inHeart(uv, vec2(0.5, 0.4), progress)
  );
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/hexagonalize.glsl:
--------------------------------------------------------------------------------
// Author: Fernando Kuteken
// License: MIT
// Hexagonal math from: http://www.redblobgames.com/grids/hexagons/

uniform int steps; // = 50;
uniform float horizontalHexagons; //= 20;

struct Hexagon {
  float q;
  float r;
  float s;
};

Hexagon createHexagon(float q, float r){
  Hexagon hex;
  hex.q = q;
  hex.r = r;
  hex.s = -q - r;
  return hex;
}

Hexagon roundHexagon(Hexagon hex){

  float q = floor(hex.q + 0.5);
  float r = floor(hex.r + 0.5);
  float s = floor(hex.s + 0.5);

  float deltaQ = abs(q - hex.q);
  float deltaR = abs(r - hex.r);
  float deltaS = abs(s - hex.s);

  if (deltaQ > deltaR && deltaQ > deltaS)
    q = -r - s;
  else if (deltaR > deltaS)
    r = -q - s;
  else
    s = -q - r;

  return createHexagon(q, r);
}

Hexagon hexagonFromPoint(vec2 point, float size) {

  point.y /= ratio;
  point = (point - 0.5) / size;

  float q = (sqrt(3.0) / 3.0) * point.x + (-1.0 / 3.0) * point.y;
  float r = 0.0 * point.x + 2.0 / 3.0 * point.y;

  Hexagon hex = createHexagon(q, r);
  return roundHexagon(hex);

}

vec2 pointFromHexagon(Hexagon hex, float size) {

  float x = (sqrt(3.0) * hex.q + (sqrt(3.0) / 2.0) * hex.r) * size + 0.5;
  float y = (0.0 * hex.q + (3.0 / 2.0) * hex.r) * size + 0.5;

  return vec2(x, y * ratio);
}

vec4 transition (vec2 uv) {

  float dist = 2.0 * min(progress, 1.0 - progress);
  dist = steps > 0 ? ceil(dist * float(steps)) / float(steps) : dist;

  float size = (sqrt(3.0) / 3.0) * dist / horizontalHexagons;

  vec2 point = dist > 0.0 ? pointFromHexagon(hexagonFromPoint(uv, size), size) : uv;

  return mix(getFromColor(point), getToColor(point), progress);

}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/kaleidoscope.glsl:
--------------------------------------------------------------------------------
// Author: nwoeanhinnogaehr
// License: MIT

uniform float speed; // = 1.0;
uniform float angle; // = 1.0;
uniform float power; // = 1.5;

vec4 transition(vec2 uv) {
  vec2 p = uv.xy / vec2(1.0).xy;
  vec2 q = p;
  float t = pow(progress, power)*speed;
  p = p -0.5;
  for (int i = 0; i < 7; i++) {
    p = vec2(sin(t)*p.x + cos(t)*p.y, sin(t)*p.y - cos(t)*p.x);
    t += angle;
    p = abs(mod(p, 2.0) - 1.0);
  }
  // NOTE(review): the original had a bare statement `abs(mod(p, 1.0));` here.
  // Its result was discarded and GLSL built-ins have no side effects, so it
  // was dead code — removed without behavior change.
  return mix(
    mix(getFromColor(q), getToColor(q), progress),
    mix(getFromColor(p), getToColor(p), progress), 1.0 - 2.0*abs(progress - 0.5));
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/luma.glsl:
--------------------------------------------------------------------------------
// Author: gre
// License: MIT

uniform sampler2D luma;

vec4 transition(vec2 uv) {
  return mix(
    getToColor(uv),
    getFromColor(uv),
    step(progress, texture2D(luma, uv).r)
  );
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/luminance_melt.glsl:
--------------------------------------------------------------------------------
// Author: 0gust1
// License: MIT
// My own first transition — based on crosshatch code (from pthrasher), using simplex noise formula (copied and pasted)
// -> cooler with high contrasted images (isolated dark subject on light background f.e.)
// TODO : try to rebase it on DoomTransition (from zeh)?
// optimizations :
// luminance (see http://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color#answer-596241)
// Y = (R+R+B+G+G+G)/6
// or Y = (R+R+R+B+G+G+G+G)>>3


// direction of movement : 0 : up, 1, down
uniform bool direction; // = 1
// luminance threshold
uniform float l_threshold; // = 0.8
// does the movement takes effect above or below luminance threshold ?
uniform bool above; // = false


// Random function borrowed from everywhere
float rand(vec2 co){
  return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453);
}


// Simplex noise :
// Description : Array and textureless GLSL 2D simplex noise function.
//      Author : Ian McEwan, Ashima Arts.
//  Maintainer : ijm
//     Lastmod : 20110822 (ijm)
//     License : MIT
//               2011 Ashima Arts. All rights reserved.
//               Distributed under the MIT License. See LICENSE file.
//               https://github.com/ashima/webgl-noise
//

vec3 mod289(vec3 x) {
  return x - floor(x * (1.0 / 289.0)) * 289.0;
}

vec2 mod289(vec2 x) {
  return x - floor(x * (1.0 / 289.0)) * 289.0;
}

vec3 permute(vec3 x) {
  return mod289(((x*34.0)+1.0)*x);
}

float snoise(vec2 v)
{
  const vec4 C = vec4(0.211324865405187,  // (3.0-sqrt(3.0))/6.0
                      0.366025403784439,  // 0.5*(sqrt(3.0)-1.0)
                     -0.577350269189626,  // -1.0 + 2.0 * C.x
                      0.024390243902439); // 1.0 / 41.0
  // First corner
  vec2 i  = floor(v + dot(v, C.yy) );
  vec2 x0 = v - i + dot(i, C.xx);

  // Other corners
  vec2 i1;
  //i1.x = step( x0.y, x0.x ); // x0.x > x0.y ? 1.0 : 0.0
  //i1.y = 1.0 - i1.x;
  i1 = (x0.x > x0.y) ? vec2(1.0, 0.0) : vec2(0.0, 1.0);
  // x0 = x0 - 0.0 + 0.0 * C.xx ;
  // x1 = x0 - i1 + 1.0 * C.xx ;
  // x2 = x0 - 1.0 + 2.0 * C.xx ;
  vec4 x12 = x0.xyxy + C.xxzz;
  x12.xy -= i1;

  // Permutations
  i = mod289(i); // Avoid truncation effects in permutation
  vec3 p = permute( permute( i.y + vec3(0.0, i1.y, 1.0 ))
    + i.x + vec3(0.0, i1.x, 1.0 ));

  vec3 m = max(0.5 - vec3(dot(x0,x0), dot(x12.xy,x12.xy), dot(x12.zw,x12.zw)), 0.0);
  m = m*m ;
  m = m*m ;

  // Gradients: 41 points uniformly over a line, mapped onto a diamond.
  // The ring size 17*17 = 289 is close to a multiple of 41 (41*7 = 287)

  vec3 x = 2.0 * fract(p * C.www) - 1.0;
  vec3 h = abs(x) - 0.5;
  vec3 ox = floor(x + 0.5);
  vec3 a0 = x - ox;

  // Normalise gradients implicitly by scaling m
  // Approximation of: m *= inversesqrt( a0*a0 + h*h );
  m *= 1.79284291400159 - 0.85373472095314 * ( a0*a0 + h*h );

  // Compute final noise value at P
  vec3 g;
  g.x  = a0.x  * x0.x  + h.x  * x0.y;
  g.yz = a0.yz * x12.xz + h.yz * x12.yw;
  return 130.0 * dot(m, g);
}

// Simplex noise -- end

float luminance(vec4 color){
  //(0.299*R + 0.587*G + 0.114*B)
  return color.r*0.299+color.g*0.587+color.b*0.114;
}

vec2 center = vec2(1.0, direction);

// NOTE(review): from this point up to the head of pixelize.glsl the dump was
// corrupted — everything between a '<' and the following '>' had been stripped
// (HTML-tag stripping), which deleted the else-branch comparison below and the
// whole files morph.glsl, multiply_blend.glsl, perlin.glsl and pinwheel.glsl.
// The content below is restored from the upstream gl-transitions collection;
// verify against the revision this project vendored.
vec4 transition(vec2 uv) {
  vec2 p = uv.xy / vec2(1.0).xy;
  if (progress == 0.0) {
    return getFromColor(p);
  } else if (progress == 1.0) {
    return getToColor(p);
  } else {
    float x = progress;
    float dist = distance(center, p)- progress*exp(snoise(vec2(p.x, 0.0)));
    float r = x - rand(vec2(p.x, 0.1));
    float m;
    if(above){
      m = dist <= r && luminance(getFromColor(p))>l_threshold ? 1.0 : (progress*progress*progress);
    }
    else{
      m = dist <= r && luminance(getFromColor(p))<l_threshold ? 1.0 : (progress*progress*progress);
    }
    return mix(getFromColor(p), getToColor(p), m);
  }
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/morph.glsl:
--------------------------------------------------------------------------------
// Author: paniq
// License: MIT
uniform float strength; // = 0.1

vec4 transition(vec2 p) {
  vec4 ca = getFromColor(p);
  vec4 cb = getToColor(p);

  vec2 oa = (((ca.rg+ca.b)*0.5)*2.0-1.0);
  vec2 ob = (((cb.rg+cb.b)*0.5)*2.0-1.0);
  vec2 oc = mix(oa,ob,0.5)*strength;

  float w0 = progress;
  float w1 = 1.0-w0;
  return mix(getFromColor(p+oc*w0), getToColor(p-oc*w1), progress);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/multiply_blend.glsl:
--------------------------------------------------------------------------------
// Author: Fernando Kuteken
// License: MIT

vec4 blend(vec4 a, vec4 b) {
  return a * b;
}

vec4 transition (vec2 uv) {

  vec4 blended = blend(getFromColor(uv), getToColor(uv));

  if (progress < 0.5)
    return mix(getFromColor(uv), blended, 2.0 * progress);
  else
    return mix(blended, getToColor(uv), 2.0 * progress - 1.0);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/perlin.glsl:
--------------------------------------------------------------------------------
// Author: Rich Harris
// License: MIT

uniform float scale; // = 4.0
uniform float smoothness; // = 0.01
uniform float seed; // = 12.9898

// http://byteblacksmith.com/improvements-to-the-canonical-one-liner-glsl-rand-for-opengl-es-2-0/
float random(vec2 co)
{
  highp float a = seed;
  highp float b = 78.233;
  highp float c = 43758.5453;
  highp float dt = dot(co.xy, vec2(a, b));
  highp float sn = mod(dt, 3.14);
  return fract(sin(sn) * c);
}

// 2D Noise based on Morgan McGuire @morgan3d
// https://www.shadertoy.com/view/4dS3Wd
float noise (in vec2 st) {
  vec2 i = floor(st);
  vec2 f = fract(st);

  // Four corners in 2D of a tile
  float a = random(i);
  float b = random(i + vec2(1.0, 0.0));
  float c = random(i + vec2(0.0, 1.0));
  float d = random(i + vec2(1.0, 1.0));

  // Cubic Hermite curve, same as smoothstep()
  vec2 u = f*f*(3.0-2.0*f);

  // Mix the four corners
  return mix(a, b, u.x) + (c - a)*u.y*(1.0 - u.x) + (d - b)*u.x*u.y;
}

vec4 transition (vec2 uv) {
  vec4 from = getFromColor(uv);
  vec4 to = getToColor(uv);
  float n = noise(uv * scale);

  float p = mix(-smoothness, 1.0 + smoothness, progress);
  float lower = p - smoothness;
  float higher = p + smoothness;

  float q = smoothstep(lower, higher, n);

  return mix(
    from,
    to,
    1.0 - q
  );
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/pinwheel.glsl:
--------------------------------------------------------------------------------
// Author: Mr Speaker
// License: MIT

uniform float speed; // = 2.0;

vec4 transition(vec2 uv) {

  vec2 p = uv.xy / vec2(1.0).xy;

  float circPos = atan(p.y - 0.5, p.x - 0.5) + progress * speed;
  float modPos = mod(circPos, 3.1415 / 4.);
  float signed = sign(progress - modPos);

  return mix(getToColor(p), getFromColor(p), step(signed, 0.5));

}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/pixelize.glsl:
--------------------------------------------------------------------------------
// Author: gre
// License: MIT
// forked from https://gist.github.com/benraziel/c528607361d90a072e98

uniform ivec2 squaresMin; // = ivec2(20) // minimum number of squares (when the transition is at its center)
uniform int steps; // = 50 // zero disable the stepping

float d = min(progress, 1.0 - progress);
float dist = steps>0 ? ceil(d * float(steps)) / float(steps) : d;
vec2 squareSize = 2.0 * dist / vec2(squaresMin);

vec4 transition(vec2 uv) {
  vec2 p = dist>0.0 ?
(floor(uv / squareSize) + 0.5) * squareSize : uv;
  return mix(getFromColor(p), getToColor(p), progress);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/polar_function.glsl:
--------------------------------------------------------------------------------
// Author: Fernando Kuteken
// License: MIT

#define PI 3.14159265359

uniform int segments; // = 5;

vec4 transition (vec2 uv) {

  float angle = atan(uv.y - 0.5, uv.x - 0.5) - 0.5 * PI;
  // NOTE(review): the original computed an unused local
  // `float normalized = (angle + 1.5 * PI) * (2.0 * PI);` — dead code, removed.

  float radius = (cos(float(segments) * angle) + 4.0) / 4.0;
  float difference = length(uv - vec2(0.5, 0.5));

  if (difference > radius * progress)
    return getFromColor(uv);
  else
    return getToColor(uv);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/randomNoisex.glsl:
--------------------------------------------------------------------------------
// Author: towrabbit
// License: MIT

float random (vec2 st) {
  return fract(sin(dot(st.xy,vec2(12.9898,78.233)))*43758.5453123);
}
vec4 transition (vec2 uv) {
  vec4 leftSide = getFromColor(uv);
  // noise threshold rises with progress, so pixels flip from 0 to 1 over time
  float uvz = floor(random(uv)+progress);
  vec4 rightSide = getToColor(uv);
  return mix(leftSide,rightSide,uvz);
  // NOTE(review): the original continued with
  //   float p = progress*2.0;
  //   return leftSide * ceil(uv.x*2.-p) + rightSide * ceil(-uv.x*2.+p);
  // which was unreachable after the return above — dead code, removed along
  // with unused locals uv1/uv2.
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/randomsquares.glsl:
--------------------------------------------------------------------------------
// Author: gre
// License: MIT

uniform ivec2 size; // = ivec2(10, 10)
uniform float smoothness; // = 0.5

float rand (vec2 co) {
  return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453);
}

vec4 transition(vec2 p) {
  float r = rand(floor(vec2(size) * p));
  float m = smoothstep(0.0, -smoothness, r - (progress * (1.0 + smoothness)));
  return mix(getFromColor(p), getToColor(p), m);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/ripple.glsl:
--------------------------------------------------------------------------------
// Author: gre
// License: MIT
uniform float amplitude; // = 100.0
uniform float speed; // = 50.0

vec4 transition (vec2 uv) {
  vec2 dir = uv - vec2(.5);
  float dist = length(dir);
  vec2 offset = dir * (sin(progress * dist * amplitude - progress * speed) + .5) / 30.;
  return mix(
    getFromColor(uv + offset),
    getToColor(uv),
    smoothstep(0.2, 1.0, progress)
  );
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/rotate_scale_fade.glsl:
--------------------------------------------------------------------------------
// Author: Fernando Kuteken
// License: MIT

#define PI 3.14159265359

uniform vec2 center; // = vec2(0.5, 0.5);
uniform float rotations; // = 1;
uniform float scale; // = 8;
uniform vec4 backColor; // = vec4(0.15, 0.15, 0.15, 1.0);

vec4 transition (vec2 uv) {

  vec2 difference = uv - center;
  vec2 dir = normalize(difference);
  float dist = length(difference);

  float angle = 2.0 * PI * rotations * progress;

  float c = cos(angle);
  float s = sin(angle);

  float currentScale = mix(scale, 1.0, 2.0 * abs(progress - 0.5));

  vec2 rotatedDir = vec2(dir.x * c - dir.y * s, dir.x * s + dir.y * c);
  vec2 rotatedUv = center + rotatedDir * dist / currentScale;

  if (rotatedUv.x < 0.0 || rotatedUv.x > 1.0 ||
      rotatedUv.y < 0.0 || rotatedUv.y > 1.0)
    return backColor;

  return mix(getFromColor(rotatedUv), getToColor(rotatedUv), progress);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/squareswire.glsl:
--------------------------------------------------------------------------------
// Author: gre
// License: MIT

uniform ivec2 squares;// = ivec2(10,10)
uniform vec2 direction;// = vec2(1.0, -0.5)
uniform float smoothness; // = 1.6

const vec2 center = vec2(0.5, 0.5);
vec4 transition (vec2 p) {
  vec2 v = normalize(direction);
  v /= abs(v.x)+abs(v.y);
  float d = v.x * center.x + v.y * center.y;
  float offset = smoothness;
  float pr = smoothstep(-offset, 0.0, v.x * p.x + v.y * p.y - (d-0.5+progress*(1.+offset)));
  vec2 squarep = fract(p*vec2(squares));
  vec2 squaremin = vec2(pr/2.0);
  vec2 squaremax = vec2(1.0 - pr/2.0);
  float a = (1.0 - step(progress, 0.0)) * step(squaremin.x, squarep.x) * step(squaremin.y, squarep.y) * step(squarep.x, squaremax.x) * step(squarep.y, squaremax.y);
  return mix(getFromColor(p), getToColor(p), a);
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/squeeze.glsl:
--------------------------------------------------------------------------------
// Author: gre
// License: MIT

uniform float colorSeparation; // = 0.04

vec4 transition (vec2 uv) {
  float y = 0.5 + (uv.y-0.5) / (1.0-progress);
  if (y < 0.0 || y > 1.0) {
    return getToColor(uv);
  }
  else {
    vec2 fp = vec2(uv.x, y);
    vec2 off = progress * vec2(0.0, colorSeparation);
    vec4 c = getFromColor(fp);
    vec4 cn = getFromColor(fp - off);
    vec4 cp = getFromColor(fp + off);
    return vec4(cn.r, c.g, cp.b, c.a);
  }
}
--------------------------------------------------------------------------------
/ffmpeg/transitions/glsl/swap.glsl:
--------------------------------------------------------------------------------
| // Author: gre 2 | // License: MIT 3 | // General parameters 4 | uniform float reflection; // = 0.4 5 | uniform float perspective; // = 0.2 6 | uniform float depth; // = 3.0 7 | 8 | const vec4 black = vec4(0.0, 0.0, 0.0, 1.0); 9 | const vec2 boundMin = vec2(0.0, 0.0); 10 | const vec2 boundMax = vec2(1.0, 1.0); 11 | 12 | bool inBounds (vec2 p) { 13 | return all(lessThan(boundMin, p)) && all(lessThan(p, boundMax)); 14 | } 15 | 16 | vec2 project (vec2 p) { 17 | return p * vec2(1.0, -1.2) + vec2(0.0, -0.02); 18 | } 19 | 20 | vec4 bgColor (vec2 p, vec2 pfr, vec2 pto) { 21 | vec4 c = black; 22 | pfr = project(pfr); 23 | if (inBounds(pfr)) { 24 | c += mix(black, getFromColor(pfr), reflection * mix(1.0, 0.0, pfr.y)); 25 | } 26 | pto = project(pto); 27 | if (inBounds(pto)) { 28 | c += mix(black, getToColor(pto), reflection * mix(1.0, 0.0, pto.y)); 29 | } 30 | return c; 31 | } 32 | 33 | vec4 transition(vec2 p) { 34 | vec2 pfr, pto = vec2(-1.); 35 | 36 | float size = mix(1.0, depth, progress); 37 | float persp = perspective * progress; 38 | pfr = (p + vec2(-0.0, -0.5)) * vec2(size/(1.0-perspective*progress), size/(1.0-size*persp*p.x)) + vec2(0.0, 0.5); 39 | 40 | size = mix(1.0, depth, 1.-progress); 41 | persp = perspective * (1.-progress); 42 | pto = (p + vec2(-1.0, -0.5)) * vec2(size/(1.0-perspective*(1.0-progress)), size/(1.0-size*persp*(0.5-p.x))) + vec2(1.0, 0.5); 43 | 44 | if (progress < 0.5) { 45 | if (inBounds(pfr)) { 46 | return getFromColor(pfr); 47 | } 48 | if (inBounds(pto)) { 49 | return getToColor(pto); 50 | } 51 | } 52 | if (inBounds(pto)) { 53 | return getToColor(pto); 54 | } 55 | if (inBounds(pfr)) { 56 | return getFromColor(pfr); 57 | } 58 | return bgColor(p, pfr, pto); 59 | } 60 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/tangentMotionBlur.glsl: -------------------------------------------------------------------------------- 1 | 2 | // License: MIT 3 | // Author: chenkai 4 | // ported from 
https://codertw.com/%E7%A8%8B%E5%BC%8F%E8%AA%9E%E8%A8%80/671116/ 5 | 6 | float rand (vec2 co) { 7 | return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); 8 | } 9 | 10 | // motion blur for texture from 11 | vec4 motionBlurFrom(vec2 _st, vec2 speed) { 12 | vec2 texCoord = _st.xy / vec2(1.0).xy; 13 | vec3 color = vec3(0.0); 14 | float total = 0.0; 15 | float offset = rand(_st); 16 | for (float t = 0.0; t <= 20.0; t++) { 17 | float percent = (t + offset) / 20.0; 18 | float weight = 4.0 * (percent - percent * percent); 19 | vec2 newuv = texCoord + speed * percent; 20 | newuv = fract(newuv); 21 | color += getFromColor(newuv).rgb * weight; 22 | total += weight; 23 | } 24 | return vec4(color / total, 1.0); 25 | } 26 | 27 | // motion blur for texture to 28 | vec4 motionBlurTo(vec2 _st, vec2 speed) { 29 | vec2 texCoord = _st.xy / vec2(1.0).xy; 30 | vec3 color = vec3(0.0); 31 | float total = 0.0; 32 | float offset = rand(_st); 33 | for (float t = 0.0; t <= 20.0; t++) { 34 | float percent = (t + offset) / 20.0; 35 | float weight = 4.0 * (percent - percent * percent); 36 | vec2 newuv = texCoord + speed * percent; 37 | newuv = fract(newuv); 38 | color += getToColor(newuv).rgb * weight; 39 | total += weight; 40 | } 41 | return vec4(color / total, 1.0); 42 | } 43 | 44 | 45 | // bezier in gpu 46 | float A(float aA1, float aA2) { 47 | return 1.0 - 3.0 * aA2 + 3.0 * aA1; 48 | } 49 | float B(float aA1, float aA2) { 50 | return 3.0 * aA2 - 6.0 * aA1; 51 | } 52 | float C(float aA1) { 53 | return 3.0 * aA1; 54 | } 55 | float GetSlope(float aT, float aA1, float aA2) { 56 | return 3.0 * A(aA1, aA2)*aT*aT + 2.0 * B(aA1, aA2) * aT + C(aA1); 57 | } 58 | float CalcBezier(float aT, float aA1, float aA2) { 59 | return ((A(aA1, aA2)*aT + B(aA1, aA2))*aT + C(aA1))*aT; 60 | } 61 | float GetTForX(float aX, float mX1, float mX2) { 62 | // iteration to solve 63 | float aGuessT = aX; 64 | for (int i = 0; i < 4; ++i) { 65 | float currentSlope = GetSlope(aGuessT, mX1, mX2); 66 | if 
(currentSlope == 0.0) return aGuessT; 67 | float currentX = CalcBezier(aGuessT, mX1, mX2) - aX; 68 | aGuessT -= currentX / currentSlope; 69 | } 70 | return aGuessT; 71 | } 72 | float KeySpline(float aX, float mX1, float mY1, float mX2, float mY2) { 73 | if (mX1 == mY1 && mX2 == mY2) return aX; // linear 74 | return CalcBezier(GetTForX(aX, mX1, mX2), mY1, mY2); // x to t, t to y 75 | } 76 | 77 | // norm distribution 78 | float normpdf(float x) { 79 | return exp(-20.*pow(x-.5,2.)); 80 | } 81 | 82 | vec2 rotateUv(vec2 uv, float angle, vec2 anchor, float zDirection) { 83 | uv = uv - anchor; // anchor to origin 84 | float s = sin(angle); 85 | float c = cos(angle); 86 | mat2 m = mat2(c, -s, s, c); 87 | uv = m * uv; 88 | uv += anchor; // anchor back 89 | return uv; 90 | } 91 | 92 | 93 | 94 | vec4 transition (vec2 uv) { 95 | 96 | vec2 iResolution = vec2(100.0, 100.0); // screen size 97 | 98 | vec2 myst = uv; 99 | float ratio = iResolution.x / iResolution.y; // screen ratio 100 | float animationTime = progress; //getAnimationTime(); 101 | float easingTime = KeySpline(animationTime, .68,.01,.17,.98); 102 | float blur = normpdf(easingTime); 103 | float r = 0.; 104 | float rotation = 180./180.*3.14159; 105 | if (easingTime <= .5) { 106 | r = rotation * easingTime; 107 | } else { 108 | r = -rotation + rotation * easingTime; 109 | } 110 | 111 | // rotation for current frame 112 | vec2 mystCurrent = myst; 113 | mystCurrent.y *= 1./ratio; 114 | mystCurrent = rotateUv(mystCurrent, r, vec2(1., 0.), -1.); 115 | mystCurrent.y *= ratio; 116 | 117 | // frame timeInterval by fps=30 118 | float timeInterval = 0.0167*2.0; 119 | if (easingTime <= .5) { 120 | r = rotation * (easingTime+timeInterval); 121 | } else { 122 | r = -rotation + rotation * (easingTime+timeInterval); 123 | } 124 | 125 | // rotation for next frame 126 | vec2 mystNext = myst; 127 | mystNext.y *= 1./ratio; 128 | mystNext = rotateUv(mystNext, r, vec2(1., 0.), -1.); 129 | mystNext.y *= ratio; 130 | 131 | // get speed at 
tangent direction
1.0), 0.75), step(m, -2.0)); 47 | } 48 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/wind.glsl: -------------------------------------------------------------------------------- 1 | // Author: gre 2 | // License: MIT 3 | 4 | // Custom parameters 5 | uniform float size; // = 0.2 6 | 7 | float rand (vec2 co) { 8 | return fract(sin(dot(co.xy ,vec2(12.9898,78.233))) * 43758.5453); 9 | } 10 | 11 | vec4 transition (vec2 uv) { 12 | float r = rand(vec2(0, uv.y)); 13 | float m = smoothstep(0.0, -size, uv.x*(1.0-size) + size*r - (progress * (1.0 + size))); 14 | return mix( 15 | getFromColor(uv), 16 | getToColor(uv), 17 | m 18 | ); 19 | } 20 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/windowblinds.glsl: -------------------------------------------------------------------------------- 1 | // Author: Fabien Benetou 2 | // License: MIT 3 | 4 | vec4 transition (vec2 uv) { 5 | float t = progress; 6 | 7 | if (mod(floor(uv.y*100.*progress),2.)==0.) 
8 | t*=2.-.5; 9 | 10 | return mix( 11 | getFromColor(uv), 12 | getToColor(uv), 13 | mix(t, progress, smoothstep(0.8, 1.0, progress)) 14 | ); 15 | } 16 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/windowslice.glsl: -------------------------------------------------------------------------------- 1 | // Author: gre 2 | // License: MIT 3 | 4 | uniform float count; // = 10.0 5 | uniform float smoothness; // = 0.5 6 | 7 | vec4 transition (vec2 p) { 8 | float pr = smoothstep(-smoothness, 0.0, p.x - progress * (1.0 + smoothness)); 9 | float s = step(pr, fract(count * p.x)); 10 | return mix(getFromColor(p), getToColor(p), s); 11 | } 12 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/wipeDown.glsl: -------------------------------------------------------------------------------- 1 | // Author: Jake Nelson 2 | // License: MIT 3 | 4 | vec4 transition(vec2 uv) { 5 | vec2 p=uv.xy/vec2(1.0).xy; 6 | vec4 a=getFromColor(p); 7 | vec4 b=getToColor(p); 8 | return mix(a, b, step(1.0-p.y,progress)); 9 | } 10 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/wipeLeft.glsl: -------------------------------------------------------------------------------- 1 | // Author: Jake Nelson 2 | // License: MIT 3 | 4 | vec4 transition(vec2 uv) { 5 | vec2 p=uv.xy/vec2(1.0).xy; 6 | vec4 a=getFromColor(p); 7 | vec4 b=getToColor(p); 8 | return mix(a, b, step(1.0-p.x,progress)); 9 | } 10 | -------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/wipeRight.glsl: -------------------------------------------------------------------------------- 1 | // Author: Jake Nelson 2 | // License: MIT 3 | 4 | vec4 transition(vec2 uv) { 5 | vec2 p=uv.xy/vec2(1.0).xy; 6 | vec4 a=getFromColor(p); 7 | vec4 b=getToColor(p); 8 | return mix(a, b, step(0.0+p.x,progress)); 9 | } 10 | 
-------------------------------------------------------------------------------- /ffmpeg/transitions/glsl/wipeUp.glsl: -------------------------------------------------------------------------------- 1 | // Author: Jake Nelson 2 | // License: MIT 3 | 4 | vec4 transition(vec2 uv) { 5 | vec2 p=uv.xy/vec2(1.0).xy; 6 | vec4 a=getFromColor(p); 7 | vec4 b=getToColor(p); 8 | return mix(a, b, step(0.0+p.y,progress)); 9 | } 10 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | project-pkgs~=1.0.0 2 | tqdm~=4.61.1 3 | graphviz~=0.16 4 | psutil~=5.8.0 5 | numpy~=1.21.0 -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Date: 2021-02-28 17:06:01 3 | LastEditors: Rustle Karl 4 | LastEditTime: 2021.06.24 17:16:23 5 | ''' 6 | import os.path 7 | 8 | from setuptools import setup 9 | 10 | __version__ = '1.0.5' 11 | 12 | # What packages are required for this module to be executed? 13 | requires = [ 14 | 'project-pkgs', 15 | ] 16 | 17 | # Import the README and use it as the long-description. 
18 | cwd = os.path.abspath(os.path.dirname(__file__)) 19 | with open(os.path.join(cwd, 'README.md'), encoding='utf-8') as f: 20 | long_description = f.read() 21 | 22 | setup( 23 | name='ffmpeg-generator', 24 | packages=[ 25 | 'ffmpeg', 26 | 'ffmpeg.expression', 27 | 'ffmpeg.filters', 28 | 'ffmpeg.tools', 29 | 'ffmpeg.transitions', 30 | ], 31 | version=__version__, 32 | license='MIT', 33 | author='Rustle Karl', 34 | author_email='fu.jiawei@outlook.com', 35 | description='Python bindings for FFmpeg - with almost all filters support, even `gltransition` filter.', 36 | long_description=long_description, 37 | long_description_content_type='text/markdown', 38 | url='https://github.com/fujiawei-dev/ffmpeg-generator', 39 | keywords=['ffmpeg', 'ffprobe', 'ffplay'], 40 | classifiers=[ 41 | 'Intended Audience :: Developers', 42 | 'License :: OSI Approved :: MIT License', 43 | 'Operating System :: OS Independent', 44 | 'Programming Language :: Python :: 3.8', 45 | 'Programming Language :: Python :: 3.9', 46 | ], 47 | install_requires=requires, 48 | ) 49 | --------------------------------------------------------------------------------