├── .github
└── workflows
│ └── ci.yml
├── .gitignore
├── LICENSE
├── MANIFEST
├── README
├── README.md
├── doc
├── .gitignore
├── Makefile
├── formula.png
├── formula.xcf
├── graph1.png
├── html
│ ├── .buildinfo
│ ├── _sources
│ │ └── index.rst.txt
│ ├── _static
│ │ ├── basic.css
│ │ ├── doctools.js
│ │ ├── documentation_options.js
│ │ ├── file.png
│ │ ├── jquery-3.2.1.js
│ │ ├── jquery.js
│ │ ├── language_data.js
│ │ ├── minus.png
│ │ ├── nature.css
│ │ ├── plus.png
│ │ ├── pygments.css
│ │ ├── searchtools.js
│ │ ├── underscore-1.3.1.js
│ │ └── underscore.js
│ ├── genindex.html
│ ├── index.html
│ ├── objects.inv
│ ├── py-modindex.html
│ ├── search.html
│ └── searchindex.js
├── jupyter-demo.gif
├── jupyter-screenshot.png
├── logo.png
├── logo.xcf
├── screenshot.png
├── src
│ ├── conf.py
│ └── index.rst
└── update-gh-pages.sh
├── examples
├── README.md
├── facetime.py
├── ffmpeg-numpy.ipynb
├── get_video_thumbnail.py
├── graphs
│ ├── av-pipeline.png
│ ├── dream.png
│ ├── ffmpeg-numpy.png
│ ├── get_video_thumbnail.png
│ ├── glob-filter.png
│ ├── glob.png
│ ├── mono-to-stereo.png
│ ├── read_frame_as_jpeg.png
│ ├── tensorflow-stream.png
│ └── transcribe.png
├── in.mp4
├── overlay.png
├── read_frame_as_jpeg.py
├── requirements.txt
├── show_progress.py
├── split_silence.py
├── tensorflow_stream.py
├── transcribe.py
└── video_info.py
├── ffmpeg
├── __init__.py
├── _ffmpeg.py
├── _filters.py
├── _probe.py
├── _run.py
├── _utils.py
├── _view.py
├── dag.py
├── nodes.py
└── tests
│ ├── __init__.py
│ ├── sample_data
│ ├── in1.mp4
│ └── overlay.png
│ └── test_ffmpeg.py
├── pyproject.toml
├── pytest.ini
├── requirements.txt
├── setup.cfg
├── setup.py
└── tox.ini
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 | on:
3 | - push
4 | - pull_request
5 | jobs:
6 | test:
7 | runs-on: ubuntu-20.04
8 | strategy:
9 | fail-fast: false
10 | matrix:
11 | python-version:
12 | - "2.7"
13 | - "3.5"
14 | - "3.6"
15 | - "3.7"
16 | - "3.8"
17 | - "3.9"
18 | - "3.10"
19 | steps:
20 | - uses: actions/checkout@v3
21 | - name: Set up Python ${{ matrix.python-version }}
22 | uses: actions/setup-python@v4
23 | with:
24 | python-version: ${{ matrix.python-version }}
25 | - name: Install ffmpeg
26 | run: |
27 | sudo apt update
28 | sudo apt install ffmpeg
29 | - name: Setup pip + tox
30 | run: |
31 | python -m pip install --upgrade \
32 | "pip==20.3.4; python_version < '3.6'" \
33 | "pip==21.3.1; python_version >= '3.6'"
34 | python -m pip install tox==3.24.5 tox-gh-actions==2.9.1
35 | - name: Test with tox
36 | run: tox
37 | black:
38 | runs-on: ubuntu-20.04
39 | steps:
40 | - uses: actions/checkout@v3
41 | - name: Black
42 | run: |
43 | # TODO: use standard `psf/black` action after dropping Python 2 support.
44 | pip install black==21.12b0 click==8.0.2 # https://stackoverflow.com/questions/71673404
45 | black ffmpeg --check --color --diff
46 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .cache
2 | .eggs
3 | .tox/
4 | dist/
5 | ffmpeg/tests/sample_data/out*.mp4
6 | ffmpeg_python.egg-info/
7 | venv*
8 | build/
9 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2017 Karl Kroening
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/MANIFEST:
--------------------------------------------------------------------------------
1 | # file GENERATED by distutils, do NOT edit
2 | README
3 | setup.py
4 | ffmpeg/__init__.py
5 | ffmpeg/_ffmpeg.py
6 | ffmpeg/_filters.py
7 | ffmpeg/_run.py
8 | ffmpeg/nodes.py
9 |
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
1 | README.md
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ffmpeg-python: Python bindings for FFmpeg
2 |
3 | [![CI][ci-badge]][ci]
4 |
5 | [ci-badge]: https://github.com/kkroening/ffmpeg-python/actions/workflows/ci.yml/badge.svg
6 | [ci]: https://github.com/kkroening/ffmpeg-python/actions/workflows/ci.yml
7 |
8 |
9 |
10 | ## Overview
11 |
12 | There are tons of Python FFmpeg wrappers out there but they seem to lack complex filter support. `ffmpeg-python` works well for simple as well as complex signal graphs.
13 |
14 |
15 | ## Quickstart
16 |
17 | Flip a video horizontally:
18 | ```python
19 | import ffmpeg
20 | stream = ffmpeg.input('input.mp4')
21 | stream = ffmpeg.hflip(stream)
22 | stream = ffmpeg.output(stream, 'output.mp4')
23 | ffmpeg.run(stream)
24 | ```
25 |
26 | Or if you prefer a fluent interface:
27 | ```python
28 | import ffmpeg
29 | (
30 | ffmpeg
31 | .input('input.mp4')
32 | .hflip()
33 | .output('output.mp4')
34 | .run()
35 | )
36 | ```
37 |
38 | ## [API reference](https://kkroening.github.io/ffmpeg-python/)
39 |
40 | ## Complex filter graphs
41 | FFmpeg is extremely powerful, but its command-line interface gets really complicated rather quickly - especially when working with signal graphs and doing anything more than trivial.
42 |
43 | Take for example a signal graph that looks like this:
44 |
45 | 
46 |
47 | The corresponding command-line arguments are pretty gnarly:
48 | ```bash
49 | ffmpeg -i input.mp4 -i overlay.png -filter_complex "[0]trim=start_frame=10:end_frame=20[v0];\
50 | [0]trim=start_frame=30:end_frame=40[v1];[v0][v1]concat=n=2[v2];[1]hflip[v3];\
51 | [v2][v3]overlay=eof_action=repeat[v4];[v4]drawbox=50:50:120:120:red:t=5[v5]"\
52 | -map [v5] output.mp4
53 | ```
54 |
55 | Maybe this looks great to you, but if you're not an FFmpeg command-line expert, it probably looks alien.
56 |
57 | If you're like me and find Python to be powerful and readable, it's easier with `ffmpeg-python`:
58 | ```python
59 | import ffmpeg
60 |
61 | in_file = ffmpeg.input('input.mp4')
62 | overlay_file = ffmpeg.input('overlay.png')
63 | (
64 | ffmpeg
65 | .concat(
66 | in_file.trim(start_frame=10, end_frame=20),
67 | in_file.trim(start_frame=30, end_frame=40),
68 | )
69 | .overlay(overlay_file.hflip())
70 | .drawbox(50, 50, 120, 120, color='red', thickness=5)
71 | .output('out.mp4')
72 | .run()
73 | )
74 | ```
75 |
76 | `ffmpeg-python` takes care of running `ffmpeg` with the command-line arguments that correspond to the above filter diagram, in familiar Python terms.
77 |
78 |
79 |
80 | Real-world signal graphs can get a heck of a lot more complex, but `ffmpeg-python` handles arbitrarily large (directed-acyclic) signal graphs.
81 |
82 | ## Installation
83 |
84 | ### Installing `ffmpeg-python`
85 |
86 | The latest version of `ffmpeg-python` can be acquired via a typical pip install:
87 |
88 | ```bash
89 | pip install ffmpeg-python
90 | ```
91 |
92 | Or the source can be cloned and installed locally:
93 | ```bash
94 | git clone git@github.com:kkroening/ffmpeg-python.git
95 | pip install -e ./ffmpeg-python
96 | ```
97 |
98 | > **Note**: `ffmpeg-python` makes no attempt to download/install FFmpeg, as `ffmpeg-python` is merely a pure-Python wrapper - whereas FFmpeg installation is platform-dependent/environment-specific, and is thus the responsibility of the user, as described below.
99 |
100 | ### Installing FFmpeg
101 |
102 | Before using `ffmpeg-python`, FFmpeg must be installed and accessible via the `$PATH` environment variable.
103 |
104 | There are a variety of ways to install FFmpeg, such as the [official download links](https://ffmpeg.org/download.html), or using your package manager of choice (e.g. `sudo apt install ffmpeg` on Debian/Ubuntu, `brew install ffmpeg` on OS X, etc.).
105 |
106 | Regardless of how FFmpeg is installed, you can check if your environment path is set correctly by running the `ffmpeg` command from the terminal, in which case the version information should appear, as in the following example (truncated for brevity):
107 |
108 | ```
109 | $ ffmpeg
110 | ffmpeg version 4.2.4-1ubuntu0.1 Copyright (c) 2000-2020 the FFmpeg developers
111 | built with gcc 9 (Ubuntu 9.3.0-10ubuntu2)
112 | ```
113 |
114 | > **Note**: The actual version information displayed here may vary from one system to another; but if a message such as `ffmpeg: command not found` appears instead of the version information, FFmpeg is not properly installed.
115 |
116 | ## [Examples](https://github.com/kkroening/ffmpeg-python/tree/master/examples)
117 |
118 | When in doubt, take a look at the [examples](https://github.com/kkroening/ffmpeg-python/tree/master/examples) to see if there's something that's close to whatever you're trying to do.
119 |
120 | Here are a few:
121 | - [Convert video to numpy array](https://github.com/kkroening/ffmpeg-python/blob/master/examples/README.md#convert-video-to-numpy-array)
122 | - [Generate thumbnail for video](https://github.com/kkroening/ffmpeg-python/blob/master/examples/README.md#generate-thumbnail-for-video)
123 | - [Read raw PCM audio via pipe](https://github.com/kkroening/ffmpeg-python/blob/master/examples/README.md#convert-sound-to-raw-pcm-audio)
124 |
125 | - [JupyterLab/Notebook stream editor](https://github.com/kkroening/ffmpeg-python/blob/master/examples/README.md#jupyter-stream-editor)
126 |
127 |
128 |
129 | - [Tensorflow/DeepDream streaming](https://github.com/kkroening/ffmpeg-python/blob/master/examples/README.md#tensorflow-streaming)
130 |
131 |
132 |
133 | See the [Examples README](https://github.com/kkroening/ffmpeg-python/tree/master/examples) for additional examples.
134 |
135 | ## Custom Filters
136 |
137 | Don't see the filter you're looking for? While `ffmpeg-python` includes shorthand notation for some of the most commonly used filters (such as `concat`), all filters can be referenced via the `.filter` operator:
138 | ```python
139 | stream = ffmpeg.input('dummy.mp4')
140 | stream = ffmpeg.filter(stream, 'fps', fps=25, round='up')
141 | stream = ffmpeg.output(stream, 'dummy2.mp4')
142 | ffmpeg.run(stream)
143 | ```
144 |
145 | Or fluently:
146 | ```python
147 | (
148 | ffmpeg
149 | .input('dummy.mp4')
150 | .filter('fps', fps=25, round='up')
151 | .output('dummy2.mp4')
152 | .run()
153 | )
154 | ```
155 |
156 | **Special option names:**
157 |
158 | Arguments with special names such as `-qscale:v` (variable bitrate), `-b:v` (constant bitrate), etc. can be specified as a keyword-args dictionary as follows:
159 | ```python
160 | (
161 | ffmpeg
162 | .input('in.mp4')
163 | .output('out.mp4', **{'qscale:v': 3})
164 | .run()
165 | )
166 | ```
167 |
168 | **Multiple inputs:**
169 |
170 | Filters that take multiple input streams can be used by passing the input streams as an array to `ffmpeg.filter`:
171 | ```python
172 | main = ffmpeg.input('main.mp4')
173 | logo = ffmpeg.input('logo.png')
174 | (
175 | ffmpeg
176 | .filter([main, logo], 'overlay', 10, 10)
177 | .output('out.mp4')
178 | .run()
179 | )
180 | ```
181 |
182 | **Multiple outputs:**
183 |
184 | Filters that produce multiple outputs can be used with `.filter_multi_output`:
185 | ```python
186 | split = (
187 | ffmpeg
188 | .input('in.mp4')
189 | .filter_multi_output('split') # or `.split()`
190 | )
191 | (
192 | ffmpeg
193 | .concat(split[0], split[1].reverse())
194 | .output('out.mp4')
195 | .run()
196 | )
197 | ```
198 | (In this particular case, `.split()` is the equivalent shorthand, but the general approach works for other multi-output filters)
199 |
200 | **String expressions:**
201 |
202 | Expressions to be interpreted by ffmpeg can be included as string parameters and reference any special ffmpeg variable names:
203 | ```python
204 | (
205 | ffmpeg
206 | .input('in.mp4')
207 | .filter('crop', 'in_w-2*10', 'in_h-2*20')
208 | .output('out.mp4')
209 | )
210 | ```
211 |
212 |
213 |
214 | When in doubt, refer to the [existing filters](https://github.com/kkroening/ffmpeg-python/blob/master/ffmpeg/_filters.py), [examples](https://github.com/kkroening/ffmpeg-python/tree/master/examples), and/or the [official ffmpeg documentation](https://ffmpeg.org/ffmpeg-filters.html).
215 |
216 | ## Frequently asked questions
217 |
218 | **Why do I get an import/attribute/etc. error from `import ffmpeg`?**
219 |
220 | Make sure you ran `pip install ffmpeg-python` and _**not**_ `pip install ffmpeg` (wrong) or `pip install python-ffmpeg` (also wrong).
221 |
222 | **Why did my audio stream get dropped?**
223 |
224 | Some ffmpeg filters drop audio streams, and care must be taken to preserve the audio in the final output. The ``.audio`` and ``.video`` operators can be used to reference the audio/video portions of a stream so that they can be processed separately and then re-combined later in the pipeline.
225 |
226 | This dilemma is intrinsic to ffmpeg, and `ffmpeg-python` tries to stay out of the way; refer to the official ffmpeg documentation for details on why certain filters drop audio.
227 |
228 | As usual, take a look at the [examples](https://github.com/kkroening/ffmpeg-python/tree/master/examples#audiovideo-pipeline) (*Audio/video pipeline* in particular).
229 |
230 | **How can I find out the used command line arguments?**
231 |
232 | You can run `stream.get_args()` before `stream.run()` to retrieve the command line arguments that will be passed to `ffmpeg`. You can also run `stream.compile()` that also includes the `ffmpeg` executable as the first argument.
233 |
234 | **How do I do XYZ?**
235 |
236 | Take a look at each of the links in the [Additional Resources](https://kkroening.github.io/ffmpeg-python/) section at the end of this README. If you look everywhere and can't find what you're looking for and have a question that may be relevant to other users, you may open an issue asking how to do it, while providing a thorough explanation of what you're trying to do and what you've tried so far.
237 |
238 | Issues not directly related to `ffmpeg-python` or issues asking others to write your code for you or how to do the work of solving a complex signal processing problem for you that's not relevant to other users will be closed.
239 |
240 | That said, we hope to continue improving our documentation and provide a community of support for people using `ffmpeg-python` to do cool and exciting things.
241 |
242 | ## Contributing
243 |
244 |
245 |
246 | One of the best things you can do to help make `ffmpeg-python` better is to answer [open questions](https://github.com/kkroening/ffmpeg-python/labels/question) in the issue tracker. The questions that are answered will be tagged and incorporated into the documentation, examples, and other learning resources.
247 |
248 | If you notice things that could be better in the documentation or overall development experience, please say so in the [issue tracker](https://github.com/kkroening/ffmpeg-python/issues). And of course, feel free to report any bugs or submit feature requests.
249 |
250 | Pull requests are welcome as well, but it wouldn't hurt to touch base in the issue tracker or hop on the [Matrix chat channel](https://riot.im/app/#/room/#ffmpeg-python:matrix.org) first.
251 |
252 | Anyone who fixes any of the [open bugs](https://github.com/kkroening/ffmpeg-python/labels/bug) or implements [requested enhancements](https://github.com/kkroening/ffmpeg-python/labels/enhancement) is a hero, but changes should include passing tests.
253 |
254 | ### Running tests
255 |
256 | ```bash
257 | git clone git@github.com:kkroening/ffmpeg-python.git
258 | cd ffmpeg-python
259 | virtualenv venv
260 | . venv/bin/activate # (OS X / Linux)
261 | venv\Scripts\activate # (Windows)
262 | pip install -e .[dev]
263 | pytest
264 | ```
265 |
266 |
267 |
268 | ### Special thanks
269 |
270 | - [Fabrice Bellard](https://bellard.org/)
271 | - [The FFmpeg team](https://ffmpeg.org/donations.html)
272 | - [Arne de Laat](https://github.com/153957)
273 | - [Davide Depau](https://github.com/depau)
274 | - [Dim](https://github.com/lloti)
275 | - [Noah Stier](https://github.com/noahstier)
276 |
277 | ## Additional Resources
278 |
279 | - [API Reference](https://kkroening.github.io/ffmpeg-python/)
280 | - [Examples](https://github.com/kkroening/ffmpeg-python/tree/master/examples)
281 | - [Filters](https://github.com/kkroening/ffmpeg-python/blob/master/ffmpeg/_filters.py)
282 | - [FFmpeg Homepage](https://ffmpeg.org/)
283 | - [FFmpeg Documentation](https://ffmpeg.org/ffmpeg.html)
284 | - [FFmpeg Filters Documentation](https://ffmpeg.org/ffmpeg-filters.html)
285 | - [Test cases](https://github.com/kkroening/ffmpeg-python/blob/master/ffmpeg/tests/test_ffmpeg.py)
286 | - [Issue tracker](https://github.com/kkroening/ffmpeg-python/issues)
287 | - Matrix Chat: [#ffmpeg-python:matrix.org](https://riot.im/app/#/room/#ffmpeg-python:matrix.org)
288 |
--------------------------------------------------------------------------------
/doc/.gitignore:
--------------------------------------------------------------------------------
1 | doctrees
2 | gh-pages
3 |
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = python -msphinx
7 | SPHINXPROJ = ffmpeg-python
8 | SOURCEDIR = src
9 | BUILDDIR = .
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @rm -rf html doctrees
21 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
22 |
23 |
24 | update-gh-pages:
25 | @./update-gh-pages.sh
26 |
--------------------------------------------------------------------------------
/doc/formula.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/doc/formula.png
--------------------------------------------------------------------------------
/doc/formula.xcf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/doc/formula.xcf
--------------------------------------------------------------------------------
/doc/graph1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/doc/graph1.png
--------------------------------------------------------------------------------
/doc/html/.buildinfo:
--------------------------------------------------------------------------------
1 | # Sphinx build info version 1
2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
3 | config: f3635c9edf6e9bff1735d57d26069ada
4 | tags: 645f666f9bcd5a90fca523b33c5a78b7
5 |
--------------------------------------------------------------------------------
/doc/html/_sources/index.rst.txt:
--------------------------------------------------------------------------------
1 | ffmpeg-python: Python bindings for FFmpeg
2 | =========================================
3 |
4 | :Github: https://github.com/kkroening/ffmpeg-python
5 |
6 | .. toctree::
7 | :maxdepth: 2
8 | :caption: Contents:
9 |
10 | .. automodule:: ffmpeg
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 |
16 | Indices and tables
17 | ==================
18 |
19 | * :ref:`genindex`
20 | * :ref:`modindex`
21 | * :ref:`search`
22 |
--------------------------------------------------------------------------------
/doc/html/_static/basic.css:
--------------------------------------------------------------------------------
1 | /*
2 | * basic.css
3 | * ~~~~~~~~~
4 | *
5 | * Sphinx stylesheet -- basic theme.
6 | *
7 | * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
8 | * :license: BSD, see LICENSE for details.
9 | *
10 | */
11 |
12 | /* -- main layout ----------------------------------------------------------- */
13 |
14 | div.clearer {
15 | clear: both;
16 | }
17 |
18 | /* -- relbar ---------------------------------------------------------------- */
19 |
20 | div.related {
21 | width: 100%;
22 | font-size: 90%;
23 | }
24 |
25 | div.related h3 {
26 | display: none;
27 | }
28 |
29 | div.related ul {
30 | margin: 0;
31 | padding: 0 0 0 10px;
32 | list-style: none;
33 | }
34 |
35 | div.related li {
36 | display: inline;
37 | }
38 |
39 | div.related li.right {
40 | float: right;
41 | margin-right: 5px;
42 | }
43 |
44 | /* -- sidebar --------------------------------------------------------------- */
45 |
46 | div.sphinxsidebarwrapper {
47 | padding: 10px 5px 0 10px;
48 | }
49 |
50 | div.sphinxsidebar {
51 | float: left;
52 | width: 230px;
53 | margin-left: -100%;
54 | font-size: 90%;
55 | word-wrap: break-word;
56 | overflow-wrap : break-word;
57 | }
58 |
59 | div.sphinxsidebar ul {
60 | list-style: none;
61 | }
62 |
63 | div.sphinxsidebar ul ul,
64 | div.sphinxsidebar ul.want-points {
65 | margin-left: 20px;
66 | list-style: square;
67 | }
68 |
69 | div.sphinxsidebar ul ul {
70 | margin-top: 0;
71 | margin-bottom: 0;
72 | }
73 |
74 | div.sphinxsidebar form {
75 | margin-top: 10px;
76 | }
77 |
78 | div.sphinxsidebar input {
79 | border: 1px solid #98dbcc;
80 | font-family: sans-serif;
81 | font-size: 1em;
82 | }
83 |
84 | div.sphinxsidebar #searchbox form.search {
85 | overflow: hidden;
86 | }
87 |
88 | div.sphinxsidebar #searchbox input[type="text"] {
89 | float: left;
90 | width: 80%;
91 | padding: 0.25em;
92 | box-sizing: border-box;
93 | }
94 |
95 | div.sphinxsidebar #searchbox input[type="submit"] {
96 | float: left;
97 | width: 20%;
98 | border-left: none;
99 | padding: 0.25em;
100 | box-sizing: border-box;
101 | }
102 |
103 |
104 | img {
105 | border: 0;
106 | max-width: 100%;
107 | }
108 |
109 | /* -- search page ----------------------------------------------------------- */
110 |
111 | ul.search {
112 | margin: 10px 0 0 20px;
113 | padding: 0;
114 | }
115 |
116 | ul.search li {
117 | padding: 5px 0 5px 20px;
118 | background-image: url(file.png);
119 | background-repeat: no-repeat;
120 | background-position: 0 7px;
121 | }
122 |
123 | ul.search li a {
124 | font-weight: bold;
125 | }
126 |
127 | ul.search li div.context {
128 | color: #888;
129 | margin: 2px 0 0 30px;
130 | text-align: left;
131 | }
132 |
133 | ul.keywordmatches li.goodmatch a {
134 | font-weight: bold;
135 | }
136 |
137 | /* -- index page ------------------------------------------------------------ */
138 |
139 | table.contentstable {
140 | width: 90%;
141 | margin-left: auto;
142 | margin-right: auto;
143 | }
144 |
145 | table.contentstable p.biglink {
146 | line-height: 150%;
147 | }
148 |
149 | a.biglink {
150 | font-size: 1.3em;
151 | }
152 |
153 | span.linkdescr {
154 | font-style: italic;
155 | padding-top: 5px;
156 | font-size: 90%;
157 | }
158 |
159 | /* -- general index --------------------------------------------------------- */
160 |
161 | table.indextable {
162 | width: 100%;
163 | }
164 |
165 | table.indextable td {
166 | text-align: left;
167 | vertical-align: top;
168 | }
169 |
170 | table.indextable ul {
171 | margin-top: 0;
172 | margin-bottom: 0;
173 | list-style-type: none;
174 | }
175 |
176 | table.indextable > tbody > tr > td > ul {
177 | padding-left: 0em;
178 | }
179 |
180 | table.indextable tr.pcap {
181 | height: 10px;
182 | }
183 |
184 | table.indextable tr.cap {
185 | margin-top: 10px;
186 | background-color: #f2f2f2;
187 | }
188 |
189 | img.toggler {
190 | margin-right: 3px;
191 | margin-top: 3px;
192 | cursor: pointer;
193 | }
194 |
195 | div.modindex-jumpbox {
196 | border-top: 1px solid #ddd;
197 | border-bottom: 1px solid #ddd;
198 | margin: 1em 0 1em 0;
199 | padding: 0.4em;
200 | }
201 |
202 | div.genindex-jumpbox {
203 | border-top: 1px solid #ddd;
204 | border-bottom: 1px solid #ddd;
205 | margin: 1em 0 1em 0;
206 | padding: 0.4em;
207 | }
208 |
209 | /* -- domain module index --------------------------------------------------- */
210 |
211 | table.modindextable td {
212 | padding: 2px;
213 | border-collapse: collapse;
214 | }
215 |
216 | /* -- general body styles --------------------------------------------------- */
217 |
218 | div.body {
219 | min-width: 450px;
220 | max-width: 800px;
221 | }
222 |
223 | div.body p, div.body dd, div.body li, div.body blockquote {
224 | -moz-hyphens: auto;
225 | -ms-hyphens: auto;
226 | -webkit-hyphens: auto;
227 | hyphens: auto;
228 | }
229 |
230 | a.headerlink {
231 | visibility: hidden;
232 | }
233 |
234 | a.brackets:before,
235 | span.brackets > a:before{
236 | content: "[";
237 | }
238 |
239 | a.brackets:after,
240 | span.brackets > a:after {
241 | content: "]";
242 | }
243 |
244 | h1:hover > a.headerlink,
245 | h2:hover > a.headerlink,
246 | h3:hover > a.headerlink,
247 | h4:hover > a.headerlink,
248 | h5:hover > a.headerlink,
249 | h6:hover > a.headerlink,
250 | dt:hover > a.headerlink,
251 | caption:hover > a.headerlink,
252 | p.caption:hover > a.headerlink,
253 | div.code-block-caption:hover > a.headerlink {
254 | visibility: visible;
255 | }
256 |
257 | div.body p.caption {
258 | text-align: inherit;
259 | }
260 |
261 | div.body td {
262 | text-align: left;
263 | }
264 |
265 | .first {
266 | margin-top: 0 !important;
267 | }
268 |
269 | p.rubric {
270 | margin-top: 30px;
271 | font-weight: bold;
272 | }
273 |
274 | img.align-left, .figure.align-left, object.align-left {
275 | clear: left;
276 | float: left;
277 | margin-right: 1em;
278 | }
279 |
280 | img.align-right, .figure.align-right, object.align-right {
281 | clear: right;
282 | float: right;
283 | margin-left: 1em;
284 | }
285 |
286 | img.align-center, .figure.align-center, object.align-center {
287 | display: block;
288 | margin-left: auto;
289 | margin-right: auto;
290 | }
291 |
292 | img.align-default, .figure.align-default {
293 | display: block;
294 | margin-left: auto;
295 | margin-right: auto;
296 | }
297 |
298 | .align-left {
299 | text-align: left;
300 | }
301 |
302 | .align-center {
303 | text-align: center;
304 | }
305 |
306 | .align-default {
307 | text-align: center;
308 | }
309 |
310 | .align-right {
311 | text-align: right;
312 | }
313 |
314 | /* -- sidebars -------------------------------------------------------------- */
315 |
316 | div.sidebar {
317 | margin: 0 0 0.5em 1em;
318 | border: 1px solid #ddb;
319 | padding: 7px 7px 0 7px;
320 | background-color: #ffe;
321 | width: 40%;
322 | float: right;
323 | }
324 |
325 | p.sidebar-title {
326 | font-weight: bold;
327 | }
328 |
329 | /* -- topics ---------------------------------------------------------------- */
330 |
331 | div.topic {
332 | border: 1px solid #ccc;
333 | padding: 7px 7px 0 7px;
334 | margin: 10px 0 10px 0;
335 | }
336 |
337 | p.topic-title {
338 | font-size: 1.1em;
339 | font-weight: bold;
340 | margin-top: 10px;
341 | }
342 |
343 | /* -- admonitions ----------------------------------------------------------- */
344 |
345 | div.admonition {
346 | margin-top: 10px;
347 | margin-bottom: 10px;
348 | padding: 7px;
349 | }
350 |
351 | div.admonition dt {
352 | font-weight: bold;
353 | }
354 |
355 | div.admonition dl {
356 | margin-bottom: 0;
357 | }
358 |
359 | p.admonition-title {
360 | margin: 0px 10px 5px 0px;
361 | font-weight: bold;
362 | }
363 |
364 | div.body p.centered {
365 | text-align: center;
366 | margin-top: 25px;
367 | }
368 |
369 | /* -- tables ---------------------------------------------------------------- */
370 |
371 | table.docutils {
372 | border: 0;
373 | border-collapse: collapse;
374 | }
375 |
376 | table.align-center {
377 | margin-left: auto;
378 | margin-right: auto;
379 | }
380 |
381 | table.align-default {
382 | margin-left: auto;
383 | margin-right: auto;
384 | }
385 |
386 | table caption span.caption-number {
387 | font-style: italic;
388 | }
389 |
390 | table caption span.caption-text {
391 | }
392 |
393 | table.docutils td, table.docutils th {
394 | padding: 1px 8px 1px 5px;
395 | border-top: 0;
396 | border-left: 0;
397 | border-right: 0;
398 | border-bottom: 1px solid #aaa;
399 | }
400 |
401 | table.footnote td, table.footnote th {
402 | border: 0 !important;
403 | }
404 |
405 | th {
406 | text-align: left;
407 | padding-right: 5px;
408 | }
409 |
410 | table.citation {
411 | border-left: solid 1px gray;
412 | margin-left: 1px;
413 | }
414 |
415 | table.citation td {
416 | border-bottom: none;
417 | }
418 |
419 | th > p:first-child,
420 | td > p:first-child {
421 | margin-top: 0px;
422 | }
423 |
424 | th > p:last-child,
425 | td > p:last-child {
426 | margin-bottom: 0px;
427 | }
428 |
429 | /* -- figures --------------------------------------------------------------- */
430 |
431 | div.figure {
432 | margin: 0.5em;
433 | padding: 0.5em;
434 | }
435 |
436 | div.figure p.caption {
437 | padding: 0.3em;
438 | }
439 |
440 | div.figure p.caption span.caption-number {
441 | font-style: italic;
442 | }
443 |
444 | div.figure p.caption span.caption-text {
445 | }
446 |
447 | /* -- field list styles ----------------------------------------------------- */
448 |
449 | table.field-list td, table.field-list th {
450 | border: 0 !important;
451 | }
452 |
453 | .field-list ul {
454 | margin: 0;
455 | padding-left: 1em;
456 | }
457 |
458 | .field-list p {
459 | margin: 0;
460 | }
461 |
462 | .field-name {
463 | -moz-hyphens: manual;
464 | -ms-hyphens: manual;
465 | -webkit-hyphens: manual;
466 | hyphens: manual;
467 | }
468 |
469 | /* -- hlist styles ---------------------------------------------------------- */
470 |
471 | table.hlist td {
472 | vertical-align: top;
473 | }
474 |
475 |
476 | /* -- other body styles ----------------------------------------------------- */
477 |
478 | ol.arabic {
479 | list-style: decimal;
480 | }
481 |
482 | ol.loweralpha {
483 | list-style: lower-alpha;
484 | }
485 |
486 | ol.upperalpha {
487 | list-style: upper-alpha;
488 | }
489 |
490 | ol.lowerroman {
491 | list-style: lower-roman;
492 | }
493 |
494 | ol.upperroman {
495 | list-style: upper-roman;
496 | }
497 |
498 | li > p:first-child {
499 | margin-top: 0px;
500 | }
501 |
502 | li > p:last-child {
503 | margin-bottom: 0px;
504 | }
505 |
506 | dl.footnote > dt,
507 | dl.citation > dt {
508 | float: left;
509 | }
510 |
511 | dl.footnote > dd,
512 | dl.citation > dd {
513 | margin-bottom: 0em;
514 | }
515 |
516 | dl.footnote > dd:after,
517 | dl.citation > dd:after {
518 | content: "";
519 | clear: both;
520 | }
521 |
522 | dl.field-list {
523 | display: flex;
524 | flex-wrap: wrap;
525 | }
526 |
527 | dl.field-list > dt {
528 | flex-basis: 20%;
529 | font-weight: bold;
530 | word-break: break-word;
531 | }
532 |
533 | dl.field-list > dt:after {
534 | content: ":";
535 | }
536 |
537 | dl.field-list > dd {
538 | flex-basis: 70%;
539 | padding-left: 1em;
540 | margin-left: 0em;
541 | margin-bottom: 0em;
542 | }
543 |
544 | dl {
545 | margin-bottom: 15px;
546 | }
547 |
548 | dd > p:first-child {
549 | margin-top: 0px;
550 | }
551 |
552 | dd ul, dd table {
553 | margin-bottom: 10px;
554 | }
555 |
556 | dd {
557 | margin-top: 3px;
558 | margin-bottom: 10px;
559 | margin-left: 30px;
560 | }
561 |
562 | dt:target, span.highlighted {
563 | background-color: #fbe54e;
564 | }
565 |
566 | rect.highlighted {
567 | fill: #fbe54e;
568 | }
569 |
570 | dl.glossary dt {
571 | font-weight: bold;
572 | font-size: 1.1em;
573 | }
574 |
575 | .optional {
576 | font-size: 1.3em;
577 | }
578 |
579 | .sig-paren {
580 | font-size: larger;
581 | }
582 |
583 | .versionmodified {
584 | font-style: italic;
585 | }
586 |
587 | .system-message {
588 | background-color: #fda;
589 | padding: 5px;
590 | border: 3px solid red;
591 | }
592 |
593 | .footnote:target {
594 | background-color: #ffa;
595 | }
596 |
597 | .line-block {
598 | display: block;
599 | margin-top: 1em;
600 | margin-bottom: 1em;
601 | }
602 |
603 | .line-block .line-block {
604 | margin-top: 0;
605 | margin-bottom: 0;
606 | margin-left: 1.5em;
607 | }
608 |
609 | .guilabel, .menuselection {
610 | font-family: sans-serif;
611 | }
612 |
613 | .accelerator {
614 | text-decoration: underline;
615 | }
616 |
617 | .classifier {
618 | font-style: oblique;
619 | }
620 |
621 | .classifier:before {
622 | font-style: normal;
623 | margin: 0.5em;
624 | content: ":";
625 | }
626 |
627 | abbr, acronym {
628 | border-bottom: dotted 1px;
629 | cursor: help;
630 | }
631 |
632 | /* -- code displays --------------------------------------------------------- */
633 |
634 | pre {
635 | overflow: auto;
636 | overflow-y: hidden; /* fixes display issues on Chrome browsers */
637 | }
638 |
639 | span.pre {
640 | -moz-hyphens: none;
641 | -ms-hyphens: none;
642 | -webkit-hyphens: none;
643 | hyphens: none;
644 | }
645 |
646 | td.linenos pre {
647 | padding: 5px 0px;
648 | border: 0;
649 | background-color: transparent;
650 | color: #aaa;
651 | }
652 |
653 | table.highlighttable {
654 | margin-left: 0.5em;
655 | }
656 |
657 | table.highlighttable td {
658 | padding: 0 0.5em 0 0.5em;
659 | }
660 |
661 | div.code-block-caption {
662 | padding: 2px 5px;
663 | font-size: small;
664 | }
665 |
666 | div.code-block-caption code {
667 | background-color: transparent;
668 | }
669 |
670 | div.code-block-caption + div > div.highlight > pre {
671 | margin-top: 0;
672 | }
673 |
674 | div.code-block-caption span.caption-number {
675 | padding: 0.1em 0.3em;
676 | font-style: italic;
677 | }
678 |
679 | div.code-block-caption span.caption-text {
680 | }
681 |
682 | div.literal-block-wrapper {
683 | padding: 1em 1em 0;
684 | }
685 |
686 | div.literal-block-wrapper div.highlight {
687 | margin: 0;
688 | }
689 |
690 | code.descname {
691 | background-color: transparent;
692 | font-weight: bold;
693 | font-size: 1.2em;
694 | }
695 |
696 | code.descclassname {
697 | background-color: transparent;
698 | }
699 |
700 | code.xref, a code {
701 | background-color: transparent;
702 | font-weight: bold;
703 | }
704 |
705 | h1 code, h2 code, h3 code, h4 code, h5 code, h6 code {
706 | background-color: transparent;
707 | }
708 |
709 | .viewcode-link {
710 | float: right;
711 | }
712 |
713 | .viewcode-back {
714 | float: right;
715 | font-family: sans-serif;
716 | }
717 |
718 | div.viewcode-block:target {
719 | margin: -1px -10px;
720 | padding: 0 10px;
721 | }
722 |
723 | /* -- math display ---------------------------------------------------------- */
724 |
725 | img.math {
726 | vertical-align: middle;
727 | }
728 |
729 | div.body div.math p {
730 | text-align: center;
731 | }
732 |
733 | span.eqno {
734 | float: right;
735 | }
736 |
737 | span.eqno a.headerlink {
738 | position: relative;
739 | left: 0px;
740 | z-index: 1;
741 | }
742 |
743 | div.math:hover a.headerlink {
744 | visibility: visible;
745 | }
746 |
747 | /* -- printout stylesheet --------------------------------------------------- */
748 |
749 | @media print {
750 | div.document,
751 | div.documentwrapper,
752 | div.bodywrapper {
753 | margin: 0 !important;
754 | width: 100%;
755 | }
756 |
757 | div.sphinxsidebar,
758 | div.related,
759 | div.footer,
760 | #top-link {
761 | display: none;
762 | }
763 | }
--------------------------------------------------------------------------------
/doc/html/_static/doctools.js:
--------------------------------------------------------------------------------
1 | /*
2 | * doctools.js
3 | * ~~~~~~~~~~~
4 | *
5 | * Sphinx JavaScript utilities for all documentation.
6 | *
7 | * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
8 | * :license: BSD, see LICENSE for details.
9 | *
10 | */
11 |
12 | /**
13 | * select a different prefix for underscore
14 | */
15 | $u = _.noConflict();
16 |
17 | /**
18 | * make the code below compatible with browsers without
19 | * an installed firebug like debugger
20 | if (!window.console || !console.firebug) {
21 | var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
22 | "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
23 | "profile", "profileEnd"];
24 | window.console = {};
25 | for (var i = 0; i < names.length; ++i)
26 | window.console[names[i]] = function() {};
27 | }
28 | */
29 |
30 | /**
31 | * small helper function to urldecode strings
32 | */
33 | jQuery.urldecode = function(x) {
34 | return decodeURIComponent(x).replace(/\+/g, ' ');
35 | };
36 |
37 | /**
38 | * small helper function to urlencode strings
39 | */
40 | jQuery.urlencode = encodeURIComponent;
41 |
42 | /**
43 | * This function returns the parsed url parameters of the
44 | * current request. Multiple values per key are supported,
45 | * it will always return arrays of strings for the value parts.
46 | */
47 | jQuery.getQueryParameters = function(s) {
48 | if (typeof s === 'undefined')
49 | s = document.location.search;
50 | var parts = s.substr(s.indexOf('?') + 1).split('&');
51 | var result = {};
52 | for (var i = 0; i < parts.length; i++) {
53 | var tmp = parts[i].split('=', 2);
54 | var key = jQuery.urldecode(tmp[0]);
55 | var value = jQuery.urldecode(tmp[1]);
56 | if (key in result)
57 | result[key].push(value);
58 | else
59 | result[key] = [value];
60 | }
61 | return result;
62 | };
63 |
64 | /**
65 | * highlight a given string on a jquery object by wrapping it in
66 | * span elements with the given class name.
67 | */
68 | jQuery.fn.highlightText = function(text, className) {
69 | function highlight(node, addItems) {
70 | if (node.nodeType === 3) {
71 | var val = node.nodeValue;
72 | var pos = val.toLowerCase().indexOf(text);
73 | if (pos >= 0 &&
74 | !jQuery(node.parentNode).hasClass(className) &&
75 | !jQuery(node.parentNode).hasClass("nohighlight")) {
76 | var span;
77 | var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
78 | if (isInSVG) {
79 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
80 | } else {
81 | span = document.createElement("span");
82 | span.className = className;
83 | }
84 | span.appendChild(document.createTextNode(val.substr(pos, text.length)));
85 | node.parentNode.insertBefore(span, node.parentNode.insertBefore(
86 | document.createTextNode(val.substr(pos + text.length)),
87 | node.nextSibling));
88 | node.nodeValue = val.substr(0, pos);
89 | if (isInSVG) {
90 | var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
91 | var bbox = node.parentElement.getBBox();
92 | rect.x.baseVal.value = bbox.x;
93 | rect.y.baseVal.value = bbox.y;
94 | rect.width.baseVal.value = bbox.width;
95 | rect.height.baseVal.value = bbox.height;
96 | rect.setAttribute('class', className);
97 | addItems.push({
98 | "parent": node.parentNode,
99 | "target": rect});
100 | }
101 | }
102 | }
103 | else if (!jQuery(node).is("button, select, textarea")) {
104 | jQuery.each(node.childNodes, function() {
105 | highlight(this, addItems);
106 | });
107 | }
108 | }
109 | var addItems = [];
110 | var result = this.each(function() {
111 | highlight(this, addItems);
112 | });
113 | for (var i = 0; i < addItems.length; ++i) {
114 | jQuery(addItems[i].parent).before(addItems[i].target);
115 | }
116 | return result;
117 | };
118 |
119 | /*
120 | * backward compatibility for jQuery.browser
121 | * This will be supported until firefox bug is fixed.
122 | */
123 | if (!jQuery.browser) {
124 | jQuery.uaMatch = function(ua) {
125 | ua = ua.toLowerCase();
126 |
127 | var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
128 | /(webkit)[ \/]([\w.]+)/.exec(ua) ||
129 | /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
130 | /(msie) ([\w.]+)/.exec(ua) ||
131 | ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
132 | [];
133 |
134 | return {
135 | browser: match[ 1 ] || "",
136 | version: match[ 2 ] || "0"
137 | };
138 | };
139 | jQuery.browser = {};
140 | jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
141 | }
142 |
143 | /**
144 | * Small JavaScript module for the documentation.
145 | */
146 | var Documentation = {
147 |
148 | init : function() {
149 | this.fixFirefoxAnchorBug();
150 | this.highlightSearchWords();
151 | this.initIndexTable();
152 | if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) {
153 | this.initOnKeyListeners();
154 | }
155 | },
156 |
157 | /**
158 | * i18n support
159 | */
160 | TRANSLATIONS : {},
161 | PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
162 | LOCALE : 'unknown',
163 |
164 | // gettext and ngettext don't access this so that the functions
165 | // can safely bound to a different name (_ = Documentation.gettext)
166 | gettext : function(string) {
167 | var translated = Documentation.TRANSLATIONS[string];
168 | if (typeof translated === 'undefined')
169 | return string;
170 | return (typeof translated === 'string') ? translated : translated[0];
171 | },
172 |
173 | ngettext : function(singular, plural, n) {
174 | var translated = Documentation.TRANSLATIONS[singular];
175 | if (typeof translated === 'undefined')
176 | return (n == 1) ? singular : plural;
177 | return translated[Documentation.PLURALEXPR(n)];
178 | },
179 |
180 | addTranslations : function(catalog) {
181 | for (var key in catalog.messages)
182 | this.TRANSLATIONS[key] = catalog.messages[key];
183 | this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
184 | this.LOCALE = catalog.locale;
185 | },
186 |
187 | /**
188 | * add context elements like header anchor links
189 | */
190 | addContextElements : function() {
191 | $('div[id] > :header:first').each(function() {
192 | $('').
193 | attr('href', '#' + this.id).
194 | attr('title', _('Permalink to this headline')).
195 | appendTo(this);
196 | });
197 | $('dt[id]').each(function() {
198 | $('').
199 | attr('href', '#' + this.id).
200 | attr('title', _('Permalink to this definition')).
201 | appendTo(this);
202 | });
203 | },
204 |
205 | /**
206 | * workaround a firefox stupidity
207 | * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
208 | */
209 | fixFirefoxAnchorBug : function() {
210 | if (document.location.hash && $.browser.mozilla)
211 | window.setTimeout(function() {
212 | document.location.href += '';
213 | }, 10);
214 | },
215 |
216 | /**
217 | * highlight the search words provided in the url in the text
218 | */
219 | highlightSearchWords : function() {
220 | var params = $.getQueryParameters();
221 | var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
222 | if (terms.length) {
223 | var body = $('div.body');
224 | if (!body.length) {
225 | body = $('body');
226 | }
227 | window.setTimeout(function() {
228 | $.each(terms, function() {
229 | body.highlightText(this.toLowerCase(), 'highlighted');
230 | });
231 | }, 10);
232 | $('
' + _('Hide Search Matches') + '
')
234 | .appendTo($('#searchbox'));
235 | }
236 | },
237 |
238 | /**
239 | * init the domain index toggle buttons
240 | */
241 | initIndexTable : function() {
242 | var togglers = $('img.toggler').click(function() {
243 | var src = $(this).attr('src');
244 | var idnum = $(this).attr('id').substr(7);
245 | $('tr.cg-' + idnum).toggle();
246 | if (src.substr(-9) === 'minus.png')
247 | $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
248 | else
249 | $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
250 | }).css('display', '');
251 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
252 | togglers.click();
253 | }
254 | },
255 |
256 | /**
257 | * helper function to hide the search marks again
258 | */
259 | hideSearchWords : function() {
260 | $('#searchbox .highlight-link').fadeOut(300);
261 | $('span.highlighted').removeClass('highlighted');
262 | },
263 |
264 | /**
265 | * make the url absolute
266 | */
267 | makeURL : function(relativeURL) {
268 | return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
269 | },
270 |
271 | /**
272 | * get the current relative url
273 | */
274 | getCurrentURL : function() {
275 | var path = document.location.pathname;
276 | var parts = path.split(/\//);
277 | $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
278 | if (this === '..')
279 | parts.pop();
280 | });
281 | var url = parts.join('/');
282 | return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
283 | },
284 |
285 | initOnKeyListeners: function() {
286 | $(document).keyup(function(event) {
287 | var activeElementType = document.activeElement.tagName;
288 | // don't navigate when in search box or textarea
289 | if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') {
290 | switch (event.keyCode) {
291 | case 37: // left
292 | var prevHref = $('link[rel="prev"]').prop('href');
293 | if (prevHref) {
294 | window.location.href = prevHref;
295 | return false;
296 | }
297 | case 39: // right
298 | var nextHref = $('link[rel="next"]').prop('href');
299 | if (nextHref) {
300 | window.location.href = nextHref;
301 | return false;
302 | }
303 | }
304 | }
305 | });
306 | }
307 | };
308 |
309 | // quick alias for translations
310 | _ = Documentation.gettext;
311 |
312 | $(document).ready(function() {
313 | Documentation.init();
314 | });
315 |
--------------------------------------------------------------------------------
/doc/html/_static/documentation_options.js:
--------------------------------------------------------------------------------
1 | var DOCUMENTATION_OPTIONS = {
2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
3 | VERSION: '',
4 | LANGUAGE: 'None',
5 | COLLAPSE_INDEX: false,
6 | FILE_SUFFIX: '.html',
7 | HAS_SOURCE: true,
8 | SOURCELINK_SUFFIX: '.txt',
9 | NAVIGATION_WITH_KEYS: false
10 | };
--------------------------------------------------------------------------------
/doc/html/_static/file.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/doc/html/_static/file.png
--------------------------------------------------------------------------------
/doc/html/_static/language_data.js:
--------------------------------------------------------------------------------
1 | /*
2 | * language_data.js
3 | * ~~~~~~~~~~~~~~~~
4 | *
5 | * This script contains the language-specific data used by searchtools.js,
6 | * namely the list of stopwords, stemmer, scorer and splitter.
7 | *
8 | * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
9 | * :license: BSD, see LICENSE for details.
10 | *
11 | */
12 |
13 | var stopwords = ["a","and","are","as","at","be","but","by","for","if","in","into","is","it","near","no","not","of","on","or","such","that","the","their","then","there","these","they","this","to","was","will","with"];
14 |
15 |
16 | /* Non-minified version JS is _stemmer.js if file is provided */
17 | /**
18 | * Porter Stemmer
19 | */
20 | var Stemmer = function() {
21 |
22 | var step2list = {
23 | ational: 'ate',
24 | tional: 'tion',
25 | enci: 'ence',
26 | anci: 'ance',
27 | izer: 'ize',
28 | bli: 'ble',
29 | alli: 'al',
30 | entli: 'ent',
31 | eli: 'e',
32 | ousli: 'ous',
33 | ization: 'ize',
34 | ation: 'ate',
35 | ator: 'ate',
36 | alism: 'al',
37 | iveness: 'ive',
38 | fulness: 'ful',
39 | ousness: 'ous',
40 | aliti: 'al',
41 | iviti: 'ive',
42 | biliti: 'ble',
43 | logi: 'log'
44 | };
45 |
46 | var step3list = {
47 | icate: 'ic',
48 | ative: '',
49 | alize: 'al',
50 | iciti: 'ic',
51 | ical: 'ic',
52 | ful: '',
53 | ness: ''
54 | };
55 |
56 | var c = "[^aeiou]"; // consonant
57 | var v = "[aeiouy]"; // vowel
58 | var C = c + "[^aeiouy]*"; // consonant sequence
59 | var V = v + "[aeiou]*"; // vowel sequence
60 |
61 | var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
62 | var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
63 | var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
64 | var s_v = "^(" + C + ")?" + v; // vowel in stem
65 |
66 | this.stemWord = function (w) {
67 | var stem;
68 | var suffix;
69 | var firstch;
70 | var origword = w;
71 |
72 | if (w.length < 3)
73 | return w;
74 |
75 | var re;
76 | var re2;
77 | var re3;
78 | var re4;
79 |
80 | firstch = w.substr(0,1);
81 | if (firstch == "y")
82 | w = firstch.toUpperCase() + w.substr(1);
83 |
84 | // Step 1a
85 | re = /^(.+?)(ss|i)es$/;
86 | re2 = /^(.+?)([^s])s$/;
87 |
88 | if (re.test(w))
89 | w = w.replace(re,"$1$2");
90 | else if (re2.test(w))
91 | w = w.replace(re2,"$1$2");
92 |
93 | // Step 1b
94 | re = /^(.+?)eed$/;
95 | re2 = /^(.+?)(ed|ing)$/;
96 | if (re.test(w)) {
97 | var fp = re.exec(w);
98 | re = new RegExp(mgr0);
99 | if (re.test(fp[1])) {
100 | re = /.$/;
101 | w = w.replace(re,"");
102 | }
103 | }
104 | else if (re2.test(w)) {
105 | var fp = re2.exec(w);
106 | stem = fp[1];
107 | re2 = new RegExp(s_v);
108 | if (re2.test(stem)) {
109 | w = stem;
110 | re2 = /(at|bl|iz)$/;
111 | re3 = new RegExp("([^aeiouylsz])\\1$");
112 | re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
113 | if (re2.test(w))
114 | w = w + "e";
115 | else if (re3.test(w)) {
116 | re = /.$/;
117 | w = w.replace(re,"");
118 | }
119 | else if (re4.test(w))
120 | w = w + "e";
121 | }
122 | }
123 |
124 | // Step 1c
125 | re = /^(.+?)y$/;
126 | if (re.test(w)) {
127 | var fp = re.exec(w);
128 | stem = fp[1];
129 | re = new RegExp(s_v);
130 | if (re.test(stem))
131 | w = stem + "i";
132 | }
133 |
134 | // Step 2
135 | re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
136 | if (re.test(w)) {
137 | var fp = re.exec(w);
138 | stem = fp[1];
139 | suffix = fp[2];
140 | re = new RegExp(mgr0);
141 | if (re.test(stem))
142 | w = stem + step2list[suffix];
143 | }
144 |
145 | // Step 3
146 | re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
147 | if (re.test(w)) {
148 | var fp = re.exec(w);
149 | stem = fp[1];
150 | suffix = fp[2];
151 | re = new RegExp(mgr0);
152 | if (re.test(stem))
153 | w = stem + step3list[suffix];
154 | }
155 |
156 | // Step 4
157 | re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
158 | re2 = /^(.+?)(s|t)(ion)$/;
159 | if (re.test(w)) {
160 | var fp = re.exec(w);
161 | stem = fp[1];
162 | re = new RegExp(mgr1);
163 | if (re.test(stem))
164 | w = stem;
165 | }
166 | else if (re2.test(w)) {
167 | var fp = re2.exec(w);
168 | stem = fp[1] + fp[2];
169 | re2 = new RegExp(mgr1);
170 | if (re2.test(stem))
171 | w = stem;
172 | }
173 |
174 | // Step 5
175 | re = /^(.+?)e$/;
176 | if (re.test(w)) {
177 | var fp = re.exec(w);
178 | stem = fp[1];
179 | re = new RegExp(mgr1);
180 | re2 = new RegExp(meq1);
181 | re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
182 | if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
183 | w = stem;
184 | }
185 | re = /ll$/;
186 | re2 = new RegExp(mgr1);
187 | if (re.test(w) && re2.test(w)) {
188 | re = /.$/;
189 | w = w.replace(re,"");
190 | }
191 |
192 | // and turn initial Y back to y
193 | if (firstch == "y")
194 | w = firstch.toLowerCase() + w.substr(1);
195 | return w;
196 | }
197 | }
198 |
199 |
200 |
201 |
202 |
/**
 * Lookup table of character codes that act as word separators when
 * splitting a search query.  Built once at load time from a list of
 * individual code points plus a list of inclusive [start, end] ranges;
 * membership is tested as `splitChars[code]` (true or undefined).
 */
var splitChars = (function() {
    var table = {};
    // Isolated separator code points.
    var singleCodes = [96, 180, 187, 191, 215, 247, 749, 885, 903, 907, 909, 930, 1014, 1648,
                       1748, 1809, 2416, 2473, 2481, 2526, 2601, 2609, 2612, 2615, 2653, 2702,
                       2706, 2729, 2737, 2740, 2857, 2865, 2868, 2910, 2928, 2948, 2961, 2971,
                       2973, 3085, 3089, 3113, 3124, 3213, 3217, 3241, 3252, 3295, 3341, 3345,
                       3369, 3506, 3516, 3633, 3715, 3721, 3736, 3744, 3748, 3750, 3756, 3761,
                       3781, 3912, 4239, 4347, 4681, 4695, 4697, 4745, 4785, 4799, 4801, 4823,
                       4881, 5760, 5901, 5997, 6313, 7405, 8024, 8026, 8028, 8030, 8117, 8125,
                       8133, 8181, 8468, 8485, 8487, 8489, 8494, 8527, 11311, 11359, 11687, 11695,
                       11703, 11711, 11719, 11727, 11735, 12448, 12539, 43010, 43014, 43019, 43587,
                       43696, 43713, 64286, 64297, 64311, 64317, 64319, 64322, 64325, 65141];
    singleCodes.forEach(function(code) {
        table[code] = true;
    });
    // Inclusive [start, end] ranges of separator code points.
    var codeRanges = [[0, 47], [58, 64], [91, 94], [123, 169], [171, 177], [182, 184], [706, 709],
                      [722, 735], [741, 747], [751, 879], [888, 889], [894, 901], [1154, 1161],
                      [1318, 1328], [1367, 1368], [1370, 1376], [1416, 1487], [1515, 1519], [1523, 1568],
                      [1611, 1631], [1642, 1645], [1750, 1764], [1767, 1773], [1789, 1790], [1792, 1807],
                      [1840, 1868], [1958, 1968], [1970, 1983], [2027, 2035], [2038, 2041], [2043, 2047],
                      [2070, 2073], [2075, 2083], [2085, 2087], [2089, 2307], [2362, 2364], [2366, 2383],
                      [2385, 2391], [2402, 2405], [2419, 2424], [2432, 2436], [2445, 2446], [2449, 2450],
                      [2483, 2485], [2490, 2492], [2494, 2509], [2511, 2523], [2530, 2533], [2546, 2547],
                      [2554, 2564], [2571, 2574], [2577, 2578], [2618, 2648], [2655, 2661], [2672, 2673],
                      [2677, 2692], [2746, 2748], [2750, 2767], [2769, 2783], [2786, 2789], [2800, 2820],
                      [2829, 2830], [2833, 2834], [2874, 2876], [2878, 2907], [2914, 2917], [2930, 2946],
                      [2955, 2957], [2966, 2968], [2976, 2978], [2981, 2983], [2987, 2989], [3002, 3023],
                      [3025, 3045], [3059, 3076], [3130, 3132], [3134, 3159], [3162, 3167], [3170, 3173],
                      [3184, 3191], [3199, 3204], [3258, 3260], [3262, 3293], [3298, 3301], [3312, 3332],
                      [3386, 3388], [3390, 3423], [3426, 3429], [3446, 3449], [3456, 3460], [3479, 3481],
                      [3518, 3519], [3527, 3584], [3636, 3647], [3655, 3663], [3674, 3712], [3717, 3718],
                      [3723, 3724], [3726, 3731], [3752, 3753], [3764, 3772], [3774, 3775], [3783, 3791],
                      [3802, 3803], [3806, 3839], [3841, 3871], [3892, 3903], [3949, 3975], [3980, 4095],
                      [4139, 4158], [4170, 4175], [4182, 4185], [4190, 4192], [4194, 4196], [4199, 4205],
                      [4209, 4212], [4226, 4237], [4250, 4255], [4294, 4303], [4349, 4351], [4686, 4687],
                      [4702, 4703], [4750, 4751], [4790, 4791], [4806, 4807], [4886, 4887], [4955, 4968],
                      [4989, 4991], [5008, 5023], [5109, 5120], [5741, 5742], [5787, 5791], [5867, 5869],
                      [5873, 5887], [5906, 5919], [5938, 5951], [5970, 5983], [6001, 6015], [6068, 6102],
                      [6104, 6107], [6109, 6111], [6122, 6127], [6138, 6159], [6170, 6175], [6264, 6271],
                      [6315, 6319], [6390, 6399], [6429, 6469], [6510, 6511], [6517, 6527], [6572, 6592],
                      [6600, 6607], [6619, 6655], [6679, 6687], [6741, 6783], [6794, 6799], [6810, 6822],
                      [6824, 6916], [6964, 6980], [6988, 6991], [7002, 7042], [7073, 7085], [7098, 7167],
                      [7204, 7231], [7242, 7244], [7294, 7400], [7410, 7423], [7616, 7679], [7958, 7959],
                      [7966, 7967], [8006, 8007], [8014, 8015], [8062, 8063], [8127, 8129], [8141, 8143],
                      [8148, 8149], [8156, 8159], [8173, 8177], [8189, 8303], [8306, 8307], [8314, 8318],
                      [8330, 8335], [8341, 8449], [8451, 8454], [8456, 8457], [8470, 8472], [8478, 8483],
                      [8506, 8507], [8512, 8516], [8522, 8525], [8586, 9311], [9372, 9449], [9472, 10101],
                      [10132, 11263], [11493, 11498], [11503, 11516], [11518, 11519], [11558, 11567],
                      [11622, 11630], [11632, 11647], [11671, 11679], [11743, 11822], [11824, 12292],
                      [12296, 12320], [12330, 12336], [12342, 12343], [12349, 12352], [12439, 12444],
                      [12544, 12548], [12590, 12592], [12687, 12689], [12694, 12703], [12728, 12783],
                      [12800, 12831], [12842, 12880], [12896, 12927], [12938, 12976], [12992, 13311],
                      [19894, 19967], [40908, 40959], [42125, 42191], [42238, 42239], [42509, 42511],
                      [42540, 42559], [42592, 42593], [42607, 42622], [42648, 42655], [42736, 42774],
                      [42784, 42785], [42889, 42890], [42893, 43002], [43043, 43055], [43062, 43071],
                      [43124, 43137], [43188, 43215], [43226, 43249], [43256, 43258], [43260, 43263],
                      [43302, 43311], [43335, 43359], [43389, 43395], [43443, 43470], [43482, 43519],
                      [43561, 43583], [43596, 43599], [43610, 43615], [43639, 43641], [43643, 43647],
                      [43698, 43700], [43703, 43704], [43710, 43711], [43715, 43738], [43742, 43967],
                      [44003, 44015], [44026, 44031], [55204, 55215], [55239, 55242], [55292, 55295],
                      [57344, 63743], [64046, 64047], [64110, 64111], [64218, 64255], [64263, 64274],
                      [64280, 64284], [64434, 64466], [64830, 64847], [64912, 64913], [64968, 65007],
                      [65020, 65135], [65277, 65295], [65306, 65312], [65339, 65344], [65371, 65381],
                      [65471, 65473], [65480, 65481], [65488, 65489], [65496, 65497]];
    codeRanges.forEach(function(range) {
        for (var code = range[0]; code <= range[1]; code++) {
            table[code] = true;
        }
    });
    return table;
})();
277 |
/**
 * Split a query string into words.
 *
 * A word is a maximal run of characters whose char codes are NOT in the
 * `splitChars` table; separator characters are dropped.  Returns an array
 * of the words in order (empty array when the query contains only
 * separators or is empty).
 */
function splitQuery(query) {
    var words = [];
    var wordStart = -1;  // index of the current word's first char, or -1 between words
    for (var pos = 0; pos < query.length; pos++) {
        var isSeparator = splitChars[query.charCodeAt(pos)];
        if (!isSeparator) {
            // First non-separator character opens a new word.
            if (wordStart === -1) {
                wordStart = pos;
            }
            continue;
        }
        // Separator closes any word in progress.
        if (wordStart !== -1) {
            words.push(query.slice(wordStart, pos));
            wordStart = -1;
        }
    }
    // Flush a word that runs to the end of the string.
    if (wordStart !== -1) {
        words.push(query.slice(wordStart));
    }
    return words;
}
296 |
297 |
298 |
--------------------------------------------------------------------------------
/doc/html/_static/minus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/doc/html/_static/minus.png
--------------------------------------------------------------------------------
/doc/html/_static/nature.css:
--------------------------------------------------------------------------------
1 | /*
2 | * nature.css_t
3 | * ~~~~~~~~~~~~
4 | *
5 | * Sphinx stylesheet -- nature theme.
6 | *
7 | * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
8 | * :license: BSD, see LICENSE for details.
9 | *
10 | */
11 |
12 | @import url("basic.css");
13 |
14 | /* -- page layout ----------------------------------------------------------- */
15 |
16 | body {
17 | font-family: Arial, sans-serif;
18 | font-size: 100%;
19 | background-color: #fff;
20 | color: #555;
21 | margin: 0;
22 | padding: 0;
23 | }
24 |
25 | div.documentwrapper {
26 | float: left;
27 | width: 100%;
28 | }
29 |
30 | div.bodywrapper {
31 | margin: 0 0 0 230px;
32 | }
33 |
34 | hr {
35 | border: 1px solid #B1B4B6;
36 | }
37 |
38 | div.document {
39 | background-color: #eee;
40 | }
41 |
42 | div.body {
43 | background-color: #ffffff;
44 | color: #3E4349;
45 | padding: 0 30px 30px 30px;
46 | font-size: 0.9em;
47 | }
48 |
49 | div.footer {
50 | color: #555;
51 | width: 100%;
52 | padding: 13px 0;
53 | text-align: center;
54 | font-size: 75%;
55 | }
56 |
57 | div.footer a {
58 | color: #444;
59 | text-decoration: underline;
60 | }
61 |
62 | div.related {
63 | background-color: #6BA81E;
64 | line-height: 32px;
65 | color: #fff;
66 | text-shadow: 0px 1px 0 #444;
67 | font-size: 0.9em;
68 | }
69 |
70 | div.related a {
71 | color: #E2F3CC;
72 | }
73 |
74 | div.sphinxsidebar {
75 | font-size: 0.75em;
76 | line-height: 1.5em;
77 | }
78 |
79 | div.sphinxsidebarwrapper{
80 | padding: 20px 0;
81 | }
82 |
83 | div.sphinxsidebar h3,
84 | div.sphinxsidebar h4 {
85 | font-family: Arial, sans-serif;
86 | color: #222;
87 | font-size: 1.2em;
88 | font-weight: normal;
89 | margin: 0;
90 | padding: 5px 10px;
91 | background-color: #ddd;
92 | text-shadow: 1px 1px 0 white
93 | }
94 |
95 | div.sphinxsidebar h4{
96 | font-size: 1.1em;
97 | }
98 |
99 | div.sphinxsidebar h3 a {
100 | color: #444;
101 | }
102 |
103 |
104 | div.sphinxsidebar p {
105 | color: #888;
106 | padding: 5px 20px;
107 | }
108 |
109 | div.sphinxsidebar p.topless {
110 | }
111 |
112 | div.sphinxsidebar ul {
113 | margin: 10px 20px;
114 | padding: 0;
115 | color: #000;
116 | }
117 |
118 | div.sphinxsidebar a {
119 | color: #444;
120 | }
121 |
122 | div.sphinxsidebar input {
123 | border: 1px solid #ccc;
124 | font-family: sans-serif;
125 | font-size: 1em;
126 | }
127 |
128 | div.sphinxsidebar .searchformwrapper {
129 | margin-left: 20px;
130 | margin-right: 20px;
131 | }
132 |
133 | /* -- body styles ----------------------------------------------------------- */
134 |
135 | a {
136 | color: #005B81;
137 | text-decoration: none;
138 | }
139 |
140 | a:hover {
141 | color: #E32E00;
142 | text-decoration: underline;
143 | }
144 |
145 | div.body h1,
146 | div.body h2,
147 | div.body h3,
148 | div.body h4,
149 | div.body h5,
150 | div.body h6 {
151 | font-family: Arial, sans-serif;
152 | background-color: #BED4EB;
153 | font-weight: normal;
154 | color: #212224;
155 | margin: 30px 0px 10px 0px;
156 | padding: 5px 0 5px 10px;
157 | text-shadow: 0px 1px 0 white
158 | }
159 |
160 | div.body h1 { border-top: 20px solid white; margin-top: 0; font-size: 200%; }
161 | div.body h2 { font-size: 150%; background-color: #C8D5E3; }
162 | div.body h3 { font-size: 120%; background-color: #D8DEE3; }
163 | div.body h4 { font-size: 110%; background-color: #D8DEE3; }
164 | div.body h5 { font-size: 100%; background-color: #D8DEE3; }
165 | div.body h6 { font-size: 100%; background-color: #D8DEE3; }
166 |
167 | a.headerlink {
168 | color: #c60f0f;
169 | font-size: 0.8em;
170 | padding: 0 4px 0 4px;
171 | text-decoration: none;
172 | }
173 |
174 | a.headerlink:hover {
175 | background-color: #c60f0f;
176 | color: white;
177 | }
178 |
179 | div.body p, div.body dd, div.body li {
180 | line-height: 1.5em;
181 | }
182 |
183 | div.admonition p.admonition-title + p {
184 | display: inline;
185 | }
186 |
187 | div.highlight{
188 | background-color: white;
189 | }
190 |
191 | div.note {
192 | background-color: #eee;
193 | border: 1px solid #ccc;
194 | }
195 |
196 | div.seealso {
197 | background-color: #ffc;
198 | border: 1px solid #ff6;
199 | }
200 |
201 | div.topic {
202 | background-color: #eee;
203 | }
204 |
205 | div.warning {
206 | background-color: #ffe4e4;
207 | border: 1px solid #f66;
208 | }
209 |
210 | p.admonition-title {
211 | display: inline;
212 | }
213 |
214 | p.admonition-title:after {
215 | content: ":";
216 | }
217 |
218 | pre {
219 | padding: 10px;
220 | background-color: White;
221 | color: #222;
222 | line-height: 1.2em;
223 | border: 1px solid #C6C9CB;
224 | font-size: 1.1em;
225 | margin: 1.5em 0 1.5em 0;
226 | -webkit-box-shadow: 1px 1px 1px #d8d8d8;
227 | -moz-box-shadow: 1px 1px 1px #d8d8d8;
228 | }
229 |
230 | code {
231 | background-color: #ecf0f3;
232 | color: #222;
233 | /* padding: 1px 2px; */
234 | font-size: 1.1em;
235 | font-family: monospace;
236 | }
237 |
238 | .viewcode-back {
239 | font-family: Arial, sans-serif;
240 | }
241 |
242 | div.viewcode-block:target {
243 | background-color: #f4debf;
244 | border-top: 1px solid #ac9;
245 | border-bottom: 1px solid #ac9;
246 | }
247 |
248 | div.code-block-caption {
249 | background-color: #ddd;
250 | color: #222;
251 | border: 1px solid #C6C9CB;
252 | }
--------------------------------------------------------------------------------
/doc/html/_static/plus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/doc/html/_static/plus.png
--------------------------------------------------------------------------------
/doc/html/_static/pygments.css:
--------------------------------------------------------------------------------
1 | .highlight .hll { background-color: #ffffcc }
2 | .highlight { background: #eeffcc; }
3 | .highlight .c { color: #408090; font-style: italic } /* Comment */
4 | .highlight .err { border: 1px solid #FF0000 } /* Error */
5 | .highlight .k { color: #007020; font-weight: bold } /* Keyword */
6 | .highlight .o { color: #666666 } /* Operator */
7 | .highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */
8 | .highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */
9 | .highlight .cp { color: #007020 } /* Comment.Preproc */
10 | .highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */
11 | .highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */
12 | .highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */
13 | .highlight .gd { color: #A00000 } /* Generic.Deleted */
14 | .highlight .ge { font-style: italic } /* Generic.Emph */
15 | .highlight .gr { color: #FF0000 } /* Generic.Error */
16 | .highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
17 | .highlight .gi { color: #00A000 } /* Generic.Inserted */
18 | .highlight .go { color: #333333 } /* Generic.Output */
19 | .highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */
20 | .highlight .gs { font-weight: bold } /* Generic.Strong */
21 | .highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
22 | .highlight .gt { color: #0044DD } /* Generic.Traceback */
23 | .highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */
24 | .highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */
25 | .highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */
26 | .highlight .kp { color: #007020 } /* Keyword.Pseudo */
27 | .highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */
28 | .highlight .kt { color: #902000 } /* Keyword.Type */
29 | .highlight .m { color: #208050 } /* Literal.Number */
30 | .highlight .s { color: #4070a0 } /* Literal.String */
31 | .highlight .na { color: #4070a0 } /* Name.Attribute */
32 | .highlight .nb { color: #007020 } /* Name.Builtin */
33 | .highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */
34 | .highlight .no { color: #60add5 } /* Name.Constant */
35 | .highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */
36 | .highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */
37 | .highlight .ne { color: #007020 } /* Name.Exception */
38 | .highlight .nf { color: #06287e } /* Name.Function */
39 | .highlight .nl { color: #002070; font-weight: bold } /* Name.Label */
40 | .highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */
41 | .highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */
42 | .highlight .nv { color: #bb60d5 } /* Name.Variable */
43 | .highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */
44 | .highlight .w { color: #bbbbbb } /* Text.Whitespace */
45 | .highlight .mb { color: #208050 } /* Literal.Number.Bin */
46 | .highlight .mf { color: #208050 } /* Literal.Number.Float */
47 | .highlight .mh { color: #208050 } /* Literal.Number.Hex */
48 | .highlight .mi { color: #208050 } /* Literal.Number.Integer */
49 | .highlight .mo { color: #208050 } /* Literal.Number.Oct */
50 | .highlight .sa { color: #4070a0 } /* Literal.String.Affix */
51 | .highlight .sb { color: #4070a0 } /* Literal.String.Backtick */
52 | .highlight .sc { color: #4070a0 } /* Literal.String.Char */
53 | .highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */
54 | .highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */
55 | .highlight .s2 { color: #4070a0 } /* Literal.String.Double */
56 | .highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */
57 | .highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */
58 | .highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */
59 | .highlight .sx { color: #c65d09 } /* Literal.String.Other */
60 | .highlight .sr { color: #235388 } /* Literal.String.Regex */
61 | .highlight .s1 { color: #4070a0 } /* Literal.String.Single */
62 | .highlight .ss { color: #517918 } /* Literal.String.Symbol */
63 | .highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */
64 | .highlight .fm { color: #06287e } /* Name.Function.Magic */
65 | .highlight .vc { color: #bb60d5 } /* Name.Variable.Class */
66 | .highlight .vg { color: #bb60d5 } /* Name.Variable.Global */
67 | .highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */
68 | .highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */
69 | .highlight .il { color: #208050 } /* Literal.Number.Integer.Long */
--------------------------------------------------------------------------------
/doc/html/_static/underscore.js:
--------------------------------------------------------------------------------
1 | // Underscore.js 1.3.1
2 | // (c) 2009-2012 Jeremy Ashkenas, DocumentCloud Inc.
3 | // Underscore is freely distributable under the MIT license.
4 | // Portions of Underscore are inspired or borrowed from Prototype,
5 | // Oliver Steele's Functional, and John Resig's Micro-Templating.
6 | // For all details and documentation:
7 | // http://documentcloud.github.com/underscore
8 | (function(){function q(a,c,d){if(a===c)return a!==0||1/a==1/c;if(a==null||c==null)return a===c;if(a._chain)a=a._wrapped;if(c._chain)c=c._wrapped;if(a.isEqual&&b.isFunction(a.isEqual))return a.isEqual(c);if(c.isEqual&&b.isFunction(c.isEqual))return c.isEqual(a);var e=l.call(a);if(e!=l.call(c))return false;switch(e){case "[object String]":return a==String(c);case "[object Number]":return a!=+a?c!=+c:a==0?1/a==1/c:a==+c;case "[object Date]":case "[object Boolean]":return+a==+c;case "[object RegExp]":return a.source==
9 | c.source&&a.global==c.global&&a.multiline==c.multiline&&a.ignoreCase==c.ignoreCase}if(typeof a!="object"||typeof c!="object")return false;for(var f=d.length;f--;)if(d[f]==a)return true;d.push(a);var f=0,g=true;if(e=="[object Array]"){if(f=a.length,g=f==c.length)for(;f--;)if(!(g=f in a==f in c&&q(a[f],c[f],d)))break}else{if("constructor"in a!="constructor"in c||a.constructor!=c.constructor)return false;for(var h in a)if(b.has(a,h)&&(f++,!(g=b.has(c,h)&&q(a[h],c[h],d))))break;if(g){for(h in c)if(b.has(c,
10 | h)&&!f--)break;g=!f}}d.pop();return g}var r=this,G=r._,n={},k=Array.prototype,o=Object.prototype,i=k.slice,H=k.unshift,l=o.toString,I=o.hasOwnProperty,w=k.forEach,x=k.map,y=k.reduce,z=k.reduceRight,A=k.filter,B=k.every,C=k.some,p=k.indexOf,D=k.lastIndexOf,o=Array.isArray,J=Object.keys,s=Function.prototype.bind,b=function(a){return new m(a)};if(typeof exports!=="undefined"){if(typeof module!=="undefined"&&module.exports)exports=module.exports=b;exports._=b}else r._=b;b.VERSION="1.3.1";var j=b.each=
11 | b.forEach=function(a,c,d){if(a!=null)if(w&&a.forEach===w)a.forEach(c,d);else if(a.length===+a.length)for(var e=0,f=a.length;e2;a==
12 | null&&(a=[]);if(y&&a.reduce===y)return e&&(c=b.bind(c,e)),f?a.reduce(c,d):a.reduce(c);j(a,function(a,b,i){f?d=c.call(e,d,a,b,i):(d=a,f=true)});if(!f)throw new TypeError("Reduce of empty array with no initial value");return d};b.reduceRight=b.foldr=function(a,c,d,e){var f=arguments.length>2;a==null&&(a=[]);if(z&&a.reduceRight===z)return e&&(c=b.bind(c,e)),f?a.reduceRight(c,d):a.reduceRight(c);var g=b.toArray(a).reverse();e&&!f&&(c=b.bind(c,e));return f?b.reduce(g,c,d,e):b.reduce(g,c)};b.find=b.detect=
13 | function(a,c,b){var e;E(a,function(a,g,h){if(c.call(b,a,g,h))return e=a,true});return e};b.filter=b.select=function(a,c,b){var e=[];if(a==null)return e;if(A&&a.filter===A)return a.filter(c,b);j(a,function(a,g,h){c.call(b,a,g,h)&&(e[e.length]=a)});return e};b.reject=function(a,c,b){var e=[];if(a==null)return e;j(a,function(a,g,h){c.call(b,a,g,h)||(e[e.length]=a)});return e};b.every=b.all=function(a,c,b){var e=true;if(a==null)return e;if(B&&a.every===B)return a.every(c,b);j(a,function(a,g,h){if(!(e=
14 | e&&c.call(b,a,g,h)))return n});return e};var E=b.some=b.any=function(a,c,d){c||(c=b.identity);var e=false;if(a==null)return e;if(C&&a.some===C)return a.some(c,d);j(a,function(a,b,h){if(e||(e=c.call(d,a,b,h)))return n});return!!e};b.include=b.contains=function(a,c){var b=false;if(a==null)return b;return p&&a.indexOf===p?a.indexOf(c)!=-1:b=E(a,function(a){return a===c})};b.invoke=function(a,c){var d=i.call(arguments,2);return b.map(a,function(a){return(b.isFunction(c)?c||a:a[c]).apply(a,d)})};b.pluck=
15 | function(a,c){return b.map(a,function(a){return a[c]})};b.max=function(a,c,d){if(!c&&b.isArray(a))return Math.max.apply(Math,a);if(!c&&b.isEmpty(a))return-Infinity;var e={computed:-Infinity};j(a,function(a,b,h){b=c?c.call(d,a,b,h):a;b>=e.computed&&(e={value:a,computed:b})});return e.value};b.min=function(a,c,d){if(!c&&b.isArray(a))return Math.min.apply(Math,a);if(!c&&b.isEmpty(a))return Infinity;var e={computed:Infinity};j(a,function(a,b,h){b=c?c.call(d,a,b,h):a;bd?1:0}),"value")};b.groupBy=function(a,c){var d={},e=b.isFunction(c)?c:function(a){return a[c]};j(a,function(a,b){var c=e(a,b);(d[c]||(d[c]=[])).push(a)});return d};b.sortedIndex=function(a,
17 | c,d){d||(d=b.identity);for(var e=0,f=a.length;e>1;d(a[g])=0})})};b.difference=function(a){var c=b.flatten(i.call(arguments,1));return b.filter(a,function(a){return!b.include(c,a)})};b.zip=function(){for(var a=i.call(arguments),c=b.max(b.pluck(a,"length")),d=Array(c),e=0;e=0;d--)b=[a[d].apply(this,b)];return b[0]}};
24 | b.after=function(a,b){return a<=0?b():function(){if(--a<1)return b.apply(this,arguments)}};b.keys=J||function(a){if(a!==Object(a))throw new TypeError("Invalid object");var c=[],d;for(d in a)b.has(a,d)&&(c[c.length]=d);return c};b.values=function(a){return b.map(a,b.identity)};b.functions=b.methods=function(a){var c=[],d;for(d in a)b.isFunction(a[d])&&c.push(d);return c.sort()};b.extend=function(a){j(i.call(arguments,1),function(b){for(var d in b)a[d]=b[d]});return a};b.defaults=function(a){j(i.call(arguments,
25 | 1),function(b){for(var d in b)a[d]==null&&(a[d]=b[d])});return a};b.clone=function(a){return!b.isObject(a)?a:b.isArray(a)?a.slice():b.extend({},a)};b.tap=function(a,b){b(a);return a};b.isEqual=function(a,b){return q(a,b,[])};b.isEmpty=function(a){if(b.isArray(a)||b.isString(a))return a.length===0;for(var c in a)if(b.has(a,c))return false;return true};b.isElement=function(a){return!!(a&&a.nodeType==1)};b.isArray=o||function(a){return l.call(a)=="[object Array]"};b.isObject=function(a){return a===Object(a)};
26 | b.isArguments=function(a){return l.call(a)=="[object Arguments]"};if(!b.isArguments(arguments))b.isArguments=function(a){return!(!a||!b.has(a,"callee"))};b.isFunction=function(a){return l.call(a)=="[object Function]"};b.isString=function(a){return l.call(a)=="[object String]"};b.isNumber=function(a){return l.call(a)=="[object Number]"};b.isNaN=function(a){return a!==a};b.isBoolean=function(a){return a===true||a===false||l.call(a)=="[object Boolean]"};b.isDate=function(a){return l.call(a)=="[object Date]"};
27 | b.isRegExp=function(a){return l.call(a)=="[object RegExp]"};b.isNull=function(a){return a===null};b.isUndefined=function(a){return a===void 0};b.has=function(a,b){return I.call(a,b)};b.noConflict=function(){r._=G;return this};b.identity=function(a){return a};b.times=function(a,b,d){for(var e=0;e /g,">").replace(/"/g,""").replace(/'/g,"'").replace(/\//g,"/")};b.mixin=function(a){j(b.functions(a),
28 | function(c){K(c,b[c]=a[c])})};var L=0;b.uniqueId=function(a){var b=L++;return a?a+b:b};b.templateSettings={evaluate:/<%([\s\S]+?)%>/g,interpolate:/<%=([\s\S]+?)%>/g,escape:/<%-([\s\S]+?)%>/g};var t=/.^/,u=function(a){return a.replace(/\\\\/g,"\\").replace(/\\'/g,"'")};b.template=function(a,c){var d=b.templateSettings,d="var __p=[],print=function(){__p.push.apply(__p,arguments);};with(obj||{}){__p.push('"+a.replace(/\\/g,"\\\\").replace(/'/g,"\\'").replace(d.escape||t,function(a,b){return"',_.escape("+
29 | u(b)+"),'"}).replace(d.interpolate||t,function(a,b){return"',"+u(b)+",'"}).replace(d.evaluate||t,function(a,b){return"');"+u(b).replace(/[\r\n\t]/g," ")+";__p.push('"}).replace(/\r/g,"\\r").replace(/\n/g,"\\n").replace(/\t/g,"\\t")+"');}return __p.join('');",e=new Function("obj","_",d);return c?e(c,b):function(a){return e.call(this,a,b)}};b.chain=function(a){return b(a).chain()};var m=function(a){this._wrapped=a};b.prototype=m.prototype;var v=function(a,c){return c?b(a).chain():a},K=function(a,c){m.prototype[a]=
30 | function(){var a=i.call(arguments);H.call(a,this._wrapped);return v(c.apply(b,a),this._chain)}};b.mixin(b);j("pop,push,reverse,shift,sort,splice,unshift".split(","),function(a){var b=k[a];m.prototype[a]=function(){var d=this._wrapped;b.apply(d,arguments);var e=d.length;(a=="shift"||a=="splice")&&e===0&&delete d[0];return v(d,this._chain)}});j(["concat","join","slice"],function(a){var b=k[a];m.prototype[a]=function(){return v(b.apply(this._wrapped,arguments),this._chain)}});m.prototype.chain=function(){this._chain=
31 | true;return this};m.prototype.value=function(){return this._wrapped}}).call(this);
32 |
--------------------------------------------------------------------------------
/doc/html/genindex.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | Index — ffmpeg-python documentation
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
Index
39 |
40 |
41 |
A
42 | |
C
43 | |
D
44 | |
E
45 | |
F
46 | |
G
47 | |
H
48 | |
I
49 | |
M
50 | |
O
51 | |
P
52 | |
R
53 | |
S
54 | |
T
55 | |
V
56 | |
Z
57 |
58 |
59 |
A
60 |
66 |
67 |
C
68 |
82 |
83 |
D
84 |
94 |
95 |
E
96 |
102 |
103 |
F
104 |
118 |
119 |
G
120 |
126 |
127 |
H
128 |
138 |
139 |
I
140 |
146 |
147 |
M
148 |
154 |
155 |
O
156 |
168 |
169 |
P
170 |
176 |
177 |
R
178 |
188 |
189 |
S
190 |
200 |
201 |
T
202 |
208 |
209 |
V
210 |
226 |
227 |
Z
228 |
234 |
235 |
236 |
237 |
238 |
239 |
240 |
254 |
255 |
256 |
268 |
272 |
273 |
--------------------------------------------------------------------------------
/doc/html/objects.inv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/doc/html/objects.inv
--------------------------------------------------------------------------------
/doc/html/py-modindex.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Python Module Index — ffmpeg-python documentation
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
22 |
23 |
24 |
25 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
Python Module Index
45 |
46 |
49 |
50 |
51 |
52 |
53 | f
54 |
55 |
56 |
57 | ffmpeg
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
79 |
80 |
81 |
93 |
97 |
98 |
--------------------------------------------------------------------------------
/doc/html/search.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Search — ffmpeg-python documentation
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
Search
42 |
43 |
44 |
45 | Please activate JavaScript to enable the search
46 | functionality.
47 |
48 |
49 |
50 | From here you can search these documents. Enter your search
51 | words into the box below and click "search". Note that the search
52 | function will automatically search for all of the words. Pages
53 | containing fewer words won't appear in the result list.
54 |
55 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
72 |
73 |
74 |
86 |
90 |
91 |
--------------------------------------------------------------------------------
/doc/html/searchindex.js:
--------------------------------------------------------------------------------
1 | Search.setIndex({docnames:["index"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,sphinx:56},filenames:["index.rst"],objects:{"":{ffmpeg:[0,0,0,"-"]},"ffmpeg.Stream":{audio:[0,3,1,""],video:[0,3,1,""],view:[0,3,1,""]},ffmpeg:{Error:[0,1,1,""],Stream:[0,2,1,""],colorchannelmixer:[0,4,1,""],compile:[0,4,1,""],concat:[0,4,1,""],crop:[0,4,1,""],drawbox:[0,4,1,""],drawtext:[0,4,1,""],filter:[0,4,1,""],filter_:[0,4,1,""],filter_multi_output:[0,4,1,""],get_args:[0,4,1,""],hflip:[0,4,1,""],hue:[0,4,1,""],input:[0,4,1,""],merge_outputs:[0,4,1,""],output:[0,4,1,""],overlay:[0,4,1,""],overwrite_output:[0,4,1,""],probe:[0,4,1,""],run:[0,4,1,""],run_async:[0,4,1,""],setpts:[0,4,1,""],trim:[0,4,1,""],vflip:[0,4,1,""],view:[0,4,1,""],zoompan:[0,4,1,""]}},objnames:{"0":["py","module","Python module"],"1":["py","exception","Python exception"],"2":["py","class","Python class"],"3":["py","method","Python method"],"4":["py","function","Python 
function"]},objtypes:{"0":"py:module","1":"py:exception","2":"py:class","3":"py:method","4":"py:function"},terms:{"break":0,"case":0,"class":0,"default":0,"final":0,"function":0,"return":0,"true":0,"while":0,For:0,PTS:0,The:0,These:0,Used:0,accept:0,access:0,acodec:0,action:0,activ:0,adjust:0,aecho:0,after:0,alia:0,all:0,allow:0,alpha:0,also:0,altern:0,alwai:0,angl:0,ani:0,anoth:0,appli:0,arab:0,area:0,arg:0,argument:0,around:0,ascent:0,asetpt:0,ask:0,aspect:0,astyp:0,asynchron:0,atom:0,attempt:0,audio:0,audio_bitr:0,author:0,automat:0,avoid:0,axi:0,background:0,base:0,baselin:0,basetim:0,befor:0,behavior:0,behind:0,below:0,between:0,black:0,blend:0,border:0,bordercolor:0,borderw:0,both:0,box:0,boxborderw:0,boxcolor:0,bracket:0,bright:0,build:0,built:0,call:0,can:0,captur:0,capture_stderr:0,capture_stdout:0,care:0,certain:0,chang:0,channel:0,charact:0,check:0,child:0,chroma:0,clip:0,close:0,cmd:0,code:0,codec:0,collid:0,color:0,colorchannelmix:0,com:0,combin:0,command:0,commnad:0,common:0,commun:0,compil:0,concat:0,concaten:0,configur:0,confus:0,connect:0,constant:0,construct:0,consult:0,contain:0,continu:0,convert:0,coord:0,coordin:0,corner:0,correctli:0,correspond:0,count:0,creat:0,crop:0,crop_bitmap:0,custom:0,dar:0,data:0,debug:0,degre:0,deprec:0,descent:0,detail:0,dictionari:0,differ:0,dilemma:0,directli:0,disabl:0,displai:0,distanc:0,document:0,downstream:0,draw:0,drawbox:0,drawn:0,drawtext:0,drop:0,due:0,durat:0,dure:0,dynam:0,each:0,edg:0,effect:0,either:0,empti:0,emul:0,enabl:0,encod:0,encount:0,end:0,end_fram:0,end_pt:0,endal:0,eof:0,eof_act:0,equival:0,err:0,error:0,escape_text:0,etc:0,eval:0,evalu:0,even:0,exactli:0,exampl:0,except:0,exit:0,expand:0,expans:0,explicitli:0,expr:0,express:0,fail:0,fallback:0,fals:0,famili:0,ffmpeg_arg:0,ffprobe:0,file:0,filenam:0,filter:0,filter_:0,filter_multi_output:0,filter_nam:0,first:0,fix:0,fix_bound:0,flag:0,flip:0,follow:0,font:0,fontcolor:0,fontcolor_expr:0,fontconfig:0,fontfil:0,fontsiz:0,forc:0,force_autohint:0,f
ormat:0,fps:0,frame:0,frame_num:0,from:0,frombuff:0,ft_load_:0,ft_load_flag:0,gbrp:0,gener:0,get_arg:0,github:0,given:0,glyph:0,graph:0,greater:0,grid:0,handl:0,has:0,have:0,hd720:0,height:0,heigth:0,hflip:0,higher:0,highest:0,horizont:0,hour:0,how:0,hsub:0,http:0,hue:0,huge:0,ignore_global_advance_width:0,ignore_transform:0,imag:0,immedi:0,implement:0,in_byt:0,in_filenam:0,in_fram:0,includ:0,incom:0,independ:0,index:0,inform:0,init:0,initi:0,input:0,input_data:0,instead:0,interpret:0,intrins:0,invalid:0,invert:0,invok:0,its:0,join:0,json:0,just:0,kept:0,keyword:0,kkroen:0,kwarg:0,label:0,last:0,later:0,layout:0,left:0,level:0,libfontconfig:0,libfreetyp:0,libfribidi:0,librari:0,line:0,line_h:0,line_spac:0,linear_design:0,list:0,load:0,longest:0,lowest:0,luma:0,mai:0,main:0,main_h:0,main_parent_nod:0,main_w:0,mandatori:0,mani:0,manual:0,map:0,max:0,max_glyph_a:0,max_glyph_d:0,max_glyph_h:0,max_glyph_w:0,maximum:0,mean:0,merge_output:0,messag:0,microsecond:0,min:0,miss:0,mix:0,mode:0,modifi:0,modul:0,monochrom:0,more:0,most:0,mp4:0,multipl:0,must:0,name:0,nan:0,necessari:0,need:0,neg:0,no_autohint:0,no_bitmap:0,no_hint:0,no_recurs:0,no_scal:0,node:0,node_typ:0,non:0,none:0,normal:0,number:0,numpi:0,object:0,obtain:0,offici:0,offset:0,onc:0,one:0,onli:0,oper:0,option:0,order:0,orient:0,other:0,otherwis:0,out:0,out_filenam:0,out_fram:0,outgo:0,outlin:0,output:0,over:0,overlai:0,overlaid:0,overlay_parent_nod:0,overrid:0,overwrit:0,overwrite_output:0,pack:0,pad:0,page:0,pan:0,paramet:0,partial:0,pass:0,path:0,pcm:0,pedant:0,pipe:0,pipe_stderr:0,pipe_stdin:0,pipe_stdout:0,pipelin:0,pix_fmt:0,pixel:0,place:0,planar:0,pleas:0,point:0,popen:0,portion:0,posit:0,preced:0,present:0,preserv:0,probe:0,process1:0,process2:0,process:0,produc:0,properti:0,provid:0,pts:0,quiet:0,radian:0,rais:0,rand:0,random:0,rang:0,rate:0,ratio:0,rawvideo:0,read:0,reason:0,refer:0,rel:0,relat:0,reload:0,render:0,repeat:0,repeatlast:0,repres:0,represent:0,reshap:0,resolut:0,respect:0,result:0,retriev
:0,revers:0,rgb24:0,rgb:0,right:0,run:0,run_async:0,same:0,sampl:0,san:0,sar:0,satur:0,search:0,second:0,secondari:0,section:0,see:0,segment:0,select:0,sent:0,separ:0,sequenc:0,set:0,setpt:0,shadow:0,shadowcolor:0,shadowi:0,shadowx:0,shape:0,shorter:0,shortest:0,shorthand:0,should:0,shown:0,silenc:0,singl:0,size:0,sloppi:0,some:0,space:0,special:0,specifi:0,split0:0,split1:0,split:0,stai:0,standard:0,start:0,start_fram:0,start_numb:0,start_pt:0,stderr:0,stdin:0,stdout:0,stream1:0,stream2:0,stream3:0,stream:0,stream_spec:0,streams_and_filenam:0,strftime:0,string:0,subpart:0,subprocess:0,subsampl:0,suffix:0,suppli:0,support:0,sure:0,synchron:0,synopsi:0,syntax:0,system:0,tab:0,tabsiz:0,take:0,taken:0,tc24hmax:0,tell:0,termin:0,text:0,text_h:0,text_shap:0,text_w:0,textfil:0,than:0,thei:0,them:0,thi:0,thick:0,through:0,thrown:0,time:0,timebas:0,timecod:0,timecode_r:0,timestamp:0,tobyt:0,togeth:0,top:0,track:0,tri:0,trim:0,tupl:0,type:0,uint8:0,unit:0,unknown:0,unsaf:0,until:0,updat:0,upper:0,upstream:0,upstream_label:0,upstream_nod:0,upstream_selector:0,upward:0,url:0,use:0,used:0,useful:0,user:0,uses:0,using:0,utf:0,util:0,valu:0,variabl:0,variou:0,vcodec:0,verbatim:0,vertic:0,vertical_layout:0,vflip:0,video:0,video_bitr:0,view:0,visibl:0,vsub:0,wai:0,wait:0,well:0,whatev:0,when:0,where:0,which:0,white:0,why:0,width:0,within:0,without:0,work:0,wrap:0,write:0,you:0,yuv420:0,yuv420p:0,yuv422:0,yuv422p:0,yuv444:0,zero:0,zoom:0,zoompan:0},titles:["ffmpeg-python: Python bindings for FFmpeg"],titleterms:{bind:0,ffmpeg:0,indic:0,python:0,tabl:0}})
--------------------------------------------------------------------------------
/doc/jupyter-demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/doc/jupyter-demo.gif
--------------------------------------------------------------------------------
/doc/jupyter-screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/doc/jupyter-screenshot.png
--------------------------------------------------------------------------------
/doc/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/doc/logo.png
--------------------------------------------------------------------------------
/doc/logo.xcf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/doc/logo.xcf
--------------------------------------------------------------------------------
/doc/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/doc/screenshot.png
--------------------------------------------------------------------------------
/doc/src/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # ffmpeg-python documentation build configuration file, created by
4 | # sphinx-quickstart on Sat May 27 14:30:53 2017.
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 |
15 | # If extensions (or modules to document with autodoc) are in another directory,
16 | # add these directories to sys.path here. If the directory is relative to the
17 | # documentation root, use os.path.abspath to make it absolute, like shown here.
18 | #
19 | import os
20 | import sys
21 | sys.path.insert(0, os.path.abspath('../..'))  # repo root, so autodoc can import the `ffmpeg` package
22 |
23 |
24 | # -- General configuration ------------------------------------------------
25 |
26 | # If your documentation needs a minimal Sphinx version, state it here.
27 | #
28 | # needs_sphinx = '1.0'
29 |
30 | # Add any Sphinx extension module names here, as strings. They can be
31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
32 | # ones.
33 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
34 |
35 | # Add any paths that contain templates here, relative to this directory.
36 | templates_path = ['_templates']
37 |
38 | # The suffix(es) of source filenames.
39 | # You can specify multiple suffix as a list of string:
40 | #
41 | # source_suffix = ['.rst', '.md']
42 | source_suffix = '.rst'
43 |
44 | # The master toctree document.
45 | master_doc = 'index'
46 |
47 | # General information about the project.
48 | project = u'ffmpeg-python'
49 | copyright = u'2017, Karl Kroening'
50 | author = u'Karl Kroening'
51 |
52 | # The version info for the project you're documenting, acts as replacement for
53 | # |version| and |release|, also used in various other places throughout the
54 | # built documents.
55 | #
56 | # The short X.Y version.
57 | version = u''
58 | # The full version, including alpha/beta/rc tags.
59 | release = u''
60 |
61 | # The language for content autogenerated by Sphinx. Refer to documentation
62 | # for a list of supported languages.
63 | #
64 | # This is also used if you do content translation via gettext catalogs.
65 | # Usually you set "language" from the command line for these cases.
66 | language = None
67 |
68 | # List of patterns, relative to source directory, that match files and
69 | # directories to ignore when looking for source files.
70 | # This patterns also effect to html_static_path and html_extra_path
71 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
72 |
73 | # The name of the Pygments (syntax highlighting) style to use.
74 | pygments_style = 'sphinx'
75 |
76 | # If true, `todo` and `todoList` produce output, else they produce nothing.
77 | todo_include_todos = False
78 |
79 |
80 | # -- Options for HTML output ----------------------------------------------
81 |
82 | # The theme to use for HTML and HTML Help pages. See the documentation for
83 | # a list of builtin themes.
84 | #
85 | html_theme = 'nature'
86 |
87 | # Theme options are theme-specific and customize the look and feel of a theme
88 | # further. For a list of options available for each theme, see the
89 | # documentation.
90 | #
91 | # html_theme_options = {}
92 |
93 | # Add any paths that contain custom static files (such as style sheets) here,
94 | # relative to this directory. They are copied after the builtin static files,
95 | # so a file named "default.css" will overwrite the builtin "default.css".
96 | html_static_path = ['_static']
97 |
98 |
99 | # -- Options for HTMLHelp output ------------------------------------------
100 |
101 | # Output file base name for HTML help builder.
102 | htmlhelp_basename = 'ffmpeg-pythondoc'
103 |
104 |
105 | # -- Options for LaTeX output ---------------------------------------------
106 |
107 | latex_elements = {
108 | # The paper size ('letterpaper' or 'a4paper').
109 | #
110 | # 'papersize': 'letterpaper',
111 |
112 | # The font size ('10pt', '11pt' or '12pt').
113 | #
114 | # 'pointsize': '10pt',
115 |
116 | # Additional stuff for the LaTeX preamble.
117 | #
118 | # 'preamble': '',
119 |
120 | # Latex figure (float) alignment
121 | #
122 | # 'figure_align': 'htbp',
123 | }
124 |
125 | # Grouping the document tree into LaTeX files. List of tuples
126 | # (source start file, target name, title,
127 | # author, documentclass [howto, manual, or own class]).
128 | latex_documents = [
129 | (master_doc, 'ffmpeg-python.tex', u'ffmpeg-python Documentation',
130 | u'Karl Kroening', 'manual'),
131 | ]
132 |
133 |
134 | # -- Options for manual page output ---------------------------------------
135 |
136 | # One entry per manual page. List of tuples
137 | # (source start file, name, description, authors, manual section).
138 | man_pages = [
139 | (master_doc, 'ffmpeg-python', u'ffmpeg-python Documentation',
140 | [author], 1)
141 | ]
142 |
143 |
144 | # -- Options for Texinfo output -------------------------------------------
145 |
146 | # Grouping the document tree into Texinfo files. List of tuples
147 | # (source start file, target name, title, author,
148 | # dir menu entry, description, category)
149 | texinfo_documents = [
150 | (master_doc, 'ffmpeg-python', u'ffmpeg-python Documentation',
151 | author, 'ffmpeg-python', 'One line description of project.',
152 | 'Miscellaneous'),
153 | ]
154 |
155 |
156 |
157 |
--------------------------------------------------------------------------------
/doc/src/index.rst:
--------------------------------------------------------------------------------
1 | ffmpeg-python: Python bindings for FFmpeg
2 | =========================================
3 |
4 | :Github: https://github.com/kkroening/ffmpeg-python
5 |
6 | .. toctree::
7 | :maxdepth: 2
8 | :caption: Contents:
9 |
10 | .. automodule:: ffmpeg
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 |
16 | Indices and tables
17 | ==================
18 |
19 | * :ref:`genindex`
20 | * :ref:`modindex`
21 | * :ref:`search`
22 |
--------------------------------------------------------------------------------
/doc/update-gh-pages.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Publish the freshly built ../html docs to the gh-pages branch.
3 | set -ex
4 | CLONE_URL=$(git remote -v | grep origin | head -n1 | awk '{print $2}')
5 | if [ ! -d gh-pages ]; then
6 |     # Quote the URL so remotes containing spaces/metacharacters don't word-split.
7 |     git clone -b gh-pages "${CLONE_URL}" gh-pages
8 | else
9 |     (cd gh-pages && git pull origin gh-pages)
10 | fi
11 | 
12 | cd gh-pages
13 | rm -rf *
14 | touch .nojekyll
15 | cp -r ../html/* .
16 | git add -A
17 | # `git commit` exits non-zero when there is nothing to commit; without the
18 | # `|| true`, `set -e` would abort the script before the push on a no-op rebuild.
19 | git commit -m 'Update docs' || true
20 | git push origin gh-pages
21 | 
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # Examples
2 |
3 | ## [Get video info (ffprobe)](https://github.com/kkroening/ffmpeg-python/blob/master/examples/video_info.py#L15)
4 |
5 | ```python
6 | probe = ffmpeg.probe(args.in_filename)
7 | video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
8 | width = int(video_stream['width'])
9 | height = int(video_stream['height'])
10 | ```
11 |
12 | ## [Generate thumbnail for video](https://github.com/kkroening/ffmpeg-python/blob/master/examples/get_video_thumbnail.py#L21)
13 |
14 |
15 |
16 | ```python
17 | (
18 | ffmpeg
19 | .input(in_filename, ss=time)
20 | .filter('scale', width, -1)
21 | .output(out_filename, vframes=1)
22 | .run()
23 | )
24 | ```
25 |
26 | ## [Convert video to numpy array](https://github.com/kkroening/ffmpeg-python/blob/master/examples/ffmpeg-numpy.ipynb)
27 |
28 |
29 |
30 | ```python
31 | out, _ = (
32 | ffmpeg
33 | .input('in.mp4')
34 | .output('pipe:', format='rawvideo', pix_fmt='rgb24')
35 | .run(capture_stdout=True)
36 | )
37 | video = (
38 | np
39 | .frombuffer(out, np.uint8)
40 | .reshape([-1, height, width, 3])
41 | )
42 | ```
43 |
44 | ## [Read single video frame as jpeg through pipe](https://github.com/kkroening/ffmpeg-python/blob/master/examples/read_frame_as_jpeg.py#L16)
45 |
46 |
47 |
48 | ```python
49 | out, _ = (
50 | ffmpeg
51 | .input(in_filename)
52 | .filter('select', 'gte(n,{})'.format(frame_num))
53 | .output('pipe:', vframes=1, format='image2', vcodec='mjpeg')
54 | .run(capture_stdout=True)
55 | )
56 | ```
57 |
58 | ## [Convert sound to raw PCM audio](https://github.com/kkroening/ffmpeg-python/blob/master/examples/transcribe.py#L23)
59 |
60 |
61 |
62 | ```python
63 | out, _ = (ffmpeg
64 | .input(in_filename, **input_kwargs)
65 | .output('-', format='s16le', acodec='pcm_s16le', ac=1, ar='16k')
66 | .overwrite_output()
67 | .run(capture_stdout=True)
68 | )
69 | ```
70 |
71 | ## Assemble video from sequence of frames
72 |
73 |
74 |
75 | ```python
76 | (
77 | ffmpeg
78 | .input('/path/to/jpegs/*.jpg', pattern_type='glob', framerate=25)
79 | .output('movie.mp4')
80 | .run()
81 | )
82 | ```
83 |
84 | With additional filtering:
85 |
86 |
87 |
88 | ```python
89 | (
90 | ffmpeg
91 | .input('/path/to/jpegs/*.jpg', pattern_type='glob', framerate=25)
92 | .filter('deflicker', mode='pm', size=10)
93 | .filter('scale', size='hd1080', force_original_aspect_ratio='increase')
94 | .output('movie.mp4', crf=20, preset='slower', movflags='faststart', pix_fmt='yuv420p')
95 | .view(filename='filter_graph')
96 | .run()
97 | )
98 | ```
99 |
100 | ## Audio/video pipeline
101 |
102 |
103 |
104 | ```python
105 | in1 = ffmpeg.input('in1.mp4')
106 | in2 = ffmpeg.input('in2.mp4')
107 | v1 = in1.video.hflip()
108 | a1 = in1.audio
109 | v2 = in2.video.filter('reverse').filter('hue', s=0)
110 | a2 = in2.audio.filter('areverse').filter('aphaser')
111 | joined = ffmpeg.concat(v1, a1, v2, a2, v=1, a=1).node
112 | v3 = joined[0]
113 | a3 = joined[1].filter('volume', 0.8)
114 | out = ffmpeg.output(v3, a3, 'out.mp4')
115 | out.run()
116 | ```
117 |
118 | ## Mono to stereo with offsets and video
119 |
120 |
121 |
122 | ```python
123 | audio_left = (
124 | ffmpeg
125 | .input('audio-left.wav')
126 | .filter('atrim', start=5)
127 | .filter('asetpts', 'PTS-STARTPTS')
128 | )
129 |
130 | audio_right = (
131 | ffmpeg
132 | .input('audio-right.wav')
133 | .filter('atrim', start=10)
134 | .filter('asetpts', 'PTS-STARTPTS')
135 | )
136 |
137 | input_video = ffmpeg.input('input-video.mp4')
138 |
139 | (
140 | ffmpeg
141 | .filter((audio_left, audio_right), 'join', inputs=2, channel_layout='stereo')
142 | .output(input_video.video, 'output-video.mp4', shortest=None, vcodec='copy')
143 | .overwrite_output()
144 | .run()
145 | )
146 | ```
147 |
148 | ## [Jupyter Frame Viewer](https://github.com/kkroening/ffmpeg-python/blob/master/examples/ffmpeg-numpy.ipynb)
149 |
150 |
151 |
152 | ## [Jupyter Stream Editor](https://github.com/kkroening/ffmpeg-python/blob/master/examples/ffmpeg-numpy.ipynb)
153 |
154 |
155 |
156 | ## [Tensorflow Streaming](https://github.com/kkroening/ffmpeg-python/blob/master/examples/tensorflow_stream.py)
157 |
158 |
159 |
160 | - Decode input video with ffmpeg
161 | - Process video with tensorflow using "deep dream" example
162 | - Encode output video with ffmpeg
163 |
164 | ```python
165 | process1 = (
166 | ffmpeg
167 | .input(in_filename)
168 | .output('pipe:', format='rawvideo', pix_fmt='rgb24', vframes=8)
169 | .run_async(pipe_stdout=True)
170 | )
171 |
172 | process2 = (
173 | ffmpeg
174 | .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
175 | .output(out_filename, pix_fmt='yuv420p')
176 | .overwrite_output()
177 | .run_async(pipe_stdin=True)
178 | )
179 |
180 | while True:
181 | in_bytes = process1.stdout.read(width * height * 3)
182 | if not in_bytes:
183 | break
184 | in_frame = (
185 | np
186 | .frombuffer(in_bytes, np.uint8)
187 | .reshape([height, width, 3])
188 | )
189 |
190 | # See examples/tensorflow_stream.py:
191 | out_frame = deep_dream.process_frame(in_frame)
192 |
193 | process2.stdin.write(
194 | out_frame
195 | .astype(np.uint8)
196 | .tobytes()
197 | )
198 |
199 | process2.stdin.close()
200 | process1.wait()
201 | process2.wait()
202 | ```
203 |
204 |
205 |
206 | ## [FaceTime webcam input (OS X)](https://github.com/kkroening/ffmpeg-python/blob/master/examples/facetime.py)
207 |
208 | ```python
209 | (
210 | ffmpeg
211 | .input('FaceTime', format='avfoundation', pix_fmt='uyvy422', framerate=30)
212 | .output('out.mp4', pix_fmt='yuv420p', vframes=100)
213 | .run()
214 | )
215 | ```
216 |
217 | ## Stream from a local video to HTTP server
218 |
219 | ```python
220 | video_format = "flv"
221 | server_url = "http://127.0.0.1:8080"
222 |
223 | process = (
224 | ffmpeg
225 | .input("input.mp4")
226 | .output(
227 | server_url,
228 |         codec = "copy", # use the same codecs as the original video
229 | listen=1, # enables HTTP server
230 | f=video_format)
231 | .global_args("-re") # argument to act as a live stream
232 | .run()
233 | )
234 |
235 | ```
236 |
237 | To receive the video, you can use ffplay in the terminal:
238 |
239 | ```
240 | $ ffplay -f flv http://localhost:8080
241 | ```
242 |
243 | ## Stream from RTSP server to TCP socket
244 |
245 | ```python
246 | packet_size = 4096
247 |
248 | process = (
249 | ffmpeg
250 |     .input('rtsp://127.0.0.1:8554/default')
251 | .output('-', format='h264')
252 | .run_async(pipe_stdout=True)
253 | )
254 |
255 | while process.poll() is None:
256 | packet = process.stdout.read(packet_size)
257 | try:
258 | tcp_socket.send(packet)
259 | except socket.error:
260 | process.stdout.close()
261 | process.wait()
262 | break
263 | ```
264 |
--------------------------------------------------------------------------------
/examples/facetime.py:
--------------------------------------------------------------------------------
1 | import ffmpeg
2 | 
3 | (
4 |     ffmpeg
5 |     .input('FaceTime', format='avfoundation', pix_fmt='uyvy422', framerate=30)  # macOS FaceTime camera via the avfoundation input device
6 |     .output('out.mp4', pix_fmt='yuv420p', vframes=100)  # stop after 100 frames
7 |     .run()
8 | )
9 | 
--------------------------------------------------------------------------------
/examples/ffmpeg-numpy.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from ipywidgets import interact\n",
10 | "from matplotlib import pyplot as plt\n",
11 | "import ffmpeg\n",
12 | "import ipywidgets as widgets\n",
13 | "import numpy as np"
14 | ]
15 | },
16 | {
17 | "cell_type": "code",
18 | "execution_count": 2,
19 | "metadata": {},
20 | "outputs": [],
21 | "source": [
22 | "probe = ffmpeg.probe('in.mp4')\n",
23 | "video_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')\n",
24 | "width = int(video_info['width'])\n",
25 | "height = int(video_info['height'])\n",
26 | "num_frames = int(video_info['nb_frames'])"
27 | ]
28 | },
29 | {
30 | "cell_type": "code",
31 | "execution_count": 3,
32 | "metadata": {},
33 | "outputs": [
34 | {
35 | "data": {
36 | "application/vnd.jupyter.widget-view+json": {
37 | "model_id": "5f63dc164956464c994ec58d86ee7cd9",
38 | "version_major": 2,
39 | "version_minor": 0
40 | },
41 | "text/plain": [
42 | "interactive(children=(IntSlider(value=0, description='frame', max=209), Output()), _dom_classes=('widget-inter…"
43 | ]
44 | },
45 | "metadata": {},
46 | "output_type": "display_data"
47 | }
48 | ],
49 | "source": [
50 | "out, err = (\n",
51 | " ffmpeg\n",
52 | " .input('in.mp4')\n",
53 | " .output('pipe:', format='rawvideo', pix_fmt='rgb24')\n",
54 | " .run(capture_stdout=True)\n",
55 | ")\n",
56 | "video = (\n",
57 | " np\n",
58 | " .frombuffer(out, np.uint8)\n",
59 | " .reshape([-1, height, width, 3])\n",
60 | ")\n",
61 | "\n",
62 | "@interact(frame=(0, num_frames))\n",
63 | "def show_frame(frame=0):\n",
64 | " plt.imshow(video[frame,:,:,:])"
65 | ]
66 | },
67 | {
68 | "cell_type": "code",
69 | "execution_count": 4,
70 | "metadata": {},
71 | "outputs": [
72 | {
73 | "data": {
74 | "application/vnd.jupyter.widget-view+json": {
75 | "model_id": "84bcac52195f47f8854f09acd7666b84",
76 | "version_major": 2,
77 | "version_minor": 0
78 | },
79 | "text/plain": [
80 | "interactive(children=(Checkbox(value=True, description='enable_overlay'), Checkbox(value=True, description='en…"
81 | ]
82 | },
83 | "metadata": {},
84 | "output_type": "display_data"
85 | }
86 | ],
87 | "source": [
88 | "from io import BytesIO\n",
89 | "from PIL import Image\n",
90 | "\n",
91 | "\n",
92 | "def extract_frame(stream, frame_num):\n",
93 | " while isinstance(stream, ffmpeg.nodes.OutputStream):\n",
94 | " stream = stream.node.incoming_edges[0].upstream_node.stream()\n",
95 | " out, _ = (\n",
96 | " stream\n",
97 | " .filter_('select', 'gte(n,{})'.format(frame_num))\n",
98 | " .output('pipe:', format='rawvideo', pix_fmt='rgb24', vframes=1)\n",
99 | " .run(capture_stdout=True, capture_stderr=True)\n",
100 | " )\n",
101 | " return np.frombuffer(out, np.uint8).reshape([height, width, 3])\n",
102 | "\n",
103 | "\n",
104 | "def png_to_np(png_bytes):\n",
105 | " buffer = BytesIO(png_bytes)\n",
106 | " pil_image = Image.open(buffer)\n",
107 | " return np.array(pil_image)\n",
108 | " \n",
109 | "\n",
110 | "def build_graph(\n",
111 | " enable_overlay, flip_overlay, enable_box, box_x, box_y,\n",
112 | " thickness, color):\n",
113 | "\n",
114 | " stream = ffmpeg.input('in.mp4')\n",
115 | "\n",
116 | " if enable_overlay:\n",
117 | " overlay = ffmpeg.input('overlay.png')\n",
118 | " if flip_overlay:\n",
119 | " overlay = overlay.hflip()\n",
120 | " stream = stream.overlay(overlay)\n",
121 | "\n",
122 | " if enable_box:\n",
123 | " stream = stream.drawbox(\n",
124 | " box_x, box_y, 120, 120, color=color, t=thickness)\n",
125 | "\n",
126 | " return stream.output('out.mp4')\n",
127 | "\n",
128 | "\n",
129 | "def show_image(ax, stream, frame_num):\n",
130 | " try:\n",
131 | " image = extract_frame(stream, frame_num)\n",
132 | " ax.imshow(image)\n",
133 | " ax.axis('off')\n",
134 | " except ffmpeg.Error as e:\n",
135 | " print(e.stderr.decode())\n",
136 | "\n",
137 | "\n",
138 | "def show_graph(ax, stream, detail):\n",
139 | " data = ffmpeg.view(stream, detail=detail, pipe=True)\n",
140 | " image = png_to_np(data)\n",
141 | " ax.imshow(image, aspect='equal', interpolation='hanning')\n",
142 | " ax.set_xlim(0, 1100)\n",
143 | " ax.axis('off')\n",
144 | "\n",
145 | "\n",
146 | "@interact(\n",
147 | " frame_num=(0, num_frames),\n",
148 | " box_x=(0, 200),\n",
149 | " box_y=(0, 200),\n",
150 | " thickness=(1, 40),\n",
151 | " color=['red', 'green', 'magenta', 'blue'],\n",
152 | ")\n",
153 | "def f(\n",
154 | " enable_overlay=True,\n",
155 | " enable_box=True,\n",
156 | " flip_overlay=True,\n",
157 | " graph_detail=False,\n",
158 | " frame_num=0,\n",
159 | " box_x=50,\n",
160 | " box_y=50,\n",
161 | " thickness=5,\n",
162 | " color='red'):\n",
163 | "\n",
164 | " stream = build_graph(\n",
165 | " enable_overlay,\n",
166 | " flip_overlay,\n",
167 | " enable_box,\n",
168 | " box_x,\n",
169 | " box_y,\n",
170 | " thickness,\n",
171 | " color\n",
172 | " )\n",
173 | "\n",
174 | " fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(15,4))\n",
175 | " plt.tight_layout()\n",
176 | " show_image(ax0, stream, frame_num)\n",
177 | " show_graph(ax1, stream, graph_detail)"
178 | ]
179 | },
180 | {
181 | "cell_type": "code",
182 | "execution_count": null,
183 | "metadata": {},
184 | "outputs": [],
185 | "source": []
186 | },
187 | {
188 | "cell_type": "code",
189 | "execution_count": null,
190 | "metadata": {},
191 | "outputs": [],
192 | "source": []
193 | }
194 | ],
195 | "metadata": {
196 | "kernelspec": {
197 | "display_name": "Python 3",
198 | "language": "python",
199 | "name": "python3"
200 | },
201 | "language_info": {
202 | "codemirror_mode": {
203 | "name": "ipython",
204 | "version": 3
205 | },
206 | "file_extension": ".py",
207 | "mimetype": "text/x-python",
208 | "name": "python",
209 | "nbconvert_exporter": "python",
210 | "pygments_lexer": "ipython3",
211 | "version": "3.6.4"
212 | }
213 | },
214 | "nbformat": 4,
215 | "nbformat_minor": 2
216 | }
217 |
--------------------------------------------------------------------------------
/examples/get_video_thumbnail.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from __future__ import unicode_literals, print_function
3 | import argparse
4 | import ffmpeg
5 | import sys
6 |
7 |
8 | parser = argparse.ArgumentParser(description='Generate video thumbnail')
9 | parser.add_argument('in_filename', help='Input filename')
10 | parser.add_argument('out_filename', help='Output filename')
11 | parser.add_argument(
12 | '--time', type=int, default=0.1, help='Time offset')
13 | parser.add_argument(
14 | '--width', type=int, default=120,
15 | help='Width of output thumbnail (height automatically determined by aspect ratio)')
16 |
17 |
18 | def generate_thumbnail(in_filename, out_filename, time, width):
19 | try:
20 | (
21 | ffmpeg
22 | .input(in_filename, ss=time)
23 | .filter('scale', width, -1)
24 | .output(out_filename, vframes=1)
25 | .overwrite_output()
26 | .run(capture_stdout=True, capture_stderr=True)
27 | )
28 | except ffmpeg.Error as e:
29 | print(e.stderr.decode(), file=sys.stderr)
30 | sys.exit(1)
31 |
32 |
33 | if __name__ == '__main__':
34 | args = parser.parse_args()
35 | generate_thumbnail(args.in_filename, args.out_filename, args.time, args.width)
36 |
--------------------------------------------------------------------------------
/examples/graphs/av-pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/examples/graphs/av-pipeline.png
--------------------------------------------------------------------------------
/examples/graphs/dream.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/examples/graphs/dream.png
--------------------------------------------------------------------------------
/examples/graphs/ffmpeg-numpy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/examples/graphs/ffmpeg-numpy.png
--------------------------------------------------------------------------------
/examples/graphs/get_video_thumbnail.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/examples/graphs/get_video_thumbnail.png
--------------------------------------------------------------------------------
/examples/graphs/glob-filter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/examples/graphs/glob-filter.png
--------------------------------------------------------------------------------
/examples/graphs/glob.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/examples/graphs/glob.png
--------------------------------------------------------------------------------
/examples/graphs/mono-to-stereo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/examples/graphs/mono-to-stereo.png
--------------------------------------------------------------------------------
/examples/graphs/read_frame_as_jpeg.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/examples/graphs/read_frame_as_jpeg.png
--------------------------------------------------------------------------------
/examples/graphs/tensorflow-stream.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/examples/graphs/tensorflow-stream.png
--------------------------------------------------------------------------------
/examples/graphs/transcribe.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/examples/graphs/transcribe.png
--------------------------------------------------------------------------------
/examples/in.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/examples/in.mp4
--------------------------------------------------------------------------------
/examples/overlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/examples/overlay.png
--------------------------------------------------------------------------------
/examples/read_frame_as_jpeg.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from __future__ import unicode_literals
3 | import argparse
4 | import ffmpeg
5 | import sys
6 |
7 |
8 | parser = argparse.ArgumentParser(
9 | description='Read individual video frame into memory as jpeg and write to stdout')
10 | parser.add_argument('in_filename', help='Input filename')
11 | parser.add_argument('frame_num', help='Frame number')
12 |
13 |
14 | def read_frame_as_jpeg(in_filename, frame_num):
15 | out, err = (
16 | ffmpeg
17 | .input(in_filename)
18 | .filter('select', 'gte(n,{})'.format(frame_num))
19 | .output('pipe:', vframes=1, format='image2', vcodec='mjpeg')
20 | .run(capture_stdout=True)
21 | )
22 | return out
23 |
24 |
25 | if __name__ == '__main__':
26 | args = parser.parse_args()
27 | out = read_frame_as_jpeg(args.in_filename, args.frame_num)
28 | sys.stdout.buffer.write(out)
29 |
--------------------------------------------------------------------------------
/examples/requirements.txt:
--------------------------------------------------------------------------------
1 | ffmpeg-python
2 | gevent
3 | google-cloud-speech
4 | graphviz
5 | ipywidgets
6 | jupyter
7 | matplotlib
8 | Pillow
9 | tqdm
10 |
--------------------------------------------------------------------------------
/examples/show_progress.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from __future__ import unicode_literals, print_function
3 | from tqdm import tqdm
4 | import argparse
5 | import contextlib
6 | import ffmpeg
7 | import gevent
8 | import gevent.monkey; gevent.monkey.patch_all(thread=False)
9 | import os
10 | import shutil
11 | import socket
12 | import sys
13 | import tempfile
14 | import textwrap
15 | 
16 | 
17 | parser = argparse.ArgumentParser(description=textwrap.dedent('''\
18 |     Process video and report and show progress bar.
19 | 
20 |     This is an example of using the ffmpeg `-progress` option with a
21 |     unix-domain socket to report progress in the form of a progress
22 |     bar.
23 | 
24 |     The video processing simply consists of converting the video to
25 |     sepia colors, but the same pattern can be applied to other use
26 |     cases.
27 | '''))
28 | 
29 | parser.add_argument('in_filename', help='Input filename')
30 | parser.add_argument('out_filename', help='Output filename')
31 | 
32 | 
33 | @contextlib.contextmanager
34 | def _tmpdir_scope():
35 |     tmpdir = tempfile.mkdtemp()  # removed again in the finally block below
36 |     try:
37 |         yield tmpdir
38 |     finally:
39 |         shutil.rmtree(tmpdir)
40 | 
41 | 
42 | def _do_watch_progress(filename, sock, handler):  # `filename` is accepted but unused here
43 |     """Function to run in a separate gevent greenlet to read progress
44 |     events from a unix-domain socket."""
45 |     connection, client_address = sock.accept()
46 |     data = b''
47 |     try:
48 |         while True:
49 |             more_data = connection.recv(16)  # small chunks; complete lines are reassembled below
50 |             if not more_data:
51 |                 break
52 |             data += more_data
53 |             lines = data.split(b'\n')
54 |             for line in lines[:-1]:
55 |                 line = line.decode()
56 |                 parts = line.split('=')
57 |                 key = parts[0] if len(parts) > 0 else None
58 |                 value = parts[1] if len(parts) > 1 else None  # None for lines without an '='
59 |                 handler(key, value)
60 |             data = lines[-1]  # keep any trailing partial line for the next read
61 |     finally:
62 |         connection.close()
63 | 
64 | 
65 | @contextlib.contextmanager
66 | def _watch_progress(handler):
67 |     """Context manager for creating a unix-domain socket and listen for
68 |     ffmpeg progress events.
69 | 
70 |     The socket filename is yielded from the context manager and the
71 |     socket is closed when the context manager is exited.
72 | 
73 |     Args:
74 |         handler: a function to be called when progress events are
75 |             received; receives a ``key`` argument and ``value``
76 |             argument. (The example ``show_progress`` below uses tqdm)
77 | 
78 |     Yields:
79 |         socket_filename: the name of the socket file.
80 |     """
81 |     with _tmpdir_scope() as tmpdir:
82 |         socket_filename = os.path.join(tmpdir, 'sock')
83 |         sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
84 |         with contextlib.closing(sock):
85 |             sock.bind(socket_filename)
86 |             sock.listen(1)
87 |             child = gevent.spawn(_do_watch_progress, socket_filename, sock, handler)  # runs the accept/read loop concurrently with ffmpeg
88 |             try:
89 |                 yield socket_filename
90 |             except:
91 |                 gevent.kill(child)
92 |                 raise
93 | 
94 | 
95 | 
96 | @contextlib.contextmanager
97 | def show_progress(total_duration):
98 |     """Create a unix-domain socket to watch progress and render tqdm
99 |     progress bar."""
100 |     with tqdm(total=round(total_duration, 2)) as bar:
101 |         def handler(key, value):
102 |             if key == 'out_time_ms':
103 |                 time = round(float(value) / 1000000., 2)  # out_time_ms is in microseconds; convert to seconds
104 |                 bar.update(time - bar.n)
105 |             elif key == 'progress' and value == 'end':
106 |                 bar.update(bar.total - bar.n)  # snap the bar to 100% on completion
107 |         with _watch_progress(handler) as socket_filename:
108 |             yield socket_filename
109 | 
110 | 
111 | if __name__ == '__main__':
112 |     args = parser.parse_args()
113 |     total_duration = float(ffmpeg.probe(args.in_filename)['format']['duration'])  # seconds; sizes the progress bar
114 | 
115 |     with show_progress(total_duration) as socket_filename:
116 |         # See https://ffmpeg.org/ffmpeg-filters.html#Examples-44
117 |         sepia_values = [.393, .769, .189, 0, .349, .686, .168, 0, .272, .534, .131]
118 |         try:
119 |             (ffmpeg
120 |                 .input(args.in_filename)
121 |                 .colorchannelmixer(*sepia_values)
122 |                 .output(args.out_filename)
123 |                 .global_args('-progress', 'unix://{}'.format(socket_filename))  # stream progress events to our socket
124 |                 .overwrite_output()
125 |                 .run(capture_stdout=True, capture_stderr=True)
126 |             )
127 |         except ffmpeg.Error as e:
128 |             print(e.stderr, file=sys.stderr)
129 |             sys.exit(1)
130 | 
131 | 
--------------------------------------------------------------------------------
/examples/split_silence.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from __future__ import unicode_literals
3 |
4 | import argparse
5 | import errno
6 | import ffmpeg
7 | import logging
8 | import os
9 | import re
10 | import subprocess
11 | import sys
12 |
13 |
# Plain-message logging; `-v` raises the level to DEBUG in `__main__`.
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)

# Defaults for ffmpeg's `silencedetect` filter (see `get_chunk_times`).
DEFAULT_DURATION = 0.3   # minimum silence duration, in seconds
DEFAULT_THRESHOLD = -60  # silence threshold, in dB

# Command-line interface.
parser = argparse.ArgumentParser(description='Split media into separate chunks wherever silence occurs')
parser.add_argument('in_filename', help='Input filename (`-` for stdin)')
parser.add_argument('out_pattern', help='Output filename pattern (e.g. `out/chunk_{:04d}.wav`)')
parser.add_argument('--silence-threshold', default=DEFAULT_THRESHOLD, type=int, help='Silence threshold (in dB)')
parser.add_argument('--silence-duration', default=DEFAULT_DURATION, type=float, help='Silence duration')
parser.add_argument('--start-time', type=float, help='Start time (seconds)')
parser.add_argument('--end-time', type=float, help='End time (seconds)')
parser.add_argument('-v', dest='verbose', action='store_true', help='Verbose mode')
29 |
# Parse timestamps out of ffmpeg's `silencedetect` stderr output, e.g.:
#   [silencedetect @ ...] silence_start: 3.2075
#   [silencedetect @ ...] silence_end: 4.1455 | silence_duration: 0.938
# The named groups are required by `get_chunk_times`, which calls
# `.group('start')`, `.group('end')`, `.group('hours')`, etc.
silence_start_re = re.compile(r' silence_start: (?P<start>[0-9]+(\.?[0-9]*))$')
silence_end_re = re.compile(r' silence_end: (?P<end>[0-9]+(\.?[0-9]*)) ')
# Parse the total duration out of ffmpeg's final progress line, e.g.:
#   size=N/A time=00:01:23.45 bitrate=N/A
total_duration_re = re.compile(
    r'size=[^ ]+ time=(?P<hours>[0-9]{2}):(?P<minutes>[0-9]{2}):(?P<seconds>[0-9\.]{5}) bitrate=')
34 |
35 |
def _logged_popen(cmd_line, *args, **kwargs):
    """Start a subprocess, logging the full command line at DEBUG level."""
    logger.debug('Running command: {}'.format(subprocess.list2cmdline(cmd_line)))
    return subprocess.Popen(cmd_line, *args, **kwargs)
39 |
40 |
def get_chunk_times(in_filename, silence_threshold, silence_duration, start_time=None, end_time=None):
    """Detect silence in `in_filename` and return the non-silent spans.

    Runs ffmpeg's `silencedetect` filter and parses its stderr output.
    Chunks start when silence ends and end when silence starts.

    Args:
        in_filename: input media filename (or `-` for stdin).
        silence_threshold: silence threshold, in dB (e.g. -60).
        silence_duration: minimum silence duration, in seconds.
        start_time: optional start offset in seconds (ffmpeg `-ss`).
        end_time: optional end time in seconds (converted to `-t`).

    Returns:
        A list of `(chunk_start, chunk_end)` tuples, in seconds.

    Exits the process with status 1 if ffmpeg fails.
    """
    input_kwargs = {}
    if start_time is not None:
        input_kwargs['ss'] = start_time
    else:
        start_time = 0.
    if end_time is not None:
        input_kwargs['t'] = end_time - start_time

    p = _logged_popen(
        (ffmpeg
            .input(in_filename, **input_kwargs)
            .filter('silencedetect', n='{}dB'.format(silence_threshold), d=silence_duration)
            .output('-', format='null')
            .compile()
        ) + ['-nostats'], # FIXME: use .nostats() once it's implemented in ffmpeg-python.
        stderr=subprocess.PIPE
    )
    # silencedetect reports on stderr; media goes to the null muxer.
    output = p.communicate()[1].decode('utf-8')
    if p.returncode != 0:
        sys.stderr.write(output)
        sys.exit(1)
    logger.debug(output)
    lines = output.splitlines()

    # Chunks start when silence ends, and chunks end when silence starts.
    chunk_starts = []
    chunk_ends = []
    for line in lines:
        silence_start_match = silence_start_re.search(line)
        silence_end_match = silence_end_re.search(line)
        total_duration_match = total_duration_re.search(line)
        if silence_start_match:
            chunk_ends.append(float(silence_start_match.group('start')))
            if len(chunk_starts) == 0:
                # Started with non-silence.
                chunk_starts.append(start_time or 0.)
        elif silence_end_match:
            chunk_starts.append(float(silence_end_match.group('end')))
        elif total_duration_match:
            # Remember the media's total duration so an unterminated final
            # chunk can be closed off below.
            hours = int(total_duration_match.group('hours'))
            minutes = int(total_duration_match.group('minutes'))
            seconds = float(total_duration_match.group('seconds'))
            end_time = hours * 3600 + minutes * 60 + seconds

    if len(chunk_starts) == 0:
        # No silence found.
        chunk_starts.append(start_time)

    if len(chunk_starts) > len(chunk_ends):
        # Finished with non-silence.
        chunk_ends.append(end_time or 10000000.)

    return list(zip(chunk_starts, chunk_ends))
95 |
96 |
97 | def _makedirs(path):
98 | """Python2-compatible version of ``os.makedirs(path, exist_ok=True)``."""
99 | try:
100 | os.makedirs(path)
101 | except OSError as exc:
102 | if exc.errno != errno.EEXIST or not os.path.isdir(path):
103 | raise
104 |
105 |
def split_audio(
    in_filename,
    out_pattern,
    silence_threshold=DEFAULT_THRESHOLD,
    silence_duration=DEFAULT_DURATION,
    start_time=None,
    end_time=None,
    verbose=False,
):
    """Split `in_filename` into one output file per non-silent chunk.

    Args:
        in_filename: input media filename (or `-` for stdin).
        out_pattern: output filename pattern; formatted with the chunk
            index both positionally and as `i` (e.g. `out/chunk_{:04d}.wav`).
        silence_threshold: silence threshold, in dB.
        silence_duration: minimum silence duration, in seconds.
        start_time: optional start offset in seconds.
        end_time: optional end time in seconds.
        verbose: if True, let ffmpeg's console output pass through.
    """
    chunk_times = get_chunk_times(in_filename, silence_threshold, silence_duration, start_time, end_time)

    for i, (start_time, end_time) in enumerate(chunk_times):
        time = end_time - start_time
        out_filename = out_pattern.format(i, i=i)
        # Create the chunk's output directory if it doesn't exist yet.
        _makedirs(os.path.dirname(out_filename))

        logger.info('{}: start={:.02f}, end={:.02f}, duration={:.02f}'.format(out_filename, start_time, end_time,
                                                                              time))
        # Extract just this chunk; ffmpeg output is captured (hidden)
        # unless verbose mode is enabled.
        _logged_popen(
            (ffmpeg
                .input(in_filename, ss=start_time, t=time)
                .output(out_filename)
                .overwrite_output()
                .compile()
            ),
            stdout=subprocess.PIPE if not verbose else None,
            stderr=subprocess.PIPE if not verbose else None,
        ).communicate()
134 |
135 |
if __name__ == '__main__':
    kwargs = vars(parser.parse_args())
    if kwargs['verbose']:
        # `logging.basicConfig` already ran at import time, so calling it
        # again would be a no-op for handler setup; raise the levels
        # explicitly instead. (The previous format string used `%(levels)`,
        # which is not a valid LogRecord attribute and would have raised on
        # every log call.)
        logging.getLogger().setLevel(logging.DEBUG)
        logger.setLevel(logging.DEBUG)
    split_audio(**kwargs)
142 |
--------------------------------------------------------------------------------
/examples/tensorflow_stream.py:
--------------------------------------------------------------------------------
1 | '''Example streaming ffmpeg numpy processing.
2 |
3 | Demonstrates using ffmpeg to decode video input, process the frames in
4 | python, and then encode video output using ffmpeg.
5 |
6 | This example uses two ffmpeg processes - one to decode the input video
7 | and one to encode an output video - while the raw frame processing is
8 | done in python with numpy.
9 |
10 | At a high level, the signal graph looks like this:
11 |
12 | (input video) -> [ffmpeg process 1] -> [python] -> [ffmpeg process 2] -> (output video)
13 |
14 | This example reads/writes video files on the local filesystem, but the
15 | same pattern can be used for other kinds of input/output (e.g. webcam,
16 | rtmp, etc.).
17 |
18 | The simplest processing example simply darkens each frame by
19 | multiplying the frame's numpy array by a constant value; see
20 | ``process_frame_simple``.
21 |
22 | A more sophisticated example processes each frame with tensorflow using
23 | the "deep dream" tensorflow tutorial; activate this mode by calling
24 | the script with the optional `--dream` argument. (Make sure tensorflow
25 | is installed before running)
26 | '''
27 | from __future__ import print_function
28 | import argparse
29 | import ffmpeg
30 | import logging
31 | import numpy as np
32 | import os
33 | import subprocess
34 | import zipfile
35 |
36 |
# Command-line interface for this example.
parser = argparse.ArgumentParser(description='Example streaming ffmpeg numpy processing')
parser.add_argument('in_filename', help='Input filename')
parser.add_argument('out_filename', help='Output filename')
parser.add_argument(
    '--dream', action='store_true', help='Use DeepDream frame processing (requires tensorflow)')

# INFO shows high-level progress; DEBUG adds per-frame read/write messages.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
45 |
46 |
def get_video_size(filename):
    """Return the (width, height) of the first video stream in `filename`."""
    logger.info('Getting video size for {!r}'.format(filename))
    metadata = ffmpeg.probe(filename)
    video_streams = (s for s in metadata['streams'] if s['codec_type'] == 'video')
    stream = next(video_streams)
    return int(stream['width']), int(stream['height'])
54 |
55 |
def start_ffmpeg_process1(in_filename):
    """Spawn the decoder: reads `in_filename`, writes raw RGB24 frames to stdout."""
    logger.info('Starting ffmpeg process1')
    stream = ffmpeg.input(in_filename)
    stream = stream.output('pipe:', format='rawvideo', pix_fmt='rgb24')
    return subprocess.Popen(stream.compile(), stdout=subprocess.PIPE)
65 |
66 |
def start_ffmpeg_process2(out_filename, width, height):
    """Spawn the encoder: reads raw RGB24 frames on stdin, writes `out_filename`."""
    logger.info('Starting ffmpeg process2')
    stream = ffmpeg.input(
        'pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height)
    )
    stream = stream.output(out_filename, pix_fmt='yuv420p').overwrite_output()
    return subprocess.Popen(stream.compile(), stdin=subprocess.PIPE)
77 |
78 |
def read_frame(process1, width, height):
    """Read one raw RGB24 frame from the decoder's stdout.

    Returns a (height, width, 3) uint8 numpy array, or None at end of
    stream.
    """
    logger.debug('Reading frame')

    # RGB24 means exactly 3 bytes per pixel.
    frame_size = width * height * 3
    in_bytes = process1.stdout.read(frame_size)
    if len(in_bytes) == 0:
        return None
    # A short read mid-stream would mean a truncated frame.
    assert len(in_bytes) == frame_size
    return np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])
95 |
96 |
def process_frame_simple(frame):
    """Trivial frame processor: scale every channel to 30% brightness."""
    darkened = frame * 0.3
    return darkened
100 |
101 |
def write_frame(process2, frame):
    """Send one frame to the encoder's stdin as raw uint8 bytes."""
    logger.debug('Writing frame')
    raw = frame.astype(np.uint8).tobytes()
    process2.stdin.write(raw)
109 |
110 |
def run(in_filename, out_filename, process_frame):
    """Decode `in_filename`, apply `process_frame` per frame, encode to `out_filename`.

    Args:
        in_filename: input video filename.
        out_filename: output video filename (overwritten if present).
        process_frame: callable taking one (height, width, 3) numpy
            frame (as produced by `read_frame`) and returning the
            processed frame.
    """
    width, height = get_video_size(in_filename)
    process1 = start_ffmpeg_process1(in_filename)
    process2 = start_ffmpeg_process2(out_filename, width, height)
    while True:
        in_frame = read_frame(process1, width, height)
        if in_frame is None:
            logger.info('End of input stream')
            break

        logger.debug('Processing frame')
        out_frame = process_frame(in_frame)
        write_frame(process2, out_frame)

    logger.info('Waiting for ffmpeg process1')
    process1.wait()

    logger.info('Waiting for ffmpeg process2')
    # Closing stdin signals end-of-stream to the encoder.
    process2.stdin.close()
    process2.wait()

    logger.info('Done')
133 |
134 |
class DeepDream(object):
    '''DeepDream implementation, adapted from official tensorflow deepdream tutorial:
    https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/tutorials/deepdream

    Credit: Alexander Mordvintsev

    NOTE(review): relies on the module-level name ``tf`` bound by
    ``import tensorflow as tf`` in the ``__main__`` block, and uses
    TF1-style APIs (``tf.placeholder``, ``InteractiveSession``,
    ``tf.gfile``) -- confirm a TF1-compatible tensorflow is installed.
    '''

    # Pretrained "Inception 5h" model used to generate the dream features.
    _DOWNLOAD_URL = 'https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip'
    _ZIP_FILENAME = 'deepdream_model.zip'
    _MODEL_FILENAME = 'tensorflow_inception_graph.pb'

    @staticmethod
    def _download_model():
        # Fetch and unpack the pretrained model into the current directory.
        logger.info('Downloading deepdream model...')
        try:
            from urllib.request import urlretrieve  # python 3
        except ImportError:
            from urllib import urlretrieve  # python 2
        urlretrieve(DeepDream._DOWNLOAD_URL, DeepDream._ZIP_FILENAME)

        logger.info('Extracting deepdream model...')
        zipfile.ZipFile(DeepDream._ZIP_FILENAME, 'r').extractall('.')

    @staticmethod
    def _tffunc(*argtypes):
        '''Helper that transforms TF-graph generating function into a regular one.
        See `_resize` function below.
        '''
        placeholders = list(map(tf.placeholder, argtypes))
        def wrap(f):
            # Build the graph once with placeholders...
            out = f(*placeholders)
            def wrapper(*args, **kw):
                # ...then evaluate it on demand with concrete values.
                return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
            return wrapper
        return wrap

    @staticmethod
    def _base_resize(img, size):
        '''Helper function that uses TF to resize an image'''
        img = tf.expand_dims(img, 0)
        return tf.image.resize_bilinear(img, size)[0,:,:,:]

    def __init__(self):
        # Download the pretrained model on first use.
        if not os.path.exists(DeepDream._MODEL_FILENAME):
            self._download_model()

        self._graph = tf.Graph()
        self._session = tf.InteractiveSession(graph=self._graph)
        self._resize = self._tffunc(np.float32, np.int32)(self._base_resize)
        with tf.gfile.FastGFile(DeepDream._MODEL_FILENAME, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        self._t_input = tf.placeholder(np.float32, name='input') # define the input tensor
        # Mean pixel value subtracted from the input before inference.
        imagenet_mean = 117.0
        t_preprocessed = tf.expand_dims(self._t_input-imagenet_mean, 0)
        tf.import_graph_def(graph_def, {'input':t_preprocessed})

        # Channel 139 of this layer is the feature being maximized.
        self.t_obj = self.T('mixed4d_3x3_bottleneck_pre_relu')[:,:,:,139]
        #self.t_obj = tf.square(self.T('mixed4c'))

    def T(self, layer_name):
        '''Helper for getting layer output tensor'''
        return self._graph.get_tensor_by_name('import/%s:0'%layer_name)

    def _calc_grad_tiled(self, img, t_grad, tile_size=512):
        '''Compute the value of tensor t_grad over the image in a tiled way.
        Random shifts are applied to the image to blur tile boundaries over
        multiple iterations.'''
        sz = tile_size
        h, w = img.shape[:2]
        sx, sy = np.random.randint(sz, size=2)
        img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
        grad = np.zeros_like(img)
        for y in range(0, max(h-sz//2, sz),sz):
            for x in range(0, max(w-sz//2, sz),sz):
                sub = img_shift[y:y+sz,x:x+sz]
                g = self._session.run(t_grad, {self._t_input:sub})
                grad[y:y+sz,x:x+sz] = g
        # Undo the random shift so the gradient lines up with `img`.
        return np.roll(np.roll(grad, -sx, 1), -sy, 0)

    def process_frame(self, frame, iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):
        '''Run gradient-ascent "deep dream" on a single frame.

        Args:
            frame: input image as a numpy array; the first two axes are
                treated as (height, width), as produced by `read_frame`.
            iter_n: gradient-ascent iterations per octave.
            step: gradient-ascent step size.
            octave_n: number of scales (octaves) to process.
            octave_scale: size ratio between consecutive octaves.
        '''
        t_score = tf.reduce_mean(self.t_obj) # defining the optimization objective
        t_grad = tf.gradients(t_score, self._t_input)[0] # behold the power of automatic differentiation!

        # split the image into a number of octaves
        img = frame
        octaves = []
        for i in range(octave_n-1):
            hw = img.shape[:2]
            lo = self._resize(img, np.int32(np.float32(hw)/octave_scale))
            hi = img-self._resize(lo, hw)
            img = lo
            octaves.append(hi)

        # generate details octave by octave
        for octave in range(octave_n):
            if octave>0:
                hi = octaves[-octave]
                img = self._resize(img, hi.shape[:2])+hi
            for i in range(iter_n):
                g = self._calc_grad_tiled(img, t_grad)
                # Normalized ascent step: scale by the mean gradient magnitude.
                img += g*(step / (np.abs(g).mean()+1e-7))
                #print('.',end = ' ')
        return img
239 |
240 |
if __name__ == '__main__':
    args = parser.parse_args()
    if args.dream:
        # Imported lazily (and bound as a module-level name, which the
        # DeepDream class relies on) so plain runs don't need tensorflow.
        import tensorflow as tf
        process_frame = DeepDream().process_frame
    else:
        process_frame = process_frame_simple
    run(args.in_filename, args.out_filename, process_frame)
249 |
--------------------------------------------------------------------------------
/examples/transcribe.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from __future__ import unicode_literals, print_function
3 | from google.cloud import speech
4 | from google.cloud.speech import enums
5 | from google.cloud.speech import types
6 | import argparse
7 | import ffmpeg
8 | import logging
9 | import sys
10 |
11 |
# Plain-message logging at INFO level.
logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)


# Command-line interface.
parser = argparse.ArgumentParser(description='Convert speech audio to text using Google Speech API')
parser.add_argument('in_filename', help='Input filename (`-` for stdin)')
19 |
20 |
def decode_audio(in_filename, **input_kwargs):
    """Decode `in_filename` to raw 16-bit mono PCM at 16 kHz.

    Any extra kwargs are passed to ``ffmpeg.input`` verbatim (e.g.
    ``ss=10``).

    Returns:
        The decoded audio as bytes (s16le, 1 channel, 16 kHz -- the
        format `get_transcripts` declares to the Speech API as LINEAR16).

    Exits the process with status 1, printing ffmpeg's stderr, on failure.
    """
    try:
        out, err = (ffmpeg
            .input(in_filename, **input_kwargs)
            .output('-', format='s16le', acodec='pcm_s16le', ac=1, ar='16k')
            .overwrite_output()
            .run(capture_stdout=True, capture_stderr=True)
        )
    except ffmpeg.Error as e:
        # `e.stderr` is captured as bytes; decode it so the error text is
        # readable rather than printing a bytes repr.
        print(e.stderr.decode('utf-8', errors='replace'), file=sys.stderr)
        sys.exit(1)
    return out
33 |
34 |
def get_transcripts(audio_data):
    """Send raw audio bytes to the Google Speech API and collect transcripts.

    Args:
        audio_data: 16-bit little-endian mono PCM at 16 kHz, matching the
            LINEAR16 / 16000 Hz config declared below (see `decode_audio`).

    Returns:
        A list containing the top transcript alternative of each result.
    """
    client = speech.SpeechClient()
    audio = types.RecognitionAudio(content=audio_data)
    config = types.RecognitionConfig(
        encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        language_code='en-US'
    )
    response = client.recognize(config, audio)
    return [result.alternatives[0].transcript for result in response.results]
45 |
46 |
def transcribe(in_filename):
    """Decode `in_filename` to PCM, transcribe it, and print each transcript."""
    for transcript in get_transcripts(decode_audio(in_filename)):
        # Encode + repr keeps output printable on both python 2 and 3.
        print(repr(transcript.encode('utf-8')))
52 |
53 |
if __name__ == '__main__':
    # Entry point: transcribe the file given on the command line.
    args = parser.parse_args()
    transcribe(args.in_filename)
57 |
--------------------------------------------------------------------------------
/examples/video_info.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | from __future__ import unicode_literals, print_function
3 | import argparse
4 | import ffmpeg
5 | import sys
6 |
7 |
# Command-line interface.
parser = argparse.ArgumentParser(description='Get video information')
parser.add_argument('in_filename', help='Input filename')


if __name__ == '__main__':
    args = parser.parse_args()

    try:
        probe = ffmpeg.probe(args.in_filename)
    except ffmpeg.Error as e:
        # `e.stderr` holds ffprobe's captured stderr output (bytes).
        print(e.stderr, file=sys.stderr)
        sys.exit(1)

    # Pick the first video stream, if any.
    video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
    if video_stream is None:
        print('No video stream found', file=sys.stderr)
        sys.exit(1)

    width = int(video_stream['width'])
    height = int(video_stream['height'])
    # NOTE(review): 'nb_frames' is not populated for every container/codec
    # combination -- confirm it exists for the inputs you care about.
    num_frames = int(video_stream['nb_frames'])
    print('width: {}'.format(width))
    print('height: {}'.format(height))
    print('num_frames: {}'.format(num_frames))
32 |
--------------------------------------------------------------------------------
/ffmpeg/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import unicode_literals
2 | from . import nodes
3 | from . import _ffmpeg
4 | from . import _filters
5 | from . import _probe
6 | from . import _run
7 | from . import _view
8 | from .nodes import *
9 | from ._ffmpeg import *
10 | from ._filters import *
11 | from ._probe import *
12 | from ._run import *
13 | from ._view import *
14 |
# Aggregate the public API of every submodule so the names re-exported by
# the star-imports above are also advertised by `ffmpeg.__all__`.
__all__ = (
    nodes.__all__
    + _ffmpeg.__all__
    + _probe.__all__
    + _run.__all__
    + _view.__all__
    + _filters.__all__
)
23 |
--------------------------------------------------------------------------------
/ffmpeg/_ffmpeg.py:
--------------------------------------------------------------------------------
from __future__ import unicode_literals

# NOTE(review): `basestring` is imported twice; the `past.builtins` import
# is immediately shadowed by the `._utils` one and looks redundant -- the
# `past` dependency may be droppable here.
from past.builtins import basestring
from ._utils import basestring

from .nodes import (
    filter_operator,
    GlobalNode,
    InputNode,
    MergeOutputsNode,
    OutputNode,
    output_operator,
)
14 |
15 |
def input(filename, **kwargs):
    """Input file URL (ffmpeg ``-i`` option)

    Keyword arguments are forwarded to ffmpeg verbatim (e.g. ``t=20``,
    ``acodec='pcm'``); ``f`` is accepted as an alias for ``format``.

    Use ``pipe:`` as the filename to tell ffmpeg to read from stdin.

    Official documentation: `Main options <https://ffmpeg.org/ffmpeg.html#Main-options>`__
    """
    kwargs['filename'] = filename
    alias_format = kwargs.pop('f', None)
    if alias_format:
        if 'format' in kwargs:
            raise ValueError("Can't specify both `format` and `f` kwargs")
        kwargs['format'] = alias_format
    return InputNode(input.__name__, kwargs=kwargs).stream()
33 |
34 |
@output_operator()
def global_args(stream, *args):
    """Add extra global command-line argument(s), e.g. ``-progress``.

    The arguments are emitted verbatim (see ``_run._get_global_args``).
    """
    return GlobalNode(stream, global_args.__name__, args).stream()
39 |
40 |
@output_operator()
def overwrite_output(stream):
    """Overwrite output files without asking (ffmpeg ``-y`` option)

    Official documentation: `Main options <https://ffmpeg.org/ffmpeg.html#Main-options>`__
    """
    return GlobalNode(stream, overwrite_output.__name__, ['-y']).stream()
48 |
49 |
@output_operator()
def merge_outputs(*streams):
    """Include all given outputs in one ffmpeg command line

    Lets a single ffmpeg invocation produce multiple output files.
    """
    return MergeOutputsNode(streams, merge_outputs.__name__).stream()
54 |
55 |
@filter_operator()
def output(*streams_and_filename, **kwargs):
    """Output file URL

    Syntax:
        `ffmpeg.output(stream1[, stream2, stream3...], filename, **ffmpeg_args)`

    Any supplied keyword arguments are passed to ffmpeg verbatim (e.g.
    ``t=20``, ``f='mp4'``, ``acodec='pcm'``, ``vcodec='rawvideo'``,
    etc.). Some keyword-arguments are handled specially, as shown below.

    Args:
        video_bitrate: parameter for ``-b:v``, e.g. ``video_bitrate=1000``.
        audio_bitrate: parameter for ``-b:a``, e.g. ``audio_bitrate=200``.
        format: alias for ``-f`` parameter, e.g. ``format='mp4'``
            (equivalent to ``f='mp4'``).

    If multiple streams are provided, they are mapped to the same
    output.

    To tell ffmpeg to write to stdout, use ``pipe:`` as the filename.

    Raises:
        ValueError: if no filename is supplied (as the trailing positional
            argument or via the ``filename`` kwarg), or if both ``format``
            and ``f`` are given.

    Official documentation: `Synopsis <https://ffmpeg.org/ffmpeg.html#Synopsis>`__
    """
    streams_and_filename = list(streams_and_filename)
    if 'filename' not in kwargs:
        # Guard the empty case explicitly so callers get a clear ValueError
        # instead of an IndexError from the `[-1]` lookup.
        if not streams_and_filename or not isinstance(
            streams_and_filename[-1], basestring
        ):
            raise ValueError('A filename must be provided')
        kwargs['filename'] = streams_and_filename.pop(-1)
    streams = streams_and_filename

    fmt = kwargs.pop('f', None)
    if fmt:
        if 'format' in kwargs:
            raise ValueError("Can't specify both `format` and `f` kwargs")
        kwargs['format'] = fmt
    return OutputNode(streams, output.__name__, kwargs=kwargs).stream()
93 |
94 |
95 | __all__ = ['input', 'merge_outputs', 'output', 'overwrite_output']
96 |
--------------------------------------------------------------------------------
/ffmpeg/_probe.py:
--------------------------------------------------------------------------------
1 | import json
2 | import subprocess
3 | from ._run import Error
4 | from ._utils import convert_kwargs_to_cmd_line_args
5 |
6 |
def probe(filename, cmd='ffprobe', timeout=None, **kwargs):
    """Run ffprobe on the specified file and return a JSON representation of the output.

    Args:
        filename: media file to probe.
        cmd: name/path of the ffprobe executable.
        timeout: optional number of seconds to wait for ffprobe; on expiry
            the child process is killed and ``subprocess.TimeoutExpired``
            propagates.
        **kwargs: extra ffprobe command-line options, converted verbatim
            by ``convert_kwargs_to_cmd_line_args``.

    Raises:
        :class:`ffmpeg.Error`: if ffprobe returns a non-zero exit code,
            an :class:`Error` is raised with a generic error message.
            The stderr output can be retrieved by accessing the
            ``stderr`` property of the exception.
    """
    args = [cmd, '-show_format', '-show_streams', '-of', 'json']
    args += convert_kwargs_to_cmd_line_args(kwargs)
    args += [filename]

    p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    communicate_kwargs = {}
    if timeout is not None:
        communicate_kwargs['timeout'] = timeout
    try:
        out, err = p.communicate(**communicate_kwargs)
    except subprocess.TimeoutExpired:
        # Don't leave a stray ffprobe process running after a timeout:
        # kill it and reap it before propagating (pattern recommended by
        # the subprocess documentation).
        p.kill()
        p.communicate()
        raise
    if p.returncode != 0:
        raise Error('ffprobe', out, err)
    return json.loads(out.decode('utf-8'))
28 |
29 |
30 | __all__ = ['probe']
31 |
--------------------------------------------------------------------------------
/ffmpeg/_run.py:
--------------------------------------------------------------------------------
1 | from __future__ import unicode_literals
2 | from .dag import get_outgoing_edges, topo_sort
3 | from ._utils import basestring, convert_kwargs_to_cmd_line_args
4 | from builtins import str
5 | from functools import reduce
6 | import copy
7 | import operator
8 | import subprocess
9 |
10 | from ._ffmpeg import input, output
11 | from .nodes import (
12 | get_stream_spec_nodes,
13 | FilterNode,
14 | GlobalNode,
15 | InputNode,
16 | OutputNode,
17 | output_operator,
18 | )
19 |
20 | try:
21 | from collections.abc import Iterable
22 | except ImportError:
23 | from collections import Iterable
24 |
25 |
class Error(Exception):
    """Raised when an ffmpeg/ffprobe invocation exits with a non-zero code.

    Attributes:
        stdout: stdout captured from the failed command, as supplied.
        stderr: stderr captured from the failed command, as supplied.
    """

    def __init__(self, cmd, stdout, stderr):
        message = '{} error (see stderr output for detail)'.format(cmd)
        super(Error, self).__init__(message)
        self.stdout = stdout
        self.stderr = stderr
33 |
34 |
def _get_input_args(input_node):
    """Build the ffmpeg command-line arguments for a single input node."""
    if input_node.name != input.__name__:
        raise ValueError('Unsupported input node: {}'.format(input_node))
    node_kwargs = copy.copy(input_node.kwargs)
    filename = node_kwargs.pop('filename')
    fmt = node_kwargs.pop('format', None)
    video_size = node_kwargs.pop('video_size', None)
    args = []
    if fmt:
        args.extend(['-f', fmt])
    if video_size:
        args.extend(['-video_size', '{}x{}'.format(video_size[0], video_size[1])])
    # Remaining kwargs pass through verbatim, then the input file itself.
    args.extend(convert_kwargs_to_cmd_line_args(node_kwargs))
    args.extend(['-i', filename])
    return args
51 |
52 |
def _format_input_stream_name(stream_name_map, edge, is_final_arg=False):
    """Render the stream label for the upstream side of `edge`.

    Filter-graph references are bracketed (e.g. ``[s0]``); a final `-map`
    argument that points directly at an input file is not.
    """
    base = stream_name_map[edge.upstream_node, edge.upstream_label]
    selector = ':{}'.format(edge.upstream_selector) if edge.upstream_selector else ''
    if is_final_arg and isinstance(edge.upstream_node, InputNode):
        ## Special case: `-map` args should not have brackets for input
        ## nodes.
        return '{}{}'.format(base, selector)
    return '[{}{}]'.format(base, selector)
66 |
67 |
68 | def _format_output_stream_name(stream_name_map, edge):
69 | return '[{}]'.format(stream_name_map[edge.upstream_node, edge.upstream_label])
70 |
71 |
def _get_filter_spec(node, outgoing_edge_map, stream_name_map):
    """Render one `-filter_complex` entry: input labels, filter, output labels."""
    input_labels = ''.join(
        _format_input_stream_name(stream_name_map, edge)
        for edge in node.incoming_edges
    )
    outgoing_edges = get_outgoing_edges(node, outgoing_edge_map)
    output_labels = ''.join(
        _format_output_stream_name(stream_name_map, edge)
        for edge in outgoing_edges
    )
    return '{}{}{}'.format(
        input_labels, node._get_filter(outgoing_edges), output_labels
    )
85 |
86 |
87 | def _allocate_filter_stream_names(filter_nodes, outgoing_edge_maps, stream_name_map):
88 | stream_count = 0
89 | for upstream_node in filter_nodes:
90 | outgoing_edge_map = outgoing_edge_maps[upstream_node]
91 | for upstream_label, downstreams in sorted(outgoing_edge_map.items()):
92 | if len(downstreams) > 1:
93 | # TODO: automatically insert `splits` ahead of time via graph transformation.
94 | raise ValueError(
95 | 'Encountered {} with multiple outgoing edges with same upstream '
96 | 'label {!r}; a `split` filter is probably required'.format(
97 | upstream_node, upstream_label
98 | )
99 | )
100 | stream_name_map[upstream_node, upstream_label] = 's{}'.format(stream_count)
101 | stream_count += 1
102 |
103 |
def _get_filter_arg(filter_nodes, outgoing_edge_maps, stream_name_map):
    """Build the complete `-filter_complex` value (';'-joined filter specs)."""
    _allocate_filter_stream_names(filter_nodes, outgoing_edge_maps, stream_name_map)
    return ';'.join(
        _get_filter_spec(node, outgoing_edge_maps[node], stream_name_map)
        for node in filter_nodes
    )
111 |
112 |
def _get_global_args(node):
    """Return a GlobalNode's raw arguments as a fresh list (safe to extend)."""
    return list(node.args)
115 |
116 |
def _get_output_args(node, stream_name_map):
    """Build per-output ffmpeg arguments: `-map` entries, options, filename."""
    if node.name != output.__name__:
        raise ValueError('Unsupported output node: {}'.format(node))
    args = []

    if len(node.incoming_edges) == 0:
        raise ValueError('Output node {} has no mapped streams'.format(node))

    for edge in node.incoming_edges:
        # edge = node.incoming_edges[0]
        stream_name = _format_input_stream_name(
            stream_name_map, edge, is_final_arg=True
        )
        # A lone `-map 0` is redundant (ffmpeg maps the single input by
        # default), so it is omitted.
        if stream_name != '0' or len(node.incoming_edges) > 1:
            args += ['-map', stream_name]

    kwargs = copy.copy(node.kwargs)
    filename = kwargs.pop('filename')
    # A few kwargs translate to specially-named ffmpeg flags...
    if 'format' in kwargs:
        args += ['-f', kwargs.pop('format')]
    if 'video_bitrate' in kwargs:
        args += ['-b:v', str(kwargs.pop('video_bitrate'))]
    if 'audio_bitrate' in kwargs:
        args += ['-b:a', str(kwargs.pop('audio_bitrate'))]
    if 'video_size' in kwargs:
        video_size = kwargs.pop('video_size')
        if not isinstance(video_size, basestring) and isinstance(video_size, Iterable):
            # Accept (width, height) pairs as well as 'WxH' strings.
            video_size = '{}x{}'.format(video_size[0], video_size[1])
        args += ['-video_size', video_size]
    # ...everything else passes through verbatim, then the output filename.
    args += convert_kwargs_to_cmd_line_args(kwargs)
    args += [filename]
    return args
149 |
150 |
@output_operator()
def get_args(stream_spec, overwrite_output=False):
    """Build command-line arguments to be passed to ffmpeg.

    Args:
        stream_spec: a stream/node graph (as produced by ``ffmpeg.input``,
            ``.output()``, etc.).
        overwrite_output: if True, append the ffmpeg ``-y`` flag.

    Returns:
        A list of argument strings (executable name not included).
    """
    nodes = get_stream_spec_nodes(stream_spec)
    args = []
    # TODO: group nodes together, e.g. `-i somefile -r somerate`.
    sorted_nodes, outgoing_edge_maps = topo_sort(nodes)
    input_nodes = [node for node in sorted_nodes if isinstance(node, InputNode)]
    output_nodes = [node for node in sorted_nodes if isinstance(node, OutputNode)]
    global_nodes = [node for node in sorted_nodes if isinstance(node, GlobalNode)]
    filter_nodes = [node for node in sorted_nodes if isinstance(node, FilterNode)]
    # Input files are numbered 0..N-1 in command-line order.
    stream_name_map = {(node, None): str(i) for i, node in enumerate(input_nodes)}
    filter_arg = _get_filter_arg(filter_nodes, outgoing_edge_maps, stream_name_map)
    # Supply an initial value to every `reduce` so an empty node list yields
    # `[]` instead of raising TypeError (previously only the global-args
    # reduce was protected).
    args += reduce(operator.add, [_get_input_args(node) for node in input_nodes], [])
    if filter_arg:
        args += ['-filter_complex', filter_arg]
    args += reduce(
        operator.add,
        [_get_output_args(node, stream_name_map) for node in output_nodes],
        [],
    )
    args += reduce(operator.add, [_get_global_args(node) for node in global_nodes], [])
    if overwrite_output:
        args += ['-y']
    return args
174 |
175 |
@output_operator()
def compile(stream_spec, cmd='ffmpeg', overwrite_output=False):
    """Build the full command line for invoking ffmpeg.

    Same as :meth:`get_args`, but with the ffmpeg executable prepended
    (``cmd`` may be a string or a sequence of leading arguments). The
    :meth:`run` function uses this internally; calling it directly is
    useful for debugging or for invoking ffmpeg manually.
    """
    if isinstance(cmd, basestring):
        prefix = [cmd]
    elif type(cmd) != list:
        prefix = list(cmd)
    else:
        prefix = cmd
    return prefix + get_args(stream_spec, overwrite_output=overwrite_output)
193 |
194 |
@output_operator()
def run_async(
    stream_spec,
    cmd='ffmpeg',
    pipe_stdin=False,
    pipe_stdout=False,
    pipe_stderr=False,
    quiet=False,
    overwrite_output=False,
    cwd=None,
):
    """Asynchronously invoke ffmpeg for the supplied node graph.

    Args:
        pipe_stdin: if True, connect pipe to subprocess stdin (to be
            used with ``pipe:`` ffmpeg inputs).
        pipe_stdout: if True, connect pipe to subprocess stdout (to be
            used with ``pipe:`` ffmpeg outputs).
        pipe_stderr: if True, connect pipe to subprocess stderr.
        quiet: if True, discard the subprocess's stdout and merge its
            stderr into the discarded stdout, silencing ffmpeg's console
            output (overrides ``pipe_stdout``/``pipe_stderr``).
        overwrite_output: passed to :meth:`get_args` (adds the ffmpeg
            ``-y`` flag).
        cwd: optional working directory for the subprocess.

    Returns:
        A `subprocess Popen`_ object representing the child process.

    Examples:
        Run and stream input::

            process = (
                ffmpeg
                .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
                .output(out_filename, pix_fmt='yuv420p')
                .overwrite_output()
                .run_async(pipe_stdin=True)
            )
            process.communicate(input=input_data)

        Run and capture output::

            process = (
                ffmpeg
                .input(in_filename)
                .output('pipe:', format='rawvideo', pix_fmt='rgb24')
                .run_async(pipe_stdout=True, pipe_stderr=True)
            )
            out, err = process.communicate()

        Process video frame-by-frame using numpy::

            process1 = (
                ffmpeg
                .input(in_filename)
                .output('pipe:', format='rawvideo', pix_fmt='rgb24')
                .run_async(pipe_stdout=True)
            )

            process2 = (
                ffmpeg
                .input('pipe:', format='rawvideo', pix_fmt='rgb24', s='{}x{}'.format(width, height))
                .output(out_filename, pix_fmt='yuv420p')
                .overwrite_output()
                .run_async(pipe_stdin=True)
            )

            while True:
                in_bytes = process1.stdout.read(width * height * 3)
                if not in_bytes:
                    break
                in_frame = (
                    np
                    .frombuffer(in_bytes, np.uint8)
                    .reshape([height, width, 3])
                )
                out_frame = in_frame * 0.3
                process2.stdin.write(
                    out_frame
                    .astype(np.uint8)
                    .tobytes()
                )

            process2.stdin.close()
            process1.wait()
            process2.wait()

    .. _subprocess Popen: https://docs.python.org/3/library/subprocess.html#popen-objects
    """
    args = compile(stream_spec, cmd, overwrite_output=overwrite_output)
    stdin_stream = subprocess.PIPE if pipe_stdin else None
    stdout_stream = subprocess.PIPE if pipe_stdout else None
    stderr_stream = subprocess.PIPE if pipe_stderr else None
    if quiet:
        # Merge stderr into stdout and discard both.
        # NOTE(review): subprocess.DEVNULL requires python 3.3+, while the
        # CI matrix also lists python 2.7 -- confirm intended support.
        stderr_stream = subprocess.STDOUT
        stdout_stream = subprocess.DEVNULL
    return subprocess.Popen(
        args,
        stdin=stdin_stream,
        stdout=stdout_stream,
        stderr=stderr_stream,
        cwd=cwd,
    )
298 |
@output_operator()
def run(
    stream_spec,
    cmd='ffmpeg',
    capture_stdout=False,
    capture_stderr=False,
    input=None,
    quiet=False,
    overwrite_output=False,
    cwd=None,
):
    """Invoke ffmpeg for the supplied node graph and wait for it to finish.

    Args:
        cmd: ffmpeg executable name/path, or a list containing the
            executable plus extra leading arguments.
        capture_stdout: if True, capture stdout (to be used with
            ``pipe:`` ffmpeg outputs).
        capture_stderr: if True, capture stderr.
        input: text to be sent to stdin (to be used with ``pipe:``
            ffmpeg inputs).
        quiet: if True, discard ffmpeg's console output (see ``run_async``).
        overwrite_output: forwarded to ``run_async`` (and ultimately
            ``get_args()``).
        cwd: working directory for the ffmpeg process.

    Returns: (out, err) tuple containing captured stdout and stderr data;
        an element is None when the corresponding capture flag is off.

    Raises:
        Error: if ffmpeg exits with a non-zero return code.
    """
    process = run_async(
        stream_spec,
        cmd,
        pipe_stdin=input is not None,
        pipe_stdout=capture_stdout,
        pipe_stderr=capture_stderr,
        quiet=quiet,
        overwrite_output=overwrite_output,
        cwd=cwd,
    )
    out, err = process.communicate(input)
    # communicate() waits for the process, so poll() reliably returns the
    # final exit code here.
    retcode = process.poll()
    if retcode:
        raise Error('ffmpeg', out, err)
    return out, err
339 |
340 |
# Public API of ffmpeg._run.
__all__ = [
    'compile',
    'Error',
    'get_args',
    'run',
    'run_async',
]
348 |
--------------------------------------------------------------------------------
/ffmpeg/_utils.py:
--------------------------------------------------------------------------------
1 | from __future__ import unicode_literals
2 | from builtins import str
3 | from past.builtins import basestring
4 | import hashlib
5 | import sys
6 |
7 |
8 | if sys.version_info.major == 2:
9 | # noinspection PyUnresolvedReferences,PyShadowingBuiltins
10 | str = str
11 |
12 | try:
13 | from collections.abc import Iterable
14 | except ImportError:
15 | from collections import Iterable
16 |
17 |
18 | # `past.builtins.basestring` module can't be imported on Python3 in some environments (Ubuntu).
19 | # This code is copy-pasted from it to avoid crashes.
20 | class BaseBaseString(type):
21 | def __instancecheck__(cls, instance):
22 | return isinstance(instance, (bytes, str))
23 |
24 | def __subclasshook__(cls, thing):
25 | # TODO: What should go here?
26 | raise NotImplemented
27 |
28 |
def with_metaclass(meta, *bases):
    """Create a base class with metaclass ``meta`` (six/future-style helper).

    Returns a temporary class that, when subclassed, replaces itself with
    ``meta(name, bases, d)`` -- giving the resulting class metaclass ``meta``
    on both Python 2 and 3 without version-specific syntax.
    """

    class metaclass(meta):
        __call__ = type.__call__
        __init__ = type.__init__

        def __new__(cls, name, this_bases, d):
            # `this_bases is None` only for the temporary class constructed
            # below; real subclasses are built via `meta`.
            if this_bases is None:
                return type.__new__(cls, name, (), d)
            return meta(name, bases, d)

    return metaclass('temporary_class', None, {})
40 |
41 |
if sys.version_info.major >= 3:
    # On Python 3, synthesize a `basestring` whose isinstance() check matches
    # both `bytes` and `str` (via BaseBaseString.__instancecheck__).

    class basestring(with_metaclass(BaseBaseString)):
        pass

else:
    # noinspection PyUnresolvedReferences,PyCompatibility
    from builtins import basestring
50 |
51 |
def _recursive_repr(item):
    """Hack around python `repr` to deterministically represent dictionaries.

    Dict keys are emitted in sorted order so equal dicts always produce the
    same string.  This is able to represent more things than json.dumps,
    since it does not require things to be JSON serializable (e.g. datetimes).
    """
    if isinstance(item, basestring):
        return str(item)
    if isinstance(item, list):
        return '[{}]'.format(', '.join(_recursive_repr(elem) for elem in item))
    if isinstance(item, dict):
        pairs = (
            '{}: {}'.format(_recursive_repr(key), _recursive_repr(item[key]))
            for key in sorted(item)
        )
        return '{' + ', '.join(pairs) + '}'
    return repr(item)
71 |
72 |
def get_hash(item):
    """Return the MD5 hex digest of ``item``'s deterministic repr."""
    return hashlib.md5(_recursive_repr(item).encode('utf-8')).hexdigest()
76 |
77 |
def get_hash_int(item):
    """Return ``get_hash(item)`` interpreted as a (128-bit) integer."""
    hex_digest = get_hash(item)
    return int(hex_digest, 16)
80 |
81 |
def escape_chars(text, chars):
    """Backslash-escape every character of ``chars`` occurring in ``text``.

    The backslash itself (when present in ``chars``) is always escaped first,
    so escapes added for the other characters are not re-escaped.
    """
    result = str(text)
    remaining = set(chars)
    ordered = []
    if '\\' in remaining:
        remaining.discard('\\')
        ordered.append('\\')
    ordered.extend(remaining)
    for ch in ordered:
        result = result.replace(ch, '\\' + ch)
    return result
92 |
93 |
def convert_kwargs_to_cmd_line_args(kwargs):
    """Build a flat ffmpeg command-line argument list out of a dict.

    Keys are emitted in sorted order as ``-key``; non-string iterable values
    repeat the flag once per element; a None value emits the flag alone.
    """
    args = []
    for key, value in sorted(kwargs.items()):
        if isinstance(value, Iterable) and not isinstance(value, str):
            values = value
        else:
            values = [value]
        for item in values:
            args.append('-{}'.format(key))
            if item is not None:
                args.append('{}'.format(item))
    return args
109 |
--------------------------------------------------------------------------------
/ffmpeg/_view.py:
--------------------------------------------------------------------------------
1 | from __future__ import unicode_literals
2 |
3 | from builtins import str
4 | from .dag import get_outgoing_edges
5 | from ._run import topo_sort
6 | import tempfile
7 |
8 | from ffmpeg.nodes import (
9 | FilterNode,
10 | get_stream_spec_nodes,
11 | InputNode,
12 | OutputNode,
13 | stream_operator,
14 | )
15 |
16 |
17 | _RIGHT_ARROW = '\u2192'
18 |
19 |
def _get_node_color(node):
    """Map a node's type to its fill color in the rendered graph (None if
    the type is unrecognized)."""
    # Checked in the same order as the original if/elif chain.
    for node_type, color in (
        (InputNode, '#99cc00'),
        (OutputNode, '#99ccff'),
        (FilterNode, '#ffcc00'),
    ):
        if isinstance(node, node_type):
            return color
    return None
30 |
31 |
@stream_operator()
def view(stream_spec, detail=False, filename=None, pipe=False, **kwargs):
    """Render the stream's node graph with graphviz.

    Args:
        detail: if True, include each node's args/kwargs in its label.
        filename: file to render the graph to; mutually exclusive with
            ``pipe``.  Defaults to a temporary file.
        pipe: if True, return the rendered image data instead of viewing it.
        **kwargs: only ``show_labels`` (default True) is accepted; any other
            key raises ``ValueError``.

    Returns:
        The rendered image data when ``pipe`` is True; otherwise the original
        ``stream_spec``, so the call can be chained.
    """
    try:
        import graphviz
    except ImportError:
        raise ImportError(
            'failed to import graphviz; please make sure graphviz is installed (e.g. '
            '`pip install graphviz`)'
        )

    show_labels = kwargs.pop('show_labels', True)
    if pipe and filename is not None:
        raise ValueError('Can\'t specify both `filename` and `pipe`')
    elif not pipe and filename is None:
        # NOTE(review): tempfile.mktemp() is deprecated and race-prone;
        # consider mkstemp().  Kept as-is to preserve behavior.
        filename = tempfile.mktemp()

    nodes = get_stream_spec_nodes(stream_spec)

    sorted_nodes, outgoing_edge_maps = topo_sort(nodes)
    graph = graphviz.Digraph(format='png')
    graph.attr(rankdir='LR')
    # `show_labels` was popped above, so any remaining key is unsupported.
    if len(list(kwargs.keys())) != 0:
        raise ValueError(
            'Invalid kwargs key(s): {}'.format(', '.join(list(kwargs.keys())))
        )

    for node in sorted_nodes:
        color = _get_node_color(node)

        if detail:
            # Detailed label: node name plus its positional and keyword args.
            lines = [node.short_repr]
            lines += ['{!r}'.format(arg) for arg in node.args]
            lines += [
                '{}={!r}'.format(key, node.kwargs[key]) for key in sorted(node.kwargs)
            ]
            node_text = '\n'.join(lines)
        else:
            node_text = node.short_repr
        graph.node(
            str(hash(node)), node_text, shape='box', style='filled', fillcolor=color
        )
        outgoing_edge_map = outgoing_edge_maps.get(node, {})

        for edge in get_outgoing_edges(node, outgoing_edge_map):
            # Note: `kwargs` is deliberately re-bound here as the graphviz
            # edge attributes; the function's own **kwargs is already empty
            # at this point (validated above).
            kwargs = {}
            up_label = edge.upstream_label
            down_label = edge.downstream_label
            up_selector = edge.upstream_selector

            if show_labels and (
                up_label is not None
                or down_label is not None
                or up_selector is not None
            ):
                if up_label is None:
                    up_label = ''
                if up_selector is not None:
                    up_label += ":" + up_selector
                if down_label is None:
                    down_label = ''
                if up_label != '' and down_label != '':
                    middle = ' {} '.format(_RIGHT_ARROW)
                else:
                    middle = ''
                kwargs['label'] = '{} {} {}'.format(up_label, middle, down_label)
            upstream_node_id = str(hash(edge.upstream_node))
            downstream_node_id = str(hash(edge.downstream_node))
            graph.edge(upstream_node_id, downstream_node_id, **kwargs)

    if pipe:
        return graph.pipe()
    else:
        graph.view(filename, cleanup=True)
        return stream_spec
106 |
107 |
# Public API of ffmpeg._view.
__all__ = ['view']
109 |
--------------------------------------------------------------------------------
/ffmpeg/dag.py:
--------------------------------------------------------------------------------
1 | from __future__ import unicode_literals
2 |
3 | from ._utils import get_hash, get_hash_int
4 | from builtins import object
5 | from collections import namedtuple
6 |
7 |
class DagNode(object):
    """Node in a directed-acyclic graph (DAG).

    Edges:
        DagNodes are connected by edges. An edge connects two nodes with a label for
        each side:
        - ``upstream_node``: upstream/parent node
        - ``upstream_label``: label on the outgoing side of the upstream node
        - ``downstream_node``: downstream/child node
        - ``downstream_label``: label on the incoming side of the downstream node

        For example, DagNode A may be connected to DagNode B with an edge labelled
        "foo" on A's side, and "bar" on B's side:

           _____               _____
          |     |             |     |
          |  A  >[foo]---[bar]>  B  |
          |_____|             |_____|

        Edge labels may be integers or strings, and nodes cannot have more than one
        incoming edge with the same label.

        DagNodes may have any number of incoming edges and any number of outgoing
        edges.  DagNodes keep track only of their incoming edges, but the entire graph
        structure can be inferred by looking at the furthest downstream nodes and
        working backwards.

    Hashing:
        DagNodes must be hashable, and two nodes are considered to be equivalent if
        they have the same hash value.

        Nodes are immutable, and the hash should remain constant as a result. If a
        node with new contents is required, create a new node and throw the old one
        away.

    String representation:
        In order for graph visualization tools to show useful information, nodes must
        be representable as strings. The ``repr`` operator should provide a more or
        less "full" representation of the node, and the ``short_repr`` property should
        be a shortened, concise representation.

        Again, because nodes are immutable, the string representations should remain
        constant.
    """

    def __hash__(self):
        """Return an integer hash of the node."""
        raise NotImplementedError()

    def __eq__(self, other):
        """Compare two nodes; implementations should return True if (and only if)
        hashes match.
        """
        raise NotImplementedError()

    def __repr__(self):
        """Return a full string representation of the node.

        Note: this previously declared a spurious ``other`` parameter, which
        made ``repr(node)`` fail with ``TypeError`` instead of the intended
        ``NotImplementedError``.
        """
        raise NotImplementedError()

    @property
    def short_repr(self):
        """Return a partial/concise representation of the node."""
        raise NotImplementedError()

    @property
    def incoming_edge_map(self):
        """Provides information about all incoming edges that connect to this node.

        The edge map is a dictionary that maps an ``incoming_label`` to
        ``(outgoing_node, outgoing_label)``. Note that implicitly, ``incoming_node`` is
        ``self``. See "Edges" section above.
        """
        raise NotImplementedError()
81 |
82 |
# Edge connecting two DagNodes (see DagNode's "Edges" docstring):
#   downstream_node / downstream_label: child node and its incoming label
#   upstream_node / upstream_label: parent node and its outgoing label
#   upstream_selector: optional stream-component selector (e.g. 'a' / 'v')
DagEdge = namedtuple(
    'DagEdge',
    [
        'downstream_node',
        'downstream_label',
        'upstream_node',
        'upstream_label',
        'upstream_selector',
    ],
)
93 |
94 |
def get_incoming_edges(downstream_node, incoming_edge_map):
    """Expand ``downstream_node``'s incoming-edge map into DagEdge tuples."""
    return [
        DagEdge(downstream_node, label, node, up_label, selector)
        for label, (node, up_label, selector) in incoming_edge_map.items()
    ]
109 |
110 |
def get_outgoing_edges(upstream_node, outgoing_edge_map):
    """Expand ``upstream_node``'s outgoing-edge map into DagEdge tuples.

    Labels are visited in sorted order for deterministic output.
    """
    edges = []
    for up_label in sorted(outgoing_edge_map):
        for down_node, down_label, down_selector in outgoing_edge_map[up_label]:
            edges.append(
                DagEdge(down_node, down_label, upstream_node, up_label, down_selector)
            )
    return edges
126 |
127 |
class KwargReprNode(DagNode):
    """A DagNode that can be represented as a set of args+kwargs."""

    @property
    def __upstream_hashes(self):
        # Hashes of every incoming edge (label + upstream node/label/selector),
        # so a node's identity reflects its position in the graph.
        hashes = []
        for downstream_label, upstream_info in list(self.incoming_edge_map.items()):
            upstream_node, upstream_label, upstream_selector = upstream_info
            hashes += [
                hash(x)
                for x in [
                    downstream_label,
                    upstream_node,
                    upstream_label,
                    upstream_selector,
                ]
            ]
        return hashes

    @property
    def __inner_hash(self):
        # Hash of the node's own parameters, independent of graph position.
        props = {'args': self.args, 'kwargs': self.kwargs}
        return get_hash(props)

    def __get_hash(self):
        # Combined identity: upstream topology plus own args/kwargs.
        hashes = self.__upstream_hashes + [self.__inner_hash]
        return get_hash_int(hashes)

    def __init__(self, incoming_edge_map, name, args, kwargs):
        self.__incoming_edge_map = incoming_edge_map
        self.name = name
        self.args = args
        self.kwargs = kwargs
        # Nodes are immutable, so the hash is computed once up-front.
        self.__hash = self.__get_hash()

    def __hash__(self):
        return self.__hash

    def __eq__(self, other):
        # Per DagNode's contract, equality is defined purely by hash value.
        return hash(self) == hash(other)

    @property
    def short_hash(self):
        """First 12 hex digits of the node's absolute hash value."""
        return '{:x}'.format(abs(hash(self)))[:12]

    def long_repr(self, include_hash=True):
        """Full representation: ``name(args..., key=value...) [<short_hash>]``."""
        formatted_props = ['{!r}'.format(arg) for arg in self.args]
        formatted_props += [
            '{}={!r}'.format(key, self.kwargs[key]) for key in sorted(self.kwargs)
        ]
        out = '{}({})'.format(self.name, ', '.join(formatted_props))
        if include_hash:
            out += ' <{}>'.format(self.short_hash)
        return out

    def __repr__(self):
        return self.long_repr()

    @property
    def incoming_edges(self):
        """Incoming edges expanded into a list of ``DagEdge`` tuples."""
        return get_incoming_edges(self, self.incoming_edge_map)

    @property
    def incoming_edge_map(self):
        """See ``DagNode.incoming_edge_map``."""
        return self.__incoming_edge_map

    @property
    def short_repr(self):
        """Concise representation: just the node name."""
        return self.name
197 |
198 |
def topo_sort(downstream_nodes):
    """Topologically sort the graph ending at ``downstream_nodes``.

    Performs a depth-first post-order traversal, walking *upstream* via each
    node's ``incoming_edges``.

    Returns:
        ``(sorted_nodes, outgoing_edge_maps)`` where ``sorted_nodes`` lists
        every reachable node with upstream nodes before their downstream
        dependents, and ``outgoing_edge_maps`` maps each node to
        ``{upstream_label: [(downstream_node, downstream_label, selector), ...]}``
        -- the inverse of the nodes' incoming-edge maps.

    Raises:
        RuntimeError: if a cycle is detected (graph is not a DAG).
    """
    marked_nodes = []  # nodes on the current DFS path, for cycle detection
    sorted_nodes = []
    outgoing_edge_maps = {}

    def visit(
        upstream_node,
        upstream_label,
        downstream_node,
        downstream_label,
        downstream_selector=None,
    ):
        # Re-entering a node that is still on the DFS path means a cycle.
        if upstream_node in marked_nodes:
            raise RuntimeError('Graph is not a DAG')

        # Record the edge in the *outgoing* direction (nodes themselves only
        # track incoming edges).
        if downstream_node is not None:
            outgoing_edge_map = outgoing_edge_maps.get(upstream_node, {})
            outgoing_edge_infos = outgoing_edge_map.get(upstream_label, [])
            outgoing_edge_infos += [
                (downstream_node, downstream_label, downstream_selector)
            ]
            outgoing_edge_map[upstream_label] = outgoing_edge_infos
            outgoing_edge_maps[upstream_node] = outgoing_edge_map

        if upstream_node not in sorted_nodes:
            marked_nodes.append(upstream_node)
            for edge in upstream_node.incoming_edges:
                visit(
                    edge.upstream_node,
                    edge.upstream_label,
                    edge.downstream_node,
                    edge.downstream_label,
                    edge.upstream_selector,
                )
            marked_nodes.remove(upstream_node)
            # Post-order append: all upstream dependencies come first.
            sorted_nodes.append(upstream_node)

    unmarked_nodes = [(node, None) for node in downstream_nodes]
    while unmarked_nodes:
        upstream_node, upstream_label = unmarked_nodes.pop()
        visit(upstream_node, upstream_label, None, None)
    return sorted_nodes, outgoing_edge_maps
241 |
--------------------------------------------------------------------------------
/ffmpeg/nodes.py:
--------------------------------------------------------------------------------
1 | from __future__ import unicode_literals
2 |
3 | from past.builtins import basestring
4 | from .dag import KwargReprNode
5 | from ._utils import escape_chars, get_hash_int
6 | from builtins import object
7 | import os
8 |
9 |
10 | def _is_of_types(obj, types):
11 | valid = False
12 | for stream_type in types:
13 | if isinstance(obj, stream_type):
14 | valid = True
15 | break
16 | return valid
17 |
18 |
19 | def _get_types_str(types):
20 | return ', '.join(['{}.{}'.format(x.__module__, x.__name__) for x in types])
21 |
22 |
class Stream(object):
    """Represents the outgoing edge of an upstream node; may be used to create more
    downstream nodes.
    """

    def __init__(
        self, upstream_node, upstream_label, node_types, upstream_selector=None
    ):
        # `node_types` restricts which node classes may sit upstream of this
        # stream type (e.g. FilterableStream accepts Input/Filter nodes only).
        if not _is_of_types(upstream_node, node_types):
            raise TypeError(
                'Expected upstream node to be of one of the following type(s): {}; got {}'.format(
                    _get_types_str(node_types), type(upstream_node)
                )
            )
        self.node = upstream_node
        self.label = upstream_label
        self.selector = upstream_selector

    def __hash__(self):
        # NOTE(review): `selector` is not part of the hash, so streams that
        # differ only by selector (e.g. input['a'] vs input['v']) hash -- and
        # therefore compare -- equal.  Confirm this is intentional.
        return get_hash_int([hash(self.node), hash(self.label)])

    def __eq__(self, other):
        # Equality is defined purely by hash value.
        return hash(self) == hash(other)

    def __repr__(self):
        """Represent as ``node_repr[label:selector] <short_hash>``."""
        node_repr = self.node.long_repr(include_hash=False)
        selector = ''
        if self.selector:
            selector = ':{}'.format(self.selector)
        out = '{}[{!r}{}] <{}>'.format(
            node_repr, self.label, selector, self.node.short_hash
        )
        return out

    def __getitem__(self, index):
        """
        Select a component (audio, video) of the stream.

        Example:
            Process the audio and video portions of a stream independently::

                input = ffmpeg.input('in.mp4')
                audio = input['a'].filter("aecho", 0.8, 0.9, 1000, 0.3)
                video = input['v'].hflip()
                out = ffmpeg.output(audio, video, 'out.mp4')
        """
        if self.selector is not None:
            raise ValueError('Stream already has a selector: {}'.format(self))
        elif not isinstance(index, basestring):
            raise TypeError("Expected string index (e.g. 'a'); got {!r}".format(index))
        return self.node.stream(label=self.label, selector=index)

    @property
    def audio(self):
        """Select the audio-portion of a stream.

        Some ffmpeg filters drop audio streams, and care must be taken
        to preserve the audio in the final output.  The ``.audio`` and
        ``.video`` operators can be used to reference the audio/video
        portions of a stream so that they can be processed separately
        and then re-combined later in the pipeline.  This dilemma is
        intrinsic to ffmpeg, and ffmpeg-python tries to stay out of the
        way while users may refer to the official ffmpeg documentation
        as to why certain filters drop audio.

        ``stream.audio`` is a shorthand for ``stream['a']``.

        Example:
            Process the audio and video portions of a stream independently::

                input = ffmpeg.input('in.mp4')
                audio = input.audio.filter("aecho", 0.8, 0.9, 1000, 0.3)
                video = input.video.hflip()
                out = ffmpeg.output(audio, video, 'out.mp4')
        """
        return self['a']

    @property
    def video(self):
        """Select the video-portion of a stream.

        Some ffmpeg filters drop audio streams, and care must be taken
        to preserve the audio in the final output.  The ``.audio`` and
        ``.video`` operators can be used to reference the audio/video
        portions of a stream so that they can be processed separately
        and then re-combined later in the pipeline.  This dilemma is
        intrinsic to ffmpeg, and ffmpeg-python tries to stay out of the
        way while users may refer to the official ffmpeg documentation
        as to why certain filters drop audio.

        ``stream.video`` is a shorthand for ``stream['v']``.

        Example:
            Process the audio and video portions of a stream independently::

                input = ffmpeg.input('in.mp4')
                audio = input.audio.filter("aecho", 0.8, 0.9, 1000, 0.3)
                video = input.video.hflip()
                out = ffmpeg.output(audio, video, 'out.mp4')
        """
        return self['v']
124 |
125 |
def get_stream_map(stream_spec):
    """Normalize a stream spec into a dict mapping labels to Streams.

    Accepts None (empty map), a single ``Stream`` (mapped under the label
    ``None``), a list/tuple (mapped under integer indices), or a dict
    (used as-is).

    Raises:
        TypeError: if ``stream_spec`` is of an unsupported type.  (Previously
            the function fell through with ``stream_map`` unbound and crashed
            with an opaque ``UnboundLocalError``.)
    """
    if stream_spec is None:
        stream_map = {}
    elif isinstance(stream_spec, Stream):
        stream_map = {None: stream_spec}
    elif isinstance(stream_spec, (list, tuple)):
        stream_map = dict(enumerate(stream_spec))
    elif isinstance(stream_spec, dict):
        stream_map = stream_spec
    else:
        raise TypeError(
            'Expected None, Stream, list, tuple, or dict; got {}'.format(
                type(stream_spec)
            )
        )
    return stream_map
136 |
137 |
def get_stream_map_nodes(stream_map):
    """Extract the upstream node of each Stream in ``stream_map``, raising
    TypeError if any value is not a Stream."""
    invalid = [item for item in stream_map.values() if not isinstance(item, Stream)]
    if invalid:
        raise TypeError('Expected Stream; got {}'.format(type(invalid[0])))
    return [item.node for item in stream_map.values()]
145 |
146 |
def get_stream_spec_nodes(stream_spec):
    """Normalize ``stream_spec`` and return the list of upstream nodes."""
    return get_stream_map_nodes(get_stream_map(stream_spec))
150 |
151 |
class Node(KwargReprNode):
    """Node base.

    Validates incoming streams (count and type) and wires them into the
    incoming-edge map expected by ``KwargReprNode``.
    """

    @classmethod
    def __check_input_len(cls, stream_map, min_inputs, max_inputs):
        # A None bound means "unbounded" on that side.
        if min_inputs is not None and len(stream_map) < min_inputs:
            raise ValueError(
                'Expected at least {} input stream(s); got {}'.format(
                    min_inputs, len(stream_map)
                )
            )
        elif max_inputs is not None and len(stream_map) > max_inputs:
            raise ValueError(
                'Expected at most {} input stream(s); got {}'.format(
                    max_inputs, len(stream_map)
                )
            )

    @classmethod
    def __check_input_types(cls, stream_map, incoming_stream_types):
        for stream in list(stream_map.values()):
            if not _is_of_types(stream, incoming_stream_types):
                raise TypeError(
                    'Expected incoming stream(s) to be of one of the following types: {}; got {}'.format(
                        _get_types_str(incoming_stream_types), type(stream)
                    )
                )

    @classmethod
    def __get_incoming_edge_map(cls, stream_map):
        # Map each downstream label to its upstream (node, label, selector).
        incoming_edge_map = {}
        for downstream_label, upstream in list(stream_map.items()):
            incoming_edge_map[downstream_label] = (
                upstream.node,
                upstream.label,
                upstream.selector,
            )
        return incoming_edge_map

    def __init__(
        self,
        stream_spec,
        name,
        incoming_stream_types,
        outgoing_stream_type,
        min_inputs,
        max_inputs,
        args=None,
        kwargs=None,
    ):
        # None sentinels replace the former mutable default arguments
        # (`args=[]`, `kwargs={}`) to avoid the shared-mutable-default
        # pitfall; behavior is otherwise unchanged.
        args = [] if args is None else args
        kwargs = {} if kwargs is None else kwargs
        stream_map = get_stream_map(stream_spec)
        self.__check_input_len(stream_map, min_inputs, max_inputs)
        self.__check_input_types(stream_map, incoming_stream_types)
        incoming_edge_map = self.__get_incoming_edge_map(stream_map)

        super(Node, self).__init__(incoming_edge_map, name, args, kwargs)
        self.__outgoing_stream_type = outgoing_stream_type
        self.__incoming_stream_types = incoming_stream_types

    def stream(self, label=None, selector=None):
        """Create an outgoing stream originating from this node.

        More nodes may be attached onto the outgoing stream.
        """
        return self.__outgoing_stream_type(self, label, upstream_selector=selector)

    def __getitem__(self, item):
        """Create an outgoing stream originating from this node; syntactic sugar for
        ``self.stream(label)``. It can also be used to apply a selector: e.g.
        ``node[0:'a']`` returns a stream with label 0 and selector ``'a'``, which is
        the same as ``node.stream(label=0, selector='a')``.

        Example:
            Process the audio and video portions of a stream independently::

                input = ffmpeg.input('in.mp4')
                audio = input[:'a'].filter("aecho", 0.8, 0.9, 1000, 0.3)
                video = input[:'v'].hflip()
                out = ffmpeg.output(audio, video, 'out.mp4')
        """
        if isinstance(item, slice):
            return self.stream(label=item.start, selector=item.stop)
        else:
            return self.stream(label=item)
236 |
237 |
class FilterableStream(Stream):
    """Stream that may feed further filters; its upstream node must be an
    ``InputNode`` or ``FilterNode``."""

    def __init__(self, upstream_node, upstream_label, upstream_selector=None):
        super(FilterableStream, self).__init__(
            upstream_node, upstream_label, {InputNode, FilterNode}, upstream_selector
        )
243 |
244 |
# noinspection PyMethodOverriding
class InputNode(Node):
    """InputNode type: source node with no incoming streams."""

    def __init__(self, name, args=None, kwargs=None):
        # None sentinels avoid the mutable-default-argument pitfall; the
        # effective defaults are still [] / {}.
        super(InputNode, self).__init__(
            stream_spec=None,
            name=name,
            incoming_stream_types={},
            outgoing_stream_type=FilterableStream,
            min_inputs=0,
            max_inputs=0,
            args=args if args is not None else [],
            kwargs=kwargs if kwargs is not None else {},
        )

    @property
    def short_repr(self):
        # Label input nodes by the basename of their source file.
        return os.path.basename(self.kwargs['filename'])
264 |
265 |
# noinspection PyMethodOverriding
class FilterNode(Node):
    """FilterNode: a filter with one or more filterable inputs/outputs."""

    def __init__(self, stream_spec, name, max_inputs=1, args=None, kwargs=None):
        # None sentinels avoid the mutable-default-argument pitfall; the
        # effective defaults are still [] / {}.
        super(FilterNode, self).__init__(
            stream_spec=stream_spec,
            name=name,
            incoming_stream_types={FilterableStream},
            outgoing_stream_type=FilterableStream,
            min_inputs=1,
            max_inputs=max_inputs,
            args=args if args is not None else [],
            kwargs=kwargs if kwargs is not None else {},
        )

    def _get_filter(self, outgoing_edges):
        """Render this node in ffmpeg filter-graph syntax, e.g.
        ``name=arg1:key=value``, escaping filter special characters.
        """
        args = self.args
        kwargs = self.kwargs
        if self.name in ('split', 'asplit'):
            # split/asplit take the number of outputs as their only arg.
            args = [len(outgoing_edges)]

        # Escape each parameter exactly once.  (Positional args used to be
        # passed through escape_chars twice, mangling backslashes/colons;
        # kwargs were always escaped once.)
        arg_params = [escape_chars(x, '\\\'=:') for x in args]
        out_kwargs = {}
        for k, v in list(kwargs.items()):
            k = escape_chars(k, '\\\'=:')
            v = escape_chars(v, '\\\'=:')
            out_kwargs[k] = v

        # Sort by *escaped* key to preserve the original output ordering.
        kwarg_params = ['{}={}'.format(k, out_kwargs[k]) for k in sorted(out_kwargs)]
        params = arg_params + kwarg_params

        params_text = escape_chars(self.name, '\\\'=:')

        if params:
            params_text += '={}'.format(':'.join(params))
        # Final pass escapes graph-level separators.
        return escape_chars(params_text, '\\\'[],;')
304 |
305 |
# noinspection PyMethodOverriding
class OutputNode(Node):
    """Output node: consumes one or more filterable streams."""

    def __init__(self, stream, name, args=None, kwargs=None):
        # None sentinels avoid the mutable-default-argument pitfall; the
        # effective defaults are still [] / {}.
        super(OutputNode, self).__init__(
            stream_spec=stream,
            name=name,
            incoming_stream_types={FilterableStream},
            outgoing_stream_type=OutputStream,
            min_inputs=1,
            max_inputs=None,
            args=args if args is not None else [],
            kwargs=kwargs if kwargs is not None else {},
        )

    @property
    def short_repr(self):
        # Label output nodes by the basename of their target file.
        return os.path.basename(self.kwargs['filename'])
323 |
324 |
class OutputStream(Stream):
    """Stream produced by an output-side node (``OutputNode``, ``GlobalNode``,
    or ``MergeOutputsNode``)."""

    def __init__(self, upstream_node, upstream_label, upstream_selector=None):
        super(OutputStream, self).__init__(
            upstream_node,
            upstream_label,
            {OutputNode, GlobalNode, MergeOutputsNode},
            upstream_selector=upstream_selector,
        )
333 |
334 |
# noinspection PyMethodOverriding
class MergeOutputsNode(Node):
    """Node that merges one or more ``OutputStream`` inputs into a single
    ``OutputStream``; it carries no args/kwargs of its own."""

    def __init__(self, streams, name):
        super(MergeOutputsNode, self).__init__(
            stream_spec=streams,
            name=name,
            incoming_stream_types={OutputStream},
            outgoing_stream_type=OutputStream,
            min_inputs=1,
            max_inputs=None,
        )
346 |
347 |
# noinspection PyMethodOverriding
class GlobalNode(Node):
    """Node wrapping exactly one ``OutputStream`` and producing one
    ``OutputStream`` (used for invocation-level arguments)."""

    def __init__(self, stream, name, args=None, kwargs=None):
        # None sentinels avoid the mutable-default-argument pitfall; the
        # effective defaults are still [] / {}.
        super(GlobalNode, self).__init__(
            stream_spec=stream,
            name=name,
            incoming_stream_types={OutputStream},
            outgoing_stream_type=OutputStream,
            min_inputs=1,
            max_inputs=1,
            args=args if args is not None else [],
            kwargs=kwargs if kwargs is not None else {},
        )
361 |
362 |
def stream_operator(stream_classes={Stream}, name=None):
    """Decorator that attaches the decorated function as a method on each
    class in ``stream_classes`` (under ``name``, or the function's own name).

    Note: the ``{Stream}`` default is a mutable default argument, but it is
    never mutated here; it is kept for interface compatibility.
    """

    def decorator(func):
        func_name = name or func.__name__
        # Plain loop instead of a list comprehension executed only for its
        # setattr side effects.
        for stream_class in stream_classes:
            setattr(stream_class, func_name, func)
        return func

    return decorator
370 |
371 |
def filter_operator(name=None):
    """Register the decorated function as a method on ``FilterableStream``."""
    return stream_operator(stream_classes={FilterableStream}, name=name)
374 |
375 |
def output_operator(name=None):
    """Register the decorated function as a method on ``OutputStream``."""
    return stream_operator(stream_classes={OutputStream}, name=name)
378 |
379 |
# Public API of ffmpeg.nodes; other operators are attached to the stream
# classes dynamically via the *_operator decorators above.
__all__ = ['Stream']
381 |
--------------------------------------------------------------------------------
/ffmpeg/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/ffmpeg/tests/__init__.py
--------------------------------------------------------------------------------
/ffmpeg/tests/sample_data/in1.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/ffmpeg/tests/sample_data/in1.mp4
--------------------------------------------------------------------------------
/ffmpeg/tests/sample_data/overlay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kkroening/ffmpeg-python/df129c7ba30aaa9ffffb81a48f53aa7253b0b4e6/ffmpeg/tests/sample_data/overlay.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | skip-string-normalization = true
3 | target_version = ['py27'] # TODO: drop Python 2 support (... "Soon").
4 | include = '\.pyi?$'
5 | exclude = '''
6 | (
7 | /(
8 | \.eggs
9 | | \.git
10 | | \.tox
    | \.venv
12 | | dist
13 | )/
14 | )
15 | '''
16 |
--------------------------------------------------------------------------------
/pytest.ini:
--------------------------------------------------------------------------------
1 | [pytest]
2 | testpaths = ffmpeg/tests
3 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | alabaster==0.7.12
2 | atomicwrites==1.3.0
3 | attrs==19.1.0
4 | Babel==2.7.0
5 | certifi==2019.3.9
6 | chardet==3.0.4
7 | docutils==0.14
8 | filelock==3.0.12
9 | future==0.17.1
10 | idna==2.8
11 | imagesize==1.1.0
12 | importlib-metadata==0.17
13 | Jinja2==2.10.1
14 | MarkupSafe==1.1.1
15 | more-itertools==7.0.0
16 | numpy==1.16.4
17 | packaging==19.0
18 | pluggy==0.12.0
19 | py==1.8.0
20 | Pygments==2.4.2
21 | pyparsing==2.4.0
22 | pytest==4.6.1
23 | pytest-mock==1.10.4
24 | pytz==2019.1
25 | requests==2.22.0
26 | six==1.12.0
27 | snowballstemmer==1.2.1
28 | Sphinx==2.1.0
29 | sphinxcontrib-applehelp==1.0.1
30 | sphinxcontrib-devhelp==1.0.1
31 | sphinxcontrib-htmlhelp==1.0.2
32 | sphinxcontrib-jsmath==1.0.1
33 | sphinxcontrib-qthelp==1.0.2
34 | sphinxcontrib-serializinghtml==1.1.3
35 | toml==0.10.0
36 | tox==3.12.1
37 | urllib3==1.25.3
38 | virtualenv==16.6.0
39 | wcwidth==0.1.7
40 | zipp==0.5.1
41 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [aliases]
2 | test=pytest
3 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | from textwrap import dedent
3 |
4 | version = '0.2.0'
5 | download_url = 'https://github.com/kkroening/ffmpeg-python/archive/v{}.zip'.format(
6 | version
7 | )
8 |
9 | long_description = dedent(
10 | '''\
11 | ffmpeg-python: Python bindings for FFmpeg
12 | =========================================
13 |
14 | :Github: https://github.com/kkroening/ffmpeg-python
15 | :API Reference: https://kkroening.github.io/ffmpeg-python/
16 | '''
17 | )
18 |
19 |
20 | file_formats = [
21 | 'aac',
22 | 'ac3',
23 | 'avi',
24 | 'bmp',
25 | 'flac',
26 | 'gif',
27 | 'mov',
28 | 'mp3',
29 | 'mp4',
30 | 'png',
31 | 'raw',
32 | 'rawvideo',
33 | 'wav',
34 | ]
35 | file_formats += ['.{}'.format(x) for x in file_formats]
36 |
37 | misc_keywords = [
38 | '-vf',
39 | 'a/v',
40 | 'audio',
41 | 'dsp',
42 | 'FFmpeg',
43 | 'ffmpeg',
44 | 'ffprobe',
45 | 'filtering',
46 | 'filter_complex',
47 | 'movie',
48 | 'render',
49 | 'signals',
50 | 'sound',
51 | 'streaming',
52 | 'streams',
53 | 'vf',
54 | 'video',
55 | 'wrapper',
56 | ]
57 |
58 | keywords = misc_keywords + file_formats
59 |
60 | setup(
61 | name='ffmpeg-python',
62 | packages=['ffmpeg'],
63 | version=version,
64 | description='Python bindings for FFmpeg - with complex filtering support',
65 | author='Karl Kroening',
66 | author_email='karlk@kralnet.us',
67 | url='https://github.com/kkroening/ffmpeg-python',
68 | download_url=download_url,
69 | keywords=keywords,
70 | long_description=long_description,
71 | install_requires=['future'],
72 | extras_require={
73 | 'dev': [
74 | 'future==0.17.1',
75 | 'numpy==1.16.4',
76 | 'pytest-mock==1.10.4',
77 | 'pytest==4.6.1',
78 | 'Sphinx==2.1.0',
79 | 'tox==3.12.1',
80 | ]
81 | },
82 | classifiers=[
83 | 'Intended Audience :: Developers',
84 | 'License :: OSI Approved :: Apache Software License',
85 | 'Natural Language :: English',
86 | 'Operating System :: OS Independent',
87 | 'Programming Language :: Python',
88 | 'Programming Language :: Python :: 2',
89 | 'Programming Language :: Python :: 2.7',
90 | 'Programming Language :: Python :: 3',
91 | 'Programming Language :: Python :: 3.3',
92 | 'Programming Language :: Python :: 3.4',
93 | 'Programming Language :: Python :: 3.5',
94 | 'Programming Language :: Python :: 3.6',
95 | 'Programming Language :: Python :: 3.7',
96 | 'Programming Language :: Python :: 3.8',
97 | 'Programming Language :: Python :: 3.9',
98 | 'Programming Language :: Python :: 3.10',
99 | ],
100 | )
101 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | # Tox (https://tox.readthedocs.io/) is a tool for running tests
2 | # in multiple virtualenvs. This configuration file will run the
3 | # test suite on all supported python versions. To use it, "pip install tox"
4 | # and then run "tox" from this directory.
5 |
6 | [tox]
7 | envlist = py27, py35, py36, py37, py38, py39, py310
8 |
9 | [gh-actions]
10 | python =
11 | 2.7: py27
12 | 3.5: py35
13 | 3.6: py36
14 | 3.7: py37
15 | 3.8: py38
16 | 3.9: py39
17 | 3.10: py310
18 |
19 | [testenv]
20 | commands = py.test -vv
21 | deps =
22 | future
23 | pytest
24 | pytest-mock
25 |
--------------------------------------------------------------------------------