├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.yml
│   │   ├── crash_report.md
│   │   ├── desync.md
│   │   └── feature_request.md
│   └── workflows
│       ├── build.yml
│       └── python-publish.yml
├── .gitignore
├── AUTHORS.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── articles
│   ├── imgs
│   │   ├── auto-editor_banner.png
│   │   └── cross_platform.png
│   ├── installing.md
│   └── legalinfo.md
├── auto_editor
│   ├── CreateEXE-Help-EmbeddedFFMPEG.bat
│   ├── __init__.py
│   ├── __main__.py
│   ├── analyze
│   │   ├── __init__.py
│   │   ├── audio.py
│   │   ├── motion.py
│   │   └── pixeldiff.py
│   ├── edit.py
│   ├── ffwrapper.py
│   ├── formats
│   │   ├── __init__.py
│   │   ├── final_cut_pro.py
│   │   ├── json.py
│   │   ├── premiere.py
│   │   ├── shotcut.py
│   │   └── utils.py
│   ├── help.json
│   ├── icon2.ico
│   ├── method.py
│   ├── objects.py
│   ├── output.py
│   ├── preview.py
│   ├── render
│   │   ├── __init__.py
│   │   ├── audio.py
│   │   ├── subtitle.py
│   │   ├── tsm
│   │   │   ├── __init__.py
│   │   │   ├── analysis_synthesis.py
│   │   │   ├── array.py
│   │   │   ├── cbuffer.py
│   │   │   ├── normalizebuffer.py
│   │   │   └── phasevocoder.py
│   │   └── video.py
│   ├── subcommands
│   │   ├── __init__.py
│   │   ├── desc.py
│   │   ├── grep.py
│   │   ├── info.py
│   │   ├── levels.py
│   │   ├── subdump.py
│   │   └── test.py
│   ├── timeline.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── container.py
│   │   ├── encoder.py
│   │   ├── func.py
│   │   ├── log.py
│   │   ├── progressbar.py
│   │   └── types.py
│   ├── validate_input.py
│   ├── vanparse.py
│   └── wavfile.py
├── example.mp4
├── resources
│   ├── aac.m4a
│   ├── alac.m4a
│   ├── data
│   │   ├── example_1.5_speed.npz
│   │   └── example_2.0_speed.npz
│   ├── embedded-image
│   │   ├── h264-mjpeg.mkv
│   │   ├── h264-mjpeg.mp4
│   │   ├── h264-png.mkv
│   │   └── h264-png.mp4
│   ├── json
│   │   ├── 0.1-disjoint.json
│   │   └── 0.1-non-zero-start.json
│   ├── mono.mp3
│   ├── multi-track.mov
│   ├── new-commentary.mp3
│   ├── only-video
│   │   ├── man-on-green-screen.gif
│   │   └── man-on-green-screen.mp4
│   ├── subtitle.mp4
│   ├── testsrc.mkv
│   ├── testsrc.mp4
│   └── wav
│       ├── example-cut-s16le.wav
│       ├── pcm-f32le.wav
│       └── pcm-s32le.wav
└── setup.py
/.github/ISSUE_TEMPLATE/bug_report.yml:
--------------------------------------------------------------------------------
1 | name: Bug Report
2 | description: Create a report to help improve the program.
3 | labels: bug
4 | body:
5 | - type: textarea
6 | attributes:
7 | label: What's your platform?
8 | description: "Which operating system do you use? Run `auto-editor --debug`"
9 | placeholder: "Paste output of `auto-editor --debug`"
10 | render: shell
11 | validations:
12 | required: true
13 |
14 | - type: textarea
15 | attributes:
16 | label: Bug description
17 | description: What happened?
18 | validations:
19 | required: true
20 |
21 | - type: textarea
22 | attributes:
23 | label: What command did you use?
24 | placeholder: "Paste the command you've tried."
25 | render: shell
26 | validations:
27 | required: true
28 |
29 | - type: textarea
30 | attributes:
31 | label: What properties does your input video have?
32 | description: This is NOT the place where you paste your command. IF YOU DO NOT FILL THIS OUT CORRECTLY, YOUR ISSUE WILL BE DELETED.
33 | placeholder: Run `auto-editor info your_video.mp4` AND `ffmpeg -hide_banner -i your_video.mp4`
34 | render: shell
35 |
36 | - type: textarea
37 | attributes:
38 | label: Comments
39 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/crash_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Crash Report
3 | about: Create a report to help improve the program.
4 | title: ''
5 | labels: crash
6 |
7 | ---
8 |
9 | (Make sure you're using the latest version)
10 |
11 | **What went wrong**
12 |
13 | ```
14 | Paste your command and its output here.
15 | ```
16 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/desync.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Desync Report
3 | about: Create a report to help improve the program.
4 | title: ''
5 | labels: desync
6 |
7 | ---
8 |
9 | You're Exporting to:
10 | - [x] Media File (Video or Audio)
11 | - [ ] Premiere Pro
12 | - [ ] Final Cut Pro
13 |
14 | What is the frame rate and length of your video? (use `auto-editor info $FILE --has-vfr`)
15 |
16 | What is desyncing and how bad is it? (e.g. every 30 minutes, the audio is one second behind the video)
17 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature Request
3 | about: 'Suggest a feature or improvement'
4 | title: ''
5 | labels: enhancement
6 |
7 | ---
8 |
9 | Describe what feature/improvement you would like here.
10 |
11 | Describe how you would use it.
12 |
13 |
14 | Make sure that it is something that YOU need, not something that just seems useful.
15 |
16 |
17 | (Make sure you're using the latest version)
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: build
2 | on:
3 | release:
4 | types: [created]
5 | push:
6 | paths-ignore:
7 | - '**.md'
8 | - '**.txt'
9 | - '**.png'
10 | - 'auto_editor/__init__.py'
11 | branches:
12 | - master
13 | jobs:
14 | build1:
15 | strategy:
16 | matrix:
17 | python-version: ['3.8']
18 | runs-on: ubuntu-latest
19 | steps:
20 | - uses: actions/checkout@v2
21 | - name: Set up Python ${{ matrix.python-version }}
22 | uses: actions/setup-python@v2
23 | with:
24 | python-version: ${{ matrix.python-version }}
25 | - name: Install FFmpeg
26 | run: |
27 | sudo apt update
28 | sudo apt install ffmpeg
29 | - name: Install Auto-Editor
30 | run: pip install -e .
31 | - name: Test
32 | run: auto-editor test all
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 |
4 | name: Upload Python Package
5 | on:
6 | release:
7 | types: [created]
8 | jobs:
9 | deploy:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/checkout@v2
13 | - name: Set up Python
14 | uses: actions/setup-python@v2
15 | with:
16 | python-version: '3.x'
17 | - name: Install dependencies
18 | run: |
19 | python -m pip install --upgrade pip
20 | pip install setuptools wheel twine
21 | - name: Build and publish
22 | env:
23 | TWINE_USERNAME: ${{ secrets.PIP_USERNAME }}
24 | TWINE_PASSWORD: ${{ secrets.PIP_PASSWORD }}
25 | run: |
26 | mv auto_editor/ffmpeg/Windows/ffmpeg.exe ./
27 | python setup.py bdist_wheel --plat-name=macosx_10_9_x86_64
28 |
29 | twine upload dist/*
30 | rm -rf dist build
31 |
32 | mv ffmpeg.exe auto_editor/ffmpeg/Windows/
33 | mv auto_editor/ffmpeg/Darwin/ffmpeg ./
34 |
35 | python setup.py bdist_wheel --plat-name=win_amd64
36 | twine upload dist/*
37 | rm -rf dist build
38 |
39 | mv auto_editor/ffmpeg/Windows/ffmpeg.exe ./
40 |
41 | python setup.py sdist bdist_wheel
42 | twine upload dist/*
43 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Auto-Editor Generated Files
2 | *_ALTERED*
3 | *_tracks
4 | *example
5 | *.xml
6 | *.mlt
7 | *.fcpxml
8 | *https*
9 | example.json
10 |
11 | # Python Generated Files
12 | __pycache__/
13 | *.pyc
14 | *.pyo
15 | *.spec
16 |
17 | # Build Files
18 | build/
19 | dist/
20 | *.egg-info/
21 |
22 | # OS Files
23 | .DS_Store
24 |
--------------------------------------------------------------------------------
/AUTHORS.md:
--------------------------------------------------------------------------------
1 | # Authors
2 |
3 | ### Creator
4 | @WyattBlue
5 |
6 | ### Contributions
7 | @OParczyk
8 | @m-rey
9 | @Saya47
10 | @Steffo99
11 | @mrioqueiroz
12 | @zoe1337
13 | @eykd
14 | @mlinaje
15 |
16 | ### Early Testers
17 | Breadstick King#2906
18 | cuye#1920
19 | Twitch User: GanerRL
20 | Youtuber: Mark Santos
21 | @flyingzambie
22 | @sasa
23 | @callmedougan
24 | @p4jo
25 | @firelightning13
26 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 |
3 | In the interest of furthering the goals of this project and creating a welcoming and professional space, we have written these rules and guidelines to guide and reassure others of our standards.
4 |
5 | * Do not threaten, defraud, doxx, or scam anyone.
6 |
7 | * Do not make personal attacks against other people.
8 |
9 | * Do not call for [violence](https://www.oed.com/oed2/00277885#:~:text=1.,c1290%20Beket%20932%20in%20S.) against any individual or group.
10 |
11 | * Do not make sweeping generalizations about groups defined by mostly uncontrollable physical or mental attributes.
12 |
13 | * Always follow the platform's Acceptable Use Policies and Terms of Service. (e.g. Github's [Acceptable Use Policy](https://docs.github.com/en/free-pro-team@latest/github/site-policy/github-acceptable-use-policies))
14 |
15 |
16 | ## Enforcement
17 |
18 | These rules are to be applied and enforced equally, regardless of the offender's or victim's maintainer status, seniority, societal status, or physical attributes.
19 |
20 | Content that violates these standards may be removed at the maintainers' discretion. Content that comes close to violating these standards may receive a warning. Multiple warnings may lead to the removal of the content in question. Persons who repeatedly break the rules may be banned or excluded.
21 |
22 | If the maintainers consistently break their own rules, or selectively enforce them to a great extent, they must either lower their standards and let others do what they can get away with, or revise the rules to a fairer and more equal standing.
23 |
24 | ## Scope
25 |
26 | This code of conduct applies to this GitHub repository and the Discord server linked.
27 |
28 | The code of conduct does not apply to private conversations between persons or in real life unless the person represents this project in an official capacity.
29 |
30 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | ## Making a Great Issue
4 |
5 | ### Nice formatting
6 |
7 | - For option names, surround them with a single backtick (\`). \`--example\` will be rendered as `--example`
8 | - For auto-editor output or tracebacks, surround them with triple backticks.
9 |
10 | \`\`\`
11 | auto-editor --version
12 |
13 | Auto-Editor Version: x
14 | \`\`\`
15 |
16 | Will be rendered as:
17 |
18 | ```
19 | auto-editor --version
20 |
21 | Auto-Editor Version: x
22 | ```
23 |
24 | ### General Tips
25 | - See what does and doesn't work.
26 | - If you're confused about what `--option` does, run `auto-editor --option --help`
27 |
28 | ## Contributing Code
29 |
30 | Fork the main repo and then clone your fork, replacing `USER` with your actual GitHub username.
31 |
32 | ```
33 | git clone https://github.com/USER/auto-editor.git
34 | cd auto-editor
35 | ```
36 |
37 | Then make a new branch and call it something like `patch-1`, `support-for-gif`, or `fix-typo`:
38 |
39 | ```
40 | git branch patch-1
41 | git checkout patch-1
42 | ```
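(Equivalently, `git checkout -b patch-1` creates and switches to the branch in one step.)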
43 |
44 | Then open up your favorite text editor and apply your changes.
45 |
46 | ### Code styling
47 |
48 | Auto-Editor uses black to keep the code tidy and consistent. Install black with:
49 | ```
50 | pip install black
51 | ```
52 |
53 | Run black on the root folder. Do this every time you make a change.
54 | ```
55 | black auto_editor
56 | ```
57 |
58 | ### Making a Pull Request
59 | Go to https://github.com/WyattBlue/auto-editor/pulls and hit the "New Pull Request" button.
60 |
61 | GitHub should autofill the rest since you've made a fork. Make sure you select the branch you've made and not `master`.
62 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | This is free and unencumbered software released into the public domain.
2 |
3 | Anyone is free to copy, modify, publish, use, compile, sell, or
4 | distribute this software, either in source code form or as a compiled
5 | binary, for any purpose, commercial or non-commercial, and by any
6 | means.
7 |
8 | In jurisdictions that recognize copyright laws, the author or authors
9 | of this software dedicate any and all copyright interest in the
10 | software to the public domain. We make this dedication for the benefit
11 | of the public at large and to the detriment of our heirs and
12 | successors. We intend this dedication to be an overt act of
13 | relinquishment in perpetuity of all present and future rights to this
14 | software under copyright law.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 | OTHER DEALINGS IN THE SOFTWARE.
23 |
24 | For more information, please refer to <https://unlicense.org>
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ![Auto-Editor banner](articles/imgs/auto-editor_banner.png)
2 |
3 | **Auto-Editor** is a command line application for automatically **editing video and audio** using a variety of analysis methods, most notably audio loudness.
4 |
5 | ---
6 |
7 | [build](https://github.com/wyattblue/auto-editor/actions)
8 |
9 |
10 |
11 | Before doing the real editing, you first cut out the "dead space", which is typically silence. This is known as a "first pass". Cutting these sections manually is a boring task, especially if the video is very long.
12 |
13 | ```
14 | auto-editor path/to/your/video.mp4
15 | ```
16 |
17 | ## Installing
18 |
19 | ```
20 | pip install auto-editor
21 | ```
22 |
23 | See [Installing](https://github.com/WyattBlue/auto-editor/blob/master/articles/installing.md) for additional information.
24 |
25 |
26 | ## Cutting
27 |
28 | Change the **pace** of the edited video by using `--frame-margin`.
29 |
30 | `--frame-margin` will include small sections that are next to loud parts. A frame margin of 8 will add up to 8 frames before and 8 frames after the loud part.
31 |
32 | ```
33 | auto-editor example.mp4 --frame-margin 8
34 | ```
35 |
36 | ## Set how cuts are made
37 |
38 | Use the `--edit` option to change how auto-editor makes automated cuts.
39 |
40 | For example, edit out motionlessness in a video by setting `--edit motion`.
41 |
42 |
43 | ```
44 | # Cut out sections where the percentage of motion is less than 2%.
45 | auto-editor example.mp4 --edit motion:threshold=2%
46 |
47 | # --edit is set to "audio" by default
48 | auto-editor example.mp4 --silent-threshold 4%
49 |
50 | # Audio and motion thresholds are set independently.
51 | auto-editor example.mp4 --edit 'audio:threshold=3% or motion:threshold=6%'
52 | ```
53 |
54 | ## See what auto-editor cuts out
55 |
56 | To export what auto-editor normally cuts out, set `--video-speed` to `99999` and `--silent-speed` to `1`. This is the reverse of the usual default values.
57 |
58 | ```
59 | auto-editor example.mp4 --video-speed 99999 --silent-speed 1
60 | ```
61 |
62 | ## Exporting to Editors
63 |
64 | Create an XML file that can be imported to Adobe Premiere Pro using this command:
65 |
66 | ```
67 | auto-editor example.mp4 --export premiere
68 | ```
69 |
70 | Similar commands exist for:
71 |
72 | - `--export final-cut-pro` for Final Cut Pro.
73 | - `--export shotcut` for Shotcut.
74 |
75 | Other editors, like Sony Vegas, can understand the `premiere` format. If your favorite editor doesn't, you can use `--export clip-sequence`, which creates many video clips that can be imported and manipulated like normal.
76 |
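For example, an illustrative run (the output filenames depend on the input):

```
auto-editor example.mp4 --export clip-sequence
```
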
77 | ## Manual Editing
78 |
79 | Use the `--cut-out` option to always remove a section.
80 |
81 | ```
82 | # Cut out the first 10 seconds.
83 | auto-editor example.mp4 --cut-out start,10sec
84 |
85 | # Cut out the first 10 frames.
86 | auto-editor example.mp4 --cut-out start,10
87 |
88 | # Cut out the last 10 seconds.
89 | auto-editor example.mp4 --cut-out -10sec,end
90 |
91 | # Cut out the first 10 seconds and cut out the range from 15 seconds to 20 seconds.
92 | auto-editor example.mp4 --cut-out start,10sec 15sec,20sec
93 | ```
94 |
95 | And of course, all the audio cuts still apply.
96 |
97 | If you don't want **any automatic cuts**, use `--edit none`:
98 |
99 | ```
100 | # Cut out the first 5 seconds, leave the rest untouched.
101 | auto-editor example.mp4 --edit none --cut-out start,5sec
102 | ```
103 |
104 | And the inverse:
105 |
106 | ```
107 | # Leave in the first 5 seconds, cut everything else out.
108 | auto-editor example.mp4 --edit all --add-in start,5sec
109 | ```
110 |
111 | ## More Options
112 |
113 | List all available options:
114 |
115 | ```
116 | auto-editor --help
117 | ```
118 |
119 | Use `--help` with a specific option for more information:
120 |
121 | ```
122 | auto-editor --scale --help
123 | --scale
124 | Scale the output media file by a certain factor.
125 |
126 | type: float_type
127 | default: 1
128 | ```
129 |
130 |
131 | Auto-Editor is available on all platforms
132 | ![Cross platform](articles/imgs/cross_platform.png)
133 |
134 |
135 | ## Articles
136 | - [How to Install Auto-Editor](https://github.com/WyattBlue/auto-editor/blob/master/articles/installing.md)
137 | - [All the Options (And What They Do)](https://auto-editor.com/options)
138 | - [Supported Media](https://auto-editor.com/supported_media)
139 | - [What is Range Syntax](https://auto-editor.com/range_syntax)
140 | - [Subcommands](https://auto-editor.com/subcommands)
141 | - [GPU Acceleration](https://auto-editor.com/gpu_acceleration)
142 |
143 |
144 | ## Copyright
145 | Auto-Editor is under the [Public Domain](https://github.com/WyattBlue/auto-editor/blob/master/LICENSE) but contains non-free elements. See [this page](https://github.com/WyattBlue/auto-editor/blob/master/articles/legalinfo.md) for more info.
146 |
147 |
148 | ## Issues
149 | If you have a bug or a code suggestion, you can [create a new issue](https://github.com/WyattBlue/auto-editor/issues/new) here. If you'd like to discuss this project, suggest new features, or chat with other users, you can use [the discord server](https://discord.com/invite/kMHAWJJ).
150 |
--------------------------------------------------------------------------------
/articles/imgs/auto-editor_banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/articles/imgs/auto-editor_banner.png
--------------------------------------------------------------------------------
/articles/imgs/cross_platform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/articles/imgs/cross_platform.png
--------------------------------------------------------------------------------
/articles/installing.md:
--------------------------------------------------------------------------------
1 | # Installing Auto-Editor
2 |
3 | Download and Install [Python 3](https://www.python.org/downloads/).
4 |
5 | > If you are installing on Windows, make sure "Add Python 3.10 to PATH" is checked.
6 |
7 | Once that's done, you should have pip on your PATH. That means when you run `pip3` in your console, you should get a list of commands and not `command not found`. If you don't have pip on your PATH, try reinstalling Python.
8 |
9 | Then run:
10 | ```
11 | pip install --upgrade pip
12 | ```
13 |
14 | to upgrade pip to the latest version. Then use pip to install auto-editor:
15 |
16 | ```
17 | pip install auto-editor
18 | ```
19 |
20 | > Linux users: you will need to have FFmpeg installed and on your PATH.
21 |
22 | Now run this command and it should list all the options you can use.
23 |
24 | ```
25 | auto-editor --help
26 | ```
27 |
28 | If that works, then congratulations, you have successfully installed auto-editor. You can now use this with any other video or audio file that you have.
29 |
30 | ```
31 | auto-editor C:\path\to\your\video.mp4
32 | ```
33 |
34 | About every 1 or 2 weeks, a new version will be available. It's recommended that you stay up to date so you always get the latest improvements and bug fixes. Upgrade by running:
35 |
36 |
37 | ```
38 | pip install auto-editor --upgrade
39 | ```
40 |
41 | Run this to uninstall auto-editor:
42 |
43 | ```
44 | pip uninstall auto-editor
45 | ```
46 |
47 |
48 | ----
49 |
50 | ## Installing from Source
51 |
52 | Use pip and git to install directly from the repository:
53 |
54 | ```terminal
55 | pip install git+https://github.com/WyattBlue/auto-editor.git
56 | ```
57 |
58 | Then run the local version using `py` or `python3`:
59 | ```
60 | python3 -m auto_editor example.mp4 --frame-margin 7
61 | ```
62 |
63 | ----
64 |
65 | ## Dependencies
66 |
67 | If auto-editor could not be installed because a dependency failed to install, run:
68 |
69 | ```
70 | pip install auto-editor --no-deps
71 | ```
72 | Then install the dependencies individually:
73 | ```
74 | pip install numpy
75 | pip install av
76 | pip install Pillow
77 | pip install yt-dlp
78 | ```
79 |
80 | ### numpy
81 |
82 | Foundational math module needed for handling large data. Must be installed for any use with auto-editor.
83 |
84 | ### av
85 |
86 | Retrieve low level video data in a form auto-editor can natively use. Allows for very fast rendering of videos.
87 |
88 | ### Pillow
89 |
90 | Render video objects like text and ellipses.
91 |
92 | ### yt-dlp
93 |
94 | Module used to download videos off of websites.
95 |
--------------------------------------------------------------------------------
/articles/legalinfo.md:
--------------------------------------------------------------------------------
1 | ## Legal Info
2 |
3 | Auto-Editor is in the Public Domain; this covers all files besides the ones listed below. Auto-Editor was created by [these people.](https://github.com/WyattBlue/auto-editor/blob/master/AUTHORS.md)
4 |
5 | ----
6 |
7 | The binaries under auto_editor/ffmpeg are under the [LGPLv3 License](https://github.com/WyattBlue/auto-editor/blob/master/auto_editor/ffmpeg/LICENSE.txt) and were made by the FFmpeg team.
8 |
--------------------------------------------------------------------------------
/auto_editor/CreateEXE-Help-EmbeddedFFMPEG.bat:
--------------------------------------------------------------------------------
1 | pyinstaller --clean --onefile -i icon2.ico -y -n AEGPU.exe --add-data="help.json;auto_editor" --add-binary "ffmpeg.exe;auto_editor/ffmpeg/Windows/" __main__.py
2 |
3 | pause
4 |
--------------------------------------------------------------------------------
/auto_editor/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "22.20.1"
2 | version = "22w20a"
3 |
--------------------------------------------------------------------------------
/auto_editor/analyze/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/auto_editor/analyze/__init__.py
--------------------------------------------------------------------------------
/auto_editor/analyze/audio.py:
--------------------------------------------------------------------------------
1 | from math import ceil
2 |
3 | import numpy as np
4 | import numpy.typing as npt
5 |
6 | from auto_editor.utils.progressbar import ProgressBar
7 |
8 |
9 | def get_max_volume(s: np.ndarray) -> float:
10 | return max(float(np.max(s)), -float(np.min(s)))
11 |
12 |
13 | def audio_detection(
14 | audio_samples: np.ndarray,
15 | sample_rate: int,
16 | fps: float,
17 | progress: ProgressBar,
18 | ) -> npt.NDArray[np.float_]:
19 |
20 | max_volume = get_max_volume(audio_samples)
21 |
22 | if max_volume == 0:
23 | # Prevent dividing by zero
24 | max_volume = 1
25 |
26 | sample_count = audio_samples.shape[0]
27 |
28 | sample_rate_per_frame = sample_rate / fps
29 | audio_frame_count = ceil(sample_count / sample_rate_per_frame)
30 |
31 | progress.start(audio_frame_count, "Analyzing audio volume")
32 |
33 | threshold_list = np.zeros((audio_frame_count), dtype=np.float_)
34 |
35 | # Calculate when the audio is loud or silent.
36 | for i in range(audio_frame_count):
37 |
38 | if i % 500 == 0:
39 | progress.tick(i)
40 |
41 | start = int(i * sample_rate_per_frame)
42 | end = min(int((i + 1) * sample_rate_per_frame), sample_count)
43 |
44 | threshold_list[i] = get_max_volume(audio_samples[start:end]) / max_volume
45 |
46 | progress.end()
47 | return threshold_list
48 |
--------------------------------------------------------------------------------
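A minimal sketch of how `audio_detection` can be driven, mirroring the calls made in `method.py` (the `"none"` progress-bar style and the WAV path are assumptions for illustration):

```python
from auto_editor.analyze.audio import audio_detection
from auto_editor.utils.progressbar import ProgressBar
from auto_editor.wavfile import read

# Read an audio track that edit.py previously extracted with FFmpeg.
sample_rate, samples = read("temp/0-0.wav")

# Per-frame loudness, normalized so 1.0 is the loudest point in the track.
levels = audio_detection(samples, sample_rate, fps=30.0, progress=ProgressBar("none"))

# Frames above a 4% loudness threshold count as "loud", as method.py does.
loud = levels > 0.04
```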
/auto_editor/analyze/motion.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple
2 |
3 | import av
4 | import numpy as np
5 | from numpy.typing import NDArray
6 | from PIL import ImageOps, ImageChops, ImageFilter
7 |
8 | from auto_editor.utils.progressbar import ProgressBar
9 |
10 |
11 | def new_size(size: Tuple[int, int], width: int) -> Tuple[int, int]:
12 | h, w = size
13 | return width, int(h * (width / w))
14 |
15 |
16 | def motion_detection(
17 | path: str, fps: float, progress: ProgressBar, width: int, blur: int
18 | ) -> NDArray[np.float_]:
19 | container = av.open(path, "r")
20 |
21 | stream = container.streams.video[0]
22 | stream.thread_type = "AUTO"
23 |
24 | inaccurate_dur = int(stream.duration * stream.time_base * stream.rate)
25 |
26 | progress.start(inaccurate_dur, "Analyzing motion")
27 |
28 | prev_image = None
29 | image = None
30 | total_pixels = None
31 | index = 0
32 |
33 | threshold_list = np.zeros((1024), dtype=np.float_)
34 |
35 | for frame in container.decode(stream):
36 | if image is None:
37 | prev_image = None
38 | else:
39 | prev_image = image
40 |
41 | index = int(frame.time * fps)
42 |
43 | progress.tick(index)
44 |
45 | if index > len(threshold_list) - 1:
46 | threshold_list = np.concatenate(
47 | (threshold_list, np.zeros((len(threshold_list)), dtype=np.float_)),
48 | axis=0,
49 | )
50 |
51 | image = frame.to_image()
52 |
53 | if total_pixels is None:
54 | total_pixels = image.size[0] * image.size[1]
55 |
56 | image.thumbnail(new_size(image.size, width))
57 | image = ImageOps.grayscale(image)
58 |
59 | if blur > 0:
60 | image = image.filter(ImageFilter.GaussianBlur(radius=blur))
61 |
62 | if prev_image is not None:
63 | count = np.count_nonzero(ImageChops.difference(prev_image, image))
64 |
65 | threshold_list[index] = count / total_pixels
66 |
67 | progress.end()
68 | return threshold_list[:index]
69 |
--------------------------------------------------------------------------------
/auto_editor/analyze/pixeldiff.py:
--------------------------------------------------------------------------------
1 | import av
2 | import numpy as np
3 | from numpy.typing import NDArray
4 | from PIL import ImageChops
5 |
6 | from auto_editor.utils.progressbar import ProgressBar
7 |
8 |
9 | def pixel_difference(
10 | path: str, fps: float, progress: ProgressBar
11 | ) -> NDArray[np.uint64]:
12 | container = av.open(path, "r")
13 |
14 | stream = container.streams.video[0]
15 | stream.thread_type = "AUTO"
16 |
17 | inaccurate_dur = int(stream.duration * stream.time_base * stream.rate)
18 |
19 | progress.start(inaccurate_dur, "Analyzing pixel diffs")
20 |
21 | prev_image = None
22 | image = None
23 | index = 0
24 |
25 | threshold_list = np.zeros((1024), dtype=np.uint64)
26 |
27 | for frame in container.decode(stream):
28 | if image is None:
29 | prev_image = None
30 | else:
31 | prev_image = image
32 |
33 | index = int(frame.time * fps)
34 | progress.tick(index)
35 |
36 | if index > len(threshold_list) - 1:
37 | threshold_list = np.concatenate(
38 | (threshold_list, np.zeros((len(threshold_list)), dtype=np.uint64)),
39 | axis=0,
40 | )
41 |
42 | image = frame.to_image()
43 |
44 | if prev_image is not None:
45 | threshold_list[index] = np.count_nonzero(
46 | ImageChops.difference(prev_image, image)
47 | )
48 |
49 | progress.end()
50 | return threshold_list[:index]
51 |
--------------------------------------------------------------------------------
/auto_editor/edit.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import List, Optional
3 |
4 | from auto_editor.ffwrapper import FFmpeg, FileInfo
5 | from auto_editor.timeline import Timeline, make_timeline
6 | from auto_editor.utils.container import get_rules
7 | from auto_editor.utils.log import Log
8 | from auto_editor.utils.progressbar import ProgressBar
9 |
10 |
11 | def set_output_name(path: str, inp_ext: str, export: str) -> str:
12 | root, ext = os.path.splitext(path)
13 |
14 | if export == "json":
15 | return f"{root}.json"
16 | if export == "final-cut-pro":
17 | return f"{root}.fcpxml"
18 | if export == "shotcut":
19 | return f"{root}.mlt"
20 | if export == "premiere":
21 | return f"{root}.xml"
22 | if export == "audio":
23 | return f"{root}_ALTERED.wav"
24 | if ext == "":
25 | return root + inp_ext
26 |
27 | return f"{root}_ALTERED{ext}"
28 |
29 |
30 | def edit_media(
31 | inputs: List[FileInfo], ffmpeg: FFmpeg, args, temp: str, log: Log
32 | ) -> Optional[str]:
33 |
34 | progress = ProgressBar(args.progress)
35 |
36 | timeline = None
37 | if inputs[0].ext == ".json":
38 | from auto_editor.formats.json import read_json
39 |
40 | timeline = read_json(inputs[0].path, ffmpeg, log)
41 | inputs = timeline.inputs
42 |
43 | inp = inputs[0]
44 |
45 | if args.output_file is None:
46 | output = set_output_name(inp.path, inp.ext, args.export)
47 | else:
48 | output = args.output_file
49 | if os.path.splitext(output)[1] == "":
50 | output = set_output_name(output, inp.ext, args.export)
51 |
52 | output_container = os.path.splitext(output)[1].replace(".", "")
53 |
54 | # Check if export options make sense.
55 | rules = get_rules(output_container)
56 | codec_error = "'{}' codec is not supported in '{}' container."
57 |
58 | if not (args.sample_rate is None or args.sample_rate == "unset"):
59 | if rules.samplerate is not None and args.sample_rate not in rules.samplerate:
60 | log.error(
61 | f"'{output_container}' container only supports samplerates: {rules.samplerate}"
62 | )
63 |
64 | vcodec = args.video_codec
65 | if vcodec == "uncompressed":
66 | vcodec = "mpeg4"
67 | if vcodec == "copy":
68 | vcodec = inp.videos[0].codec
69 |
70 | if vcodec != "auto":
71 | if rules.vstrict:
72 | assert rules.vcodecs is not None
73 | if vcodec not in rules.vcodecs:
74 | log.error(codec_error.format(vcodec, output_container))
75 |
76 | if vcodec in rules.disallow_v:
77 | log.error(codec_error.format(vcodec, output_container))
78 |
79 | acodec = args.audio_codec
80 | if acodec == "copy":
81 | acodec = inp.audios[0].codec
82 | log.debug(f"Setting acodec to {acodec}")
83 |
84 | if acodec not in ("unset", "auto"):
85 | if rules.astrict:
86 | assert rules.acodecs is not None
87 | if acodec not in rules.acodecs:
88 | log.error(codec_error.format(acodec, output_container))
89 |
90 | if acodec in rules.disallow_a:
91 | log.error(codec_error.format(acodec, output_container))
92 |
93 | if args.keep_tracks_seperate and rules.max_audios == 1:
94 | log.warning(
95 | f"'{output_container}' container doesn't support multiple audio tracks."
96 | )
97 |
98 | if not args.preview and not args.timeline:
99 | if os.path.isdir(output):
100 | log.error("Output path already has an existing directory!")
101 |
102 | if os.path.isfile(output) and inputs[0].path != output:
103 | log.debug(f"Removing already existing file: {output}")
104 | os.remove(output)
105 |
106 | # Extract subtitles in their native format.
107 | # TODO
108 | # if len(inp.subtitles) > 0:
109 | # cmd = ["-i", inp.path, "-hide_banner"]
110 | # for s, sub in enumerate(inp.subtitles):
111 | # cmd.extend(["-map", f"0:s:{s}"])
112 | # for s, sub in enumerate(inp.subtitles):
113 | # cmd.extend([os.path.join(temp, f"{s}s.{sub.ext}")])
114 | # ffmpeg.run(cmd)
115 |
116 | log.conwrite("Extracting audio")
117 |
118 | cmd = []
119 | for i, inp in enumerate(inputs):
120 | cmd.extend(["-i", inp.path])
121 | cmd.append("-hide_banner")
122 |
123 | if args.sample_rate is None:
124 | samplerate = inputs[0].get_samplerate()
125 | else:
126 | samplerate = args.sample_rate
127 |
128 | for i, inp in enumerate(inputs):
129 | for s in range(len(inp.audios)):
130 | cmd.extend(
131 | [
132 | "-map",
133 | f"{i}:a:{s}",
134 | "-ac",
135 | "2",
136 | "-ar",
137 | f"{samplerate}",
138 | "-rf64",
139 | "always",
140 | os.path.join(temp, f"{i}-{s}.wav"),
141 | ]
142 | )
143 |
144 | ffmpeg.run(cmd)
145 |
146 | if timeline is None:
147 | timeline = make_timeline(inputs, args, samplerate, progress, temp, log)
148 |
149 | if args.timeline:
150 | from auto_editor.formats.json import make_json_timeline
151 |
152 | make_json_timeline(args.api, 0, timeline, log)
153 | return None
154 |
155 | if args.preview:
156 | from auto_editor.preview import preview
157 |
158 | preview(timeline, log)
159 | return None
160 |
161 | if args.export == "json":
162 | from auto_editor.formats.json import make_json_timeline
163 |
164 | make_json_timeline(args.api, output, timeline, log)
165 | return output
166 |
167 | if args.export == "premiere":
168 | from auto_editor.formats.premiere import premiere_xml
169 |
170 | premiere_xml(temp, output, timeline)
171 | return output
172 |
173 | if args.export == "final-cut-pro":
174 | from auto_editor.formats.final_cut_pro import fcp_xml
175 |
176 | fcp_xml(output, timeline)
177 | return output
178 |
179 | if args.export == "shotcut":
180 | from auto_editor.formats.shotcut import shotcut_xml
181 |
182 | shotcut_xml(output, timeline)
183 | return output
184 |
185 | def make_media(inp: FileInfo, timeline: Timeline, output: str) -> None:
186 | from auto_editor.output import mux_quality_media
187 | from auto_editor.render.video import render_av
188 |
189 | if rules.allow_subtitle:
190 | from auto_editor.render.subtitle import cut_subtitles
191 |
192 | cut_subtitles(ffmpeg, timeline, temp, log)
193 |
194 | if rules.allow_audio:
195 | from auto_editor.render.audio import make_new_audio
196 |
197 | for t in range(len(inp.audios)):
198 | make_new_audio(t, temp, timeline, progress, log)
199 |
200 | video_output = []
201 |
202 | if rules.allow_video:
203 | for v, vid in enumerate(inp.videos):
204 | if vid.codec not in ("png", "mjpeg", "webp"):
205 | out_path, apply_later = render_av(
206 | ffmpeg, v, timeline, args, progress, rules, temp, log
207 | )
208 | video_output.append((v, True, out_path, apply_later))
209 | elif rules.allow_image:
210 | out_path = os.path.join(temp, f"{v}.{vid.codec}")
211 | # fmt: off
212 | ffmpeg.run(["-i", inp.path, "-map", "0:v", "-map", "-0:V",
213 | "-c", "copy", out_path])
214 | # fmt: on
215 | video_output.append((v, False, out_path, False))
216 |
217 | log.conwrite("Writing output file")
218 |
219 | mux_quality_media(
220 | ffmpeg, video_output, rules, output, output_container, args, inp, temp, log
221 | )
222 |
223 | # if args.export == "clip-sequence":
224 | # total_frames = chunks[-1][1]
225 | # clip_num = 0
226 | # for chunk in chunks:
227 | # if chunk[2] == 99999:
228 | # continue
229 | # make_media(
230 | # inp,
231 | # pad_chunk(chunk, total_frames),
232 | # append_filename(output, f"-{clip_num}"),
233 | # )
234 | # clip_num += 1
235 | # else:
236 | make_media(inp, timeline, output)
237 | return output
238 |
--------------------------------------------------------------------------------
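For reference, some illustrative expectations for `set_output_name` above, read directly off its branches (the `"default"` export falls through to the `_ALTERED` case):

```python
from auto_editor.edit import set_output_name

assert set_output_name("vid.mp4", ".mp4", export="premiere") == "vid.xml"
assert set_output_name("vid.mp4", ".mp4", export="audio") == "vid_ALTERED.wav"
assert set_output_name("vid.mp4", ".mp4", export="default") == "vid_ALTERED.mp4"
assert set_output_name("vid", ".mp4", export="default") == "vid.mp4"  # bare name adopts the input extension
```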
/auto_editor/formats/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/auto_editor/formats/__init__.py
--------------------------------------------------------------------------------
/auto_editor/formats/final_cut_pro.py:
--------------------------------------------------------------------------------
1 | """
2 | Export a FCPXML 9 file readable with Final Cut Pro 10.4.9 or later.
3 |
4 | See docs here:
5 | https://developer.apple.com/documentation/professional_video_applications/fcpxml_reference
6 |
7 | """
8 |
9 | from platform import system
10 | from pathlib import Path, PureWindowsPath
11 |
12 | from typing import List, Tuple, Union
13 |
14 | from .utils import indent
15 | from auto_editor.timeline import Timeline
16 |
17 |
18 | def fraction(_a: Union[int, float], _fps: float) -> str:
19 | from fractions import Fraction
20 |
21 | if _a == 0:
22 | return "0s"
23 |
24 | if isinstance(_a, float):
25 | a = Fraction(_a)
26 | else:
27 | a = _a
28 |
29 | fps = Fraction(_fps)
30 |
31 | frac = Fraction(a, fps).limit_denominator()
32 | num = frac.numerator
33 | dem = frac.denominator
34 |
35 | if dem < 3000:
36 | factor = int(3000 / dem)
37 |
38 | if factor == 3000 / dem:
39 | num *= factor
40 | dem *= factor
41 | else:
42 | # Good enough, but has some error that becomes noticeable at speeds such as 150%.
43 | total = Fraction(0)
44 | while total < frac:
45 | total += Fraction(1, 30)
46 | num = total.numerator
47 | dem = total.denominator
48 |
49 | return f"{num}/{dem}s"
50 |
51 |
52 | def fcp_xml(output: str, timeline: Timeline) -> None:
53 | inp = timeline.inp
54 | fps = timeline.fps
55 | chunks = timeline.chunks
56 |
57 | if chunks is None:
58 | raise ValueError("Timeline too complex")
59 |
60 | total_dur = chunks[-1][1]
61 |
62 | if system() == "Windows":
63 | pathurl = "file://localhost/" + PureWindowsPath(inp.abspath).as_posix()
64 | else:
65 | pathurl = Path(inp.abspath).as_uri()
66 |
67 | width, height = timeline.res
68 | frame_duration = fraction(1, fps)
69 |
70 | audio_file = len(inp.videos) == 0 and len(inp.audios) > 0
71 | group_name = "Auto-Editor {} Group".format("Audio" if audio_file else "Video")
72 | name = inp.basename
73 |
74 | with open(output, "w", encoding="utf-8") as outfile:
75 | outfile.write('\n')
76 | outfile.write("\n\n")
77 | outfile.write('\n')
78 | outfile.write("\t\n")
79 | outfile.write(
80 | f'\t\t\n'
84 | )
85 | outfile.write(
86 | f'\t\t\n'
89 | )
90 | outfile.write(
91 | f'\t\t\t\n'
92 | )
93 | outfile.write("\t\t\n")
94 | outfile.write("\t\n")
95 | outfile.write("\t\n")
96 | outfile.write(f'\t\t\n')
97 | outfile.write(f'\t\t\t\n')
98 | outfile.write(
99 | indent(
100 | 4,
101 | '',
102 | "\t",
103 | )
104 | )
105 |
106 | last_dur = 0.0
107 | for clip in chunks:
108 | if clip[2] == 99999:
109 | continue
110 |
111 | clip_dur = (clip[1] - clip[0] + 1) / clip[2]
112 | dur = fraction(clip_dur, fps)
113 |
114 | close = "/" if clip[2] == 1 else ""
115 |
116 | if last_dur == 0:
117 | outfile.write(
118 | indent(
119 | 6,
120 | f'',
121 | )
122 | )
123 | else:
124 | start = fraction(clip[0] / clip[2], fps)
125 | off = fraction(last_dur, fps)
126 | outfile.write(
127 | indent(
128 | 6,
129 | f'',
132 | )
133 | )
134 |
135 | if clip[2] != 1:
136 | # See the "Time Maps" section.
137 | # https://developer.apple.com/library/archive/documentation/FinalCutProX/Reference/FinalCutProXXMLFormat/StoryElements/StoryElements.html
138 |
139 | frac_total = fraction(total_dur, fps)
140 | speed_dur = fraction(total_dur / clip[2], fps)
141 |
142 | outfile.write(
143 | indent(
144 | 6,
145 | "\t",
146 | '\t\t',
147 | f'\t\t',
148 | "\t",
149 | "",
150 | )
151 | )
152 |
153 | last_dur += clip_dur
154 |
155 | outfile.write("\t\t\t\t\t\n")
156 | outfile.write("\t\t\t\t\n")
157 | outfile.write("\t\t\t\n")
158 | outfile.write("\t\t\n")
159 | outfile.write("\t\n")
160 | outfile.write("\n")
161 |
--------------------------------------------------------------------------------
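A quick check of the `fraction` helper above, which renders times as the rational strings FCPXML expects (the values follow from the code: one frame at 30 fps scales to the 3000 denominator):

```python
from auto_editor.formats.final_cut_pro import fraction

fraction(1, 30.0)  # -> "100/3000s"
fraction(0, 30.0)  # -> "0s"
```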
/auto_editor/formats/json.py:
--------------------------------------------------------------------------------
1 | """
2 | Make a pre-edited file reference that can be inputted back into auto-editor.
3 | """
4 |
5 | import os
6 | import sys
7 | import json
8 |
9 | from typing import List, Tuple, Union, Any
10 |
11 | from auto_editor.utils.log import Log
12 | from auto_editor.timeline import Timeline, make_av, clipify
13 | from auto_editor.ffwrapper import FFmpeg, FileInfo
14 |
15 |
16 | def check_attrs(data: object, log: Log, *attrs: str) -> None:
17 | for attr in attrs:
18 | if attr not in data:
19 | log.error(f"'{attr}' attribute not found!")
20 |
21 |
22 | def check_file(path: str, log: Log):
23 | if not os.path.isfile(path):
24 | log.error(f"Could not locate media file: '{path}'")
25 |
26 |
27 | def validate_chunks(chunks: object, log: Log) -> List[Tuple[int, int, float]]:
28 | if not isinstance(chunks, (list, tuple)):
29 | log.error("Chunks must be a list")
30 |
31 | if len(chunks) == 0:
32 | log.error("Chunks are empty!")
33 |
34 | new_chunks = []
35 |
36 | prev_end = None
37 |
38 | for i, chunk in enumerate(chunks):
39 |
40 | if len(chunk) != 3:
41 | log.error("Chunk must have a length of 3.")
42 |
43 | if i == 0 and chunk[0] != 0:
44 | log.error("First chunk must start with 0")
45 |
46 | if chunk[1] - chunk[0] < 1:
47 | log.error("Chunk duration must be at least 1")
48 |
49 | if chunk[2] <= 0 or chunk[2] > 99999:
50 | log.error("Chunk speed range must be >0 and <=99999")
51 |
52 | if prev_end is not None and chunk[0] != prev_end:
53 | log.error(f"Chunk disjointed at {chunk}")
54 |
55 | prev_end = chunk[1]
56 |
57 | new_chunks.append((chunk[0], chunk[1], float(chunk[2])))
58 |
59 | return new_chunks
60 |
61 |
62 | class Version:
63 | __slots__ = ("major", "minor", "micro")
64 |
65 | def __init__(self, val: str, log: Log) -> None:
66 | ver_str = val.split(".")
67 | if len(ver_str) > 3:
68 | log.error("Version string: Too many separators!")
69 | while len(ver_str) < 3:
70 | ver_str.append("0")
71 |
72 | try:
73 | self.major, self.minor, self.micro = map(int, ver_str)
74 | except ValueError:
75 | log.error("Version string: Could not convert to int.")
76 |
77 | def __eq__(self, other: object) -> bool:
78 | if isinstance(other, tuple) and len(other) == 2:
79 | return (self.major, self.minor) == other
80 | return (self.major, self.minor, self.micro) == other
81 |
82 | def __str__(self) -> str:
83 | return f"{self.major}.{self.minor}.{self.micro}"
84 |
85 |
86 | def read_json(path: str, ffmpeg: FFmpeg, log: Log) -> Timeline:
87 | with open(path, "r") as f:
88 | data = json.load(f)
89 |
90 | check_attrs(data, log, "version")
91 | version = Version(data["version"], log)
92 |
93 | if version == (1, 0) or version == (0, 1):
94 | check_attrs(data, log, "source", "chunks")
95 | check_file(data["source"], log)
96 |
97 | chunks = validate_chunks(data["chunks"], log)
98 | inp = FileInfo(data["source"], ffmpeg, log)
99 |
100 | vclips, aclips = make_av(clipify(chunks, 0), inp)
101 |
102 | fps = inp.get_fps()
103 | sr = inp.get_samplerate()
104 | res = inp.get_res()
105 |
106 | return Timeline([inp], fps, sr, res, "#000", vclips, aclips, chunks)
107 |
108 | if version == (2, 0) or version == (0, 2):
109 | check_attrs(data, log, "timeline")
110 | # check_file(data["source"], log)
111 | # return data["background"], data["source"], chunks
112 |
113 | raise ValueError("Incomplete")
114 |
115 | log.error(f"Unsupported version: {version}")
116 |
117 |
118 | def make_json_timeline(
119 | _version: str,
120 | out: Union[str, int],
121 | timeline: Timeline,
122 | log: Log,
123 | ) -> None:
124 |
125 | version = Version(_version, log)
126 |
127 | if version == (1, 0) or version == (0, 1):
128 | if timeline.chunks is None:
129 | log.error("Timeline too complex to convert to version 1.0")
130 |
131 | data: Any = {
132 | "version": "1.0.0",
133 | "source": os.path.abspath(timeline.inp.path),
134 | "chunks": timeline.chunks,
135 | }
136 | elif version == (2, 0) or version == (0, 2):
137 | sources = [os.path.abspath(inp.path) for inp in timeline.inputs]
138 | data = {
139 | "version": "2.0.0",
140 | "sources": sources,
141 | "timeline": {
142 | "background": timeline.background,
143 | "resolution": timeline.res,
144 | "fps": timeline.fps,
145 | "samplerate": timeline.samplerate,
146 | "video": timeline.vclips,
147 | "audio": timeline.aclips,
148 | },
149 | }
150 | else:
151 | log.error(f"Version {version} is not supported!")
152 |
153 | if isinstance(out, str):
154 | if not out.endswith(".json"):
155 | log.error("Output extension must be .json")
156 |
157 | with open(out, "w") as outfile:
158 | json.dump(data, outfile, indent=2, default=lambda o: o.__dict__)
159 | else:
160 | json.dump(data, sys.stdout, indent=2, default=lambda o: o.__dict__)
161 | print("") # Flush stdout
162 |
--------------------------------------------------------------------------------
/auto_editor/formats/shotcut.py:
--------------------------------------------------------------------------------
1 | from auto_editor.utils.func import aspect_ratio, to_timecode
2 | from auto_editor.timeline import Timeline
3 |
4 |
5 | def timecode_to_frames(timecode: str, fps: float) -> int:
6 | _h, _m, _s = timecode.split(":")
7 | h = int(_h)
8 | m = int(_m)
9 | s = float(_s)
10 | return round((h * 3600 + m * 60 + s) * fps)
11 |
12 |
13 | def shotcut_xml(
14 | output: str,
15 | timeline: Timeline,
16 | ) -> None:
17 | width, height = timeline.res
18 | num, den = aspect_ratio(width, height)
19 |
20 | chunks = timeline.chunks
21 | if chunks is None:
22 | raise ValueError("Timeline too complex")
23 | fps = timeline.fps
24 | inp = timeline.inp
25 | global_out = inp.duration
26 |
27 | version = "21.05.18"
28 |
29 | with open(output, "w", encoding="utf-8") as out:
30 | out.write('\n')
31 | out.write(
32 | '\n'
34 | )
35 | out.write(
36 | '\t\n'
41 | )
42 | out.write('\t\n')
43 | out.write('\t\t1\n')
44 | out.write("\t\n")
45 |
46 | # out was the new video length in the original xml
47 | out.write(f'\t\n')
48 | out.write(f'\t\t{global_out}\n')
49 | out.write('\t\tpause\n')
50 | out.write('\t\t0\n')
51 | out.write('\t\t1\n')
52 | out.write('\t\tcolor\n')
53 | out.write('\t\trgba\n')
54 | out.write('\t\t0\n')
55 | out.write("\t\n")
56 |
57 | out.write('\t\n') # same for this out too.
58 | out.write(
59 | f'\t\t\n'
60 | )
61 | out.write("\t\n")
62 |
63 | chains = 0
64 | producers = 0
65 |
66 | # Speeds like [1.5, 3] don't work because of duration issues, too bad!
67 |
68 | for clip in chunks:
69 | if clip[2] == 99999:
70 | continue
71 |
72 | speed = clip[2]
73 |
74 | _out = to_timecode(clip[1] / speed / fps, "standard")
75 | length = to_timecode((clip[1] / speed + 1) / fps, "standard")
76 |
77 | if speed == 1:
78 | resource = inp.path
79 | caption = inp.basename
80 | out.write(f'\t\n')
81 | else:
82 | resource = f"{speed}:{inp.path}"
83 | caption = f"{inp.basename} ({speed}x)"
84 | out.write(
85 | f'\t\n'
86 | )
87 | producers += 1
88 | chains += 1
89 |
90 | out.write(f'\t\t{length}\n')
91 | out.write('\t\tpause\n')
92 | out.write(f'\t\t{resource}\n')
93 |
94 | if speed == 1:
95 | out.write(
96 | '\t\tavformat-novalidate\n'
97 | )
98 | out.write('\t\t1\n')
99 | out.write('\t\t1\n')
100 | out.write('\t\t0\n')
101 | out.write('\t\t0\n')
102 | out.write(
103 | f'\t\t{caption}\n'
104 | )
105 | out.write('\t\twas here\n')
106 | else:
107 | out.write('\t\t1\n')
108 | out.write('\t\t1\n')
109 | out.write('\t\t1\n')
110 | out.write('\t\t0\n')
111 | out.write('\t\t1\n')
112 | out.write(f'\t\t{speed}\n')
113 | out.write(f'\t\t{inp.path}\n')
114 | out.write('\t\ttimewarp\n')
115 | out.write('\t\tavformat\n')
116 | out.write('\t\t0\n')
117 | out.write(
118 | f'\t\t{caption}\n'
119 | )
120 | out.write('\t\twas here\n')
121 | out.write('\t\t1\n')
122 |
123 | out.write("\t\n" if speed == 1 else "\t\n")
124 |
125 | out.write('\t\n')
126 | out.write('\t\t1\n')
127 | out.write('\t\tV1\n')
128 |
129 | producers = 0
130 | i = 0
131 | for clip in chunks:
132 | if clip[2] == 99999:
133 | continue
134 |
135 | speed = clip[2]
136 |
137 | if speed == 1:
138 | in_len: float = clip[0] - 1
139 | else:
140 | in_len = max(clip[0] / speed, 0)
141 |
142 | out_len = max((clip[1] - 2) / speed, 0)
143 |
144 | _in = to_timecode(in_len / fps, "standard")
145 | _out = to_timecode(out_len / fps, "standard")
146 |
147 | tag_name = f"chain{i}"
148 | if speed != 1:
149 | tag_name = f"producer{producers}"
150 | producers += 1
151 |
152 | out.write(f'\t\t\n')
153 | i += 1
154 |
155 | out.write("\t\n")
156 |
157 | out.write(
158 | f'\t\n'
160 | )
161 | out.write('\t\t1\n')
162 | out.write('\t\t2\n')
163 | out.write('\t\t0\n')
164 | out.write('\t\t\n')
165 | out.write('\t\t\n')
166 | out.write('\t\t\n')
167 | out.write('\t\t\t0\n')
168 | out.write('\t\t\t1\n')
169 | out.write('\t\t\tmix\n')
170 | out.write('\t\t\t1\n')
171 | out.write('\t\t\t1\n')
172 | out.write("\t\t\n")
173 | out.write('\t\t\n')
174 | out.write('\t\t\t0\n')
175 | out.write('\t\t\t1\n')
176 | out.write('\t\t\t0.9\n')
177 | out.write('\t\t\tfrei0r.cairoblend\n')
178 | out.write('\t\t\t0\n')
179 | out.write('\t\t\t1\n')
180 | out.write("\t\t\n")
181 |
182 | out.write("\t\n")
183 | out.write("\n")
184 |
--------------------------------------------------------------------------------
/auto_editor/formats/utils.py:
--------------------------------------------------------------------------------
1 | def safe_mkdir(path: str) -> str:
2 | from shutil import rmtree
3 | from os import mkdir
4 |
5 | try:
6 | mkdir(path)
7 | except OSError:
8 | rmtree(path)
9 | mkdir(path)
10 | return path
11 |
12 |
13 | def indent(base: int, *lines: str) -> str:
14 | new_lines = ""
15 | for line in lines:
16 | new_lines += ("\t" * base) + line + "\n"
17 | return new_lines
18 |
--------------------------------------------------------------------------------
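And a one-line illustration of `indent` above:

```python
from auto_editor.formats.utils import indent

indent(2, "<a>", "<b>")  # -> "\t\t<a>\n\t\t<b>\n"
```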
/auto_editor/help.json:
--------------------------------------------------------------------------------
1 | {
2 | "Auto-Editor": {
3 | "_": "Auto-Editor is an automatic video/audio creator and editor. By default, it will detect silence and create a new video with those sections cut out. By changing some of the options, you can export to a traditional editor like Premiere Pro and adjust the edits there, adjust the pacing of the cuts, and change the method of editing like using audio loudness and video motion to judge making cuts.\n\nRun:\n auto-editor --help\n\nTo get the list of options.\n",
4 | "--add-ellipse": "The x and y coordinates specify a bounding box where the ellipse is drawn.",
5 | "--add-image": "Opacity is how transparent or solid the image is. A transparency of 1 or 100% is completely solid. A transparency of 0 or 0% is completely transparent.\nThe anchor point tells how the image is placed relative to its x y coordinates.",
6 | "--set-speed-for-range": "This option takes 3 arguments delimited with commas and they are as follows:\n Speed\n - How fast the media plays. Speeds 0 or below and 99999 or above will be cut completely.\n Start\n - When the speed first gets applied. The default unit is in frames, but second units can also be used.\n End\n - When the speed stops being applied. It can use both frame and second units.",
7 | "--edit-based-on": "Editing Methods:\n - audio: General audio detection\n - motion: Motion detection specialized for real life noisy video\n - pixeldiff: Detect when a certain amount of pixels have changed between frames\n - random: Set silent/loud randomly based on a random or preset seed\n - none: Do not modify the media in anyway (Mark all sections as \"loud\")\n - all: Cut out everything out (Mark all sections as \"silent\")\n\nDefault Attributes:\n - audio\n - stream: 0 (int | \"all\")\n - threshold: args.silent_threshold (float_type)\n - motion\n - threshold: 2% (float_type)\n - blur: 9 (int)\n - width: 400 (int)\n - pixeldiff\n - threshold: 1 (int)\n - random\n - cutchance: 0.5 (float_type)\n - seed: RANDOMLY-GENERATED (int)\n\nLogical Operators:\n - and\n - or\n - xor\n\nExamples:\n --edit audio\n --edit audio:stream=1\n --edit audio:threshold=4%\n --edit audio:threshold=0.03\n --edit motion\n --edit motion:threshold=2%,blur=3\n --edit audio:threshold=4% or motion:threshold=2%,blur=3\n --edit none\n --edit all",
8 | "--export": "Instead of exporting a video, export as one of these options instead.\n\ndefault : Export as usual\npremiere : Export as an XML timeline file for Adobe Premiere Pro\nfinal-cut-pro : Export as an XML timeline file for Final Cut Pro\nshotcut : Export as an XML timeline file for Shotcut\njson : Export as an auto-editor JSON timeline file\naudio : Export as a WAV audio file\nclip-sequence : Export as multiple numbered media files",
9 | "--temp-dir": "If not set, tempdir will be set with Python's tempfile module\nThe directory doesn't have to exist beforehand, however, the root path must be valid.\nThe temp file can get quite big if you're generating a huge video, so make sure your location has enough space.",
10 | "--ffmpeg-location": "This takes precedence over `--my-ffmpeg`.",
11 | "--my-ffmpeg": "This is equivalent to `--ffmpeg-location ffmpeg`.",
12 | "--silent-threshold": "Silent threshold is a percentage where 0% represents absolute silence and 100% represents the highest volume in the media file.\nSetting the threshold to `0%` will cut only out areas where area is absolutely silence.",
13 | "--frame-margin": "Margin is measured in frames, however, seconds can be used. e.g. `0.3secs`\nThe starting and ending margins can be set separately with the use of a comma. e.g. `2sec,3sec` `7,10` `-1,6`\nRange: -Infinity to Infinity",
14 | "--silent-speed": "Values <= 0 or >= 99999 are the 'cut speed'",
15 | "--video-speed": "Values <= 0 or >= 99999 are the 'cut speed'",
16 | "--min-clip-length": "Range: 0 to Infinity",
17 | "--min-cut-length": "Range: 0 to Infinity"
18 | },
19 | "info": {
20 | "_": "Retrieve information and properties about media files",
21 | "--include-vfr": "A typical output looks like this:\n - VFR:0.583394 (3204/2288) min: 41 max: 42 avg: 41\n\nThe first number is the ratio of how many VFR frames are there in total.\nThe second number is the total number of VFR frames and the third is the total number of CFR frames. Adding the second and third number will result in how many frames the video has in total."
22 | },
23 | "levels": {
24 | "_": "Display loudness over time"
25 | },
26 | "subdump": {
27 | "_": "Dump text-based subtitles to stdout with formatting stripped out"
28 | },
29 | "grep": {
30 | "_": "Read and match text-based subtitle tracks"
31 | },
32 | "desc": {
33 | "_": "Display a media's description metadata"
34 | },
35 | "test": {
36 | "_": "Self-Hosted Unit and End-to-End tests"
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
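For instance, a hypothetical command using the `--set-speed-for-range` syntax documented above would be:

```
auto-editor example.mp4 --set-speed-for-range 2,0,30sec
```

This applies 2x speed to the first 30 seconds of the media.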
/auto_editor/icon2.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/auto_editor/icon2.ico
--------------------------------------------------------------------------------
/auto_editor/method.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | from math import ceil
4 | from dataclasses import asdict, dataclass, fields
5 | from typing import Any, Callable, Dict, List, Tuple, Type, TypeVar, Union
6 |
7 | import numpy as np
8 | from numpy.typing import NDArray
9 |
10 | from auto_editor.ffwrapper import FileInfo
11 | from auto_editor.utils.func import (
12 | cook,
13 | set_range,
14 | apply_margin,
15 | apply_mark_as,
16 | to_speed_list,
17 | parse_dataclass,
18 | seconds_to_frames,
19 | )
20 | from auto_editor.utils.log import Log
21 | from auto_editor.utils.progressbar import ProgressBar
22 | from auto_editor.utils.types import StreamType, float_type, stream_type
23 | from auto_editor.wavfile import read
24 |
25 | T = TypeVar("T")
26 |
27 |
28 | def get_attributes(attrs_str: str, dataclass: T, log: Log) -> T:
29 | attrs: T = parse_dataclass(attrs_str, dataclass, log)
30 |
31 | dic_value = asdict(attrs)
32 | dic_type: Dict[str, Union[type, Callable[[Any], Any]]] = {}
33 | for field in fields(attrs):
34 | dic_type[field.name] = field.type
35 |
36 | # Convert to the correct types
37 | for k, _type in dic_type.items():
38 |
39 | if _type == float:
40 | _type = float_type
41 | elif _type == StreamType:
42 | _type = stream_type
43 |
44 | try:
45 | attrs.__setattr__(k, _type(dic_value[k]))
46 | except (ValueError, TypeError) as e:
47 | log.error(str(e))
48 |
49 | return attrs
50 |
51 |
52 | def get_media_duration(path: str, fps: float, temp: str, log: Log) -> int:
53 |
54 | audio_path = os.path.join(temp, "0-0.wav")
55 |
56 | if os.path.isfile(audio_path):
57 | sample_rate, audio_samples = read(audio_path)
58 | sample_rate_per_frame = sample_rate / fps
59 | return ceil(audio_samples.shape[0] / sample_rate_per_frame)
60 |
61 | import av
62 |
63 | cn = av.open(path, "r")
64 |
65 | if len(cn.streams.video) < 1:
66 | log.error("Could not get media duration")
67 |
68 | video = cn.streams.video[0]
69 | return int(float(video.duration * video.time_base) * fps)
70 |
71 |
72 | def get_audio_list(
73 | i: int,
74 | stream: int,
75 | threshold: float,
76 | fps: float,
77 | progress: ProgressBar,
78 | temp: str,
79 | log: Log,
80 | ) -> NDArray[np.bool_]:
81 |
82 | from auto_editor.analyze.audio import audio_detection
83 |
84 | path = os.path.join(temp, f"{i}-{stream}.wav")
85 |
86 | if os.path.isfile(path):
87 | sample_rate, audio_samples = read(path)
88 | else:
89 | log.error(f"Audio stream '{stream}' does not exist.")
90 |
91 | audio_list = audio_detection(audio_samples, sample_rate, fps, progress)
92 |
93 | return np.fromiter((x > threshold for x in audio_list), dtype=np.bool_)
94 |
95 |
96 | def operand_combine(
97 | a: NDArray[np.bool_], b: NDArray[np.bool_], call: Callable
98 | ) -> NDArray[np.bool_]:
99 | if len(a) > len(b):
100 | b = np.resize(b, len(a))
101 | if len(b) > len(a):
102 | a = np.resize(a, len(b))
103 |
104 | return call(a, b)
105 |
106 |
107 | def get_stream_data(
108 | method: str,
109 | attrs,
110 | args,
111 | i: int,
112 | inp: FileInfo,
113 | fps: float,
114 | progress: ProgressBar,
115 | temp: str,
116 | log: Log,
117 | ) -> NDArray[np.bool_]:
118 |
119 | if method == "none":
120 | return np.ones((get_media_duration(inp.path, fps, temp, log)), dtype=np.bool_)
121 | if method == "all":
122 | return np.zeros((get_media_duration(inp.path, fps, temp, log)), dtype=np.bool_)
123 | if method == "random":
124 | if attrs.cutchance > 1 or attrs.cutchance < 0:
125 |             log.error("random:cutchance must be between 0 and 1")
126 |         n = get_media_duration(inp.path, fps, temp, log)
127 |
128 | random.seed(attrs.seed)
129 | log.debug(f"Seed: {attrs.seed}")
130 |
131 |         a = random.choices((0, 1), weights=(attrs.cutchance, 1 - attrs.cutchance), k=n)
132 |
133 | return np.asarray(a, dtype=np.bool_)
134 | if method == "audio":
135 | if attrs.stream == "all":
136 | total_list = None
137 | for s in range(len(inp.audios)):
138 | audio_list = get_audio_list(
139 | i, s, attrs.threshold, fps, progress, temp, log
140 | )
141 | if total_list is None:
142 | total_list = audio_list
143 | else:
144 | total_list = operand_combine(total_list, audio_list, np.logical_or)
145 |
146 | if total_list is None:
147 | log.error("Input has no audio streams.")
148 | return total_list
149 | else:
150 | return get_audio_list(
151 | i, attrs.stream, attrs.threshold, fps, progress, temp, log
152 | )
153 | if method == "motion":
154 | from auto_editor.analyze.motion import motion_detection
155 |
156 | if len(inp.videos) == 0:
157 | log.error("Video stream '0' does not exist.")
158 |
159 | motion_list = motion_detection(inp.path, fps, progress, attrs.width, attrs.blur)
160 | return np.fromiter((x >= attrs.threshold for x in motion_list), dtype=np.bool_)
161 |
162 | # "pixeldiff"
163 | if method == "pixeldiff":
164 | from auto_editor.analyze.pixeldiff import pixel_difference
165 |
166 | if len(inp.videos) == 0:
167 | log.error("Video stream '0' does not exist.")
168 |
169 | pixel_list = pixel_difference(inp.path, fps, progress)
170 | return np.fromiter((x >= attrs.threshold for x in pixel_list), dtype=np.bool_)
171 |
172 | raise ValueError(f"Unreachable. {method=}")
173 |
174 |
175 | def get_has_loud(
176 | method_str: str,
177 | i: int,
178 | inp: FileInfo,
179 | fps: float,
180 | progress: ProgressBar,
181 | temp: str,
182 | log: Log,
183 | args,
184 | ) -> NDArray[np.bool_]:
185 | @dataclass
186 | class Audio:
187 | stream: StreamType = 0
188 | threshold: float = args.silent_threshold
189 |
190 | @dataclass
191 | class Motion:
192 | threshold: float = 0.02
193 | blur: int = 9
194 | width: int = 400
195 |
196 | @dataclass
197 | class Pixeldiff:
198 | threshold: int = 1
199 |
200 | @dataclass
201 | class Random:
202 | cutchance: float = 0.5
203 | seed: int = random.randint(0, 2147483647)
204 |
205 | KEYWORD_SEP = " "
206 | METHOD_ATTRS_SEP = ":"
207 |
208 | result_array = None
209 | operand = None
210 |
211 | logic_funcs = {
212 | "and": np.logical_and,
213 | "or": np.logical_or,
214 | "xor": np.logical_xor,
215 | }
216 |
217 | Methods = Union[Type[Audio], Type[Motion], Type[Pixeldiff], Type[Random], None]
218 |
219 | method_str = method_str.replace("_", " ") # Allow old style `--edit` to work
220 |
221 | for method in method_str.split(KEYWORD_SEP):
222 |
223 | if method == "": # Skip whitespace
224 | continue
225 |
226 | if METHOD_ATTRS_SEP in method:
227 | method, attrs_str = method.split(METHOD_ATTRS_SEP)
228 | else:
229 | attrs_str = ""
230 |
231 | if method in ("audio", "motion", "pixeldiff", "random", "none", "all"):
232 | if result_array is not None and operand is None:
233 | log.error("Logic operator must be between two editing methods.")
234 |
235 | if method == "audio":
236 | attrs: Methods = get_attributes(attrs_str, Audio, log)
237 | elif method == "motion":
238 | attrs = get_attributes(attrs_str, Motion, log)
239 | elif method == "pixeldiff":
240 | attrs = get_attributes(attrs_str, Pixeldiff, log)
241 | elif method == "random":
242 | attrs = get_attributes(attrs_str, Random, log)
243 | else:
244 | attrs = None
245 |
246 | stream_data = get_stream_data(
247 | method, attrs, args, i, inp, fps, progress, temp, log
248 | )
249 |
250 | if operand == "not":
251 | result_array = np.logical_not(stream_data)
252 | operand = None
253 | elif result_array is None:
254 | result_array = stream_data
255 | elif operand is not None and operand in ("and", "or", "xor"):
256 | result_array = operand_combine(
257 | result_array, stream_data, logic_funcs[operand]
258 | )
259 | operand = None
260 |
261 | elif method in ("and", "or", "xor"):
262 | if operand is not None:
263 | log.error("Invalid Editing Syntax.")
264 | if result_array is None:
265 |                 log.error(f"'{method}' operator needs two operands.")
266 | operand = method
267 | elif method == "not":
268 | if operand is not None:
269 | log.error("Invalid Editing Syntax.")
270 | operand = method
271 | else:
272 | log.error(f"Unknown method/operator: '{method}'")
273 |
274 | if operand is not None:
275 |         log.error(f"Dangling operator: '{operand}'")
276 |
277 | assert result_array is not None
278 | return result_array
279 |
280 |
281 | def get_speed_list(
282 | i: int, inp: FileInfo, fps: float, args, progress: ProgressBar, temp: str, log: Log
283 | ) -> NDArray[np.float_]:
284 |
285 | start_margin, end_margin = args.frame_margin
286 |
287 | start_margin = seconds_to_frames(start_margin, fps)
288 | end_margin = seconds_to_frames(end_margin, fps)
289 | min_clip = seconds_to_frames(args.min_clip_length, fps)
290 | min_cut = seconds_to_frames(args.min_cut_length, fps)
291 |
292 | has_loud = get_has_loud(args.edit_based_on, i, inp, fps, progress, temp, log, args)
293 | has_loud_length = len(has_loud)
294 |
295 | has_loud = apply_mark_as(has_loud, has_loud_length, fps, args, log)
296 | has_loud = cook(has_loud, min_clip, min_cut)
297 | has_loud = apply_margin(has_loud, has_loud_length, start_margin, end_margin)
298 |
299 | # Remove small clips/cuts created by applying other rules.
300 | has_loud = cook(has_loud, min_clip, min_cut)
301 |
302 | speed_list = to_speed_list(has_loud, args.video_speed, args.silent_speed)
303 |
304 | if args.cut_out != []:
305 | speed_list = set_range(speed_list, args.cut_out, fps, 99999, log)
306 |
307 | if args.add_in != []:
308 | speed_list = set_range(speed_list, args.add_in, fps, args.video_speed, log)
309 |
310 | if args.set_speed_for_range != []:
311 | for item in args.set_speed_for_range:
312 | speed_list = set_range(speed_list, [item[1:]], fps, item[0], log)
313 |
314 | return speed_list
315 |
--------------------------------------------------------------------------------
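
The `--edit` option that get_has_loud parses is a small boolean expression language: each method yields a per-frame "is loud" array, and `and`/`or`/`xor`/`not` combine them, with operand_combine cycling the shorter array so the lengths match. A self-contained sketch of that combining step:

    import numpy as np

    def operand_combine(a, b, call):
        # Mirror of method.py's operand_combine: np.resize repeats the
        # shorter array cyclically so both operands have the same length.
        if len(a) > len(b):
            b = np.resize(b, len(a))
        if len(b) > len(a):
            a = np.resize(a, len(b))
        return call(a, b)

    audio = np.array([1, 0, 1, 1, 0], dtype=np.bool_)  # "is loud" per frame
    motion = np.array([0, 0, 1], dtype=np.bool_)       # shorter analysis

    print(operand_combine(audio, motion, np.logical_or))  # [ True False  True  True False]
    print(np.logical_not(audio))                          # like `--edit "not audio"`
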
/auto_editor/objects.py:
--------------------------------------------------------------------------------
1 | from typing import Any
2 | from dataclasses import dataclass
3 |
4 | from auto_editor.utils.types import AlignType
5 |
6 | # start - When the clip starts in the timeline
7 | # dur - The duration of the clip in the timeline before speed is applied
8 | # offset - Where in the source to start playing the media from
9 |
10 |
11 | @dataclass
12 | class VideoObj:
13 | start: int
14 | dur: int
15 | offset: int
16 | speed: float
17 | src: int
18 | stream: int = 0
19 |
20 |
21 | @dataclass
22 | class AudioObj:
23 | start: int
24 | dur: int
25 | offset: int
26 | speed: float
27 | src: int
28 | stream: int = 0
29 |
30 |
31 | @dataclass
32 | class TextObj:
33 | start: int
34 | dur: int
35 | content: str
36 | x: int = "centerX" # type: ignore
37 | y: int = "centerY" # type: ignore
38 | size: int = 30
39 | font: str = "default"
40 | align: AlignType = "left"
41 | fill: str = "#000"
42 | stroke: int = 0
43 | strokecolor: str = "#000"
44 | _cache_font: Any = None
45 |
46 |
47 | @dataclass
48 | class ImageObj:
49 | start: int
50 | dur: int
51 | src: str
52 | x: int = "centerX" # type: ignore
53 | y: int = "centerY" # type: ignore
54 | opacity: float = 1
55 | anchor: str = "ce"
56 | rotate: float = 0 # in degrees
57 | _cache_src: Any = None
58 |
59 |
60 | @dataclass
61 | class RectangleObj:
62 | start: int
63 | dur: int
64 | x: int
65 | y: int
66 | width: int
67 | height: int
68 | anchor: str = "ce"
69 | fill: str = "#c4c4c4"
70 | stroke: int = 0
71 | strokecolor: str = "#000"
72 |
73 |
74 | @dataclass
75 | class EllipseObj:
76 | start: int
77 | dur: int
78 | x: int
79 | y: int
80 | width: int
81 | height: int
82 | anchor: str = "ce"
83 | fill: str = "#c4c4c4"
84 | stroke: int = 0
85 | strokecolor: str = "#000"
86 |
--------------------------------------------------------------------------------
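
Per the comments at the top of this file, start and dur place a clip on the timeline in integer frames, while offset points into the source. A usage sketch, assuming the repository is importable:

    from auto_editor.objects import VideoObj

    # A clip that sits at timeline frame 0, lasts 90 frames, and plays the
    # source starting from source frame 300 at normal speed.
    clip = VideoObj(start=0, dur=90, offset=300, speed=1.0, src=0)
    print(clip)
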
/auto_editor/output.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | from typing import List, Optional, Tuple
3 |
4 | from auto_editor.ffwrapper import FFmpeg, FileInfo
5 | from auto_editor.utils.container import Container
6 | from auto_editor.utils.log import Log
7 |
8 |
9 | def fset(cmd: List[str], option: str, value: Optional[str]) -> List[str]:
10 | if value is None or value == "unset":
11 | return cmd
12 | return cmd + [option] + [value]
13 |
14 |
15 | def get_vcodec(vcodec: str, inp: FileInfo, rules: Container) -> str:
16 | if vcodec == "auto":
17 | vcodec = inp.videos[0].codec
18 |
19 | if rules.vcodecs is not None:
20 | if rules.vstrict and vcodec not in rules.vcodecs:
21 | return rules.vcodecs[0]
22 |
23 | if vcodec in rules.disallow_v:
24 | return rules.vcodecs[0]
25 |
26 | if vcodec == "copy":
27 | return inp.videos[0].codec
28 |
29 | if vcodec == "uncompressed":
30 | return "mpeg4"
31 | return vcodec
32 |
33 |
34 | def get_acodec(acodec: str, inp: FileInfo, rules: Container) -> str:
35 | if acodec == "auto":
36 | acodec = inp.audios[0].codec
37 |
38 | if rules.acodecs is not None: # Just in case, but shouldn't happen
39 | if rules.astrict and acodec not in rules.acodecs:
40 | # Input codec can't be used for output, so use a new safe codec.
41 | return rules.acodecs[0]
42 |
43 | if acodec in rules.disallow_a:
44 | return rules.acodecs[0]
45 |
46 | if acodec == "copy":
47 | return inp.audios[0].codec
48 | return acodec
49 |
50 |
51 | def video_quality(cmd: List[str], args, inp: FileInfo, rules: Container) -> List[str]:
52 | cmd = fset(cmd, "-b:v", args.video_bitrate)
53 |
54 | qscale = args.video_quality_scale
55 |
56 | if args.video_codec == "uncompressed" and (qscale is None or qscale == "unset"):
57 | qscale = "1"
58 |
59 | vcodec = get_vcodec(args.video_codec, inp, rules)
60 |
61 | cmd.extend(["-c:v", vcodec])
62 |
63 | cmd = fset(cmd, "-qscale:v", qscale)
64 |
65 | cmd.extend(["-movflags", "faststart"])
66 | return cmd
67 |
68 |
69 | def mux_quality_media(
70 | ffmpeg: FFmpeg,
71 | video_output: List[Tuple[int, bool, str, bool]],
72 | rules: Container,
73 | write_file: str,
74 | container: str,
75 | args,
76 | inp: FileInfo,
77 | temp: str,
78 | log: Log,
79 | ) -> None:
80 | s_tracks = 0 if not rules.allow_subtitle else len(inp.subtitles)
81 | a_tracks = 0 if not rules.allow_audio else len(inp.audios)
82 | v_tracks = 0 if not rules.allow_video else len(video_output)
83 |
84 | cmd = ["-hide_banner", "-y", "-i", inp.path]
85 |
86 | # fmt: off
87 |     for _, is_video, path, _ in video_output:
88 | if is_video or rules.allow_image:
89 | cmd.extend(["-i", path])
90 | else:
91 | v_tracks -= 1
92 | # fmt: on
93 |
94 | if a_tracks > 0:
95 | if args.keep_tracks_seperate and rules.max_audios is None:
96 | for t in range(a_tracks):
97 | cmd.extend(["-i", os.path.join(temp, f"new{t}.wav")])
98 | else:
99 |             # Merge all the audio tracks into one.
100 | new_a_file = os.path.join(temp, "new_audio.wav")
101 | if a_tracks > 1:
102 | new_cmd = []
103 | for t in range(a_tracks):
104 | new_cmd.extend(["-i", os.path.join(temp, f"new{t}.wav")])
105 | new_cmd.extend(
106 | [
107 | "-filter_complex",
108 | f"amerge=inputs={a_tracks}",
109 | "-ac",
110 | "2",
111 | new_a_file,
112 | ]
113 | )
114 | ffmpeg.run(new_cmd)
115 | a_tracks = 1
116 | else:
117 | new_a_file = os.path.join(temp, "new0.wav")
118 | cmd.extend(["-i", new_a_file])
119 |
120 | if s_tracks > 0:
121 | for s, sub in enumerate(inp.subtitles):
122 | cmd.extend(["-i", os.path.join(temp, f"new{s}s.{sub.ext}")])
123 |
124 | total_streams = v_tracks + s_tracks + a_tracks
125 |
126 | for i in range(total_streams):
127 | cmd.extend(["-map", f"{i+1}:0"])
128 |
129 | cmd.extend(["-map_metadata", "0"])
130 |
131 | for track, is_video, path, apply_video in video_output:
132 | if is_video:
133 | if apply_video:
134 | cmd = video_quality(cmd, args, inp, rules)
135 | else:
136 | cmd.extend([f"-c:v:{track}", "copy"])
137 | elif rules.allow_image:
138 | ext = os.path.splitext(path)[1][1:]
139 | cmd.extend(
140 | [f"-c:v:{track}", ext, f"-disposition:v:{track}", "attached_pic"]
141 | )
142 |
143 | for i, vstream in enumerate(inp.videos):
144 | if i > v_tracks:
145 | break
146 | if vstream.lang is not None:
147 | cmd.extend([f"-metadata:s:v:{i}", f"language={vstream.lang}"])
148 | for i, astream in enumerate(inp.audios):
149 | if i > a_tracks:
150 | break
151 | if astream.lang is not None:
152 | cmd.extend([f"-metadata:s:a:{i}", f"language={astream.lang}"])
153 | for i, sstream in enumerate(inp.subtitles):
154 | if i > s_tracks:
155 | break
156 | if sstream.lang is not None:
157 | cmd.extend([f"-metadata:s:s:{i}", f"language={sstream.lang}"])
158 |
159 | if s_tracks > 0:
160 | scodec = inp.subtitles[0].codec
161 | if inp.ext == f".{container}":
162 | cmd.extend(["-c:s", scodec])
163 | elif rules.scodecs is not None:
164 | if scodec not in rules.scodecs:
165 | scodec = rules.scodecs[0]
166 | cmd.extend(["-c:s", scodec])
167 |
168 | if a_tracks > 0:
169 | acodec = get_acodec(args.audio_codec, inp, rules)
170 |
171 | cmd = fset(cmd, "-c:a", acodec)
172 | cmd = fset(cmd, "-b:a", args.audio_bitrate)
173 |
174 | if args.extras is not None:
175 | cmd.extend(args.extras.split(" "))
176 | cmd.extend(["-strict", "-2"]) # Allow experimental codecs.
177 | cmd.extend(
178 | ["-map", "0:t?", "-map", "0:d?"]
179 | ) # Add input attachments and data to output.
180 | cmd.append(write_file)
181 | ffmpeg.run_check_errors(cmd, log, path=write_file)
182 |
--------------------------------------------------------------------------------
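
fset is the guard that keeps unspecified options out of the final ffmpeg invocation: a value of None or "unset" returns the command unchanged. A small demonstration with made-up values:

    def fset(cmd, option, value):
        # Same behavior as output.py's fset: drop unset options.
        if value is None or value == "unset":
            return cmd
        return cmd + [option] + [value]

    cmd = ["-hide_banner", "-y", "-i", "input.mp4"]
    cmd = fset(cmd, "-b:v", None)      # skipped entirely
    cmd = fset(cmd, "-b:a", "192k")    # appended
    print(cmd)  # ['-hide_banner', '-y', '-i', 'input.mp4', '-b:a', '192k']
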
/auto_editor/preview.py:
--------------------------------------------------------------------------------
1 | from statistics import fmean, median
2 | from typing import List, Tuple
3 |
4 | from auto_editor.timeline import Timeline
5 | from auto_editor.utils.func import to_timecode
6 | from auto_editor.utils.log import Log
7 |
8 |
9 | def display(secs: float) -> str:
10 | return to_timecode(round(secs), "rass")
11 |
12 |
13 | def time_frame(title: str, frames: float, fps: float) -> None:
14 | tc = to_timecode(frames / fps, "ass")
15 | preci = 0 if int(frames) == frames else 2
16 | print(f" - {f'{title}:':<10} {tc:<12} ({frames:.{preci}f})")
17 |
18 |
19 | def preview(timeline: Timeline, log: Log) -> None:
20 | log.conwrite("")
21 |
22 | fps = timeline.fps
23 | in_len = sum([inp.fdur for inp in timeline.inputs])
24 |
25 | out_len: float = 0
26 | for vclips in timeline.vclips:
27 | dur: float = 0
28 | for vclip in vclips:
29 | dur += vclip.dur / vclip.speed
30 | out_len = max(out_len, dur / fps)
31 | for aclips in timeline.aclips:
32 | dur = 0
33 | for aclip in aclips:
34 | dur += aclip.dur / aclip.speed
35 | out_len = max(out_len, dur / fps)
36 |
37 | diff = out_len - in_len
38 |
39 | print(
40 | f"\nlength:\n - change: ({display(in_len)}) 100% -> "
41 | f"({display(out_len)}) {round((out_len / in_len) * 100, 2)}%\n "
42 | f"- diff: ({display(diff)}) {round((diff / in_len) * 100, 2)}%"
43 | )
44 |
45 | clip_lens = [clip.dur / clip.speed for clip in timeline.aclips[0]]
46 |
47 | # Calculate cuts
48 | oe: List[Tuple[int, int]] = []
49 |
50 | # TODO: Make offset_end_pairs work on overlapping clips.
51 | for clip in timeline.aclips[0]:
52 | oe.append((clip.offset, clip.offset + clip.dur))
53 |
54 | cut_lens = []
55 | i = 0
56 | while i < len(oe) - 1:
57 | if i == 0 and oe[i][0] != 0:
58 |             cut_lens.append(oe[i][0])
59 |
60 | cut_lens.append(oe[i + 1][0] - oe[i][1])
61 | i += 1
62 |
63 | if len(oe) > 0 and oe[-1][1] < round(in_len * fps):
64 | cut_lens.append(round(in_len * fps) - oe[-1][1])
65 |
66 | print(f"clips: {len(clip_lens)}")
67 | log.debug(clip_lens)
68 | if len(clip_lens) == 0:
69 | clip_lens = [0]
70 | time_frame("smallest", min(clip_lens), fps)
71 | time_frame("largest", max(clip_lens), fps)
72 | if len(clip_lens) > 1:
73 | time_frame("median", median(clip_lens), fps)
74 | time_frame("average", fmean(clip_lens), fps)
75 |
76 | print(f"cuts: {len(cut_lens)}")
77 | log.debug(cut_lens)
78 | if len(cut_lens) == 0:
79 | cut_lens = [0]
80 | time_frame("smallest", min(cut_lens), fps)
81 | time_frame("largest", max(cut_lens), fps)
82 | if len(cut_lens) > 1:
83 | time_frame("median", median(cut_lens), fps)
84 | time_frame("average", fmean(cut_lens), fps)
85 | print("")
86 |
--------------------------------------------------------------------------------
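
The cut statistics above are derived from the clips' (offset, end) pairs: the gap between consecutive clips is a cut, plus a possible leading cut before the first clip and a trailing cut after the last one. A worked example with invented numbers:

    oe = [(0, 120), (150, 300), (330, 400)]  # (offset, end) pairs in frames
    total = 450                              # source length in frames

    cut_lens = []
    i = 0
    while i < len(oe) - 1:
        if i == 0 and oe[i][0] != 0:
            cut_lens.append(oe[i][0])        # leading cut before the first clip
        cut_lens.append(oe[i + 1][0] - oe[i][1])
        i += 1
    if len(oe) > 0 and oe[-1][1] < total:
        cut_lens.append(total - oe[-1][1])   # trailing cut after the last clip

    print(cut_lens)  # [30, 30, 50]
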
/auto_editor/render/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/auto_editor/render/__init__.py
--------------------------------------------------------------------------------
/auto_editor/render/audio.py:
--------------------------------------------------------------------------------
1 | import os
2 | import wave
3 |
4 | from auto_editor.render.tsm.phasevocoder import phasevocoder
5 | from auto_editor.timeline import Timeline
6 | from auto_editor.utils.log import Log
7 | from auto_editor.utils.progressbar import ProgressBar
8 | from auto_editor.wavfile import read
9 |
10 |
11 | def make_new_audio(
12 | t: int,
13 | temp: str,
14 | timeline: Timeline,
15 | progress: ProgressBar,
16 | log: Log,
17 | ) -> None:
18 |
19 | clips = timeline.aclips[t]
20 | if len(clips) == 0:
21 | log.error("Trying to create an empty file.")
22 |
23 | samples = []
24 | samplerate = 0
25 | for x in range(len(timeline.inputs)):
26 | samplerate, s = read(os.path.join(temp, f"{x}-{t}.wav"))
27 | samples.append(s)
28 |
29 | assert samplerate != 0
30 |
31 | progress.start(len(clips), "Creating new audio")
32 | fps = timeline.fps
33 |
34 | main_writer = wave.open(os.path.join(temp, f"new{t}.wav"), "wb")
35 | main_writer.setnchannels(2)
36 | main_writer.setframerate(samplerate)
37 | main_writer.setsampwidth(2)
38 |
39 | for c, clip in enumerate(clips):
40 | sample_start = int(clip.offset / fps * samplerate)
41 | sample_end = int((clip.offset + clip.dur) / fps * samplerate)
42 |
43 | samp_list = samples[clip.src]
44 |
45 | if sample_end > len(samp_list):
46 | sample_end = len(samp_list)
47 |
48 | if clip.speed == 1:
49 | main_writer.writeframes(samp_list[sample_start:sample_end]) # type: ignore
50 | else:
51 | output = phasevocoder(2, clip.speed, samp_list[sample_start:sample_end])
52 | if output.shape[0] != 0:
53 | main_writer.writeframes(output) # type: ignore
54 |
55 | progress.tick(c)
56 | progress.end()
57 |
--------------------------------------------------------------------------------
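
Clip positions are stored in video frames, so make_new_audio scales them by samplerate / fps to get sample indices into the WAV data. The same arithmetic with hypothetical numbers:

    fps = 30.0
    samplerate = 48000

    clip_offset, clip_dur = 90, 60  # frames: starts at 3s, lasts 2s

    sample_start = int(clip_offset / fps * samplerate)
    sample_end = int((clip_offset + clip_dur) / fps * samplerate)
    print(sample_start, sample_end)  # 144000 240000
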
/auto_editor/render/subtitle.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | from dataclasses import dataclass
4 | from typing import List, Tuple
5 |
6 | from auto_editor.ffwrapper import FFmpeg
7 | from auto_editor.timeline import Timeline
8 | from auto_editor.utils.func import to_timecode
9 | from auto_editor.utils.log import Log
10 |
11 |
12 | @dataclass
13 | class SerialSub:
14 | start: int
15 | end: int
16 | before: str
17 | middle: str
18 | after: str
19 |
20 |
21 | class SubtitleParser:
22 | def __init__(self) -> None:
23 | self.supported_codecs = ("ass", "webvtt", "mov_text")
24 |
25 |     def parse(self, text: str, fps: float, codec: str) -> None:
26 |
27 | if codec not in self.supported_codecs:
28 | raise ValueError(f"codec {codec} not supported.")
29 |
30 | self.fps = fps
31 | self.codec = codec
32 | self.contents: List[SerialSub] = []
33 |
34 | if codec == "ass":
35 | time_code = re.compile(r"(.*)(\d+:\d+:[\d.]+)(.*)(\d+:\d+:[\d.]+)(.*)")
36 | if codec == "webvtt":
37 | time_code = re.compile(r"()(\d+:[\d.]+)( --> )(\d+:[\d.]+)(\n.*)")
38 | if codec == "mov_text":
39 | time_code = re.compile(r"()(\d+:\d+:[\d,]+)( --> )(\d+:\d+:[\d,]+)(\n.*)")
40 |
41 | i = 0
42 | for reg in re.finditer(time_code, text):
43 | i += 1
44 | if i == 1:
45 | self.header = text[: reg.span()[0]]
46 |
47 | self.contents.append(
48 | SerialSub(
49 | self.to_frame(reg.group(2)),
50 | self.to_frame(reg.group(4)),
51 | reg.group(1),
52 | reg.group(3),
53 | f"{reg.group(5)}\n",
54 | )
55 | )
56 |
57 | if i == 0:
58 | self.header = ""
59 | self.footer = ""
60 | else:
61 | self.footer = text[reg.span()[1] :]
62 |
63 | def edit(self, chunks: List[Tuple[int, int, float]]) -> None:
64 | for cut in reversed(chunks):
65 | the_speed = cut[2]
66 | speed_factor = 1 if the_speed == 99999 else 1 - (1 / the_speed)
67 |
68 | new_content = []
69 | for content in self.contents:
70 | if cut[0] <= content.end and cut[1] > content.start:
71 |
72 | diff = int(
73 | (min(cut[1], content.end) - max(cut[0], content.start))
74 | * speed_factor
75 | )
76 | if content.start > cut[0]:
77 | content.start -= diff
78 | content.end -= diff
79 |
80 | content.end -= diff
81 |
82 | elif content.start >= cut[0]:
83 | diff = int((cut[1] - cut[0]) * speed_factor)
84 |
85 | content.start -= diff
86 | content.end -= diff
87 |
88 | if content.start != content.end:
89 | new_content.append(content)
90 |
91 | self.contents = new_content
92 |
93 | def write(self, file_path: str) -> None:
94 | with open(file_path, "w") as file:
95 | file.write(self.header)
96 | for c in self.contents:
97 | file.write(
98 | f"{c.before}{to_timecode(c.start / self.fps, self.codec)}"
99 | f"{c.middle}{to_timecode(c.end / self.fps, self.codec)}"
100 | f"{c.after}"
101 | )
102 | file.write(self.footer)
103 |
104 | def to_frame(self, text: str) -> int:
105 | if self.codec == "mov_text":
106 | time_format = r"(\d+):?(\d+):([\d,]+)"
107 | else:
108 | time_format = r"(\d+):?(\d+):([\d.]+)"
109 |
110 | nums = re.match(time_format, text)
111 | assert nums is not None
112 |
113 | hours, minutes, seconds = nums.groups()
114 | seconds = seconds.replace(",", ".", 1)
115 | return round(
116 | (int(hours) * 3600 + int(minutes) * 60 + float(seconds)) * self.fps
117 | )
118 |
119 |
120 | def cut_subtitles(
121 | ffmpeg: FFmpeg,
122 | timeline: Timeline,
123 | temp: str,
124 | log: Log,
125 | ) -> None:
126 | inp = timeline.inp
127 | chunks = timeline.chunks
128 |
129 | if chunks is None:
130 | log.error("Timeline too complex")
131 |
132 | for s, sub in enumerate(inp.subtitles):
133 | file_path = os.path.join(temp, f"{s}s.{sub.ext}")
134 | new_path = os.path.join(temp, f"new{s}s.{sub.ext}")
135 |
136 | parser = SubtitleParser()
137 |
138 | if sub.codec in parser.supported_codecs:
139 | with open(file_path) as file:
140 | parser.parse(file.read(), timeline.fps, sub.codec)
141 | else:
142 | convert_path = os.path.join(temp, f"{s}s_convert.vtt")
143 | ffmpeg.run(["-i", file_path, convert_path])
144 | with open(convert_path) as file:
145 | parser.parse(file.read(), timeline.fps, "webvtt")
146 |
147 | parser.edit(chunks)
148 | parser.write(new_path)
149 |
--------------------------------------------------------------------------------
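
SubtitleParser.to_frame turns an hh:mm:ss.sss timecode into a frame index at the timeline's fps. The same arithmetic as a standalone sketch:

    import re

    def to_frame(text: str, fps: float) -> int:
        nums = re.match(r"(\d+):?(\d+):([\d.]+)", text)
        assert nums is not None
        hours, minutes, seconds = nums.groups()
        return round((int(hours) * 3600 + int(minutes) * 60 + float(seconds)) * fps)

    print(to_frame("00:01:30.500", 30))  # 2715
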
/auto_editor/render/tsm/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/auto_editor/render/tsm/__init__.py
--------------------------------------------------------------------------------
/auto_editor/render/tsm/analysis_synthesis.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple
2 |
3 | import numpy as np
4 | from numpy.typing import NDArray
5 |
6 | from .array import ArrReader, ArrWriter
7 | from .cbuffer import CBuffer
8 | from .normalizebuffer import NormalizeBuffer
9 |
10 | EPSILON = 0.0001
11 |
12 |
13 | def find_peaks(amplitude: NDArray[np.float_]) -> NDArray[np.bool_]:
14 |     # Pad the edges so the shifted comparisons below stay in bounds
15 | padded = np.concatenate((-np.ones(2), amplitude, -np.ones(2)))
16 |
17 | # Shift the array by one/two values to the left/right
18 | shifted_l2 = padded[:-4]
19 | shifted_l1 = padded[1:-3]
20 | shifted_r1 = padded[3:-1]
21 | shifted_r2 = padded[4:]
22 |
23 | # Compare the original array with the shifted versions.
24 | peaks = (
25 | (amplitude >= shifted_l2)
26 | & (amplitude >= shifted_l1)
27 | & (amplitude >= shifted_r1)
28 | & (amplitude >= shifted_r2)
29 | )
30 |
31 | return peaks
32 |
33 |
34 | def get_closest_peaks(peaks: NDArray[np.bool_]) -> NDArray[np.int_]:
35 | """
36 |     Returns an array containing the index of the closest peak for each index.
37 | """
38 | closest_peak = np.empty_like(peaks, dtype=int)
39 | previous = -1
40 | for i, is_peak in enumerate(peaks):
41 | if is_peak:
42 | if previous >= 0:
43 | closest_peak[previous : (previous + i) // 2 + 1] = previous
44 | closest_peak[(previous + i) // 2 + 1 : i] = i
45 | else:
46 | closest_peak[:i] = i
47 | previous = i
48 | closest_peak[previous:] = previous
49 |
50 | return closest_peak
51 |
52 |
53 | class PhaseVocoderConverter:
54 | def __init__(
55 | self, channels: int, frame_length: int, analysis_hop: int, synthesis_hop: int
56 | ) -> None:
57 | self.channels = channels
58 | self._frame_length = frame_length
59 | self._synthesis_hop = synthesis_hop
60 | self._analysis_hop = analysis_hop
61 |
62 | self._center_frequency = np.fft.rfftfreq(frame_length) * 2 * np.pi # type: ignore
63 | fft_length = len(self._center_frequency)
64 |
65 | self._first = True
66 |
67 | self._previous_phase = np.empty((channels, fft_length))
68 | self._output_phase = np.empty((channels, fft_length))
69 |
70 | # Buffer used to compute the phase increment and the instantaneous frequency
71 | self._buffer = np.empty(fft_length)
72 |
73 | def clear(self) -> None:
74 | self._first = True
75 |
76 | def convert_frame(self, frame: np.ndarray) -> np.ndarray:
77 | for k in range(self.channels):
78 | # Compute the FFT of the analysis frame
79 | stft = np.fft.rfft(frame[k])
80 | amplitude = np.abs(stft)
81 |
82 | phase: NDArray[np.float_]
83 | phase = np.angle(stft) # type: ignore
84 | del stft
85 |
86 | peaks = find_peaks(amplitude)
87 | closest_peak = get_closest_peaks(peaks)
88 |
89 | if self._first:
90 | # Leave the first frame unchanged
91 | self._output_phase[k, :] = phase
92 | else:
93 | # Compute the phase increment
94 | self._buffer[peaks] = (
95 | phase[peaks]
96 | - self._previous_phase[k, peaks]
97 | - self._analysis_hop * self._center_frequency[peaks]
98 | )
99 |
100 | # Unwrap the phase increment
101 | self._buffer[peaks] += np.pi
102 | self._buffer[peaks] %= 2 * np.pi
103 | self._buffer[peaks] -= np.pi
104 |
105 | # Compute the instantaneous frequency (in the same buffer,
106 |                 # since the phase increment won't be required after that)
107 | self._buffer[peaks] /= self._analysis_hop
108 | self._buffer[peaks] += self._center_frequency[peaks]
109 |
110 | self._buffer[peaks] *= self._synthesis_hop
111 | self._output_phase[k][peaks] += self._buffer[peaks]
112 |
113 | # Phase locking
114 | self._output_phase[k] = (
115 | self._output_phase[k][closest_peak] + phase - phase[closest_peak]
116 | )
117 |
118 | # Compute the new stft
119 | output_stft = amplitude * np.exp(1j * self._output_phase[k])
120 |
121 | frame[k, :] = np.fft.irfft(output_stft).real
122 |
123 | # Save the phase for the next analysis frame
124 | self._previous_phase[k, :] = phase
125 | del phase
126 | del amplitude
127 |
128 | self._first = False
129 |
130 | return frame
131 |
132 |
133 | class AnalysisSynthesisTSM:
134 | def run(self, reader: ArrReader, writer: ArrWriter, flush: bool = True) -> None:
135 | finished = False
136 | while not (finished and reader.empty):
137 | self.read_from(reader)
138 | _, finished = self.write_to(writer)
139 |
140 | if flush:
141 | finished = False
142 | while not finished:
143 | _, finished = self.flush_to(writer)
144 |
145 | self.clear()
146 |
147 | def __init__(
148 | self,
149 | channels: int,
150 | frame_length: int,
151 | analysis_hop: int,
152 | synthesis_hop: int,
153 | analysis_window: np.ndarray,
154 | synthesis_window: np.ndarray,
155 | ) -> None:
156 |
157 | self._converter = PhaseVocoderConverter(
158 | channels, frame_length, analysis_hop, synthesis_hop
159 | )
160 |
161 | self._channels = channels
162 | self._frame_length = frame_length
163 | self._analysis_hop = analysis_hop
164 | self._synthesis_hop = synthesis_hop
165 |
166 | self._analysis_window = analysis_window
167 | self._synthesis_window = synthesis_window
168 |
169 | # When the analysis hop is larger than the frame length, some samples
170 | # from the input need to be skipped.
171 | self._skip_input_samples = 0
172 |
173 | # Used to start the output signal in the middle of a frame, which should
174 | # be the peek of the window function
175 | self._skip_output_samples = 0
176 |
177 | self._normalize_window = self._analysis_window * self._synthesis_window
178 |
179 | # Initialize the buffers
180 | self._in_buffer = CBuffer(self._channels, self._frame_length)
181 | self._analysis_frame = np.empty((self._channels, self._frame_length))
182 | self._out_buffer = CBuffer(self._channels, self._frame_length)
183 | self._normalize_buffer = NormalizeBuffer(self._frame_length)
184 |
185 | self.clear()
186 |
187 | def clear(self) -> None:
188 | self._in_buffer.remove(self._in_buffer.length)
189 | self._out_buffer.remove(self._out_buffer.length)
190 | self._out_buffer.right_pad(self._frame_length)
191 | self._normalize_buffer.remove(self._normalize_buffer.length)
192 |
193 | # Left pad the input with half a frame of zeros, and ignore that half
194 | # frame in the output. This makes the output signal start in the middle
195 | # of a frame, which should be the peak of the window function.
196 | self._in_buffer.write(np.zeros((self._channels, self._frame_length // 2)))
197 | self._skip_output_samples = self._frame_length // 2
198 |
199 | self._converter.clear()
200 |
201 | def flush_to(self, writer: ArrWriter) -> Tuple[int, bool]:
202 | if self._in_buffer.remaining_length == 0:
203 | raise RuntimeError(
204 | "There is still data to process in the input buffer, flush_to method "
205 | "should only be called when write_to returns True."
206 | )
207 |
208 | n = self._out_buffer.write_to(writer)
209 | if self._out_buffer.ready == 0:
210 | # The output buffer is empty
211 | self.clear()
212 | return n, True
213 |
214 | return n, False
215 |
216 | def get_max_output_length(self, input_length: int) -> int:
217 | input_length -= self._skip_input_samples
218 | if input_length <= 0:
219 | return 0
220 |
221 | n_frames = input_length // self._analysis_hop + 1
222 | return n_frames * self._synthesis_hop
223 |
224 | def _process_frame(self) -> None:
225 | """Read an analysis frame from the input buffer, process it, and write
226 | the result to the output buffer."""
227 | # Generate the analysis frame and discard the input samples that will
228 | # not be needed anymore
229 | self._in_buffer.peek(self._analysis_frame)
230 | self._in_buffer.remove(self._analysis_hop)
231 |
232 | for channel in self._analysis_frame:
233 | channel *= self._analysis_window
234 |
235 | synthesis_frame = self._converter.convert_frame(self._analysis_frame)
236 |
237 | for channel in synthesis_frame:
238 | channel *= self._synthesis_window
239 |
240 | # Overlap and add the synthesis frame in the output buffer
241 | self._out_buffer.add(synthesis_frame)
242 |
243 | # The overlap and add step changes the volume of the signal. The
244 | # normalize_buffer is used to keep track of "how much of the input
245 |         # signal was added" to each part of the output buffer, so that it
246 |         # can be normalized.
247 | self._normalize_buffer.add(self._normalize_window)
248 |
249 | # Normalize the samples that are ready to be written to the output
250 | normalize = self._normalize_buffer.to_array(end=self._synthesis_hop)
251 | normalize[normalize < EPSILON] = 1
252 | self._out_buffer.divide(normalize)
253 | self._out_buffer.set_ready(self._synthesis_hop)
254 | self._normalize_buffer.remove(self._synthesis_hop)
255 |
256 | def read_from(self, reader: ArrReader) -> int:
257 | n = reader.skip(self._skip_input_samples)
258 | self._skip_input_samples -= n
259 | if self._skip_input_samples > 0:
260 | return n
261 |
262 | n += self._in_buffer.read_from(reader)
263 |
264 | if (
265 | self._in_buffer.remaining_length == 0
266 | and self._out_buffer.remaining_length >= self._synthesis_hop
267 | ):
268 | # The input buffer has enough data to process, and there is enough
269 | # space in the output buffer to store the output
270 | self._process_frame()
271 |
272 | # Skip output samples if necessary
273 | skipped = self._out_buffer.remove(self._skip_output_samples)
274 | self._out_buffer.right_pad(skipped)
275 | self._skip_output_samples -= skipped
276 |
277 | # Set the number of input samples to be skipped
278 | self._skip_input_samples = self._analysis_hop - self._frame_length
279 | if self._skip_input_samples < 0:
280 | self._skip_input_samples = 0
281 |
282 | return n
283 |
284 | def write_to(self, writer: ArrWriter) -> Tuple[int, bool]:
285 | n = self._out_buffer.write_to(writer)
286 | self._out_buffer.right_pad(n)
287 |
288 | if self._in_buffer.remaining_length > 0 and self._out_buffer.ready == 0:
289 | # There is not enough data to process in the input buffer, and the
290 | # output buffer is empty
291 | return n, True
292 |
293 | return n, False
294 |
--------------------------------------------------------------------------------
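
find_peaks marks bins that are at least as large as their two neighbors on each side, and get_closest_peaks then assigns every bin to its nearest peak for the phase-locking step. A toy trace with invented values:

    import numpy as np

    amplitude = np.array([0.1, 0.9, 0.2, 0.1, 0.8, 0.3])
    padded = np.concatenate((-np.ones(2), amplitude, -np.ones(2)))

    # A bin is a peak when it is >= its two left and two right neighbors.
    peaks = (
        (amplitude >= padded[:-4])
        & (amplitude >= padded[1:-3])
        & (amplitude >= padded[3:-1])
        & (amplitude >= padded[4:])
    )
    print(peaks)  # [False  True False False  True False]
    # get_closest_peaks(peaks) would then map each bin to its nearest peak:
    # [1, 1, 1, 4, 4, 4]
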
/auto_editor/render/tsm/array.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numpy.typing import NDArray
3 |
4 |
5 | class ArrReader:
6 | __slots__ = ("samples", "pointer")
7 |
8 | def __init__(self, arr: np.ndarray) -> None:
9 | self.samples = arr
10 | self.pointer = 0
11 |
12 | @property
13 | def empty(self) -> bool:
14 | return self.samples.shape[0] <= self.pointer
15 |
16 | def read(self, buffer: np.ndarray) -> int:
17 | end = self.pointer + buffer.shape[1]
18 | frames = self.samples[self.pointer : end].T.astype(np.float32)
19 | n = frames.shape[1]
20 | np.copyto(buffer[:, :n], frames)
21 | del frames
22 | self.pointer = end
23 | return n
24 |
25 | def skip(self, n: int) -> int:
26 | self.pointer += n
27 | return n
28 |
29 |
30 | class ArrWriter:
31 | __slots__ = ("output", "pointer")
32 |
33 | def __init__(self, arr: NDArray[np.int16]) -> None:
34 | self.output = arr
35 | self.pointer = 0
36 |
37 | def write(self, buffer: np.ndarray) -> int:
38 | end = self.pointer + buffer.shape[1]
39 | changed_buffer: NDArray[np.int16] = buffer.T.astype(np.int16)
40 | self.output = np.concatenate((self.output, changed_buffer))
41 | self.pointer = end
42 |
43 | return buffer.shape[1]
44 |
--------------------------------------------------------------------------------
/auto_editor/render/tsm/cbuffer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from .array import ArrReader, ArrWriter
4 |
5 |
6 | class CBuffer:
7 | def __init__(self, channels: int, max_length: int) -> None:
8 | self._data = np.zeros((channels, max_length), dtype=np.float32)
9 | self._channels = channels
10 | self._max_length = max_length
11 |
12 | self._offset = 0
13 | self._ready = 0
14 | self.length = 0
15 |
16 | def add(self, buffer: np.ndarray) -> None:
17 | """Adds a buffer element-wise to the CBuffer."""
18 | if buffer.shape[0] != self._data.shape[0]:
19 | raise ValueError("the two buffers should have the same number of channels")
20 |
21 | n = buffer.shape[1]
22 | if n > self.length:
23 | raise ValueError("not enough space remaining in CBuffer")
24 |
25 | # Compute the slice of data where the values will be added
26 | start = self._offset
27 | end = self._offset + n
28 |
29 | if end <= self._max_length:
30 | self._data[:, start:end] += buffer[:, :n]
31 | else:
32 | end -= self._max_length
33 | self._data[:, start:] += buffer[:, : self._max_length - start]
34 | self._data[:, :end] += buffer[:, self._max_length - start : n]
35 |
36 | def divide(self, array: np.ndarray) -> None:
37 | n = len(array)
38 | if n > self.length:
39 | raise ValueError("not enough space remaining in the CBuffer")
40 |
41 | start = self._offset
42 | end = self._offset + n
43 |
44 | if end <= self._max_length:
45 | self._data[:, start:end] /= array[:n]
46 | else:
47 | end -= self._max_length
48 | self._data[:, start:] /= array[: self._max_length - start]
49 | self._data[:, :end] /= array[self._max_length - start : n]
50 |
51 | def peek(self, buffer: np.ndarray) -> int:
52 | if buffer.shape[0] != self._data.shape[0]:
53 | raise ValueError("the two buffers should have the same number of channels")
54 |
55 | n = min(buffer.shape[1], self._ready)
56 |
57 | start = self._offset
58 | end = self._offset + n
59 |
60 | if end <= self._max_length:
61 | np.copyto(buffer[:, :n], self._data[:, start:end])
62 | else:
63 | end -= self._max_length
64 | np.copyto(buffer[:, : self._max_length - start], self._data[:, start:])
65 | np.copyto(buffer[:, self._max_length - start : n], self._data[:, :end])
66 |
67 | return n
68 |
69 | def read(self, buffer: np.ndarray) -> int:
70 | n = self.peek(buffer)
71 | self.remove(n)
72 | return n
73 |
74 | def read_from(self, reader: ArrReader) -> int:
75 | # Compute the slice of data that will be written to
76 | start = (self._offset + self.length) % self._max_length
77 | end = start + self._max_length - self.length
78 |
79 | if end <= self._max_length:
80 | n = reader.read(self._data[:, start:end])
81 | else:
82 |             # There is not enough space to copy the whole buffer, so it has to
83 |             # be split into two parts: one is copied at the end of _data, and
84 |             # the other at the beginning.
85 | end -= self._max_length
86 |
87 | n = reader.read(self._data[:, start:])
88 | n += reader.read(self._data[:, :end])
89 |
90 | self.length += n
91 | self._ready = self.length
92 | return n
93 |
94 | @property
95 | def ready(self):
96 | return self._ready
97 |
98 | @property
99 | def remaining_length(self):
100 | return self._max_length - self._ready
101 |
102 | def remove(self, n: int) -> int:
103 | """
104 |         Removes the first n samples of the CBuffer, preventing them from
105 |         being read again, and leaving more space for new samples to be
106 |         written.
107 | """
108 | if n >= self.length:
109 | n = self.length
110 |
111 | # Compute the slice of data that will be reset to 0
112 | start = self._offset
113 | end = self._offset + n
114 |
115 | if end <= self._max_length:
116 | self._data[:, start:end] = 0
117 | else:
118 | end -= self._max_length
119 | self._data[:, start:] = 0
120 | self._data[:, :end] = 0
121 |
122 | self._offset += n
123 | self._offset %= self._max_length
124 | self.length -= n
125 |
126 | self._ready -= n
127 | if self._ready < 0:
128 | self._ready = 0
129 |
130 | return n
131 |
132 | def right_pad(self, n: int) -> None:
133 | if n > self._max_length - self.length:
134 | raise ValueError("not enough space remaining in CBuffer")
135 |
136 | self.length += n
137 |
138 | def set_ready(self, n: int) -> None:
139 | """Mark the next n samples as ready to be read."""
140 | if self._ready + n > self.length:
141 | raise ValueError("not enough samples to be marked as ready")
142 |
143 | self._ready += n
144 |
145 | def to_array(self):
146 | out = np.empty((self._channels, self._ready))
147 | self.peek(out)
148 | return out
149 |
150 | def write(self, buffer: np.ndarray) -> int:
151 | if buffer.shape[0] != self._data.shape[0]:
152 | raise ValueError("the two buffers should have the same number of channels")
153 |
154 | n = min(buffer.shape[1], self._max_length - self.length)
155 |
156 | # Compute the slice of data that will be written to
157 | start = (self._offset + self.length) % self._max_length
158 | end = start + n
159 |
160 | if end <= self._max_length:
161 | np.copyto(self._data[:, start:end], buffer[:, :n])
162 | else:
163 |             # There is not enough space to copy the whole buffer, so it has to
164 |             # be split into two parts: one is copied at the end of _data, and
165 |             # the other at the beginning.
166 | end -= self._max_length
167 |
168 | np.copyto(self._data[:, start:], buffer[:, : self._max_length - start])
169 | np.copyto(self._data[:, :end], buffer[:, self._max_length - start : n])
170 |
171 | self.length += n
172 | self._ready = self.length
173 | return n
174 |
175 | def write_to(self, writer: ArrWriter) -> int:
176 | start = self._offset
177 | end = self._offset + self._ready
178 |
179 | if end <= self._max_length:
180 | n = writer.write(self._data[:, start:end])
181 | else:
182 | end -= self._max_length
183 | n = writer.write(self._data[:, start:])
184 | n += writer.write(self._data[:, :end])
185 |
186 | self.remove(n)
187 | return n
188 |
--------------------------------------------------------------------------------
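
CBuffer is a fixed-size circular buffer: writes wrap around the backing array and remove() frees the oldest samples for reuse. A quick exercise of that wraparound, assuming the class is importable from an installed checkout:

    import numpy as np

    from auto_editor.render.tsm.cbuffer import CBuffer

    buf = CBuffer(channels=1, max_length=4)
    buf.write(np.array([[1.0, 2.0, 3.0]]))  # length=3, ready=3
    buf.remove(2)                           # drop [1, 2]; offset moves to 2
    buf.write(np.array([[4.0, 5.0, 6.0]]))  # wraps around the end of _data

    out = np.empty((1, 4), dtype=np.float32)
    print(buf.read(out))  # 4
    print(out)            # [[3. 4. 5. 6.]]
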
/auto_editor/render/tsm/normalizebuffer.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | import numpy as np
4 |
5 | # A NormalizeBuffer is a mono-channel circular buffer, used to normalize audio buffers.
6 |
7 |
8 | class NormalizeBuffer:
9 | def __init__(self, length: int) -> None:
10 | self._data = np.zeros(length)
11 | self._offset = 0
12 | self.length = length
13 |
14 | def add(self, window: np.ndarray) -> None:
15 | # Adds a window element-wise to the NormalizeBuffer.
16 | n = len(window)
17 | if n > self.length:
18 | raise ValueError("the window should be smaller than the NormalizeBuffer")
19 |
20 | # Compute the slice of data where the values will be added
21 | start = self._offset
22 | end = self._offset + n
23 |
24 | if end <= self.length:
25 | self._data[start:end] += window
26 | else:
27 | end -= self.length
28 | self._data[start:] += window[: self.length - start]
29 | self._data[:end] += window[self.length - start :]
30 |
31 | def remove(self, n: int) -> None:
32 | if n >= self.length:
33 | n = self.length
34 | if n == 0:
35 | return
36 |
37 | # Compute the slice of data to reset
38 | start = self._offset
39 | end = self._offset + n
40 |
41 | if end <= self.length:
42 | self._data[start:end] = 0
43 | else:
44 | end -= self.length
45 | self._data[start:] = 0
46 | self._data[:end] = 0
47 |
48 | self._offset += n
49 | self._offset %= self.length
50 |
51 | def to_array(self, start: int = 0, end: Optional[int] = None) -> np.ndarray:
52 | if end is None:
53 | end = self.length
54 |
55 | start += self._offset
56 | end += self._offset
57 |
58 | if end <= self.length:
59 | return np.copy(self._data[start:end])
60 |
61 | end -= self.length
62 | if start < self.length:
63 | return np.concatenate((self._data[start:], self._data[:end]))
64 |
65 | start -= self.length
66 | return np.copy(self._data[start:end])
67 |
--------------------------------------------------------------------------------
/auto_editor/render/tsm/phasevocoder.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from numpy.typing import NDArray
3 |
4 | from .analysis_synthesis import AnalysisSynthesisTSM
5 | from .array import ArrReader, ArrWriter
6 |
7 |
8 | def hanning(length: int) -> np.ndarray:
9 | time = np.arange(length)
10 | return 0.5 * (1 - np.cos(2 * np.pi * time / length))
11 |
12 |
13 | def phasevocoder(
14 | channels: int, speed: float, arr: np.ndarray, frame_length: int = 2048
15 | ) -> NDArray[np.int16]:
16 |
17 | # Frame length should be a power of two for maximum performance.
18 |
19 | synthesis_hop = frame_length // 4
20 | analysis_hop = int(synthesis_hop * speed)
21 |
22 | analysis_window = hanning(frame_length)
23 | synthesis_window = hanning(frame_length)
24 |
25 | writer = ArrWriter(np.zeros((0, channels), dtype=np.int16))
26 | reader = ArrReader(arr)
27 |
28 | AnalysisSynthesisTSM(
29 | channels,
30 | frame_length,
31 | analysis_hop,
32 | synthesis_hop,
33 | analysis_window,
34 | synthesis_window,
35 | ).run(reader, writer)
36 |
37 | return writer.output
38 |
--------------------------------------------------------------------------------
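
phasevocoder is the entry point that render/audio.py uses: it takes a (samples, channels) int16 array plus a speed factor and returns the time-scaled samples. A usage sketch with an invented test tone, assuming the package is importable:

    import numpy as np

    from auto_editor.render.tsm.phasevocoder import phasevocoder

    sr = 44100
    t = np.linspace(0, 1, sr, endpoint=False)
    tone = (np.sin(2 * np.pi * 440 * t) * 10000).astype(np.int16)
    stereo = np.stack([tone, tone], axis=1)  # shape (samples, channels)

    fast = phasevocoder(2, 2.0, stereo)      # speed > 1 shortens the audio
    print(stereo.shape[0], fast.shape[0])    # output is roughly half as long
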
/auto_editor/render/video.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | import subprocess
3 | from fractions import Fraction
4 | from typing import Tuple
5 |
6 | from auto_editor.ffwrapper import FFmpeg
7 | from auto_editor.objects import EllipseObj, ImageObj, RectangleObj, TextObj
8 | from auto_editor.output import get_vcodec, video_quality
9 | from auto_editor.timeline import Timeline
10 | from auto_editor.utils.encoder import encoders
11 | from auto_editor.utils.log import Log
12 | from auto_editor.utils.progressbar import ProgressBar
13 |
14 | # From: github.com/PyAV-Org/PyAV/blob/main/av/video/frame.pyx
15 | allowed_pix_fmt = {
16 | "yuv420p",
17 | "yuvj420p",
18 | "rgb48be",
19 | "rgb48le",
20 | "rgb64be",
21 | "rgb64le",
22 | "rgb24",
23 | "bgr24",
24 | "argb",
25 | "rgba",
26 | "abgr",
27 | "bgra",
28 | "gray",
29 | "gray8",
30 | "gray16be",
31 | "gray16le",
32 | "rgb8",
33 | "bgr8",
34 | "pal8",
35 | }
36 |
37 |
38 | def pix_fmt_allowed(pix_fmt: str) -> bool:
39 | return pix_fmt in allowed_pix_fmt
40 |
41 |
42 | def apply_anchor(
43 | x: int, y: int, width: int, height: int, anchor: str
44 | ) -> Tuple[int, int]:
45 | if anchor == "ce":
46 | x = int((x * 2 - width) / 2)
47 | y = int((y * 2 - height) / 2)
48 | if anchor == "tr":
49 | x -= width
50 | if anchor == "bl":
51 | y -= height
52 | if anchor == "br":
53 | x -= width
54 | y -= height
55 | # Pillow uses 'tl' by default
56 | return x, y
57 |
58 |
59 | def one_pos_two_pos(
60 | x: int, y: int, width: int, height: int, anchor: str
61 | ) -> Tuple[int, int, int, int]:
62 | """Convert: x, y, width, height -> x1, y1, x2, y2"""
63 |
64 | if anchor == "ce":
65 | x1 = x - int(width / 2)
66 | x2 = x + int(width / 2)
67 | y1 = y - int(height / 2)
68 | y2 = y + int(height / 2)
69 |
70 | return x1, y1, x2, y2
71 |
72 | if anchor in ("tr", "br"):
73 | x1 = x - width
74 | x2 = x
75 | else:
76 | x1 = x
77 | x2 = x + width
78 |
79 | if anchor in ("tl", "tr"):
80 | y1 = y
81 | y2 = y + height
82 | else:
83 | y1 = y
84 | y2 = y - height
85 |
86 | return x1, y1, x2, y2
87 |
88 |
89 | def render_av(
90 | ffmpeg: FFmpeg,
91 | track: int,
92 | timeline: Timeline,
93 | args,
94 | progress: ProgressBar,
95 | rules,
96 | temp: str,
97 | log: Log,
98 | ) -> Tuple[str, bool]:
99 | try:
100 | import av
101 |
102 | av.logging.set_level(av.logging.PANIC)
103 | except ImportError:
104 | log.import_error("av")
105 | try:
106 | from PIL import Image, ImageChops, ImageDraw, ImageFont
107 | except ImportError:
108 | log.import_error("Pillow")
109 |
110 | def set_static_assets(all_objects, log: Log):
111 |         # Cache fonts and images so they aren't reloaded over and over.
112 |
113 | new_objects = []
114 |
115 | for obj in all_objects:
116 | if isinstance(obj, TextObj):
117 | try:
118 | obj._cache_font = ImageFont.truetype(obj.font, obj.size)
119 | except OSError:
120 | if obj.font == "default":
121 | obj._cache_font = ImageFont.load_default()
122 | else:
123 | log.error(f"Font '{obj.font}' not found.")
124 |
125 | if isinstance(obj, ImageObj):
126 | source = Image.open(obj.src)
127 | source = source.convert("RGBA")
128 | source = source.rotate(obj.rotate, expand=True)
129 | source = ImageChops.multiply(
130 | source,
131 | Image.new(
132 | "RGBA", source.size, (255, 255, 255, int(obj.opacity * 255))
133 | ),
134 | )
135 | obj._cache_src = source
136 |
137 | new_objects.append(obj)
138 |
139 | return new_objects
140 |
141 | def render_objects(timeline, index: int, frame, pix_fmt: str):
142 | img = frame.to_image().convert("RGBA")
143 |
144 | # TODO: Fix me
145 | for obj in timeline.vclips[track]:
146 |
147 | obj_img = Image.new("RGBA", img.size, (255, 255, 255, 0))
148 | draw = ImageDraw.Draw(obj_img)
149 |
150 | if isinstance(obj, TextObj):
151 | text_w, text_h = draw.textsize(obj.content, font=obj._cache_font)
152 | pos = apply_anchor(obj.x, obj.y, text_w, text_h, "ce")
153 | draw.text(
154 | pos,
155 | obj.content,
156 | font=obj._cache_font,
157 | fill=obj.fill,
158 | align=obj.align,
159 | stroke_width=obj.stroke,
160 | stroke_fill=obj.strokecolor,
161 | )
162 |
163 | if isinstance(obj, RectangleObj):
164 | draw.rectangle(
165 | one_pos_two_pos(obj.x, obj.y, obj.width, obj.height, obj.anchor),
166 | fill=obj.fill,
167 | width=obj.stroke,
168 | outline=obj.strokecolor,
169 | )
170 |
171 | if isinstance(obj, EllipseObj):
172 | draw.ellipse(
173 | one_pos_two_pos(obj.x, obj.y, obj.width, obj.height, obj.anchor),
174 | fill=obj.fill,
175 | width=obj.stroke,
176 | outline=obj.strokecolor,
177 | )
178 |
179 | if isinstance(obj, ImageObj):
180 | img_w, img_h = obj._cache_src.size
181 | pos = apply_anchor(obj.x, obj.y, img_w, img_h, obj.anchor)
182 | obj_img.paste(obj._cache_src, pos)
183 |
184 | img = Image.alpha_composite(img, obj_img)
185 |
186 | return frame.from_image(img).reformat(format=pix_fmt)
187 |
188 | chunks = timeline.chunks
189 |
190 | if chunks is None:
191 | log.error("Timeline too complex")
192 |
193 | inp = timeline.inp
194 |
195 | if chunks[-1][2] == 99999:
196 | chunks.pop()
197 |
198 | progress.start(chunks[-1][1], "Creating new video")
199 |
200 | cn = av.open(inp.path, "r")
201 | pix_fmt = cn.streams.video[track].pix_fmt
202 |
203 | target_pix_fmt = pix_fmt
204 |
205 | if not pix_fmt_allowed(pix_fmt):
206 | target_pix_fmt = "yuv420p"
207 |
208 | my_codec = get_vcodec(args.video_codec, inp, rules)
209 |
210 | apply_video_later = True
211 |
212 | if my_codec in encoders:
213 | apply_video_later = encoders[my_codec]["pix_fmt"].isdisjoint(allowed_pix_fmt)
214 |
215 | if args.scale != 1:
216 | apply_video_later = False
217 |
218 | log.debug(f"apply video quality settings now: {not apply_video_later}")
219 |
220 | stream = cn.streams.video[track]
221 | stream.thread_type = "AUTO"
222 |
223 | width = stream.width
224 | height = stream.height
225 |
226 | spedup = os.path.join(temp, f"spedup{track}.mp4")
227 |
228 | cmd = [
229 | "-hide_banner",
230 | "-y",
231 | "-f",
232 | "rawvideo",
233 | "-c:v",
234 | "rawvideo",
235 | "-pix_fmt",
236 | target_pix_fmt,
237 | "-s",
238 | f"{width}*{height}",
239 | "-framerate",
240 | f"{timeline.fps}",
241 | "-i",
242 | "-",
243 | "-pix_fmt",
244 | target_pix_fmt,
245 | ]
246 |
247 | if apply_video_later:
248 | cmd.extend(["-c:v", "mpeg4", "-qscale:v", "1"])
249 | else:
250 | cmd = video_quality(cmd, args, inp, rules)
251 |
252 | cmd.append(spedup)
253 |
254 | process2 = ffmpeg.Popen(
255 | cmd, stdin=subprocess.PIPE, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
256 | )
257 | assert process2.stdin is not None
258 |
259 | input_equavalent = Fraction(0, 1)
260 | output_equavalent = 0
261 | chunk = chunks.pop(0)
262 |
263 | tou = int(stream.time_base.denominator / stream.average_rate)
264 | log.debug(f"Tou: {tou}")
265 |
266 | seek = 10
267 | seek_frame = None
268 | frames_saved = 0
269 |
270 | # Keyframes are usually spread out every 5 seconds or less.
271 | if args.no_seek:
272 | SEEK_COST = 4294967295
273 | else:
274 | SEEK_COST = int(stream.average_rate * 5)
275 | SEEK_RETRY = SEEK_COST // 2
276 |
277 | # Converting between two different framerates is a lot like applying a speed.
278 | fps_convert = Fraction(stream.average_rate, Fraction(timeline.fps))
279 |
280 | try:
281 | for frame in cn.decode(stream):
282 | # frame.time == frame.pts * stream.time_base
283 | index = round(frame.time * timeline.fps)
284 | index2 = round(frame.time * stream.average_rate)
285 |
286 | if frame.key_frame:
287 | log.debug(f"Keyframe {index} {frame.pts}")
288 |
289 | if seek_frame is not None:
290 | log.debug(f"Skipped {index - seek_frame} frames")
291 | frames_saved += index - seek_frame
292 | seek_frame = None
293 |
294 | if index > chunk[1]:
295 | if chunks:
296 | chunk = chunks.pop(0)
297 | else:
298 | break
299 |
300 | if chunk[2] == 99999:
301 | if chunk[1] - index2 > SEEK_COST and index2 > seek:
302 | seek = index2 + SEEK_RETRY
303 |
304 | seek_frame = index
305 | cn.seek(chunk[1] * tou, stream=stream)
306 | else:
307 | input_equavalent += Fraction(1, Fraction(chunk[2]) * fps_convert)
308 |
309 | while input_equavalent > output_equavalent:
310 | # if index in effects.sheet:
311 | # frame = render_objects(
312 | # effects.sheet, effects.all, index, frame, target_pix_fmt
313 | # )
314 | if pix_fmt != target_pix_fmt:
315 | frame = frame.reformat(format=target_pix_fmt)
316 |
317 | in_bytes = frame.to_ndarray().tobytes()
318 | process2.stdin.write(in_bytes)
319 | output_equavalent += 1
320 |
321 | if index % 3 == 0:
322 | progress.tick(index)
323 |
324 | progress.end()
325 | process2.stdin.close()
326 | process2.wait()
327 | except (OSError, BrokenPipeError):
328 | progress.end()
329 | ffmpeg.run_check_errors(cmd, log, True)
330 | log.error("FFmpeg Error!")
331 |
332 | log.debug(f"Total frames saved seeking: {frames_saved}")
333 |
334 |     # Unfortunately, scaling has to be done as a separate encoding pass.
335 | if args.scale != 1:
336 | sped_input = os.path.join(temp, f"spedup{track}.mp4")
337 | spedup = os.path.join(temp, f"scale{track}.mp4")
338 |
339 | cmd = [
340 | "-i",
341 | sped_input,
342 | "-vf",
343 | f"scale=iw*{args.scale}:ih*{args.scale}",
344 | spedup,
345 | ]
346 |
347 | check_errors = ffmpeg.pipe(cmd)
348 | if "Error" in check_errors or "failed" in check_errors:
349 | if "-allow_sw 1" in check_errors:
350 | cmd.insert(-1, "-allow_sw")
351 | cmd.insert(-1, "1")
352 | # Run again to show errors even if it might not work.
353 | ffmpeg.run(cmd)
354 |
355 | return spedup, apply_video_later
356 |
--------------------------------------------------------------------------------
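
The decode loop in render_av keeps two clocks: each decoded input frame advances input_equavalent by 1 / (speed * fps_convert), and one output frame is written whenever that clock gets ahead of output_equavalent. A toy model of the accounting with made-up rates:

    from fractions import Fraction

    src_rate = Fraction(60)      # source frame rate
    timeline_fps = Fraction(30)  # output frame rate
    speed = Fraction(2)          # chunk speed

    fps_convert = src_rate / timeline_fps

    input_clock = Fraction(0)
    written = 0
    for _ in range(120):  # two seconds of 60 fps input
        input_clock += Fraction(1, speed * fps_convert)
        while input_clock > written:
            written += 1

    print(written)  # 30: two seconds at 2x speed -> one second at 30 fps
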
/auto_editor/subcommands/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/auto_editor/subcommands/__init__.py
--------------------------------------------------------------------------------
/auto_editor/subcommands/desc.py:
--------------------------------------------------------------------------------
1 | import sys
2 |
3 | from auto_editor.ffwrapper import FFmpeg, FileInfo
4 | from auto_editor.utils.log import Log
5 | from auto_editor.vanparse import ArgumentParser
6 |
7 |
8 | def desc_options(parser: ArgumentParser) -> ArgumentParser:
9 | parser.add_argument("--ffmpeg-location", help="Point to your custom ffmpeg file.")
10 | parser.add_required("input", nargs="*", help="Path to file(s)")
11 | return parser
12 |
13 |
14 | def main(sys_args=sys.argv[1:]) -> None:
15 | parser = desc_options(ArgumentParser("desc"))
16 | args = parser.parse_args(sys_args)
17 |
18 | ffmpeg = FFmpeg(args.ffmpeg_location, debug=False)
19 |
20 | print("")
21 | for input_file in args.input:
22 | inp = FileInfo(input_file, ffmpeg, Log())
23 | if "description" in inp.metadata:
24 | print(inp.metadata["description"], end="\n\n")
25 | else:
26 | print("No description.", end="\n\n")
27 |
28 |
29 | if __name__ == "__main__":
30 | main()
31 |
--------------------------------------------------------------------------------
/auto_editor/subcommands/grep.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import sys
4 | import tempfile
5 |
6 | from auto_editor.ffwrapper import FFmpeg
7 | from auto_editor.utils.log import Log
8 | from auto_editor.vanparse import ArgumentParser
9 |
10 |
11 | def grep_options(parser: ArgumentParser) -> ArgumentParser:
12 | parser.add_argument(
13 | "--no-filename", flag=True, help="Never print filenames with output lines."
14 | )
15 | parser.add_argument(
16 | "--max-count",
17 | "-m",
18 | type=int,
19 | help="Stop reading a file after NUM matching lines.",
20 | )
21 | parser.add_argument(
22 | "--count",
23 | "-c",
24 | flag=True,
25 | help="Suppress normal output; instead print count of matching lines for each file.",
26 | )
27 | parser.add_argument(
28 | "--ignore-case",
29 | "-i",
30 | flag=True,
31 | help="Ignore case distinctions for the PATTERN.",
32 | )
33 | parser.add_argument("--timecode", flag=True, help="Print the match's timecode.")
34 | parser.add_argument(
35 |         "--time", flag=True, help="Print when the match happens, ignoring the end timecode."
36 | )
37 | parser.add_argument("--ffmpeg-location", help="Point to your custom ffmpeg file.")
38 | parser.add_argument(
39 | "--my-ffmpeg",
40 | flag=True,
41 | help="Use the ffmpeg on your PATH instead of the one packaged.",
42 | )
43 | parser.add_required(
44 |         "input", nargs="*", help="A regex pattern, followed by the file(s) to search."
45 | )
46 | return parser
47 |
48 |
49 | # stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string
50 | def cleanhtml(raw_html: str) -> str:
51 | cleanr = re.compile("<.*?>")
52 | cleantext = re.sub(cleanr, "", raw_html)
53 | return cleantext
54 |
55 |
56 | def grep_file(
57 | media_file: str, add_prefix: bool, ffmpeg: FFmpeg, args, log: Log, TEMP: str
58 | ) -> None:
59 |
60 | """
61 |     We're using the WEBVTT subtitle format. It's better than SRT
62 |     because it doesn't emit cue numbers and its timecodes are written
63 |     as (hh:mm:ss.mmm) instead of (hh:mm:ss,mmm).
64 | """
65 |
66 | out_file = os.path.join(TEMP, "media.vtt")
67 | ffmpeg.run(["-i", media_file, out_file])
68 |
69 | count = 0
70 |
71 | flags = 0
72 | if args.ignore_case:
73 | flags = re.IGNORECASE
74 |
75 | prefix = ""
76 | if add_prefix:
77 | prefix = f"{os.path.splitext(os.path.basename(media_file))[0]}:"
78 |
79 | if args.max_count is None:
80 | args.max_count = float("inf")
81 |
82 | timecode = ""
83 | line_number = -1
84 | with open(out_file, "r") as file:
85 | while True:
86 | line = file.readline()
87 | line_number += 1
88 | if line_number == 0:
89 | continue
90 |
91 | if not line or count >= args.max_count:
92 | break
93 |
94 | if line.strip() == "":
95 | continue
96 |
97 |             if re.match(r"\d*:\d\d\.\d*\s-->\s\d*:\d\d\.\d*", line):
98 | if args.time:
99 | timecode = line.split("-->")[0].strip() + " "
100 | else:
101 | timecode = line.strip() + "; "
102 | continue
103 |
104 | line = cleanhtml(line)
105 | match = re.search(args.input[0], line, flags)
106 | line = line.strip()
107 |
108 | if match:
109 | count += 1
110 | if not args.count:
111 | if args.timecode or args.time:
112 | print(prefix + timecode + line)
113 | else:
114 | print(prefix + line)
115 |
116 | if args.count:
117 | print(prefix + str(count))
118 |
119 |
120 | def main(sys_args=sys.argv[1:]) -> None:
121 | parser = grep_options(ArgumentParser("grep"))
122 | args = parser.parse_args(sys_args)
123 |
124 | ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg, debug=False)
125 |
126 | TEMP = tempfile.mkdtemp()
127 | log = Log(temp=TEMP)
128 |
129 | media_files = args.input[1:]
130 | add_prefix = (
131 |         len(media_files) > 1 or (len(media_files) == 1 and os.path.isdir(media_files[0]))
132 | ) and not args.no_filename
133 |
134 | for path in media_files:
135 | if os.path.isdir(path):
136 | for filename in [f for f in os.listdir(path) if not f.startswith(".")]:
137 | grep_file(
138 | os.path.join(path, filename),
139 | add_prefix,
140 | ffmpeg,
141 | args,
142 | log,
143 | TEMP,
144 | )
145 | elif os.path.isfile(path):
146 | grep_file(path, add_prefix, ffmpeg, args, log, TEMP)
147 | else:
148 | log.error(f"{path}: File does not exist.")
149 |
150 | log.cleanup()
151 |
152 |
153 | if __name__ == "__main__":
154 | main()
155 |
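
For reference, the cue-timing lines the loop above keys on look like `00:12.340 --> 00:15.120`. A quick check of the timing regex against a made-up cue (illustrative data only):

import re

vtt_cue = "00:12.340 --> 00:15.120"
assert re.match(r"\d*:\d\d\.\d*\s-->\s\d*:\d\d\.\d*", vtt_cue) is not None
# --time prints only the part before "-->", i.e. the start time.
assert vtt_cue.split("-->")[0].strip() == "00:12.340"
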
--------------------------------------------------------------------------------
/auto_editor/subcommands/info.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os.path
3 | import sys
4 |
5 | import av
6 |
7 | from auto_editor.ffwrapper import FFmpeg, FileInfo
8 | from auto_editor.utils.func import aspect_ratio
9 | from auto_editor.utils.log import Log
10 | from auto_editor.vanparse import ArgumentParser
11 |
12 | av.logging.set_level(av.logging.PANIC)
13 |
14 |
15 | def info_options(parser: ArgumentParser) -> ArgumentParser:
16 | parser.add_argument("--json", flag=True, help="Export info in JSON format.")
17 | parser.add_argument(
18 | "--include-vfr",
19 | "--has-vfr",
20 | flag=True,
21 | help="Display the number of Variable Frame Rate (VFR) frames.",
22 | )
23 | parser.add_argument("--ffmpeg-location", help="Point to your custom ffmpeg file.")
24 | parser.add_argument(
25 | "--my-ffmpeg",
26 | flag=True,
27 | help="Use the ffmpeg on your PATH instead of the one packaged.",
28 | )
29 | parser.add_required(
30 | "input", nargs="*", help="The path to a file you want inspected."
31 | )
32 | return parser
33 |
34 |
35 | def main(sys_args=sys.argv[1:]):
36 | parser = info_options(ArgumentParser("info"))
37 | args = parser.parse_args(sys_args)
38 |
39 | ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg, False)
40 |
41 | file_info = {}
42 |
43 | for file in args.input:
44 | if not os.path.isfile(file):
45 | Log().error(f"Could not find file: {file}")
46 |
47 | inp = FileInfo(file, ffmpeg, Log())
48 |
49 | if len(inp.videos) + len(inp.audios) + len(inp.subtitles) == 0:
50 | file_info[file] = {"media": "invalid"}
51 | continue
52 |
53 | file_info[file] = {
54 | "video": [],
55 | "audio": [],
56 | "subtitle": [],
57 | "container": {},
58 | }
59 |
60 | cn = av.open(file, "r")
61 |
62 | for track, stream in enumerate(inp.videos):
63 | pix_fmt = cn.streams.video[track].pix_fmt
64 | time_base = str(cn.streams.video[track].time_base)
65 | cc_time_base = str(cn.streams.video[track].codec_context.time_base)
66 | w, h = stream.width, stream.height
67 | w_, h_ = aspect_ratio(w, h)
68 |
69 | fps = stream.fps
70 | if fps is not None and int(fps) == float(fps):
71 | fps = int(fps)
72 |
73 | vid = {
74 | "codec": stream.codec,
75 | "pix_fmt": pix_fmt,
76 | "fps": fps,
77 | "resolution": [w, h],
78 | "aspect ratio": [w_, h_],
79 | "timebase": time_base,
80 | "cc timebase": cc_time_base,
81 | "bitrate": stream.bitrate,
82 | "lang": stream.lang,
83 | }
84 | file_info[file]["video"].append(vid)
85 |
86 | for track, stream in enumerate(inp.audios):
87 | aud = {
88 | "codec": stream.codec,
89 | "samplerate": stream.samplerate,
90 | "bitrate": stream.bitrate,
91 | "lang": stream.lang,
92 | }
93 | file_info[file]["audio"].append(aud)
94 |
95 | for track, stream in enumerate(inp.subtitles):
96 | sub = {"codec": stream.codec, "lang": stream.lang}
97 | file_info[file]["subtitle"].append(sub)
98 |
99 | cont = {"duration": inp.duration, "bitrate": inp.bitrate}
100 |
101 | if args.include_vfr:
102 | fps_mode = ffmpeg.pipe(
103 | [
104 | "-i",
105 | file,
106 | "-hide_banner",
107 | "-vf",
108 | "vfrdet",
109 | "-an",
110 | "-f",
111 | "null",
112 | "-",
113 | ]
114 | ).strip()
115 | if "VFR:" in fps_mode:
116 | fps_mode = (fps_mode[fps_mode.index("VFR:") :]).strip()
117 |
118 | cont["fps_mode"] = fps_mode
119 |
120 | file_info[file]["container"] = cont
121 |
122 | if args.json:
123 | print(json.dumps(file_info, indent=4))
124 | return
125 |
126 | def stream_to_text(text: str, label: str, streams) -> str:
127 | if len(streams) > 0:
128 | text += f" - {label}:\n"
129 |
130 | for s, stream in enumerate(streams):
131 | text += f" - track {s}:\n"
132 | for key, value in stream.items():
133 | if value is not None:
134 | if isinstance(value, list):
135 | sep = "x" if key == "resolution" else ":"
136 | value = sep.join([str(x) for x in value])
137 |
138 | text += f" - {key}: {value}\n"
139 | return text
140 |
141 | text = ""
142 | for name, info in file_info.items():
143 | text += f"{name}:\n"
144 | if "media" in info:
145 | text += " - invalid media\n\n"
146 | continue
147 |
148 | for label, streams in info.items():
149 | if isinstance(streams, dict):
150 |             text += " - container:\n"
151 | for key, value in streams.items():
152 | text += f" - {key}: {value}\n"
153 | else:
154 | text = stream_to_text(text, label, streams)
155 | text += "\n"
156 |
157 | sys.stdout.write(text)
158 |
159 |
160 | if __name__ == "__main__":
161 | main()
162 |
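
With `--json`, the dictionary built above serializes to one object per input file. An illustrative shape (the keys match the code above; all values are made up):

example_output = {
    "example.mp4": {
        "video": [
            {
                "codec": "h264",
                "pix_fmt": "yuv420p",
                "fps": 30,
                "resolution": [1280, 720],
                "aspect ratio": [16, 9],
            }
        ],
        "audio": [{"codec": "aac", "samplerate": 48000}],
        "subtitle": [],
        "container": {"duration": "42.00", "bitrate": "317kb/s"},
    }
}
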
--------------------------------------------------------------------------------
/auto_editor/subcommands/levels.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import tempfile
4 | from typing import Sequence
5 |
6 | import numpy as np
7 | from numpy.typing import NDArray
8 |
9 | from auto_editor.ffwrapper import FFmpeg, FileInfo
10 | from auto_editor.utils.log import Log
11 | from auto_editor.utils.progressbar import ProgressBar
12 | from auto_editor.vanparse import ArgumentParser
13 |
14 |
15 | def levels_options(parser: ArgumentParser) -> ArgumentParser:
16 | parser.add_argument(
17 | "--kind",
18 | default="audio",
19 | choices=["audio", "motion", "pixeldiff"],
20 | help="Select the kind of detection to analyze.",
21 | )
22 | parser.add_argument(
23 | "--track",
24 | type=int,
25 | default=0,
26 |         help="Select the track to analyze. If `--kind` is motion or pixeldiff, "
27 |         "the track refers to video tracks instead of audio.",
28 | )
29 | parser.add_argument("--ffmpeg-location", help="Point to your custom ffmpeg file.")
30 | parser.add_argument(
31 | "--my-ffmpeg",
32 | flag=True,
33 | help="Use the ffmpeg on your PATH instead of the one packaged.",
34 | )
35 | parser.add_required(
36 | "input", nargs="*", help="Path to the file to have its levels dumped."
37 | )
38 | return parser
39 |
40 |
41 | def print_float_list(arr: NDArray[np.float_]) -> None:
42 | for a in arr:
43 | sys.stdout.write(f"{a:.20f}\n")
44 |
45 |
46 | def print_int_list(arr: NDArray[np.uint64]) -> None:
47 | for a in arr:
48 | sys.stdout.write(f"{a}\n")
49 |
50 |
51 | def main(sys_args=sys.argv[1:]) -> None:
52 | parser = levels_options(ArgumentParser("levels"))
53 | args = parser.parse_args(sys_args)
54 |
55 | ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg, False)
56 |
57 | progress = ProgressBar("none")
58 | temp = tempfile.mkdtemp()
59 | log = Log(temp=temp)
60 |
61 | inp = FileInfo(args.input[0], ffmpeg, log)
62 | fps = inp.get_fps()
63 |
64 | if args.kind == "audio":
65 | from auto_editor.analyze.audio import audio_detection
66 | from auto_editor.wavfile import read
67 |
68 | if args.track >= len(inp.audios):
69 | log.error(f"Audio track '{args.track}' does not exist.")
70 |
71 | read_track = os.path.join(temp, f"{args.track}.wav")
72 |
73 | ffmpeg.run(
74 | ["-i", inp.path, "-ac", "2", "-map", f"0:a:{args.track}", read_track]
75 | )
76 |
77 | if not os.path.isfile(read_track):
78 | log.error("Audio track file not found!")
79 |
80 | sample_rate, audio_samples = read(read_track)
81 |
82 | print_float_list(audio_detection(audio_samples, sample_rate, fps, progress))
83 |
84 | if args.kind == "motion":
85 | if args.track >= len(inp.videos):
86 | log.error(f"Video track '{args.track}' does not exist.")
87 |
88 | from auto_editor.analyze.motion import motion_detection
89 |
90 | print_float_list(motion_detection(inp.path, fps, progress, width=400, blur=9))
91 |
92 | if args.kind == "pixeldiff":
93 | if args.track >= len(inp.videos):
94 | log.error(f"Video track '{args.track}' does not exist.")
95 |
96 | from auto_editor.analyze.pixeldiff import pixel_difference
97 |
98 | print_int_list(pixel_difference(inp.path, fps, progress))
99 |
100 | log.cleanup()
101 |
102 |
103 | if __name__ == "__main__":
104 | main()
105 |
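
Assuming the subcommand is exposed through the main CLI like the others, a typical invocation dumps one level value per frame to stdout, which pipes naturally into a file:

auto-editor levels --kind motion --track 0 example.mp4 > motion-levels.txt
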
--------------------------------------------------------------------------------
/auto_editor/subcommands/subdump.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import tempfile
4 |
5 | from auto_editor.ffwrapper import FFmpeg, FileInfo
6 | from auto_editor.utils.log import Log
7 | from auto_editor.vanparse import ArgumentParser
8 |
9 |
10 | def subdump_options(parser: ArgumentParser) -> ArgumentParser:
11 | parser.add_argument("--ffmpeg-location", help="Point to your custom ffmpeg file.")
12 | parser.add_argument(
13 | "--my-ffmpeg",
14 | flag=True,
15 | help="Use the ffmpeg on your PATH instead of the one packaged.",
16 | )
17 | parser.add_required(
18 | "input", nargs="*", help="Path to the file to have its subtitles dumped."
19 | )
20 | return parser
21 |
22 |
23 | def main(sys_args=sys.argv[1:]) -> None:
24 | parser = subdump_options(ArgumentParser("subdump"))
25 | args = parser.parse_args(sys_args)
26 |
27 | ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg, debug=False)
28 |
29 | temp = tempfile.mkdtemp()
30 | log = Log(temp=temp)
31 |
32 | for i, input_file in enumerate(args.input):
33 | inp = FileInfo(input_file, ffmpeg, log)
34 |
35 | cmd = ["-i", input_file]
36 | for s, sub in enumerate(inp.subtitles):
37 | cmd.extend(["-map", f"0:s:{s}", os.path.join(temp, f"{i}s{s}.{sub.ext}")])
38 | ffmpeg.run(cmd)
39 |
40 | for s, sub in enumerate(inp.subtitles):
41 | print(f"file: {input_file} ({s}:{sub.lang}:{sub.ext})")
42 | with open(os.path.join(temp, f"{i}s{s}.{sub.ext}")) as file:
43 | print(file.read())
44 | print("------")
45 |
46 | log.cleanup()
47 |
48 |
49 | if __name__ == "__main__":
50 | main()
51 |
--------------------------------------------------------------------------------
/auto_editor/timeline.py:
--------------------------------------------------------------------------------
1 | from dataclasses import asdict, dataclass, fields
2 | from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union
3 |
4 | import numpy as np
5 | from numpy.typing import NDArray
6 |
7 | from auto_editor.ffwrapper import FileInfo
8 | from auto_editor.method import get_speed_list
9 | from auto_editor.objects import (
10 | AudioObj,
11 | EllipseObj,
12 | ImageObj,
13 | RectangleObj,
14 | TextObj,
15 | VideoObj,
16 | )
17 | from auto_editor.utils.func import chunkify, parse_dataclass
18 | from auto_editor.utils.log import Log
19 | from auto_editor.utils.progressbar import ProgressBar
20 | from auto_editor.utils.types import (
21 | AlignType,
22 | ChunkType,
23 | align_type,
24 | anchor_type,
25 | color_type,
26 | float_type,
27 | )
28 |
29 | Clip = NamedTuple(
30 | "Clip",
31 | [("start", int), ("dur", int), ("offset", int), ("speed", float), ("src", int)],
32 | )
33 |
34 |
35 | def unclipify(layer: List[Clip]) -> NDArray[np.float_]:
36 | l: List[int] = []
37 | for clip in layer:
38 | if clip.src != 0:
39 | raise ValueError("Clip has src that is not 0")
40 |
41 | if clip.start > len(l):
42 | raise ValueError(
43 |                 f"Clip layer has null frames, cannot convert to speed list: {l}"
44 | )
45 |
46 | for item in range(clip.offset, clip.offset + clip.dur):
47 | l.append(item)
48 |
49 | if len(set(l)) != len(l) or sorted(l) != l:
50 |         raise ValueError(f"Clip layer too complex, cannot convert to speed list: {l}")
51 |
52 | arr = np.empty(layer[-1].offset + layer[-1].dur, dtype=float)
53 | arr.fill(99999)
54 |
55 | for clip in layer:
56 | arr[clip.offset : clip.offset + clip.dur] = clip.speed
57 |
58 | return arr
59 |
60 |
61 | def _values(
62 | name: str,
63 | val: Union[float, str],
64 | _type: Union[type, Callable[[Any], Any]],
65 | _vars: Dict[str, int],
66 | log: Log,
67 | ):
68 | if _type is Any:
69 | return None
70 | if _type is float and name != "rotate":
71 | _type = float_type
72 | elif _type == AlignType:
73 | _type = align_type
74 | elif name == "anchor":
75 | _type = anchor_type
76 | elif name in ("fill", "strokecolor"):
77 | _type = color_type
78 |
79 | if _type is int:
80 | for key, item in _vars.items():
81 | if val == key:
82 | return item
83 |
84 | try:
85 | _type(val)
86 | except TypeError as e:
87 | log.error(str(e))
88 | except Exception:
89 | log.error(f"variable '{val}' is not defined.")
90 |
91 | return _type(val)
92 |
93 |
94 | @dataclass
95 | class Timeline:
96 | inputs: List[FileInfo]
97 | fps: float
98 | samplerate: int
99 | res: Tuple[int, int]
100 | background: str
101 | vclips: List[List[VideoObj]]
102 | aclips: List[List[AudioObj]]
103 | chunks: Optional[ChunkType] = None
104 |
105 | @property
106 | def inp(self):
107 | return self.inputs[0]
108 |
109 |
110 | def clipify(chunks: ChunkType, src: int) -> List[Clip]:
111 | clips = []
112 | start = 0
113 | for chunk in chunks:
114 | if chunk[2] != 99999:
115 | dur = chunk[1] - chunk[0]
116 | clips.append(Clip(start, dur, chunk[0], chunk[2], src))
117 | start += dur
118 |
119 | return clips
120 |
121 |
122 | def make_av(
123 | clips: List[Clip], inp: FileInfo
124 | ) -> Tuple[List[List[VideoObj]], List[List[AudioObj]]]:
125 |
126 | vclips: List[List[VideoObj]] = [[] for v in inp.videos]
127 | aclips: List[List[AudioObj]] = [[] for a in inp.audios]
128 |
129 | for v, _ in enumerate(inp.videos):
130 | for clip in clips:
131 | vclips[v].append(
132 | VideoObj(clip.start, clip.dur, clip.offset, clip.speed, clip.src)
133 | )
134 |
135 | for a, _ in enumerate(inp.audios):
136 | for clip in clips:
137 | aclips[a].append(
138 | AudioObj(clip.start, clip.dur, clip.offset, clip.speed, clip.src, a)
139 | )
140 |
141 | return vclips, aclips
142 |
143 |
144 | def make_layers(
145 | inputs: List[FileInfo], speedlists: List[NDArray[np.float_]]
146 | ) -> Tuple[Optional[ChunkType], List[List[VideoObj]], List[List[AudioObj]]]:
147 |
148 | clips = []
149 | for i, _chunks in enumerate([chunkify(s) for s in speedlists]):
150 | clips += clipify(_chunks, i)
151 |
152 | chunks: Optional[ChunkType] = None
153 | try:
154 | chunks = chunkify(unclipify(clips))
155 | except ValueError:
156 | pass
157 |
158 | vclips, aclips = make_av(clips, inputs[0])
159 | return chunks, vclips, aclips
160 |
161 |
162 | def make_timeline(
163 | inputs: List[FileInfo], args, sr: int, progress: ProgressBar, temp: str, log: Log
164 | ) -> Timeline:
165 | assert len(inputs) > 0
166 |
167 | inp = inputs[0]
168 |
169 | if args.frame_rate is None:
170 | fps = inp.get_fps()
171 | else:
172 | fps = args.frame_rate
173 |
174 | res = inp.get_res()
175 |
176 | speedlists = []
177 | for i, inp in enumerate(inputs):
178 | speedlists.append(get_speed_list(i, inp, fps, args, progress, temp, log))
179 |
180 | chunks, vclips, aclips = make_layers(inputs, speedlists)
181 |
182 | timeline = Timeline(inputs, fps, sr, res, args.background, vclips, aclips, chunks)
183 |
184 | w, h = res
185 | _vars: Dict[str, int] = {
186 | "width": w,
187 | "height": h,
188 | "centerX": w // 2,
189 | "centerY": h // 2,
190 | "start": 0,
191 | "end": 0, # TODO: deal with this
192 | }
193 |
194 | pool = []
195 |
196 | for o in args.add_text:
197 | pool.append(parse_dataclass(o, TextObj, log))
198 | for o in args.add_rectangle:
199 | pool.append(parse_dataclass(o, RectangleObj, log))
200 | for o in args.add_ellipse:
201 | pool.append(parse_dataclass(o, EllipseObj, log))
202 | for o in args.add_image:
203 | pool.append(parse_dataclass(o, ImageObj, log))
204 |
205 | # for index, obj in enumerate(pool):
206 |
207 | # dic_value = asdict(obj)
208 | # dic_type = {}
209 | # for field in fields(obj):
210 | # dic_type[field.name] = field.type
211 |
212 | # # Convert to the correct types
213 | # for k, _type in dic_type.items():
214 | # obj.__setattr__(k, _values(k, dic_value[k], _type, _vars, log))
215 |
216 | # if obj.dur < 1:
217 | # log.error(f"dur's value must be greater than 0. Was '{obj.dur}'.")
218 |
219 | # for frame in range(obj.start, obj.start + obj.dur, 1):
220 | # if frame in self.sheet:
221 | # self.sheet[frame].append(index)
222 | # else:
223 | # self.sheet[frame] = [index]
224 |
225 | # self.all.append(obj)
226 |
227 | return timeline
228 |
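
A worked round trip through the two helpers above (values illustrative; comments show the results):

chunks = [(0, 3, 1.0), (3, 5, 99999)]  # frames 3..5 are cut
clips = clipify(chunks, src=0)
# -> [Clip(start=0, dur=3, offset=0, speed=1.0, src=0)]

arr = unclipify(clips)
# -> array([1., 1., 1.])  one speed per kept source frame; the cut tail
#    after the last clip is not represented.
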
--------------------------------------------------------------------------------
/auto_editor/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/auto_editor/utils/__init__.py
--------------------------------------------------------------------------------
/auto_editor/utils/container.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, List, Dict, Any
2 |
3 | from dataclasses import dataclass, field
4 |
5 | pcm_formats = [
6 | "pcm_s16le", # default format
7 | "pcm_alaw",
8 | "pcm_f32be",
9 | "pcm_f32le",
10 | "pcm_f64be",
11 | "pcm_f64le",
12 | "pcm_mulaw",
13 | "pcm_s16be",
14 | "pcm_s24be",
15 | "pcm_s24le",
16 | "pcm_s32be",
17 | "pcm_s32le",
18 | "pcm_s8",
19 | "pcm_u16be",
20 | "pcm_u16le",
21 | "pcm_u24be",
22 | "pcm_u24le",
23 | "pcm_u32be",
24 | "pcm_u32le",
25 | "pcm_u8",
26 | "pcm_vidc",
27 | ]
28 |
29 | # Define aliases
30 | h265 = {
31 | "name": "H.265 / High Efficiency Video Coding (HEVC) / MPEG-H Part 2",
32 | "allow_video": True,
33 | "vcodecs": ["hevc", "mpeg4", "h264"],
34 | }
35 | h264 = {
36 | "name": "H.264 / Advanced Video Coding (AVC) / MPEG-4 Part 10",
37 | "allow_video": True,
38 | "vcodecs": ["h264", "mpeg4", "hevc"],
39 | }
40 | aac = {
41 | "name": "Advanced Audio Coding",
42 | "allow_audio": True,
43 | "max_audios": 1,
44 | "acodecs": ["aac"],
45 | "astrict": True,
46 | }
47 | ass = {
48 | "name": "SubStation Alpha",
49 | "allow_subtitle": True,
50 | "scodecs": ["ass", "ssa"],
51 | "max_subtitles": 1,
52 | "sstrict": True,
53 | }
54 | mp4 = {
55 | "name": "MP4 / MPEG-4 Part 14",
56 | "allow_video": True,
57 | "allow_audio": True,
58 | "allow_subtitle": True,
59 | "allow_image": True,
60 | "vcodecs": ["h264", "hevc", "vp9", "av1", "mpeg4", "mpeg2video", "mjpeg"],
61 | "acodecs": ["aac", "mp3", "opus", "flac", "vorbis", "mp2"],
62 | "disallow_v": ["prores", "apng", "gif", "msmpeg4v3", "flv1", "vp8", "rawvideo"],
63 | "disallow_a": pcm_formats,
64 | }
65 | ogg = {
66 | "allow_video": True,
67 | "allow_audio": True,
68 | "allow_subtitle": True,
69 | "vcodecs": ["libtheora", "theora"],
70 | "acodecs": ["libvorbis", "vorbis", "flac", "opus", "speex"],
71 | "vstrict": True,
72 | "astrict": True,
73 | }
74 |
75 | containers: Dict[str, Dict[str, Any]] = {
76 | # Aliases section
77 | "aac": aac,
78 | "adts": aac,
79 | "ass": ass,
80 | "ssa": ass,
81 | "264": h264,
82 | "h264": h264,
83 | "265": h265,
84 | "h265": h265,
85 | "hevc": h265,
86 | "mp4": mp4,
87 | "m4a": mp4,
88 | "ogg": ogg,
89 | "ogv": ogg,
90 | "apng": {
91 | "name": "Animated Portable Network Graphics",
92 | "allow_video": True,
93 | "max_videos": 1,
94 | "vcodecs": ["apng"],
95 | "vstrict": True,
96 | },
97 | "gif": {
98 | "name": "Graphics Interchange Format",
99 | "allow_video": True,
100 | "max_videos": 1,
101 | "vcodecs": ["gif"],
102 | "vstrict": True,
103 | },
104 | "wav": {
105 | "name": "Waveform Audio File Format",
106 | "allow_audio": True,
107 | "max_audios": 1,
108 | "acodecs": pcm_formats + ["mp3"],
109 | "astrict": True,
110 | },
111 | "ast": {
112 | "name": "AST / Audio Stream",
113 | "allow_audio": True,
114 | "acodecs": ["pcm_s16be_planar"],
115 | },
116 | "mp3": {
117 | "name": "MP3 / MPEG-2 Audio Layer 3",
118 | "allow_audio": True,
119 | "max_audios": 1,
120 | "acodecs": ["mp3"],
121 | "astrict": True,
122 | },
123 | "opus": {
124 | "name": "Opus",
125 | "allow_audio": True,
126 | "acodecs": ["opus", "flac", "libvorbis", "vorbis", "speex"],
127 | "astrict": True,
128 | },
129 | "oga": {
130 | "allow_audio": True,
131 | "acodecs": ["flac", "libvorbis", "vorbis", "opus", "speex"],
132 | "astrict": True,
133 | },
134 | "flac": {
135 | "name": "Free Lossless Audio Codec",
136 | "allow_audio": True,
137 | "max_audios": 1,
138 | "acodecs": ["flac"],
139 | },
140 | "webm": {
141 | "name": "WebM",
142 | "allow_video": True,
143 | "allow_audio": True,
144 | "allow_subtitle": True,
145 | "vcodecs": ["vp9", "vp8", "av1", "libaom-av1"],
146 | "acodecs": ["opus", "vorbis"],
147 | "scodecs": ["webvtt"],
148 | "vstrict": True,
149 | "astrict": True,
150 | "sstrict": True,
151 | },
152 | "srt": {
153 | "name": "SubRip Text / Subtitle Resource Tracks",
154 | "allow_subtitle": True,
155 | "scodecs": ["srt"],
156 | "max_subtitles": 1,
157 | "sstrict": True,
158 | },
159 | "vtt": {
160 | "name": "Web Video Text Track",
161 | "allow_subtitle": True,
162 | "scodecs": ["webvtt"],
163 | "max_subtitles": 1,
164 | "sstrict": True,
165 | },
166 | "avi": {
167 | "name": "Audio Video Interleave",
168 | "allow_video": True,
169 | "allow_audio": True,
170 | "vcodecs": ["mpeg4", "h264", "prores", "mjpeg", "mpeg2video", "rawvideo"],
171 | "acodecs": ["mp3", "aac", "vorbis", "mp2"],
172 | "disallow_v": ["hevc", "apng", "gif"],
173 | },
174 | "wmv": {
175 | "name": "Windows Media Video",
176 | "allow_video": True,
177 | "allow_audio": True,
178 | "vcodecs": ["msmpeg4v3", "h264", "mpeg4", "mpeg2video", "mjpeg", "rawvideo"],
179 | "acodecs": ["wmav2", "aac", "flac"],
180 | "disallow_v": ["prores", "hevc", "apng", "gif"],
181 | },
182 | "mkv": {
183 | "name": "Matroska",
184 | "allow_video": True,
185 | "allow_audio": True,
186 | "allow_subtitle": True,
187 | "allow_image": True,
188 | "vcodecs": [
189 | "h264",
190 | "hevc",
191 | "vp9",
192 | "vp8",
193 | "prores",
194 | "mpeg4",
195 | "mpeg2video",
196 | "msmpeg4v3",
197 | "mjpeg",
198 | "gif",
199 | "rawvideo",
200 | ],
201 | "acodecs": ["libvorbis", "vorbis", "opus", "flac", "aac", "mp2"],
202 | "disallow_v": ["apng"],
203 | },
204 | "mka": {
205 | "name": "Matroska Audio",
206 | "allow_audio": True,
207 | "acodecs": ["libvorbis", "vorbis", "opus", "flac", "aac", "mp2"],
208 | },
209 | "mov": {
210 | "name": "QuickTime / MOV",
211 | "allow_video": True,
212 | "allow_audio": True,
213 | "allow_subtitle": True,
214 | "vcodecs": [
215 | "h264",
216 | "hevc",
217 | "prores",
218 | "mpeg4",
219 | "mpeg2video",
220 | "msmpeg4v3",
221 | "mjpeg",
222 | "gif",
223 | "flv1",
224 | "rawvideo",
225 | ],
226 | "acodecs": ["aac", "mp3", "mp2", "vorbis"],
227 | "disallow_a": ["opus", "flac"],
228 | "disallow_v": ["apng", "vp9", "vp8"],
229 | },
230 | "swf": {
231 | "name": "ShockWave Flash / Small Web Format",
232 | "allow_video": True,
233 | "allow_audio": True,
234 | "vcodecs": ["flv1", "mjpeg"],
235 | "acodecs": ["mp3"],
236 | "vstrict": True,
237 | "astrict": True,
238 | "samplerate": [44100, 22050, 11025],
239 | },
240 | "not_in_here": {
241 | "allow_video": True,
242 | "allow_audio": True,
243 | "allow_subtitle": True,
244 | },
245 | }
246 |
247 |
248 | @dataclass
249 | class Container:
250 | name: Optional[str] = None
251 | allow_video: bool = False
252 | allow_audio: bool = False
253 | allow_subtitle: bool = False
254 | allow_image: bool = False
255 | max_videos: Optional[int] = None
256 | max_audios: Optional[int] = None
257 | max_subtitles: Optional[int] = None
258 | vcodecs: Optional[List[str]] = None
259 | acodecs: Optional[List[str]] = None
260 | scodecs: Optional[List[str]] = None
261 | vstrict: bool = False
262 | astrict: bool = False
263 | sstrict: bool = False
264 | disallow_v: List[str] = field(default_factory=list)
265 | disallow_a: List[str] = field(default_factory=list)
266 | samplerate: Optional[List[int]] = None # Any samplerate is allowed
267 |
268 |
269 | def get_rules(key: str) -> Container:
270 | if key in containers:
271 | return Container(**containers[key])
272 | return Container(**containers["not_in_here"])
273 |
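
A small sanity check of the lookup above; unknown keys fall back to the permissive "not_in_here" rules:

rules = get_rules("webm")
assert rules.vstrict and rules.vcodecs is not None and "vp9" in rules.vcodecs

fallback = get_rules("flv")  # no "flv" entry in the table
assert fallback.allow_video and fallback.vcodecs is None
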
--------------------------------------------------------------------------------
/auto_editor/utils/encoder.py:
--------------------------------------------------------------------------------
1 | encoders = {
2 | "libx264": {
3 | "pix_fmt": {
4 | "yuv420p",
5 | "yuvj420p",
6 | "yuv422p",
7 | "yuvj422p",
8 | "yuv444p",
9 | "yuvj444p",
10 | "nv12",
11 | "nv16",
12 | "nv21",
13 | "yuv420p10le",
14 | "yuv422p10le",
15 | "yuv444p10le",
16 | "nv20le",
17 | "gray",
18 | "gray10le",
19 | },
20 | },
21 | "libx264rgb": {
22 | "pix_fmt": {"bgr0", "bgr24", "rgb24"},
23 | },
24 | "h264_videotoolbox": {
25 | "pix_fmt": {"videotoolbox_vld", "nv12", "yuv420p"},
26 | },
27 | "h264": {
28 | "pix_fmt": {"videotoolbox_vld", "nv12", "yuv420p"},
29 | },
30 | "libx265": {
31 | "pix_fmt": {
32 | "yuv420p",
33 | "yuvj420p",
34 | "yuv422p",
35 | "yuvj422p",
36 | "yuv444p",
37 | "yuvj444p",
38 | "gbrp",
39 | "yuv420p10le",
40 | "yuv422p10le",
41 | "yuv444p10le",
42 | "gbrp10le",
43 | "yuv420p12le",
44 | "yuv422p12le",
45 | "yuv444p12le",
46 | "gbrp12le",
47 | "gray",
48 | "gray10le",
49 | "gray12le",
50 | },
51 | },
52 | "hevc_videotoolbox": {
53 | "pix_fmt": {"videotoolbox_vld", "nv12", "yuv420p", "bgra", "p010le"},
54 | },
55 | "hevc": {
56 | "pix_fmt": {
57 | "yuv420p",
58 | "yuvj420p",
59 | "yuv422p",
60 | "yuvj422p",
61 | "yuv444p",
62 | "yuvj444p",
63 | "gbrp",
64 | "yuv420p10le",
65 | "yuv422p10le",
66 | "yuv444p10le",
67 | "gbrp10le",
68 | "yuv420p12le",
69 | "yuv422p12le",
70 | "yuv444p12le",
71 | "gbrp12le",
72 | "gray",
73 | "gray10le",
74 | "gray12le",
75 | },
76 | },
77 | "hevc_nvenc": {
78 | "pix_fmt": {
79 | "yuv420p",
80 | "nv12",
81 | "p010le",
82 | "yuv444p",
83 | "p016le",
84 | "yuv444p16le",
85 | "bgr0",
86 | "rgb0",
87 | "gbrp",
88 | "gbrp16le",
89 | "cuda",
90 | "d3d11",
91 | },
92 | },
93 | "hevc_amf": {
94 | "pix_fmt": {
95 | "nv12",
96 | "yuv420p",
97 | "d3d11",
98 | "dxva2_vld",
99 | },
100 | },
101 |     "h264_nvenc": {
102 |         "pix_fmt": {"yuv420p", "nv12", "p010le", "yuv444p", "p016le", "yuv444p16le", "bgr0", "rgb0", "gbrp", "gbrp16le", "cuda", "d3d11"},
103 |     },
104 |     "h264_amf": {
105 |         "pix_fmt": {"nv12", "yuv420p", "d3d11", "dxva2_vld"},
106 |     },
107 |     "hevc_qsv": {
108 |         "pix_fmt": {
109 |             "nv12",
110 |             "p010le",
111 |             "yuyv422",
112 |             "y210le",
113 |             "qsv",
114 |             "bgra",
115 |             "x2rgb10le",
116 |         },
117 |     },
118 |     "h264_qsv": {
119 |         "pix_fmt": {
120 |             "nv12",
121 |             "p010le",
122 |             "qsv",
123 |         },
124 |     },
131 | "vp9": {
132 | "pix_fmt": {
133 | "yuv420p",
134 | "yuva420p",
135 | "yuv422p",
136 | "yuv440p",
137 | "yuv444p",
138 | "yuv420p10le",
139 | "yuv422p10le",
140 | "yuv440p10le",
141 | "yuv444p10le",
142 | "yuv420p12le",
143 | "yuv422p12le",
144 | "yuv440p12le",
145 | "yuv444p12le",
146 | "gbrp",
147 | "gbrp10le",
148 | "gbrp12le",
149 | },
150 | },
151 | "vp8": {
152 | "pix_fmt": {"yuv420p", "yuva420p"},
153 | },
154 | "prores": {
155 | "pix_fmt": {"yuv422p10le", "yuv444p10le", "yuva444p10le"},
156 | },
157 | "av1": {
158 | "pix_fmt": {
159 | "yuv420p",
160 | "yuv422p",
161 | "yuv444p",
162 | "gbrp",
163 | "yuv420p10le",
164 | "yuv422p10le",
165 | "yuv444p10le",
166 | "yuv420p12le",
167 | "yuv422p12le",
168 | "yuv444p12le",
169 | "gbrp10le",
170 | "gbrp12le",
171 | "gray",
172 | "gray10le",
173 | "gray12le",
174 | },
175 | },
176 | "mpeg4": {
177 | "pix_fmt": {"yuv420p"},
178 | },
179 | "mpeg2video": {
180 | "pix_fmt": {"yuv420p", "yuv422p"},
181 | },
182 | "mjpeg": {
183 | "pix_fmt": {"yuvj420p", "yuvj422p", "yuvj444p"},
184 | },
185 | }
186 |
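
One way a caller might consult this table before picking a pixel format. A sketch only: the project's actual selection logic may differ, and `encoder_supports` is a hypothetical helper:

def encoder_supports(encoder: str, pix_fmt: str) -> bool:
    # Encoders missing from the table support nothing, per this data.
    return pix_fmt in encoders.get(encoder, {}).get("pix_fmt", set())

assert encoder_supports("libx264", "yuv420p")
assert not encoder_supports("libx264rgb", "yuv420p")
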
--------------------------------------------------------------------------------
/auto_editor/utils/func.py:
--------------------------------------------------------------------------------
1 | from typing import List, Tuple, Union, overload
2 |
3 | import numpy as np
4 | from numpy.typing import NDArray
5 |
6 | from auto_editor.utils.log import Log
7 | from auto_editor.utils.types import ChunkType, split_num_str
8 |
9 | """
10 | To prevent duplicate code being pasted between scripts, common functions should be
11 | put here. Every function should be pure with no side effects.
12 | """
13 |
14 | # Turn long silent/loud array to formatted chunk list.
15 | # Example: [1, 1, 1, 2, 2] => [(0, 3, 1), (3, 5, 2)]
16 | def chunkify(arr: Union[np.ndarray, List[int]]) -> ChunkType:
17 | arr_length = len(arr)
18 |
19 | chunks = []
20 | start = 0
21 | for j in range(1, arr_length):
22 | if arr[j] != arr[j - 1]:
23 | chunks.append((start, j, arr[j - 1]))
24 | start = j
25 |     chunks.append((start, arr_length, arr[-1]))
26 | return chunks
27 |
28 |
29 | def to_timecode(secs: float, fmt: str) -> str:
30 | sign = ""
31 | if secs < 0:
32 | sign = "-"
33 | secs = -secs
34 |
35 | _m, _s = divmod(secs, 60)
36 | _h, _m = divmod(_m, 60)
37 | s, m, h = float(_s), int(_m), int(_h)
38 |
39 | if fmt == "webvtt":
40 | if h == 0:
41 | return f"{sign}{m:02d}:{s:06.3f}"
42 | return f"{sign}{h:02d}:{m:02d}:{s:06.3f}"
43 | if fmt == "mov_text":
44 | return f"{sign}{h:02d}:{m:02d}:" + f"{s:06.3f}".replace(".", ",", 1)
45 | if fmt == "standard":
46 | return f"{sign}{h:02d}:{m:02d}:{s:06.3f}"
47 | if fmt == "ass":
48 | return f"{sign}{h:d}:{m:02d}:{s:05.2f}"
49 | if fmt == "rass":
50 | return f"{sign}{h:d}:{m:02d}:{s:02.0f}"
51 |
52 | raise ValueError("to_timecode: Unreachable")
53 |
54 |
55 | def remove_small(
56 | has_loud: NDArray[np.bool_], lim: int, replace: int, with_: int
57 | ) -> NDArray[np.bool_]:
58 | start_p = 0
59 | active = False
60 | for j, item in enumerate(has_loud):
61 | if item == replace:
62 | if not active:
63 | start_p = j
64 | active = True
65 | # Special case for end.
66 | if j == len(has_loud) - 1:
67 | if j - start_p < lim:
68 | has_loud[start_p : j + 1] = with_
69 | else:
70 | if active:
71 | if j - start_p < lim:
72 | has_loud[start_p:j] = with_
73 | active = False
74 | return has_loud
75 |
76 |
77 | def str_is_number(val: str) -> bool:
78 | return val.replace(".", "", 1).replace("-", "", 1).isdigit()
79 |
80 |
81 | def str_starts_with_number(val: str) -> bool:
82 | if val.startswith("-"):
83 | val = val[1:]
84 | val = val.replace(".", "", 1)
85 | return val[0].isdigit()
86 |
87 |
88 | @overload
89 | def set_range(
90 | arr: NDArray[np.float_],
91 | range_syntax: List[List[str]],
92 | fps: float,
93 | with_: int,
94 | log: Log,
95 | ) -> NDArray[np.float_]:
96 | pass
97 |
98 |
99 | @overload
100 | def set_range(
101 | arr: NDArray[np.bool_],
102 | range_syntax: List[List[str]],
103 | fps: float,
104 | with_: int,
105 | log: Log,
106 | ) -> NDArray[np.bool_]:
107 | pass
108 |
109 |
110 | def set_range(arr, range_syntax, fps, with_, log):
111 | def replace_variables_to_values(val: str, fps: float, log: Log) -> int:
112 | if str_is_number(val):
113 | return int(val)
114 | if str_starts_with_number(val):
115 | try:
116 | value, unit = split_num_str(val)
117 | except TypeError as e:
118 | log.error(str(e))
119 |
120 | if unit in ("", "f", "frame", "frames"):
121 | if isinstance(value, float):
122 | log.error("float type cannot be used with frame unit")
123 | return int(value)
124 | if unit in ("s", "sec", "secs", "second", "seconds"):
125 | return round(value * fps)
126 | log.error(f"Unknown unit: {unit}")
127 |
128 | if val == "start":
129 | return 0
130 | if val == "end":
131 | return len(arr)
132 | return log.error(f"variable '{val}' not available.")
133 |
134 | for _range in range_syntax:
135 | pair = []
136 | for val in _range:
137 | num = replace_variables_to_values(val, fps, log)
138 | if num < 0:
139 | num += len(arr)
140 | pair.append(num)
141 | arr[pair[0] : pair[1]] = with_
142 | return arr
143 |
144 |
145 | def seconds_to_frames(value: Union[int, str], fps: float) -> int:
146 | if isinstance(value, str):
147 | return int(float(value) * fps)
148 | return value
149 |
150 |
151 | def cook(has_loud: NDArray[np.bool_], min_clip: int, min_cut: int) -> NDArray[np.bool_]:
152 | has_loud = remove_small(has_loud, min_clip, replace=1, with_=0)
153 | has_loud = remove_small(has_loud, min_cut, replace=0, with_=1)
154 | return has_loud
155 |
156 |
157 | def apply_margin(
158 | has_loud: NDArray[np.bool_], has_loud_length: int, start_m: int, end_m: int
159 | ) -> NDArray[np.bool_]:
160 |
161 | # Find start and end indexes.
162 | start_index = []
163 | end_index = []
164 | for j in range(1, has_loud_length):
165 | if has_loud[j] != has_loud[j - 1]:
166 | if has_loud[j]:
167 | start_index.append(j)
168 | else:
169 | end_index.append(j)
170 |
171 | # Apply margin
172 | if start_m > 0:
173 | for i in start_index:
174 | has_loud[max(i - start_m, 0) : i] = True
175 | if start_m < 0:
176 | for i in start_index:
177 | has_loud[i : min(i - start_m, has_loud_length)] = False
178 |
179 | if end_m > 0:
180 | for i in end_index:
181 | has_loud[i : min(i + end_m, has_loud_length)] = True
182 | if end_m < 0:
183 | for i in end_index:
184 | has_loud[max(i + end_m, 0) : i] = False
185 |
186 | return has_loud
187 |
188 |
189 | def apply_mark_as(
190 | has_loud: NDArray[np.bool_], has_loud_length: int, fps: float, args, log: Log
191 | ) -> NDArray[np.bool_]:
192 |
193 | if args.mark_as_loud != []:
194 | has_loud = set_range(has_loud, args.mark_as_loud, fps, args.video_speed, log)
195 |
196 | if args.mark_as_silent != []:
197 | has_loud = set_range(has_loud, args.mark_as_silent, fps, args.silent_speed, log)
198 | return has_loud
199 |
200 |
201 | def to_speed_list(
202 | has_loud: NDArray[np.bool_], video_speed: float, silent_speed: float
203 | ) -> NDArray[np.float_]:
204 |
205 | speed_list = has_loud.astype(float)
206 |
207 | # WARN: This breaks if speed is allowed to be 0
208 | speed_list[speed_list == 1] = video_speed
209 | speed_list[speed_list == 0] = silent_speed
210 |
211 | return speed_list
212 |
213 |
214 | def merge(start_list: np.ndarray, end_list: np.ndarray) -> NDArray[np.bool_]:
215 | result = np.zeros((len(start_list)), dtype=np.bool_)
216 |
217 | for i, item in enumerate(start_list):
218 |         if item:
219 |             where = np.where(end_list[i:])[0]
220 |             if len(where) > 0:
221 |                 result[i : i + where[0]] = True
222 | return result
223 |
224 |
225 | def parse_dataclass(unsplit_arguments, dataclass, log):
226 | from dataclasses import fields
227 |
228 | # Positional Arguments
229 | # --rectangle 0,end,10,20,20,30,#000, ...
230 | # Keyword Arguments
231 | # --rectangle start=0,end=end,x1=10, ...
232 |
233 | ARG_SEP = ","
234 | KEYWORD_SEP = "="
235 |
236 | d_name = dataclass.__name__
237 |
238 | keys = [field.name for field in fields(dataclass)]
239 | kwargs = {}
240 | args = []
241 |
242 | allow_positional_args = True
243 |
244 | if unsplit_arguments == "":
245 | return dataclass()
246 |
247 | for i, arg in enumerate(unsplit_arguments.split(ARG_SEP)):
248 | if i + 1 > len(keys):
249 | log.error(f"{d_name} has too many arguments, starting with '{arg}'.")
250 |
251 | if KEYWORD_SEP in arg:
252 | allow_positional_args = False
253 |
254 | parameters = arg.split(KEYWORD_SEP)
255 | if len(parameters) > 2:
256 | log.error(f"{d_name} invalid syntax: '{arg}'.")
257 | key, val = parameters
258 | if key not in keys:
259 | log.error(f"{d_name} got an unexpected keyword '{key}'")
260 |
261 | kwargs[key] = val
262 | elif allow_positional_args:
263 | args.append(arg)
264 | else:
265 | log.error(f"{d_name} positional argument follows keyword argument.")
266 |
267 | try:
268 | dataclass_instance = dataclass(*args, **kwargs)
269 | except TypeError as err:
270 | err_list = [d_name] + str(err).split(" ")[1:]
271 | log.error(" ".join(err_list))
272 |
273 | return dataclass_instance
274 |
275 |
276 | def get_stdout(cmd: List[str]) -> str:
277 | from subprocess import PIPE, STDOUT, Popen
278 |
279 | stdout, _ = Popen(cmd, stdout=PIPE, stderr=STDOUT).communicate()
280 | return stdout.decode("utf-8", "replace")
281 |
282 |
283 | def aspect_ratio(width: int, height: int) -> Union[Tuple[int, int], Tuple[None, None]]:
284 | if height == 0:
285 | return None, None
286 |
287 | def gcd(a: int, b: int) -> int:
288 | while b:
289 | a, b = b, a % b
290 | return a
291 |
292 | c = gcd(width, height)
293 | return width // c, height // c
294 |
295 |
296 | def human_readable_time(time_in_secs: float) -> str:
297 | units = "seconds"
298 | if time_in_secs >= 3600:
299 | time_in_secs = round(time_in_secs / 3600, 1)
300 | if time_in_secs % 1 == 0:
301 | time_in_secs = round(time_in_secs)
302 | units = "hours"
303 |     elif time_in_secs >= 60:
304 | time_in_secs = round(time_in_secs / 60, 1)
305 | if time_in_secs >= 10 or time_in_secs % 1 == 0:
306 | time_in_secs = round(time_in_secs)
307 | units = "minutes"
308 | return f"{time_in_secs} {units}"
309 |
310 |
311 | def open_with_system_default(path: str, log: Log) -> None:
312 | import sys
313 | from subprocess import run
314 |
315 | if sys.platform == "win64" or sys.platform == "win32":
316 | from os import startfile
317 |
318 | try:
319 | startfile(path)
320 | except OSError:
321 | log.warning("Could not find application to open file.")
322 | else:
323 | try: # should work on MacOS and some Linux distros
324 | run(["open", path])
325 | except Exception:
326 | try: # should work on WSL2
327 | run(["cmd.exe", "/C", "start", path])
328 | except Exception:
329 | try: # should work on most other Linux distros
330 | run(["xdg-open", path])
331 | except Exception:
332 | log.warning("Could not open output file.")
333 |
334 |
335 | def append_filename(path: str, val: str) -> str:
336 | from os.path import splitext
337 |
338 | root, ext = splitext(path)
339 | return root + val + ext
340 |
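
Worked examples of the core helpers above (assuming they are imported from this module):

import numpy as np

# chunkify: runs of equal speeds become (start, end, speed) triples.
assert chunkify(np.array([1, 1, 1, 2, 2])) == [(0, 3, 1), (3, 5, 2)]

# apply_margin: pad one frame of "loud" before each loud section.
loud = np.array([0, 0, 1, 1, 0, 0], dtype=np.bool_)
assert list(apply_margin(loud.copy(), len(loud), 1, 0)) == [0, 1, 1, 1, 0, 0]

# to_timecode: seconds to a WebVTT-style stamp.
assert to_timecode(75.5, "webvtt") == "01:15.500"
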
--------------------------------------------------------------------------------
/auto_editor/utils/log.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from shutil import rmtree, get_terminal_size
3 | from time import perf_counter, sleep
4 | from datetime import timedelta
5 |
6 | from typing import NoReturn, Optional
7 |
8 |
9 | class Timer:
10 | __slots__ = ("start_time", "quiet")
11 |
12 | def __init__(self, quiet: bool = False) -> None:
13 | self.start_time = perf_counter()
14 | self.quiet = quiet
15 |
16 | def stop(self) -> None:
17 | if not self.quiet:
18 | second_len = round(perf_counter() - self.start_time, 2)
19 | minute_len = timedelta(seconds=round(second_len))
20 |
21 |             sys.stdout.write(f"Finished. Took {second_len} seconds ({minute_len})\n")
22 |
23 |
24 | class Log:
25 | __slots__ = ("is_debug", "quiet", "temp")
26 |
27 | def __init__(
28 | self, show_debug: bool = False, quiet: bool = False, temp: Optional[str] = None
29 | ) -> None:
30 | self.is_debug = show_debug
31 | self.quiet = quiet
32 | self.temp = temp
33 |
34 | def debug(self, message: object) -> None:
35 | if self.is_debug:
36 | self.conwrite("")
37 | sys.stderr.write(f"Debug: {message}\n")
38 |
39 | def cleanup(self) -> None:
40 | if self.temp is None:
41 | return
42 | try:
43 | rmtree(self.temp)
44 | self.debug("Removed Temp Directory.")
45 | except FileNotFoundError:
46 | pass
47 | except PermissionError:
48 | sleep(0.1)
49 | try:
50 | rmtree(self.temp)
51 | self.debug("Removed Temp Directory.")
52 | except Exception:
53 | self.debug("Failed to delete temp dir.")
54 |
55 | def conwrite(self, message: str) -> None:
56 | if not self.quiet:
57 | buffer = " " * (get_terminal_size().columns - len(message) - 3)
58 | sys.stdout.write(f" {message}{buffer}\r")
59 |
60 | def error(self, message: str) -> NoReturn:
61 | self.conwrite("")
62 | sys.stderr.write(f"Error! {message}\n")
63 | self.cleanup()
64 | from platform import system
65 |
66 | if system() == "Linux":
67 | sys.exit(1)
68 | else:
69 | try:
70 | sys.exit(1)
71 | except SystemExit:
72 | import os
73 |
74 | os._exit(1)
75 |
76 | def import_error(self, lib: str) -> NoReturn:
77 | self.error(f"Python module '{lib}' not installed. Run: pip install {lib}")
78 |
79 | def warning(self, message: str) -> None:
80 | if not self.quiet:
81 | sys.stderr.write(f"Warning! {message}\n")
82 |
83 | def print(self, message: str) -> None:
84 | if not self.quiet:
85 | sys.stdout.write(f"{message}\n")
86 |
--------------------------------------------------------------------------------
/auto_editor/utils/progressbar.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from math import floor
3 | from time import time, localtime
4 | from shutil import get_terminal_size
5 | from platform import system
6 |
7 | from typing import Union
8 |
9 | from .func import get_stdout
10 |
11 |
12 | class ProgressBar:
13 | def __init__(self, bar_type: str) -> None:
14 |
15 | self.machine = False
16 | self.hide = False
17 |
18 | self.icon = "⏳"
19 | self.chars = [" ", "▏", "▎", "▍", "▌", "▋", "▊", "▉", "█"]
20 | self.brackets = ("|", "|")
21 |
22 | if bar_type == "classic":
23 | self.icon = "⏳"
24 | self.chars = ["░", "█"]
25 | self.brackets = ("[", "]")
26 | if bar_type == "ascii":
27 | self.icon = "& "
28 | self.chars = ["-", "#"]
29 | self.brackets = ("[", "]")
30 | if bar_type == "machine":
31 | self.machine = True
32 | if bar_type == "none":
33 | self.hide = True
34 |
35 | self.part_width = len(self.chars) - 1
36 |
37 | self.ampm = True
38 | if system() == "Darwin" and bar_type in ("default", "classic"):
39 | try:
40 | date_format = get_stdout(
41 | ["defaults", "read", "com.apple.menuextra.clock", "DateFormat"]
42 | )
43 | self.ampm = "a" in date_format
44 | except FileNotFoundError:
45 | pass
46 |
47 | @staticmethod
48 | def pretty_time(my_time: float, ampm: bool) -> str:
49 | new_time = localtime(my_time)
50 |
51 | hours = new_time.tm_hour
52 | minutes = new_time.tm_min
53 |
54 | if ampm:
55 | if hours == 0:
56 | hours = 12
57 | if hours > 12:
58 | hours -= 12
59 | ampm_marker = "PM" if new_time.tm_hour >= 12 else "AM"
60 | return f"{hours:02}:{minutes:02} {ampm_marker}"
61 | return f"{hours:02}:{minutes:02}"
62 |
63 | def tick(self, index: Union[int, float]) -> None:
64 |
65 | if self.hide:
66 | return
67 |
68 | progress = min(1, max(0, index / self.total))
69 |
70 | if progress == 0:
71 | progress_rate = 0.0
72 | else:
73 | progress_rate = (time() - self.begin_time) / progress
74 |
75 | if self.machine:
76 | index = min(index, self.total)
77 | raw = int(self.begin_time + progress_rate)
78 | print(
79 | f"{self.title}~{index}~{self.total}~{self.begin_time}~{raw}",
80 | end="\r",
81 | flush=True,
82 | )
83 | return
84 |
85 | new_time = self.pretty_time(self.begin_time + progress_rate, self.ampm)
86 |
87 | percent = round(progress * 100, 1)
88 | p_pad = " " * (4 - len(str(percent)))
89 |
90 | columns = get_terminal_size().columns
91 | bar_len = max(1, columns - (self.len_title + 32))
92 |
93 | progress_bar_str = self.progress_bar_str(progress, bar_len)
94 |
95 | bar = f" {self.icon}{self.title} {progress_bar_str} {p_pad}{percent}% ETA {new_time}"
96 |
97 | if len(bar) > columns - 2:
98 | bar = bar[: columns - 2]
99 | else:
100 | bar += " " * (columns - len(bar) - 4)
101 |
102 | sys.stdout.write(bar + "\r")
103 |
104 | def start(self, total: Union[int, float], title: str = "Please wait") -> None:
105 | self.title = title
106 | self.len_title = len(title)
107 | self.total = total
108 | self.begin_time = time()
109 |
110 | try:
111 | self.tick(0)
112 | except UnicodeEncodeError:
113 | self.icon = "& "
114 | self.chars = ["-", "#"]
115 | self.brackets = ("[", "]")
116 | self.part_width = 1
117 |
118 | def progress_bar_str(self, progress: float, width: int) -> str:
119 | whole_width = floor(progress * width)
120 | remainder_width = (progress * width) % 1
121 | part_width = floor(remainder_width * self.part_width)
122 | part_char = self.chars[part_width]
123 |
124 | if width - whole_width - 1 < 0:
125 | part_char = ""
126 |
127 | line = (
128 | self.brackets[0]
129 | + self.chars[-1] * whole_width
130 | + part_char
131 | + self.chars[0] * (width - whole_width - 1)
132 | + self.brackets[1]
133 | )
134 | return line
135 |
136 | @staticmethod
137 | def end() -> None:
138 | sys.stdout.write(" " * (get_terminal_size().columns - 2) + "\r")
139 |
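
Usage sketch for the class above, driving a bar over 100 units of stand-in work:

from time import sleep

bar = ProgressBar("ascii")
bar.start(100, title="Working")
for i in range(100):
    sleep(0.01)  # stand-in for real work
    bar.tick(i + 1)
bar.end()
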
--------------------------------------------------------------------------------
/auto_editor/utils/types.py:
--------------------------------------------------------------------------------
1 | import re
2 |
3 | from typing import List, Tuple, Sequence, Union, Literal
4 |
5 |
6 | ChunkType = List[Tuple[int, int, float]]
7 |
8 |
9 | def split_num_str(val: Union[str, int]) -> Tuple[float, str]:
10 | if isinstance(val, int):
11 | return val, ""
12 | index = 0
13 | for char in val:
14 | if not char.isdigit() and char not in (" ", ".", "-"):
15 | break
16 | index += 1
17 | num, unit = val[:index], val[index:]
18 |
19 | try:
20 | float(num)
21 | except ValueError:
22 | raise TypeError(f"Invalid number: '{val}'")
23 | return float(num), unit
24 |
25 |
26 | def unit_check(unit: str, allowed_units: Sequence[str]) -> None:
27 | if unit not in allowed_units:
28 | raise TypeError(f"Unknown unit: '{unit}'")
29 |
30 |
31 | def float_type(val: Union[str, float]) -> float:
32 | if not isinstance(val, str):
33 | return val
34 |
35 | num, unit = split_num_str(val)
36 | unit_check(unit, ("%", ""))
37 | if unit == "%":
38 | return num / 100
39 | return num
40 |
41 |
42 | def sample_rate_type(val: str) -> int:
43 | num, unit = split_num_str(val)
44 | unit_check(unit, ("Hz", "kHz", ""))
45 | if unit == "kHz":
46 | return int(num * 1000)
47 | return int(num)
48 |
49 |
50 | def frame_type(val: str) -> Union[int, str]:
51 | num, unit = split_num_str(val)
52 | if unit in ("s", "sec", "secs", "second", "seconds"):
53 | return str(num).strip()
54 |
55 | unit_check(unit, ("", "f", "frame", "frames"))
56 | return int(num)
57 |
58 |
59 | def anchor_type(val: str) -> str:
60 | allowed = ("tl", "tr", "bl", "br", "ce")
61 | if val not in allowed:
62 | raise TypeError("Anchor must be: " + " ".join(allowed))
63 | return val
64 |
65 |
66 | def margin_type(val: str) -> Tuple[Union[int, str], Union[int, str]]:
67 | vals = val.strip().split(",")
68 | if len(vals) == 1:
69 | vals.append(vals[0])
70 | if len(vals) != 2:
71 | raise TypeError("--margin has too many arguments.")
72 | return frame_type(vals[0]), frame_type(vals[1])
73 |
74 |
75 | def comma_type(name: str, val: str, min_args: int, max_args: int) -> List[str]:
76 | vals = val.strip().split(",")
77 | if min_args > len(vals):
78 | raise TypeError(f"Too few arguments for {name}.")
79 | if len(vals) > max_args:
80 | raise TypeError(f"Too many arguments for {name}.")
81 | return vals
82 |
83 |
84 | def range_type(val: str) -> List[str]:
85 | return comma_type("range_type", val, 2, 2)
86 |
87 |
88 | def speed_range_type(val: str) -> List[str]:
89 | return comma_type("speed_range_type", val, 3, 3)
90 |
91 |
92 | AlignType = Literal["left", "center", "right"]
93 |
94 |
95 | def align_type(val: str) -> AlignType:
96 | if val == "left":
97 | return "left"
98 | if val == "center":
99 | return "center"
100 | if val == "right":
101 | return "right"
102 | raise TypeError("Align must be 'left', 'right', or 'center'")
103 |
104 |
105 | def color_type(val: str) -> str:
106 | """
107 |     Normalize a color string into a lowercase 6-digit hex code.
108 |
109 | Accepts:
110 | - color names (black, red, blue)
111 | - 3 digit hex codes (#FFF, #3AE)
112 | - 6 digit hex codes (#3F0401, #005601)
113 | """
114 |
115 | color = val.lower()
116 |
117 | if color in colormap:
118 | color = colormap[color]
119 |
120 | if re.match("#[a-f0-9]{3}$", color):
121 | return "#" + "".join([x * 2 for x in color[1:]])
122 |
123 | if re.match("#[a-f0-9]{6}$", color):
124 | return color
125 |
126 | raise ValueError(f"Invalid Color: '{color}'")
127 |
128 |
129 | StreamType = Union[int, Literal["all"]]
130 |
131 |
132 | def stream_type(val: str) -> StreamType:
133 | if val == "all":
134 | return "all"
135 | return int(val)
136 |
137 |
138 | colormap = {
139 | # Taken from https://www.w3.org/TR/css-color-4/#named-color
140 | "aliceblue": "#f0f8ff",
141 | "antiquewhite": "#faebd7",
142 | "aqua": "#00ffff",
143 | "aquamarine": "#7fffd4",
144 | "azure": "#f0ffff",
145 | "beige": "#f5f5dc",
146 | "bisque": "#ffe4c4",
147 | "black": "#000000",
148 | "blanchedalmond": "#ffebcd",
149 | "blue": "#0000ff",
150 | "blueviolet": "#8a2be2",
151 | "brown": "#a52a2a",
152 | "burlywood": "#deb887",
153 | "cadetblue": "#5f9ea0",
154 | "chartreuse": "#7fff00",
155 | "chocolate": "#d2691e",
156 | "coral": "#ff7f50",
157 | "cornflowerblue": "#6495ed",
158 | "cornsilk": "#fff8dc",
159 | "crimson": "#dc143c",
160 | "cyan": "#00ffff",
161 | "darkblue": "#00008b",
162 | "darkcyan": "#008b8b",
163 | "darkgoldenrod": "#b8860b",
164 | "darkgray": "#a9a9a9",
165 | "darkgrey": "#a9a9a9",
166 | "darkgreen": "#006400",
167 | "darkkhaki": "#bdb76b",
168 | "darkmagenta": "#8b008b",
169 | "darkolivegreen": "#556b2f",
170 | "darkorange": "#ff8c00",
171 | "darkorchid": "#9932cc",
172 | "darkred": "#8b0000",
173 | "darksalmon": "#e9967a",
174 | "darkseagreen": "#8fbc8f",
175 | "darkslateblue": "#483d8b",
176 | "darkslategray": "#2f4f4f",
177 | "darkslategrey": "#2f4f4f",
178 | "darkturquoise": "#00ced1",
179 | "darkviolet": "#9400d3",
180 | "deeppink": "#ff1493",
181 | "deepskyblue": "#00bfff",
182 | "dimgray": "#696969",
183 | "dimgrey": "#696969",
184 | "dodgerblue": "#1e90ff",
185 | "firebrick": "#b22222",
186 | "floralwhite": "#fffaf0",
187 | "forestgreen": "#228b22",
188 | "fuchsia": "#ff00ff",
189 | "gainsboro": "#dcdcdc",
190 | "ghostwhite": "#f8f8ff",
191 | "gold": "#ffd700",
192 | "goldenrod": "#daa520",
193 | "gray": "#808080",
194 | "grey": "#808080",
195 | "green": "#008000",
196 | "greenyellow": "#adff2f",
197 | "honeydew": "#f0fff0",
198 | "hotpink": "#ff69b4",
199 | "indianred": "#cd5c5c",
200 | "indigo": "#4b0082",
201 | "ivory": "#fffff0",
202 | "khaki": "#f0e68c",
203 | "lavender": "#e6e6fa",
204 | "lavenderblush": "#fff0f5",
205 | "lawngreen": "#7cfc00",
206 | "lemonchiffon": "#fffacd",
207 | "lightblue": "#add8e6",
208 | "lightcoral": "#f08080",
209 | "lightcyan": "#e0ffff",
210 | "lightgoldenrodyellow": "#fafad2",
211 | "lightgreen": "#90ee90",
212 | "lightgray": "#d3d3d3",
213 | "lightgrey": "#d3d3d3",
214 | "lightpink": "#ffb6c1",
215 | "lightsalmon": "#ffa07a",
216 | "lightseagreen": "#20b2aa",
217 | "lightskyblue": "#87cefa",
218 | "lightslategray": "#778899",
219 | "lightslategrey": "#778899",
220 | "lightsteelblue": "#b0c4de",
221 | "lightyellow": "#ffffe0",
222 | "lime": "#00ff00",
223 | "limegreen": "#32cd32",
224 | "linen": "#faf0e6",
225 | "magenta": "#ff00ff",
226 | "maroon": "#800000",
227 | "mediumaquamarine": "#66cdaa",
228 | "mediumblue": "#0000cd",
229 | "mediumorchid": "#ba55d3",
230 | "mediumpurple": "#9370db",
231 | "mediumseagreen": "#3cb371",
232 | "mediumslateblue": "#7b68ee",
233 | "mediumspringgreen": "#00fa9a",
234 | "mediumturquoise": "#48d1cc",
235 | "mediumvioletred": "#c71585",
236 | "midnightblue": "#191970",
237 | "mintcream": "#f5fffa",
238 | "mistyrose": "#ffe4e1",
239 | "moccasin": "#ffe4b5",
240 | "navajowhite": "#ffdead",
241 | "navy": "#000080",
242 | "oldlace": "#fdf5e6",
243 | "olive": "#808000",
244 | "olivedrab": "#6b8e23",
245 | "orange": "#ffa500",
246 | "orangered": "#ff4500",
247 | "orchid": "#da70d6",
248 | "palegoldenrod": "#eee8aa",
249 | "palegreen": "#98fb98",
250 | "paleturquoise": "#afeeee",
251 | "palevioletred": "#db7093",
252 | "papayawhip": "#ffefd5",
253 | "peachpuff": "#ffdab9",
254 | "peru": "#cd853f",
255 | "pink": "#ffc0cb",
256 | "plum": "#dda0dd",
257 | "powderblue": "#b0e0e6",
258 | "purple": "#800080",
259 | "rebeccapurple": "#663399",
260 | "red": "#ff0000",
261 | "rosybrown": "#bc8f8f",
262 | "royalblue": "#4169e1",
263 | "saddlebrown": "#8b4513",
264 | "salmon": "#fa8072",
265 | "sandybrown": "#f4a460",
266 | "seagreen": "#2e8b57",
267 | "seashell": "#fff5ee",
268 | "sienna": "#a0522d",
269 | "silver": "#c0c0c0",
270 | "skyblue": "#87ceeb",
271 | "slateblue": "#6a5acd",
272 | "slategray": "#708090",
273 | "slategrey": "#708090",
274 | "snow": "#fffafa",
275 | "springgreen": "#00ff7f",
276 | "steelblue": "#4682b4",
277 | "tan": "#d2b48c",
278 | "teal": "#008080",
279 | "thistle": "#d8bfd8",
280 | "tomato": "#ff6347",
281 | "turquoise": "#40e0d0",
282 | "violet": "#ee82ee",
283 | "wheat": "#f5deb3",
284 | "white": "#ffffff",
285 | "whitesmoke": "#f5f5f5",
286 | "yellow": "#ffff00",
287 | "yellowgreen": "#9acd32",
288 | }
289 |
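
A few examples of what the parsers above accept and return:

assert float_type("50%") == 0.5
assert sample_rate_type("44.1 kHz") == 44100
assert margin_type("0.2s") == ("0.2", "0.2")
assert color_type("#FFF") == "#ffffff"
assert color_type("rebeccapurple") == "#663399"
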
--------------------------------------------------------------------------------
/auto_editor/validate_input.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import subprocess
4 | from typing import List
5 |
6 | from auto_editor.utils.log import Log
7 | from auto_editor.utils.func import get_stdout
8 | from auto_editor.ffwrapper import FFmpeg, FileInfo
9 |
10 |
11 | def get_domain(url: str) -> str:
12 | from urllib.parse import urlparse
13 |
14 | t = urlparse(url).netloc
15 | return ".".join(t.split(".")[-2:])
16 |
17 |
18 | def download_video(my_input: str, args, ffmpeg: FFmpeg, log: Log) -> str:
19 | log.conwrite("Downloading video...")
20 |
21 | download_format = args.download_format
22 |
23 | if download_format is None and get_domain(my_input) == "youtube.com":
24 | download_format = "bestvideo[ext=mp4]+bestaudio[ext=m4a]"
25 |
26 | if args.output_format is None:
27 | output_format = re.sub(r"\W+", "-", os.path.splitext(my_input)[0]) + ".%(ext)s"
28 | else:
29 | output_format = args.output_format
30 |
31 | yt_dlp_path = args.yt_dlp_location
32 |
33 | cmd = ["--ffmpeg-location", ffmpeg.path]
34 |
35 | if download_format is not None:
36 | cmd.extend(["-f", download_format])
37 |
38 | cmd.extend(["-o", output_format, my_input])
39 |
40 | if args.yt_dlp_extras is not None:
41 | cmd.extend(args.yt_dlp_extras.split(" "))
42 |
43 | location = get_stdout(
44 | [yt_dlp_path, "--get-filename", "--no-warnings"] + cmd
45 | ).strip()
46 |
47 | if not os.path.isfile(location):
48 | subprocess.run([yt_dlp_path] + cmd)
49 |
50 | if not os.path.isfile(location):
51 | log.error(f"Download file wasn't created: {location}")
52 |
53 | return location
54 |
55 |
56 | def valid_input(inputs: List[str], ffmpeg: FFmpeg, args, log: Log) -> List[FileInfo]:
57 | new_inputs = []
58 |
59 | for my_input in inputs:
60 | if os.path.isfile(my_input):
61 | _, ext = os.path.splitext(my_input)
62 | if ext == "":
63 | log.error("File must have an extension.")
64 | new_inputs.append(FileInfo(my_input, ffmpeg, log))
65 |
66 | elif my_input.startswith("http://") or my_input.startswith("https://"):
67 | new_inputs.append(
68 | FileInfo(download_video(my_input, args, ffmpeg, log), ffmpeg, log)
69 | )
70 | else:
71 | if os.path.isdir(my_input):
72 | log.error("Input must be a file or a URL, not a directory.")
73 | log.error(f"Could not find file: '{my_input}'")
74 |
75 | return new_inputs
76 |
--------------------------------------------------------------------------------
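
A quick sanity check of `get_domain`'s two-label heuristic (a standalone sketch; the asserts are illustrative and not part of the module). It also shows why `youtu.be` short links don't pick up the YouTube-specific default download format:

```python
from urllib.parse import urlparse

def get_domain(url: str) -> str:
    t = urlparse(url).netloc
    return ".".join(t.split(".")[-2:])

assert get_domain("https://www.youtube.com/watch?v=xyz") == "youtube.com"
# A different two-label domain, so the "bestvideo[ext=mp4]+bestaudio[ext=m4a]"
# default above is not applied to short links.
assert get_domain("https://youtu.be/xyz") == "youtu.be"
```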
/auto_editor/vanparse.py:
--------------------------------------------------------------------------------
1 | import re
2 | import sys
3 | import difflib
4 | import textwrap
5 | from dataclasses import dataclass
6 | from shutil import get_terminal_size
7 |
8 | from typing import List, Sequence, Optional, Any, Union, Dict
9 |
10 | import auto_editor
11 | from auto_editor.utils.log import Log
12 |
13 |
14 | @dataclass
15 | class Required:
16 | names: Sequence[str]
17 | nargs: Union[int, str] = "*"
18 | type: type = str
19 | choices: Optional[Sequence[str]] = None
20 | help: str = ""
21 | _type: str = "required"
22 |
23 |
24 | @dataclass
25 | class Options:
26 | names: Sequence[str]
27 | nargs: Union[int, str] = 1
28 | type: type = str
29 | default: Optional[Union[int, str]] = None
30 | flag: bool = False
31 | choices: Optional[Sequence[str]] = None
32 | help: str = ""
33 | _type: str = "option"
34 |
35 |
36 | @dataclass
37 | class OptionText:
38 | text: str
39 | _type: str
40 |
41 |
42 | def indent(text: str, prefix: str) -> str:
43 | def predicate(line: str) -> str:
44 | return line.strip()
45 |
46 | def prefixed_lines():
47 | for line in text.splitlines(True):
48 | yield (prefix + line if predicate(line) else line)
49 |
50 | return "".join(prefixed_lines())
51 |
52 |
53 | def out(text: str) -> None:
54 | width = get_terminal_size().columns - 3
55 |
56 | indent_regex = re.compile(r"^(\s+)")
57 | wrapped_lines = []
58 |
59 | for line in text.split("\n"):
60 | exist_indent = re.search(indent_regex, line)
61 | pre_indent = exist_indent.groups()[0] if exist_indent else ""
62 |
63 | wrapped_lines.append(
64 | textwrap.fill(line, width=width, subsequent_indent=pre_indent)
65 | )
66 |
67 | print("\n".join(wrapped_lines))
68 |
69 |
70 | def print_program_help(
71 | reqs: List[Required], args: List[Union[Options, OptionText]]
72 | ) -> None:
73 | text = ""
74 | for arg in args:
75 | if isinstance(arg, OptionText):
76 | text += f"\n {arg.text}\n" if arg._type == "text" else "\n"
77 | else:
78 | text += " " + ", ".join(arg.names) + f": {arg.help}\n"
79 | text += "\n"
80 | for req in reqs:
81 | text += " " + ", ".join(req.names) + f": {req.help}\n"
82 | out(text)
83 |
84 |
85 | def get_help_data() -> Dict[str, Dict[str, str]]:
86 | import json
87 | import os.path
88 |
89 | dirpath = os.path.dirname(os.path.realpath(__file__))
90 |
91 | with open(os.path.join(dirpath, "help.json"), "r") as fileobj:
92 | data = json.load(fileobj)
93 |
94 | return data
95 |
96 |
97 | def print_option_help(program_name: str, option: Options) -> None:
98 | text = " " + ", ".join(option.names) + f"\n {option.help}\n\n"
99 |
100 | data = get_help_data()
101 |
102 | if option.names[0] in data[program_name]:
103 | text += indent(data[program_name][option.names[0]], " ") + "\n\n"
104 |
105 | if option.flag:
106 | text += " type: flag\n"
107 | else:
108 | text += f" type: {option.type.__name__}\n"
109 |
110 | if option.nargs != 1:
111 | text += f" nargs: {option.nargs}\n"
112 |
113 | if option.default is not None:
114 | text += f" default: {option.default}\n"
115 |
116 | if option.choices is not None:
117 | text += " choices: " + ", ".join(option.choices) + "\n"
118 |
119 | out(text)
120 |
121 |
122 | def to_underscore(name: str) -> str:
123 | """Convert new style options to old style. e.g. --hello-world -> --hello_world"""
124 | return name[:2] + name[2:].replace("-", "_")
125 |
126 |
127 | def to_key(op: Union[Options, Required]) -> str:
128 | """Convert option name to arg key. e.g. --hello-world -> hello_world"""
129 | return op.names[0][:2].replace("-", "") + op.names[0][2:].replace("-", "_")
130 |
131 |
132 | def get_option(name: str, options: List[Options]) -> Optional[Options]:
133 | for option in options:
134 | if name in option.names or name in map(to_underscore, option.names):
135 | return option
136 | return None
137 |
138 |
139 | class ArgumentParser:
140 | def __init__(self, program_name: str) -> None:
141 | self.program_name = program_name
142 | self.requireds: List[Required] = []
143 | self.options: List[Options] = []
144 | self.args: List[Union[Options, OptionText]] = []
145 |
146 | def add_argument(self, *args: str, **kwargs) -> None:
147 | x = Options(args, **kwargs)
148 | self.options.append(x)
149 | self.args.append(x)
150 |
151 | def add_required(self, *args: str, **kwargs) -> None:
152 | self.requireds.append(Required(args, **kwargs))
153 |
154 | def add_text(self, text: str) -> None:
155 | self.args.append(OptionText(text, "text"))
156 |
157 | def add_blank(self) -> None:
158 | self.args.append(OptionText("", "blank"))
159 |
160 | def parse_args(self, sys_args: List[str]):
161 | if sys_args == []:
162 | out(get_help_data()[self.program_name]["_"])
163 | sys.exit()
164 |
165 | if sys_args == ["-v"] or sys_args == ["-V"]:
166 | print(f"{auto_editor.version} ({auto_editor.__version__})")
167 | sys.exit()
168 |
169 | return ParseOptions(
170 | sys_args, self.program_name, self.options, self.requireds, self.args
171 | )
172 |
173 |
174 | class ParseOptions:
175 | @staticmethod
176 | def parse_value(option: Union[Options, Required], val: Optional[str]) -> Any:
177 | if val is None and option.nargs == 1:
178 | Log().error(f"{option.names[0]} needs argument.")
179 |
180 | try:
181 | value = option.type(val)
182 |         except (TypeError, ValueError) as e:  # e.g. int("abc") raises ValueError, not TypeError
183 | Log().error(str(e))
184 |
185 | if option.choices is not None and value not in option.choices:
186 | my_choices = ", ".join(option.choices)
187 |
188 | Log().error(
189 | f"{value} is not a choice for {option.names[0]}\nchoices are:\n {my_choices}"
190 | )
191 |
192 | return value
193 |
194 | def set_arg_list(
195 | self, option_list_name: Optional[str], my_list: list, list_type: Optional[type]
196 | ) -> None:
197 | assert option_list_name is not None
198 | if list_type is not None:
199 | try:
200 | setattr(self, option_list_name, list(map(list_type, my_list)))
201 | except (TypeError, ValueError) as e:
202 | Log().error(str(e))
203 | else:
204 | setattr(self, option_list_name, my_list)
205 |
206 | def __init__(
207 | self,
208 | sys_args: List[str],
209 | program_name: str,
210 | options: List[Options],
211 | requireds: List[Required],
212 | args: List[Union[Options, OptionText]],
213 | ) -> None:
214 |
215 | option_names: List[str] = []
216 |
217 | builtin_help = Options(
218 | ("--help", "-h"),
219 | flag=True,
220 | help="Show info about this program or option then exit.",
221 | )
222 | options.append(builtin_help)
223 | args.append(builtin_help)
224 |
225 | self.help = False
226 |
227 | # Set default attributes
228 | for op in options:
229 | for name in op.names:
230 | option_names.append(name)
231 |
232 | if op.flag:
233 | value: Any = False
234 | elif op.nargs != 1:
235 | value = []
236 | elif op.default is None:
237 | value = None
238 | else:
239 | value = op.type(op.default)
240 |
241 | setattr(self, to_key(op), value)
242 |
243 | # Figure out command line options changed by user.
244 | used_options: List[Options] = []
245 |
246 | req_list = []
247 | req_list_name = requireds[0].names[0]
248 | req_list_type = requireds[0].type
249 | setting_req_list = requireds[0].nargs != 1
250 |
251 | option_list = []
252 | op_list_name = None
253 | op_list_type: Optional[type] = str
254 | setting_op_list = False
255 |
256 | i = 0
257 | while i < len(sys_args):
258 | arg = sys_args[i]
259 | option = get_option(arg, options)
260 |
261 | if option is None:
262 | if setting_op_list:
263 | option_list.append(arg)
264 |
265 | elif requireds and not arg.startswith("--"):
266 |
267 | if requireds[0].nargs == 1:
268 | setattr(
269 | self, req_list_name, self.parse_value(requireds[0], arg)
270 | )
271 |                         requireds.pop(0)  # consume the first required, which was just set
272 | else:
273 | req_list.append(arg)
274 | else:
275 | label = "option" if arg.startswith("--") else "short"
276 |
277 |                 # Check for a stray comma first, so the "did you mean" hint doesn't mask it.
278 | if arg.replace(",", "") in option_names:
279 | Log().error(f"Option '{arg}' has an unnecessary comma.")
280 |
281 | close_matches = difflib.get_close_matches(arg, option_names)
282 | if close_matches:
283 | Log().error(
284 | f"Unknown {label}: {arg}\n\n Did you mean:\n "
285 | + ", ".join(close_matches)
286 | )
287 | Log().error(f"Unknown {label}: {arg}")
288 | else:
289 | if op_list_name is not None:
290 | self.set_arg_list(op_list_name, option_list, op_list_type)
291 |
292 | if option in used_options:
293 | Log().error(f"Cannot repeat option {option.names[0]} twice.")
294 |
295 | used_options.append(option)
296 |
297 | setting_op_list = False
298 | option_list = []
299 | op_list_name = None
300 |
301 | key = to_key(option)
302 |
303 | next_arg = None if i == len(sys_args) - 1 else sys_args[i + 1]
304 | if next_arg == "-h" or next_arg == "--help":
305 | print_option_help(program_name, option)
306 | sys.exit()
307 |
308 | if option.nargs != 1:
309 | setting_op_list = True
310 | op_list_name = key
311 | op_list_type = option.type
312 | elif option.flag:
313 | value = True
314 | else:
315 | value = self.parse_value(option, next_arg)
316 | i += 1
317 | setattr(self, key, value)
318 |
319 | i += 1
320 |
321 | if setting_op_list:
322 | self.set_arg_list(op_list_name, option_list, op_list_type)
323 |
324 | if setting_req_list:
325 | self.set_arg_list(req_list_name, req_list, req_list_type)
326 |
327 | if self.help:
328 | print_program_help(requireds, args)
329 | sys.exit()
330 |
--------------------------------------------------------------------------------
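
A minimal sketch of driving this parser (the program name and options here are hypothetical, not ones auto-editor actually registers): required positionals with `nargs="*"` collect into a list, `flag=True` options become booleans, and each option lands as an attribute named by `to_key`.

```python
from auto_editor.vanparse import ArgumentParser

parser = ArgumentParser("demo")  # hypothetical program name
parser.add_required("input", nargs="*", help="Media files to process.")
parser.add_argument("--margin", type=int, default=6, help="Padding in frames.")
parser.add_argument("--quiet", flag=True, help="Suppress console output.")

args = parser.parse_args(["in.mp4", "--margin", "12", "--quiet"])
print(args.input, args.margin, args.quiet)  # ['in.mp4'] 12 True
```

Note that repeating an option, or misspelling one closely enough for `difflib.get_close_matches` to find it, exits through `Log().error` rather than raising.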
/auto_editor/wavfile.py:
--------------------------------------------------------------------------------
1 | import io
2 | import struct
3 | from typing import Literal, Optional, Tuple, Union
4 |
5 | import numpy as np
6 |
7 | PCM = 0x0001
8 | IEEE_FLOAT = 0x0003
9 | EXTENSIBLE = 0xFFFE
10 |
11 | AudioData = Union[np.memmap, np.ndarray]
12 | EndianType = Literal[">", "<"] # Big Endian, Little Endian
13 |
14 |
15 | def _read_fmt_chunk(
16 | fid: io.BufferedReader, en: EndianType
17 | ) -> Tuple[int, int, int, int, int]:
18 | size: int = struct.unpack(f"{en}I", fid.read(4))[0]
19 |
20 | if size < 16:
21 | raise ValueError("Binary structure of wave file is not compliant")
22 |
23 | res = struct.unpack(f"{en}HHIIHH", fid.read(16))
24 | bytes_read = 16
25 |
26 | format_tag, channels, fs, _, block_align, bit_depth = res
27 | # underscore is "bytes_per_second"
28 |
29 | if format_tag == EXTENSIBLE and size >= 18:
30 | ext_chunk_size = struct.unpack(f"{en}H", fid.read(2))[0]
31 | bytes_read += 2
32 | if ext_chunk_size >= 22:
33 | extensible_chunk_data = fid.read(22)
34 | bytes_read += 22
35 | raw_guid = extensible_chunk_data[6:22]
36 |
37 | if en == ">":
38 | tail = b"\x00\x00\x00\x10\x80\x00\x00\xAA\x00\x38\x9B\x71"
39 | else:
40 | tail = b"\x00\x00\x10\x00\x80\x00\x00\xAA\x00\x38\x9B\x71"
41 | if raw_guid.endswith(tail):
42 | format_tag = struct.unpack(f"{en}I", raw_guid[:4])[0]
43 | else:
44 | raise ValueError("Binary structure of wave file is not compliant")
45 |
46 | if format_tag not in {PCM, IEEE_FLOAT}:
47 | raise ValueError(
48 | f"Encountered unknown format tag: {format_tag:#06x}, while reading fmt chunk."
49 | )
50 |
51 | # move file pointer to next chunk
52 | if size > bytes_read:
53 | fid.read(size - bytes_read)
54 |
55 |     # fmt size should always be 16, 18, or 40; handle a possible pad byte just in case
56 | _handle_pad_byte(fid, size)
57 |
58 | return format_tag, channels, fs, block_align, bit_depth
59 |
60 |
61 | def _read_data_chunk(
62 | fid: io.BufferedReader,
63 | format_tag: int,
64 | channels: int,
65 | bit_depth: int,
66 | en: EndianType,
67 | block_align: int,
68 | data_size: Optional[int],
69 | ) -> AudioData:
70 |
71 | size: int = struct.unpack(f"{en}I", fid.read(4))[0]
72 | if data_size is not None:
73 | # size is only 32-bits here, so get real size from header.
74 | size = data_size
75 |
76 | bytes_per_sample = block_align // channels
77 | n_samples = size // bytes_per_sample
78 |
79 | if bytes_per_sample in (3, 5, 6, 7):
80 | raise ValueError(f"Unsupported bytes per sample: {bytes_per_sample}")
81 |
82 | if format_tag == PCM:
83 | if 1 <= bit_depth <= 8:
84 | dtype = "u1" # WAVs of 8-bit integer or less are unsigned
85 | elif bit_depth <= 64:
86 | dtype = f"{en}i{bytes_per_sample}"
87 | else:
88 | raise ValueError(
89 | f"Unsupported bit depth: the WAV file has {bit_depth}-bit integer data."
90 | )
91 | elif format_tag == IEEE_FLOAT:
92 | if bit_depth in (32, 64):
93 | dtype = f"{en}f{bytes_per_sample}"
94 | else:
95 | raise ValueError(
96 | f"Unsupported bit depth: the WAV file has {bit_depth}-bit floating-point data."
97 | )
98 | else:
99 | raise ValueError(
100 | f"Unknown wave file format: {format_tag:#06x}. Supported formats: PCM, IEEE_FLOAT"
101 | )
102 |
103 | start = fid.tell()
104 | data = np.memmap(fid, dtype=dtype, mode="c", offset=start, shape=(n_samples,))
105 | fid.seek(start + size)
106 |
107 | _handle_pad_byte(fid, size)
108 |
109 | if channels > 1:
110 | try:
111 | _data = data.reshape(-1, channels)
112 | except ValueError:
113 |             _data = data[:-1].reshape(-1, channels)  # odd sample count; drop the last one
114 | return _data
115 | return data
116 |
117 |
118 | def _skip_unknown_chunk(fid: io.BufferedReader, en: EndianType) -> None:
119 | data = fid.read(4)
120 | if data:
121 | size = struct.unpack(f"{en}I", data)[0]
122 | fid.seek(size, 1)
123 | _handle_pad_byte(fid, size)
124 |
125 |
126 | def _read_rf64_chunk(fid: io.BufferedReader) -> Tuple[int, int, EndianType]:
127 |
128 | # https://tech.ebu.ch/docs/tech/tech3306v1_0.pdf
129 | # https://www.itu.int/dms_pubrec/itu-r/rec/bs/R-REC-BS.2088-1-201910-I!!PDF-E.pdf
130 |
131 | heading = fid.read(12)
132 | if heading != b"\xff\xff\xff\xffWAVEds64":
133 | raise ValueError(f"Wrong heading: {repr(heading)}")
134 |
135 | chunk_size = fid.read(4)
136 |
137 | bw_size_low = fid.read(4)
138 | bw_size_high = fid.read(4)
139 |
140 | en: EndianType = ">" if (bw_size_high > bw_size_low) else "<"
141 |
142 | data_size_low = fid.read(4)
143 | data_size_high = fid.read(4)
144 |
145 | # Combine bw_size and data_size to 64-bit ints
146 |
147 | def combine(a: bytes, b: bytes) -> int:
148 |         return struct.unpack(f"{en}I", a)[0] | struct.unpack(f"{en}I", b)[0] << 32
149 | 
150 |     bw_size = combine(bw_size_low, bw_size_high)
151 |     data_size = combine(data_size_low, data_size_high)
152 | 
153 |     # Skip whatever remains of the ds64 chunk (sample count and chunk table).
154 |     ds64_size: int = struct.unpack(f"{en}I", chunk_size)[0]
155 |     if ds64_size > 16:
156 |         fid.read(ds64_size - 16)
157 | 
158 |     return data_size, bw_size, en
159 | 
160 | 
161 | def _read_riff_chunk(sig: bytes, fid: io.BufferedReader) -> Tuple[None, int, EndianType]:
162 | en: EndianType = "<" if sig == b"RIFF" else ">"
163 | file_size: int = struct.unpack(f"{en}I", fid.read(4))[0] + 8
164 |
165 | form = fid.read(4)
166 | if form != b"WAVE":
167 | raise ValueError(f"Not a WAV file. RIFF form type is {repr(form)}.")
168 |
169 | return None, file_size, en
170 |
171 |
172 | def _handle_pad_byte(fid: io.BufferedReader, size: int) -> None:
173 | if size % 2 == 1:
174 | fid.seek(1, 1)
175 |
176 |
177 | def read(filename: str) -> Tuple[int, AudioData]:
178 | fid = open(filename, "rb")
179 |
180 | try:
181 | file_sig = fid.read(4)
182 | if file_sig in (b"RIFF", b"RIFX"):
183 | data_size, file_size, en = _read_riff_chunk(file_sig, fid)
184 | elif file_sig == b"RF64":
185 | data_size, file_size, en = _read_rf64_chunk(fid)
186 | else:
187 | raise ValueError(f"File format {repr(file_sig)} not supported.")
188 |
189 | fmt_chunk_received = False
190 | data_chunk_received = False
191 | while fid.tell() < file_size:
192 | chunk_id = fid.read(4)
193 |
194 | if not chunk_id:
195 | if data_chunk_received:
196 | # EOF but data successfully read
197 | break
198 | else:
199 | raise ValueError("Unexpected end of file.")
200 | elif len(chunk_id) < 4:
201 | if fmt_chunk_received and data_chunk_received:
202 | pass
203 | else:
204 | raise ValueError(f"Incomplete chunk ID: {repr(chunk_id)}")
205 |
206 | if chunk_id == b"fmt ":
207 | fmt_chunk_received = True
208 | format_tag, channels, fs, block_align, bit_depth = _read_fmt_chunk(
209 | fid, en
210 | )
211 | elif chunk_id == b"data":
212 | data_chunk_received = True
213 | if not fmt_chunk_received:
214 | raise ValueError("No fmt chunk before data")
215 |
216 | data = _read_data_chunk(
217 | fid,
218 | format_tag,
219 | channels,
220 | bit_depth,
221 | en,
222 | block_align,
223 | data_size,
224 | )
225 | else:
226 | _skip_unknown_chunk(fid, en)
227 |
228 | finally:
229 | fid.seek(0)
230 |
231 | return fs, data
232 |
--------------------------------------------------------------------------------
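
A small usage sketch (the path is one of this repo's test assets): `read` returns the sample rate and a copy-on-write memmap, reshaped to `(n_samples, channels)` for multi-channel files, so slicing stays cheap and writes never reach the file on disk.

```python
from auto_editor.wavfile import read

sr, data = read("resources/wav/pcm-s32le.wav")
print(sr, data.dtype, data.shape)  # e.g. 44100 int32 (n,) or (n, channels)

first_second = data[:sr]  # memmap-backed; no eager full-file load
```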
/example.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/example.mp4
--------------------------------------------------------------------------------
/resources/aac.m4a:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/aac.m4a
--------------------------------------------------------------------------------
/resources/alac.m4a:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/alac.m4a
--------------------------------------------------------------------------------
/resources/data/example_1.5_speed.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/data/example_1.5_speed.npz
--------------------------------------------------------------------------------
/resources/data/example_2.0_speed.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/data/example_2.0_speed.npz
--------------------------------------------------------------------------------
/resources/embedded-image/h264-mjpeg.mkv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/embedded-image/h264-mjpeg.mkv
--------------------------------------------------------------------------------
/resources/embedded-image/h264-mjpeg.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/embedded-image/h264-mjpeg.mp4
--------------------------------------------------------------------------------
/resources/embedded-image/h264-png.mkv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/embedded-image/h264-png.mkv
--------------------------------------------------------------------------------
/resources/embedded-image/h264-png.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/embedded-image/h264-png.mp4
--------------------------------------------------------------------------------
/resources/json/0.1-disjoint.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "0.1.0",
3 | "source": "example.mp4",
4 | "chunks": [
5 | [
6 | 0,
7 | 26,
8 | 1.0
9 | ],
10 | [
11 | 27,
12 | 34,
13 | 99999.0
14 | ],
15 | [
16 | 34,
17 | 396,
18 | 1.0
19 | ],
20 | [
21 | 396,
22 | 410,
23 | 99999.0
24 | ],
25 | [
26 | 410,
27 | 522,
28 | 1.0
29 | ],
30 | [
31 | 522,
32 | 1192,
33 | 99999.0
34 | ],
35 | [
36 | 1192,
37 | 1220,
38 | 1.0
39 | ],
40 | [
41 | 1220,
42 | 1273,
43 | 99999.0
44 | ]
45 | ]
46 | }
47 |
--------------------------------------------------------------------------------
/resources/json/0.1-non-zero-start.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": "0.1.0",
3 | "source": "example.mp4",
4 | "chunks": [
5 | [
6 | 1,
7 | 26,
8 | 1.0
9 | ],
10 | [
11 | 26,
12 | 34,
13 | 99999.0
14 | ],
15 | [
16 | 34,
17 | 396,
18 | 1.0
19 | ],
20 | [
21 | 396,
22 | 410,
23 | 99999.0
24 | ],
25 | [
26 | 410,
27 | 522,
28 | 1.0
29 | ],
30 | [
31 | 522,
32 | 1192,
33 | 99999.0
34 | ],
35 | [
36 | 1192,
37 | 1220,
38 | 1.0
39 | ],
40 | [
41 | 1220,
42 | 1273,
43 | 99999.0
44 | ]
45 | ]
46 | }
--------------------------------------------------------------------------------
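
Both fixtures use the v0.1 timeline layout: each chunk is `[start_frame, end_frame, speed]`, with the sentinel speed `99999.0` marking a range to drop. The filenames describe the edge case each exercises: `0.1-disjoint` leaves frame 26 uncovered by any chunk, and `0.1-non-zero-start` begins at frame 1 rather than 0. A sketch of tallying the frames a timeline keeps at normal speed (assuming this chunk layout):

```python
import json

with open("resources/json/0.1-disjoint.json") as f:
    timeline = json.load(f)

# 99999.0 is the "cut" sentinel; everything else survives the edit.
kept = sum(end - start for start, end, speed in timeline["chunks"] if speed != 99999.0)
print(kept)
```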
/resources/mono.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/mono.mp3
--------------------------------------------------------------------------------
/resources/multi-track.mov:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/multi-track.mov
--------------------------------------------------------------------------------
/resources/new-commentary.mp3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/new-commentary.mp3
--------------------------------------------------------------------------------
/resources/only-video/man-on-green-screen.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/only-video/man-on-green-screen.gif
--------------------------------------------------------------------------------
/resources/only-video/man-on-green-screen.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/only-video/man-on-green-screen.mp4
--------------------------------------------------------------------------------
/resources/subtitle.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/subtitle.mp4
--------------------------------------------------------------------------------
/resources/testsrc.mkv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/testsrc.mkv
--------------------------------------------------------------------------------
/resources/testsrc.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/testsrc.mp4
--------------------------------------------------------------------------------
/resources/wav/example-cut-s16le.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/wav/example-cut-s16le.wav
--------------------------------------------------------------------------------
/resources/wav/pcm-f32le.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/wav/pcm-f32le.wav
--------------------------------------------------------------------------------
/resources/wav/pcm-s32le.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/petermg/auto-editor/cabfb0c7d123c2549914d5d3d60962d22e29a3c8/resources/wav/pcm-s32le.wav
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import re
2 | from setuptools import setup, find_packages
3 |
4 |
5 | def pip_version():
6 | with open("auto_editor/__init__.py") as f:
7 | version_content = f.read()
8 |
9 | version_match = re.search(
10 | r"^__version__ = ['\"]([^'\"]*)['\"]", version_content, re.M
11 | )
12 |
13 | if version_match:
14 | return version_match.group(1)
15 |
16 | raise ValueError("Unable to find version string.")
17 |
18 |
19 | with open("README.md", "r") as f:
20 | long_description = f.read()
21 |
22 | setup(
23 | name="auto-editor",
24 | version=pip_version(),
25 | description="Auto-Editor: Effort free video editing!",
26 | long_description=long_description,
27 | long_description_content_type="text/markdown",
28 | license="Unlicense",
29 | url="https://auto-editor.com",
30 | project_urls={
31 | "Bug Tracker": "https://github.com/WyattBlue/auto-editor/issues",
32 | "Source Code": "https://github.com/WyattBlue/auto-editor",
33 | },
34 | author="WyattBlue",
35 | author_email="wyattblue@auto-editor.com",
36 | keywords="video audio media editor editing processing nonlinear automatic "
37 | "silence-detect silence-removal silence-speedup motion-detection",
38 | packages=find_packages(),
39 | package_data={
40 | "auto_editor": [
41 | "help.json",
42 | "ffmpeg/LICENSE.txt",
43 | "ffmpeg/Windows/ffmpeg.exe",
44 | "ffmpeg/Windows/libopenh264.dll",
45 | "ffmpeg/Darwin/ffmpeg",
46 | ],
47 | },
48 | include_package_data=True,
49 | zip_safe=False,
50 | install_requires=[
51 | "numpy>=1.21.0",
52 | "yt-dlp>=2022.1.21",
53 | "pillow==9.1.0",
54 | "av==9.2.0",
55 | ],
56 | python_requires=">=3.8",
57 | classifiers=[
58 | "Topic :: Multimedia :: Sound/Audio",
59 | "Topic :: Multimedia :: Video",
60 | "License :: Public Domain",
61 | "License :: OSI Approved :: The Unlicense (Unlicense)",
62 | "Environment :: Console",
63 | "Natural Language :: English",
64 | "Intended Audience :: End Users/Desktop",
65 | "Development Status :: 5 - Production/Stable",
66 | "Programming Language :: Python",
67 | "Programming Language :: Python :: 3",
68 | "Programming Language :: Python :: 3 :: Only",
69 | "Programming Language :: Python :: 3.8",
70 | "Programming Language :: Python :: 3.9",
71 | "Programming Language :: Python :: 3.10",
72 | "Programming Language :: Python :: Implementation :: CPython",
73 | "Programming Language :: Python :: Implementation :: PyPy",
74 | ],
75 | entry_points={
76 | "console_scripts": [
77 | "auto-editor=auto_editor.__main__:main",
78 | "aedesc=auto_editor.subcommands.desc:main",
79 | "aeinfo=auto_editor.subcommands.info:main",
80 | "aesubdump=auto_editor.subcommands.subdump:main",
81 | "aegrep=auto_editor.subcommands.grep:main",
82 | "aelevels=auto_editor.subcommands.levels:main",
83 | ]
84 | },
85 | )
86 |
--------------------------------------------------------------------------------
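
For reference, a standalone check of the version regex `pip_version` relies on (the version string here is made up for illustration):

```python
import re

sample = '__version__ = "22.17.1"\n'  # illustrative, not the real version
m = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", sample, re.M)
assert m is not None and m.group(1) == "22.17.1"
```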