--------------------------------------------------------------------------------
/moviepy/video/fx/scroll.py:
--------------------------------------------------------------------------------
def scroll(
    clip, w=None, h=None, x_speed=0, y_speed=0, x_start=0, y_start=0, apply_to="mask"
):
    """Scrolls horizontally or vertically a clip, e.g. to make end credits.

    Parameters
    ----------

    w, h
        The width and height of the final clip. Default to clip.w and clip.h.

    x_speed, y_speed
        Scrolling speed, in pixels per second, along each axis.

    x_start, y_start
        Position (in pixels) of the top-left corner of the visible
        window at t=0.

    apply_to
        Clip attribute(s) the effect is also applied to (``"mask"`` by default).
    """
    if w is None:
        w = clip.w
    if h is None:
        h = clip.h

    max_x, max_y = w - 1, h - 1

    def scrolled(get_frame, t):
        # Clamp the window offset so the crop never leaves the frame.
        x_offset = x_start + round(x_speed * t)
        y_offset = y_start + round(y_speed * t)
        x = int(min(max(x_offset, 0), max_x))
        y = int(min(max(y_offset, 0), max_y))
        return get_frame(t)[y : y + h, x : x + w]

    return clip.transform(scrolled, apply_to=apply_to)
35 |
--------------------------------------------------------------------------------
/moviepy/audio/fx/audio_loop.py:
--------------------------------------------------------------------------------
1 | from moviepy.audio.AudioClip import concatenate_audioclips
2 | from moviepy.decorators import audio_video_fx
3 |
4 |
@audio_video_fx
def audio_loop(clip, n_loops=None, duration=None):
    """Loops over an audio clip.

    Returns an audio clip that plays the given clip either
    ``n_loops`` times, or during ``duration`` seconds.

    Examples
    --------

    >>> from moviepy import *
    >>> videoclip = VideoFileClip('myvideo.mp4')
    >>> music = AudioFileClip('music.ogg')
    >>> audio = afx.audio_loop( music, duration=videoclip.duration)
    >>> videoclip.with_audio(audio)

    """
    if duration is None:
        return concatenate_audioclips([clip] * n_loops)

    # Loop enough times to cover ``duration``, then trim to the exact length.
    n_loops = int(duration / clip.duration) + 1
    return concatenate_audioclips([clip] * n_loops).with_duration(duration)
27 |
--------------------------------------------------------------------------------
/moviepy/video/fx/fadein.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def fadein(clip, duration, initial_color=None):
    """Makes the clip progressively appear from some color (black by default),
    over ``duration`` seconds at the beginning of the clip. Can be used for
    masks too, where the initial color must be a number between 0 and 1.

    For cross-fading (progressive appearance or disappearance of a clip
    over another clip, see ``transfx.crossfadein``
    """
    if initial_color is None:
        initial_color = 0 if clip.is_mask else [0, 0, 0]
    initial_color = np.array(initial_color)

    def fade_filter(get_frame, t):
        # Past the fade-in window, frames pass through untouched.
        if t >= duration:
            return get_frame(t)
        progress = 1.0 * t / duration
        return progress * get_frame(t) + (1 - progress) * initial_color

    return clip.transform(fade_filter)
25 |
--------------------------------------------------------------------------------
/moviepy/audio/tools/cuts.py:
--------------------------------------------------------------------------------
1 | """Cutting utilities working with audio."""
2 |
3 | import numpy as np
4 |
5 |
def find_audio_period(clip, min_time=0.1, max_time=2, time_resolution=0.01):
    """Finds the period, in seconds of an audioclip.

    Parameters
    ----------

    min_time : float, optional
        Minimum bound for the returned value.

    max_time : float, optional
        Maximum bound for the returned value.

    time_resolution : float, optional
        Numerical precision.
    """
    chunksize = int(time_resolution * clip.fps)
    chunk_duration = 1.0 * chunksize / clip.fps

    # Volume (sum of squared samples) of each consecutive chunk, centered.
    volumes = np.array([(chunk ** 2).sum() for chunk in clip.iter_chunks(chunksize)])
    volumes = volumes - volumes.mean()

    # Autocorrelation peaks at multiples of the period; keep only the
    # lags allowed by [min_time, max_time] before picking the maximum.
    correlations = np.correlate(volumes, volumes, mode="full")[-len(volumes) :]
    correlations[: int(min_time / chunk_duration)] = 0
    correlations[int(max_time / chunk_duration) :] = 0
    return chunk_duration * np.argmax(correlations)
30 |
--------------------------------------------------------------------------------
/moviepy/video/fx/fadeout.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from moviepy.decorators import requires_duration
4 |
5 |
@requires_duration
def fadeout(clip, duration, final_color=None):
    """Makes the clip progressively fade to some color (black by default),
    over ``duration`` seconds at the end of the clip. Can be used for masks too,
    where the final color must be a number between 0 and 1.

    For cross-fading (progressive appearance or disappearance of a clip over another
    clip, see ``transfx.crossfadeout``
    """
    if final_color is None:
        final_color = 0 if clip.is_mask else [0, 0, 0]
    final_color = np.array(final_color)

    def fade_filter(get_frame, t):
        remaining = clip.duration - t
        # Before the fade-out window, frames pass through untouched.
        if remaining >= duration:
            return get_frame(t)
        progress = 1.0 * remaining / duration
        return progress * get_frame(t) + (1 - progress) * final_color

    return clip.transform(fade_filter)
28 |
--------------------------------------------------------------------------------
/moviepy/video/fx/mask_color.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def mask_color(clip, color=None, threshold=0, stiffness=1):
    """Returns a new clip with a mask for transparency where the original
    clip is of the given color.

    You can also have a "progressive" mask by specifying a non-null distance
    threshold ``threshold``. In this case, if the distance between a pixel and
    the given color is d, the transparency will be

    d**stiffness / (threshold**stiffness + d**stiffness)

    which is 1 when d >> threshold and 0 when d << threshold, the effect
    being sharper for high values of ``stiffness``.

    Parameters
    ----------

    color
        RGB color to key out. Defaults to black ``[0, 0, 0]``.

    threshold : float, optional
        Distance at which pixels become half-transparent (0 gives a hard key).

    stiffness : float, optional
        Steepness of the transparency ramp around ``threshold``.
    """
    # NOTE(review): the body that previously appeared here belonged to
    # ``multiply_stereo_volume`` (it referenced undefined ``left``/``right``);
    # restored the color-keying implementation described by the docstring.
    if color is None:
        color = [0, 0, 0]

    color = np.array(color)

    def hill(x):
        # Smooth 0..1 ramp in the distance x; hard key when threshold == 0.
        if threshold:
            return x ** stiffness / (threshold ** stiffness + x ** stiffness)
        else:
            return 1.0 * (x != 0)

    def flim(im):
        # Euclidean distance of every pixel to the target color.
        return hill(np.sqrt(((im - color) ** 2).sum(axis=2)))

    mask = clip.image_transform(flim)
    mask.is_mask = True
    return clip.with_mask(mask)
30 |
--------------------------------------------------------------------------------
/moviepy/video/fx/mask_and.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from moviepy.video.VideoClip import ImageClip
4 |
5 |
def mask_and(clip, other_clip):
    """Returns the logical 'and' (minimum pixel color values) between two masks.

    The result has the duration of the clip to which has been applied, if it has any.

    Parameters
    ----------

    other_clip ImageClip or np.ndarray
        Clip used to mask the original clip.

    Examples
    --------

    >>> clip = ColorClip(color=(255, 0, 0), size=(1, 1)) # red
    >>> mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green
    >>> masked_clip = clip.fx(mask_and, mask) # black
    >>> masked_clip.get_frame(0)
    [[[0 0 0]]]
    """
    # Use the raw pixels so that 'and'-ing two ImageClips yields an ImageClip.
    if isinstance(other_clip, ImageClip):
        other_clip = other_clip.img

    if not isinstance(other_clip, np.ndarray):
        return clip.transform(
            lambda get_frame, t: np.minimum(get_frame(t), other_clip.get_frame(t))
        )
    return clip.image_transform(lambda frame: np.minimum(frame, other_clip))
36 |
--------------------------------------------------------------------------------
/moviepy/video/fx/mask_or.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from moviepy.video.VideoClip import ImageClip
4 |
5 |
def mask_or(clip, other_clip):
    """Returns the logical 'or' (maximum pixel color values) between two masks.

    The result has the duration of the clip to which has been applied, if it has any.

    Parameters
    ----------

    other_clip ImageClip or np.ndarray
        Clip used to mask the original clip.

    Examples
    --------

    >>> clip = ColorClip(color=(255, 0, 0), size=(1, 1)) # red
    >>> mask = ColorClip(color=(0, 255, 0), size=(1, 1)) # green
    >>> masked_clip = clip.fx(mask_or, mask) # yellow
    >>> masked_clip.get_frame(0)
    [[[255 255 0]]]
    """
    # Use the raw pixels so that 'or'-ing two ImageClips yields an ImageClip.
    if isinstance(other_clip, ImageClip):
        other_clip = other_clip.img

    if not isinstance(other_clip, np.ndarray):
        return clip.transform(
            lambda get_frame, t: np.maximum(get_frame(t), other_clip.get_frame(t))
        )
    return clip.image_transform(lambda frame: np.maximum(frame, other_clip))
36 |
--------------------------------------------------------------------------------
/moviepy/audio/fx/audio_fadein.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from moviepy.decorators import audio_video_fx, convert_parameter_to_seconds
4 |
5 |
6 | def _mono_factor_getter():
7 | return lambda t, duration: np.minimum(t / duration, 1)
8 |
9 |
10 | def _stereo_factor_getter(nchannels):
11 | def getter(t, duration):
12 | factor = np.minimum(t / duration, 1)
13 | return np.array([factor for _ in range(nchannels)]).T
14 |
15 | return getter
16 |
17 |
@audio_video_fx
@convert_parameter_to_seconds(["duration"])
def audio_fadein(clip, duration):
    """Return an audio (or video) clip that is first mute, then the
    sound arrives progressively over ``duration`` seconds.

    Parameters
    ----------

    duration : float
        How long does it take for the sound to return to its normal level.

    Examples
    --------

    >>> clip = VideoFileClip("media/chaplin.mp4")
    >>> clip.fx(audio_fadein, "00:00:06")
    """
    # Pick the gain function matching the clip's channel layout.
    if clip.nchannels == 1:
        get_factor = _mono_factor_getter()
    else:
        get_factor = _stereo_factor_getter(clip.nchannels)

    def fader(get_frame, t):
        return get_factor(t, duration) * get_frame(t)

    return clip.transform(fader, keep_duration=True)
46 |
--------------------------------------------------------------------------------
/moviepy/video/fx/freeze.py:
--------------------------------------------------------------------------------
1 | from moviepy.decorators import requires_duration
2 | from moviepy.video.compositing.concatenate import concatenate_videoclips
3 |
4 |
@requires_duration
def freeze(clip, t=0, freeze_duration=None, total_duration=None, padding_end=0):
    """Momentarily freeze the clip at time t.

    Set `t='end'` to freeze the clip at the end (actually it will freeze on the
    frame at time clip.duration - padding_end seconds - 1 / clip_fps).
    With ``freeze_duration`` you can specify the duration of the freeze.
    With ``total_duration`` you can specify the total duration of
    the clip and the freeze (i.e. the duration of the freeze is
    automatically computed). One of them must be provided.
    """
    if t == "end":
        t = clip.duration - padding_end - 1 / clip.fps

    if freeze_duration is None:
        if total_duration is None:
            raise ValueError(
                "You must provide either 'freeze_duration' or 'total_duration'"
            )
        freeze_duration = total_duration - clip.duration

    # Assemble: [part before t] + [frozen frame] + [part after t].
    parts = []
    if t != 0:
        parts.append(clip.subclip(0, t))
    parts.append(clip.to_ImageClip(t).with_duration(freeze_duration))
    if t != clip.duration:
        parts.append(clip.subclip(t))
    return concatenate_videoclips(parts)
30 |
--------------------------------------------------------------------------------
/moviepy/audio/fx/audio_fadeout.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from moviepy.decorators import (
4 | audio_video_fx,
5 | convert_parameter_to_seconds,
6 | requires_duration,
7 | )
8 |
9 |
10 | def _mono_factor_getter(clip_duration):
11 | return lambda t, duration: np.minimum(1.0 * (clip_duration - t) / duration, 1)
12 |
13 |
14 | def _stereo_factor_getter(clip_duration, nchannels):
15 | def getter(t, duration):
16 | factor = np.minimum(1.0 * (clip_duration - t) / duration, 1)
17 | return np.array([factor for _ in range(nchannels)]).T
18 |
19 | return getter
20 |
21 |
@audio_video_fx
@requires_duration
@convert_parameter_to_seconds(["duration"])
def audio_fadeout(clip, duration):
    """Return a sound clip where the sound fades out progressively
    over ``duration`` seconds at the end of the clip.

    Parameters
    ----------

    duration : float
        How long does it take for the sound to reach the zero level at the end
        of the clip.

    Examples
    --------

    >>> clip = VideoFileClip("media/chaplin.mp4")
    >>> clip.fx(audio_fadeout, "00:00:06")
    """
    # Pick the gain function matching the clip's channel layout.
    if clip.nchannels == 1:
        get_factor = _mono_factor_getter(clip.duration)
    else:
        get_factor = _stereo_factor_getter(clip.duration, clip.nchannels)

    def fader(get_frame, t):
        return get_factor(t, duration) * get_frame(t)

    return clip.transform(fader, keep_duration=True)
52 |
--------------------------------------------------------------------------------
/moviepy/utils.py:
--------------------------------------------------------------------------------
1 | """Useful utilities working with MoviePy."""
2 |
3 | from moviepy.audio.io.AudioFileClip import AudioFileClip
4 | from moviepy.video.io.VideoFileClip import VideoFileClip
5 | from moviepy.video.VideoClip import ImageClip
6 |
7 |
# Mapping from the type names accepted by ``close_all_clips`` ("audio",
# "video", "image") to the corresponding clip classes.
CLIP_TYPES = {
    "audio": AudioFileClip,
    "video": VideoFileClip,
    "image": ImageClip,
}
13 |
14 |
def close_all_clips(objects="globals", types=("audio", "video", "image")):
    """Closes all clips in a context.

    Follows different strategies retrieving the namespace from which the clips
    to close will be retrieved depending on the ``objects`` argument, and filtering
    by type of clips depending on the ``types`` argument.

    Parameters
    ----------

    objects : str or dict, optional
        - If is a string an the value is ``"globals"``, will close all the clips
          contained by the ``globals()`` namespace.
        - If is a dictionary, the values of the dictionary could be clips to close,
          useful if you want to use ``locals()``.

    types : Iterable, optional
        Set of types of clips to close, being "audio", "video" or "image" the supported
        values.
    """
    if objects == "globals":  # pragma: no cover
        objects = globals()
    if hasattr(objects, "values"):
        objects = objects.values()
    # Resolve the requested type names to the actual clip classes once.
    target_types = tuple(CLIP_TYPES[name] for name in types)
    for candidate in objects:
        if isinstance(candidate, target_types):
            candidate.close()
43 |
--------------------------------------------------------------------------------
/moviepy/video/fx/painting.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
# ------- CHECKING DEPENDENCIES -----------------------------------------
# ``sobel`` edge detection comes from scikit-image when available, with a
# SciPy fallback; the fx is disabled entirely when neither can be imported.
# NOTE: scikit-image renamed ``skimage.filter`` to ``skimage.filters`` in
# 0.11 and SciPy removed the ``scipy.ndimage.filters`` namespace, so the
# modern import paths are used here.
painting_possible = True
try:
    from skimage.filters import sobel
except Exception:
    try:
        from scipy.ndimage import sobel
    except Exception:
        painting_possible = False
# -----------------------------------------------------------------------
14 |
15 |
def to_painting(image, saturation=1.4, black=0.006):
    """Transforms any photo into some kind of painting.

    Boosts the colors by ``saturation`` and darkens the detected edges
    by an amount proportional to ``black``.
    """
    edges = sobel(image.mean(axis=2))
    # Edge map replicated on the 3 color channels, scaled to 0-255.
    dark_lines = black * 255 * np.dstack([edges, edges, edges])
    painted = saturation * image - dark_lines
    return np.clip(painted, 0, 255).astype("uint8")
22 |
23 |
def painting(clip, saturation=1.4, black=0.006):
    """Transforms any photo into some kind of painting. Saturation
    tells at which point the colors of the result should be
    flashy. ``black`` gives the amount of black lines wanted.
    Requires Scikit-image or Scipy installed.
    """

    def apply(im):
        return to_painting(im, saturation, black)

    return clip.image_transform(apply)
32 |
33 |
# ------- OVERWRITE IF REQUIREMENTS NOT MET -----------------------------
# Replace ``painting`` with a stub that raises when neither scikit-image
# nor scipy could be imported above.

if not painting_possible:
    doc = painting.__doc__

    def painting(clip, saturation=None, black=None):
        """Fallback painting FX function, used if scikit-image and scipy are not
        installed.

        This docstring will be replaced at runtime.
        """
        raise IOError("fx painting needs scikit-image or scipy")

    # Keep the real effect's documentation so help() stays useful.
    painting.__doc__ = doc
# -----------------------------------------------------------------------
49 |
--------------------------------------------------------------------------------
/moviepy/video/io/downloader.py:
--------------------------------------------------------------------------------
1 | """Utilities to get a file from the internet."""
2 |
3 | import os
4 | import shutil
5 | import urllib.request
6 |
7 | from moviepy.tools import subprocess_call
8 |
9 |
def download_webfile(url, filename, overwrite=False):
    """Small utility to download the file at ``url`` under name ``filename``.

    Parameters
    ----------

    url : str
        If url is a youtube video ID like z410eauCnH it will download the video
        using youtube-dl. Requires youtube-dl (pip install youtube-dl).

    filename : str
        Path to the new downloaded file location.

    overwrite : bool, optional
        If the filename already exists and overwrite=False, nothing will happen.
        Use it to force destination file overwriting.

    Raises
    ------

    OSError
        If youtube-dl is needed but cannot be run.

    Examples
    --------

    >>> from moviepy.video.io.downloader import download_webfile
    >>>
    >>> download_webfile(
    ...     "http://localhost:8000/media/chaplin.mp4",
    ...     "media/chaplin-copy.mp4",
    ... )
    >>>
    """
    if os.path.exists(filename) and not overwrite:  # pragma: no cover
        return

    if "." in url:
        # Looks like a direct URL: stream it to disk in small chunks.
        with urllib.request.urlopen(url) as req, open(filename, "wb") as f:
            shutil.copyfileobj(req, f, 128)
    else:
        # Otherwise treat ``url`` as a youtube video ID and delegate
        # the download to youtube-dl.
        try:
            subprocess_call(["youtube-dl", url, "-o", filename])
        except OSError as e:
            # ``e.message`` was a Python 2 attribute; use str(e) so the
            # underlying error is not silently dropped, and chain the cause.
            raise OSError(
                "Error running youtube-dl.\n%sA possible reason is that"
                " youtube-dl is not installed on your computer. Install it "
                " with 'pip install youtube_dl'." % (str(e) + "\n")
            ) from e
57 |
--------------------------------------------------------------------------------
/moviepy/video/fx/crop.py:
--------------------------------------------------------------------------------
def crop(
    clip,
    x1=None,
    y1=None,
    x2=None,
    y2=None,
    width=None,
    height=None,
    x_center=None,
    y_center=None,
):
    """
    Returns a new clip in which just a rectangular subregion of the
    original clip is conserved. x1,y1 indicates the top left corner and
    x2,y2 is the lower right corner of the croped region.
    All coordinates are in pixels. Float numbers are accepted.

    To crop an arbitrary rectangle:

    >>> crop(clip, x1=50, y1=60, x2=460, y2=275)

    Only remove the part above y=30:

    >>> crop(clip, y1=30)

    Crop a rectangle that starts 10 pixels left and is 200px wide

    >>> crop(clip, x1=10, width=200)

    Crop a rectangle centered in x,y=(300,400), width=50, height=150 :

    >>> crop(clip, x_center=300, y_center=400,
    ...      width=50, height=150)

    Any combination of the above should work, like for this rectangle
    centered in x=300, with explicit y-boundaries:

    >>> crop(clip, x_center=300, width=400, y1=100, y2=600)

    """
    if width and x1 is not None:
        x2 = x1 + width
    elif width and x2 is not None:
        x1 = x2 - width

    if height and y1 is not None:
        y2 = y1 + height
    elif height and y2 is not None:
        y1 = y2 - height

    # ``is not None`` (not truthiness) so that a legitimate 0 coordinate
    # is not silently ignored. ``width``/``height`` must be given here.
    if x_center is not None:
        x1, x2 = x_center - width / 2, x_center + width / 2

    if y_center is not None:
        y1, y2 = y_center - height / 2, y_center + height / 2

    # Default to the full frame for any bound still unset.
    x1 = 0 if x1 is None else x1
    y1 = 0 if y1 is None else y1
    x2 = clip.size[0] if x2 is None else x2
    y2 = clip.size[1] if y2 is None else y2

    return clip.image_transform(
        lambda frame: frame[int(y1) : int(y2), int(x1) : int(x2)], apply_to=["mask"]
    )
65 |
--------------------------------------------------------------------------------
/moviepy/video/fx/freeze_region.py:
--------------------------------------------------------------------------------
1 | from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
2 | from moviepy.video.fx.crop import crop
3 |
4 |
def freeze_region(clip, t=0, region=None, outside_region=None, mask=None):
    """Freezes one region of the clip while the rest remains animated.

    You can choose one of three methods by providing either `region`,
    `outside_region`, or `mask`.

    Parameters
    ----------

    t
        Time at which to freeze the freezed region.

    region
        A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels)
        which will be freezed. You can provide outside_region or mask instead.

    outside_region
        A tuple (x1, y1, x2, y2) defining the region of the screen (in pixels)
        which will be the only non-freezed region.

    mask
        If not None, will overlay a freezed version of the clip on the current clip,
        with the provided mask. In other words, the "visible" pixels in the mask
        indicate the freezed region in the final picture.

    Raises
    ------

    ValueError
        If none of ``region``, ``outside_region`` or ``mask`` is provided.
    """
    if region is not None:
        # Overlay a still of the cropped region on top of the animated clip.
        x1, y1, _x2, _y2 = region
        freeze = (
            clip.fx(crop, *region)
            .to_ImageClip(t=t)
            .with_duration(clip.duration)
            .with_position((x1, y1))
        )
        return CompositeVideoClip([clip, freeze])

    if outside_region is not None:
        # Overlay the animated cropped region on top of a frozen full frame.
        x1, y1, _x2, _y2 = outside_region
        animated_region = clip.fx(crop, *outside_region).with_position((x1, y1))
        freeze = clip.to_ImageClip(t=t).with_duration(clip.duration)
        return CompositeVideoClip([freeze, animated_region])

    if mask is not None:
        freeze = clip.to_ImageClip(t=t).with_duration(clip.duration).with_mask(mask)
        return CompositeVideoClip([clip, freeze])

    # Previously this fell through and returned None silently.
    raise ValueError(
        "You must provide either 'region', 'outside_region' or 'mask'"
    )
52 |
--------------------------------------------------------------------------------
/moviepy/audio/fx/audio_delay.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from moviepy.audio.AudioClip import CompositeAudioClip
4 | from moviepy.audio.fx.multiply_volume import multiply_volume
5 | from moviepy.decorators import audio_video_fx
6 |
7 |
@audio_video_fx
def audio_delay(clip, offset=0.2, n_repeats=8, decay=1):
    """Repeats audio certain number of times at constant intervals multiplying
    their volume levels using a linear space in the range 1 to ``decay`` argument
    value.

    Parameters
    ----------

    offset : float, optional
        Gap between repetitions start times, in seconds.

    n_repeats : int, optional
        Number of repetitions (without including the clip itself).

    decay : float, optional
        Multiplication factor for the volume level of the last repetition. Each
        repetition will have a value in the linear function between 1 and this value,
        increasing or decreasing constantly. Keep in mind that the last repetition
        will be muted if this is 0, and if is greater than 1, the volume will increase
        for each repetition.

    Examples
    --------

    >>> from moviepy import *
    >>> videoclip = AudioFileClip('myaudio.wav').fx(
    ...     audio_delay, offset=.2, n_repeats=10, decay=.2
    ... )

    >>> # stereo A note
    >>> make_frame = lambda t: np.array(
    ...     [np.sin(440 * 2 * np.pi * t), np.sin(880 * 2 * np.pi * t)]
    ... ).T
    >>> clip = AudioClip(make_frame=make_frame, duration=0.1, fps=44100)
    >>> clip = audio_delay(clip, offset=.2, n_repeats=11, decay=0)
    """
    # One volume factor per copy: 1 for the original, down (or up) to
    # ``decay`` (clamped at 0) for the last repetition.
    decayments = np.linspace(1, max(0, decay), n_repeats + 1)
    return CompositeAudioClip(
        [
            clip.copy(),
            *[
                multiply_volume(
                    clip.with_start((rep + 1) * offset), decayments[rep + 1]
                )
                for rep in range(n_repeats)
            ],
        ]
    )
57 |
--------------------------------------------------------------------------------
/moviepy/video/fx/accel_decel.py:
--------------------------------------------------------------------------------
1 | def _f_accel_decel(t, old_duration, new_duration, abruptness=1.0, soonness=1.0):
2 | a = 1.0 + abruptness
3 |
4 | def _f(t):
5 | def f1(t):
6 | return (0.5) ** (1 - a) * (t ** a)
7 |
8 | def f2(t):
9 | return 1 - f1(1 - t)
10 |
11 | return (t < 0.5) * f1(t) + (t >= 0.5) * f2(t)
12 |
13 | return old_duration * _f((t / new_duration) ** soonness)
14 |
15 |
def accel_decel(clip, new_duration=None, abruptness=1.0, soonness=1.0):
    """Accelerates and decelerates a clip, useful for GIF making.

    Parameters
    ----------

    new_duration : float
        Duration for the new transformed clip. If None, will be that of the
        current clip.

    abruptness : float
        Slope shape in the acceleration-deceleration function. It will depend
        on the value of the parameter:

        * ``-1 < abruptness < 0``: speed up, down, up.
        * ``abruptness == 0``: no effect.
        * ``abruptness > 0``: speed down, up, down.

    soonness : float
        For positive abruptness, determines how soon the transformation occurs.
        Should be a positive number.

    Raises
    ------

    ValueError
        When ``soonness`` argument is lower than 0.

    Examples
    --------

    The following graphs show functions generated by different combinations
    of arguments, where the value of the slopes represents the speed of the
    videos generated, being the linear function (in red) a combination that
    does not produce any transformation.

    .. image:: /_static/accel_decel-fx-params.png
        :alt: acced_decel FX parameters combinations
    """
    if new_duration is None:
        new_duration = clip.duration
    if soonness < 0:
        # Fixed typo: the message previously misspelled the parameter
        # as 'sooness'.
        raise ValueError("'soonness' should be a positive number")

    return clip.time_transform(
        lambda t: _f_accel_decel(t, clip.duration, new_duration, abruptness, soonness)
    ).with_duration(new_duration)
63 |
--------------------------------------------------------------------------------
/moviepy/video/fx/headblur.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
# ------- CHECKING DEPENDENCIES -----------------------------------------
# OpenCV is an optional dependency; ``headblur`` below is replaced by a
# raising stub at the end of this module when cv2 cannot be imported.
try:
    import cv2

    headblur_possible = True
    # OpenCV 3 renamed the anti-aliased line flag; alias the old name so
    # the drawing code below works on both versions.
    if cv2.__version__ >= "3.0.0":
        cv2.CV_AA = cv2.LINE_AA
except Exception:
    headblur_possible = False
# -----------------------------------------------------------------------
14 |
15 |
def headblur(clip, fx, fy, radius, intensity=None):
    """Returns a filter that will blur a moving part (a head ?) of the frames.

    The position of the blur at time t is defined by (fx(t), fy(t)), the radius
    of the blurring by ``radius`` and the intensity of the blurring by ``intensity``.

    Requires OpenCV for the circling and the blurring. Automatically deals with the
    case where part of the image goes offscreen.
    """
    if intensity is None:
        intensity = int(2 * radius / 3)

    def blur_filter(get_frame, t):
        frame = get_frame(t).copy()
        height, width, _ = frame.shape
        cx, cy = int(fx(t)), int(fy(t))
        # Clip the blur window to the frame boundaries.
        x1, x2 = max(0, cx - radius), min(cx + radius, width)
        y1, y2 = max(0, cy - radius), min(cy + radius, height)

        # Circular blend mask, scaled to 0..1, replicated on 3 channels.
        mask = np.zeros((y2 - y1, x2 - x1)).astype("uint8")
        cv2.circle(mask, (radius, radius), radius, 255, -1, lineType=cv2.CV_AA)
        mask = np.dstack(3 * [(1.0 / 255) * mask])

        region = frame[y1:y2, x1:x2]
        blurred = cv2.blur(region, (intensity, intensity))
        frame[y1:y2, x1:x2] = mask * blurred + (1 - mask) * region
        return frame

    return clip.transform(blur_filter)
47 |
48 |
# ------- OVERWRITE IF REQUIREMENTS NOT MET -----------------------------
if not headblur_possible:
    doc = headblur.__doc__

    def headblur(clip, fx, fy, r_zone, r_blur=None):
        """Fallback headblur FX function, used if OpenCV is not installed.

        This docstring will be replaced at runtime.
        """
        # Fixed error message: it previously said "fx painting needs
        # opencv" (copy-pasted from painting.py).
        raise IOError("fx headblur needs opencv")

    # Keep the real effect's documentation so help() stays useful.
    headblur.__doc__ = doc
# -----------------------------------------------------------------------
62 |
--------------------------------------------------------------------------------
/moviepy/video/fx/__init__.py:
--------------------------------------------------------------------------------
1 | # import every video fx function
2 |
3 | from moviepy.video.fx.accel_decel import accel_decel
4 | from moviepy.video.fx.blackwhite import blackwhite
5 | from moviepy.video.fx.blink import blink
6 | from moviepy.video.fx.crop import crop
7 | from moviepy.video.fx.even_size import even_size
8 | from moviepy.video.fx.fadein import fadein
9 | from moviepy.video.fx.fadeout import fadeout
10 | from moviepy.video.fx.freeze import freeze
11 | from moviepy.video.fx.freeze_region import freeze_region
12 | from moviepy.video.fx.gamma_corr import gamma_corr
13 | from moviepy.video.fx.headblur import headblur
14 | from moviepy.video.fx.invert_colors import invert_colors
15 | from moviepy.video.fx.loop import loop
16 | from moviepy.video.fx.lum_contrast import lum_contrast
17 | from moviepy.video.fx.make_loopable import make_loopable
18 | from moviepy.video.fx.margin import margin
19 | from moviepy.video.fx.mask_and import mask_and
20 | from moviepy.video.fx.mask_color import mask_color
21 | from moviepy.video.fx.mask_or import mask_or
22 | from moviepy.video.fx.mirror_x import mirror_x
23 | from moviepy.video.fx.mirror_y import mirror_y
24 | from moviepy.video.fx.multiply_color import multiply_color
25 | from moviepy.video.fx.multiply_speed import multiply_speed
26 | from moviepy.video.fx.painting import painting
27 | from moviepy.video.fx.resize import resize
28 | from moviepy.video.fx.rotate import rotate
29 | from moviepy.video.fx.scroll import scroll
30 | from moviepy.video.fx.supersample import supersample
31 | from moviepy.video.fx.time_mirror import time_mirror
32 | from moviepy.video.fx.time_symmetrize import time_symmetrize
33 |
34 |
# Public API of ``moviepy.video.fx``: one entry per effect imported above.
__all__ = (
    "accel_decel",
    "blackwhite",
    "blink",
    "crop",
    "even_size",
    "fadein",
    "fadeout",
    "freeze",
    "freeze_region",
    "gamma_corr",
    "headblur",
    "invert_colors",
    "loop",
    "lum_contrast",
    "make_loopable",
    "margin",
    "mask_and",
    "mask_color",
    "mask_or",
    "mirror_x",
    "mirror_y",
    "multiply_color",
    "multiply_speed",
    "painting",
    "resize",
    "rotate",
    "scroll",
    "supersample",
    "time_mirror",
    "time_symmetrize",
)
67 |
--------------------------------------------------------------------------------
/moviepy/video/io/sliders.py:
--------------------------------------------------------------------------------
1 | """GUI matplotlib utility to tune the outputs of a function."""
2 |
3 | import matplotlib.pyplot as plt
4 | from matplotlib.widgets import Slider
5 |
6 |
def sliders(func, sliders_properties, wait_for_validation=False):
    """A light GUI to manually explore and tune the outputs of a function.

    ``slider_properties`` is a list of dicts (arguments for Slider)::

        def volume(x,y,z):
            return x*y*z

        intervals = [ { 'label' : 'width',  'valmin': 1 , 'valmax': 5 },
                      { 'label' : 'height', 'valmin': 1 , 'valmax': 5 },
                      { 'label' : 'depth',  'valmin': 1 , 'valmax': 5 } ]
        inputExplorer(volume, intervals)

    """
    n_vars = len(sliders_properties)
    slider_width = 1.0 / n_vars

    # CREATE THE CANVAS

    figure, ax = plt.subplots(1)
    # ``func.func_name`` was Python 2 only, and ``canvas.set_window_title``
    # was removed in Matplotlib 3.6; go through the canvas manager instead.
    figure.canvas.manager.set_window_title("Inputs for '%s'" % func.__name__)

    # choose an appropriate height

    width, height = figure.get_size_inches()
    height = min(0.5 * n_vars, 8)
    figure.set_size_inches(width, height, forward=True)

    # hide the axis
    ax.set_frame_on(False)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # CREATE THE SLIDERS

    sliders = []

    for i, properties in enumerate(sliders_properties):
        ax = plt.axes(
            [0.1, 0.95 - 0.9 * (i + 1) * slider_width, 0.8, 0.8 * slider_width]
        )
        # Allow bare (label, valmin, valmax[, valinit]) tuples as well.
        if not isinstance(properties, dict):
            properties = dict(zip(["label", "valmin", "valmax", "valinit"], properties))
        sliders.append(Slider(ax=ax, **properties))

    # CREATE THE CALLBACK FUNCTIONS

    def on_changed(event):
        res = func(*(s.val for s in sliders))
        if res is not None:
            print(res)

    def on_key_press(event):
        if event.key == "enter":
            on_changed(event)

    figure.canvas.mpl_connect("key_press_event", on_key_press)

    # AUTOMATIC UPDATE ?

    if not wait_for_validation:
        for s in sliders:
            s.on_changed(on_changed)

    # DISPLAY THE SLIDERS
    plt.show()
73 |
--------------------------------------------------------------------------------
/moviepy/editor.py:
--------------------------------------------------------------------------------
1 | """
2 | Module meant to make it easy to load the features of MoviePy that you will use
3 | for live editing by simply typing:
4 |
5 | >>> from moviepy.editor import *
6 |
7 | - Starts a pygame session to enable ``clip.show()`` and ``clip.preview()``
8 | if pygame is installed
9 | - Enables ``clip.ipython_display()`` if in an IPython Notebook
10 | - Allows the use of ``sliders`` if Matplotlib is installed
11 | """
12 |
13 | import os
14 |
15 | import moviepy # So that we can access moviepy.__all__ later
16 | from moviepy import *
17 | from moviepy.video.io.html_tools import ipython_display
18 |
19 |
# ``sliders`` needs matplotlib; fall back to a stub that raises on use.
try:
    from moviepy.video.io.sliders import sliders
except ImportError:

    def sliders(*args, **kwargs):
        """NOT AVAILABLE: sliders requires matplotlib installed."""
        raise ImportError("sliders requires matplotlib installed")


# adds easy ipython integration
VideoClip.ipython_display = ipython_display
AudioClip.ipython_display = ipython_display


# -----------------------------------------------------------------
# Previews: try to import pygame, else make methods which raise
# exceptions saying to install PyGame

# Hide the welcome message from pygame: https://github.com/pygame/pygame/issues/542
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "1"

# Add methods preview and show (only if pygame installed)
try:
    from moviepy.video.io.preview import preview, show
except ImportError:

    def preview(self, *args, **kwargs):
        """NOT AVAILABLE: clip.preview requires Pygame installed."""
        raise ImportError("clip.preview requires Pygame installed")

    def show(self, *args, **kwargs):
        """NOT AVAILABLE: clip.show requires Pygame installed."""
        raise ImportError("clip.show requires Pygame installed")


VideoClip.preview = preview
VideoClip.show = show

# Audio previewing has its own pygame-based implementation; it rebinds
# the module-level ``preview`` name, so it must come after the VideoClip
# assignments above.
try:
    from moviepy.audio.io.preview import preview
except ImportError:

    def preview(self, *args, **kwargs):
        """NOT AVAILABLE: clip.preview requires Pygame installed."""
        raise ImportError("clip.preview requires Pygame installed")


AudioClip.preview = preview

__all__ = moviepy.__all__ + ["ipython_display", "sliders"]

# The functions are attached to the clip classes above; drop the
# module-level names to keep this namespace clean.
del preview, show
72 |
--------------------------------------------------------------------------------
/moviepy/video/fx/margin.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from moviepy.decorators import apply_to_mask
4 | from moviepy.video.VideoClip import ImageClip
5 |
6 |
@apply_to_mask
def margin(
    clip,
    margin_size=None,
    left=0,
    right=0,
    top=0,
    bottom=0,
    color=(0, 0, 0),
    opacity=1.0,
):
    """
    Draws an external margin all around the frame.

    Parameters
    ----------

    margin_size : int, optional
        If not ``None``, overrides ``left``/``right``/``top``/``bottom``
        and puts a margin of ``margin_size`` pixels on all four sides.

    left : int, optional
        If ``margin_size=None``, margin size for the new clip in left direction.

    right : int, optional
        If ``margin_size=None``, margin size for the new clip in right direction.

    top : int, optional
        If ``margin_size=None``, margin size for the new clip in top direction.

    bottom : int, optional
        If ``margin_size=None``, margin size for the new clip in bottom direction.

    color : tuple, optional
        Color of the margin.

    opacity : float, optional
        Opacity of the margin. Setting this value to 0 yields transparent margins.
    """
    # A non-opaque margin needs a mask to carry the transparency.
    if (opacity != 1.0) and (clip.mask is None) and not (clip.is_mask):
        clip = clip.add_mask()

    if margin_size is not None:
        left = right = top = bottom = margin_size

    def build_canvas(width, height):
        # Enlarged background the original frame gets pasted onto: filled
        # with ``opacity`` for mask clips, with ``color`` otherwise.
        full_w = width + left + right
        full_h = height + top + bottom
        if clip.is_mask:
            return np.tile(opacity, (full_h, full_w)).astype(float)
        return np.tile(color, (full_h, full_w)).reshape((full_h, full_w, 3))

    if isinstance(clip, ImageClip):
        # Static image: build the framed picture once and reuse it.
        canvas = build_canvas(clip.w, clip.h)
        canvas[top : top + clip.h, left : left + clip.w] = clip.img
        return clip.image_transform(lambda pic: canvas)

    def add_margin(get_frame, t):
        frame = get_frame(t)
        frame_h, frame_w = frame.shape[:2]
        canvas = build_canvas(frame_w, frame_h)
        canvas[top : top + frame_h, left : left + frame_w] = frame
        return canvas

    return clip.transform(add_margin)
77 |
--------------------------------------------------------------------------------
/moviepy/audio/io/preview.py:
--------------------------------------------------------------------------------
1 | """Audio preview functions for MoviePy editor."""
2 |
3 | import time
4 |
5 | import numpy as np
6 | import pygame as pg
7 |
8 | from moviepy.decorators import requires_duration
9 |
10 |
# Module-level pygame initialization: sets up the pygame runtime and the
# caption of the window shown while previewing.
pg.init()
pg.display.set_caption("MoviePy")


@requires_duration
def preview(
    clip, fps=22050, buffersize=4000, nbytes=2, audio_flag=None, video_flag=None
):
    """
    Plays the sound clip with pygame.

    Parameters
    ----------

    fps
        Frame rate of the sound. 44100 gives top quality, but may cause
        problems if your computer is not fast enough and your clip is
        complicated. If the sound jumps during the preview, lower it
        (11025 is still fine, 5000 is tolerable).

    buffersize
        The sound is not generated all at once, but rather made by bunches
        of frames (chunks). ``buffersize`` is the size of such a chunk.
        Try varying it if you meet audio problems (but you shouldn't
        have to).

    nbytes:
        Number of bytes to encode the sound: 1 for 8bit sound, 2 for
        16bit, 4 for 32bit sound. 2 bytes is fine.

    audio_flag, video_flag:
        Instances of class threading events that are used to synchronize
        video and audio during ``VideoClip.preview()``.

    """
    # Restart the mixer with the requested sample rate and sample width.
    pg.mixer.quit()

    pg.mixer.init(fps, -8 * nbytes, clip.nchannels, 1024)
    totalsize = int(fps * clip.duration)
    # Chunk boundaries in frames: 0, buffersize, 2*buffersize, ..., totalsize.
    pospos = np.array(list(range(0, totalsize, buffersize)) + [totalsize])
    # Render and queue the first chunk before signalling readiness.
    timings = (1.0 / fps) * np.arange(pospos[0], pospos[1])
    sndarray = clip.to_soundarray(timings, nbytes=nbytes, quantize=True)
    chunk = pg.sndarray.make_sound(sndarray)

    if (audio_flag is not None) and (video_flag is not None):
        # Handshake with the video preview: announce audio is ready, then
        # wait for the video side before starting playback.
        audio_flag.set()
        video_flag.wait()

    channel = chunk.play()
    for i in range(1, len(pospos) - 1):
        # Render the next chunk while the current one is playing.
        timings = (1.0 / fps) * np.arange(pospos[i], pospos[i + 1])
        sndarray = clip.to_soundarray(timings, nbytes=nbytes, quantize=True)
        chunk = pg.sndarray.make_sound(sndarray)
        while channel.get_queue():
            # Channel already has a queued chunk: wait for a free slot.
            time.sleep(0.003)
            if video_flag is not None:
                if not video_flag.is_set():
                    # Video preview was stopped: abort audio playback too.
                    channel.stop()
                    del channel
                    return
        channel.queue(chunk)
72 |
--------------------------------------------------------------------------------
/moviepy/audio/fx/multiply_volume.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from moviepy.decorators import audio_video_fx, convert_parameter_to_seconds
4 |
5 |
6 | def _multiply_volume_in_range(factor, start_time, end_time, nchannels):
7 | def factors_filter(factor, t):
8 | return np.array([factor if start_time <= t_ <= end_time else 1 for t_ in t])
9 |
10 | def multiply_stereo_volume(get_frame, t):
11 | return np.multiply(
12 | get_frame(t),
13 | np.array([factors_filter(factor, t) for _ in range(nchannels)]).T,
14 | )
15 |
16 | def multiply_mono_volume(get_frame, t):
17 | return np.multiply(get_frame(t), factors_filter(factor, t))
18 |
19 | return multiply_mono_volume if nchannels == 1 else multiply_stereo_volume
20 |
21 |
@audio_video_fx
@convert_parameter_to_seconds(["start_time", "end_time"])
def multiply_volume(clip, factor, start_time=None, end_time=None):
    """Returns a clip with audio volume multiplied by the
    value `factor`. Can be applied to both audio and video clips.

    Parameters
    ----------

    factor : float
        Volume multiplication factor.

    start_time : float, optional
        Time from the beginning of the clip until the volume transformation
        begins to take effect, in seconds. By default at the beginning.

    end_time : float, optional
        Time from the beginning of the clip until the volume transformation
        ends to take effect, in seconds. By default at the end.

    Examples
    --------

    >>> from moviepy import AudioFileClip
    >>>
    >>> clip = AudioFileClip('music.ogg')
    >>> doubled_audio_clip = clip.multiply_volume(2)  # doubles audio volume
    >>> half_audio_clip = clip.multiply_volume(0.5)  # half audio
    >>>
    >>> # silenced clip during one second at third
    >>> silenced_clip = clip.multiply_volume(0, start_time=2, end_time=3)
    """
    if start_time is None and end_time is None:
        # No range given: scale the whole clip uniformly.
        return clip.transform(
            lambda get_frame, t: factor * get_frame(t),
            keep_duration=True,
        )

    # A range was given: fill in missing bounds from the clip itself and
    # build a range-limited filter.
    effective_start = clip.start if start_time is None else start_time
    effective_end = clip.end if end_time is None else end_time
    range_filter = _multiply_volume_in_range(
        factor, effective_start, effective_end, clip.nchannels
    )
    return clip.transform(range_filter, keep_duration=True)
69 |
--------------------------------------------------------------------------------
/moviepy/audio/io/AudioFileClip.py:
--------------------------------------------------------------------------------
1 | """Implements AudioFileClip, a class for audio clips creation using audio files."""
2 |
3 | from moviepy.audio.AudioClip import AudioClip
4 | from moviepy.audio.io.readers import FFMPEG_AudioReader
5 | from moviepy.decorators import convert_path_to_string
6 |
7 |
class AudioFileClip(AudioClip):
    """
    An audio clip read from a sound file, or an array.
    The whole file is not loaded in memory. Instead, only a portion is
    read and stored in memory. This portion includes frames before
    and after the last frames read, so that it is fast to read the sound
    backward and forward.

    Parameters
    ----------

    filename
        Either a soundfile name (of any extension supported by ffmpeg)
        as a string or a path-like object,
        or an array representing a sound. If the soundfile is not a .wav,
        it will be converted to .wav first, using the ``fps`` and
        ``bitrate`` arguments.

    buffersize:
        Size to load in memory (in number of frames)


    Attributes
    ----------

    nbytes
        Number of bits per frame of the original audio file.

    fps
        Number of frames per second in the audio file

    buffersize
        See Parameters.

    Lifetime
    --------

    Note that this creates subprocesses and locks files. If you construct one
    of these instances, you must call close() afterwards, or the subresources
    will not be cleaned up until the process ends.

    Examples
    --------

    >>> snd = AudioFileClip("song.wav")
    >>> snd.close()
    """

    @convert_path_to_string("filename")
    def __init__(
        self, filename, decode_file=False, buffersize=200000, nbytes=2, fps=44100
    ):
        AudioClip.__init__(self)

        self.filename = filename
        # The reader spawns an ffmpeg subprocess; it must be released
        # through ``close()`` (see the Lifetime note in the class docstring).
        self.reader = FFMPEG_AudioReader(
            filename,
            decode_file=decode_file,
            fps=fps,
            nbytes=nbytes,
            buffersize=buffersize,
        )
        self.fps = fps
        self.duration = self.reader.duration
        self.end = self.reader.duration
        self.buffersize = self.reader.buffersize

        self.make_frame = lambda t: self.reader.get_frame(t)
        self.nchannels = self.reader.nchannels

    def close(self):
        """Close the internal reader."""
        # ``reader`` is set to None so a second close() is a no-op.
        if self.reader:
            self.reader.close()
            self.reader = None
85 |
--------------------------------------------------------------------------------
/moviepy/video/tools/segmenting.py:
--------------------------------------------------------------------------------
1 | """Utilities related with segmenting useful working with video clips."""
2 |
3 | import numpy as np
4 | import scipy.ndimage as ndi
5 |
6 | from moviepy.video.VideoClip import ImageClip
7 |
8 |
def find_objects(clip, size_threshold=500, preview=False):
    """Returns a list of ImageClips representing each a separate object on
    the screen.

    Parameters
    ----------

    clip : video.VideoClip.ImageClip
        MoviePy video clip where the objects will be searched.

    size_threshold : float, optional
        Minimum size of what is considered an object. All objects found with
        ``size < size_threshold`` will be considered false positives and will
        be removed.

    preview : bool, optional
        Previews with matplotlib the different objects found in the image before
        applying the size threshold. Requires matplotlib installed.

    Returns
    -------

    list of ImageClip
        One clip per detected object, each with a ``mask`` and a
        ``screenpos`` attribute (top-left position in the original frame).

    Examples
    --------

    >>> clip = ImageClip("media/afterimage.png")
    >>> objects = find_objects(clip)
    >>>
    >>> print(len(objects))
    >>> print([obj_.screenpos for obj_ in objects])
    """
    image = clip.get_frame(0)
    if not clip.mask:  # pragma: no cover
        clip = clip.add_mask()

    mask = clip.mask.get_frame(0)
    # ``ndi.label`` is the supported spelling: the ``scipy.ndimage.measurements``
    # namespace is deprecated (and removed in recent SciPy releases).
    labelled, num_features = ndi.label(image[:, :, 0])

    # find the objects
    slices = []
    for obj in ndi.find_objects(labelled):
        if mask[obj[0], obj[1]].mean() <= 0.2:
            # remove letter holes (in o,e,a, etc.)
            continue
        if image[obj[0], obj[1]].size <= size_threshold:
            # remove very small slices
            continue
        slices.append(obj)
    # Sort objects left-to-right by their horizontal start position.
    # (key parameter renamed so it no longer shadows the builtin ``slice``
    # used below.)
    indexed_slices = sorted(enumerate(slices), key=lambda item: item[1][1].start)

    letters = []
    for i, (sy, sx) in indexed_slices:
        # crop each letter separately, with a one-pixel border
        sy = slice(sy.start - 1, sy.stop + 1)
        sx = slice(sx.start - 1, sx.stop + 1)
        labletter = labelled[sy, sx]
        # keep only the pixels belonging to this label in the cropped mask
        maskletter = (labletter == (i + 1)) * mask[sy, sx]
        letter = ImageClip(image[sy, sx])
        letter.mask = ImageClip(maskletter, is_mask=True)
        letter.screenpos = np.array((sx.start, sy.start))
        letters.append(letter)

    if preview:  # pragma: no cover
        import matplotlib.pyplot as plt

        print(f"Found {num_features} objects")
        fig, ax = plt.subplots(2)
        ax[0].axis("off")
        ax[0].imshow(labelled)
        ax[1].imshow([range(num_features)], interpolation="nearest")
        ax[1].set_yticks([])
        plt.show()

    return letters
82 |
--------------------------------------------------------------------------------
/moviepy/__init__.py:
--------------------------------------------------------------------------------
1 | """Imports everything that you need from the MoviePy submodules so that every thing
2 | can be directly imported like `from moviepy import VideoFileClip`.
3 |
4 | In particular it loads all effects from the video.fx and audio.fx folders
5 | and turns them into VideoClip and AudioClip methods, so that instead of
6 | ``clip.fx(vfx.resize, 2)`` or ``vfx.resize(clip, 2)``
7 | you can write ``clip.resize(2)``.
8 | """
9 |
10 | import inspect
11 |
12 | from moviepy.audio import fx as afx
13 | from moviepy.audio.AudioClip import (
14 | AudioClip,
15 | CompositeAudioClip,
16 | concatenate_audioclips,
17 | )
18 | from moviepy.audio.io.AudioFileClip import AudioFileClip
19 | from moviepy.tools import convert_to_seconds
20 | from moviepy.version import __version__
21 | from moviepy.video import fx as vfx, tools as videotools
22 | from moviepy.video.compositing import transitions as transfx
23 | from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip, clips_array
24 | from moviepy.video.compositing.concatenate import concatenate_videoclips
25 | from moviepy.video.io import ffmpeg_tools
26 | from moviepy.video.io.downloader import download_webfile
27 | from moviepy.video.io.ImageSequenceClip import ImageSequenceClip
28 | from moviepy.video.io.VideoFileClip import VideoFileClip
29 | from moviepy.video.VideoClip import (
30 | BitmapClip,
31 | ColorClip,
32 | ImageClip,
33 | TextClip,
34 | VideoClip,
35 | )
36 |
37 |
# Transforms the effects into Clip methods so that
# they can be called with clip.resize(width=500) instead of
# clip.fx(vfx.resize, width=500)
# ``loop`` lives in vfx but is explicitly added to the audio effects too,
# so it becomes available on AudioClip as well.
audio_fxs = inspect.getmembers(afx, inspect.isfunction) + [("loop", vfx.loop)]
video_fxs = (
    inspect.getmembers(vfx, inspect.isfunction)
    + inspect.getmembers(transfx, inspect.isfunction)
    + audio_fxs
)

for name, function in video_fxs:
    setattr(VideoClip, name, function)

for name, function in audio_fxs:
    setattr(AudioClip, name, function)


# Placeholder preview/show methods: the working implementations are only
# attached when importing from moviepy.editor (they require pygame).
def preview(self, *args, **kwargs):
    """NOT AVAILABLE: clip.preview requires importing from moviepy.editor"""
    raise ImportError("clip.preview requires importing from moviepy.editor")


def show(self, *args, **kwargs):
    """NOT AVAILABLE: clip.show requires importing from moviepy.editor"""
    raise ImportError("clip.show requires importing from moviepy.editor")


VideoClip.preview = preview
VideoClip.show = show
AudioClip.preview = preview

# Cleanup namespace
del audio_fxs, video_fxs, name, function, preview, show
del inspect

# Importing with `from moviepy import *` will only import these names
__all__ = [
    "__version__",
    "VideoClip",
    "ImageClip",
    "ColorClip",
    "TextClip",
    "BitmapClip",
    "VideoFileClip",
    "CompositeVideoClip",
    "clips_array",
    "ImageSequenceClip",
    "concatenate_videoclips",
    "download_webfile",
    "AudioClip",
    "CompositeAudioClip",
    "concatenate_audioclips",
    "AudioFileClip",
    "vfx",
    "afx",
    "transfx",
    "videotools",
    "ffmpeg_tools",
    "convert_to_seconds",
]
98 |
--------------------------------------------------------------------------------
/moviepy/video/compositing/transitions.py:
--------------------------------------------------------------------------------
1 | """Here is the current catalogue. These are meant to be used with ``clip.fx``
2 | There are available as ``transfx.crossfadein`` etc.
3 | """
4 |
5 | from moviepy.decorators import add_mask_if_none, requires_duration
6 | from moviepy.video.fx.fadein import fadein
7 | from moviepy.video.fx.fadeout import fadeout
8 |
9 |
# Public transition effects, exposed as ``transfx.<name>``.
__all__ = ["crossfadein", "crossfadeout", "slide_in", "slide_out"]
11 |
12 |
@requires_duration
@add_mask_if_none
def crossfadein(clip, duration):
    """Makes the clip appear progressively, over ``duration`` seconds.
    Only works when the clip is included in a CompositeVideoClip.
    """
    # The mask must carry a duration for the fade effect to work.
    clip.mask.duration = clip.duration
    faded = clip.copy()
    faded.mask = clip.mask.fx(fadein, duration)
    return faded
23 |
24 |
@requires_duration
@add_mask_if_none
def crossfadeout(clip, duration):
    """Makes the clip disappear progressively, over ``duration`` seconds.
    Only works when the clip is included in a CompositeVideoClip.
    """
    # The mask must carry a duration for the fade effect to work.
    clip.mask.duration = clip.duration
    faded = clip.copy()
    faded.mask = clip.mask.fx(fadeout, duration)
    return faded
35 |
36 |
def slide_in(clip, duration, side):
    """Makes the clip arrive from one side of the screen.

    Only works when the clip is included in a CompositeVideoClip,
    and if the clip has the same size as the whole composition.

    Parameters
    ----------

    clip : moviepy.Clip.Clip
        A video clip.

    duration : float
        Time taken for the clip to be fully visible

    side : str
        Side of the screen where the clip comes from. One of
        'top', 'bottom', 'left' or 'right'.

    Examples
    --------

    >>> from moviepy import *
    >>>
    >>> clips = [... make a list of clips]
    >>> slided_clips = [
    ...     CompositeVideoClip([clip.fx(transfx.slide_in, 1, "left")])
    ...     for clip in clips
    ... ]
    >>> final_clip = concatenate_videoclips(slided_clips, padding=-1)
    >>>
    >>> clip = ColorClip(
    ...     color=(255, 0, 0), duration=1, size=(300, 300)
    ... ).with_fps(60)
    >>> final_clip = CompositeVideoClip([transfx.slide_in(clip, 1, "right")])
    """
    w, h = clip.size

    # One time->position function per entry side; the offset shrinks to 0
    # as t approaches ``duration``.
    def from_left(t):
        return (min(0, w * (t / duration - 1)), "center")

    def from_right(t):
        return (max(0, w * (1 - t / duration)), "center")

    def from_top(t):
        return ("center", min(0, h * (t / duration - 1)))

    def from_bottom(t):
        return ("center", max(0, h * (1 - t / duration)))

    positions = {
        "left": from_left,
        "right": from_right,
        "top": from_top,
        "bottom": from_bottom,
    }
    return clip.with_position(positions[side])
82 |
83 |
@requires_duration
def slide_out(clip, duration, side):
    """Makes the clip go away by one side of the screen.

    Only works when the clip is included in a CompositeVideoClip,
    and if the clip has the same size as the whole composition.

    Parameters
    ----------

    clip : moviepy.Clip.Clip
        A video clip.

    duration : float
        Time taken for the clip to fully disappear.

    side : str
        Side of the screen where the clip goes. One of
        'top', 'bottom', 'left' or 'right'.

    Examples
    --------

    >>> clips = [... make a list of clips]
    >>> slided_clips = [
    ...     CompositeVideoClip([clip.fx(transfx.slide_out, 1, "left")])
    ...     for clip in clips
    ... ]
    >>> final_clip = concatenate_videoclips(slided_clips, padding=-1)
    >>>
    >>> clip = ColorClip(
    ...     color=(255, 0, 0), duration=1, size=(300, 300)
    ... ).with_fps(60)
    >>> final_clip = CompositeVideoClip([transfx.slide_out(clip, 1, "right")])
    """
    w, h = clip.size
    start = clip.duration - duration  # moment the slide-out begins

    # One time->position function per exit side; the offset grows from 0
    # once t passes ``start``.
    def to_left(t):
        return (min(0, w * (-(t - start) / duration)), "center")

    def to_right(t):
        return (max(0, w * ((t - start) / duration)), "center")

    def to_top(t):
        return ("center", min(0, h * (-(t - start) / duration)))

    def to_bottom(t):
        return ("center", max(0, h * ((t - start) / duration)))

    positions = {
        "left": to_left,
        "right": to_right,
        "top": to_top,
        "bottom": to_bottom,
    }
    return clip.with_position(positions[side])
129 |
--------------------------------------------------------------------------------
/moviepy/video/tools/credits.py:
--------------------------------------------------------------------------------
1 | """Contains different functions to make end and opening credits, even though it is
2 | difficult to fill everyone needs in this matter.
3 | """
4 | from moviepy.decorators import convert_path_to_string
5 | from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
6 | from moviepy.video.fx.resize import resize
7 | from moviepy.video.VideoClip import ImageClip, TextClip
8 |
9 |
class CreditsClip(TextClip):
    """Credits clip.

    Parameters
    ----------

    creditfile
        A string or path like object pointing to a text file
        whose content must be as follows: ::

            # This is a comment
            # The next line says : leave 4 blank lines
            .blank 4

            ..Executive Story Editor
            MARCEL DURAND

            ..Associate Producers
            MARTIN MARCEL
            DIDIER MARTIN

            ..Music Supervisor
            JEAN DIDIER


    width
        Total width of the credits text in pixels

    stretch
        NOTE(review): accepted by the constructor but never used in the
        body below — confirm whether it can be removed or should be wired in.

    gap
        Horizontal gap in pixels between the jobs and the names

    color
        Color of the text. See ``TextClip.list('color')``
        for a list of acceptable names.

    font
        Name of the font to use. See ``TextClip.list('font')`` for
        the list of fonts you can use on your computer.

    font_size
        Size of font to use

    stroke_color
        Color of the stroke (=contour line) of the text. If ``None``,
        there will be no stroke.

    stroke_width
        Width of the stroke, in pixels. Can be a float, like 1.5.

    bg_color
        Color of the background. If ``None``, the background will be transparent.

    Returns
    -------

    image
        An ImageClip instance that looks like this and can be scrolled
        to make some credits: ::

            Executive Story Editor MARCEL DURAND
            Associate Producers MARTIN MARCEL
                                DIDIER MARTIN
            Music Supervisor JEAN DIDIER

    """

    @convert_path_to_string("creditfile")
    def __init__(
        self,
        creditfile,
        width,
        stretch=30,
        color="white",
        stroke_color="black",
        stroke_width=2,
        font="Impact-Normal",
        font_size=60,
        bg_color=None,
        gap=0,
    ):
        # Parse the .txt file
        # ``texts`` collects [job, name] pairs, one per output line.
        texts = []
        # ``one_line`` tracks whether the next name starts a new visual line
        # (True) or continues the previous job's name list (False).
        one_line = True

        with open(creditfile) as file:
            for line in file:
                if line.startswith(("\n", "#")):
                    # exclude blank lines or comments
                    continue
                elif line.startswith(".blank"):
                    # ..blank n
                    for i in range(int(line.split(" ")[1])):
                        texts.append(["\n", "\n"])
                elif line.startswith(".."):
                    # A "..Job Title" line: left column text, empty right column.
                    texts.append([line[2:], ""])
                    one_line = True
                elif one_line:
                    # First name after a job title: shares the job's line.
                    texts.append(["", line])
                    one_line = False
                else:
                    # Additional names: new line on the right, blank on the left.
                    texts.append(["\n", line])

        left, right = ("".join(line) for line in zip(*texts))

        # Make two columns for the credits
        left, right = [
            TextClip(
                txt,
                color=color,
                stroke_color=stroke_color,
                stroke_width=stroke_width,
                font=font,
                font_size=font_size,
                align=align,
            )
            for txt, align in [(left, "East"), (right, "West")]
        ]

        both_columns = CompositeVideoClip(
            [left, right.with_position((left.w + gap, 0))],
            size=(left.w + right.w + gap, right.h),
            bg_color=bg_color,
        )

        # Scale to the required size
        scaled = resize(both_columns, width=width)

        # Transform the CompositeVideoClip into an ImageClip

        # Calls ImageClip.__init__() (skipping TextClip.__init__, since the
        # rendered frame is already available)
        super(TextClip, self).__init__(scaled.get_frame(0))
        self.mask = ImageClip(scaled.mask.get_frame(0), is_mask=True)
142 |
--------------------------------------------------------------------------------
/moviepy/config.py:
--------------------------------------------------------------------------------
1 | """Third party programs configuration for MoviePy."""
2 |
3 | import os
4 | import subprocess as sp
5 | from pathlib import Path
6 |
7 | from moviepy.tools import cross_platform_popen_params
8 |
9 |
# winreg is only needed (and only available) on Windows, to locate
# ImageMagick through the registry below.
if os.name == "nt":
    import winreg as wr

# Optionally load a .env file so users can configure the binary paths
# without touching their shell environment; python-dotenv is optional.
try:
    from dotenv import find_dotenv, load_dotenv

    DOTENV = find_dotenv()
    load_dotenv(DOTENV)
except ImportError:
    DOTENV = None

# Binary locations, overridable through environment variables (or .env).
FFMPEG_BINARY = os.getenv("FFMPEG_BINARY", "ffmpeg-imageio")
IMAGEMAGICK_BINARY = os.getenv("IMAGEMAGICK_BINARY", "auto-detect")

IS_POSIX_OS = os.name == "posix"
25 |
26 |
def try_cmd(cmd):
    """Try to launch the command ``cmd`` in a subprocess.

    Parameters
    ----------

    cmd : list of str
        Command and arguments, as accepted by ``subprocess.Popen``.

    Returns
    -------

    tuple
        ``(True, None)`` if the command could be launched and waited on,
        ``(False, err)`` with the raised exception otherwise.
    """
    try:
        popen_params = cross_platform_popen_params(
            {"stdout": sp.PIPE, "stderr": sp.PIPE, "stdin": sp.DEVNULL}
        )
        proc = sp.Popen(cmd, **popen_params)
        # Wait for completion and drain the pipes so the process can exit.
        proc.communicate()
    except Exception as err:
        return False, err
    else:
        return True, None
39 |
40 |
# Resolve FFMPEG_BINARY: use imageio's bundled ffmpeg, auto-detect one on
# the PATH, or validate an explicitly configured path.
if FFMPEG_BINARY == "ffmpeg-imageio":
    from imageio.plugins.ffmpeg import get_exe

    FFMPEG_BINARY = get_exe()

elif FFMPEG_BINARY == "auto-detect":

    if try_cmd(["ffmpeg"])[0]:
        FFMPEG_BINARY = "ffmpeg"
    elif not IS_POSIX_OS and try_cmd(["ffmpeg.exe"])[0]:
        FFMPEG_BINARY = "ffmpeg.exe"
    else:  # pragma: no cover
        FFMPEG_BINARY = "unset"
else:
    # User-supplied path: fail early if it cannot be launched.
    success, err = try_cmd([FFMPEG_BINARY])
    if not success:
        raise IOError(
            f"{err} - The path specified for the ffmpeg binary might be wrong"
        )

# Resolve IMAGEMAGICK_BINARY: registry / filesystem probing on Windows,
# PATH lookup elsewhere, or validate an explicitly configured path.
if IMAGEMAGICK_BINARY == "auto-detect":
    if os.name == "nt":
        # Try a few different ways of finding the ImageMagick binary on Windows
        try:
            key = wr.OpenKey(wr.HKEY_LOCAL_MACHINE, "SOFTWARE\\ImageMagick\\Current")
            IMAGEMAGICK_BINARY = wr.QueryValueEx(key, "BinPath")[0] + r"\magick.exe"
            key.Close()
        except Exception:
            # Registry lookup failed: probe the default install directories.
            for imagemagick_filename in ["convert.exe", "magick.exe"]:
                try:
                    imagemagick_path = sp.check_output(
                        r'dir /B /O-N "C:\\Program Files\\ImageMagick-*"',
                        shell=True,
                        encoding="utf-8",
                    ).split("\n")[0]
                    IMAGEMAGICK_BINARY = sp.check_output(  # pragma: no cover
                        rf'dir /B /S "C:\Program Files\{imagemagick_path}\\'
                        f'*{imagemagick_filename}"',
                        shell=True,
                        encoding="utf-8",
                    ).split("\n")[0]
                    break
                except Exception:
                    IMAGEMAGICK_BINARY = "unset"

    if IMAGEMAGICK_BINARY in ["unset", "auto-detect"]:
        if try_cmd(["convert"])[0]:
            IMAGEMAGICK_BINARY = "convert"
        elif not IS_POSIX_OS and try_cmd(["convert.exe"])[0]:  # pragma: no cover
            IMAGEMAGICK_BINARY = "convert.exe"
        else:  # pragma: no cover
            IMAGEMAGICK_BINARY = "unset"
else:
    # User-supplied path: make sure it exists, is a file, and runs.
    if not os.path.exists(IMAGEMAGICK_BINARY):
        raise IOError(f"ImageMagick binary cannot be found at {IMAGEMAGICK_BINARY}")

    if not os.path.isfile(IMAGEMAGICK_BINARY):
        raise IOError(f"ImageMagick binary found at {IMAGEMAGICK_BINARY} is not a file")
    success, err = try_cmd([IMAGEMAGICK_BINARY])
    if not success:
        raise IOError(
            f"{err} - The path specified for the ImageMagick binary might "
            f"be wrong: {IMAGEMAGICK_BINARY}"
        )
105 |
106 |
def check():
    """Check if moviepy has found the binaries of FFmpeg and ImageMagick."""
    # Probe both external tools the same way and report each result.
    for tool_name, binary in (
        ("ffmpeg", FFMPEG_BINARY),
        ("ImageMagick", IMAGEMAGICK_BINARY),
    ):
        if try_cmd([binary])[0]:
            print(f"MoviePy: {tool_name} successfully found in '{binary}'.")
        else:  # pragma: no cover
            print(f"MoviePy: can't find or access {tool_name} in '{binary}'.")

    if DOTENV:
        print(f"\n.env file content at {DOTENV}:\n")
        print(Path(DOTENV).read_text())
122 |
123 |
# Allow running this module directly to print the detected binary paths.
if __name__ == "__main__":  # pragma: no cover
    check()
126 |
--------------------------------------------------------------------------------
/moviepy/video/compositing/concatenate.py:
--------------------------------------------------------------------------------
1 | """Video clips concatenation."""
2 |
3 | from functools import reduce
4 |
5 | import numpy as np
6 |
7 | from moviepy.audio.AudioClip import CompositeAudioClip
8 | from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
9 | from moviepy.video.VideoClip import ColorClip, VideoClip
10 |
11 |
def concatenate_videoclips(
    clips, method="chain", transition=None, bg_color=None, is_mask=False, padding=0
):
    """Concatenates several video clips.

    Returns a video clip made by clip by concatenating several video clips.
    (Concatenated means that they will be played one after another).

    There are two methods:

    - method="chain": will produce a clip that simply outputs
      the frames of the successive clips, without any correction if they are
      not of the same size of anything. If none of the clips have masks the
      resulting clip has no mask, else the mask is a concatenation of masks
      (using completely opaque for clips that don't have masks, obviously).
      If you have clips of different size and you want to write directly the
      result of the concatenation to a file, use the method "compose" instead.

    - method="compose", if the clips do not have the same resolution, the final
      resolution will be such that no clip has to be resized.
      As a consequence the final clip has the height of the highest clip and the
      width of the widest clip of the list. All the clips with smaller dimensions
      will appear centered. The border will be transparent if mask=True, else it
      will be of the color specified by ``bg_color``.

    The clip with the highest FPS will be the FPS of the result clip.

    Parameters
    ----------
    clips
      A list of video clips which must all have their ``duration``
      attributes set.
    method
      "chain" or "compose": see above.
    transition
      A clip that will be played between each two clips of the list.

    bg_color
      Only for method='compose'. Color of the background.
      Set to None for a transparent clip

    padding
      Only for method='compose'. Duration during two consecutive clips.
      Note that for negative padding, a clip will partly play at the same
      time as the clip it follows (negative padding is cool for clips who fade
      in on one another). A non-null padding automatically sets the method to
      `compose`.

    Raises
    ------
    ValueError
      If ``method`` is neither "chain" nor "compose".
    """
    if transition is not None:
        # Interleave the transition clip between consecutive clips, then
        # flatten: [c0, T, c1, T, ..., c_last].
        clip_transition_pairs = [[v, transition] for v in clips[:-1]]
        clips = reduce(lambda x, y: x + y, clip_transition_pairs) + [clips[-1]]
        transition = None

    # Cumulative start time of each clip (last entry = total duration).
    timings = np.cumsum([0] + [clip.duration for clip in clips])

    sizes = [clip.size for clip in clips]

    # Output canvas large enough for the widest and tallest clip.
    w = max(size[0] for size in sizes)
    h = max(size[1] for size in sizes)

    # Shift each start by the accumulated padding; clamp at 0 so a negative
    # padding cannot push a start time before the beginning.
    timings = np.maximum(0, timings + padding * np.arange(len(timings)))
    timings[-1] -= padding  # Last element is the duration of the whole

    if method == "chain":

        def make_frame(t):
            # Index of the last clip whose start time is <= t.
            i = max([i for i, e in enumerate(timings) if e <= t])
            return clips[i].get_frame(t - timings[i])

        def get_mask(clip):
            # Clips without a mask get a fully-opaque 1x1 stand-in.
            mask = clip.mask or ColorClip([1, 1], color=1, is_mask=True)
            if mask.duration is None:
                mask.duration = clip.duration
            return mask

        result = VideoClip(is_mask=is_mask, make_frame=make_frame)
        if any(clip.mask is not None for clip in clips):
            masks = [get_mask(clip) for clip in clips]
            result.mask = concatenate_videoclips(masks, method="chain", is_mask=True)
        result.clips = clips
    elif method == "compose":
        result = CompositeVideoClip(
            [
                clip.with_start(t).with_position("center")
                for (clip, t) in zip(clips, timings)
            ],
            size=(w, h),
            bg_color=bg_color,
            is_mask=is_mask,
        )
    else:
        # ValueError (an Exception subclass) keeps existing `except Exception`
        # callers working while being the idiomatic type for a bad argument.
        raise ValueError(
            "Moviepy Error: The 'method' argument of "
            "concatenate_videoclips must be 'chain' or 'compose'"
        )

    result.timings = timings

    result.start_times = timings[:-1]
    result.start, result.duration, result.end = 0, timings[-1], timings[-1]

    # Compose the audio tracks of the clips that have one.
    audio_t = [
        (clip.audio, t) for clip, t in zip(clips, timings) if clip.audio is not None
    ]
    if audio_t:
        result.audio = CompositeAudioClip([a.with_start(t) for a, t in audio_t])

    fpss = [clip.fps for clip in clips if getattr(clip, "fps", None) is not None]
    result.fps = max(fpss) if fpss else None
    return result
123 |
--------------------------------------------------------------------------------
/moviepy/decorators.py:
--------------------------------------------------------------------------------
1 | """Decorators used by moviepy."""
2 | import inspect
3 | import os
4 |
5 | import decorator
6 |
7 | from moviepy.tools import convert_to_seconds
8 |
9 |
@decorator.decorator
def outplace(func, clip, *args, **kwargs):
    """Run ``func`` in place on a copy of ``clip`` and return the copy."""
    duplicate = clip.copy()
    func(duplicate, *args, **kwargs)
    return duplicate
16 |
17 |
@decorator.decorator
def convert_masks_to_RGB(func, clip, *args, **kwargs):
    """If the clip is a mask, convert it to RGB before running the function."""
    target = clip.to_RGB() if clip.is_mask else clip
    return func(target, *args, **kwargs)
24 |
25 |
@decorator.decorator
def apply_to_mask(func, clip, *args, **kwargs):
    """Applies the same function ``func`` to the mask of the clip created with
    ``func``.
    """
    result = func(clip, *args, **kwargs)
    mask = getattr(result, "mask", None)
    if mask:
        result.mask = func(mask, *args, **kwargs)
    return result
35 |
36 |
@decorator.decorator
def apply_to_audio(func, clip, *args, **kwargs):
    """Applies the function ``func`` to the audio of the clip created with ``func``."""
    result = func(clip, *args, **kwargs)
    audio = getattr(result, "audio", None)
    if audio:
        result.audio = func(audio, *args, **kwargs)
    return result
44 |
45 |
@decorator.decorator
def requires_duration(func, clip, *args, **kwargs):
    """Raises an error if the clip has no duration."""
    # Guard clause: refuse to run the wrapped function on an unbounded clip.
    if clip.duration is None:
        raise ValueError("Attribute 'duration' not set")
    return func(clip, *args, **kwargs)
53 |
54 |
@decorator.decorator
def requires_fps(func, clip, *args, **kwargs):
    """Raises an error if the clip has no fps."""
    # Missing attribute and explicit None are treated the same way.
    if getattr(clip, "fps", None) is None:
        raise ValueError("Attribute 'fps' not set")
    return func(clip, *args, **kwargs)
62 |
63 |
@decorator.decorator
def audio_video_fx(func, clip, *args, **kwargs):
    """Use an audio function on a video/audio clip.

    This decorator tells that the function func (audioclip -> audioclip)
    can be also used on a video clip, at which case it returns a
    videoclip with unmodified video and modified audio.
    """
    # A clip without an `audio` attribute is an audio clip: apply directly.
    if not hasattr(clip, "audio"):
        return func(clip, *args, **kwargs)

    # Video clip: copy it and transform only the audio track, if present.
    result = clip.copy()
    if clip.audio is not None:
        result.audio = func(clip.audio, *args, **kwargs)
    return result
79 |
80 |
def preprocess_args(fun, varnames):
    """Applies fun to variables in varnames before launching the function.

    Decorator factory: the returned decorator runs ``fun`` on every
    argument (positional or keyword) whose name appears in ``varnames``,
    forwarding ``None`` values untouched, before calling the wrapped
    function.
    """

    def wrapper(func, *args, **kwargs):
        names = inspect.getfullargspec(func).args
        new_args = [
            fun(arg) if (name in varnames) and (arg is not None) else arg
            for (arg, name) in zip(args, names)
        ]
        # Mirror the positional branch: skip `fun` for None values too, so
        # converters that reject None (e.g. os.fspath) don't blow up on an
        # explicitly-passed keyword default like `outputfile=None`.
        new_kwargs = {
            kwarg: fun(value) if (kwarg in varnames) and (value is not None) else value
            for (kwarg, value) in kwargs.items()
        }
        return func(*new_args, **new_kwargs)

    return decorator.decorator(wrapper)
97 |
98 |
def convert_parameter_to_seconds(varnames):
    """Converts the specified variables to seconds.

    Decorator factory: the returned decorator runs ``convert_to_seconds``
    on every argument whose name appears in ``varnames`` before the
    decorated function is called.
    """
    return preprocess_args(convert_to_seconds, varnames)
102 |
103 |
def convert_path_to_string(varnames):
    """Converts the specified variables to a path string.

    Decorator factory: the returned decorator runs ``os.fspath`` on every
    argument whose name appears in ``varnames``, so decorated functions
    accept path-like objects as well as plain strings.
    """
    return preprocess_args(os.fspath, varnames)
107 |
108 |
@decorator.decorator
def add_mask_if_none(func, clip, *args, **kwargs):
    """Add a mask to the clip if there is none."""
    target = clip if clip.mask is not None else clip.add_mask()
    return func(target, *args, **kwargs)
115 |
116 |
@decorator.decorator
def use_clip_fps_by_default(func, clip, *args, **kwargs):
    """Will use ``clip.fps`` if no ``fps=...`` is provided in **kwargs**."""

    def find_fps(fps):
        # Keep an explicitly passed value; otherwise fall back to the clip's
        # own fps attribute (when present and truthy, i.e. set and non-zero).
        if fps is not None:
            return fps
        elif getattr(clip, "fps", None):
            return clip.fps
        raise AttributeError(
            "No 'fps' (frames per second) attribute specified"
            " for function %s and the clip has no 'fps' attribute. Either"
            " provide e.g. fps=24 in the arguments of the function, or define"
            " the clip's fps with `clip.fps=24`" % func.__name__
        )

    # Parameter names of the decorated function, minus the leading `clip`.
    names = inspect.getfullargspec(func).args[1:]

    # Substitute a resolved fps wherever the 'fps' parameter appears, whether
    # it was passed positionally or by keyword.
    new_args = [
        find_fps(arg) if (name == "fps") else arg for (arg, name) in zip(args, names)
    ]
    new_kwargs = {
        kwarg: find_fps(value) if kwarg == "fps" else value
        for (kwarg, value) in kwargs.items()
    }

    return func(clip, *new_args, **new_kwargs)
144 |
--------------------------------------------------------------------------------
/moviepy/tools.py:
--------------------------------------------------------------------------------
1 | """Misc. useful functions that can be used at many places in the program."""
2 | import os
3 | import subprocess as sp
4 | import warnings
5 |
6 | import proglog
7 |
8 |
9 | OS_NAME = os.name
10 |
11 |
def cross_platform_popen_params(popen_params):
    """Wrap with this function a dictionary of ``subprocess.Popen`` kwargs and
    will be ready to work without unexpected behaviours in any platform.
    Currently, the implementation will add to them:

    - ``creationflags=0x08000000`` (CREATE_NO_WINDOW): no extra unwanted
      window opens on Windows when the child process is created. Only added
      on Windows.

    Note: the dictionary is modified in place and also returned.
    """
    if OS_NAME == "nt":
        popen_params["creationflags"] = 0x08000000
    return popen_params
23 |
24 |
def subprocess_call(cmd, logger="bar"):
    """Executes the given subprocess command.

    Parameters
    ----------
    cmd : list
        Program and arguments to run.
    logger
        Set logger to None or a custom Proglog logger to avoid printings.

    Raises
    ------
    IOError
        If the command exits with a non-zero return code; the message is
        the command's decoded stderr output.
    """
    logger = proglog.default_bar_logger(logger)
    logger(message="Moviepy - Running:\n>>> " + " ".join(cmd))

    popen_params = cross_platform_popen_params(
        {"stdout": sp.DEVNULL, "stderr": sp.PIPE, "stdin": sp.DEVNULL}
    )
    process = sp.Popen(cmd, **popen_params)

    # stdout is discarded; only stderr is captured for error reporting.
    _, stderr_output = process.communicate()
    process.stderr.close()

    if process.returncode:
        logger(message="Moviepy - Command returned an error")
        raise IOError(stderr_output.decode("utf8"))
    logger(message="Moviepy - Command successful")

    del process
49 |
50 |
51 | def convert_to_seconds(time):
52 | """Will convert any time into seconds.
53 |
54 | If the type of `time` is not valid,
55 | it's returned as is.
56 |
57 | Here are the accepted formats:
58 |
59 | >>> convert_to_seconds(15.4) # seconds
60 | 15.4
61 | >>> convert_to_seconds((1, 21.5)) # (min,sec)
62 | 81.5
63 | >>> convert_to_seconds((1, 1, 2)) # (hr, min, sec)
64 | 3662
65 | >>> convert_to_seconds('01:01:33.045')
66 | 3693.045
67 | >>> convert_to_seconds('01:01:33,5') # coma works too
68 | 3693.5
69 | >>> convert_to_seconds('1:33,5') # only minutes and secs
70 | 99.5
71 | >>> convert_to_seconds('33.5') # only secs
72 | 33.5
73 | """
74 | factors = (1, 60, 3600)
75 |
76 | if isinstance(time, str):
77 | time = [float(part.replace(",", ".")) for part in time.split(":")]
78 |
79 | if not isinstance(time, (tuple, list)):
80 | return time
81 |
82 | return sum(mult * part for mult, part in zip(factors, reversed(time)))
83 |
84 |
def deprecated_version_of(func, old_name):
    """Indicates that a function is deprecated and has a new name.

    `func` is the new function and `old_name` is the name of the deprecated
    function.

    Returns
    -------

    deprecated_func
      A function that does the same thing as `func`, but with a docstring
      and a printed message on call which say that the function is
      deprecated and that you should use `func` instead.

    Examples
    --------

    >>> # The badly named method 'to_file' is replaced by 'write_file'
    >>> class Clip:
    >>>    def write_file(self, some args):
    >>>        # blablabla
    >>>
    >>> Clip.to_file = deprecated_version_of(Clip.write_file, 'to_file')
    """
    new_name = func.__name__  # detect the replacement's name

    warning = (
        f"The function ``{old_name}`` is deprecated and is kept temporarily "
        "for backwards compatibility.\nPlease use the new name, "
        f"``{new_name}``, instead."
    )

    def deprecated_func(*args, **kwargs):
        # Warn on every call, then delegate unchanged.
        warnings.warn("MoviePy: " + warning, PendingDeprecationWarning)
        return func(*args, **kwargs)

    # The warning text doubles as the wrapper's documentation.
    deprecated_func.__doc__ = warning

    return deprecated_func
125 |
126 |
# Non-exhaustive dictionary to store default information.
# Any addition is most welcome.
# Note that 'gif' is complicated to place. From a VideoFileClip point of view,
# it is a video, but from a HTML5 point of view, it is an image.

# Maps a file extension to its media type and, when known, the codec names
# associated with it (used by `find_extension` for reverse lookup).
extensions_dict = {
    "mp4": {"type": "video", "codec": ["libx264", "libmpeg4", "aac"]},
    "mkv": {"type": "video", "codec": ["libx264", "libmpeg4", "aac"]},
    "ogv": {"type": "video", "codec": ["libtheora"]},
    "webm": {"type": "video", "codec": ["libvpx"]},
    "avi": {"type": "video"},
    "mov": {"type": "video"},
    "ogg": {"type": "audio", "codec": ["libvorbis"]},
    "mp3": {"type": "audio", "codec": ["libmp3lame"]},
    "wav": {"type": "audio", "codec": ["pcm_s16le", "pcm_s24le", "pcm_s32le"]},
    "m4a": {"type": "audio", "codec": ["libfdk_aac"]},
}

# Still-image formats carry only a media type, no codec list.
for ext in ["jpg", "jpeg", "png", "bmp", "tiff"]:
    extensions_dict[ext] = {"type": "image"}
147 |
148 |
def find_extension(codec):
    """Returns the correspondent file extension for a codec.

    Parameters
    ----------

    codec : str
      Video or audio codec name.

    Raises
    ------
    ValueError
      If the codec is not registered in ``extensions_dict``.
    """
    if codec in extensions_dict:
        # The caller already gave us an extension.
        return codec

    # Otherwise search the registered codec lists for a match.
    match = next(
        (
            extension
            for extension, info in extensions_dict.items()
            if codec in info.get("codec", [])
        ),
        None,
    )
    if match is not None:
        return match

    raise ValueError(
        "The audio_codec you chose is unknown by MoviePy. "
        "You should report this. In the meantime, you can "
        "specify a temp_audiofile with the right extension "
        "in write_videofile."
    )
171 |
--------------------------------------------------------------------------------
/moviepy/video/io/ImageSequenceClip.py:
--------------------------------------------------------------------------------
1 | """Implements ImageSequenceClip, a class to create a video clip from a set
2 | of image files.
3 | """
4 |
5 | import os
6 |
7 | import numpy as np
8 | from imageio import imread
9 |
10 | from moviepy.video.VideoClip import VideoClip
11 |
12 |
class ImageSequenceClip(VideoClip):
    """A VideoClip made from a series of images.

    Parameters
    ----------

    sequence
      Can be one of these:

      - The name of a folder (containing only pictures). The pictures
        will be considered in alphanumerical order.
      - A list of names of image files. In this case you can choose to
        load the pictures in memory
      - A list of Numpy arrays representing images. In this last case,
        masks are not supported currently.

    fps
      Number of picture frames to read per second. Instead, you can provide
      the duration of each image with durations (see below)

    durations
      List of the duration of each picture.

    with_mask
      Should the alpha layer of PNG images be considered as a mask ?

    is_mask
      Will this sequence of pictures be used as an animated mask.
    """

    def __init__(
        self,
        sequence,
        fps=None,
        durations=None,
        with_mask=True,
        is_mask=False,
        load_images=False,
    ):

        # CODE WRITTEN AS IT CAME, MAY BE IMPROVED IN THE FUTURE

        if (fps is None) and (durations is None):
            raise ValueError("Please provide either 'fps' or 'durations'.")
        VideoClip.__init__(self, is_mask=is_mask)

        # Parse the data

        # `fromfiles` tells whether frames must be decoded from disk on
        # demand (True) or are already numpy arrays in memory (False).
        fromfiles = True

        if isinstance(sequence, list):
            if isinstance(sequence[0], str):
                if load_images:
                    # Eagerly decode every file now, keep everything in RAM.
                    sequence = [imread(file) for file in sequence]
                    fromfiles = False
                else:
                    fromfiles = True
            else:
                # sequence is already a list of numpy arrays
                fromfiles = False
        else:
            # sequence is a folder name, make it a list of files:
            fromfiles = True
            sequence = sorted(
                [os.path.join(sequence, file) for file in os.listdir(sequence)]
            )

        # check that all the images are of the same size
        if isinstance(sequence[0], str):
            size = imread(sequence[0]).shape
        else:
            size = sequence[0].shape

        for image in sequence:
            image1 = image
            if isinstance(image, str):
                image1 = imread(image)
            if size != image1.shape:
                raise Exception(
                    "Moviepy: ImageSequenceClip requires all images to be the same size"
                )

        self.fps = fps
        if fps is not None:
            durations = [1.0 / fps for image in sequence]
            # Start times are shifted left by a float32 epsilon so that a
            # lookup at an exact multiple of 1/fps resolves to that image.
            self.images_starts = [
                1.0 * i / fps - np.finfo(np.float32).eps for i in range(len(sequence))
            ]
        else:
            self.images_starts = [0] + list(np.cumsum(durations))
        self.durations = durations
        self.duration = sum(durations)
        self.end = self.duration
        self.sequence = sequence

        def find_image_index(t):
            # Index of the last image whose start time is <= t.
            return max(
                [i for i in range(len(self.sequence)) if self.images_starts[i] <= t]
            )

        if fromfiles:

            # Cache the last decoded frame so that repeated lookups of the
            # same index do not trigger a new disk read.
            self.last_index = None
            self.last_image = None

            def make_frame(t):

                index = find_image_index(t)

                if index != self.last_index:
                    # Drop any alpha channel; only RGB goes into the frame.
                    self.last_image = imread(self.sequence[index])[:, :, :3]
                    self.last_index = index

                return self.last_image

            if with_mask and (imread(self.sequence[0]).shape[2] == 4):

                # 4-channel images: expose the alpha channel as the clip's
                # mask, with its own one-frame cache.
                self.mask = VideoClip(is_mask=True)
                self.mask.last_index = None
                self.mask.last_image = None

                def mask_make_frame(t):

                    index = find_image_index(t)
                    if index != self.mask.last_index:
                        frame = imread(self.sequence[index])[:, :, 3]
                        # Mask values are floats in [0, 1].
                        self.mask.last_image = frame.astype(float) / 255
                        self.mask.last_index = index

                    return self.mask.last_image

                self.mask.make_frame = mask_make_frame
                self.mask.size = mask_make_frame(0).shape[:2][::-1]

        else:

            def make_frame(t):

                index = find_image_index(t)
                return self.sequence[index][:, :, :3]

            if with_mask and (self.sequence[0].shape[2] == 4):

                # 4-channel arrays: expose the alpha channel as the mask.
                self.mask = VideoClip(is_mask=True)

                def mask_make_frame(t):
                    index = find_image_index(t)
                    return 1.0 * self.sequence[index][:, :, 3] / 255

                self.mask.make_frame = mask_make_frame
                self.mask.size = mask_make_frame(0).shape[:2][::-1]

        self.make_frame = make_frame
        # Frame arrays are (height, width[, channels]); clip size is (w, h).
        self.size = make_frame(0).shape[:2][::-1]
167 |
--------------------------------------------------------------------------------
/moviepy/video/io/ffmpeg_tools.py:
--------------------------------------------------------------------------------
1 | """Miscellaneous bindings to ffmpeg."""
2 |
3 | import os
4 |
5 | from moviepy.config import FFMPEG_BINARY
6 | from moviepy.decorators import convert_parameter_to_seconds, convert_path_to_string
7 | from moviepy.tools import subprocess_call
8 |
9 |
@convert_path_to_string(("inputfile", "outputfile"))
@convert_parameter_to_seconds(("start_time", "end_time"))
def ffmpeg_extract_subclip(
    inputfile, start_time, end_time, outputfile=None, logger="bar"
):
    """Makes a new video file playing video file between two times.

    Parameters
    ----------

    inputfile : str
      Path to the file from which the subclip will be extracted.

    start_time : float
      Moment of the input clip that marks the start of the produced subclip.

    end_time : float
      Moment of the input clip that marks the end of the produced subclip.

    outputfile : str, optional
      Path to the output file. Defaults to the input file name with a
      ``SUB<start_ms>_<end_ms>`` tag inserted before the extension.
    """
    if not outputfile:
        # Build a default name carrying both cut points in milliseconds.
        name, ext = os.path.splitext(inputfile)
        t1, t2 = int(1000 * start_time), int(1000 * end_time)
        outputfile = "%sSUB%d_%d%s" % (name, t1, t2, ext)

    # Stream-copy (no re-encode) the requested time window of every stream.
    cmd = (
        [FFMPEG_BINARY, "-y"]
        + ["-ss", "%0.2f" % start_time]
        + ["-i", inputfile]
        + ["-t", "%0.2f" % (end_time - start_time)]
        + ["-map", "0", "-vcodec", "copy", "-acodec", "copy", "-copyts"]
        + [outputfile]
    )
    subprocess_call(cmd, logger=logger)
57 |
58 |
@convert_path_to_string(("videofile", "audiofile", "outputfile"))
def ffmpeg_merge_video_audio(
    videofile,
    audiofile,
    outputfile,
    video_codec="copy",
    audio_codec="copy",
    logger="bar",
):
    """Merges video file and audio file into one movie file.

    Parameters
    ----------

    videofile : str
      Path to the video file used in the merge.

    audiofile : str
      Path to the audio file used in the merge.

    outputfile : str
      Path to the output file.

    video_codec : str, optional
      Video codec used by FFmpeg in the merge ("copy" avoids re-encoding).

    audio_codec : str, optional
      Audio codec used by FFmpeg in the merge ("copy" avoids re-encoding).
    """
    cmd = [FFMPEG_BINARY, "-y"]
    cmd += ["-i", audiofile, "-i", videofile]
    cmd += ["-vcodec", video_codec, "-acodec", audio_codec]
    cmd.append(outputfile)

    subprocess_call(cmd, logger=logger)
103 |
104 |
@convert_path_to_string(("inputfile", "outputfile"))
def ffmpeg_extract_audio(inputfile, outputfile, bitrate=3000, fps=44100, logger="bar"):
    """Extract the sound from a video file and save it in ``outputfile``.

    Parameters
    ----------

    inputfile : str
      The path to the file from which the audio will be extracted.

    outputfile : str
      The path to the file to which the audio will be stored.

    bitrate : int, optional
      Bitrate for the new audio file, in kilobits per second.

    fps : int, optional
      Sample rate for the new audio file.
    """
    cmd = [FFMPEG_BINARY, "-y", "-i", inputfile]
    cmd += ["-ab", "%dk" % bitrate, "-ar", "%d" % fps, outputfile]
    subprocess_call(cmd, logger=logger)
136 |
137 |
@convert_path_to_string(("inputfile", "outputfile"))
def ffmpeg_resize(inputfile, outputfile, size, logger="bar"):
    """Resizes a file to new size and write the result in another.

    Parameters
    ----------

    inputfile : str
      Path to the file to be resized.

    outputfile : str
      Path to the output file.

    size : list or tuple
      New size in format ``[width, height]`` for the output file.
    """
    width, height = size[0], size[1]
    cmd = [
        FFMPEG_BINARY,
        "-i",
        inputfile,
        "-vf",
        "scale=%d:%d" % (width, height),
        outputfile,
    ]
    subprocess_call(cmd, logger=logger)
164 |
165 |
@convert_path_to_string(("inputfile", "outputfile", "output_dir"))
def ffmpeg_stabilize_video(
    inputfile, outputfile=None, output_dir="", overwrite_file=True, logger="bar"
):
    """
    Stabilizes ``filename`` and write the result to ``output``.

    Parameters
    ----------

    inputfile : str
      The name of the shaky video.

    outputfile : str, optional
      The name of new stabilized video. Defaults to appending '_stabilized' to
      the input file name.

    output_dir : str, optional
      The directory to place the output video in. Defaults to the current
      working directory.

    overwrite_file : bool, optional
      If ``outputfile`` already exists in ``output_dir``, then overwrite
      ``outputfile`` Defaults to True.
    """
    if not outputfile:
        without_dir = os.path.basename(inputfile)
        name, ext = os.path.splitext(without_dir)
        outputfile = f"{name}_stabilized{ext}"

    outputfile = os.path.join(output_dir, outputfile)
    cmd = [FFMPEG_BINARY, "-i", inputfile, "-vf", "deshake", outputfile]
    if overwrite_file:
        # "-y" must appear before the output file: ffmpeg ignores trailing
        # options ("Trailing option(s) found..."), so appending it after
        # `outputfile` (the previous behavior) left the overwrite prompt on.
        cmd.insert(1, "-y")
    subprocess_call(cmd, logger=logger)
201 |
--------------------------------------------------------------------------------
/moviepy/video/fx/rotate.py:
--------------------------------------------------------------------------------
1 | import math
2 | import warnings
3 |
4 | import numpy as np
5 |
6 |
try:
    import PIL

    # Per moviepy `rotate` kwarg:
    # [PIL.Image.rotate argument name,
    #  whether the installed PIL supports it,
    #  minimum PIL version required]
    PIL_rotate_kwargs_supported = {
        "fillcolor": ["bg_color", False, (5, 2, 0)],
        "center": ["center", False, (4, 0, 0)],
        "translate": ["translate", False, (4, 0, 0)],
    }

    if hasattr(PIL, "__version__"):
        # check support for PIL.rotate arguments
        # Parse "major.minor.patch" into an integer tuple. Splitting on "."
        # is required: iterating over the characters (the previous behavior)
        # breaks for multi-digit components, e.g. Pillow "10.0.0" parsed to
        # (1, 0, 0, 0, 0) instead of (10, 0, 0).
        PIL__version_info__ = tuple(
            int(n) for n in PIL.__version__.split(".") if n.isdigit()
        )

        for PIL_rotate_kw_name, support_data in PIL_rotate_kwargs_supported.items():
            if PIL__version_info__ >= support_data[2]:
                PIL_rotate_kwargs_supported[PIL_rotate_kw_name][1] = True

    Image = PIL.Image

except ImportError:  # pragma: no cover
    Image = None
31 |
32 |
def rotate(
    clip,
    angle,
    unit="deg",
    resample="bicubic",
    expand=True,
    center=None,
    translate=None,
    bg_color=None,
):
    """
    Rotates the specified clip by ``angle`` degrees (or radians) anticlockwise
    If the angle is not a multiple of 90 (degrees) or ``center``, ``translate``,
    and ``bg_color`` are not ``None``, the package ``pillow`` must be installed,
    and there will be black borders. You can make them transparent with:

    >>> new_clip = clip.add_mask().rotate(72)

    Parameters
    ----------

    clip : VideoClip
      A video clip.

    angle : float
      Either a value or a function angle(t) representing the angle of rotation.

    unit : str, optional
      Unit of parameter `angle` (either "deg" for degrees or "rad" for radians).

    resample : str, optional
      An optional resampling filter. One of "nearest", "bilinear", or "bicubic".

    expand : bool, optional
      If true, expands the output image to make it large enough to hold the
      entire rotated image. If false or omitted, make the output image the same
      size as the input image.

    translate : tuple, optional
      An optional post-rotate translation (a 2-tuple).

    center : tuple, optional
      Optional center of rotation (a 2-tuple). Origin is the upper left corner.

    bg_color : tuple, optional
      An optional color for area outside the rotated image. Only has effect if
      ``expand`` is true.

    Raises
    ------
    ValueError
      If ``resample`` is not a recognized filter name, or if Pillow is not
      installed and the requested rotation cannot be done with numpy alone.
    """
    if Image:
        # Map the filter name to the PIL resampling constant up front.
        try:
            resample = {
                "bilinear": Image.BILINEAR,
                "nearest": Image.NEAREST,
                "bicubic": Image.BICUBIC,
            }[resample]
        except KeyError:
            raise ValueError(
                "'resample' argument must be either 'bilinear', 'nearest' or 'bicubic'"
            )

    # Accept both a constant angle and a time-dependent function angle(t).
    if hasattr(angle, "__call__"):
        get_angle = angle
    else:
        get_angle = lambda t: angle

    def filter(get_frame, t):
        angle = get_angle(t)
        im = get_frame(t)

        if unit == "rad":
            angle = math.degrees(angle)

        angle %= 360
        # Fast path: right-angle rotations with no extra transformations are
        # done with numpy transposes/flips, no Pillow required.
        if not center and not translate and not bg_color:
            if (angle == 0) and expand:
                return im
            if (angle == 90) and expand:
                transpose = [1, 0] if len(im.shape) == 2 else [1, 0, 2]
                return np.transpose(im, axes=transpose)[::-1]
            elif (angle == 270) and expand:
                transpose = [1, 0] if len(im.shape) == 2 else [1, 0, 2]
                return np.transpose(im, axes=transpose)[:, ::-1]
            elif (angle == 180) and expand:
                return im[::-1, ::-1]

        if not Image:
            raise ValueError(
                'Without "Pillow" installed, only angles that are a multiple of 90'
                " without centering, translation and background color transformations"
                ' are supported, please install "Pillow" with `pip install pillow`'
            )

        # build PIL.rotate kwargs
        # locals() is captured so `bg_color`, `center` and `translate`
        # (closure variables referenced above, hence present in this frame's
        # locals) can be fetched by name from the support table.
        kwargs, _locals = ({}, locals())
        for PIL_rotate_kw_name, (
            kw_name,
            supported,
            min_version,
        ) in PIL_rotate_kwargs_supported.items():
            # get the value passed to rotate FX from `locals()` dictionary
            kw_value = _locals[kw_name]

            if supported:  # if argument supported by PIL version
                kwargs[PIL_rotate_kw_name] = kw_value
            else:
                if kw_value is not None:  # if not default value
                    warnings.warn(
                        f"rotate '{kw_name}' argument is not supported"
                        " by your Pillow version and is being ignored. Minimum"
                        " Pillow version required:"
                        f" v{'.'.join(str(n) for n in min_version)}",
                        UserWarning,
                    )

        # PIL expects uint8 type data. However a mask image has values in the
        # range [0, 1] and is of float type. To handle this we scale it up by
        # a factor 'a' for use with PIL and then back again by 'a' afterwards.
        if im.dtype == "float64":
            # this is a mask image
            a = 255.0
        else:
            a = 1

        # call PIL.rotate
        return (
            np.array(
                Image.fromarray(np.array(a * im).astype(np.uint8)).rotate(
                    angle, expand=expand, resample=resample, **kwargs
                )
            )
            / a
        )

    return clip.transform(filter, apply_to=["mask"])
167 |
--------------------------------------------------------------------------------
/moviepy/video/io/VideoFileClip.py:
--------------------------------------------------------------------------------
1 | """Implements VideoFileClip, a class for video clips creation using video files."""
2 |
3 | from moviepy.audio.io.AudioFileClip import AudioFileClip
4 | from moviepy.decorators import convert_path_to_string
5 | from moviepy.video.io.ffmpeg_reader import FFMPEG_VideoReader
6 | from moviepy.video.VideoClip import VideoClip
7 |
8 |
class VideoFileClip(VideoClip):
    """
    A video clip read from a movie file. For instance: ::

    >>> clip = VideoFileClip("myHolidays.mp4")
    >>> clip.close()
    >>> with VideoFileClip("myMaskVideo.avi") as clip2:
    >>>    pass  # Implicit close called by context manager.


    Parameters
    ----------

    filename:
        The name of the video file, as a string or a path-like object.
        Any extension supported by ffmpeg works:
        .ogv, .mp4, .mpeg, .avi, .mov etc.

    has_mask:
        Set this to 'True' if there is a mask included in the videofile.
        Video files rarely contain masks, but some video codecs enable
        that. For instance if you have a MoviePy VideoClip with a mask you
        can save it to a videofile with a mask. (see also
        ``VideoClip.write_videofile`` for more details).

    audio:
        Set to `False` if the clip doesn't have any audio or if you do not
        wish to read the audio.

    target_resolution:
        Set to (desired_width, desired_height) to have ffmpeg resize the frames
        before returning them. This is much faster than streaming in high-res
        and then resizing. If either dimension is None, the frames are resized
        by keeping the existing aspect ratio.

    resize_algorithm:
        The algorithm used for resizing. Default: "bicubic", other popular
        options include "bilinear" and "fast_bilinear". For more information, see
        https://ffmpeg.org/ffmpeg-scaler.html

    fps_source:
        The fps value to collect from the metadata. Set by default to 'fps', but
        can be set to 'tbr', which may be helpful if you are finding that it is reading
        the incorrect fps from the file.

    pixel_format
        Optional: Pixel format for the video to read. If is not specified
        'rgb24' will be used as the default format unless ``has_mask`` is set
        as ``True``, then 'rgba' will be used.


    Attributes
    ----------

    filename:
        Name of the original video file.

    fps:
        Frames per second in the original file.


    Read docs for Clip() and VideoClip() for other, more generic, attributes.

    Lifetime
    --------

    Note that this creates subprocesses and locks files. If you construct one
    of these instances, you must call close() afterwards, or the subresources
    will not be cleaned up until the process ends.

    If copies are made, and close() is called on one, it may cause methods on
    the other copies to fail.

    """

    @convert_path_to_string("filename")
    def __init__(
        self,
        filename,
        decode_file=False,
        has_mask=False,
        audio=True,
        audio_buffersize=200000,
        target_resolution=None,
        resize_algorithm="bicubic",
        audio_fps=44100,
        audio_nbytes=2,
        fps_source="fps",
        pixel_format=None,
    ):

        VideoClip.__init__(self)

        # Pick a pixel format compatible with the presence of an alpha
        # (mask) channel when none was given explicitly.
        if not pixel_format:
            pixel_format = "rgba" if has_mask else "rgb24"

        # The ffmpeg-backed reader performs the actual frame decoding.
        self.reader = FFMPEG_VideoReader(
            filename,
            decode_file=decode_file,
            pixel_format=pixel_format,
            target_resolution=target_resolution,
            resize_algo=resize_algorithm,
            fps_source=fps_source,
        )

        # Mirror the most useful reader attributes on the clip itself.
        self.filename = filename
        self.duration = self.reader.duration
        self.end = self.reader.duration
        self.fps = self.reader.fps
        self.size = self.reader.size
        self.rotation = self.reader.rotation

        if has_mask:
            # Frames arrive as RGBA: the first three channels are the image,
            # the fourth is the mask, rescaled from [0, 255] to [0, 1].
            def rgb_frame(t):
                return self.reader.get_frame(t)[:, :, :3]

            def mask_frame(t):
                return self.reader.get_frame(t)[:, :, 3] / 255.0

            self.make_frame = rgb_frame
            self.mask = VideoClip(is_mask=True, make_frame=mask_frame).with_duration(
                self.duration
            )
            self.mask.fps = self.fps
        else:

            def full_frame(t):
                return self.reader.get_frame(t)

            self.make_frame = full_frame

        # Attach the audio track, when present and requested.
        if audio and self.reader.infos["audio_found"]:
            self.audio = AudioFileClip(
                filename,
                buffersize=audio_buffersize,
                fps=audio_fps,
                nbytes=audio_nbytes,
            )

    def __deepcopy__(self, memo):
        """Implements ``copy.deepcopy(clip)`` behaviour as ``copy.copy(clip)``.

        VideoFileClip class instances can't be deeply copied because the locked
        Thread of ``proc`` isn't pickleable. Without this override, calls to
        ``copy.deepcopy(clip)`` would raise a ``TypeError``:

        ```
        TypeError: cannot pickle '_thread.lock' object
        ```
        """
        return self.__copy__()

    def close(self):
        """Close the internal reader."""
        if self.reader:
            self.reader.close()
            self.reader = None

        try:
            if self.audio:
                self.audio.close()
                self.audio = None
        except AttributeError:  # pragma: no cover
            pass
175 |
--------------------------------------------------------------------------------
/moviepy/video/io/preview.py:
--------------------------------------------------------------------------------
1 | """Video preview functions for MoviePy editor."""
2 |
3 | import threading
4 | import time
5 |
6 | import numpy as np
7 | import pygame as pg
8 |
9 | from moviepy.decorators import (
10 | convert_masks_to_RGB,
11 | convert_parameter_to_seconds,
12 | requires_duration,
13 | )
14 | from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
15 |
16 |
17 | pg.init()
18 | pg.display.set_caption("MoviePy")
19 |
20 |
def imdisplay(imarray, screen=None):
    """Splashes the given image array on the given pygame screen."""
    # pygame surfaces are (width, height) ordered while numpy frames are
    # (height, width), hence the axis swap.
    surface = pg.surfarray.make_surface(imarray.swapaxes(0, 1))
    if screen is None:
        screen = pg.display.set_mode(imarray.shape[:2][::-1])
    screen.blit(surface, (0, 0))
    pg.display.flip()
28 |
29 |
@convert_masks_to_RGB
@convert_parameter_to_seconds(["t"])
def show(clip, t=0, with_mask=True, interactive=False):
    """
    Splashes the frame of clip corresponding to time ``t``.

    Parameters
    ----------

    t : float or tuple or str, optional
        Time in seconds of the frame to display.

    with_mask : bool, optional
        ``False`` if the clip has a mask but you want to see the clip without
        the mask.

    interactive : bool, optional
        Displays the image frozen; clicking on a pixel prints its position
        and color.

    Examples
    --------

    >>> from moviepy.editor import *
    >>>
    >>> clip = VideoFileClip("media/chaplin.mp4")
    >>> clip.show(t=4, interactive=True)
    """
    # Compositing against a background applies the clip's mask, if any.
    if with_mask and clip.mask is not None:
        clip = CompositeVideoClip([clip.with_position((0, 0))])

    img = clip.get_frame(t)
    imdisplay(img)

    if not interactive:
        return

    # Frozen interactive mode: collect clicked pixels until ESC is pressed.
    result = []
    while True:
        for event in pg.event.get():
            if event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE:
                print("Keyboard interrupt")
                return result
            if event.type == pg.MOUSEBUTTONDOWN:
                x, y = pg.mouse.get_pos()
                rgb = img[y, x]
                result.append({"position": (x, y), "color": rgb})
                print("position, color : ", "%s, %s" % (str((x, y)), str(rgb)))
        time.sleep(0.03)
78 |
79 |
@requires_duration
@convert_masks_to_RGB
def preview(
    clip,
    fps=15,
    audio=True,
    audio_fps=22050,
    audio_buffersize=3000,
    audio_nbytes=2,
    fullscreen=False,
):
    """
    Displays the clip in a window, at the given frames per second (of movie)
    rate. It will avoid that the clip be played faster than normal, but it
    cannot avoid the clip to be played slower than normal if the computations
    are complex. In this case, try reducing the ``fps``.

    Parameters
    ----------

    fps : int, optional
        Number of frames per seconds in the displayed video.

    audio : bool, optional
        ``True`` (default) if you want the clip's audio be played during
        the preview.

    audio_fps : int, optional
        The frames per second to use when generating the audio sound.

    audio_buffersize : int, optional
        The sized of the buffer used generating the audio sound.

    audio_nbytes : int, optional
        The number of bytes used generating the audio sound.

    fullscreen : bool, optional
        ``True`` if you want the preview to be displayed fullscreen.

    Returns
    -------

    list
        The ``{time, position, color}`` dicts of the pixels clicked during
        playback (returned when the preview is interrupted with ESC or by
        closing the window; ``None`` if playback runs to the end).

    Examples
    --------

    >>> from moviepy.editor import *
    >>>
    >>> clip = VideoFileClip("media/chaplin.mp4")
    >>> clip.preview(fps=10, audio=False)
    """
    if fullscreen:
        flags = pg.FULLSCREEN
    else:
        flags = 0

    # compute and splash the first image
    screen = pg.display.set_mode(clip.size, flags)

    # Only attempt audio playback when the clip actually carries a track.
    audio = audio and (clip.audio is not None)

    if audio:
        # the sound will be played in parallel. We are not
        # parralellizing it on different CPUs because it seems that
        # pygame and openCV already use several cpus it seems.

        # two synchro-flags to tell whether audio and video are ready
        video_flag = threading.Event()
        audio_flag = threading.Event()
        # launch the thread
        audiothread = threading.Thread(
            target=clip.audio.preview,
            args=(audio_fps, audio_buffersize, audio_nbytes, audio_flag, video_flag),
        )
        audiothread.start()

    img = clip.get_frame(0)
    imdisplay(img, screen)
    if audio:  # synchronize with audio
        video_flag.set()  # say to the audio: video is ready
        audio_flag.wait()  # wait for the audio to be ready

    # Pixels clicked during playback are collected here.
    result = []

    t0 = time.time()
    for t in np.arange(1.0 / fps, clip.duration - 0.001, 1.0 / fps):

        img = clip.get_frame(t)

        for event in pg.event.get():
            if event.type == pg.QUIT or (
                event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE
            ):
                if audio:
                    # Clearing the flag tells the audio thread to stop.
                    video_flag.clear()
                print("Interrupt")
                pg.quit()
                return result

            elif event.type == pg.MOUSEBUTTONDOWN:
                x, y = pg.mouse.get_pos()
                rgb = img[y, x]
                result.append({"time": t, "position": (x, y), "color": rgb})
                print(
                    "time, position, color : ",
                    "%.03f, %s, %s" % (t, str((x, y)), str(rgb)),
                )

        # Pace playback against the wall clock: frame t should appear at
        # time t0 + t; sleep 0 when rendering is slower than real time.
        t1 = time.time()
        time.sleep(max(0, t - (t1 - t0)))
        imdisplay(img, screen)

    pg.quit()
189 |
--------------------------------------------------------------------------------
/moviepy/video/tools/interpolators.py:
--------------------------------------------------------------------------------
1 | """Classes for easy interpolation of trajectories and curves."""
2 |
3 | import numpy as np
4 |
5 |
class Interpolator:
    """Poorman's linear interpolator.

    Parameters
    ----------

    tt : list, optional
        List of time frames for the interpolator.

    ss : list, optional
        List of values for the interpolator.

    ttss : list, optional
        List of ``[time, value]`` pairs; may be given instead of the two
        separate ``tt`` and ``ss`` lists.

    left : float, optional
        Value to return when ``t < tt[0]``.

    right : float, optional
        Value to return when ``t > tt[-1]``.


    Examples
    --------

    >>> # instantiate using `tt` and `ss`
    >>> interpolator = Interpolator(tt=[0, 1, 2], ss=[3, 4, 5])
    >>>
    >>> # instantiate using `ttss`
    >>> interpolator = Interpolator(ttss=[[0, 3], [1, 4], [2, 5]])  # [t, value]
    """

    def __init__(self, tt=None, ss=None, ttss=None, left=None, right=None):
        # A single list of (t, value) pairs takes precedence over tt/ss.
        if ttss is not None:
            tt, ss = zip(*ttss)

        # Stored as float arrays so interpolation is exact for int inputs.
        self.tt = np.array(tt) * 1.0
        self.ss = np.array(ss) * 1.0
        self.left = left
        self.right = right
        self.tmin = min(tt)
        self.tmax = max(tt)

    def __call__(self, t):
        """Interpolates ``t``.

        Parameters
        ----------

        t : float
            Time frame for which the correspondent value will be returned.
        """
        return np.interp(t, self.tt, self.ss, self.left, self.right)
61 |
62 |
class Trajectory:
    """Trajectory compound by time frames and (x, y) pixels.

    It's designed as an interpolator, so you can get the position at a given
    time ``t``. You can instantiate it from a file using the methods
    ``from_file`` and ``load_list``.


    Parameters
    ----------

    tt : list or numpy.ndarray
        Time frames.

    xx : list or numpy.ndarray
        X positions in the trajectory.

    yy : list or numpy.ndarray
        Y positions in the trajectory.


    Examples
    --------

    >>> trajectory = Trajectory([0, .166, .333], [554, 474, 384], [100, 90, 91])
    """

    def __init__(self, tt, xx, yy):
        # Times are stored as floats; positions keep their incoming dtype.
        self.tt = np.array(tt) * 1.0
        self.xx = np.array(xx)
        self.yy = np.array(yy)
        self.update_interpolators()

    def __call__(self, t):
        """Interpolates the trajectory at the given time ``t``.

        Parameters
        ----------

        t : float
            Time for which to the corresponding position will be returned.
        """
        return np.array([self.xi(t), self.yi(t)])

    def addx(self, x):
        """Returns a new trajectory with all X positions shifted by ``x``.

        Parameters
        ----------

        x : int
            Value added to ``xx`` in the trajectory.
        """
        return Trajectory(self.tt, self.xx + x, self.yy)

    def addy(self, y):
        """Returns a new trajectory with all Y positions shifted by ``y``.

        Parameters
        ----------

        y : int
            Value added to ``yy`` in the trajectory.
        """
        return Trajectory(self.tt, self.xx, self.yy + y)

    def update_interpolators(self):
        """Rebuilds the internal X and Y interpolators from current data."""
        self.xi = Interpolator(self.tt, self.xx)
        self.yi = Interpolator(self.tt, self.yy)

    def txy(self, tms=False):
        """Iterates over ``(t, x, y)`` triplets of the trajectory.

        Parameters
        ----------

        tms : bool, optional
            If is ``True``, the time will be returned in milliseconds.
        """
        time_factor = 1000 if tms else 1
        return zip(time_factor * self.tt, self.xx, self.yy)

    def to_file(self, filename):
        """Saves the trajectory data in a tab-separated text file.

        Parameters
        ----------

        filename : str
            Path to the location of the new trajectory text file.
        """
        rows = np.array(list(self.txy(tms=True)))
        np.savetxt(filename, rows, fmt="%d", delimiter="\t")

    @staticmethod
    def from_file(filename):
        """Instantiates a Trajectory from a data text file (times in ms).

        Parameters
        ----------

        filename : str
            Path to the location of trajectory text file to load.
        """
        tt, xx, yy = np.loadtxt(filename, delimiter="\t").T
        return Trajectory(1.0 * tt / 1000, xx, yy)

    @staticmethod
    def save_list(trajs, filename):
        """Saves a set of trajectories side by side into one text file.

        Parameters
        ----------

        trajs : list
            List of trajectories to be saved.

        filename : str
            Path of the text file that will store the trajectories data.
        """
        N = len(trajs)
        # Columns of each trajectory are stacked horizontally: t, x, y, t, x, y, ...
        arr = np.hstack([np.array(list(t.txy(tms=True))) for t in trajs])
        np.savetxt(
            filename,
            arr,
            fmt="%d",
            delimiter="\t",
            header="\t".join(N * ["t(ms)", "x", "y"]),
        )

    @staticmethod
    def load_list(filename):
        """Loads a list of trajectories from a data text file.

        Parameters
        ----------

        filename : str
            Path of the text file that stores the data of a set of trajectories.
        """
        arr = np.loadtxt(filename, delimiter="\t").T
        Nlines = arr.shape[0]
        # Every group of three rows (t, x, y) is one trajectory.
        return [
            Trajectory(tt=1.0 * a[0] / 1000, xx=a[1], yy=a[2])
            for a in np.split(arr, Nlines / 3)
        ]
239 |
--------------------------------------------------------------------------------
/moviepy/video/tools/subtitles.py:
--------------------------------------------------------------------------------
1 | """Experimental module for subtitles support."""
2 |
3 | import re
4 |
5 | import numpy as np
6 |
7 | from moviepy.decorators import convert_path_to_string
8 | from moviepy.tools import convert_to_seconds
9 | from moviepy.video.VideoClip import TextClip, VideoClip
10 |
11 |
class SubtitlesClip(VideoClip):
    """A Clip that serves as "subtitle track" in videos.

    One particularity of this class is that the images of the
    subtitle texts are not generated beforehand, but only if
    needed.

    Parameters
    ----------

    subtitles
        Either the name of a file as a string or path-like object, or a list
        of ``((start_time, end_time), text)`` entries.

    make_textclip
        Optional callable taking a subtitle text and returning the clip used
        to render it. Defaults to a white, black-stroked ``TextClip``.

    encoding
        Optional, specifies srt file encoding.
        Any standard Python encoding is allowed (listed at
        https://docs.python.org/3.8/library/codecs.html#standard-encodings)

    Examples
    --------

    >>> from moviepy.video.tools.subtitles import SubtitlesClip
    >>> from moviepy.video.io.VideoFileClip import VideoFileClip
    >>> generator = lambda text: TextClip(text, font='Georgia-Regular',
    ...                                   font_size=24, color='white')
    >>> sub = SubtitlesClip("subtitles.srt", generator)
    >>> sub = SubtitlesClip("subtitles.srt", generator, encoding='utf-8')
    >>> myvideo = VideoFileClip("myvideo.avi")
    >>> final = CompositeVideoClip([clip, subtitles])
    >>> final.write_videofile("final.mp4", fps=myvideo.fps)

    """

    def __init__(self, subtitles, make_textclip=None, encoding=None):

        VideoClip.__init__(self, has_constant_size=False)

        if not isinstance(subtitles, list):
            # `subtitles` is a string or path-like object
            subtitles = file_to_subtitles(subtitles, encoding=encoding)

        self.subtitles = subtitles
        # Cache of rendered text clips keyed by the ((start, end), text)
        # entry, so each subtitle image is generated at most once.
        self.textclips = dict()

        if make_textclip is None:
            # Default renderer: white text with a thin black stroke.
            def make_textclip(txt):
                return TextClip(
                    txt,
                    font="Georgia-Bold",
                    font_size=24,
                    color="white",
                    stroke_color="black",
                    stroke_width=0.5,
                )

        self.make_textclip = make_textclip
        self.start = 0
        # The track lasts until the last subtitle's end time.
        self.duration = max([tb for ((ta, tb), txt) in self.subtitles])
        self.end = self.duration

        def add_textclip_if_none(t):
            """Generate the textclip active at ``t`` if it hasn't been
            generated yet. If there is no subtitle to show at t, return
            False.
            """
            # First look among the already-rendered clips...
            sub = [
                ((text_start, text_end), text)
                for ((text_start, text_end), text) in self.textclips.keys()
                if (text_start <= t < text_end)
            ]
            if not sub:
                # ...then fall back to the raw subtitle list.
                sub = [
                    ((text_start, text_end), text)
                    for ((text_start, text_end), text) in self.subtitles
                    if (text_start <= t < text_end)
                ]
                if not sub:
                    return False
            sub = sub[0]
            if sub not in self.textclips.keys():
                self.textclips[sub] = self.make_textclip(sub[1])

            return sub

        def make_frame(t):
            # Render the active subtitle, or a 1x1 black pixel when idle.
            sub = add_textclip_if_none(t)
            return self.textclips[sub].get_frame(t) if sub else np.array([[[0, 0, 0]]])

        def make_mask_frame(t):
            sub = add_textclip_if_none(t)
            return self.textclips[sub].mask.get_frame(t) if sub else np.array([[0]])

        self.make_frame = make_frame
        # Probe the renderer once to find out whether its clips carry a mask.
        hasmask = bool(self.make_textclip("T").mask)
        self.mask = VideoClip(make_mask_frame, is_mask=True) if hasmask else None

    def in_subclip(self, start_time=None, end_time=None):
        """Returns a sequence of [(t1,t2), text] covering all the given subclip
        from start_time to end_time. The first and last times will be cropped so as
        to be exactly start_time and end_time if possible.
        """

        def is_in_subclip(t1, t2):
            # A subtitle is kept when it overlaps [start_time, end_time).
            try:
                return (start_time <= t1 < end_time) or (start_time < t2 <= end_time)
            except Exception:
                # start_time/end_time may be None: treat as "not in range".
                return False

        def try_cropping(t1, t2):
            # Clamp the subtitle interval to the subclip bounds if possible.
            try:
                return max(t1, start_time), min(t2, end_time)
            except Exception:
                return t1, t2

        return [
            (try_cropping(t1, t2), txt)
            for ((t1, t2), txt) in self.subtitles
            if is_in_subclip(t1, t2)
        ]

    def __iter__(self):
        # Iterating the clip iterates its ((start, end), text) entries.
        return iter(self.subtitles)

    def __getitem__(self, k):
        return self.subtitles[k]

    def __str__(self):
        def to_srt(sub_element):
            (start_time, end_time), text = sub_element
            formatted_start_time = convert_to_seconds(start_time)
            formatted_end_time = convert_to_seconds(end_time)
            return "%s - %s\n%s" % (formatted_start_time, formatted_end_time, text)

        return "\n\n".join(to_srt(sub) for sub in self.subtitles)

    def match_expr(self, expr):
        """Matches a regular expression against the subtitles of the clip."""
        # NOTE(review): the returned clip falls back to the default renderer —
        # a custom ``make_textclip`` given to __init__ is not propagated here;
        # confirm whether that is intended.
        return SubtitlesClip(
            [sub for sub in self.subtitles if re.findall(expr, sub[1]) != []]
        )

    def write_srt(self, filename):
        """Writes an ``.srt`` file with the content of the clip."""
        with open(filename, "w+") as file:
            file.write(str(self))
160 |
161 |
@convert_path_to_string("filename")
def file_to_subtitles(filename, encoding=None):
    """Converts a srt file into subtitles.

    The returned list is of the form ``[((start_time,end_time),'some text'),...]``
    and can be fed to SubtitlesClip.

    Only works for '.srt' format for the moment.

    Parameters
    ----------

    filename : str
        Path to the ``.srt`` subtitles file.

    encoding : str, optional
        File encoding; any standard Python codec name is allowed.
    """
    times_texts = []
    current_times = None
    current_text = ""
    with open(filename, "r", encoding=encoding) as file:
        for line in file:
            times = re.findall("([0-9]*:[0-9]*:[0-9]*,[0-9]*)", line)
            if times:
                # A timing line, e.g. "00:00:01,000 --> 00:00:02,000".
                current_times = [convert_to_seconds(t) for t in times]
            elif line.strip() == "":
                # A blank line closes the current subtitle block. Only flush
                # when a timing line was actually seen: this avoids emitting
                # bogus ``(None, '')`` entries for leading or repeated blank
                # lines (a defect in the previous version).
                if current_times:
                    times_texts.append((current_times, current_text.strip("\n")))
                current_times, current_text = None, ""
            elif current_times:
                current_text += line
    # Files that do not end with a blank line would otherwise silently lose
    # their last subtitle: flush any pending entry.
    if current_times:
        times_texts.append((current_times, current_text.strip("\n")))
    return times_texts
185 |
--------------------------------------------------------------------------------
/moviepy/audio/io/ffmpeg_audiowriter.py:
--------------------------------------------------------------------------------
1 | """MoviePy audio writing with ffmpeg."""
2 |
3 | import subprocess as sp
4 |
5 | import proglog
6 |
7 | from moviepy.config import FFMPEG_BINARY
8 | from moviepy.decorators import requires_duration
9 | from moviepy.tools import cross_platform_popen_params
10 |
11 |
class FFMPEG_AudioWriter:
    """
    A class to write an AudioClip into an audio file.

    Parameters
    ----------

    filename
        Name of any video or audio file, like ``video.mp4`` or ``sound.wav`` etc.

    fps_input
        Frames per second of the input audio (given by the AudioClip being
        written down).

    nbytes
        Number of bytes per sample of the raw PCM input (e.g. 2 for 16-bit).

    nchannels
        Number of audio channels of the input.

    codec
        Name of the ffmpeg codec to use for the output.

    bitrate:
        A string indicating the bitrate of the final video. Only
        relevant for codecs which accept a bitrate.

    input_video
        Optional path of a video file whose video stream ffmpeg should copy
        alongside the written audio.

    logfile
        Optional open file object receiving ffmpeg's stderr; defaults to a
        pipe read back on error.

    ffmpeg_params
        Optional list of extra command-line arguments passed to ffmpeg.
    """

    def __init__(
        self,
        filename,
        fps_input,
        nbytes=2,
        nchannels=2,
        codec="libfdk_aac",
        bitrate=None,
        input_video=None,
        logfile=None,
        ffmpeg_params=None,
    ):
        if logfile is None:
            logfile = sp.PIPE
        self.logfile = logfile
        self.filename = filename
        self.codec = codec
        # File extension, used only for error diagnostics.
        self.ext = self.filename.split(".")[-1]

        # order is important
        # Raw PCM samples are piped on stdin ("-i -") and encoded to `filename`.
        cmd = [
            FFMPEG_BINARY,
            "-y",
            "-loglevel",
            "error" if logfile == sp.PIPE else "info",
            "-f",
            "s%dle" % (8 * nbytes),
            "-acodec",
            "pcm_s%dle" % (8 * nbytes),
            "-ar",
            "%d" % fps_input,
            "-ac",
            "%d" % nchannels,
            "-i",
            "-",
        ]
        if input_video is None:
            # No video stream in the output.
            cmd.extend(["-vn"])
        else:
            # Copy the video stream of `input_video` untouched.
            cmd.extend(["-i", input_video, "-vcodec", "copy"])

        cmd.extend(["-acodec", codec] + ["-ar", "%d" % fps_input])
        cmd.extend(["-strict", "-2"])  # needed to support codec 'aac'
        if bitrate is not None:
            cmd.extend(["-ab", bitrate])
        if ffmpeg_params is not None:
            cmd.extend(ffmpeg_params)
        cmd.extend([filename])

        popen_params = cross_platform_popen_params(
            {"stdout": sp.DEVNULL, "stderr": logfile, "stdin": sp.PIPE}
        )

        self.proc = sp.Popen(cmd, **popen_params)

    def write_frames(self, frames_array):
        """Pipes a chunk of audio samples (a numpy array) to ffmpeg.

        Raises
        ------
        IOError
            If ffmpeg terminated with an error; the message is augmented with
            hints about the most common causes (unknown codec, incompatible
            codec/extension pairing, bad bitrate).
        """
        try:
            self.proc.stdin.write(frames_array.tobytes())
        except IOError as err:
            _, ffmpeg_error = self.proc.communicate()
            if ffmpeg_error is not None:
                ffmpeg_error = ffmpeg_error.decode()
            else:
                # The error was redirected to a logfile with `write_logfile=True`,
                # so read the error from that file instead
                self.logfile.seek(0)
                ffmpeg_error = self.logfile.read()

            error = (
                f"{err}\n\nMoviePy error: FFMPEG encountered the following error while "
                f"writing file {self.filename}:\n\n {ffmpeg_error}"
            )

            if "Unknown encoder" in ffmpeg_error:
                error += (
                    "\n\nThe audio export failed because FFMPEG didn't find the "
                    f"specified codec for audio encoding {self.codec}. "
                    "Please install this codec or change the codec when calling "
                    "write_videofile or write_audiofile.\nFor instance for mp3:\n"
                    "   >>> write_videofile('myvid.mp4', audio_codec='libmp3lame')"
                )

            elif "incorrect codec parameters ?" in ffmpeg_error:
                error += (
                    "\n\nThe audio export failed, possibly because the "
                    f"codec specified for the video {self.codec} is not compatible"
                    f" with the given extension {self.ext}. Please specify a "
                    "valid 'codec' argument in write_audiofile or 'audio_codoc'"
                    "argument in write_videofile. This would be "
                    "'libmp3lame' for mp3, 'libvorbis' for ogg..."
                )

            elif "bitrate not specified" in ffmpeg_error:
                error += (
                    "\n\nThe audio export failed, possibly because the "
                    "bitrate you specified was too high or too low for "
                    "the audio codec."
                )

            elif "Invalid encoder type" in ffmpeg_error:
                error += (
                    "\n\nThe audio export failed because the codec "
                    "or file extension you provided is not suitable for audio"
                )

            raise IOError(error)

    def close(self):
        """Closes the writer, terminating the subprocess if is still alive."""
        if hasattr(self, "proc") and self.proc:
            # Closing stdin signals end-of-stream; ffmpeg then finalizes the file.
            self.proc.stdin.close()
            self.proc.stdin = None
            if self.proc.stderr is not None:
                self.proc.stderr.close()
                self.proc.stderr = None
            # If this causes deadlocks, consider terminating instead.
            self.proc.wait()
            self.proc = None

    def __del__(self):
        # If the garbage collector comes, make sure the subprocess is terminated.
        self.close()

    # Support the Context Manager protocol, to ensure that resources are cleaned up.

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
169 |
170 |
@requires_duration
def ffmpeg_audiowrite(
    clip,
    filename,
    fps,
    nbytes,
    buffersize,
    codec="libvorbis",
    bitrate=None,
    write_logfile=False,
    ffmpeg_params=None,
    logger="bar",
):
    """
    A function that wraps the FFMPEG_AudioWriter to write an AudioClip
    to a file.

    Parameters
    ----------

    clip
        The AudioClip to be written (its chunks are piped to ffmpeg).

    filename
        Path of the output audio file.

    fps
        Sample rate (frames per second) of the written audio.

    nbytes
        Number of bytes per audio sample.

    buffersize
        Number of samples per chunk fed to the writer.

    codec
        Name of the ffmpeg audio codec to use.

    bitrate
        Optional bitrate string for codecs that accept one.

    write_logfile
        If ``True``, ffmpeg output is written to ``filename + ".log"``.

    ffmpeg_params
        Optional list of extra ffmpeg command-line arguments.

    logger
        Either ``"bar"`` for a progress bar, or a proglog logger, or ``None``.
    """
    if write_logfile:
        logfile = open(filename + ".log", "w+")
    else:
        logfile = None
    logger = proglog.default_bar_logger(logger)
    logger(message="MoviePy - Writing audio in %s" % filename)
    writer = FFMPEG_AudioWriter(
        filename,
        fps,
        nbytes,
        clip.nchannels,
        codec=codec,
        bitrate=bitrate,
        logfile=logfile,
        ffmpeg_params=ffmpeg_params,
    )

    # Robustness fix: the previous version leaked the ffmpeg subprocess and
    # the open logfile when write_frames raised; always release them.
    try:
        for chunk in clip.iter_chunks(
            chunksize=buffersize, quantize=True, nbytes=nbytes, fps=fps, logger=logger
        ):
            writer.write_frames(chunk)
    finally:
        writer.close()
        if write_logfile:
            logfile.close()

    logger(message="MoviePy - Done.")
215 |
--------------------------------------------------------------------------------
/moviepy/video/compositing/CompositeVideoClip.py:
--------------------------------------------------------------------------------
1 | """Main video composition interface of MoviePy."""
2 |
3 | import numpy as np
4 | from PIL import Image
5 |
6 | from moviepy.audio.AudioClip import CompositeAudioClip
7 | from moviepy.video.VideoClip import ColorClip, VideoClip
8 |
9 |
class CompositeVideoClip(VideoClip):
    """
    A VideoClip made of other videoclips displayed together. This is the
    base class for most compositions.

    Parameters
    ----------

    size
        The size (width, height) of the final clip.

    clips
        A list of videoclips.

        Clips with a higher ``layer`` attribute will be displayed
        on top of other clips in a lower layer.
        If two or more clips share the same ``layer``,
        then the one appearing latest in ``clips`` will be displayed
        on top (i.e. it has the higher layer).

        For each clip:

        - The attribute ``pos`` determines where the clip is placed.
          See ``VideoClip.set_pos``
        - The mask of the clip determines which parts are visible.

        Finally, if all the clips in the list have their ``duration``
        attribute set, then the duration of the composite video clip
        is computed automatically

    bg_color
        Color for the unmasked and unfilled regions. Set to None for these
        regions to be transparent (will be slower).

    use_bgclip
        Set to True if the first clip in the list should be used as the
        'background' on which all other clips are blitted. That first clip must
        have the same size as the final clip. If it has no transparency, the final
        clip will have no mask.

    The clip with the highest FPS will be the FPS of the composite clip.

    """

    def __init__(
        self, clips, size=None, bg_color=None, use_bgclip=False, is_mask=False
    ):

        if size is None:
            size = clips[0].size

        # The composite needs a mask when the background can show through:
        # either no opaque background clip, or a transparent bg_color.
        if use_bgclip and (clips[0].mask is None):
            transparent = False
        else:
            transparent = bg_color is None

        if bg_color is None:
            bg_color = 0.0 if is_mask else (0, 0, 0)

        # The composite inherits the highest fps among its children.
        fpss = [clip.fps for clip in clips if getattr(clip, "fps", None)]
        self.fps = max(fpss) if fpss else None

        VideoClip.__init__(self)

        self.size = size
        self.is_mask = is_mask
        self.clips = clips
        self.bg_color = bg_color

        if use_bgclip:
            self.bg = clips[0]
            self.clips = clips[1:]
            self.created_bg = False
        else:
            self.clips = clips
            self.bg = ColorClip(size, color=self.bg_color, is_mask=is_mask)
            self.created_bg = True

        # order self.clips by layer
        self.clips = sorted(self.clips, key=lambda clip: clip.layer)

        # compute duration
        ends = [clip.end for clip in self.clips]
        if None not in ends:
            duration = max(ends)
            self.duration = duration
            self.end = duration

        # compute audio
        audioclips = [v.audio for v in self.clips if v.audio is not None]
        if audioclips:
            self.audio = CompositeAudioClip(audioclips)

        # compute mask if necessary
        if transparent:
            maskclips = [
                (clip.mask if (clip.mask is not None) else clip.add_mask().mask)
                .with_position(clip.pos)
                .with_end(clip.end)
                .with_start(clip.start, change_end=False)
                .with_layer(clip.layer)
                for clip in self.clips
            ]

            self.mask = CompositeVideoClip(
                maskclips, self.size, is_mask=True, bg_color=0.0
            )

    def make_frame(self, t):
        """The clips playing at time `t` are blitted over one another.

        Returns the composed frame as a numpy array.
        """
        frame = self.bg.get_frame(t).astype("uint8")
        im = Image.fromarray(frame)

        if self.bg.mask is not None:
            frame_mask = self.bg.mask.get_frame(t)
            # Mask frames are floats in [0, 1]: scale to 8-bit before handing
            # them to PIL (``Image.fromarray`` cannot build an image from a
            # float64 array).
            im_mask = Image.fromarray((255 * frame_mask).astype("uint8")).convert("L")
            # Bug fix: ``Image.putalpha`` mutates the image in place and
            # returns None, so its result must NOT be assigned back to ``im``
            # (the previous version clobbered the frame with None here).
            im.putalpha(im_mask)

        for clip in self.playing_clips(t):
            im = clip.blit_on(im, t)

        return np.array(im)

    def playing_clips(self, t=0):
        """Returns a list of the clips in the composite clips that are
        actually playing at the given time `t`.
        """
        return [clip for clip in self.clips if clip.is_playing(t)]

    def close(self):
        """Closes the instance, releasing all the resources."""
        if self.created_bg and self.bg:
            # Only close the background clip if it was locally created.
            # Otherwise, it remains the job of whoever created it.
            self.bg.close()
            self.bg = None
        if hasattr(self, "audio") and self.audio:
            self.audio.close()
            self.audio = None
149 |
150 |
def clips_array(array, rows_widths=None, cols_heights=None, bg_color=None):
    """Given a matrix whose rows are clips, creates a CompositeVideoClip where
    all clips are placed side by side horizontally for each clip in each row
    and one row on top of the other for each row. So given next matrix of clips
    with same size:

    ```python
    clips_array([[clip1, clip2, clip3], [clip4, clip5, clip6]])
    ```

    the result will be a CompositeVideoClip with a layout displayed like:

    ```
    ┏━━━━━━━┳━━━━━━━┳━━━━━━━┓
    ┃       ┃       ┃       ┃
    ┃ clip1 ┃ clip2 ┃ clip3 ┃
    ┃       ┃       ┃       ┃
    ┣━━━━━━━╋━━━━━━━╋━━━━━━━┫
    ┃       ┃       ┃       ┃
    ┃ clip4 ┃ clip5 ┃ clip6 ┃
    ┃       ┃       ┃       ┃
    ┗━━━━━━━┻━━━━━━━┻━━━━━━━┛
    ```

    If some clips don't fulfill the space required by the rows or columns
    in which they are placed, that space will be filled by the color defined
    in ``bg_color``.

    array
      Matrix of clips included in the returned composited video clip.

    rows_widths
      Widths of the different rows in pixels. If ``None``, is set automatically.

    cols_heights
      Heights of the different columns in pixels. If ``None``, is set automatically.

    bg_color
      Fill color for the masked and unfilled regions. Set to ``None`` for these
      regions to be transparent (processing will be slower).
    """
    array = np.array(array)
    # sizes_array[i][j] holds the (width, height) of the clip at row i, col j.
    sizes_array = np.array([[clip.size for clip in line] for line in array])

    # find row width and col_widths automatically if not provided
    # NOTE(review): despite the names, ``rows_widths`` holds the max *height*
    # (size index 1) of each row and ``cols_heights`` the max *width* (size
    # index 0) of each column; the names look swapped but the values are used
    # consistently below, so behavior is correct.
    if rows_widths is None:
        rows_widths = sizes_array[:, :, 1].max(axis=1)
    if cols_heights is None:
        cols_heights = sizes_array[:, :, 0].max(axis=0)

    # compute start positions of X for rows and Y for columns
    xs = np.cumsum([0] + list(cols_heights))
    ys = np.cumsum([0] + list(rows_widths))

    for j, (x, ch) in enumerate(zip(xs[:-1], cols_heights)):
        for i, (y, rw) in enumerate(zip(ys[:-1], rows_widths)):
            clip = array[i, j]
            w, h = clip.size
            # if clip not fulfill row width or column height
            if (w < ch) or (h < rw):
                # Center the undersized clip inside a cell-sized composite so
                # every cell of the grid ends up with the same dimensions.
                clip = CompositeVideoClip(
                    [clip.with_position("center")], size=(ch, rw), bg_color=bg_color
                ).with_duration(clip.duration)

            array[i, j] = clip.with_position((x, y))

    return CompositeVideoClip(array.flatten(), size=(xs[-1], ys[-1]), bg_color=bg_color)
218 |
--------------------------------------------------------------------------------
/moviepy/video/tools/tracking.py:
--------------------------------------------------------------------------------
1 | """
2 | Contains different functions for tracking objects in videos, manually or automatically.
3 | The tracking functions return results under the form: ``( txy, (fx,fy) )`` where txy
4 | is of the form [(ti, xi, yi)...] and (fx(t),fy(t)) give the position of the track for
5 | all times t (if the time t is out of the time bounds of the tracking time interval fx
6 | and fy return the position of the object at the start or at the end of the tracking time
7 | interval).
8 | """
9 |
10 | import numpy as np
11 |
12 | from moviepy.decorators import convert_parameter_to_seconds, use_clip_fps_by_default
13 | from moviepy.video.io.preview import imdisplay
14 | from moviepy.video.tools.interpolators import Trajectory
15 |
16 |
17 | try:
18 | import cv2
19 |
20 | autotracking_possible = True
21 | except Exception:
22 | # Note: this will be later fixed with scipy/skimage replacements
23 | # but for the moment OpenCV is mandatory, so...
24 | autotracking_possible = False
25 |
26 |
@convert_parameter_to_seconds(["t1", "t2"])
@use_clip_fps_by_default
def manual_tracking(clip, t1=None, t2=None, fps=None, n_objects=1, savefile=None):
    """Manual tracking of objects in videoclips using the mouse.

    Allows manual tracking of an object(s) in the video clip between
    times `t1` and `t2`. This displays the clip frame by frame
    and you must click on the object(s) in each frame. If ``t2=None``
    only the frame at ``t1`` is taken into account.

    Returns a list ``[(t1, x1, y1), (t2, x2, y2)...]`` if there is one
    object per frame, else returns a list whose elements are of the
    form ``(ti, [(xi1, yi1), (xi2, yi2)...])``.


    Parameters
    ----------

    clip : video.VideoClip.VideoClip
      MoviePy video clip to track.

    t1 : float or str or tuple, optional
      Start time to track (defaults is start of the clip). Can be expressed
      in seconds like ``15.35``, in ``(min, sec)``, in ``(hour, min, sec)``,
      or as a string: ``"01:03:05.35"``.

    t2 : float or str or tuple, optional
      End time to track (defaults is end of the clip). Can be expressed
      in seconds like ``15.35``, in ``(min, sec)``, in ``(hour, min, sec)``,
      or as a string: ``"01:03:05.35"``.

    fps : int, optional
      Number of frames per second to freeze on. If None, the clip's
      fps attribute is used instead.

    n_objects : int, optional
      Number of objects to click on each frame.

    savefile : str, optional
      If provided, the result is saved to a file, which makes it easier to edit
      and re-use later.


    Examples
    --------

    >>> from moviepy import VideoFileClip
    >>> from moviepy.video.tools.tracking import manual_tracking
    >>>
    >>> clip = VideoFileClip("media/chaplin.mp4")
    >>>
    >>> # manually indicate 3 trajectories, save them to a file
    >>> trajectories = manual_tracking(clip, t1=5, t2=7, fps=5,
    ...                                n_objects=3, savefile="track.text")
    >>>
    >>> # ...
    >>> # later, in another script, recover these trajectories
    >>> from moviepy.video.tools.tracking import Trajectory
    >>>
    >>> traj1, traj2, traj3 = Trajectory.load_list('track.text')
    >>>
    >>> # If ever you only have one object being tracked, recover it with
    >>> traj, = Trajectory.load_list('track.text')
    """
    import pygame as pg

    screen = pg.display.set_mode(clip.size)
    step = 1.0 / fps
    if (t1 is None) and (t2 is None):
        t1, t2 = 0, clip.duration
    elif t2 is None:
        # Track a single frame: make the interval just long enough for one step.
        t2 = t1 + step / 2
    t = t1
    txy_list = []

    def gatherClicks(t):
        """Collect one mouse click per tracked object for the frame at ``t``."""
        imdisplay(clip.get_frame(t), screen)
        objects_to_click = n_objects
        clicks = []
        while objects_to_click:

            for event in pg.event.get():

                if event.type == pg.KEYDOWN:
                    # Backslash undoes the previous frame; Escape aborts.
                    if event.key == pg.K_BACKSLASH:
                        return "return"
                    elif event.key == pg.K_ESCAPE:
                        raise KeyboardInterrupt()

                elif event.type == pg.MOUSEBUTTONDOWN:
                    x, y = pg.mouse.get_pos()
                    clicks.append((x, y))
                    objects_to_click -= 1

        return clicks

    while t < t2:

        clicks = gatherClicks(t)
        if clicks == "return":
            # Undo: drop the last recorded frame and step back in time.
            txy_list.pop()
            t -= step
        else:
            txy_list.append((t, clicks))
            t += step

    # Transpose the per-frame click lists into one Trajectory per object.
    tt, xylist = zip(*txy_list)
    result = []
    for i in range(n_objects):
        xys = [e[i] for e in xylist]
        xx, yy = zip(*xys)
        result.append(Trajectory(tt, xx, yy))

    if savefile is not None:
        Trajectory.save_list(result, savefile)
    return result
144 |
145 |
def findAround(pic, pat, xy=None, r=None):
    """Find an image pattern in a picture optionally defining bounds to search.

    The pattern is searched for in ``pic[x +/- r, y +/- r]`` when both ``xy``
    and ``r`` are given, otherwise in the whole picture.

    Parameters
    ----------

    pic : numpy.ndarray
      Image where the pattern will be searched.

    pat : numpy.ndarray
      Pattern to search inside the image.

    xy : tuple or list, optional
      Position to search for the pattern. Use it in combination with ``radius``
      parameter to define the bounds of the search. If is ``None``, consider
      the whole picture.

    r : float, optional
      Radius used to define the bounds of the search when ``xy`` argument is
      defined.
    """
    if xy and r:
        h, w = pat.shape[:2]
        x, y = xy
        # Restrict the search to a window of the pattern size padded by r
        # around (x, y).
        # NOTE(review): no clamping is applied; if x - r or y - r is negative
        # the slice wraps around — confirm callers keep the window in bounds.
        pic = pic[y - r : y + h + r, x - r : x + w + r]

    # NOTE(review): cv2.matchTemplate's signature is (image, templ, method);
    # here the pattern is passed first — verify the argument order against
    # the OpenCV documentation before relying on this in new code.
    matches = cv2.matchTemplate(pat, pic, cv2.TM_CCOEFF_NORMED)
    yf, xf = np.unravel_index(matches.argmax(), matches.shape)
    # Translate window-relative coordinates back to full-picture coordinates.
    return (x - r + xf, y - r + yf) if (xy and r) else (xf, yf)
177 |
178 |
def autoTrack(clip, pattern, tt=None, fps=None, radius=20, xy0=None):
    """Tracks a given pattern (small image array) in a video clip.

    Returns ``[(x1, y1), (x2, y2)...]`` where ``(xi, yi)`` are the coordinates
    of the pattern in the clip on frame ``i``. To select the frames you can
    either specify a list of times with ``tt`` or select a frame rate with
    ``fps``.

    This algorithm assumes that the pattern's aspect does not vary much and
    that the distance between two occurrences of the pattern in two consecutive
    frames is smaller than ``radius`` (if you set ``radius`` to -1 the pattern
    will be searched in the whole screen at each frame). You can also provide
    the original position of the pattern with xy0.

    Parameters
    ----------

    clip : video.VideoClip.VideoClip
      MoviePy video clip to track.

    pattern : numpy.ndarray
      Image to search inside the clip frames.

    tt : numpy.ndarray, optional
      Time frames used for auto tracking. As default is used the clip time
      frames according to its fps.

    fps : int, optional
      Overwrites fps value used computing time frames. As default, clip's fps.
      Required when ``tt`` is not provided.

    radius : int, optional
      Maximum radius to search looking for the pattern. Set to ``-1``,
      the pattern will be searched in the whole screen at each frame.

    xy0 : tuple or list, optional
      Original position of the pattern. If not provided, will be taken from the
      first tracked frame of the clip.
    """
    if not autotracking_possible:
        raise IOError(
            "Sorry, autotrack requires OpenCV for the moment. "
            "Install OpenCV (aka cv2) to use it."
        )

    # Build the default time vector BEFORE it is indexed below: the previous
    # code read ``tt[0]`` first, which raised a TypeError whenever ``tt`` was
    # omitted together with ``xy0``.
    if tt is None:
        tt = np.arange(0, clip.duration, 1.0 / fps)

    if not xy0:
        xy0 = findAround(clip.get_frame(tt[0]), pattern)

    # Track frame by frame, searching around the previous match.
    xys = [xy0]
    for t in tt[1:]:
        xys.append(findAround(clip.get_frame(t), pattern, xy=xys[-1], r=radius))

    xx, yy = zip(*xys)

    return Trajectory(tt, xx, yy)
236 |
--------------------------------------------------------------------------------
/moviepy/video/fx/resize.py:
--------------------------------------------------------------------------------
1 | import numbers
2 |
3 |
4 | def _get_cv2_resizer():
5 | try:
6 | import cv2
7 | except ImportError:
8 | return (None, ["OpenCV not found (install 'opencv-python')"])
9 |
10 | def resizer(pic, new_size):
11 | lx, ly = int(new_size[0]), int(new_size[1])
12 | if lx > pic.shape[1] or ly > pic.shape[0]:
13 | # For upsizing use linear for good quality & decent speed
14 | interpolation = cv2.INTER_LINEAR
15 | else:
16 | # For dowsizing use area to prevent aliasing
17 | interpolation = cv2.INTER_AREA
18 | return cv2.resize(+pic.astype("uint8"), (lx, ly), interpolation=interpolation)
19 |
20 | return (resizer, [])
21 |
22 |
23 | def _get_PIL_resizer():
24 | try:
25 | from PIL import Image
26 | except ImportError:
27 | return (None, ["PIL not found (install 'Pillow')"])
28 |
29 | import numpy as np
30 |
31 | def resizer(pic, new_size):
32 | new_size = list(map(int, new_size))[::-1]
33 | # shape = pic.shape
34 | # if len(shape) == 3:
35 | # newshape = (new_size[0], new_size[1], shape[2])
36 | # else:
37 | # newshape = (new_size[0], new_size[1])
38 |
39 | pil_img = Image.fromarray(pic)
40 | resized_pil = pil_img.resize(new_size[::-1], Image.ANTIALIAS)
41 | # arr = np.fromstring(resized_pil.tostring(), dtype="uint8")
42 | # arr.reshape(newshape)
43 | return np.array(resized_pil)
44 |
45 | return (resizer, [])
46 |
47 |
48 | def _get_scipy_resizer():
49 | try:
50 | from scipy.misc import imresize
51 | except ImportError:
52 | try:
53 | from scipy import __version__ as __scipy_version__
54 | except ImportError:
55 | return (None, ["Scipy not found (install 'scipy' or 'Pillow')"])
56 |
57 | scipy_version_info = tuple(
58 | int(num) for num in __scipy_version__.split(".") if num.isdigit()
59 | )
60 |
61 | # ``scipy.misc.imresize`` was removed in v1.3.0
62 | if scipy_version_info >= (1, 3, 0):
63 | return (
64 | None,
65 | [
66 | "scipy.misc.imresize not found (was removed in scipy v1.3.0,"
67 | f" you are using v{__scipy_version__}, install 'Pillow')"
68 | ],
69 | )
70 |
71 | # unknown reason
72 | return (None, "scipy.misc.imresize not found")
73 |
74 | def resizer(pic, new_size):
75 | return imresize(pic, map(int, new_size[::-1]))
76 |
77 | return (resizer, [])
78 |
79 |
def _get_resizer():
    """Tries to define a ``resizer`` function using next libraries, in the given
    order:

    - cv2
    - PIL
    - scipy

    Returns a dictionary with following attributes:

    - ``resizer``: Function used to resize images in ``resize`` FX function.
    - ``origin``: Library used to resize.
    - ``error_msgs``: If none of the libraries is available, explains to the
      user why this feature is not available and how to fix it, in several
      error messages which are formatted in the error displayed when resizing
      is not possible.
    """
    error_messages = []

    # Try the backends in order of preference; the first one available wins.
    resizer_getters = {
        "cv2": _get_cv2_resizer,
        "PIL": _get_PIL_resizer,
        "scipy": _get_scipy_resizer,
    }
    for origin, resizer_getter in resizer_getters.items():
        resizer, _error_messages = resizer_getter()
        if resizer is not None:
            return {"resizer": resizer, "origin": origin, "error_msgs": []}
        else:
            error_messages.extend(_error_messages)

    # Materialize the reversed order as a list: the previous code returned a
    # one-shot ``reversed`` iterator, which could only be consumed once.
    return {
        "resizer": None,
        "origin": None,
        "error_msgs": list(reversed(error_messages)),
    }
111 |
112 |
# Pick the best available resizing backend at import time. When one exists,
# expose it at module level as ``resizer`` and tag it with the library it
# came from; the error messages are then irrelevant and are dropped.
resizer = None
_resizer_data = _get_resizer()
if _resizer_data["resizer"] is not None:
    resizer = _resizer_data["resizer"]
    resizer.origin = _resizer_data["origin"]
    del _resizer_data["error_msgs"]
119 |
120 |
def resize(clip, new_size=None, height=None, width=None, apply_to_mask=True):
    """Returns a video clip that is a resized version of the clip.

    Parameters
    ----------

    new_size : tuple or float or function, optional
      Can be either
        - ``(width, height)`` in pixels or a float representing
        - A scaling factor, like ``0.5``.
        - A function of time returning one of these.

    width : int, optional
      Width of the new clip in pixels. The height is then computed so
      that the width/height ratio is conserved.

    height : int, optional
      Height of the new clip in pixels. The width is then computed so
      that the width/height ratio is conserved.

    apply_to_mask : bool, optional
      Whether the resizing should also be applied to the clip's mask
      (defaults to True).

    Examples
    --------

    >>> myClip.resize( (460,720) ) # New resolution: (460,720)
    >>> myClip.resize(0.6) # width and height multiplied by 0.6
    >>> myClip.resize(width=800) # height computed automatically.
    >>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
    """
    w, h = clip.size

    if new_size is not None:

        def translate_new_size(new_size_):
            """Returns a [w, h] pair from `new_size_`. If `new_size_` is a
            scalar, then work out the correct pair using the clip's size.
            Otherwise just return `new_size_`
            """
            if isinstance(new_size_, numbers.Number):
                return [new_size_ * w, new_size_ * h]
            else:
                return new_size_

        if hasattr(new_size, "__call__"):
            # The resizing is a function of time

            def get_new_size(t):
                return translate_new_size(new_size(t))

            if clip.is_mask:
                # Mask frames hold floats in [0, 1]: scale to uint8 for the
                # backend, then back to the [0, 1] range.

                def filter(get_frame, t):
                    return (
                        resizer((255 * get_frame(t)).astype("uint8"), get_new_size(t))
                        / 255.0
                    )

            else:

                def filter(get_frame, t):
                    return resizer(get_frame(t).astype("uint8"), get_new_size(t))

            newclip = clip.transform(
                filter, keep_duration=True, apply_to=(["mask"] if apply_to_mask else [])
            )
            # Resize the mask with a recursive call so the float/uint8
            # conversion above is applied; avoid re-resizing its own mask.
            if apply_to_mask and clip.mask is not None:
                newclip.mask = resize(clip.mask, new_size, apply_to_mask=False)

            return newclip

        else:
            new_size = translate_new_size(new_size)

    elif height is not None:

        if hasattr(height, "__call__"):
            # Time-varying height: express it as a time-varying scale factor
            # and recurse through the function-of-time branch above.

            def func(t):
                return 1.0 * int(height(t)) / h

            return resize(clip, func)

        else:
            # Conserve the aspect ratio: scale the width accordingly.
            new_size = [w * height / h, height]

    elif width is not None:

        if hasattr(width, "__call__"):

            def func(t):
                return 1.0 * width(t) / w

            return resize(clip, func)

        else:
            # Conserve the aspect ratio: scale the height accordingly.
            new_size = [width, h * width / w]
    else:
        raise ValueError("You must provide either 'new_size' or 'height' or 'width'")

    # From here, the resizing is constant (not a function of time), size=newsize

    if clip.is_mask:

        def image_filter(pic):
            return 1.0 * resizer((255 * pic).astype("uint8"), new_size) / 255.0

    else:

        def image_filter(pic):
            return resizer(pic.astype("uint8"), new_size)

    new_clip = clip.image_transform(image_filter)

    if apply_to_mask and clip.mask is not None:
        new_clip.mask = resize(clip.mask, new_size, apply_to_mask=False)

    return new_clip
237 |
238 |
# When no resizing backend could be found, replace ``resize`` with a stub
# that fails loudly at call time, while keeping the real docstring so that
# introspection and generated docs still describe the FX.
if resizer is None:
    del resizer

    doc = resize.__doc__

    def resize(clip, new_size=None, height=None, width=None):
        """Fallback resize FX function, if OpenCV, Scipy and PIL are not installed.

        This docstring will be replaced at runtime.
        """
        fix_tips = "- " + "\n- ".join(_resizer_data["error_msgs"])
        raise ImportError(f"fx resize needs OpenCV or Scipy or PIL\n{fix_tips}")

    resize.__doc__ = doc

# The per-backend bookkeeping is no longer needed at module level.
del _resizer_data["origin"], _resizer_data["resizer"]
255 |
--------------------------------------------------------------------------------
/moviepy/video/io/html_tools.py:
--------------------------------------------------------------------------------
1 | """Implements ``ipython_display``, a function to embed images/videos/audio in the
2 | IPython Notebook.
3 | """
4 |
5 | # Notes:
6 | # All media are physically embedded in the IPython Notebook
7 | # (instead of simple links to the original files)
8 | # That is because most browsers use a cache system and they won't
9 | # properly refresh the media when the original files are changed.
10 |
11 | import inspect
12 | import os
13 | from base64 import b64encode
14 |
15 | from moviepy.audio.AudioClip import AudioClip
16 | from moviepy.tools import extensions_dict
17 | from moviepy.video.io.ffmpeg_reader import ffmpeg_parse_infos
18 | from moviepy.video.VideoClip import ImageClip, VideoClip
19 |
20 |
try:  # pragma: no cover
    from IPython.display import HTML

    ipython_available = True

    # HTML subclass supporting concatenation with ``+`` so several embedded
    # elements can be combined and displayed in a single notebook cell.
    class HTML2(HTML):  # noqa D101
        def __add__(self, other):
            return HTML2(self.data + other.data)


except ImportError:

    # Fallback when IPython is absent: pass the HTML string through untouched.
    def HTML2(content):  # noqa D103
        return content

    ipython_available = False
37 |
38 |
sorry = "Sorry, seems like your browser doesn't support HTML5 audio/video"
# HTML5 embedding templates. Each is %-formatted with ``data`` (the base64
# payload), ``options`` (extra tag attributes) and ``ext`` (the media
# subtype). The tag markup below was reconstructed: the literals had been
# garbled (tags stripped) in this copy of the file.
templates = {
    "audio": (
        "<audio controls>"
        "<source %(options)s src='data:audio/%(ext)s;base64,%(data)s'>"
        + sorry
        + "</audio>"
    ),
    "image": "<img %(options)s src='data:image/%(ext)s;base64,%(data)s'>",
    "video": (
        "<video %(options)s src='data:video/%(ext)s;base64,%(data)s' controls>"
        + sorry
        + "</video>"
    ),
}
53 |
54 |
def html_embed(
    clip, filetype=None, maxduration=60, rd_kwargs=None, center=True, **html_kwargs
):
    """Returns HTML5 code embedding the clip.

    Parameters
    ----------

    clip : moviepy.Clip.Clip
      Either a file name, or a clip to preview.
      Either an image, a sound or a video. Clips will actually be
      written to a file and embedded as if a filename was provided.

    filetype : str, optional
      One of 'video','image','audio'. If None is given, it is determined
      based on the extension of ``filename``, but this can bug.

    maxduration : float, optional
      An error will be raised if the clip's duration is more than the indicated
      value (in seconds), to avoid spoiling the browser's cache and the RAM.

    rd_kwargs : dict, optional
      Keyword arguments for the rendering, like ``dict(fps=15, bitrate="50k")``.
      Allow you to give some options to the render process. You can, for
      example, disable the logger bar passing ``dict(logger=None)``.

    center : bool, optional
      If true (default), the content will be wrapped in a
      ``<div align=middle>`` HTML container, so the content will be
      displayed at the center.

    html_kwargs
      Allow you to give some options, like ``width=260``, ``autoplay=True``,
      ``loop=1`` etc.

    Examples
    --------

    >>> from moviepy.editor import *
    >>> # later ...
    >>> html_embed(clip, width=360)
    >>> html_embed(clip.audio)

    >>> clip.write_gif("test.gif")
    >>> html_embed('test.gif')

    >>> clip.save_frame("first_frame.jpeg")
    >>> html_embed("first_frame.jpeg")
    """
    if rd_kwargs is None:  # pragma: no cover
        rd_kwargs = {}

    if "Clip" in str(clip.__class__):
        # The argument is a clip object: render it to a temporary file first,
        # then recurse with that filename.
        TEMP_PREFIX = "__temp__"
        if isinstance(clip, ImageClip):
            filename = TEMP_PREFIX + ".png"
            kwargs = {"filename": filename, "with_mask": True}
            # Only forward the rendering options that save_frame understands.
            argnames = inspect.getfullargspec(clip.save_frame).args
            kwargs.update(
                {key: value for key, value in rd_kwargs.items() if key in argnames}
            )
            clip.save_frame(**kwargs)
        elif isinstance(clip, VideoClip):
            filename = TEMP_PREFIX + ".mp4"
            kwargs = {"filename": filename, "preset": "ultrafast"}
            kwargs.update(rd_kwargs)
            clip.write_videofile(**kwargs)
        elif isinstance(clip, AudioClip):
            filename = TEMP_PREFIX + ".mp3"
            kwargs = {"filename": filename}
            kwargs.update(rd_kwargs)
            clip.write_audiofile(**kwargs)
        else:
            raise ValueError("Unknown class for the clip. Cannot embed and preview.")

        return html_embed(
            filename,
            maxduration=maxduration,
            rd_kwargs=rd_kwargs,
            center=center,
            **html_kwargs,
        )

    filename = clip
    # Render the extra HTML attributes, e.g. width=260 -> "width='260'".
    options = " ".join(["%s='%s'" % (str(k), str(v)) for k, v in html_kwargs.items()])
    name, ext = os.path.splitext(filename)
    ext = ext[1:]

    if filetype is None:
        ext = filename.split(".")[-1].lower()
        if ext == "gif":
            filetype = "image"
        elif ext in extensions_dict:
            filetype = extensions_dict[ext]["type"]
        else:
            raise ValueError(
                "No file type is known for the provided file. Please provide "
                "argument `filetype` (one of 'image', 'video', 'sound') to the "
                "ipython display function."
            )

    if filetype == "video":
        # The next lines set the HTML5-compatible extension and check that the
        # extension is HTML5-valid
        exts_htmltype = {"mp4": "mp4", "webm": "webm", "ogv": "ogg"}
        allowed_exts = " ".join(exts_htmltype.keys())
        try:
            ext = exts_htmltype[ext]
        except Exception:
            raise ValueError(
                "This video extension cannot be displayed in the "
                "IPython Notebook. Allowed extensions: " + allowed_exts
            )

    if filetype in ["audio", "video"]:
        duration = ffmpeg_parse_infos(filename, decode_file=True)["duration"]
        if duration > maxduration:
            raise ValueError(
                (
                    "The duration of video %s (%.1f) exceeds the 'maxduration'"
                    " attribute. You can increase 'maxduration', by passing"
                    " 'maxduration' parameter to ipython_display function."
                    " But note that embedding large videos may take all the memory"
                    " away!"
                )
                % (filename, duration)
            )

    # Physically embed the media as base64 data, so a browser cache cannot
    # serve a stale version of the file.
    with open(filename, "rb") as file:
        data = b64encode(file.read()).decode("utf-8")

    template = templates[filetype]

    result = template % {"data": data, "options": options, "ext": ext}
    if center:
        result = r"<div align=middle> %s </div>" % result

    return result
193 |
194 |
def ipython_display(
    clip,
    filetype=None,
    maxduration=60,
    t=None,
    fps=None,
    rd_kwargs=None,
    center=True,
    **html_kwargs,
):
    """Displays clip content in an IPython Notebook.

    Remarks: If your browser doesn't support HTML5, this should warn you.
    If nothing is displayed, maybe your file or filename is wrong.
    Important: The media will be physically embedded in the notebook.

    Parameters
    ----------

    clip : moviepy.Clip.Clip
      Either the name of a file, or a clip to preview. The clip will actually
      be written to a file and embedded as if a filename was provided.

    filetype : str, optional
      One of ``"video"``, ``"image"`` or ``"audio"``. If None is given, it is
      determined based on the extension of ``filename``, but this can bug.

    maxduration : float, optional
      An error will be raised if the clip's duration is more than the indicated
      value (in seconds), to avoid spoiling the browser's cache and the RAM.

    t : float, optional
      If not None, only the frame at time t will be displayed in the notebook,
      instead of a video of the clip.

    fps : int, optional
      Enables to specify an fps, as required for clips whose fps is unknown.

    rd_kwargs : dict, optional
      Keyword arguments for the rendering, like ``dict(fps=15, bitrate="50k")``.
      Allow you to give some options to the render process. You can, for
      example, disable the logger bar passing ``dict(logger=None)``.

    center : bool, optional
      If true (default), the content will be wrapped in a
      ``<div align=middle>`` HTML container, so the content will be
      displayed at the center.

    kwargs
      Allow you to give some options, like ``width=260``, etc. When editing
      looping gifs, a good choice is ``loop=1, autoplay=1``.

    Examples
    --------

    >>> from moviepy.editor import *
    >>> # later ...
    >>> clip.ipython_display(width=360)
    >>> clip.audio.ipython_display()

    >>> clip.write_gif("test.gif")
    >>> ipython_display('test.gif')

    >>> clip.save_frame("first_frame.jpeg")
    >>> ipython_display("first_frame.jpeg")
    """
    # Guard clause: embedding only makes sense inside a notebook.
    if not ipython_available:
        raise ImportError("Only works inside an IPython Notebook")

    if rd_kwargs is None:
        rd_kwargs = {}
    if fps is not None:
        rd_kwargs["fps"] = fps

    # When a single time is requested, preview a still image of that frame.
    if t is not None:
        clip = clip.to_ImageClip(t)

    embed_html = html_embed(
        clip,
        filetype=filetype,
        maxduration=maxduration,
        center=center,
        rd_kwargs=rd_kwargs,
        **html_kwargs,
    )
    return HTML2(embed_html)
283 |
--------------------------------------------------------------------------------
/moviepy/audio/io/readers.py:
--------------------------------------------------------------------------------
1 | """MoviePy audio reading with ffmpeg."""
2 |
3 | import subprocess as sp
4 | import warnings
5 |
6 | import numpy as np
7 |
8 | from moviepy.config import FFMPEG_BINARY
9 | from moviepy.tools import cross_platform_popen_params
10 | from moviepy.video.io.ffmpeg_reader import ffmpeg_parse_infos
11 |
12 |
13 | class FFMPEG_AudioReader:
14 | """
15 | A class to read the audio in either video files or audio files
16 | using ffmpeg. ffmpeg will read any audio and transform them into
17 | raw data.
18 |
19 | Parameters
20 | ----------
21 |
22 | filename
23 | Name of any video or audio file, like ``video.mp4`` or
24 | ``sound.wav`` etc.
25 |
26 | buffersize
27 | The size of the buffer to use. Should be bigger than the buffer
28 | used by ``write_audiofile``
29 |
30 | print_infos
31 | Print the ffmpeg infos on the file being read (for debugging)
32 |
33 | fps
34 | Desired frames per second in the decoded signal that will be
35 | received from ffmpeg
36 |
37 | nbytes
38 | Desired number of bytes (1,2,4) in the signal that will be
39 | received from ffmpeg
40 |
41 | """
42 |
    def __init__(
        self,
        filename,
        buffersize,
        decode_file=False,
        print_infos=False,
        fps=44100,
        nbytes=2,
        nchannels=2,
    ):
        # TODO bring FFMPEG_AudioReader more in line with FFMPEG_VideoReader
        # E.g. here self.pos is still 1-indexed.
        # (or have them inherit from a shared parent class)
        # NOTE(review): ``print_infos`` is not used in the visible code — it
        # appears to be kept for API compatibility; confirm before removing.
        self.filename = filename
        self.nbytes = nbytes
        self.fps = fps
        # Raw PCM format strings for ffmpeg: e.g. nbytes=2 -> "s16le" with
        # codec "pcm_s16le".
        self.format = "s%dle" % (8 * nbytes)
        self.codec = "pcm_s%dle" % (8 * nbytes)
        self.nchannels = nchannels
        # Probe the file once with ffmpeg to learn its duration and bitrate.
        infos = ffmpeg_parse_infos(filename, decode_file=decode_file)
        self.duration = infos["duration"]
        self.bitrate = infos["audio_bitrate"]
        self.infos = infos
        self.proc = None

        self.n_frames = int(self.fps * self.duration)
        # Never keep a buffer larger than the whole file.
        self.buffersize = min(self.n_frames + 1, buffersize)
        self.buffer = None
        self.buffer_startframe = 1
        # Start the ffmpeg subprocess and pre-fill the buffer around frame 1.
        self.initialize()
        self.buffer_around(1)
74 |
    def initialize(self, start_time=0):
        """Opens the file, creates the pipe.

        Starts an ffmpeg subprocess that decodes the file's audio from
        ``start_time`` (in seconds) and streams raw PCM samples to stdout.
        """
        self.close()  # if any

        if start_time != 0:
            # Seek in two steps: a fast, coarse input-side seek to
            # (start_time - offset), then an accurate output-side seek of
            # ``offset`` seconds. Much faster than decoding from the start.
            offset = min(1, start_time)
            i_arg = [
                "-ss",
                "%.05f" % (start_time - offset),
                "-i",
                self.filename,
                "-vn",
                "-ss",
                "%.05f" % offset,
            ]
        else:
            i_arg = ["-i", self.filename, "-vn"]

        # Ask ffmpeg for raw PCM on stdout at this reader's sample rate,
        # sample format and channel count.
        cmd = (
            [FFMPEG_BINARY]
            + i_arg
            + [
                "-loglevel",
                "error",
                "-f",
                self.format,
                "-acodec",
                self.codec,
                "-ar",
                "%d" % self.fps,
                "-ac",
                "%d" % self.nchannels,
                "-",
            ]
        )

        popen_params = cross_platform_popen_params(
            {
                "bufsize": self.buffersize,
                "stdout": sp.PIPE,
                "stderr": sp.PIPE,
                "stdin": sp.DEVNULL,
            }
        )

        self.proc = sp.Popen(cmd, **popen_params)

        # Current reading position, expressed in frames (samples).
        self.pos = np.round(self.fps * start_time)
123 |
def skip_chunk(self, chunksize):
    """Read and discard ``chunksize`` frames from the ffmpeg pipe,
    advancing ``self.pos`` accordingly."""
    bytes_to_skip = self.nchannels * chunksize * self.nbytes
    self.proc.stdout.read(bytes_to_skip)
    self.proc.stdout.flush()
    self.pos += chunksize
129 |
def read_chunk(self, chunksize):
    """Read ``chunksize`` frames from the ffmpeg pipe as a float array.

    Returns an array of shape ``(chunksize, nchannels)`` with samples
    scaled to [-1, 1).  If the stream ends before ``chunksize`` frames
    are available, the result is zero-padded so the buffer always has
    full length.  Advances ``self.pos`` by ``chunksize``.
    """
    # chunksize is not always an int (callers may pass floats).
    chunksize = int(round(chunksize))
    raw = self.proc.stdout.read(self.nchannels * chunksize * self.nbytes)
    data_type = {1: "int8", 2: "int16", 4: "int32"}[self.nbytes]
    # The old `hasattr(np, "frombuffer")` fallback to np.fromstring was
    # dead code: frombuffer exists in every supported numpy version, and
    # fromstring was removed in numpy 2.0.
    result = np.frombuffer(raw, dtype=data_type)
    # Scale the integer samples to floats in [-1, 1).
    result = (1.0 * result / 2 ** (8 * self.nbytes - 1)).reshape(
        (int(len(result) / self.nchannels), self.nchannels)
    )

    # Pad the read chunk with zeros when there isn't enough audio
    # left to read, so the buffer is always at full length.
    pad = np.zeros((chunksize - len(result), self.nchannels), dtype=result.dtype)
    result = np.concatenate([result, pad])
    self.pos = self.pos + chunksize
    return result
151 |
def seek(self, pos):
    """Move the reader so the next read starts at frame ``pos``.

    Seeking backwards (or jumping more than a million frames ahead)
    restarts the ffmpeg subprocess at the target time, which requires
    re-decoding.  Small forward jumps are served by reading and
    discarding frames from the pipe, which is much cheaper.
    """
    jump = pos - self.pos
    if jump < 0 or jump > 1000000:
        # Too far to skip through the pipe: restart ffmpeg there.
        self.initialize(1.0 * pos / self.fps)
    elif jump > 0:
        self.skip_chunk(jump)
    # jump == 0: already at the right position, nothing to do.
    self.pos = pos
168 |
def get_frame(self, tt):
    """Return the audio frame(s) at time(s) ``tt``.

    Parameters
    ----------
    tt
      Either a single time in seconds, or a numpy array of times.

    Returns
    -------
    For an array ``tt``: an array of shape ``(len(tt), nchannels)``
    where rows whose time falls outside [0, duration) are left at zero.
    For a scalar ``tt``: one frame of shape ``(nchannels,)``, or zeros
    when the requested time is out of range.

    Raises
    ------
    IOError
      If ``tt`` is an array and none of its times are inside the clip.
    """
    if isinstance(tt, np.ndarray):
        # lazy implementation, but should not cause problems in
        # 99.99 % of the cases

        # elements of t that are actually in the range of the
        # audio file.
        in_time = (tt >= 0) & (tt < self.duration)

        # Check that the requested time is in the valid range
        if not in_time.any():
            raise IOError(
                "Error in file %s, " % (self.filename)
                + "Accessing time t=%.02f-%.02f seconds, " % (tt[0], tt[-1])
                + "with clip duration=%f seconds, " % self.duration
            )

        # The np.round in the next line is super-important.
        # Removing it results in artifacts in the noise.
        frames = np.round((self.fps * tt)).astype(int)[in_time]
        fr_min, fr_max = frames.min(), frames.max()

        # Recenter the buffer when the requested frames fall outside it.
        if not (0 <= (fr_min - self.buffer_startframe) < len(self.buffer)):
            self.buffer_around(fr_min)
        elif not (0 <= (fr_max - self.buffer_startframe) < len(self.buffer)):
            self.buffer_around(fr_max)

        try:
            result = np.zeros((len(tt), self.nchannels))
            indices = frames - self.buffer_startframe
            result[in_time] = self.buffer[indices]
            return result

        except IndexError as error:
            # A single buffer window cannot cover every requested frame
            # (the request spans more than one buffer length): warn and
            # fall back to clamping below.
            warnings.warn(
                "Error in file %s, " % (self.filename)
                + "At time t=%.02f-%.02f seconds, " % (tt[0], tt[-1])
                + "indices wanted: %d-%d, " % (indices.min(), indices.max())
                + "but len(buffer)=%d\n" % (len(self.buffer))
                + str(error),
                UserWarning,
            )

            # repeat the last frame instead
            # NOTE(review): only indices past the end are clamped;
            # negative indices would silently wrap — confirm they
            # cannot occur here.
            indices[indices >= len(self.buffer)] = len(self.buffer) - 1
            result[in_time] = self.buffer[indices]
            return result

    else:

        ind = int(self.fps * tt)
        # NOTE(review): boundary uses ``>`` (not ``>=``), so
        # ind == n_frames is still served from the buffer — confirm
        # this off-by-one is intended.
        if ind < 0 or ind > self.n_frames:  # out of time: return 0
            return np.zeros(self.nchannels)

        if not (0 <= (ind - self.buffer_startframe) < len(self.buffer)):
            # out of the buffer: recenter the buffer
            self.buffer_around(ind)

        # read the frame in the buffer
        return self.buffer[ind - self.buffer_startframe]
230 |
def buffer_around(self, frame_number):
    """Fill the buffer with frames centered on ``frame_number`` if possible.

    When the new window overlaps the tail of the current buffer, the
    overlapping frames are kept and only the missing part is read from
    the pipe; otherwise (including when no buffer exists yet) the reader
    seeks to the window start and refills the whole buffer.  The two
    previously duplicated seek-and-refill branches are merged into one.
    """
    # First frame of the new buffer window (never negative).
    new_bufferstart = max(0, frame_number - self.buffersize // 2)

    if self.buffer is not None:
        current_f_end = self.buffer_startframe + self.buffersize
        # True when the end of the current buffer lies strictly inside
        # the new window, i.e. part of what must be read is already here.
        overlap = new_bufferstart < current_f_end < new_bufferstart + self.buffersize
    else:
        overlap = False

    if overlap:
        # Keep the conserved tail and read only the missing frames.
        conserved = current_f_end - new_bufferstart
        chunksize = self.buffersize - conserved
        array = self.read_chunk(chunksize)
        self.buffer = np.vstack([self.buffer[-conserved:], array])
    else:
        # No usable overlap: seek and refill from scratch.
        self.seek(new_bufferstart)
        self.buffer = self.read_chunk(self.buffersize)

    self.buffer_startframe = new_bufferstart
255 |
def close(self):
    """Close the reader: terminate the ffmpeg subprocess if it is still
    alive, close its pipes, and reap it."""
    proc = self.proc
    if proc:
        if proc.poll() is None:
            # Still running: ask it to stop before closing the pipes.
            proc.terminate()
        proc.stdout.close()
        proc.stderr.close()
        proc.wait()
        self.proc = None
265 |
def __del__(self):
    # Safety net: if the reader is garbage-collected without an explicit
    # close(), make sure the ffmpeg subprocess is terminated and reaped.
    self.close()
269 |
--------------------------------------------------------------------------------